From ab210dac933185367485ee8e0efbe239283462bf Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Tue, 30 May 2023 13:18:13 +0200 Subject: [PATCH 01/64] data_sink with preliminary streaming callback/polling --- include/data_sink.hpp | 155 ++++++++++++++++++++++++++++++++++++++++++ test/CMakeLists.txt | 1 + test/qa_data_sink.cpp | 154 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 310 insertions(+) create mode 100644 include/data_sink.hpp create mode 100644 test/qa_data_sink.cpp diff --git a/include/data_sink.hpp b/include/data_sink.hpp new file mode 100644 index 00000000..ab3181d5 --- /dev/null +++ b/include/data_sink.hpp @@ -0,0 +1,155 @@ +#ifndef GNURADIO_DATA_SINK_HPP +#define GNURADIO_DATA_SINK_HPP + +#include "circular_buffer.hpp" +#include "node.hpp" +#include "tag.hpp" + +namespace fair::graph { + +enum class acquisition_mode { + Continuous, + Triggered, + PostMortem +}; + +enum class blocking_mode { + NonBlocking, + Blocking +}; + +template +class data_sink : public node> { +public: + IN in; + std::size_t n_samples_consumed = 0; + std::size_t n_samples_max = -1; + int64_t last_tag_position = -1; + float sample_rate = -1.0f; + + static constexpr std::size_t listener_buffer_size = 65536; + + struct poller { + std::atomic finished = false; + std::atomic drop_count = 0; + gr::circular_buffer buffer = gr::circular_buffer(listener_buffer_size); + decltype(buffer.new_reader()) reader = buffer.new_reader(); + + template + [[nodiscard]] bool process(Handler fnc) { + const auto available = reader.available(); + if (available == 0) { + return false; + } + + const auto read_data = reader.get(available); + fnc(read_data); + reader.consume(available); + return true; + } + }; + +private: + struct listener { + acquisition_mode mode = acquisition_mode::Triggered; + std::pair window; ///< window of data to return in relation to the matching tag position, e.g. 
[-2000, 3000] to obtain 2000 presamples and 3000 postsamples + std::size_t history_size = 0; + bool block = false; + int64_t drop_count = 0; + std::function trigger_predicate; + gr::circular_buffer buffer; + std::optional pending; ///< number of samples expected due to a previous trigger + std::function)> callback; // TODO we might want to pass back stats here like drop_count + std::weak_ptr polling_handler; + }; + + struct { + std::atomic dirty = false; + std::mutex mutex; + std::vector list; + } pending_listeners; + + std::vector listeners; + +public: + std::shared_ptr get_streaming_poller(blocking_mode block = blocking_mode::NonBlocking) { + auto handler = std::make_shared(); + pending_listeners.list.push_back({ + .mode = acquisition_mode::Continuous, + .block = block == blocking_mode::Blocking, + .buffer = gr::circular_buffer(0), + .polling_handler = handler + }); + pending_listeners.dirty = true; + return handler; + } + + template + void register_streaming_callback(Callback callback) { + std::lock_guard lg(pending_listeners.mutex); + pending_listeners.list.push_back({ + .mode = acquisition_mode::Continuous, + .buffer = gr::circular_buffer(0), + .callback = std::move(callback) + }); + pending_listeners.dirty = true; + } + + [[nodiscard]] work_return_t work() { + auto &in_port = input_port<"in">(this); + auto &reader = in_port.streamReader(); + + const auto n_readable = std::min(reader.available(), in_port.max_buffer_size()); + if (n_readable == 0) { + return fair::graph::work_return_t::INSUFFICIENT_INPUT_ITEMS; + } + + const auto noutput_items = std::min(listener_buffer_size, n_readable); + const auto in_data = reader.get(noutput_items); + + if (pending_listeners.dirty) { + std::lock_guard lg(pending_listeners.mutex); + listeners = pending_listeners.list; + pending_listeners.dirty = false; + } + + for (auto &listener : listeners) { + if (listener.mode == acquisition_mode::Continuous) { + if (auto poller = listener.polling_handler.lock()) { + auto writer = poller->buffer.new_writer(); + const auto read_data = reader.get(noutput_items); + if (listener.block) { + auto write_data = writer.reserve_output_range(noutput_items); + std::copy(read_data.begin(), read_data.end(), write_data.begin()); + write_data.publish(write_data.size()); + } else { + const auto can_write = writer.available(); + const auto to_write = std::min(read_data.size(), can_write); + poller->drop_count += read_data.size() - can_write; + if (to_write > 0) { + auto write_data = writer.reserve_output_range(to_write); + std::copy(read_data.begin(), read_data.begin() + to_write - 1, write_data.begin()); + write_data.publish(write_data.size()); + } + } + } else if (listener.callback) { + listener.callback(in_data); + } + } + } + + n_samples_consumed += noutput_items; + + if (!reader.consume(noutput_items)) { + return work_return_t::ERROR; + } + + return work_return_t::OK; + } +}; + +} + +ENABLE_REFLECTION_FOR_TEMPLATE_FULL((typename T), (fair::graph::data_sink), in, n_samples_consumed, n_samples_max, last_tag_position, sample_rate); + +#endif diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 5baae9d6..d12e5820 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -28,6 +28,7 @@ function(add_app_test TEST_NAME) endfunction() add_ut_test(qa_buffer) +add_ut_test(qa_data_sink) add_ut_test(qa_dynamic_port) add_ut_test(qa_dynamic_node) add_ut_test(qa_hier_node) diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp new file mode 100644 index 00000000..0ad5d4cf --- /dev/null +++ b/test/qa_data_sink.cpp @@ -0,0 
+1,154 @@ +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#if defined(__clang__) && __clang_major__ >= 16 +// clang 16 does not like ut's default reporter_junit due to some issues with stream buffers and output redirection +template<> +auto boost::ut::cfg = boost::ut::runner>{}; +#endif + +namespace fair::graph::data_sink_test { + +template +struct Source : public node> { + OUT out; + std::int32_t n_samples_produced = 0; + std::int32_t n_samples_max = 1024; + std::int32_t n_tag_offset = 0; + float sample_rate = 1000.0f; + T next_value = {}; + + void + init(const tag_t::map_type &old_settings, const tag_t::map_type &new_settings) { + // optional init function that is called after construction and whenever settings change + fair::graph::publish_tag(out, { { "n_samples_max", n_samples_max } }, n_tag_offset); + } + + constexpr std::int64_t + available_samples(const Source &self) noexcept { + const auto ret = static_cast(n_samples_max - n_samples_produced); + return ret > 0 ? ret : -1; // '-1' -> DONE, produced enough samples + } + + [[nodiscard]] constexpr T + process_one() noexcept { + n_samples_produced++; + return next_value++; + } +}; + +} // namespace fair::graph::data_sink_test + +ENABLE_REFLECTION_FOR_TEMPLATE_FULL((typename T), (fair::graph::data_sink_test::Source), out, n_samples_produced, n_samples_max, n_tag_offset, sample_rate); + +const boost::ut::suite DataSinkTests = [] { + using namespace boost::ut; + using namespace fair::graph; + using namespace fair::graph::data_sink_test; + + "callback continuous mode"_test = [] { + graph flow_graph; + constexpr std::int32_t n_samples = gr::util::round_up(1'000'000, 1024); + + auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); + auto &sink = flow_graph.make_node>(); + expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); + + std::size_t samples_seen = 0; + auto callback = [&samples_seen](std::span buffer) { + for (std::size_t i = 0; i < buffer.size(); ++i) { + expect(eq(buffer[i], static_cast(samples_seen + i))); + } + samples_seen += buffer.size(); + }; + + sink.register_streaming_callback(callback); + + fair::graph::scheduler::simple sched{std::move(flow_graph)}; + sched.work(); + + expect(eq(sink.n_samples_consumed, n_samples)); + expect(eq(samples_seen, n_samples)); + }; + + "blocking polling continuous mode"_test = [] { + constexpr std::int32_t n_samples = gr::util::round_up(1'000'000, 1024); + + graph flow_graph; + auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); + auto &sink = flow_graph.make_node>(); + expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); + + std::atomic samples_seen = 0; + + auto poller = sink.get_streaming_poller(blocking_mode::Blocking); + + auto polling = std::async([poller, &samples_seen] { + while (!poller->finished) { + [[maybe_unused]] poller->process([&samples_seen](const auto &data) { + samples_seen += data.size(); + }); + } + }); + + fair::graph::scheduler::simple sched{std::move(flow_graph)}; + sched.work(); + + poller->finished = true; // TODO this should be done by the block + + polling.wait(); + + expect(eq(sink.n_samples_consumed, n_samples)); + expect(eq(samples_seen.load(), n_samples)); + expect(eq(poller->drop_count.load(), 0)); + }; + + "non-blocking polling continuous mode"_test = [] { + constexpr std::int32_t n_samples = gr::util::round_up(1'000'000, 1024); + + graph flow_graph; + auto &src = flow_graph.make_node>({ { 
"n_samples_max", n_samples } }); + auto &sink = flow_graph.make_node>(); + expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); + + std::atomic samples_seen = 0; + + auto poller = sink.get_streaming_poller(); + + auto polling = std::async([poller, &samples_seen] { + while (!poller->finished) { + using namespace std::chrono_literals; + std::this_thread::sleep_for(20ms); + [[maybe_unused]] poller->process([&samples_seen](const auto &data) { + samples_seen += data.size(); + }); + } + }); + + fair::graph::scheduler::simple sched{std::move(flow_graph)}; + sched.work(); + + poller->finished = true; // TODO this should be done by the block + + polling.wait(); + + expect(eq(sink.n_samples_consumed, n_samples)); + expect(lt(samples_seen.load(), n_samples)); + expect(gt(poller->drop_count.load(), 0)); + }; +}; + +int +main() { /* tests are statically executed */ +} From 45a90701d530c483f9a2eafe2091e2b65207f6c4 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Tue, 6 Jun 2023 22:23:08 +0200 Subject: [PATCH 02/64] Add data_sink_registry Still a singleton, for now. Also need to find out where to do the registration, as the name isn't known in the ctor (see set_name). --- include/data_sink.hpp | 78 ++++++++++++++++++++++++++++++++++++++++++- test/qa_data_sink.cpp | 20 +++++++++-- 2 files changed, 94 insertions(+), 4 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index ab3181d5..64a2165e 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -5,6 +5,8 @@ #include "node.hpp" #include "tag.hpp" +#include + namespace fair::graph { enum class acquisition_mode { @@ -18,7 +20,9 @@ enum class blocking_mode { Blocking }; -template +class data_sink_registry; + +template class data_sink : public node> { public: IN in; @@ -72,6 +76,14 @@ class data_sink : public node> { std::vector listeners; public: + // TODO sink should register itself on construction, but name is set afterwards via + // set_name, which we have no hook into. Maybe the registration should be done by the + // graph creating/destroying the sink instead? + + ~data_sink() { + R::instance().unregister_sink(this); + } + std::shared_ptr get_streaming_poller(blocking_mode block = blocking_mode::NonBlocking) { auto handler = std::make_shared(); pending_listeners.list.push_back({ @@ -148,6 +160,70 @@ class data_sink : public node> { } }; +class data_sink_registry { + std::mutex mutex; + std::unordered_map sinks; + +public: + // TODO this shouldn't be a singleton but associated with the flow graph (?) + static data_sink_registry& instance() { + static data_sink_registry s_instance; + return s_instance; + } + + template + void register_sink(data_sink *sink) { + std::lock_guard lg{mutex}; + sinks[std::string(sink->name())] = sink; + } + + template + void unregister_sink(data_sink *sink) { + std::lock_guard lg{mutex}; + const auto it = sinks.find(std::string(sink->name())); + try { + if (it != sinks.end() && std::any_cast*>(it->second) == sink) { + sinks.erase(it); + } + } catch (...) { + } + } + + template + std::shared_ptr::poller> get_streaming_poller(std::string_view name, blocking_mode block = blocking_mode::NonBlocking) { + std::lock_guard lg{mutex}; + auto sink = get_typed_sink(name); + return sink ? 
sink->get_streaming_poller(block) : nullptr; + } + + template + bool register_streaming_callback(std::string_view name, Callback callback) { + std::lock_guard lg{mutex}; + auto sink = get_typed_sink(name); + if (!sink) { + return false; + } + + sink->register_streaming_callback(std::move(callback)); + return true; + } + +private: + template + data_sink* get_typed_sink(std::string_view name) { + const auto it = sinks.find(std::string(name)); + if (it == sinks.end()) { + return {}; + } + + try { + return std::any_cast*>(it->second); + } catch (...) { + return {}; + } + } +}; + } ENABLE_REFLECTION_FOR_TEMPLATE_FULL((typename T), (fair::graph::data_sink), in, n_samples_consumed, n_samples_max, last_tag_position, sample_rate); diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index 0ad5d4cf..b86feebb 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -63,6 +63,8 @@ const boost::ut::suite DataSinkTests = [] { auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); auto &sink = flow_graph.make_node>(); + sink.set_name("test_sink"); + data_sink_registry::instance().register_sink(&sink); // TODO this should be done elsewhere expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); std::size_t samples_seen = 0; @@ -73,7 +75,7 @@ const boost::ut::suite DataSinkTests = [] { samples_seen += buffer.size(); }; - sink.register_streaming_callback(callback); + expect(data_sink_registry::instance().register_streaming_callback("test_sink", callback)); fair::graph::scheduler::simple sched{std::move(flow_graph)}; sched.work(); @@ -88,11 +90,15 @@ const boost::ut::suite DataSinkTests = [] { graph flow_graph; auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); auto &sink = flow_graph.make_node>(); + sink.set_name("test_sink"); + data_sink_registry::instance().register_sink(&sink); // TODO this should be done elsewhere + expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); std::atomic samples_seen = 0; - auto poller = sink.get_streaming_poller(blocking_mode::Blocking); + auto poller = data_sink_registry::instance().get_streaming_poller("test_sink", blocking_mode::Blocking); + expect(poller); auto polling = std::async([poller, &samples_seen] { while (!poller->finished) { @@ -120,13 +126,21 @@ const boost::ut::suite DataSinkTests = [] { graph flow_graph; auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); auto &sink = flow_graph.make_node>(); + sink.set_name("test_sink"); + data_sink_registry::instance().register_sink(&sink); // TODO this should be done elsewhere + expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); std::atomic samples_seen = 0; - auto poller = sink.get_streaming_poller(); + auto invalid_type_poller = data_sink_registry::instance().get_streaming_poller("test_sink"); + expect(!invalid_type_poller); + + auto poller = data_sink_registry::instance().get_streaming_poller("test_sink"); + expect(poller); auto polling = std::async([poller, &samples_seen] { + expect(poller.get() != nullptr); while (!poller->finished) { using namespace std::chrono_literals; std::this_thread::sleep_for(20ms); From d52b91e30a752d47bd8f939a8aafad1aeeb171f8 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Wed, 7 Jun 2023 11:37:48 +0200 Subject: [PATCH 03/64] Registry: Query sinks when requesting callback/poller --- include/data_sink.hpp | 47 ++++++++++++++++++++++++++----------------- test/qa_data_sink.cpp | 8 +++----- 2 files changed, 32 
insertions(+), 23 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index 64a2165e..bd1f8afa 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -80,6 +80,10 @@ class data_sink : public node> { // set_name, which we have no hook into. Maybe the registration should be done by the // graph creating/destroying the sink instead? + data_sink() { + R::instance().register_sink(this); + } + ~data_sink() { R::instance().unregister_sink(this); } @@ -162,7 +166,7 @@ class data_sink : public node> { class data_sink_registry { std::mutex mutex; - std::unordered_map sinks; + std::vector sinks; public: // TODO this shouldn't be a singleton but associated with the flow graph (?) @@ -174,32 +178,32 @@ class data_sink_registry { template void register_sink(data_sink *sink) { std::lock_guard lg{mutex}; - sinks[std::string(sink->name())] = sink; + sinks.push_back(sink); } template void unregister_sink(data_sink *sink) { std::lock_guard lg{mutex}; - const auto it = sinks.find(std::string(sink->name())); - try { - if (it != sinks.end() && std::any_cast*>(it->second) == sink) { - sinks.erase(it); + std::erase_if(sinks, [sink](const std::any &v) { + try { + return std::any_cast *>(v) == sink; + } catch (...) { + return false; } - } catch (...) { - } + }); } template std::shared_ptr::poller> get_streaming_poller(std::string_view name, blocking_mode block = blocking_mode::NonBlocking) { std::lock_guard lg{mutex}; - auto sink = get_typed_sink(name); + auto sink = find_sink(name); return sink ? sink->get_streaming_poller(block) : nullptr; } template bool register_streaming_callback(std::string_view name, Callback callback) { std::lock_guard lg{mutex}; - auto sink = get_typed_sink(name); + auto sink = find_sink(name); if (!sink) { return false; } @@ -210,17 +214,24 @@ class data_sink_registry { private: template - data_sink* get_typed_sink(std::string_view name) { - const auto it = sinks.find(std::string(name)); + data_sink* find_sink(std::string_view name) { + const auto it = std::find_if(sinks.begin(), sinks.end(), matcher(name)); if (it == sinks.end()) { - return {}; + return nullptr; } - try { - return std::any_cast*>(it->second); - } catch (...) { - return {}; - } + return std::any_cast*>(*it); + } + + template + static auto matcher(std::string_view name) { + return [name](const std::any &v) { + try { + return std::any_cast*>(v)->name() == name; + } catch (...) 
{ + return false; + } + }; } }; diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index b86feebb..3726f5b5 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -64,7 +64,7 @@ const boost::ut::suite DataSinkTests = [] { auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); auto &sink = flow_graph.make_node>(); sink.set_name("test_sink"); - data_sink_registry::instance().register_sink(&sink); // TODO this should be done elsewhere + expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); std::size_t samples_seen = 0; @@ -91,18 +91,17 @@ const boost::ut::suite DataSinkTests = [] { auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); auto &sink = flow_graph.make_node>(); sink.set_name("test_sink"); - data_sink_registry::instance().register_sink(&sink); // TODO this should be done elsewhere expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); std::atomic samples_seen = 0; auto poller = data_sink_registry::instance().get_streaming_poller("test_sink", blocking_mode::Blocking); - expect(poller); + expect(neq(poller, nullptr)); auto polling = std::async([poller, &samples_seen] { while (!poller->finished) { - [[maybe_unused]] poller->process([&samples_seen](const auto &data) { + [[maybe_unused]] auto r = poller->process([&samples_seen](const auto &data) { samples_seen += data.size(); }); } @@ -127,7 +126,6 @@ const boost::ut::suite DataSinkTests = [] { auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); auto &sink = flow_graph.make_node>(); sink.set_name("test_sink"); - data_sink_registry::instance().register_sink(&sink); // TODO this should be done elsewhere expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); From aa41dd9c90a400fc0269ff638e4d8727c697e075 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Wed, 7 Jun 2023 11:56:45 +0200 Subject: [PATCH 04/64] Fix warnings and hopefully clang/emscripten compilation --- test/qa_data_sink.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index 3726f5b5..bb806262 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -132,17 +132,17 @@ const boost::ut::suite DataSinkTests = [] { std::atomic samples_seen = 0; auto invalid_type_poller = data_sink_registry::instance().get_streaming_poller("test_sink"); - expect(!invalid_type_poller); + expect(eq(invalid_type_poller, nullptr)); auto poller = data_sink_registry::instance().get_streaming_poller("test_sink"); - expect(poller); + expect(neq(poller, nullptr)); auto polling = std::async([poller, &samples_seen] { - expect(poller.get() != nullptr); + expect(neq(poller, nullptr)); while (!poller->finished) { using namespace std::chrono_literals; std::this_thread::sleep_for(20ms); - [[maybe_unused]] poller->process([&samples_seen](const auto &data) { + [[maybe_unused]] auto r = poller->process([&samples_seen](const auto &data) { samples_seen += data.size(); }); } From cb53a7f13fc9e3b2501a2213ef38881a817cb574 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Wed, 7 Jun 2023 20:45:10 +0200 Subject: [PATCH 05/64] Simplify data_sink/data_sink_registry No need for the additional template parameter of data_sink. 
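For context, a minimal usage sketch of the registry as it stands after this change, mirroring the tests below; the sink name "test_sink" and the float sample type are placeholders, and the snippet assumes a data_sink of that type has already been added to a running flow graph:

    #include "data_sink.hpp"

    void consume_stream() {
        using namespace fair::graph;
        // look up the sink by name and element type; returns nullptr if no matching sink is registered
        auto poller = data_sink_registry::instance().get_streaming_poller<float>("test_sink", blocking_mode::Blocking);
        if (!poller) {
            return;
        }
        while (!poller->finished) {
            [[maybe_unused]] auto r = poller->process([](const auto &data) {
                // data is the span of samples published since the last call
            });
        }
    }

Registering a callback instead of polling goes through the same lookup, via data_sink_registry::instance().register_streaming_callback<float>("test_sink", callback), as exercised in the tests further down.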
--- include/data_sink.hpp | 151 +++++++++++++++++++++--------------------- 1 file changed, 76 insertions(+), 75 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index bd1f8afa..0050c369 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -20,9 +20,81 @@ enum class blocking_mode { Blocking }; -class data_sink_registry; +template +class data_sink; -template +class data_sink_registry { + std::mutex mutex; + std::vector sinks; + +public: + // TODO this shouldn't be a singleton but associated with the flow graph (?) + static data_sink_registry& instance() { + static data_sink_registry s_instance; + return s_instance; + } + + template + void register_sink(data_sink *sink) { + std::lock_guard lg{mutex}; + sinks.push_back(sink); + } + + template + void unregister_sink(data_sink *sink) { + std::lock_guard lg{mutex}; + std::erase_if(sinks, [sink](const std::any &v) { + try { + return std::any_cast *>(v) == sink; + } catch (...) { + return false; + } + }); + } + + template + std::shared_ptr::poller> get_streaming_poller(std::string_view name, blocking_mode block = blocking_mode::NonBlocking) { + std::lock_guard lg{mutex}; + auto sink = find_sink(name); + return sink ? sink->get_streaming_poller(block) : nullptr; + } + + template + bool register_streaming_callback(std::string_view name, Callback callback) { + std::lock_guard lg{mutex}; + auto sink = find_sink(name); + if (!sink) { + return false; + } + + sink->register_streaming_callback(std::move(callback)); + return true; + } + +private: + template + data_sink* find_sink(std::string_view name) { + const auto it = std::find_if(sinks.begin(), sinks.end(), matcher(name)); + if (it == sinks.end()) { + return nullptr; + } + + return std::any_cast*>(*it); + } + + template + static auto matcher(std::string_view name) { + return [name](const std::any &v) { + try { + return std::any_cast*>(v)->name() == name; + } catch (...) { + return false; + } + }; + } +}; + +template class data_sink : public node> { public: IN in; @@ -81,11 +153,11 @@ class data_sink : public node> { // graph creating/destroying the sink instead? data_sink() { - R::instance().register_sink(this); + data_sink_registry::instance().register_sink(this); } ~data_sink() { - R::instance().unregister_sink(this); + data_sink_registry::instance().unregister_sink(this); } std::shared_ptr get_streaming_poller(blocking_mode block = blocking_mode::NonBlocking) { @@ -164,77 +236,6 @@ class data_sink : public node> { } }; -class data_sink_registry { - std::mutex mutex; - std::vector sinks; - -public: - // TODO this shouldn't be a singleton but associated with the flow graph (?) - static data_sink_registry& instance() { - static data_sink_registry s_instance; - return s_instance; - } - - template - void register_sink(data_sink *sink) { - std::lock_guard lg{mutex}; - sinks.push_back(sink); - } - - template - void unregister_sink(data_sink *sink) { - std::lock_guard lg{mutex}; - std::erase_if(sinks, [sink](const std::any &v) { - try { - return std::any_cast *>(v) == sink; - } catch (...) { - return false; - } - }); - } - - template - std::shared_ptr::poller> get_streaming_poller(std::string_view name, blocking_mode block = blocking_mode::NonBlocking) { - std::lock_guard lg{mutex}; - auto sink = find_sink(name); - return sink ? 
sink->get_streaming_poller(block) : nullptr; - } - - template - bool register_streaming_callback(std::string_view name, Callback callback) { - std::lock_guard lg{mutex}; - auto sink = find_sink(name); - if (!sink) { - return false; - } - - sink->register_streaming_callback(std::move(callback)); - return true; - } - -private: - template - data_sink* find_sink(std::string_view name) { - const auto it = std::find_if(sinks.begin(), sinks.end(), matcher(name)); - if (it == sinks.end()) { - return nullptr; - } - - return std::any_cast*>(*it); - } - - template - static auto matcher(std::string_view name) { - return [name](const std::any &v) { - try { - return std::any_cast*>(v)->name() == name; - } catch (...) { - return false; - } - }; - } -}; - } ENABLE_REFLECTION_FOR_TEMPLATE_FULL((typename T), (fair::graph::data_sink), in, n_samples_consumed, n_samples_max, last_tag_position, sample_rate); From 486517d0cf6b89cf06edbf1a530c6f98a2156a1f Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Wed, 7 Jun 2023 20:47:19 +0200 Subject: [PATCH 06/64] Remove obsolete comment --- include/data_sink.hpp | 4 ---- 1 file changed, 4 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index 0050c369..7ab1a48f 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -148,10 +148,6 @@ class data_sink : public node> { std::vector listeners; public: - // TODO sink should register itself on construction, but name is set afterwards via - // set_name, which we have no hook into. Maybe the registration should be done by the - // graph creating/destroying the sink instead? - data_sink() { data_sink_registry::instance().register_sink(this); } From ec59eee89ff256c019f479cb470e41df4c2b9327 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Wed, 7 Jun 2023 21:04:15 +0200 Subject: [PATCH 07/64] Lock mutex --- include/data_sink.hpp | 1 + 1 file changed, 1 insertion(+) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index 7ab1a48f..a4a6789a 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -157,6 +157,7 @@ class data_sink : public node> { } std::shared_ptr get_streaming_poller(blocking_mode block = blocking_mode::NonBlocking) { + std::lock_guard lg(pending_listeners.mutex); auto handler = std::make_shared(); pending_listeners.list.push_back({ .mode = acquisition_mode::Continuous, From 03aeb73e6b454d5df08440a7a5081a8efcb95a6b Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Thu, 8 Jun 2023 21:30:30 +0200 Subject: [PATCH 08/64] Let's test with two pollers --- test/qa_data_sink.cpp | 45 ++++++++++++++++++++++++++----------------- 1 file changed, 27 insertions(+), 18 deletions(-) diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index bb806262..711be5d4 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -20,6 +20,8 @@ auto boost::ut::cfg = boost::ut::runner struct Source : public node> { OUT out; @@ -59,7 +61,6 @@ const boost::ut::suite DataSinkTests = [] { "callback continuous mode"_test = [] { graph flow_graph; - constexpr std::int32_t n_samples = gr::util::round_up(1'000'000, 1024); auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); auto &sink = flow_graph.make_node>(); @@ -85,8 +86,6 @@ const boost::ut::suite DataSinkTests = [] { }; "blocking polling continuous mode"_test = [] { - constexpr std::int32_t n_samples = gr::util::round_up(1'000'000, 1024); - graph flow_graph; auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); auto &sink = flow_graph.make_node>(); @@ -96,32 +95,42 @@ const boost::ut::suite 
DataSinkTests = [] { std::atomic samples_seen = 0; - auto poller = data_sink_registry::instance().get_streaming_poller("test_sink", blocking_mode::Blocking); - expect(neq(poller, nullptr)); + auto poller1 = data_sink_registry::instance().get_streaming_poller("test_sink", blocking_mode::Blocking); + expect(neq(poller1, nullptr)); - auto polling = std::async([poller, &samples_seen] { - while (!poller->finished) { - [[maybe_unused]] auto r = poller->process([&samples_seen](const auto &data) { - samples_seen += data.size(); - }); - } - }); + auto poller2 = data_sink_registry::instance().get_streaming_poller("test_sink", blocking_mode::Blocking); + expect(neq(poller2, nullptr)); + + auto make_runner = [](auto poller) { + return std::async([poller] { + std::size_t samples_seen = 0; + while (!poller->finished) { + [[maybe_unused]] auto r = poller->process([&samples_seen](const auto &data) { + samples_seen += data.size(); + }); + } + + expect(eq(samples_seen, n_samples)); + expect(eq(poller->drop_count.load(), 0)); + }); + }; + + auto runner1 = make_runner(poller1); + auto runner2 = make_runner(poller2); fair::graph::scheduler::simple sched{std::move(flow_graph)}; sched.work(); - poller->finished = true; // TODO this should be done by the block + poller1->finished = true; // TODO this should be done by the block + poller2->finished = true; - polling.wait(); + runner1.wait(); + runner2.wait(); expect(eq(sink.n_samples_consumed, n_samples)); - expect(eq(samples_seen.load(), n_samples)); - expect(eq(poller->drop_count.load(), 0)); }; "non-blocking polling continuous mode"_test = [] { - constexpr std::int32_t n_samples = gr::util::round_up(1'000'000, 1024); - graph flow_graph; auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); auto &sink = flow_graph.make_node>(); From 550e9e795a428729f51e486acfc3296f2ce174b8 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Thu, 8 Jun 2023 21:52:31 +0200 Subject: [PATCH 09/64] Make test stricter A sample has either been seen or has been dropped. --- test/qa_data_sink.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index 711be5d4..6d954bb5 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -165,8 +165,7 @@ const boost::ut::suite DataSinkTests = [] { polling.wait(); expect(eq(sink.n_samples_consumed, n_samples)); - expect(lt(samples_seen.load(), n_samples)); - expect(gt(poller->drop_count.load(), 0)); + expect(eq(samples_seen.load() + poller->drop_count.load(), n_samples)); }; }; From 196e36eaa1ca048cdef9d45b8f400ddcf5355f4e Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Mon, 12 Jun 2023 09:38:31 +0200 Subject: [PATCH 10/64] Fix listener handling Listener state got obviously lost when a listener was added during processing. Let's keep it simple, use a mutex for now, fix it later if there's actually a problem. 
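Concretely, the previous dirty-flag scheme had work() overwrite the live listeners vector with a copy of the pending list, which discarded any per-listener state (such as accumulated drop counts) gathered in the meantime; with this change, registration and work() simply serialize on one mutex around a single vector. A self-contained sketch of that pattern (type and member names are simplified stand-ins, not the exact ones from the diff below):

    #include <functional>
    #include <mutex>
    #include <span>
    #include <vector>

    struct sink_sketch {
        struct listener {
            std::function<void(std::span<const float>)> callback;
        };

        std::vector<listener> listeners;
        std::mutex            listener_mutex;

        void register_streaming_callback(std::function<void(std::span<const float>)> cb) {
            std::lock_guard lg(listener_mutex);            // same lock as the processing path
            listeners.push_back({ .callback = std::move(cb) });
        }

        void process_chunk(std::span<const float> in_data) { // stands in for work()
            std::lock_guard lg(listener_mutex);
            for (auto &l : listeners) {                     // state stored in each listener stays intact
                l.callback(in_data);
            }
        }
    };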
--- include/data_sink.hpp | 62 ++++++++++++++++++------------------------- 1 file changed, 26 insertions(+), 36 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index a4a6789a..c08d2d5f 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -139,13 +139,8 @@ class data_sink : public node> { std::weak_ptr polling_handler; }; - struct { - std::atomic dirty = false; - std::mutex mutex; - std::vector list; - } pending_listeners; - std::vector listeners; + std::mutex listener_mutex; public: data_sink() { @@ -157,27 +152,25 @@ class data_sink : public node> { } std::shared_ptr get_streaming_poller(blocking_mode block = blocking_mode::NonBlocking) { - std::lock_guard lg(pending_listeners.mutex); + std::lock_guard lg(listener_mutex); auto handler = std::make_shared(); - pending_listeners.list.push_back({ + listeners.push_back({ .mode = acquisition_mode::Continuous, .block = block == blocking_mode::Blocking, .buffer = gr::circular_buffer(0), .polling_handler = handler }); - pending_listeners.dirty = true; return handler; } template void register_streaming_callback(Callback callback) { - std::lock_guard lg(pending_listeners.mutex); - pending_listeners.list.push_back({ + std::lock_guard lg(listener_mutex); + listeners.push_back({ .mode = acquisition_mode::Continuous, .buffer = gr::circular_buffer(0), .callback = std::move(callback) }); - pending_listeners.dirty = true; } [[nodiscard]] work_return_t work() { @@ -192,33 +185,30 @@ class data_sink : public node> { const auto noutput_items = std::min(listener_buffer_size, n_readable); const auto in_data = reader.get(noutput_items); - if (pending_listeners.dirty) { - std::lock_guard lg(pending_listeners.mutex); - listeners = pending_listeners.list; - pending_listeners.dirty = false; - } - - for (auto &listener : listeners) { - if (listener.mode == acquisition_mode::Continuous) { - if (auto poller = listener.polling_handler.lock()) { - auto writer = poller->buffer.new_writer(); - const auto read_data = reader.get(noutput_items); - if (listener.block) { - auto write_data = writer.reserve_output_range(noutput_items); - std::copy(read_data.begin(), read_data.end(), write_data.begin()); - write_data.publish(write_data.size()); - } else { - const auto can_write = writer.available(); - const auto to_write = std::min(read_data.size(), can_write); - poller->drop_count += read_data.size() - can_write; - if (to_write > 0) { - auto write_data = writer.reserve_output_range(to_write); - std::copy(read_data.begin(), read_data.begin() + to_write - 1, write_data.begin()); + { + std::lock_guard lg(listener_mutex); + for (auto &listener : listeners) { + if (listener.mode == acquisition_mode::Continuous) { + if (auto poller = listener.polling_handler.lock()) { + auto writer = poller->buffer.new_writer(); + const auto read_data = reader.get(noutput_items); + if (listener.block) { + auto write_data = writer.reserve_output_range(noutput_items); + std::copy(read_data.begin(), read_data.end(), write_data.begin()); write_data.publish(write_data.size()); + } else { + const auto can_write = writer.available(); + const auto to_write = std::min(read_data.size(), can_write); + poller->drop_count += read_data.size() - can_write; + if (to_write > 0) { + auto write_data = writer.reserve_output_range(to_write); + std::copy(read_data.begin(), read_data.begin() + to_write - 1, write_data.begin()); + write_data.publish(write_data.size()); + } } + } else if (listener.callback) { + listener.callback(in_data); } - } else if (listener.callback) { - 
listener.callback(in_data); } } } From 86f62f5b475e3f2de86da1202f7b33311a33ca72 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Thu, 8 Jun 2023 11:17:57 +0200 Subject: [PATCH 11/64] WIP Add support for Triggered/PostMortem --- include/data_sink.hpp | 247 +++++++++++++++++++++++++++++++++++++----- test/qa_data_sink.cpp | 158 ++++++++++++++++++++++++--- 2 files changed, 359 insertions(+), 46 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index c08d2d5f..085ec198 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -2,10 +2,12 @@ #define GNURADIO_DATA_SINK_HPP #include "circular_buffer.hpp" +#include "dataset.hpp" #include "node.hpp" #include "tag.hpp" #include +#include namespace fair::graph { @@ -59,6 +61,13 @@ class data_sink_registry { return sink ? sink->get_streaming_poller(block) : nullptr; } + template + std::shared_ptr::dataset_poller> get_trigger_poller(std::string_view name, TriggerPredicate p, std::size_t pre_samples, std::size_t post_samples, blocking_mode block = blocking_mode::NonBlocking) { + std::lock_guard lg{mutex}; + auto sink = find_sink(name); + return sink ? sink->get_trigger_poller(std::move(p), pre_samples, post_samples, block) : nullptr; + } + template bool register_streaming_callback(std::string_view name, Callback callback) { std::lock_guard lg{mutex}; @@ -125,21 +134,49 @@ class data_sink : public node> { } }; + struct dataset_poller { + std::atomic finished = false; + std::atomic drop_count = 0; + gr::circular_buffer> buffer = gr::circular_buffer>(listener_buffer_size); // TODO use other size? + decltype(buffer.new_reader()) reader = buffer.new_reader(); + decltype(buffer.new_writer()) writer = buffer.new_writer(); + + template + [[nodiscard]] bool process(Handler fnc) { + const auto available = reader.available(); + if (available == 0) { + return false; + } + + const auto read_data = reader.get(1); + fnc(read_data[0]); + reader.consume(1); + return true; + } + }; + private: - struct listener { + struct pending_window_t { + tag_t trigger; + DataSet dataset; + std::size_t pending_post_samples = 0; + }; + + struct listener_t { acquisition_mode mode = acquisition_mode::Triggered; - std::pair window; ///< window of data to return in relation to the matching tag position, e.g. 
[-2000, 3000] to obtain 2000 presamples and 3000 postsamples - std::size_t history_size = 0; + std::size_t pre_samples = 0; + std::size_t post_samples = 0; bool block = false; + std::function trigger_predicate = {}; + gr::circular_buffer buffer = gr::circular_buffer(0); + std::deque pending_trigger_windows; // triggers that still didn't receive all their data + std::function)> callback = {}; // TODO we might want to pass back stats here like drop_count + std::weak_ptr dataset_polling_handler = {}; + std::weak_ptr polling_handler = {}; int64_t drop_count = 0; - std::function trigger_predicate; - gr::circular_buffer buffer; - std::optional pending; ///< number of samples expected due to a previous trigger - std::function)> callback; // TODO we might want to pass back stats here like drop_count - std::weak_ptr polling_handler; }; - std::vector listeners; + std::vector listeners; std::mutex listener_mutex; public: @@ -157,22 +194,60 @@ class data_sink : public node> { listeners.push_back({ .mode = acquisition_mode::Continuous, .block = block == blocking_mode::Blocking, - .buffer = gr::circular_buffer(0), .polling_handler = handler }); return handler; } + template + std::shared_ptr get_trigger_poller(TriggerPredicate p, std::size_t pre_samples, std::size_t post_samples, blocking_mode block = blocking_mode::NonBlocking) { + std::lock_guard lg(listener_mutex); + auto handler = std::make_shared(); + listeners.push_back({ + .mode = acquisition_mode::Triggered, + .pre_samples = pre_samples, + .post_samples = post_samples, + .block = block == blocking_mode::Blocking, + .trigger_predicate = std::move(p), + .dataset_polling_handler = handler + }); + history.resize(std::max(pre_samples, history.size())); + return handler; + } + template void register_streaming_callback(Callback callback) { std::lock_guard lg(listener_mutex); listeners.push_back({ .mode = acquisition_mode::Continuous, - .buffer = gr::circular_buffer(0), .callback = std::move(callback) }); } + // TODO this code should be called at the end of graph processing + void stop() { + std::lock_guard lg(listener_mutex); + for (auto &listener : listeners) { + if (listener.mode == acquisition_mode::Triggered || listener.mode == acquisition_mode::PostMortem) { + // send out any incomplete data windows + for (auto &window : listener.pending_trigger_windows) { + publish_dataset(listener, std::move(window.dataset)); + } + + if (auto p = listener.dataset_polling_handler.lock()) { + p->finished = true; + } + } else if (listener.mode == acquisition_mode::Continuous) { + if (auto p = listener.polling_handler.lock()) { + // TODO pass remaining data + p->finished = true; + } else { + // TODO pass remaining data to callback + } + } + } + } + [[nodiscard]] work_return_t work() { auto &in_port = input_port<"in">(this); auto &reader = in_port.streamReader(); @@ -183,36 +258,83 @@ class data_sink : public node> { } const auto noutput_items = std::min(listener_buffer_size, n_readable); + const auto reader_position = reader.position() + 1; const auto in_data = reader.get(noutput_items); + const auto history_view = std::span(history.begin(), history_available); + // TODO I'm not sure why the +1 in "reader.position() + 1". Bug or do I misunderstand? 
+ assert(reader_position == n_samples_consumed); + assert(n_samples_consumed == std::round(in_data[0])); + + auto &tag_reader = in_port.tagReader(); + const auto n_tags = tag_reader.available(); + const auto tag_data = tag_reader.get(n_tags); + std::vector tags(tag_data.begin(), tag_data.end()); + auto out_of_range = [end_pos = reader_position + noutput_items](const auto &tag) { + return tag.index > end_pos; + }; + std::erase_if(tags, out_of_range); + tag_reader.consume(tags.size()); { std::lock_guard lg(listener_mutex); for (auto &listener : listeners) { if (listener.mode == acquisition_mode::Continuous) { - if (auto poller = listener.polling_handler.lock()) { - auto writer = poller->buffer.new_writer(); - const auto read_data = reader.get(noutput_items); - if (listener.block) { - auto write_data = writer.reserve_output_range(noutput_items); - std::copy(read_data.begin(), read_data.end(), write_data.begin()); - write_data.publish(write_data.size()); + write_to_listener(listener, std::vector>{in_data}); + } else if (listener.mode == acquisition_mode::Triggered || listener.mode == acquisition_mode::PostMortem) { + namespace views = std::ranges::views; + auto filtered = tags | views::filter(listener.trigger_predicate); + for (const auto &trigger : filtered) { + // TODO fill dataset with metadata etc. + DataSet dataset; + dataset.timing_events = {{trigger}}; + dataset.signal_values.reserve(listener.pre_samples + listener.post_samples); + listener.pending_trigger_windows.push_back({.trigger = trigger, .dataset = std::move(dataset), .pending_post_samples = listener.post_samples}); + } + auto window = listener.pending_trigger_windows.begin(); + while (window != listener.pending_trigger_windows.end()) { + auto &dataset = window->dataset; + const auto window_offset = window->trigger.index - reader_position; + + if (window_offset >= 0 && dataset.signal_values.empty()) { // new trigger, write history + // old history: pre-trigger data from previous in_data (if available) + const auto old_history_size = std::max(static_cast(listener.pre_samples) - window_offset, std::int64_t{0}); + const auto available = std::min(static_cast(old_history_size), history_view.size()); + const auto old_history_view = history_view.last(available); + dataset.signal_values.insert(dataset.signal_values.end(), old_history_view.begin(), old_history_view.end()); + + // new history: pre-trigger samples from the current in_data + const auto new_history_size = listener.pre_samples - old_history_size; + const auto new_history_view = in_data.subspan(window_offset - new_history_size, new_history_size); + dataset.signal_values.insert(dataset.signal_values.end(), new_history_view.begin(), new_history_view.end()); + } + + // write missing post-samples + const auto previous_post_samples = listener.post_samples - window->pending_post_samples; + const auto first_requested = window_offset + previous_post_samples; + const auto last_requested = window_offset + listener.post_samples - 1; + const auto last_available = std::min(last_requested, noutput_items - 1); + const auto post_sample_view = in_data.subspan(first_requested, last_available - first_requested + 1); + dataset.signal_values.insert(dataset.signal_values.end(), post_sample_view.begin(), post_sample_view.end()); + window->pending_post_samples -= post_sample_view.size(); + + if (window->pending_post_samples == 0) { + publish_dataset(listener, std::move(dataset)); + window = listener.pending_trigger_windows.erase(window); } else { - const auto can_write = writer.available(); - const 
auto to_write = std::min(read_data.size(), can_write); - poller->drop_count += read_data.size() - can_write; - if (to_write > 0) { - auto write_data = writer.reserve_output_range(to_write); - std::copy(read_data.begin(), read_data.begin() + to_write - 1, write_data.begin()); - write_data.publish(write_data.size()); - } + ++window; } - } else if (listener.callback) { - listener.callback(in_data); } } } + + // store potential pre-samples for triggers at the beginning of the next chunk + // TODO should use built-in history functionality that doesn't copy (but is resizable as listeners are added) + history_available = std::min(history.size(), noutput_items); + const auto history_data = in_data.last(history_available); + history.assign(history_data.begin(), history_data.end()); } + n_samples_consumed += noutput_items; if (!reader.consume(noutput_items)) { @@ -221,6 +343,75 @@ class data_sink : public node> { return work_return_t::OK; } + +private: + std::vector history; + std::size_t history_available = 0; + + inline void publish_dataset(listener_t &l, DataSet &&data) { + if (auto poller = l.dataset_polling_handler.lock()) { + auto write_data = poller->writer.reserve_output_range(1); + if (l.block) { + write_data[0] = std::move(data); + write_data.publish(1); + } else { + if (poller->writer.available() > 0) { + write_data[0] = std::move(data); + write_data.publish(1); + } else { + poller->drop_count++; + } + } + } else { // if callback... + // TODO call callback + } + } + + inline void write_to_listener(listener_t &l, const std::vector> &spans) { + std::size_t total_size = 0; + for (const auto &i : spans) { + total_size += i.size(); + } + + if (total_size == 0) { + return; + } + + if (auto poller = l.polling_handler.lock()) { + auto writer = poller->buffer.new_writer(); + if (l.block) { + auto write_data = writer.reserve_output_range(total_size); + auto target = write_data.begin(); + for (const auto &in_data : spans) { + std::copy(in_data.begin(), in_data.end(), target); + target += in_data.size(); + } + write_data.publish(write_data.size()); + } else { + const auto can_write = writer.available(); + auto to_write = std::min(total_size, can_write); + poller->drop_count += total_size - can_write; + if (to_write > 0) { + auto write_data = writer.reserve_output_range(to_write); + std::size_t written = 0; + for (const auto &in_data : spans) { + const auto n = std::min(in_data.size(), to_write - written); + std::copy(in_data.begin(), in_data.begin() + n, write_data.begin() + written); + written += n; + if (written == to_write) { + break; + } + } + write_data.publish(write_data.size()); + } + } + } else if (l.callback) { + // TODO use buffer/make this one call + for (const auto &data : spans) { + l.callback(data); + } + } + } }; } diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index 6d954bb5..75744086 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -10,6 +10,7 @@ #include #include +#include #include #if defined(__clang__) && __clang_major__ >= 16 @@ -20,7 +21,7 @@ auto boost::ut::cfg = boost::ut::runner struct Source : public node> { @@ -30,6 +31,7 @@ struct Source : public node> { std::int32_t n_tag_offset = 0; float sample_rate = 1000.0f; T next_value = {}; + std::deque tags; // must be sorted by index void init(const tag_t::map_type &old_settings, const tag_t::map_type &new_settings) { @@ -45,8 +47,17 @@ struct Source : public node> { [[nodiscard]] constexpr T process_one() noexcept { + while (!tags.empty() && tags[0].index == n_samples_produced) { + // TODO there 
probably is, or should be, an easier way to do this + const auto pos = output_port<"out">(this).streamWriter().position(); + publish_tag(out, tags[0].map, n_samples_produced - pos); + tags.pop_front(); + } + n_samples_produced++; - return next_value++; + const auto v = next_value; + next_value++; + return v; } }; @@ -86,6 +97,9 @@ const boost::ut::suite DataSinkTests = [] { }; "blocking polling continuous mode"_test = [] { + + constexpr std::int32_t n_samples = 200000; + graph flow_graph; auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); auto &sink = flow_graph.make_node>(); @@ -103,14 +117,20 @@ const boost::ut::suite DataSinkTests = [] { auto make_runner = [](auto poller) { return std::async([poller] { - std::size_t samples_seen = 0; - while (!poller->finished) { - [[maybe_unused]] auto r = poller->process([&samples_seen](const auto &data) { - samples_seen += data.size(); - }); + std::vector received; + bool seen_finished = false; + while (!seen_finished) { + // TODO make finished vs. pending data handling actually thread-safe + seen_finished = poller->finished.load(); + while (poller->process([&received](const auto &data) { + received.insert(received.end(), data.begin(), data.end()); + })) {} } - expect(eq(samples_seen, n_samples)); + std::vector expected(n_samples); + std::iota(expected.begin(), expected.end(), 0.0); + expect(eq(received.size(), expected.size())); + expect(eq(received, expected)); expect(eq(poller->drop_count.load(), 0)); }); }; @@ -121,8 +141,7 @@ const boost::ut::suite DataSinkTests = [] { fair::graph::scheduler::simple sched{std::move(flow_graph)}; sched.work(); - poller1->finished = true; // TODO this should be done by the block - poller2->finished = true; + sink.stop(); // TODO the scheduler should call this runner1.wait(); runner2.wait(); @@ -130,15 +149,112 @@ const boost::ut::suite DataSinkTests = [] { expect(eq(sink.n_samples_consumed, n_samples)); }; - "non-blocking polling continuous mode"_test = [] { + "blocking polling trigger mode non-overlapping"_test = [] { + constexpr std::int32_t n_samples = 200000; + graph flow_graph; auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); + src.tags = {{3000, {{"TYPE", "TRIGGER"}}}, tag_t{8000, {{"TYPE", "NO_TRIGGER"}}}, {180000, {{"TYPE", "TRIGGER"}}}}; auto &sink = flow_graph.make_node>(); sink.set_name("test_sink"); expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); - std::atomic samples_seen = 0; + auto is_trigger = [](const tag_t &tag) { + const auto v = tag.get("TYPE"); + return v && std::get(v->get()) == "TRIGGER"; + }; + + auto poller = data_sink_registry::instance().get_trigger_poller("test_sink", is_trigger, 3, 2, blocking_mode::Blocking); + expect(neq(poller, nullptr)); + + std::mutex m; + std::vector received_data; + + auto polling = std::async([poller, &received_data, &m] { + while (!poller->finished) { + using namespace std::chrono_literals; + [[maybe_unused]] auto r = poller->process([&received_data, &m](const auto &dataset) { + std::lock_guard lg{m}; + received_data.insert(received_data.end(), dataset.signal_values.begin(), dataset.signal_values.end()); + }); + } + }); + + fair::graph::scheduler::simple sched{std::move(flow_graph)}; + sched.work(); + + sink.stop(); // TODO the scheduler should call this + + polling.wait(); + + std::lock_guard lg{m}; + expect(eq(sink.n_samples_consumed, n_samples)); + expect(eq(received_data.size(), 10)); + expect(eq(received_data, std::vector{2997, 2998, 2999, 3000, 3001, 179997, 179998, 
179999, 180000, 180001})); + expect(eq(poller->drop_count.load(), 0)); + }; + + "blocking polling trigger mode overlapping"_test = [] { + constexpr std::int32_t n_samples = 2000000; + constexpr std::size_t n_triggers = 5000; + + graph flow_graph; + auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); + + for (std::size_t i = 0; i < n_triggers; ++i) { + src.tags.push_back(tag_t{60000L + static_cast(i), {{"TYPE", "TRIGGER"}}}); + } + + auto &sink = flow_graph.make_node>(); + sink.set_name("test_sink"); + + expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); + + auto is_trigger = [](const tag_t &tag) { + return true; + }; + + auto poller = data_sink_registry::instance().get_trigger_poller("test_sink", is_trigger, 3000, 2000, blocking_mode::Blocking); + expect(neq(poller, nullptr)); + + std::mutex m; + std::vector received_data; + + auto polling = std::async([poller, &received_data, &m] { + while (!poller->finished) { + using namespace std::chrono_literals; + [[maybe_unused]] auto r = poller->process([&received_data, &m](const auto &dataset) { + std::lock_guard lg{m}; + expect(eq(dataset.signal_values.size(), 5000)); + received_data.push_back(dataset.signal_values.front()); + received_data.push_back(dataset.signal_values.back()); + }); + } + }); + + fair::graph::scheduler::simple sched{std::move(flow_graph)}; + sched.work(); + + sink.stop(); // TODO the scheduler should call this + + polling.wait(); + + std::lock_guard lg{m}; + auto expected_start = std::vector{57000, 61999, 57001, 62000, 57002}; + expect(eq(sink.n_samples_consumed, n_samples)); + expect(eq(received_data.size(), 2 * n_triggers)); + expect(eq(std::vector(received_data.begin(), received_data.begin() + 5), expected_start)); + expect(eq(poller->drop_count.load(), 0)); + }; + + "non-blocking polling continuous mode"_test = [] { + graph flow_graph; + auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); + auto &sink = flow_graph.make_node>(); + sink.set_name("test_sink"); + + expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); auto invalid_type_poller = data_sink_registry::instance().get_streaming_poller("test_sink"); expect(eq(invalid_type_poller, nullptr)); @@ -146,26 +262,32 @@ const boost::ut::suite DataSinkTests = [] { auto poller = data_sink_registry::instance().get_streaming_poller("test_sink"); expect(neq(poller, nullptr)); - auto polling = std::async([poller, &samples_seen] { + auto polling = std::async([poller] { expect(neq(poller, nullptr)); - while (!poller->finished) { + std::size_t samples_seen = 0; + bool seen_finished = false; + while (!seen_finished) { + // TODO make finished vs. 
pending data handling actually thread-safe using namespace std::chrono_literals; std::this_thread::sleep_for(20ms); - [[maybe_unused]] auto r = poller->process([&samples_seen](const auto &data) { + + seen_finished = poller->finished.load(); + while (poller->process([&samples_seen](const auto &data) { samples_seen += data.size(); - }); + })) {} } + + expect(eq(samples_seen + poller->drop_count.load(), n_samples)); }); fair::graph::scheduler::simple sched{std::move(flow_graph)}; sched.work(); - poller->finished = true; // TODO this should be done by the block + sink.stop(); // TODO the scheduler should call this polling.wait(); expect(eq(sink.n_samples_consumed, n_samples)); - expect(eq(samples_seen.load() + poller->drop_count.load(), n_samples)); }; }; From 3758bcdb01910608f7a25052e6dbc94bb0c80327 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Tue, 13 Jun 2023 13:50:59 +0200 Subject: [PATCH 12/64] Minor fixes, remove ranges usage --- include/data_sink.hpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index 085ec198..b5703b60 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -7,7 +7,6 @@ #include "tag.hpp" #include -#include namespace fair::graph { @@ -263,14 +262,13 @@ class data_sink : public node> { const auto history_view = std::span(history.begin(), history_available); // TODO I'm not sure why the +1 in "reader.position() + 1". Bug or do I misunderstand? assert(reader_position == n_samples_consumed); - assert(n_samples_consumed == std::round(in_data[0])); auto &tag_reader = in_port.tagReader(); const auto n_tags = tag_reader.available(); const auto tag_data = tag_reader.get(n_tags); std::vector tags(tag_data.begin(), tag_data.end()); auto out_of_range = [end_pos = reader_position + noutput_items](const auto &tag) { - return tag.index > end_pos; + return tag.index > static_cast(end_pos); }; std::erase_if(tags, out_of_range); tag_reader.consume(tags.size()); @@ -281,8 +279,10 @@ class data_sink : public node> { if (listener.mode == acquisition_mode::Continuous) { write_to_listener(listener, std::vector>{in_data}); } else if (listener.mode == acquisition_mode::Triggered || listener.mode == acquisition_mode::PostMortem) { - namespace views = std::ranges::views; - auto filtered = tags | views::filter(listener.trigger_predicate); + auto filtered = tags; // should use views::filter once that is working everywhere + std::erase_if(filtered, [&p = listener.trigger_predicate](const auto &tag) { + return !p(tag); + }); for (const auto &trigger : filtered) { // TODO fill dataset with metadata etc. 
DataSet dataset; From a3f3b03298c87b177faeb6df14df576f90e7faf8 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Tue, 13 Jun 2023 14:42:27 +0200 Subject: [PATCH 13/64] Fix compilation and tests --- test/qa_data_sink.cpp | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index 75744086..aba3bbe3 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -203,7 +203,7 @@ const boost::ut::suite DataSinkTests = [] { auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); for (std::size_t i = 0; i < n_triggers; ++i) { - src.tags.push_back(tag_t{60000L + static_cast(i), {{"TYPE", "TRIGGER"}}}); + src.tags.push_back(tag_t{static_cast(60000 + i), {{"TYPE", "TRIGGER"}}}); } auto &sink = flow_graph.make_node>(); @@ -222,14 +222,16 @@ const boost::ut::suite DataSinkTests = [] { std::vector received_data; auto polling = std::async([poller, &received_data, &m] { - while (!poller->finished) { - using namespace std::chrono_literals; - [[maybe_unused]] auto r = poller->process([&received_data, &m](const auto &dataset) { + bool seen_finished = false; + while (!seen_finished) { + // TODO make finished vs. pending data handling actually thread-safe + seen_finished = poller->finished.load(); + while (poller->process([&received_data, &m](const auto &dataset) { std::lock_guard lg{m}; expect(eq(dataset.signal_values.size(), 5000)); received_data.push_back(dataset.signal_values.front()); received_data.push_back(dataset.signal_values.back()); - }); + })) {} } }); From 83103599accc277a1a213bed959d51b95935de15 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Wed, 14 Jun 2023 12:50:19 +0200 Subject: [PATCH 14/64] Continuous callback: Enforce fixed chunk size --- include/data_sink.hpp | 91 +++++++++++++++++++++++++------------------ test/qa_data_sink.cpp | 18 ++++++++- 2 files changed, 69 insertions(+), 40 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index b5703b60..72ceabba 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -68,14 +68,14 @@ class data_sink_registry { } template - bool register_streaming_callback(std::string_view name, Callback callback) { + bool register_streaming_callback(std::string_view name, std::size_t max_chunk_size, Callback callback) { std::lock_guard lg{mutex}; auto sink = find_sink(name); if (!sink) { return false; } - sink->register_streaming_callback(std::move(callback)); + sink->register_streaming_callback(max_chunk_size, std::move(callback)); return true; } @@ -118,6 +118,7 @@ class data_sink : public node> { std::atomic drop_count = 0; gr::circular_buffer buffer = gr::circular_buffer(listener_buffer_size); decltype(buffer.new_reader()) reader = buffer.new_reader(); + decltype(buffer.new_writer()) writer = buffer.new_writer(); template [[nodiscard]] bool process(Handler fnc) { @@ -161,14 +162,23 @@ class data_sink : public node> { std::size_t pending_post_samples = 0; }; + // TODO we might want to use separate template types for different { acquisition mode x polling/callback } combinations and ship + // our own type erasure instead of using std::function struct listener_t { acquisition_mode mode = acquisition_mode::Triggered; + bool block = false; + + // Continuous/Callback + std::size_t buffer_fill = 0; + std::vector buffer; + + // Triggered-only std::size_t pre_samples = 0; std::size_t post_samples = 0; - bool block = false; + std::function trigger_predicate = {}; - gr::circular_buffer buffer = gr::circular_buffer(0); std::deque 
pending_trigger_windows; // triggers that still didn't receive all their data + std::function)> callback = {}; // TODO we might want to pass back stats here like drop_count std::weak_ptr dataset_polling_handler = {}; std::weak_ptr polling_handler = {}; @@ -204,9 +214,9 @@ class data_sink : public node> { auto handler = std::make_shared(); listeners.push_back({ .mode = acquisition_mode::Triggered, + .block = block == blocking_mode::Blocking, .pre_samples = pre_samples, .post_samples = post_samples, - .block = block == blocking_mode::Blocking, .trigger_predicate = std::move(p), .dataset_polling_handler = handler }); @@ -215,10 +225,11 @@ class data_sink : public node> { } template - void register_streaming_callback(Callback callback) { + void register_streaming_callback(std::size_t max_chunk_size, Callback callback) { std::lock_guard lg(listener_mutex); listeners.push_back({ .mode = acquisition_mode::Continuous, + .buffer = std::vector(max_chunk_size), .callback = std::move(callback) }); } @@ -238,10 +249,11 @@ class data_sink : public node> { } } else if (listener.mode == acquisition_mode::Continuous) { if (auto p = listener.polling_handler.lock()) { - // TODO pass remaining data p->finished = true; } else { - // TODO pass remaining data to callback + if (!listener.buffer.empty()) { + listener.callback(std::span(std::span(listener.buffer).first(listener.buffer_fill))); + } } } } @@ -277,7 +289,7 @@ class data_sink : public node> { std::lock_guard lg(listener_mutex); for (auto &listener : listeners) { if (listener.mode == acquisition_mode::Continuous) { - write_to_listener(listener, std::vector>{in_data}); + write_continuous_data(listener, in_data); } else if (listener.mode == acquisition_mode::Triggered || listener.mode == acquisition_mode::PostMortem) { auto filtered = tags; // should use views::filter once that is working everywhere std::erase_if(filtered, [&p = listener.trigger_predicate](const auto &tag) { @@ -334,7 +346,6 @@ class data_sink : public node> { history.assign(history_data.begin(), history_data.end()); } - n_samples_consumed += noutput_items; if (!reader.consume(noutput_items)) { @@ -367,48 +378,52 @@ class data_sink : public node> { } } - inline void write_to_listener(listener_t &l, const std::vector> &spans) { - std::size_t total_size = 0; - for (const auto &i : spans) { - total_size += i.size(); - } - - if (total_size == 0) { + inline void write_continuous_data(listener_t &l, std::span data) { + if (data.empty()) { return; } if (auto poller = l.polling_handler.lock()) { - auto writer = poller->buffer.new_writer(); + auto &writer = poller->writer; if (l.block) { - auto write_data = writer.reserve_output_range(total_size); - auto target = write_data.begin(); - for (const auto &in_data : spans) { - std::copy(in_data.begin(), in_data.end(), target); - target += in_data.size(); - } + auto write_data = writer.reserve_output_range(data.size()); + std::copy(data.begin(), data.end(), write_data.begin()); write_data.publish(write_data.size()); } else { const auto can_write = writer.available(); - auto to_write = std::min(total_size, can_write); - poller->drop_count += total_size - can_write; + auto to_write = std::min(data.size(), can_write); + poller->drop_count += data.size() - can_write; if (to_write > 0) { auto write_data = writer.reserve_output_range(to_write); - std::size_t written = 0; - for (const auto &in_data : spans) { - const auto n = std::min(in_data.size(), to_write - written); - std::copy(in_data.begin(), in_data.begin() + n, write_data.begin() + written); - written 
+= n; - if (written == to_write) { - break; - } - } + const auto sub = data.first(to_write); + std::copy(sub.begin(), sub.end(), write_data.begin()); write_data.publish(write_data.size()); } } } else if (l.callback) { - // TODO use buffer/make this one call - for (const auto &data : spans) { - l.callback(data); + // if there's pending data, fill buffer and send out + if (l.buffer_fill > 0) { + const auto n = std::min(data.size(), l.buffer.size() - l.buffer_fill); + std::copy(data.begin(), data.begin() + n, l.buffer.begin() + l.buffer_fill); + l.buffer_fill += n; + if (l.buffer_fill == l.buffer.size()) { + l.callback(std::span(l.buffer)); + l.buffer_fill = 0; + } + + data = data.last(data.size() - n); + } + + // send out complete chunks directly + while (data.size() > l.buffer.size()) { + l.callback(data.first(l.buffer.size())); + data = data.last(data.size() - l.buffer.size()); + } + + // write remaining data to the buffer + if (!data.empty()) { + std::copy(data.begin(), data.end(), l.buffer.begin()); + l.buffer_fill = data.size(); } } } diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index aba3bbe3..ed05447f 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -73,6 +73,9 @@ const boost::ut::suite DataSinkTests = [] { "callback continuous mode"_test = [] { graph flow_graph; + static constexpr std::int32_t n_samples = 200005; + static constexpr std::size_t chunk_size = 1000; + auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); auto &sink = flow_graph.make_node>(); sink.set_name("test_sink"); @@ -80,18 +83,29 @@ const boost::ut::suite DataSinkTests = [] { expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); std::size_t samples_seen = 0; - auto callback = [&samples_seen](std::span buffer) { + std::size_t chunks_seen = 0; + auto callback = [&samples_seen, &chunks_seen](std::span buffer) { for (std::size_t i = 0; i < buffer.size(); ++i) { expect(eq(buffer[i], static_cast(samples_seen + i))); } + samples_seen += buffer.size(); + chunks_seen++; + if (chunks_seen < 201) { + expect(eq(buffer.size(), chunk_size)); + } else { + expect(eq(buffer.size(), 5)); + } }; - expect(data_sink_registry::instance().register_streaming_callback("test_sink", callback)); + expect(data_sink_registry::instance().register_streaming_callback("test_sink", chunk_size, callback)); fair::graph::scheduler::simple sched{std::move(flow_graph)}; sched.work(); + sink.stop(); // TODO the scheduler should call this + + expect(eq(chunks_seen, 201)); expect(eq(sink.n_samples_consumed, n_samples)); expect(eq(samples_seen, n_samples)); }; From 11283568c0f985ee9578a165b18946e5b511ded4 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Wed, 14 Jun 2023 15:40:23 +0200 Subject: [PATCH 15/64] Support triggered mode with callbacks --- include/data_sink.hpp | 33 ++++++++++++++++++++++++++++---- test/qa_data_sink.cpp | 44 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+), 4 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index 72ceabba..89db3f47 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -79,6 +79,18 @@ class data_sink_registry { return true; } + template + bool register_trigger_callback(std::string_view name, TriggerPredicate p, std::size_t pre_samples, std::size_t post_samples, Callback callback) { + std::lock_guard lg{mutex}; + auto sink = find_sink(name); + if (!sink) { + return false; + } + + sink->register_trigger_callback(std::move(p), pre_samples, post_samples, 
std::move(callback)); + return true; + } + private: template data_sink* find_sink(std::string_view name) { @@ -163,7 +175,7 @@ class data_sink : public node> { }; // TODO we might want to use separate template types for different { acquisition mode x polling/callback } combinations and ship - // our own type erasure instead of using std::function + // our own type erasure/or just virtuals instead of using std::function struct listener_t { acquisition_mode mode = acquisition_mode::Triggered; bool block = false; @@ -179,7 +191,8 @@ class data_sink : public node> { std::function trigger_predicate = {}; std::deque pending_trigger_windows; // triggers that still didn't receive all their data - std::function)> callback = {}; // TODO we might want to pass back stats here like drop_count + std::function)> callback = {}; // TODO we might want to optionally pass back stats here like drop_count + std::function&&)> dataset_callback = {}; std::weak_ptr dataset_polling_handler = {}; std::weak_ptr polling_handler = {}; int64_t drop_count = 0; @@ -224,6 +237,18 @@ class data_sink : public node> { return handler; } + template + void register_trigger_callback(TriggerPredicate p, std::size_t pre_samples, std::size_t post_samples, Callback callback) { + std::lock_guard lg(listener_mutex); + listeners.push_back({ + .mode = acquisition_mode::Triggered, + .pre_samples = pre_samples, + .post_samples = post_samples, + .trigger_predicate = std::move(p), + .dataset_callback = std::move(callback) + }); + } + template void register_streaming_callback(std::size_t max_chunk_size, Callback callback) { std::lock_guard lg(listener_mutex); @@ -373,8 +398,8 @@ class data_sink : public node> { poller->drop_count++; } } - } else { // if callback... - // TODO call callback + } else if (l.dataset_callback) { + l.dataset_callback(std::move(data)); } } diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index ed05447f..7818a7eb 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -264,6 +264,50 @@ const boost::ut::suite DataSinkTests = [] { expect(eq(poller->drop_count.load(), 0)); }; + "callback trigger mode overlapping"_test = [] { + constexpr std::int32_t n_samples = 2000000; + constexpr std::size_t n_triggers = 5000; + + graph flow_graph; + auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); + + for (std::size_t i = 0; i < n_triggers; ++i) { + src.tags.push_back(tag_t{static_cast(60000 + i), {{"TYPE", "TRIGGER"}}}); + } + + auto &sink = flow_graph.make_node>(); + sink.set_name("test_sink"); + + expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); + + auto is_trigger = [](const tag_t &tag) { + return true; + }; + + std::mutex m; + std::vector received_data; + + auto callback = [&received_data, &m](auto &&dataset) { + std::lock_guard lg{m}; + expect(eq(dataset.signal_values.size(), 5000)); + received_data.push_back(dataset.signal_values.front()); + received_data.push_back(dataset.signal_values.back()); + }; + + data_sink_registry::instance().register_trigger_callback("test_sink", is_trigger, 3000, 2000, callback); + + fair::graph::scheduler::simple sched{std::move(flow_graph)}; + sched.work(); + + sink.stop(); // TODO the scheduler should call this + + std::lock_guard lg{m}; + auto expected_start = std::vector{57000, 61999, 57001, 62000, 57002}; + expect(eq(sink.n_samples_consumed, n_samples)); + expect(eq(received_data.size(), 2 * n_triggers)); + expect(eq(std::vector(received_data.begin(), received_data.begin() + 5), expected_start)); + }; + 
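+ // Note on expected_start above: each trigger sits at index 60000 + i, and with 3000
+ // pre-samples and 2000 post-samples the captured window is [57000 + i, 61999 + i],
+ // so the first front/back pairs are 57000, 61999, 57001, 62000, 57002, ...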
"non-blocking polling continuous mode"_test = [] { graph flow_graph; auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); From 238af2cc246c2bd3a8d2032be535defcbbcb0648 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Wed, 14 Jun 2023 15:57:26 +0200 Subject: [PATCH 16/64] Unify pollers for T and DataSet --- include/data_sink.hpp | 20 ++++++++------------ test/qa_data_sink.cpp | 8 ++++---- 2 files changed, 12 insertions(+), 16 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index 89db3f47..fc02961f 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -125,15 +125,16 @@ class data_sink : public node> { static constexpr std::size_t listener_buffer_size = 65536; - struct poller { + template + struct poller_t { std::atomic finished = false; std::atomic drop_count = 0; - gr::circular_buffer buffer = gr::circular_buffer(listener_buffer_size); + gr::circular_buffer buffer = gr::circular_buffer(listener_buffer_size); decltype(buffer.new_reader()) reader = buffer.new_reader(); decltype(buffer.new_writer()) writer = buffer.new_writer(); template - [[nodiscard]] bool process(Handler fnc) { + [[nodiscard]] bool process_bulk(Handler fnc) { const auto available = reader.available(); if (available == 0) { return false; @@ -144,17 +145,9 @@ class data_sink : public node> { reader.consume(available); return true; } - }; - - struct dataset_poller { - std::atomic finished = false; - std::atomic drop_count = 0; - gr::circular_buffer> buffer = gr::circular_buffer>(listener_buffer_size); // TODO use other size? - decltype(buffer.new_reader()) reader = buffer.new_reader(); - decltype(buffer.new_writer()) writer = buffer.new_writer(); template - [[nodiscard]] bool process(Handler fnc) { + [[nodiscard]] bool process_one(Handler fnc) { const auto available = reader.available(); if (available == 0) { return false; @@ -167,6 +160,9 @@ class data_sink : public node> { } }; + using poller = poller_t; + using dataset_poller = poller_t>; + private: struct pending_window_t { tag_t trigger; diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index 7818a7eb..48dda54a 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -136,7 +136,7 @@ const boost::ut::suite DataSinkTests = [] { while (!seen_finished) { // TODO make finished vs. pending data handling actually thread-safe seen_finished = poller->finished.load(); - while (poller->process([&received](const auto &data) { + while (poller->process_bulk([&received](const auto &data) { received.insert(received.end(), data.begin(), data.end()); })) {} } @@ -188,7 +188,7 @@ const boost::ut::suite DataSinkTests = [] { auto polling = std::async([poller, &received_data, &m] { while (!poller->finished) { using namespace std::chrono_literals; - [[maybe_unused]] auto r = poller->process([&received_data, &m](const auto &dataset) { + [[maybe_unused]] auto r = poller->process_one([&received_data, &m](const auto &dataset) { std::lock_guard lg{m}; received_data.insert(received_data.end(), dataset.signal_values.begin(), dataset.signal_values.end()); }); @@ -240,7 +240,7 @@ const boost::ut::suite DataSinkTests = [] { while (!seen_finished) { // TODO make finished vs. 
pending data handling actually thread-safe seen_finished = poller->finished.load(); - while (poller->process([&received_data, &m](const auto &dataset) { + while (poller->process_one([&received_data, &m](const auto &dataset) { std::lock_guard lg{m}; expect(eq(dataset.signal_values.size(), 5000)); received_data.push_back(dataset.signal_values.front()); @@ -332,7 +332,7 @@ const boost::ut::suite DataSinkTests = [] { std::this_thread::sleep_for(20ms); seen_finished = poller->finished.load(); - while (poller->process([&samples_seen](const auto &data) { + while (poller->process_bulk([&samples_seen](const auto &data) { samples_seen += data.size(); })) {} } From 3cca5eb0b76b0e30bb9295c2faf55220b2909af7 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Thu, 15 Jun 2023 10:12:39 +0200 Subject: [PATCH 17/64] Process blocking listeners last --- include/data_sink.hpp | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index fc02961f..3b21df16 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -194,7 +194,7 @@ class data_sink : public node> { int64_t drop_count = 0; }; - std::vector listeners; + std::deque listeners; std::mutex listener_mutex; public: @@ -209,7 +209,7 @@ class data_sink : public node> { std::shared_ptr get_streaming_poller(blocking_mode block = blocking_mode::NonBlocking) { std::lock_guard lg(listener_mutex); auto handler = std::make_shared(); - listeners.push_back({ + add_listener({ .mode = acquisition_mode::Continuous, .block = block == blocking_mode::Blocking, .polling_handler = handler @@ -221,7 +221,7 @@ class data_sink : public node> { std::shared_ptr get_trigger_poller(TriggerPredicate p, std::size_t pre_samples, std::size_t post_samples, blocking_mode block = blocking_mode::NonBlocking) { std::lock_guard lg(listener_mutex); auto handler = std::make_shared(); - listeners.push_back({ + add_listener({ .mode = acquisition_mode::Triggered, .block = block == blocking_mode::Blocking, .pre_samples = pre_samples, @@ -236,7 +236,7 @@ class data_sink : public node> { template void register_trigger_callback(TriggerPredicate p, std::size_t pre_samples, std::size_t post_samples, Callback callback) { std::lock_guard lg(listener_mutex); - listeners.push_back({ + add_listener({ .mode = acquisition_mode::Triggered, .pre_samples = pre_samples, .post_samples = post_samples, @@ -248,7 +248,7 @@ class data_sink : public node> { template void register_streaming_callback(std::size_t max_chunk_size, Callback callback) { std::lock_guard lg(listener_mutex); - listeners.push_back({ + add_listener({ .mode = acquisition_mode::Continuous, .buffer = std::vector(max_chunk_size), .callback = std::move(callback) @@ -380,6 +380,14 @@ class data_sink : public node> { std::vector history; std::size_t history_available = 0; + void add_listener(listener_t&& l) { + if (l.block) { + listeners.push_back(std::move(l)); + } else { + listeners.push_front(std::move(l)); + } + } + inline void publish_dataset(listener_t &l, DataSet &&data) { if (auto poller = l.dataset_polling_handler.lock()) { auto write_data = poller->writer.reserve_output_range(1); From 3998933630291fc5c98188a93f3ae2570aa7bf82 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Thu, 15 Jun 2023 16:45:32 +0200 Subject: [PATCH 18/64] Refactor listeners Use virtual baseclass instead of std::function. Make the individual listener simpler and gives us more compile-time knowledge about the callback types. 
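
For illustration, a minimal standalone sketch of the resulting listener shape (simplified: tag_t is stubbed out here, the listener types are not nested inside data_sink as they are in this patch, the polling path is omitted, and the small main() is only a usage illustration):

    #include <cstdint>
    #include <deque>
    #include <memory>
    #include <span>
    #include <utility>
    #include <vector>

    struct tag_t { int64_t index = 0; }; // stand-in for fair::graph::tag_t

    template<typename T>
    struct abstract_listener_t {
        virtual ~abstract_listener_t() = default;
        virtual void process_bulk(std::span<const T> history, std::span<const T> data,
                                  int64_t reader_position, const std::vector<tag_t> &tags) = 0;
        virtual void flush() = 0;
    };

    // the callback's concrete type is a template parameter, so it is stored and invoked
    // directly instead of being wrapped in a std::function
    template<typename T, typename Callback>
    struct continuous_listener_t : abstract_listener_t<T> {
        Callback callback;

        explicit continuous_listener_t(Callback c) : callback(std::move(c)) {}

        void process_bulk(std::span<const T>, std::span<const T> data, int64_t,
                          const std::vector<tag_t> &) override {
            callback(data);
        }

        void flush() override {}
    };

    int main() {
        // the sink only stores the type-erased interface
        std::deque<std::unique_ptr<abstract_listener_t<float>>> listeners;
        auto cb = [](std::span<const float> chunk) { /* consume chunk */ };
        listeners.push_back(std::make_unique<continuous_listener_t<float, decltype(cb)>>(cb));

        const std::vector<float> samples{1.f, 2.f, 3.f};
        for (auto &l : listeners) {
            l->process_bulk({}, samples, 0, {});
            l->flush();
        }
    }

work() keeps dispatching through the virtual process_bulk()/flush() interface, while each concrete listener knows its callback type at compile time and stores the callback by value.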
--- include/data_sink.hpp | 393 +++++++++++++++++++++++------------------- 1 file changed, 211 insertions(+), 182 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index 3b21df16..667fbd11 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -170,31 +170,213 @@ class data_sink : public node> { std::size_t pending_post_samples = 0; }; - // TODO we might want to use separate template types for different { acquisition mode x polling/callback } combinations and ship - // our own type erasure/or just virtuals instead of using std::function - struct listener_t { - acquisition_mode mode = acquisition_mode::Triggered; - bool block = false; + struct abstract_listener_t { + virtual ~abstract_listener_t() = default; + virtual void process_bulk(std::span history, std::span data, int64_t reader_position, const std::vector &tags) = 0; + virtual void flush() = 0; + }; - // Continuous/Callback + template + struct continuous_listener_t : public abstract_listener_t { + bool block = false; + // callback-only std::size_t buffer_fill = 0; std::vector buffer; - // Triggered-only + // polling-only + std::weak_ptr polling_handler = {}; + + Callback callback; + + explicit continuous_listener_t(std::size_t max_chunk_size, Callback c) + : buffer(max_chunk_size) + , callback{std::forward(c)} + {} + + explicit continuous_listener_t(std::shared_ptr poller, bool do_block) + : block(do_block) + , polling_handler{std::move(poller)} + {} + + void process_bulk(std::span, std::span data, int64_t /*reader_position*/, const std::vector &tags) override { + if constexpr (!std::is_same_v) { + // if there's pending data, fill buffer and send out + if (buffer_fill > 0) { + const auto n = std::min(data.size(), buffer.size() - buffer_fill); + std::copy(data.begin(), data.begin() + n, buffer.begin() + buffer_fill); + buffer_fill += n; + if (buffer_fill == buffer.size()) { + callback(std::span(buffer)); + buffer_fill = 0; + } + + data = data.last(data.size() - n); + } + + // send out complete chunks directly + while (data.size() > buffer.size()) { + callback(data.first(buffer.size())); + data = data.last(data.size() - buffer.size()); + } + + // write remaining data to the buffer + if (!data.empty()) { + std::copy(data.begin(), data.end(), buffer.begin()); + buffer_fill = data.size(); + } + } else { + auto poller = polling_handler.lock(); + if (!poller) { + // TODO someone remove this listener from the list + return; + } + + if (block) { + auto write_data = poller->writer.reserve_output_range(data.size()); + std::copy(data.begin(), data.end(), write_data.begin()); + write_data.publish(write_data.size()); + } else { + const auto can_write = poller->writer.available(); + auto to_write = std::min(data.size(), can_write); + poller->drop_count += data.size() - can_write; + if (to_write > 0) { + auto write_data = poller->writer.reserve_output_range(to_write); + const auto sub = data.first(to_write); + std::copy(sub.begin(), sub.end(), write_data.begin()); + write_data.publish(write_data.size()); + } + } + } + } + + void flush() override { + if constexpr (!std::is_same_v) { + if (buffer_fill > 0) { + callback(std::span(buffer).first(buffer_fill)); + buffer_fill = 0; + } + } else { + if (auto p = polling_handler.lock()) { + p->finished = true; + } + } + } + }; + + template + struct trigger_listener_t : public abstract_listener_t { + bool block = false; std::size_t pre_samples = 0; std::size_t post_samples = 0; - std::function trigger_predicate = {}; + TriggerPredicate trigger_predicate = {}; std::deque 
pending_trigger_windows; // triggers that still didn't receive all their data + std::weak_ptr polling_handler = {}; + + Callback callback; + + explicit trigger_listener_t(TriggerPredicate predicate, std::shared_ptr handler, std::size_t pre, std::size_t post, bool do_block) + : block(do_block) + , pre_samples(pre) + , post_samples(post) + , trigger_predicate(std::forward(predicate)) + , polling_handler{std::move(handler)} + {} + + explicit trigger_listener_t(TriggerPredicate predicate, std::size_t pre, std::size_t post, Callback cb) + : pre_samples(pre) + , post_samples(post) + , trigger_predicate(std::forward(predicate)) + , callback{std::forward(cb)} + {} + + inline void publish_dataset(DataSet &&data) { + if constexpr (!std::is_same_v) { + callback(std::move(data)); + } else { + auto poller = polling_handler.lock(); + if (!poller) { + return; + } - std::function)> callback = {}; // TODO we might want to optionally pass back stats here like drop_count - std::function&&)> dataset_callback = {}; - std::weak_ptr dataset_polling_handler = {}; - std::weak_ptr polling_handler = {}; - int64_t drop_count = 0; - }; + auto write_data = poller->writer.reserve_output_range(1); + if (block) { + write_data[0] = std::move(data); + write_data.publish(1); + } else { + if (poller->writer.available() > 0) { + write_data[0] = std::move(data); + write_data.publish(1); + } else { + poller->drop_count++; + } + } + } + } + + void process_bulk(std::span history, std::span in_data, int64_t reader_position, const std::vector &tags) override { + auto filtered = tags; // should use views::filter once that is working everywhere + std::erase_if(filtered, [this](const auto &tag) { + return !trigger_predicate(tag); + }); + for (const auto &trigger : filtered) { + // TODO fill dataset with metadata etc. + DataSet dataset; + dataset.timing_events = {{trigger}}; + dataset.signal_values.reserve(pre_samples + post_samples); // TODO maybe make the circ. 
buffer smaller but preallocate these + pending_trigger_windows.push_back({.trigger = trigger, .dataset = std::move(dataset), .pending_post_samples = post_samples}); + } + + auto window = pending_trigger_windows.begin(); + while (window != pending_trigger_windows.end()) { + auto &dataset = window->dataset; + const auto window_offset = window->trigger.index - reader_position; + + if (window_offset >= 0 && dataset.signal_values.empty()) { // new trigger, write history + // old history: pre-trigger data from previous in_data (if available) + const auto old_history_size = std::max(static_cast(pre_samples) - window_offset, std::int64_t{0}); + const auto available = std::min(static_cast(old_history_size), history.size()); + const auto old_history_view = history.last(available); + dataset.signal_values.insert(dataset.signal_values.end(), old_history_view.begin(), old_history_view.end()); + + // new history: pre-trigger samples from the current in_data + const auto new_history_size = pre_samples - old_history_size; + const auto new_history_view = in_data.subspan(window_offset - new_history_size, new_history_size); + dataset.signal_values.insert(dataset.signal_values.end(), new_history_view.begin(), new_history_view.end()); + } + + // write missing post-samples + const auto previous_post_samples = post_samples - window->pending_post_samples; + const auto first_requested = window_offset + previous_post_samples; + const auto last_requested = window_offset + post_samples - 1; + const auto last_available = std::min(last_requested, in_data.size() - 1); + const auto post_sample_view = in_data.subspan(first_requested, last_available - first_requested + 1); + dataset.signal_values.insert(dataset.signal_values.end(), post_sample_view.begin(), post_sample_view.end()); + window->pending_post_samples -= post_sample_view.size(); + + if (window->pending_post_samples == 0) { + publish_dataset(std::move(dataset)); + window = pending_trigger_windows.erase(window); + } else { + ++window; + } + } + } - std::deque listeners; + void flush() override { + for (auto &window : pending_trigger_windows) { + if (!window.dataset.signal_values.empty()) { + publish_dataset(std::move(window.dataset)); + } + } + pending_trigger_windows.clear(); + if (auto p = polling_handler.lock()) { + p->finished = true; + } + } + }; + + std::deque> listeners; std::mutex listener_mutex; public: @@ -206,29 +388,20 @@ class data_sink : public node> { data_sink_registry::instance().unregister_sink(this); } - std::shared_ptr get_streaming_poller(blocking_mode block = blocking_mode::NonBlocking) { + std::shared_ptr get_streaming_poller(blocking_mode block_mode = blocking_mode::NonBlocking) { std::lock_guard lg(listener_mutex); + const auto block = block_mode == blocking_mode::Blocking; auto handler = std::make_shared(); - add_listener({ - .mode = acquisition_mode::Continuous, - .block = block == blocking_mode::Blocking, - .polling_handler = handler - }); + add_listener(std::make_unique>(handler, block), block); return handler; } template - std::shared_ptr get_trigger_poller(TriggerPredicate p, std::size_t pre_samples, std::size_t post_samples, blocking_mode block = blocking_mode::NonBlocking) { - std::lock_guard lg(listener_mutex); + std::shared_ptr get_trigger_poller(TriggerPredicate p, std::size_t pre_samples, std::size_t post_samples, blocking_mode block_mode = blocking_mode::NonBlocking) { + const auto block = block_mode == blocking_mode::Blocking; auto handler = std::make_shared(); - add_listener({ - .mode = acquisition_mode::Triggered, - .block 
= block == blocking_mode::Blocking, - .pre_samples = pre_samples, - .post_samples = post_samples, - .trigger_predicate = std::move(p), - .dataset_polling_handler = handler - }); + std::lock_guard lg(listener_mutex); + add_listener(std::make_unique>(std::forward(p), handler, pre_samples, post_samples, block), block); history.resize(std::max(pre_samples, history.size())); return handler; } @@ -236,47 +409,20 @@ class data_sink : public node> { template void register_trigger_callback(TriggerPredicate p, std::size_t pre_samples, std::size_t post_samples, Callback callback) { std::lock_guard lg(listener_mutex); - add_listener({ - .mode = acquisition_mode::Triggered, - .pre_samples = pre_samples, - .post_samples = post_samples, - .trigger_predicate = std::move(p), - .dataset_callback = std::move(callback) - }); + add_listener(std::make_unique>(std::forward(p), pre_samples, post_samples, std::forward(callback)), false); + history.resize(std::max(pre_samples, history.size())); } template void register_streaming_callback(std::size_t max_chunk_size, Callback callback) { - std::lock_guard lg(listener_mutex); - add_listener({ - .mode = acquisition_mode::Continuous, - .buffer = std::vector(max_chunk_size), - .callback = std::move(callback) - }); + add_listener(std::make_unique>(max_chunk_size, std::forward(callback)), false); } // TODO this code should be called at the end of graph processing void stop() { std::lock_guard lg(listener_mutex); for (auto &listener : listeners) { - if (listener.mode == acquisition_mode::Triggered || listener.mode == acquisition_mode::PostMortem) { - // send out any incomplete data windows - for (auto &window : listener.pending_trigger_windows) { - publish_dataset(listener, std::move(window.dataset)); - } - - if (auto p = listener.dataset_polling_handler.lock()) { - p->finished = true; - } - } else if (listener.mode == acquisition_mode::Continuous) { - if (auto p = listener.polling_handler.lock()) { - p->finished = true; - } else { - if (!listener.buffer.empty()) { - listener.callback(std::span(std::span(listener.buffer).first(listener.buffer_fill))); - } - } - } + listener->flush(); } } @@ -309,55 +455,7 @@ class data_sink : public node> { { std::lock_guard lg(listener_mutex); for (auto &listener : listeners) { - if (listener.mode == acquisition_mode::Continuous) { - write_continuous_data(listener, in_data); - } else if (listener.mode == acquisition_mode::Triggered || listener.mode == acquisition_mode::PostMortem) { - auto filtered = tags; // should use views::filter once that is working everywhere - std::erase_if(filtered, [&p = listener.trigger_predicate](const auto &tag) { - return !p(tag); - }); - for (const auto &trigger : filtered) { - // TODO fill dataset with metadata etc. 
- DataSet dataset; - dataset.timing_events = {{trigger}}; - dataset.signal_values.reserve(listener.pre_samples + listener.post_samples); - listener.pending_trigger_windows.push_back({.trigger = trigger, .dataset = std::move(dataset), .pending_post_samples = listener.post_samples}); - } - auto window = listener.pending_trigger_windows.begin(); - while (window != listener.pending_trigger_windows.end()) { - auto &dataset = window->dataset; - const auto window_offset = window->trigger.index - reader_position; - - if (window_offset >= 0 && dataset.signal_values.empty()) { // new trigger, write history - // old history: pre-trigger data from previous in_data (if available) - const auto old_history_size = std::max(static_cast(listener.pre_samples) - window_offset, std::int64_t{0}); - const auto available = std::min(static_cast(old_history_size), history_view.size()); - const auto old_history_view = history_view.last(available); - dataset.signal_values.insert(dataset.signal_values.end(), old_history_view.begin(), old_history_view.end()); - - // new history: pre-trigger samples from the current in_data - const auto new_history_size = listener.pre_samples - old_history_size; - const auto new_history_view = in_data.subspan(window_offset - new_history_size, new_history_size); - dataset.signal_values.insert(dataset.signal_values.end(), new_history_view.begin(), new_history_view.end()); - } - - // write missing post-samples - const auto previous_post_samples = listener.post_samples - window->pending_post_samples; - const auto first_requested = window_offset + previous_post_samples; - const auto last_requested = window_offset + listener.post_samples - 1; - const auto last_available = std::min(last_requested, noutput_items - 1); - const auto post_sample_view = in_data.subspan(first_requested, last_available - first_requested + 1); - dataset.signal_values.insert(dataset.signal_values.end(), post_sample_view.begin(), post_sample_view.end()); - window->pending_post_samples -= post_sample_view.size(); - - if (window->pending_post_samples == 0) { - publish_dataset(listener, std::move(dataset)); - window = listener.pending_trigger_windows.erase(window); - } else { - ++window; - } - } - } + listener->process_bulk(history, in_data, reader_position, tags); } // store potential pre-samples for triggers at the beginning of the next chunk @@ -380,82 +478,13 @@ class data_sink : public node> { std::vector history; std::size_t history_available = 0; - void add_listener(listener_t&& l) { - if (l.block) { + void add_listener(std::unique_ptr&& l, bool block) { + if (block) { listeners.push_back(std::move(l)); } else { listeners.push_front(std::move(l)); } } - - inline void publish_dataset(listener_t &l, DataSet &&data) { - if (auto poller = l.dataset_polling_handler.lock()) { - auto write_data = poller->writer.reserve_output_range(1); - if (l.block) { - write_data[0] = std::move(data); - write_data.publish(1); - } else { - if (poller->writer.available() > 0) { - write_data[0] = std::move(data); - write_data.publish(1); - } else { - poller->drop_count++; - } - } - } else if (l.dataset_callback) { - l.dataset_callback(std::move(data)); - } - } - - inline void write_continuous_data(listener_t &l, std::span data) { - if (data.empty()) { - return; - } - - if (auto poller = l.polling_handler.lock()) { - auto &writer = poller->writer; - if (l.block) { - auto write_data = writer.reserve_output_range(data.size()); - std::copy(data.begin(), data.end(), write_data.begin()); - write_data.publish(write_data.size()); - } else { - const 
auto can_write = writer.available(); - auto to_write = std::min(data.size(), can_write); - poller->drop_count += data.size() - can_write; - if (to_write > 0) { - auto write_data = writer.reserve_output_range(to_write); - const auto sub = data.first(to_write); - std::copy(sub.begin(), sub.end(), write_data.begin()); - write_data.publish(write_data.size()); - } - } - } else if (l.callback) { - // if there's pending data, fill buffer and send out - if (l.buffer_fill > 0) { - const auto n = std::min(data.size(), l.buffer.size() - l.buffer_fill); - std::copy(data.begin(), data.begin() + n, l.buffer.begin() + l.buffer_fill); - l.buffer_fill += n; - if (l.buffer_fill == l.buffer.size()) { - l.callback(std::span(l.buffer)); - l.buffer_fill = 0; - } - - data = data.last(data.size() - n); - } - - // send out complete chunks directly - while (data.size() > l.buffer.size()) { - l.callback(data.first(l.buffer.size())); - data = data.last(data.size() - l.buffer.size()); - } - - // write remaining data to the buffer - if (!data.empty()) { - std::copy(data.begin(), data.end(), l.buffer.begin()); - l.buffer_fill = data.size(); - } - } - } }; } From b0253e435d27098b027e229d5fcaa6e4a5108f04 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Fri, 16 Jun 2023 21:32:38 +0200 Subject: [PATCH 19/64] Add Multiplexed listener mode --- include/data_sink.hpp | 260 ++++++++++++++++++++++++++++++++---------- test/qa_data_sink.cpp | 193 ++++++++++++++++++++++++++++++- 2 files changed, 391 insertions(+), 62 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index 667fbd11..fd614217 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -10,17 +10,33 @@ namespace fair::graph { -enum class acquisition_mode { - Continuous, - Triggered, - PostMortem -}; - enum class blocking_mode { NonBlocking, Blocking }; +enum class trigger_observer_state { + Start, ///< Start a new dataset + Stop, ///< Finish dataset + StopAndStart, ///< Finish pending dataset, start a new one + Ignore ///< Ignore tag +}; + +template +concept TriggerPredicate = requires(T p, tag_t tag) { + {p(tag)} -> std::convertible_to; +}; + +template +concept TriggerObserver = requires(T o, tag_t tag) { + {o(tag)} -> std::convertible_to; +}; + +template +concept TriggerObserverFactory = requires(T f) { + {f()}; // TODO how assert that operator() must fullfill TriggerObserver? +}; + template class data_sink; @@ -60,13 +76,20 @@ class data_sink_registry { return sink ? sink->get_streaming_poller(block) : nullptr; } - template - std::shared_ptr::dataset_poller> get_trigger_poller(std::string_view name, TriggerPredicate p, std::size_t pre_samples, std::size_t post_samples, blocking_mode block = blocking_mode::NonBlocking) { + template + std::shared_ptr::dataset_poller> get_trigger_poller(std::string_view name, P p, std::size_t pre_samples, std::size_t post_samples, blocking_mode block = blocking_mode::NonBlocking) { std::lock_guard lg{mutex}; auto sink = find_sink(name); return sink ? sink->get_trigger_poller(std::move(p), pre_samples, post_samples, block) : nullptr; } + template + std::shared_ptr::dataset_poller> get_multiplexed_poller(std::string_view name, F triggerObserverFactory, std::size_t maximum_window_size, blocking_mode block = blocking_mode::NonBlocking) { + std::lock_guard lg{mutex}; + auto sink = find_sink(name); + return sink ? 
sink->get_multiplexed_poller(std::forward(triggerObserverFactory), maximum_window_size, block) : nullptr; + } + template bool register_streaming_callback(std::string_view name, std::size_t max_chunk_size, Callback callback) { std::lock_guard lg{mutex}; @@ -79,8 +102,8 @@ class data_sink_registry { return true; } - template - bool register_trigger_callback(std::string_view name, TriggerPredicate p, std::size_t pre_samples, std::size_t post_samples, Callback callback) { + template + bool register_trigger_callback(std::string_view name, P p, std::size_t pre_samples, std::size_t post_samples, Callback callback) { std::lock_guard lg{mutex}; auto sink = find_sink(name); if (!sink) { @@ -91,6 +114,18 @@ class data_sink_registry { return true; } + template + bool register_multiplexed_callback(std::string_view name, std::size_t maximum_window_size, Callback callback) { + std::lock_guard lg{mutex}; + auto sink = find_sink(name); + if (!sink) { + return false; + } + + sink->template register_multiplexed_callback(maximum_window_size, std::move(callback)); + return true; + } + private: template data_sink* find_sink(std::string_view name) { @@ -316,65 +351,154 @@ class data_sink : public node> { void process_bulk(std::span history, std::span in_data, int64_t reader_position, const std::vector &tags) override { auto filtered = tags; // should use views::filter once that is working everywhere - std::erase_if(filtered, [this](const auto &tag) { - return !trigger_predicate(tag); - }); - for (const auto &trigger : filtered) { - // TODO fill dataset with metadata etc. - DataSet dataset; - dataset.timing_events = {{trigger}}; - dataset.signal_values.reserve(pre_samples + post_samples); // TODO maybe make the circ. buffer smaller but preallocate these - pending_trigger_windows.push_back({.trigger = trigger, .dataset = std::move(dataset), .pending_post_samples = post_samples}); + std::erase_if(filtered, [this](const auto &tag) { + return !trigger_predicate(tag); + }); + for (const auto &trigger : filtered) { + // TODO fill dataset with metadata etc. + DataSet dataset; + dataset.timing_events = {{trigger}}; + dataset.signal_values.reserve(pre_samples + post_samples); // TODO maybe make the circ. 
buffer smaller but preallocate these + pending_trigger_windows.push_back({.trigger = trigger, .dataset = std::move(dataset), .pending_post_samples = post_samples}); + } + + auto window = pending_trigger_windows.begin(); + while (window != pending_trigger_windows.end()) { + auto &dataset = window->dataset; + const auto window_offset = window->trigger.index - reader_position; + + if (window_offset >= 0 && dataset.signal_values.empty()) { // new trigger, write history + // old history: pre-trigger data from previous in_data (if available) + const auto old_history_size = std::max(static_cast(pre_samples) - window_offset, std::int64_t{0}); + const auto available = std::min(static_cast(old_history_size), history.size()); + const auto old_history_view = history.last(available); + dataset.signal_values.insert(dataset.signal_values.end(), old_history_view.begin(), old_history_view.end()); + + // new history: pre-trigger samples from the current in_data + const auto new_history_size = pre_samples - old_history_size; + const auto new_history_view = in_data.subspan(window_offset - new_history_size, new_history_size); + dataset.signal_values.insert(dataset.signal_values.end(), new_history_view.begin(), new_history_view.end()); } - auto window = pending_trigger_windows.begin(); - while (window != pending_trigger_windows.end()) { - auto &dataset = window->dataset; - const auto window_offset = window->trigger.index - reader_position; - - if (window_offset >= 0 && dataset.signal_values.empty()) { // new trigger, write history - // old history: pre-trigger data from previous in_data (if available) - const auto old_history_size = std::max(static_cast(pre_samples) - window_offset, std::int64_t{0}); - const auto available = std::min(static_cast(old_history_size), history.size()); - const auto old_history_view = history.last(available); - dataset.signal_values.insert(dataset.signal_values.end(), old_history_view.begin(), old_history_view.end()); - - // new history: pre-trigger samples from the current in_data - const auto new_history_size = pre_samples - old_history_size; - const auto new_history_view = in_data.subspan(window_offset - new_history_size, new_history_size); - dataset.signal_values.insert(dataset.signal_values.end(), new_history_view.begin(), new_history_view.end()); - } + // write missing post-samples + const auto previous_post_samples = post_samples - window->pending_post_samples; + const auto first_requested = window_offset + previous_post_samples; + const auto last_requested = window_offset + post_samples - 1; + const auto last_available = std::min(last_requested, in_data.size() - 1); + const auto post_sample_view = in_data.subspan(first_requested, last_available - first_requested + 1); + dataset.signal_values.insert(dataset.signal_values.end(), post_sample_view.begin(), post_sample_view.end()); + window->pending_post_samples -= post_sample_view.size(); + + if (window->pending_post_samples == 0) { + publish_dataset(std::move(dataset)); + window = pending_trigger_windows.erase(window); + } else { + ++window; + } + } + } - // write missing post-samples - const auto previous_post_samples = post_samples - window->pending_post_samples; - const auto first_requested = window_offset + previous_post_samples; - const auto last_requested = window_offset + post_samples - 1; - const auto last_available = std::min(last_requested, in_data.size() - 1); - const auto post_sample_view = in_data.subspan(first_requested, last_available - first_requested + 1); - 
dataset.signal_values.insert(dataset.signal_values.end(), post_sample_view.begin(), post_sample_view.end()); - window->pending_post_samples -= post_sample_view.size(); - - if (window->pending_post_samples == 0) { - publish_dataset(std::move(dataset)); - window = pending_trigger_windows.erase(window); + void flush() override { + for (auto &window : pending_trigger_windows) { + if (!window.dataset.signal_values.empty()) { + publish_dataset(std::move(window.dataset)); + } + } + pending_trigger_windows.clear(); + if (auto p = polling_handler.lock()) { + p->finished = true; + } + } + }; + + template + struct multiplexed_listener_t : public abstract_listener_t { + bool block = false; + F observerFactory; + decltype(observerFactory()) observer; + std::optional> pending_dataset; + std::size_t maximum_window_size; + std::weak_ptr polling_handler = {}; + Callback callback; + + explicit multiplexed_listener_t(F factory, std::size_t max_window_size, Callback cb) : observerFactory(factory), observer(observerFactory()), maximum_window_size(max_window_size), callback(cb) {} + explicit multiplexed_listener_t(F factory, std::size_t max_window_size, std::shared_ptr handler, bool do_block) : observerFactory(factory), observer(observerFactory()), maximum_window_size(max_window_size), polling_handler{std::move(handler)}, block(do_block) {} + + inline void publish_dataset(DataSet &&data) { + if constexpr (!std::is_same_v) { + callback(std::move(data)); + } else { + auto poller = polling_handler.lock(); + if (!poller) { + return; + } + + auto write_data = poller->writer.reserve_output_range(1); + if (block) { + write_data[0] = std::move(data); + write_data.publish(1); + } else { + if (poller->writer.available() > 0) { + write_data[0] = std::move(data); + write_data.publish(1); } else { - ++window; + poller->drop_count++; } } } + } - void flush() override { - for (auto &window : pending_trigger_windows) { - if (!window.dataset.signal_values.empty()) { - publish_dataset(std::move(window.dataset)); + inline void fill_pending_dataset(std::span in_data, int64_t reader_position, int64_t last_sample) { + const auto max_samples = static_cast(maximum_window_size - pending_dataset->signal_values.size()); + const auto first_sample = std::max(pending_dataset->timing_events[0][0].index - reader_position, int64_t{0}); + const auto actual_last_sample = std::min(first_sample + max_samples - 1, last_sample); + if (actual_last_sample >= first_sample) { + pending_dataset->signal_values.insert(pending_dataset->signal_values.end(), in_data.begin() + first_sample, in_data.begin() + actual_last_sample + 1); + } + } + + void process_bulk(std::span, std::span in_data, int64_t reader_position, const std::vector &tags) override { + for (const auto &tag :tags) { + const auto obsr = observer(tag); + // TODO set proper error state instead of throwing + if (obsr == trigger_observer_state::Stop || obsr == trigger_observer_state::StopAndStart) { + if (obsr == trigger_observer_state::Stop && !pending_dataset) { + throw std::runtime_error("multiplexed: Stop without start"); } + + pending_dataset->timing_events[0].push_back(tag); + fill_pending_dataset(in_data, reader_position, tag.index - reader_position - 1); + publish_dataset(std::move(*pending_dataset)); + pending_dataset.reset(); } - pending_trigger_windows.clear(); - if (auto p = polling_handler.lock()) { - p->finished = true; + if (obsr == trigger_observer_state::Start || obsr == trigger_observer_state::StopAndStart) { + if (obsr == trigger_observer_state::Start && pending_dataset) { + 
throw std::runtime_error("multiplexed: Two starts without stop"); + } + pending_dataset = DataSet(); + pending_dataset->signal_values.reserve(maximum_window_size); // TODO might be too much? + pending_dataset->timing_events = {{tag}}; } } - }; + if (pending_dataset) { + fill_pending_dataset(in_data, reader_position, in_data.size() - 1); + if (pending_dataset->signal_values.size() == maximum_window_size) { + publish_dataset(std::move(*pending_dataset)); + pending_dataset.reset(); + } + } + } + + void flush() override { + if (pending_dataset) { + publish_dataset(std::move(*pending_dataset)); + pending_dataset.reset(); + } + if (auto p = polling_handler.lock()) { + p->finished = true; + } + } + }; std::deque> listeners; std::mutex listener_mutex; @@ -406,11 +530,13 @@ class data_sink : public node> { return handler; } - template - void register_trigger_callback(TriggerPredicate p, std::size_t pre_samples, std::size_t post_samples, Callback callback) { + template + std::shared_ptr get_multiplexed_poller(F triggerObserverFactory, std::size_t maximum_window_size, blocking_mode block_mode = blocking_mode::NonBlocking) { std::lock_guard lg(listener_mutex); - add_listener(std::make_unique>(std::forward(p), pre_samples, post_samples, std::forward(callback)), false); - history.resize(std::max(pre_samples, history.size())); + const auto block = block_mode == blocking_mode::Blocking; + auto handler = std::make_shared(); + add_listener(std::make_unique>(std::move(triggerObserverFactory), maximum_window_size, handler, block), block); + return handler; } template @@ -418,6 +544,18 @@ class data_sink : public node> { add_listener(std::make_unique>(max_chunk_size, std::forward(callback)), false); } + template + void register_trigger_callback(P p, std::size_t pre_samples, std::size_t post_samples, Callback callback) { + add_listener(std::make_unique>(std::forward
<P>
(p), pre_samples, post_samples, std::forward(callback)), false); + history.resize(std::max(pre_samples, history.size())); + } + + template + void register_multiplexed_callback(F triggerObserverFactory, std::size_t maximum_window_size, Callback callback) { + std::lock_guard lg(listener_mutex); + add_listener(std::make_unique(std::move(triggerObserverFactory), maximum_window_size, std::forward(callback)), false); + } + // TODO this code should be called at the end of graph processing void stop() { std::lock_guard lg(listener_mutex); diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index 48dda54a..1b8b00c6 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -61,6 +61,114 @@ struct Source : public node> { } }; +struct Observer { + std::optional year; + std::optional month; + std::optional day; + std::optional> last_seen; + bool last_matched = false; + + explicit Observer(std::optional y, std::optional m, std::optional d) : year(y), month(m), day(d) {} + + static inline bool same(int x, std::optional other) { + return other && x == *other; + } + static inline bool changed(int x, std::optional other) { + return !same(x, other); + } + + trigger_observer_state operator()(const tag_t &tag) { + const auto ty = tag.get("Y"); + const auto tm = tag.get("M"); + const auto td = tag.get("D"); + if (!ty || !tm || !td) { + return trigger_observer_state::Ignore; + } + + const auto tup = std::make_tuple(std::get(ty->get()), std::get(tm->get()), std::get(td->get())); + const auto &[y, m, d] = tup; + const auto ly = last_seen ? std::optional(std::get<0>(*last_seen)) : std::nullopt; + const auto lm = last_seen ? std::optional(std::get<1>(*last_seen)) : std::nullopt; + const auto ld = last_seen ? std::optional(std::get<2>(*last_seen)) : std::nullopt; + + const auto year_restart = year && *year == -1 && changed(y, ly); + const auto year_matches = !year || *year == -1 || same(y, year); + const auto month_restart = month && *month == -1 && changed(m, lm); + const auto month_matches = !month || *month == -1 || same(m, month); + const auto day_restart = day && *day == -1 && changed(d, ld); + const auto day_matches = !day || *day == -1 || same(d, day); + const auto matches = year_matches && month_matches && day_matches; + const auto restart = year_restart || month_restart || day_restart; + + trigger_observer_state r = trigger_observer_state::Ignore; + + if (last_matched && !matches) { + r = trigger_observer_state::Stop; + } else if (!last_matched && matches) { + r = trigger_observer_state::Start; + } else if ((!last_seen || last_matched) && matches && restart) { + r = trigger_observer_state::StopAndStart; + } + + last_seen = tup; + last_matched = matches; + return r; + } +}; + +static tag_t make_tag(tag_t::index_type index, int y, int m, int d) { + tag_t::map_type map; + return tag_t{index, {{"Y", y}, {"M", m}, {"D", d}}}; +} + +static std::vector make_test_tags(tag_t::index_type first_index, tag_t::index_type interval) { + std::vector tags; + for (int y = 1; y <= 3; ++y) { + for (int m = 1; m <= 2; ++m) { + for (int d = 1; d <= 3; ++d) { + tags.push_back(make_tag(first_index, y, m, d)); + first_index += interval; + } + } + } + return tags; +} + +static std::string to_ascii_art(std::span states) { + bool started = false; + std::string r; + for (auto s : states) { + switch (s) { + case trigger_observer_state::Start: + r += started ? "E" : "|#"; + started = true; + break; + case trigger_observer_state::Stop: + r += started ? 
"|_" : "E"; + started = false; + break; + case trigger_observer_state::StopAndStart: + r += started ? "||#" : "|#"; + started = true; + break; + case trigger_observer_state::Ignore: + r += started ? "#" : "_"; + break; + } + }; + return r; +} + +template +std::string run_observer_test(std::span tags, O o) { + std::vector r; + r.reserve(tags.size()); + for (const auto &tag : tags) { + r.push_back(o(tag)); + } + return to_ascii_art(r); +} + } // namespace fair::graph::data_sink_test ENABLE_REFLECTION_FOR_TEMPLATE_FULL((typename T), (fair::graph::data_sink_test::Source), out, n_samples_produced, n_samples_max, n_tag_offset, sample_rate); @@ -69,6 +177,7 @@ const boost::ut::suite DataSinkTests = [] { using namespace boost::ut; using namespace fair::graph; using namespace fair::graph::data_sink_test; + using namespace std::string_literals; "callback continuous mode"_test = [] { graph flow_graph; @@ -186,7 +295,9 @@ const boost::ut::suite DataSinkTests = [] { std::vector received_data; auto polling = std::async([poller, &received_data, &m] { - while (!poller->finished) { + bool seen_finished = false; + while (!seen_finished) { + seen_finished = poller->finished; using namespace std::chrono_literals; [[maybe_unused]] auto r = poller->process_one([&received_data, &m](const auto &dataset) { std::lock_guard lg{m}; @@ -209,6 +320,86 @@ const boost::ut::suite DataSinkTests = [] { expect(eq(poller->drop_count.load(), 0)); }; + "blocking polling multiplexed mode"_test = [] { + const auto tags = make_test_tags(0, 10000); + + const std::int32_t n_samples = tags.size() * 10000 + 100000; + graph flow_graph; + auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); + src.tags = std::deque(tags.begin(), tags.end()); + auto &sink = flow_graph.make_node>(); + sink.set_name("test_sink"); + + expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); + + { + const auto t = std::span(tags); + + // Test the test observer + expect(eq(run_observer_test(t, Observer({}, -1, {})), "|###||###||###||###||###||###"s)); + expect(eq(run_observer_test(t, Observer(-1, {}, {})), "|######||######||######"s)); + expect(eq(run_observer_test(t, Observer(1, {}, {})), "|######|____________"s)); + expect(eq(run_observer_test(t, Observer(1, {}, 2)), "_|#|__|#|_____________"s)); + expect(eq(run_observer_test(t, Observer({}, {}, 1)), "|#|__|#|__|#|__|#|__|#|__|#|__"s)); + } + + auto observer_factory = [](std::optional y, std::optional m, std::optional d) { + return [y, m, d]() { + return Observer(y, m, d); + }; + }; + const auto factories = std::array{observer_factory({}, -1, {}), + observer_factory(-1, {}, {}), + observer_factory(1, {}, {}), + observer_factory(1, {}, 2), + observer_factory({}, {}, 1)}; + + // Following the patterns above, where each #/_ is 10000 samples + const auto expected = std::array, factories.size()>{{ + {0, 29999, 30000, 59999, 60000, 89999, 90000, 119999, 120000, 149999, 150000, 249999}, + {0, 59999, 60000, 119999, 120000, 219999}, + {0, 59999}, + {10000, 19999, 40000, 49999}, + {0, 9999, 30000, 39999, 60000, 69999, 90000, 99999, 120000, 129999, 150000, 159999} + }}; + std::vector::dataset_poller>> pollers; + + for (const auto &f : factories) { + auto poller = data_sink_registry::instance().get_multiplexed_poller("test_sink", f, 100000, blocking_mode::Blocking); + expect(neq(poller, nullptr)); + pollers.push_back(poller); + } + + std::vector>> results; + + for (std::size_t i = 0; i < pollers.size(); ++i) { + auto f = std::async([poller = pollers[i]] { + std::vector 
ranges; + bool seen_finished = false; + while (!seen_finished) { + seen_finished = poller->finished.load(); + using namespace std::chrono_literals; + while (poller->process_one([&ranges](const auto &dataset) { + ranges.push_back(dataset.signal_values.front()); + ranges.push_back(dataset.signal_values.back()); + })) {} + } + return ranges; + }); + results.push_back(std::move(f)); + } + + fair::graph::scheduler::simple sched{std::move(flow_graph)}; + sched.work(); + + sink.stop(); // TODO the scheduler should call this + + for (std::size_t i = 0; i < results.size(); ++i) { + expect(eq(results[i].get(), expected[i])); + } + expect(eq(sink.n_samples_consumed, n_samples)); + }; + "blocking polling trigger mode overlapping"_test = [] { constexpr std::int32_t n_samples = 2000000; constexpr std::size_t n_triggers = 5000; From c79e6c7b3ba2b41d34dc0627104fd84c62bcc9b5 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Sat, 17 Jun 2023 11:17:37 +0200 Subject: [PATCH 20/64] Add snapshot mode --- include/data_sink.hpp | 159 +++++++++++++++++++++++++++++++++++++----- test/qa_data_sink.cpp | 48 ++++++++++++- 2 files changed, 186 insertions(+), 21 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index fd614217..ed0953cd 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -7,6 +7,7 @@ #include "tag.hpp" #include +#include namespace fair::graph { @@ -23,7 +24,7 @@ enum class trigger_observer_state { }; template -concept TriggerPredicate = requires(T p, tag_t tag) { +concept TriggerPredicate = requires(const T p, tag_t tag) { {p(tag)} -> std::convertible_to; }; @@ -34,7 +35,7 @@ concept TriggerObserver = requires(T o, tag_t tag) { template concept TriggerObserverFactory = requires(T f) { - {f()}; // TODO how assert that operator() must fullfill TriggerObserver? + {f()} -> TriggerObserver; }; template @@ -80,7 +81,7 @@ class data_sink_registry { std::shared_ptr::dataset_poller> get_trigger_poller(std::string_view name, P p, std::size_t pre_samples, std::size_t post_samples, blocking_mode block = blocking_mode::NonBlocking) { std::lock_guard lg{mutex}; auto sink = find_sink(name); - return sink ? sink->get_trigger_poller(std::move(p), pre_samples, post_samples, block) : nullptr; + return sink ? sink->get_trigger_poller(std::forward
<P>
(p), pre_samples, post_samples, block) : nullptr; } template @@ -90,6 +91,13 @@ class data_sink_registry { return sink ? sink->get_multiplexed_poller(std::forward(triggerObserverFactory), maximum_window_size, block) : nullptr; } + template + std::shared_ptr::dataset_poller> get_snapshot_poller(std::string_view name, P p, std::chrono::nanoseconds delay, blocking_mode block = blocking_mode::NonBlocking) { + std::lock_guard lg{mutex}; + auto sink = find_sink(name); + return sink ? sink->get_snapshot_poller(std::forward

(p), delay, block) : nullptr; + } + template bool register_streaming_callback(std::string_view name, std::size_t max_chunk_size, Callback callback) { std::lock_guard lg{mutex}; @@ -98,7 +106,7 @@ class data_sink_registry { return false; } - sink->register_streaming_callback(max_chunk_size, std::move(callback)); + sink->register_streaming_callback(max_chunk_size, std::forward(callback)); return true; } @@ -110,7 +118,7 @@ class data_sink_registry { return false; } - sink->register_trigger_callback(std::move(p), pre_samples, post_samples, std::move(callback)); + sink->register_trigger_callback(std::forward

(p), pre_samples, post_samples, std::forward(callback)); return true; } @@ -126,6 +134,18 @@ class data_sink_registry { return true; } + template + bool register_snapshot_callback(std::string_view name, P p, std::chrono::nanoseconds delay, Callback callback) { + std::lock_guard lg{mutex}; + auto sink = find_sink(name); + if (!sink) { + return false; + } + + sink->template register_snapshot_callback(std::forward

(p), delay, std::forward(callback)); + return true; + } + private: template data_sink* find_sink(std::string_view name) { @@ -154,9 +174,7 @@ class data_sink : public node> { public: IN in; std::size_t n_samples_consumed = 0; - std::size_t n_samples_max = -1; - int64_t last_tag_position = -1; - float sample_rate = -1.0f; + float sample_rate = 10000; static constexpr std::size_t listener_buffer_size = 65536; @@ -168,8 +186,7 @@ class data_sink : public node> { decltype(buffer.new_reader()) reader = buffer.new_reader(); decltype(buffer.new_writer()) writer = buffer.new_writer(); - template - [[nodiscard]] bool process_bulk(Handler fnc) { + [[nodiscard]] bool process_bulk(std::invocable> auto fnc) { const auto available = reader.available(); if (available == 0) { return false; @@ -181,8 +198,7 @@ class data_sink : public node> { return true; } - template - [[nodiscard]] bool process_one(Handler fnc) { + [[nodiscard]] bool process_one(std::invocable auto fnc) { const auto available = reader.available(); if (available == 0) { return false; @@ -207,6 +223,7 @@ class data_sink : public node> { struct abstract_listener_t { virtual ~abstract_listener_t() = default; + virtual void set_sample_rate(float) {} virtual void process_bulk(std::span history, std::span data, int64_t reader_position, const std::vector &tags) = 0; virtual void flush() = 0; }; @@ -298,33 +315,36 @@ class data_sink : public node> { } }; - template + template struct trigger_listener_t : public abstract_listener_t { bool block = false; std::size_t pre_samples = 0; std::size_t post_samples = 0; - TriggerPredicate trigger_predicate = {}; + P trigger_predicate = {}; std::deque pending_trigger_windows; // triggers that still didn't receive all their data std::weak_ptr polling_handler = {}; Callback callback; - explicit trigger_listener_t(TriggerPredicate predicate, std::shared_ptr handler, std::size_t pre, std::size_t post, bool do_block) + explicit trigger_listener_t(P predicate, std::shared_ptr handler, std::size_t pre, std::size_t post, bool do_block) : block(do_block) , pre_samples(pre) , post_samples(post) - , trigger_predicate(std::forward(predicate)) + , trigger_predicate(std::forward

(predicate)) , polling_handler{std::move(handler)} {} - explicit trigger_listener_t(TriggerPredicate predicate, std::size_t pre, std::size_t post, Callback cb) + explicit trigger_listener_t(P predicate, std::size_t pre, std::size_t post, Callback cb) : pre_samples(pre) , post_samples(post) - , trigger_predicate(std::forward(predicate)) + , trigger_predicate(std::forward

(predicate)) , callback{std::forward(cb)} {} + // TODO all the dataset-based listeners could share publish_dataset and parts of flush (closing pollers), + // but if we want to use different datastructures/pass additional info, this might become moot again, so + // I leave it as is for now. inline void publish_dataset(DataSet &&data) { if constexpr (!std::is_same_v) { callback(std::move(data)); @@ -500,6 +520,91 @@ class data_sink : public node> { } }; + struct pending_snapshot { + tag_t tag; + tag_t::index_type requested_sample; + }; + + template + struct snapshot_listener_t : public abstract_listener_t { + bool block = false; + std::chrono::nanoseconds time_delay; + tag_t::index_type sample_delay = 0; + P trigger_predicate = {}; + std::deque pending; + std::weak_ptr polling_handler = {}; + Callback callback; + + explicit snapshot_listener_t(P p, std::chrono::nanoseconds delay, std::shared_ptr poller, bool do_block) : block(do_block), time_delay(delay), trigger_predicate(std::forward

(p)), polling_handler{std::move(poller)} {} + explicit snapshot_listener_t(P p, std::chrono::nanoseconds delay, Callback cb) : trigger_predicate(std::forward

(p)), time_delay(std::forward(cb)) {} + + inline void publish_dataset(DataSet &&data) { + if constexpr (!std::is_same_v) { + callback(std::move(data)); + } else { + auto poller = polling_handler.lock(); + if (!poller) { + return; + } + + auto write_data = poller->writer.reserve_output_range(1); + if (block) { + write_data[0] = std::move(data); + write_data.publish(1); + } else { + if (poller->writer.available() > 0) { + write_data[0] = std::move(data); + write_data.publish(1); + } else { + poller->drop_count++; + } + } + } + } + + void set_sample_rate(float r) override { + sample_delay = std::round(std::chrono::duration_cast>(time_delay).count() * r); + } + + void process_bulk(std::span, std::span in_data, int64_t reader_position, const std::vector &tags) override { + auto triggers = tags; // should use views::filter once that is working everywhere + std::erase_if(triggers, [this](const auto &tag) { + return !trigger_predicate(tag); + }); + + if (!triggers.empty()) { + for (const auto &trigger : triggers) { + pending.push_back({trigger, trigger.index + sample_delay}); + } + // can be unsorted if sample_delay changed. Alternative: iterate the whole list below + std::stable_sort(pending.begin(), pending.end(), [](const auto &lhs, const auto &rhs) { return lhs.requested_sample < rhs.requested_sample; }); + } + + auto it = pending.begin(); + while (it != pending.end()) { + const auto rel_pos = it->requested_sample - reader_position; + assert(rel_pos >= 0); + if (rel_pos >= in_data.size()) { + break; + } + + DataSet dataset; + dataset.timing_events = {{it->tag}}; + dataset.signal_values = {in_data[rel_pos]}; + publish_dataset(std::move(dataset)); + + it = pending.erase(it); + } + } + + void flush() override { + pending.clear(); + if (auto p = polling_handler.lock()) { + p->finished = true; + } + } + }; + std::deque> listeners; std::mutex listener_mutex; @@ -539,6 +644,15 @@ class data_sink : public node> { return handler; } + template + std::shared_ptr get_snapshot_poller(P p, std::chrono::nanoseconds delay, blocking_mode block_mode = blocking_mode::NonBlocking) { + const auto block = block_mode == blocking_mode::Blocking; + auto handler = std::make_shared(); + std::lock_guard lg(listener_mutex); + add_listener(std::make_unique>(std::forward

(p), delay, handler, block), block); + return handler; + } + template void register_streaming_callback(std::size_t max_chunk_size, Callback callback) { add_listener(std::make_unique>(max_chunk_size, std::forward(callback)), false); @@ -556,6 +670,12 @@ class data_sink : public node> { add_listener(std::make_unique(std::move(triggerObserverFactory), maximum_window_size, std::forward(callback)), false); } + template + void register_snapshot_callback(P p, std::chrono::nanoseconds delay, Callback callback) { + std::lock_guard lg(listener_mutex); + add_listener(std::make_unique(std::forward

(p), delay, std::forward(callback)), false); + } + // TODO this code should be called at the end of graph processing void stop() { std::lock_guard lg(listener_mutex); @@ -617,6 +737,7 @@ class data_sink : public node> { std::size_t history_available = 0; void add_listener(std::unique_ptr&& l, bool block) { + l->set_sample_rate(sample_rate); // TODO also call when sample_rate changes if (block) { listeners.push_back(std::move(l)); } else { @@ -627,6 +748,6 @@ class data_sink : public node> { } -ENABLE_REFLECTION_FOR_TEMPLATE_FULL((typename T), (fair::graph::data_sink), in, n_samples_consumed, n_samples_max, last_tag_position, sample_rate); +ENABLE_REFLECTION_FOR_TEMPLATE_FULL((typename T), (fair::graph::data_sink), in, n_samples_consumed, sample_rate); #endif diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index 1b8b00c6..3e51f439 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -298,7 +298,6 @@ const boost::ut::suite DataSinkTests = [] { bool seen_finished = false; while (!seen_finished) { seen_finished = poller->finished; - using namespace std::chrono_literals; [[maybe_unused]] auto r = poller->process_one([&received_data, &m](const auto &dataset) { std::lock_guard lg{m}; received_data.insert(received_data.end(), dataset.signal_values.begin(), dataset.signal_values.end()); @@ -320,6 +319,52 @@ const boost::ut::suite DataSinkTests = [] { expect(eq(poller->drop_count.load(), 0)); }; + "blocking polling snapshot mode"_test = [] { + constexpr std::int32_t n_samples = 200000; + + graph flow_graph; + auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); + src.tags = {{3000, {{"TYPE", "TRIGGER"}}}, tag_t{8000, {{"TYPE", "NO_TRIGGER"}}}, {180000, {{"TYPE", "TRIGGER"}}}}; + auto &sink = flow_graph.make_node>(); + sink.set_name("test_sink"); + + expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); + + auto is_trigger = [](const tag_t &tag) { + const auto v = tag.get("TYPE"); + return v && std::get(v->get()) == "TRIGGER"; + }; + + const auto delay = std::chrono::milliseconds{500}; // sample rate 10000 -> 5000 samples + auto poller = data_sink_registry::instance().get_snapshot_poller("test_sink", is_trigger, delay, blocking_mode::Blocking); + expect(neq(poller, nullptr)); + + auto poller_result = std::async([poller] { + std::vector received_data; + + bool seen_finished = false; + while (!seen_finished) { + seen_finished = poller->finished; + [[maybe_unused]] auto r = poller->process_one([&received_data](const auto &dataset) { + received_data.insert(received_data.end(), dataset.signal_values.begin(), dataset.signal_values.end()); + }); + } + + return received_data; + }); + + fair::graph::scheduler::simple sched{std::move(flow_graph)}; + sched.work(); + + sink.stop(); // TODO the scheduler should call this + + const auto received_data = poller_result.get(); + + expect(eq(sink.n_samples_consumed, n_samples)); + expect(eq(received_data, std::vector{8000, 185000})); + expect(eq(poller->drop_count.load(), 0)); + }; + "blocking polling multiplexed mode"_test = [] { const auto tags = make_test_tags(0, 10000); @@ -378,7 +423,6 @@ const boost::ut::suite DataSinkTests = [] { bool seen_finished = false; while (!seen_finished) { seen_finished = poller->finished.load(); - using namespace std::chrono_literals; while (poller->process_one([&ranges](const auto &dataset) { ranges.push_back(dataset.signal_values.front()); ranges.push_back(dataset.signal_values.back()); From 1e22db01fc247866fb718df748995495c1db8559 Mon Sep 17 00:00:00 2001 
From: Frank Osterfeld Date: Sun, 18 Jun 2023 12:52:27 +0200 Subject: [PATCH 21/64] Add distinct null_type to denote that there's no callback --- include/data_sink.hpp | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index ed0953cd..dffe22ff 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -23,6 +23,9 @@ enum class trigger_observer_state { Ignore ///< Ignore tag }; +// TODO is the scope where want these? +struct null_type {}; + template concept TriggerPredicate = requires(const T p, tag_t tag) { {p(tag)} -> std::convertible_to; @@ -251,7 +254,7 @@ class data_sink : public node> { {} void process_bulk(std::span, std::span data, int64_t /*reader_position*/, const std::vector &tags) override { - if constexpr (!std::is_same_v) { + if constexpr (!std::is_same_v) { // if there's pending data, fill buffer and send out if (buffer_fill > 0) { const auto n = std::min(data.size(), buffer.size() - buffer_fill); @@ -302,7 +305,7 @@ class data_sink : public node> { } void flush() override { - if constexpr (!std::is_same_v) { + if constexpr (!std::is_same_v) { if (buffer_fill > 0) { callback(std::span(buffer).first(buffer_fill)); buffer_fill = 0; @@ -346,7 +349,7 @@ class data_sink : public node> { // but if we want to use different datastructures/pass additional info, this might become moot again, so // I leave it as is for now. inline void publish_dataset(DataSet &&data) { - if constexpr (!std::is_same_v) { + if constexpr (!std::is_same_v) { callback(std::move(data)); } else { auto poller = polling_handler.lock(); @@ -445,7 +448,7 @@ class data_sink : public node> { explicit multiplexed_listener_t(F factory, std::size_t max_window_size, std::shared_ptr handler, bool do_block) : observerFactory(factory), observer(observerFactory()), maximum_window_size(max_window_size), polling_handler{std::move(handler)}, block(do_block) {} inline void publish_dataset(DataSet &&data) { - if constexpr (!std::is_same_v) { + if constexpr (!std::is_same_v) { callback(std::move(data)); } else { auto poller = polling_handler.lock(); @@ -539,7 +542,7 @@ class data_sink : public node> { explicit snapshot_listener_t(P p, std::chrono::nanoseconds delay, Callback cb) : trigger_predicate(std::forward

(p)), time_delay(std::forward(cb)) {} inline void publish_dataset(DataSet &&data) { - if constexpr (!std::is_same_v) { + if constexpr (!std::is_same_v) { callback(std::move(data)); } else { auto poller = polling_handler.lock(); @@ -621,7 +624,7 @@ class data_sink : public node> { std::lock_guard lg(listener_mutex); const auto block = block_mode == blocking_mode::Blocking; auto handler = std::make_shared(); - add_listener(std::make_unique>(handler, block), block); + add_listener(std::make_unique>(handler, block), block); return handler; } @@ -630,7 +633,7 @@ class data_sink : public node> { const auto block = block_mode == blocking_mode::Blocking; auto handler = std::make_shared(); std::lock_guard lg(listener_mutex); - add_listener(std::make_unique>(std::forward(p), handler, pre_samples, post_samples, block), block); + add_listener(std::make_unique>(std::forward(p), handler, pre_samples, post_samples, block), block); history.resize(std::max(pre_samples, history.size())); return handler; } @@ -640,7 +643,7 @@ class data_sink : public node> { std::lock_guard lg(listener_mutex); const auto block = block_mode == blocking_mode::Blocking; auto handler = std::make_shared(); - add_listener(std::make_unique>(std::move(triggerObserverFactory), maximum_window_size, handler, block), block); + add_listener(std::make_unique>(std::move(triggerObserverFactory), maximum_window_size, handler, block), block); return handler; } @@ -649,7 +652,7 @@ class data_sink : public node> { const auto block = block_mode == blocking_mode::Blocking; auto handler = std::make_shared(); std::lock_guard lg(listener_mutex); - add_listener(std::make_unique>(std::forward

(p), delay, handler, block), block); + add_listener(std::make_unique>(std::forward

(p), delay, handler, block), block); return handler; } From dcfc755bf487b013c7586993e8cb75950ad5898b Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Sun, 18 Jun 2023 12:58:48 +0200 Subject: [PATCH 22/64] Tests: Use future more to pass received data --- test/qa_data_sink.cpp | 29 +++++++++++------------------ 1 file changed, 11 insertions(+), 18 deletions(-) diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index 3e51f439..b360b816 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -291,18 +291,16 @@ const boost::ut::suite DataSinkTests = [] { auto poller = data_sink_registry::instance().get_trigger_poller("test_sink", is_trigger, 3, 2, blocking_mode::Blocking); expect(neq(poller, nullptr)); - std::mutex m; - std::vector received_data; - - auto polling = std::async([poller, &received_data, &m] { + auto polling = std::async([poller] { + std::vector received_data; bool seen_finished = false; while (!seen_finished) { seen_finished = poller->finished; - [[maybe_unused]] auto r = poller->process_one([&received_data, &m](const auto &dataset) { - std::lock_guard lg{m}; + [[maybe_unused]] auto r = poller->process_one([&received_data](const auto &dataset) { received_data.insert(received_data.end(), dataset.signal_values.begin(), dataset.signal_values.end()); }); } + return received_data; }); fair::graph::scheduler::simple sched{std::move(flow_graph)}; @@ -310,9 +308,8 @@ const boost::ut::suite DataSinkTests = [] { sink.stop(); // TODO the scheduler should call this - polling.wait(); + const auto received_data = polling.get(); - std::lock_guard lg{m}; expect(eq(sink.n_samples_consumed, n_samples)); expect(eq(received_data.size(), 10)); expect(eq(received_data, std::vector{2997, 2998, 2999, 3000, 3001, 179997, 179998, 179999, 180000, 180001})); @@ -467,21 +464,19 @@ const boost::ut::suite DataSinkTests = [] { auto poller = data_sink_registry::instance().get_trigger_poller("test_sink", is_trigger, 3000, 2000, blocking_mode::Blocking); expect(neq(poller, nullptr)); - std::mutex m; - std::vector received_data; - - auto polling = std::async([poller, &received_data, &m] { + auto polling = std::async([poller] { + std::vector received_data; bool seen_finished = false; while (!seen_finished) { // TODO make finished vs. 
pending data handling actually thread-safe seen_finished = poller->finished.load(); - while (poller->process_one([&received_data, &m](const auto &dataset) { - std::lock_guard lg{m}; + while (poller->process_one([&received_data](const auto &dataset) { expect(eq(dataset.signal_values.size(), 5000)); received_data.push_back(dataset.signal_values.front()); received_data.push_back(dataset.signal_values.back()); })) {} } + return received_data; }); fair::graph::scheduler::simple sched{std::move(flow_graph)}; @@ -489,9 +484,7 @@ const boost::ut::suite DataSinkTests = [] { sink.stop(); // TODO the scheduler should call this - polling.wait(); - - std::lock_guard lg{m}; + const auto received_data = polling.get(); auto expected_start = std::vector{57000, 61999, 57001, 62000, 57002}; expect(eq(sink.n_samples_consumed, n_samples)); expect(eq(received_data.size(), 2 * n_triggers)); @@ -572,7 +565,7 @@ const boost::ut::suite DataSinkTests = [] { })) {} } - expect(eq(samples_seen + poller->drop_count.load(), n_samples)); + expect(eq(samples_seen + poller->drop_count, n_samples)); }); fair::graph::scheduler::simple sched{std::move(flow_graph)}; From c6e98931c83e279a2428473f14b6a840432a93ce Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Mon, 19 Jun 2023 09:24:58 +0200 Subject: [PATCH 23/64] Continuous/callback: Optionally pass tags --- include/data_sink.hpp | 75 +++++++++++++++++++++++++++++++++++++++---- test/qa_data_sink.cpp | 48 +++++++++++++++++++++------ 2 files changed, 108 insertions(+), 15 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index dffe22ff..04a2f9f2 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -172,6 +172,17 @@ class data_sink_registry { } }; +namespace detail { + template + std::span find_matching_prefix(std::span s, P predicate) { + const auto nm = std::find_if_not(s.begin(), s.end(), predicate); + if (nm == s.end()) { + return s; + } + return s.first(std::distance(s.begin(), nm)); + } +} + template class data_sink : public node> { public: @@ -233,10 +244,17 @@ class data_sink : public node> { template struct continuous_listener_t : public abstract_listener_t { + static constexpr auto has_callback = !std::is_same_v; + static constexpr auto callback_takes_tags = std::is_invocable_v, std::span>; + + std::optional first_sample_seen; bool block = false; + std::size_t samples_written = 0; + // callback-only std::size_t buffer_fill = 0; std::vector buffer; + std::vector tag_buffer; // polling-only std::weak_ptr polling_handler = {}; @@ -253,16 +271,46 @@ class data_sink : public node> { , polling_handler{std::move(poller)} {} - void process_bulk(std::span, std::span data, int64_t /*reader_position*/, const std::vector &tags) override { - if constexpr (!std::is_same_v) { + void process_bulk(std::span, std::span data, int64_t reader_position, const std::vector &tags_) override { + using namespace fair::graph::detail; + if (!first_sample_seen) { + first_sample_seen = reader_position; + } + + auto tags = std::vector(tags_.begin(), tags_.end()); + // send indices relative to first sample the user received + for (tag_t &tag : tags) { + tag.index -= *first_sample_seen; + } + + auto tag_view = std::span(tags); + + auto match_before = [](int64_t last_index) { + return [&last_index](const tag_t &tag) { + return tag.index < last_index; + }; + }; + + if constexpr (has_callback) { // if there's pending data, fill buffer and send out if (buffer_fill > 0) { const auto n = std::min(data.size(), buffer.size() - buffer_fill); std::copy(data.begin(), 
data.begin() + n, buffer.begin() + buffer_fill); + if constexpr (callback_takes_tags) { + const auto ts = find_matching_prefix(tag_view, match_before(samples_written + n)); + tag_buffer.insert(tag_buffer.end(), ts.begin(), ts.end()); + tag_view = tag_view.last(tag_view.size() - ts.size()); + } buffer_fill += n; if (buffer_fill == buffer.size()) { - callback(std::span(buffer)); + if constexpr (callback_takes_tags) { + callback(std::span(buffer), std::span(tag_buffer)); + } else { + callback(std::span(buffer)); + } + samples_written += buffer.size(); buffer_fill = 0; + tag_buffer.clear(); } data = data.last(data.size() - n); @@ -270,7 +318,14 @@ class data_sink : public node> { // send out complete chunks directly while (data.size() > buffer.size()) { - callback(data.first(buffer.size())); + if constexpr (callback_takes_tags) { + const auto ts = find_matching_prefix(tag_view, match_before(samples_written + buffer.size())); + tag_view = tag_view.last(tag_view.size() - ts.size()); + callback(data.first(buffer.size()), ts); + } else { + callback(data.first(buffer.size())); + } + samples_written += buffer.size(); data = data.last(data.size() - buffer.size()); } @@ -278,6 +333,9 @@ class data_sink : public node> { if (!data.empty()) { std::copy(data.begin(), data.end(), buffer.begin()); buffer_fill = data.size(); + if constexpr (callback_takes_tags) { + tag_buffer.insert(tag_buffer.end(), tag_view.begin(), tag_view.end()); + } } } else { auto poller = polling_handler.lock(); @@ -305,9 +363,14 @@ class data_sink : public node> { } void flush() override { - if constexpr (!std::is_same_v) { + if constexpr (has_callback) { if (buffer_fill > 0) { - callback(std::span(buffer).first(buffer_fill)); + if constexpr (callback_takes_tags) { + callback(std::span(buffer).first(buffer_fill), std::span(tag_buffer)); + tag_buffer.clear(); + } else { + callback(std::span(buffer).first(buffer_fill)); + } buffer_fill = 0; } } else { diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index b360b816..0fe076b3 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -191,16 +191,41 @@ const boost::ut::suite DataSinkTests = [] { expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); - std::size_t samples_seen = 0; - std::size_t chunks_seen = 0; - auto callback = [&samples_seen, &chunks_seen](std::span buffer) { + std::atomic samples_seen1 = 0; + std::atomic chunks_seen1 = 0; + auto callback = [&samples_seen1, &chunks_seen1](std::span buffer) { for (std::size_t i = 0; i < buffer.size(); ++i) { - expect(eq(buffer[i], static_cast(samples_seen + i))); + expect(eq(buffer[i], static_cast(samples_seen1 + i))); } - samples_seen += buffer.size(); - chunks_seen++; - if (chunks_seen < 201) { + samples_seen1 += buffer.size(); + chunks_seen1++; + if (chunks_seen1 < 201) { + expect(eq(buffer.size(), chunk_size)); + } else { + expect(eq(buffer.size(), 5)); + } + }; + + std::mutex m2; + std::size_t samples_seen2 = 0; + std::size_t chunks_seen2 = 0; + std::vector received_tags; + auto callback_with_tags = [&samples_seen2, &chunks_seen2, &m2, &received_tags](std::span buffer, std::span tags) { + for (std::size_t i = 0; i < buffer.size(); ++i) { + expect(eq(buffer[i], static_cast(samples_seen2 + i))); + } + + for (const auto &tag : tags) { + ge(tag.index, static_cast(samples_seen2)); + lt(tag.index, samples_seen2 + buffer.size()); + } + + auto lg = std::lock_guard{m2}; + received_tags.insert(received_tags.end(), tags.begin(), tags.end()); + samples_seen2 += buffer.size(); + 
chunks_seen2++; + if (chunks_seen2 < 201) { expect(eq(buffer.size(), chunk_size)); } else { expect(eq(buffer.size(), 5)); @@ -208,15 +233,20 @@ const boost::ut::suite DataSinkTests = [] { }; expect(data_sink_registry::instance().register_streaming_callback("test_sink", chunk_size, callback)); + expect(data_sink_registry::instance().register_streaming_callback("test_sink", chunk_size, callback_with_tags)); fair::graph::scheduler::simple sched{std::move(flow_graph)}; sched.work(); sink.stop(); // TODO the scheduler should call this - expect(eq(chunks_seen, 201)); + auto lg = std::lock_guard{m2}; + expect(eq(chunks_seen1.load(), 201)); + expect(eq(chunks_seen2, 201)); expect(eq(sink.n_samples_consumed, n_samples)); - expect(eq(samples_seen, n_samples)); + expect(eq(samples_seen1.load(), n_samples)); + expect(eq(samples_seen2, n_samples)); + expect(eq(received_tags.size(), src.tags.size())); }; "blocking polling continuous mode"_test = [] { From 7a858db16958b3a9dad42f0e9a4bf3329a4bf2c8 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Mon, 19 Jun 2023 09:48:23 +0200 Subject: [PATCH 24/64] Pass tags via span --- include/data_sink.hpp | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index 04a2f9f2..d3bb9de1 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -238,7 +238,7 @@ class data_sink : public node> { struct abstract_listener_t { virtual ~abstract_listener_t() = default; virtual void set_sample_rate(float) {} - virtual void process_bulk(std::span history, std::span data, int64_t reader_position, const std::vector &tags) = 0; + virtual void process_bulk(std::span history, std::span data, int64_t reader_position, std::span tags) = 0; virtual void flush() = 0; }; @@ -271,7 +271,7 @@ class data_sink : public node> { , polling_handler{std::move(poller)} {} - void process_bulk(std::span, std::span data, int64_t reader_position, const std::vector &tags_) override { + void process_bulk(std::span, std::span data, int64_t reader_position, std::span tags_) override { using namespace fair::graph::detail; if (!first_sample_seen) { first_sample_seen = reader_position; @@ -435,8 +435,8 @@ class data_sink : public node> { } } - void process_bulk(std::span history, std::span in_data, int64_t reader_position, const std::vector &tags) override { - auto filtered = tags; // should use views::filter once that is working everywhere + void process_bulk(std::span history, std::span in_data, int64_t reader_position, std::span tags) override { + auto filtered = std::vector(tags.begin(), tags.end()); // should use views::filter once that is working everywhere std::erase_if(filtered, [this](const auto &tag) { return !trigger_predicate(tag); }); @@ -543,8 +543,8 @@ class data_sink : public node> { } } - void process_bulk(std::span, std::span in_data, int64_t reader_position, const std::vector &tags) override { - for (const auto &tag :tags) { + void process_bulk(std::span, std::span in_data, int64_t reader_position, std::span tags) override { + for (const auto &tag : tags) { const auto obsr = observer(tag); // TODO set proper error state instead of throwing if (obsr == trigger_observer_state::Stop || obsr == trigger_observer_state::StopAndStart) { @@ -632,8 +632,8 @@ class data_sink : public node> { sample_delay = std::round(std::chrono::duration_cast>(time_delay).count() * r); } - void process_bulk(std::span, std::span in_data, int64_t reader_position, const std::vector &tags) override { - auto triggers = tags; // 
should use views::filter once that is working everywhere + void process_bulk(std::span, std::span in_data, int64_t reader_position, std::span tags) override { + auto triggers = std::vector(tags.begin(), tags.end()); // should use views::filter once that is working everywhere std::erase_if(triggers, [this](const auto &tag) { return !trigger_predicate(tag); }); @@ -773,7 +773,8 @@ class data_sink : public node> { auto out_of_range = [end_pos = reader_position + noutput_items](const auto &tag) { return tag.index > static_cast(end_pos); }; - std::erase_if(tags, out_of_range); + std::erase_if(tags, out_of_range); // TODO use views it works everywhere + auto tag_view = std::span(tags); tag_reader.consume(tags.size()); { From 260a4282182085a929867c46ceb6c69196c13051 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Mon, 19 Jun 2023 09:53:33 +0200 Subject: [PATCH 25/64] ASCII art --- include/data_sink.hpp | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index d3bb9de1..a8491104 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -183,6 +183,41 @@ namespace detail { } } +/** + * @brief generic data sink for exporting arbitrary-typed streams to non-GR C++ APIs. + * + * Each sink registers with a (user-defined/exchangeable) global registry that can be + * queried by the non-GR caller to find the sink responsible for a given signal name, etc. + * and either retrieve a poller handler that allows asynchronous data from a different thread, + * or register a callback that is invoked by the sink if the user-conditions are met. + * + *

+ * @code
+ *         ╔═══════════════╗
+ *    in0 ━╢   data sink   ║                      ┌──── caller ────┐
+ * (err0) ━╢ (opt. error)  ║                      │                │
+ *    ┄    ║               ║  retrieve poller or  │ (custom non-GR │
+ *    inN ━╢ :signal_names ║←--------------------→│  user code...) │
+ * (errN) ━╢ :signal_units ║  register            │                │
+ *         ║ :...          ║  callback function   └───┬────────────┘
+ *         ╚═ GR block ═╤══╝                          │
+ *                      │                             │
+ *                      │                             │
+ *                      │      ╭─registry─╮           │
+ *            register/ │      ╞══════════╡           │ queries for specific
+ *          deregister  ╰─────→│ [sinks]  │←──────────╯ signal_info_t list/criteria
+ *                             ╞══════════╡
+ *                             ╰──────────╯
+ *
+ * 
+ * Pollers can be configured to be blocking, i.e. blocks the flow-graph + * if data is not being retrieved in time, or non-blocking, i.e. data being dropped when + * the user-defined buffer size is full. + * N.B. due to the nature of the GR scheduler, signals from the same sink are notified + * synchronuously (/asynchronuously) if handled by the same (/different) sink block. + * + * @tparam T input sample type + */ template class data_sink : public node> { public: From 284a58d7a4501068eff60dca74fa1f6a83ca67ce Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Mon, 19 Jun 2023 10:54:20 +0200 Subject: [PATCH 26/64] Allow querying sinks by name or signal name --- include/data_sink.hpp | 84 ++++++++++++++++++++++++++----------------- test/qa_data_sink.cpp | 22 ++++++------ 2 files changed, 62 insertions(+), 44 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index a8491104..410e6a2f 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -44,6 +44,19 @@ concept TriggerObserverFactory = requires(T f) { template class data_sink; +struct data_sink_query { + std::optional sink_name; + std::optional signal_name; + + static data_sink_query with_signal_name(std::string_view name) { + return {{}, std::string{name}}; + } + + static data_sink_query with_sink_name(std::string_view name) { + return {std::string{name}, {}}; + } +}; + class data_sink_registry { std::mutex mutex; std::vector sinks; @@ -74,37 +87,37 @@ class data_sink_registry { } template - std::shared_ptr::poller> get_streaming_poller(std::string_view name, blocking_mode block = blocking_mode::NonBlocking) { + std::shared_ptr::poller> get_streaming_poller(const data_sink_query &query, blocking_mode block = blocking_mode::NonBlocking) { std::lock_guard lg{mutex}; - auto sink = find_sink(name); + auto sink = find_sink(query); return sink ? sink->get_streaming_poller(block) : nullptr; } template - std::shared_ptr::dataset_poller> get_trigger_poller(std::string_view name, P p, std::size_t pre_samples, std::size_t post_samples, blocking_mode block = blocking_mode::NonBlocking) { + std::shared_ptr::dataset_poller> get_trigger_poller(const data_sink_query &query, P p, std::size_t pre_samples, std::size_t post_samples, blocking_mode block = blocking_mode::NonBlocking) { std::lock_guard lg{mutex}; - auto sink = find_sink(name); + auto sink = find_sink(query); return sink ? sink->get_trigger_poller(std::forward

(p), pre_samples, post_samples, block) : nullptr; } template - std::shared_ptr::dataset_poller> get_multiplexed_poller(std::string_view name, F triggerObserverFactory, std::size_t maximum_window_size, blocking_mode block = blocking_mode::NonBlocking) { + std::shared_ptr::dataset_poller> get_multiplexed_poller(const data_sink_query &query, F triggerObserverFactory, std::size_t maximum_window_size, blocking_mode block = blocking_mode::NonBlocking) { std::lock_guard lg{mutex}; - auto sink = find_sink(name); + auto sink = find_sink(query); return sink ? sink->get_multiplexed_poller(std::forward(triggerObserverFactory), maximum_window_size, block) : nullptr; } template - std::shared_ptr::dataset_poller> get_snapshot_poller(std::string_view name, P p, std::chrono::nanoseconds delay, blocking_mode block = blocking_mode::NonBlocking) { + std::shared_ptr::dataset_poller> get_snapshot_poller(const data_sink_query &query, P p, std::chrono::nanoseconds delay, blocking_mode block = blocking_mode::NonBlocking) { std::lock_guard lg{mutex}; - auto sink = find_sink(name); + auto sink = find_sink(query); return sink ? sink->get_snapshot_poller(std::forward

(p), delay, block) : nullptr; } template - bool register_streaming_callback(std::string_view name, std::size_t max_chunk_size, Callback callback) { + bool register_streaming_callback(const data_sink_query &query, std::size_t max_chunk_size, Callback callback) { std::lock_guard lg{mutex}; - auto sink = find_sink(name); + auto sink = find_sink(query); if (!sink) { return false; } @@ -114,9 +127,9 @@ class data_sink_registry { } template - bool register_trigger_callback(std::string_view name, P p, std::size_t pre_samples, std::size_t post_samples, Callback callback) { + bool register_trigger_callback(const data_sink_query &query, P p, std::size_t pre_samples, std::size_t post_samples, Callback callback) { std::lock_guard lg{mutex}; - auto sink = find_sink(name); + auto sink = find_sink(query); if (!sink) { return false; } @@ -126,9 +139,9 @@ class data_sink_registry { } template - bool register_multiplexed_callback(std::string_view name, std::size_t maximum_window_size, Callback callback) { + bool register_multiplexed_callback(const data_sink_query &query, std::size_t maximum_window_size, Callback callback) { std::lock_guard lg{mutex}; - auto sink = find_sink(name); + auto sink = find_sink(query); if (!sink) { return false; } @@ -138,9 +151,9 @@ class data_sink_registry { } template - bool register_snapshot_callback(std::string_view name, P p, std::chrono::nanoseconds delay, Callback callback) { + bool register_snapshot_callback(const data_sink_query &query, P p, std::chrono::nanoseconds delay, Callback callback) { std::lock_guard lg{mutex}; - auto sink = find_sink(name); + auto sink = find_sink(query); if (!sink) { return false; } @@ -151,24 +164,25 @@ class data_sink_registry { private: template - data_sink* find_sink(std::string_view name) { - const auto it = std::find_if(sinks.begin(), sinks.end(), matcher(name)); - if (it == sinks.end()) { - return nullptr; - } + data_sink* find_sink(const data_sink_query &query) { - return std::any_cast*>(*it); - } - - template - static auto matcher(std::string_view name) { - return [name](const std::any &v) { + auto matches = [&query](const std::any &v) { try { - return std::any_cast*>(v)->name() == name; + auto sink = std::any_cast*>(v); + const auto sink_name_matches = !query.sink_name || *query.sink_name == sink->name(); + const auto signal_name_matches = !query.signal_name || *query.signal_name == sink->signal_name; + return sink_name_matches && signal_name_matches; } catch (...) { return false; } }; + + const auto it = std::find_if(sinks.begin(), sinks.end(), matches); + if (it == sinks.end()) { + return nullptr; + } + + return std::any_cast*>(*it); } }; @@ -196,9 +210,9 @@ namespace detail { * ╔═══════════════╗ * in0 ━╢ data sink ║ ┌──── caller ────┐ * (err0) ━╢ (opt. error) ║ │ │ - * ┄ ║ ║ retrieve poller or │ (custom non-GR │ - * inN ━╢ :signal_names ║←--------------------→│ user code...) │ - * (errN) ━╢ :signal_units ║ register │ │ + * ║ ║ retrieve poller or │ (custom non-GR │ + * ━╢ :signal_name ║←--------------------→│ user code...) │ + * ━╢ :signal_unit ║ register │ │ * ║ :... 
║ callback function └───┬────────────┘ * ╚═ GR block ═╤══╝ │ * │ │ @@ -221,10 +235,14 @@ namespace detail { template class data_sink : public node> { public: + Annotated, Unit<"Hz">> sample_rate = 10000.f; + Annotated signal_name; + Annotated> signal_unit; + Annotated> signal_min; + Annotated> signal_max; + IN in; std::size_t n_samples_consumed = 0; - float sample_rate = 10000; - static constexpr std::size_t listener_buffer_size = 65536; template diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index 0fe076b3..4956fe5c 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -232,8 +232,8 @@ const boost::ut::suite DataSinkTests = [] { } }; - expect(data_sink_registry::instance().register_streaming_callback("test_sink", chunk_size, callback)); - expect(data_sink_registry::instance().register_streaming_callback("test_sink", chunk_size, callback_with_tags)); + expect(data_sink_registry::instance().register_streaming_callback(data_sink_query::with_sink_name("test_sink"), chunk_size, callback)); + expect(data_sink_registry::instance().register_streaming_callback(data_sink_query::with_sink_name("test_sink"), chunk_size, callback_with_tags)); fair::graph::scheduler::simple sched{std::move(flow_graph)}; sched.work(); @@ -262,10 +262,10 @@ const boost::ut::suite DataSinkTests = [] { std::atomic samples_seen = 0; - auto poller1 = data_sink_registry::instance().get_streaming_poller("test_sink", blocking_mode::Blocking); + auto poller1 = data_sink_registry::instance().get_streaming_poller(data_sink_query::with_sink_name("test_sink"), blocking_mode::Blocking); expect(neq(poller1, nullptr)); - auto poller2 = data_sink_registry::instance().get_streaming_poller("test_sink", blocking_mode::Blocking); + auto poller2 = data_sink_registry::instance().get_streaming_poller(data_sink_query::with_sink_name("test_sink"), blocking_mode::Blocking); expect(neq(poller2, nullptr)); auto make_runner = [](auto poller) { @@ -318,7 +318,7 @@ const boost::ut::suite DataSinkTests = [] { return v && std::get(v->get()) == "TRIGGER"; }; - auto poller = data_sink_registry::instance().get_trigger_poller("test_sink", is_trigger, 3, 2, blocking_mode::Blocking); + auto poller = data_sink_registry::instance().get_trigger_poller(data_sink_query::with_sink_name("test_sink"), is_trigger, 3, 2, blocking_mode::Blocking); expect(neq(poller, nullptr)); auto polling = std::async([poller] { @@ -363,7 +363,7 @@ const boost::ut::suite DataSinkTests = [] { }; const auto delay = std::chrono::milliseconds{500}; // sample rate 10000 -> 5000 samples - auto poller = data_sink_registry::instance().get_snapshot_poller("test_sink", is_trigger, delay, blocking_mode::Blocking); + auto poller = data_sink_registry::instance().get_snapshot_poller(data_sink_query::with_sink_name("test_sink"), is_trigger, delay, blocking_mode::Blocking); expect(neq(poller, nullptr)); auto poller_result = std::async([poller] { @@ -437,7 +437,7 @@ const boost::ut::suite DataSinkTests = [] { std::vector::dataset_poller>> pollers; for (const auto &f : factories) { - auto poller = data_sink_registry::instance().get_multiplexed_poller("test_sink", f, 100000, blocking_mode::Blocking); + auto poller = data_sink_registry::instance().get_multiplexed_poller(data_sink_query::with_sink_name("test_sink"), f, 100000, blocking_mode::Blocking); expect(neq(poller, nullptr)); pollers.push_back(poller); } @@ -491,7 +491,7 @@ const boost::ut::suite DataSinkTests = [] { return true; }; - auto poller = data_sink_registry::instance().get_trigger_poller("test_sink", is_trigger, 
3000, 2000, blocking_mode::Blocking); + auto poller = data_sink_registry::instance().get_trigger_poller(data_sink_query::with_sink_name("test_sink"), is_trigger, 3000, 2000, blocking_mode::Blocking); expect(neq(poller, nullptr)); auto polling = std::async([poller] { @@ -552,7 +552,7 @@ const boost::ut::suite DataSinkTests = [] { received_data.push_back(dataset.signal_values.back()); }; - data_sink_registry::instance().register_trigger_callback("test_sink", is_trigger, 3000, 2000, callback); + data_sink_registry::instance().register_trigger_callback(data_sink_query::with_sink_name("test_sink"), is_trigger, 3000, 2000, callback); fair::graph::scheduler::simple sched{std::move(flow_graph)}; sched.work(); @@ -574,10 +574,10 @@ const boost::ut::suite DataSinkTests = [] { expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); - auto invalid_type_poller = data_sink_registry::instance().get_streaming_poller("test_sink"); + auto invalid_type_poller = data_sink_registry::instance().get_streaming_poller(data_sink_query::with_sink_name("test_sink")); expect(eq(invalid_type_poller, nullptr)); - auto poller = data_sink_registry::instance().get_streaming_poller("test_sink"); + auto poller = data_sink_registry::instance().get_streaming_poller(data_sink_query::with_sink_name("test_sink")); expect(neq(poller, nullptr)); auto polling = std::async([poller] { From a6e1c9dd677cf359397c46f3e2d82bd0eb23d31b Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Mon, 19 Jun 2023 12:03:46 +0200 Subject: [PATCH 27/64] Make sure we can handle all data we receive in a work() call --- include/data_sink.hpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index 410e6a2f..7c5f969a 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -234,6 +234,8 @@ namespace detail { */ template class data_sink : public node> { +static constexpr std::size_t listener_buffer_size = 65536; + public: Annotated, Unit<"Hz">> sample_rate = 10000.f; Annotated signal_name; @@ -241,9 +243,8 @@ class data_sink : public node> { Annotated> signal_min; Annotated> signal_max; - IN in; + IN in; std::size_t n_samples_consumed = 0; - static constexpr std::size_t listener_buffer_size = 65536; template struct poller_t { @@ -807,12 +808,11 @@ class data_sink : public node> { auto &in_port = input_port<"in">(this); auto &reader = in_port.streamReader(); - const auto n_readable = std::min(reader.available(), in_port.max_buffer_size()); - if (n_readable == 0) { + const auto noutput_items = std::min(reader.available(), in_port.max_buffer_size()); + if (noutput_items == 0) { return fair::graph::work_return_t::INSUFFICIENT_INPUT_ITEMS; } - const auto noutput_items = std::min(listener_buffer_size, n_readable); const auto reader_position = reader.position() + 1; const auto in_data = reader.get(noutput_items); const auto history_view = std::span(history.begin(), history_available); From edf1752fa38873afb1b7b249a32a4f0daf25190b Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Mon, 19 Jun 2023 23:18:03 +0200 Subject: [PATCH 28/64] Simplify listeners, make tag indices relative Fully simplify code to the "tag must be at index[0]" invariant, make tags relative wrt the data in the current chunk/dataset. 
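To make the new bookkeeping concrete, the following minimal self-contained sketch
illustrates the invariant using hypothetical stand-in types (TagData/MiniDataSet
instead of the real property_map/DataSet): a chunk handed to a listener carries at
most one tag, that tag always refers to the chunk's first sample, and a trigger
window's timing event therefore ends up at an index equal to the number of
pre-samples, without any absolute reader positions.

    // Standalone sketch with hypothetical stand-in types (TagData ~ property_map,
    // MiniDataSet ~ DataSet); it only illustrates the relative-index bookkeeping.
    #include <algorithm>
    #include <cstddef>
    #include <span>
    #include <string>
    #include <vector>

    struct TagData { std::string type; };
    struct TimingEvent { std::ptrdiff_t index; TagData data; }; // index relative to the dataset
    struct MiniDataSet {
        std::vector<float>       signal_values;
        std::vector<TimingEvent> timing_events;
    };

    // The tag is guaranteed to sit at index 0 of 'chunk', so its dataset-relative
    // index is simply the number of pre-samples copied in front of it.
    MiniDataSet make_trigger_window(std::span<const float> history, std::span<const float> chunk,
                                    const TagData &tag, std::size_t pre, std::size_t post) {
        MiniDataSet ds;
        const auto pre_view = history.last(std::min(pre, history.size()));
        ds.signal_values.assign(pre_view.begin(), pre_view.end());
        ds.timing_events.push_back({static_cast<std::ptrdiff_t>(pre_view.size()), tag});
        const auto post_view = chunk.first(std::min(post, chunk.size()));
        ds.signal_values.insert(ds.signal_values.end(), post_view.begin(), post_view.end());
        return ds;
    }

    int main() {
        const std::vector<float> history{0, 1, 2, 3, 4};
        const std::vector<float> chunk{5, 6, 7, 8};
        const auto ds = make_trigger_window(history, chunk, {"TRIGGER"}, 3, 2);
        // signal_values == {2, 3, 4, 5, 6}; the tag lands at dataset index 3 (== #pre-samples)
        return ds.timing_events.front().index == 3 ? 0 : 1;
    }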
--- include/data_sink.hpp | 255 +++++++++++++++++------------------------- test/qa_data_sink.cpp | 173 ++++++++++++++++++---------- 2 files changed, 217 insertions(+), 211 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index 7c5f969a..ed07ea10 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -3,6 +3,7 @@ #include "circular_buffer.hpp" #include "dataset.hpp" +#include "history_buffer.hpp" #include "node.hpp" #include "tag.hpp" @@ -195,6 +196,16 @@ namespace detail { } return s.first(std::distance(s.begin(), nm)); } + + template + bool copy_span(std::span src, std::span dst) { + assert(src.size() <= dst.size()); + if (src.size() > dst.size()) { + return false; + } + std::copy(src.begin(), src.end(), dst.begin()); + return true; + } } /** @@ -244,7 +255,6 @@ static constexpr std::size_t listener_buffer_size = 65536; Annotated> signal_max; IN in; - std::size_t n_samples_consumed = 0; template struct poller_t { @@ -262,7 +272,7 @@ static constexpr std::size_t listener_buffer_size = 65536; const auto read_data = reader.get(available); fnc(read_data); - reader.consume(available); + std::ignore = reader.consume(available); return true; } @@ -274,7 +284,7 @@ static constexpr std::size_t listener_buffer_size = 65536; const auto read_data = reader.get(1); fnc(read_data[0]); - reader.consume(1); + std::ignore = reader.consume(1); return true; } }; @@ -283,16 +293,10 @@ static constexpr std::size_t listener_buffer_size = 65536; using dataset_poller = poller_t>; private: - struct pending_window_t { - tag_t trigger; - DataSet dataset; - std::size_t pending_post_samples = 0; - }; - struct abstract_listener_t { virtual ~abstract_listener_t() = default; virtual void set_sample_rate(float) {} - virtual void process_bulk(std::span history, std::span data, int64_t reader_position, std::span tags) = 0; + virtual void process(std::span history, std::span data, std::optional tag_data0) = 0; virtual void flush() = 0; }; @@ -301,7 +305,6 @@ static constexpr std::size_t listener_buffer_size = 65536; static constexpr auto has_callback = !std::is_same_v; static constexpr auto callback_takes_tags = std::is_invocable_v, std::span>; - std::optional first_sample_seen; bool block = false; std::size_t samples_written = 0; @@ -325,35 +328,19 @@ static constexpr std::size_t listener_buffer_size = 65536; , polling_handler{std::move(poller)} {} - void process_bulk(std::span, std::span data, int64_t reader_position, std::span tags_) override { + void process(std::span, std::span data, std::optional tag_data0) override { using namespace fair::graph::detail; - if (!first_sample_seen) { - first_sample_seen = reader_position; - } - - auto tags = std::vector(tags_.begin(), tags_.end()); - // send indices relative to first sample the user received - for (tag_t &tag : tags) { - tag.index -= *first_sample_seen; - } - - auto tag_view = std::span(tags); - - auto match_before = [](int64_t last_index) { - return [&last_index](const tag_t &tag) { - return tag.index < last_index; - }; - }; if constexpr (has_callback) { // if there's pending data, fill buffer and send out if (buffer_fill > 0) { - const auto n = std::min(data.size(), buffer.size() - buffer_fill); - std::copy(data.begin(), data.begin() + n, buffer.begin() + buffer_fill); + const auto n = buffer.size() - buffer_fill; + detail::copy_span(data.first(n), std::span(buffer).last(n)); if constexpr (callback_takes_tags) { - const auto ts = find_matching_prefix(tag_view, match_before(samples_written + n)); - tag_buffer.insert(tag_buffer.end(), 
ts.begin(), ts.end()); - tag_view = tag_view.last(tag_view.size() - ts.size()); + if (tag_data0) { + tag_buffer.push_back({static_cast(buffer_fill), *tag_data0}); + tag_data0.reset(); + } } buffer_fill += n; if (buffer_fill == buffer.size()) { @@ -373,9 +360,12 @@ static constexpr std::size_t listener_buffer_size = 65536; // send out complete chunks directly while (data.size() > buffer.size()) { if constexpr (callback_takes_tags) { - const auto ts = find_matching_prefix(tag_view, match_before(samples_written + buffer.size())); - tag_view = tag_view.last(tag_view.size() - ts.size()); - callback(data.first(buffer.size()), ts); + std::vector tags; + if (tag_data0) { + tags.push_back({0, std::move(*tag_data0)}); + tag_data0.reset(); + } + callback(data.first(buffer.size()), std::span(tags)); } else { callback(data.first(buffer.size())); } @@ -385,10 +375,12 @@ static constexpr std::size_t listener_buffer_size = 65536; // write remaining data to the buffer if (!data.empty()) { - std::copy(data.begin(), data.end(), buffer.begin()); + detail::copy_span(data, std::span(buffer).first(data.size())); buffer_fill = data.size(); if constexpr (callback_takes_tags) { - tag_buffer.insert(tag_buffer.end(), tag_view.begin(), tag_view.end()); + if (tag_data0) { + tag_buffer.push_back({0, std::move(*tag_data0)}); + } } } } else { @@ -400,7 +392,7 @@ static constexpr std::size_t listener_buffer_size = 65536; if (block) { auto write_data = poller->writer.reserve_output_range(data.size()); - std::copy(data.begin(), data.end(), write_data.begin()); + detail::copy_span(data, std::span(write_data)); write_data.publish(write_data.size()); } else { const auto can_write = poller->writer.available(); @@ -408,8 +400,7 @@ static constexpr std::size_t listener_buffer_size = 65536; poller->drop_count += data.size() - can_write; if (to_write > 0) { auto write_data = poller->writer.reserve_output_range(to_write); - const auto sub = data.first(to_write); - std::copy(sub.begin(), sub.end(), write_data.begin()); + detail::copy_span(data.first(to_write), std::span(write_data)); write_data.publish(write_data.size()); } } @@ -435,6 +426,11 @@ static constexpr std::size_t listener_buffer_size = 65536; } }; + struct pending_window_t { + DataSet dataset; + std::size_t pending_post_samples = 0; + }; + template struct trigger_listener_t : public abstract_listener_t { bool block = false; @@ -489,48 +485,27 @@ static constexpr std::size_t listener_buffer_size = 65536; } } - void process_bulk(std::span history, std::span in_data, int64_t reader_position, std::span tags) override { - auto filtered = std::vector(tags.begin(), tags.end()); // should use views::filter once that is working everywhere - std::erase_if(filtered, [this](const auto &tag) { - return !trigger_predicate(tag); - }); - for (const auto &trigger : filtered) { + void process(std::span history, std::span in_data, std::optional tag_data0) override { + if (tag_data0 && trigger_predicate(tag_t{0, *tag_data0})) { // TODO fill dataset with metadata etc. DataSet dataset; - dataset.timing_events = {{trigger}}; dataset.signal_values.reserve(pre_samples + post_samples); // TODO maybe make the circ. 
buffer smaller but preallocate these - pending_trigger_windows.push_back({.trigger = trigger, .dataset = std::move(dataset), .pending_post_samples = post_samples}); + + const auto pre_sample_view = history.last(std::min(pre_samples, history.size())); + dataset.signal_values.insert(dataset.signal_values.end(), pre_sample_view.begin(), pre_sample_view.end()); + + dataset.timing_events = {{{static_cast(pre_sample_view.size()), *tag_data0}}}; + pending_trigger_windows.push_back({.dataset = std::move(dataset), .pending_post_samples = post_samples}); } auto window = pending_trigger_windows.begin(); while (window != pending_trigger_windows.end()) { - auto &dataset = window->dataset; - const auto window_offset = window->trigger.index - reader_position; - - if (window_offset >= 0 && dataset.signal_values.empty()) { // new trigger, write history - // old history: pre-trigger data from previous in_data (if available) - const auto old_history_size = std::max(static_cast(pre_samples) - window_offset, std::int64_t{0}); - const auto available = std::min(static_cast(old_history_size), history.size()); - const auto old_history_view = history.last(available); - dataset.signal_values.insert(dataset.signal_values.end(), old_history_view.begin(), old_history_view.end()); - - // new history: pre-trigger samples from the current in_data - const auto new_history_size = pre_samples - old_history_size; - const auto new_history_view = in_data.subspan(window_offset - new_history_size, new_history_size); - dataset.signal_values.insert(dataset.signal_values.end(), new_history_view.begin(), new_history_view.end()); - } - - // write missing post-samples - const auto previous_post_samples = post_samples - window->pending_post_samples; - const auto first_requested = window_offset + previous_post_samples; - const auto last_requested = window_offset + post_samples - 1; - const auto last_available = std::min(last_requested, in_data.size() - 1); - const auto post_sample_view = in_data.subspan(first_requested, last_available - first_requested + 1); - dataset.signal_values.insert(dataset.signal_values.end(), post_sample_view.begin(), post_sample_view.end()); + const auto post_sample_view = in_data.first(std::min(window->pending_post_samples, in_data.size())); + window->dataset.signal_values.insert(window->dataset.signal_values.end(), post_sample_view.begin(), post_sample_view.end()); window->pending_post_samples -= post_sample_view.size(); if (window->pending_post_samples == 0) { - publish_dataset(std::move(dataset)); + publish_dataset(std::move(window->dataset)); window = pending_trigger_windows.erase(window); } else { ++window; @@ -562,7 +537,7 @@ static constexpr std::size_t listener_buffer_size = 65536; Callback callback; explicit multiplexed_listener_t(F factory, std::size_t max_window_size, Callback cb) : observerFactory(factory), observer(observerFactory()), maximum_window_size(max_window_size), callback(cb) {} - explicit multiplexed_listener_t(F factory, std::size_t max_window_size, std::shared_ptr handler, bool do_block) : observerFactory(factory), observer(observerFactory()), maximum_window_size(max_window_size), polling_handler{std::move(handler)}, block(do_block) {} + explicit multiplexed_listener_t(F factory, std::size_t max_window_size, std::shared_ptr handler, bool do_block) : block(do_block), observerFactory(factory), observer(observerFactory()), maximum_window_size(max_window_size), polling_handler{std::move(handler)} {} inline void publish_dataset(DataSet &&data) { if constexpr (!std::is_same_v) { @@ -588,28 
+563,22 @@ static constexpr std::size_t listener_buffer_size = 65536; } } - inline void fill_pending_dataset(std::span in_data, int64_t reader_position, int64_t last_sample) { - const auto max_samples = static_cast(maximum_window_size - pending_dataset->signal_values.size()); - const auto first_sample = std::max(pending_dataset->timing_events[0][0].index - reader_position, int64_t{0}); - const auto actual_last_sample = std::min(first_sample + max_samples - 1, last_sample); - if (actual_last_sample >= first_sample) { - pending_dataset->signal_values.insert(pending_dataset->signal_values.end(), in_data.begin() + first_sample, in_data.begin() + actual_last_sample + 1); - } - } - - void process_bulk(std::span, std::span in_data, int64_t reader_position, std::span tags) override { - for (const auto &tag : tags) { - const auto obsr = observer(tag); + void process(std::span, std::span in_data, std::optional tag_data0) override { + if (tag_data0) { + const auto obsr = observer(tag_t{0, *tag_data0}); // TODO set proper error state instead of throwing if (obsr == trigger_observer_state::Stop || obsr == trigger_observer_state::StopAndStart) { if (obsr == trigger_observer_state::Stop && !pending_dataset) { throw std::runtime_error("multiplexed: Stop without start"); } - pending_dataset->timing_events[0].push_back(tag); - fill_pending_dataset(in_data, reader_position, tag.index - reader_position - 1); - publish_dataset(std::move(*pending_dataset)); - pending_dataset.reset(); + if (pending_dataset) { + if (obsr == trigger_observer_state::Stop) { + pending_dataset->timing_events[0].push_back({static_cast(pending_dataset->signal_values.size()), *tag_data0}); + } + publish_dataset(std::move(*pending_dataset)); + pending_dataset.reset(); + } } if (obsr == trigger_observer_state::Start || obsr == trigger_observer_state::StopAndStart) { if (obsr == trigger_observer_state::Start && pending_dataset) { @@ -617,11 +586,14 @@ static constexpr std::size_t listener_buffer_size = 65536; } pending_dataset = DataSet(); pending_dataset->signal_values.reserve(maximum_window_size); // TODO might be too much? - pending_dataset->timing_events = {{tag}}; + pending_dataset->timing_events = {{{0, *tag_data0}}}; } } if (pending_dataset) { - fill_pending_dataset(in_data, reader_position, in_data.size() - 1); + const auto to_write = std::min(in_data.size(), maximum_window_size - pending_dataset->signal_values.size()); + const auto view = in_data.first(to_write); + pending_dataset->signal_values.insert(pending_dataset->signal_values.end(), view.begin(), view.end()); + if (pending_dataset->signal_values.size() == maximum_window_size) { publish_dataset(std::move(*pending_dataset)); pending_dataset.reset(); @@ -641,15 +613,16 @@ static constexpr std::size_t listener_buffer_size = 65536; }; struct pending_snapshot { - tag_t tag; - tag_t::index_type requested_sample; + property_map tag_data; + std::size_t delay = 0; + std::size_t pending_samples = 0; }; template struct snapshot_listener_t : public abstract_listener_t { bool block = false; std::chrono::nanoseconds time_delay; - tag_t::index_type sample_delay = 0; + std::size_t sample_delay = 0; P trigger_predicate = {}; std::deque pending; std::weak_ptr polling_handler = {}; @@ -684,33 +657,27 @@ static constexpr std::size_t listener_buffer_size = 65536; void set_sample_rate(float r) override { sample_delay = std::round(std::chrono::duration_cast>(time_delay).count() * r); + // TODO do we need to update the requested_samples of pending here? 
(considering both old and new time_delay) } - void process_bulk(std::span, std::span in_data, int64_t reader_position, std::span tags) override { - auto triggers = std::vector(tags.begin(), tags.end()); // should use views::filter once that is working everywhere - std::erase_if(triggers, [this](const auto &tag) { - return !trigger_predicate(tag); - }); - - if (!triggers.empty()) { - for (const auto &trigger : triggers) { - pending.push_back({trigger, trigger.index + sample_delay}); - } - // can be unsorted if sample_delay changed. Alternative: iterate the whole list below - std::stable_sort(pending.begin(), pending.end(), [](const auto &lhs, const auto &rhs) { return lhs.requested_sample < rhs.requested_sample; }); + void process(std::span, std::span in_data, std::optional tag_data0) override { + if (tag_data0 && trigger_predicate({0, *tag_data0})) { + auto new_pending = pending_snapshot{*tag_data0, sample_delay, sample_delay}; + // make sure pending is sorted by number of pending_samples (insertion might be not at end if sample rate decreased; TODO unless we adapt them in set_sample_rate, see there) + auto rit = std::find_if(pending.rbegin(), pending.rend(), [delay = sample_delay](const auto &other) { return other.pending_samples < delay; }); + pending.insert(rit.base(), std::move(new_pending)); } auto it = pending.begin(); while (it != pending.end()) { - const auto rel_pos = it->requested_sample - reader_position; - assert(rel_pos >= 0); - if (rel_pos >= in_data.size()) { + if (it->pending_samples > in_data.size()) { + it->pending_samples -= in_data.size(); break; } DataSet dataset; - dataset.timing_events = {{it->tag}}; - dataset.signal_values = {in_data[rel_pos]}; + dataset.timing_events = {{{-static_cast(it->delay), std::move(it->tag_data)}}}; + dataset.signal_values = {in_data[it->pending_samples]}; publish_dataset(std::move(dataset)); it = pending.erase(it); @@ -751,7 +718,7 @@ static constexpr std::size_t listener_buffer_size = 65536; auto handler = std::make_shared(); std::lock_guard lg(listener_mutex); add_listener(std::make_unique>(std::forward(p), handler, pre_samples, post_samples, block), block); - history.resize(std::max(pre_samples, history.size())); + ensure_history_size(pre_samples); return handler; } @@ -781,7 +748,7 @@ static constexpr std::size_t listener_buffer_size = 65536; template void register_trigger_callback(P p, std::size_t pre_samples, std::size_t post_samples, Callback callback) { add_listener(std::make_unique>(std::forward
(p), pre_samples, post_samples, std::forward(callback)), false); - history.resize(std::max(pre_samples, history.size())); + ensure_history_size(pre_samples); } template @@ -797,64 +764,44 @@ static constexpr std::size_t listener_buffer_size = 65536; } // TODO this code should be called at the end of graph processing - void stop() { + void stop() noexcept { std::lock_guard lg(listener_mutex); for (auto &listener : listeners) { listener->flush(); } } - [[nodiscard]] work_return_t work() { - auto &in_port = input_port<"in">(this); - auto &reader = in_port.streamReader(); - - const auto noutput_items = std::min(reader.available(), in_port.max_buffer_size()); - if (noutput_items == 0) { - return fair::graph::work_return_t::INSUFFICIENT_INPUT_ITEMS; + [[nodiscard]] work_return_t process_bulk(std::span in_data) noexcept { + std::optional tagData; + if (this->input_tags_present()) { + tagData = this->input_tags()[0]; } - const auto reader_position = reader.position() + 1; - const auto in_data = reader.get(noutput_items); - const auto history_view = std::span(history.begin(), history_available); - // TODO I'm not sure why the +1 in "reader.position() + 1". Bug or do I misunderstand? - assert(reader_position == n_samples_consumed); - - auto &tag_reader = in_port.tagReader(); - const auto n_tags = tag_reader.available(); - const auto tag_data = tag_reader.get(n_tags); - std::vector tags(tag_data.begin(), tag_data.end()); - auto out_of_range = [end_pos = reader_position + noutput_items](const auto &tag) { - return tag.index > static_cast(end_pos); - }; - std::erase_if(tags, out_of_range); // TODO use views it works everywhere - auto tag_view = std::span(tags); - tag_reader.consume(tags.size()); - { std::lock_guard lg(listener_mutex); + const auto history_view = history.get_span(0); for (auto &listener : listeners) { - listener->process_bulk(history, in_data, reader_position, tags); + listener->process(history_view, in_data, tagData); } // store potential pre-samples for triggers at the beginning of the next chunk - // TODO should use built-in history functionality that doesn't copy (but is resizable as listeners are added) - history_available = std::min(history.size(), noutput_items); - const auto history_data = in_data.last(history_available); - history.assign(history_data.begin(), history_data.end()); - } - - n_samples_consumed += noutput_items; - - if (!reader.consume(noutput_items)) { - return work_return_t::ERROR; + const auto to_write = std::min(in_data.size(), history.capacity()); + history.push_back_bulk(in_data.last(to_write)); } return work_return_t::OK; } private: - std::vector history; - std::size_t history_available = 0; + gr::history_buffer history = gr::history_buffer(1); + + void ensure_history_size(std::size_t new_size) { + // TODO transitional, do not reallocate/copy, but create a shared buffer with size N, + // and a per-listener history buffer where more than N samples is needed. 
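The grow-by-copy that follows can be pictured with a toy bounded-history type standing in for gr::history_buffer (illustration only; the struct, its member names, and the capacities are assumptions, not the library type):

// Toy bounded history: keeps at most `cap` samples, newest win; stands in for gr::history_buffer.
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <deque>
#include <span>
#include <vector>

struct toy_history {
    std::size_t cap;
    std::deque<float> data;

    void push_back_bulk(std::span<const float> in) {
        for (float v : in) {
            if (data.size() == cap) data.pop_front(); // discard oldest when full
            data.push_back(v);
        }
    }
};

int main() {
    toy_history history{.cap = 3, .data = {}};
    const std::vector<float> chunk{1, 2, 3, 4, 5};
    history.push_back_bulk(chunk); // keeps {3, 4, 5}

    // "ensure_history_size": grow the capacity by copying the old contents into a larger buffer
    const std::size_t new_size = 5;
    toy_history grown{.cap = std::max(new_size, history.cap), .data = {}};
    grown.push_back_bulk(std::vector<float>(history.data.begin(), history.data.end()));
    std::swap(history, grown);

    assert(history.cap == 5 && history.data.size() == 3);
}

This only illustrates the transitional semantics (bounded capacity, newest samples retained, growth via copy); the real code uses gr::history_buffer as shown in the hunk.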
+ auto new_history = gr::history_buffer(std::max(new_size, history.capacity())); + new_history.push_back_bulk(history.begin(), history.end()); + std::swap(history, new_history); + } void add_listener(std::unique_ptr&& l, bool block) { l->set_sample_rate(sample_rate); // TODO also call when sample_rate changes @@ -868,6 +815,6 @@ static constexpr std::size_t listener_buffer_size = 65536; } -ENABLE_REFLECTION_FOR_TEMPLATE_FULL((typename T), (fair::graph::data_sink), in, n_samples_consumed, sample_rate); +ENABLE_REFLECTION_FOR_TEMPLATE_FULL((typename T), (fair::graph::data_sink), in, sample_rate); #endif diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index 4956fe5c..eba576cf 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -19,6 +19,16 @@ template<> auto boost::ut::cfg = boost::ut::runner>{}; #endif +template<> +struct fmt::formatter { + template + constexpr auto parse(ParseContext &ctx) { return ctx.begin(); } + template + constexpr auto format(const fair::graph::tag_t &tag, FormatContext &ctx) const { + return fmt::format_to(ctx.out(), "{}", tag.index); + } +}; + namespace fair::graph::data_sink_test { static constexpr std::int32_t n_samples = 200000; @@ -28,39 +38,56 @@ struct Source : public node> { OUT out; std::int32_t n_samples_produced = 0; std::int32_t n_samples_max = 1024; - std::int32_t n_tag_offset = 0; + std::size_t n_tag_offset = 0; float sample_rate = 1000.0f; T next_value = {}; - std::deque tags; // must be sorted by index + std::deque tags; // must be sorted by index, only one tag per sample void - init(const tag_t::map_type &old_settings, const tag_t::map_type &new_settings) { + init(const property_map &, const property_map &) { // optional init function that is called after construction and whenever settings change fair::graph::publish_tag(out, { { "n_samples_max", n_samples_max } }, n_tag_offset); } constexpr std::int64_t - available_samples(const Source &self) noexcept { + available_samples(const Source &) noexcept { const auto ret = static_cast(n_samples_max - n_samples_produced); return ret > 0 ? ret : -1; // '-1' -> DONE, produced enough samples } [[nodiscard]] constexpr T process_one() noexcept { - while (!tags.empty() && tags[0].index == n_samples_produced) { - // TODO there probably is, or should be, an easier way to do this - const auto pos = output_port<"out">(this).streamWriter().position(); - publish_tag(out, tags[0].map, n_samples_produced - pos); + if (!tags.empty() && tags[0].index == n_samples_produced) { +#if 0 + // TODO this is supposed to be the way, but the outputs tags are not processed after every process_one call, + // and with process_bulk we could only write one tag + this->output_tags()[0] = tags[0].map; +#else + auto range = out.tagWriter().reserve_output_range(1); + range[0].index = n_samples_produced; + range[0].map = std::move(tags[0].map); + range.publish(1); +#endif tags.pop_front(); } n_samples_produced++; - const auto v = next_value; - next_value++; - return v; + return next_value++; } }; +/** + * Example tag observer (TriggerObserver implementation) for the multiplexed listener case (interleaved data). As a toy example, we use + * data tagged as Year/Month/Day. 
+ * + * For each of year, month, day, the user can specify whether: + * + * - option not set: The field is to be ignored + * - -1: Whenever a change between the previous and the current tag is observed, start a new data set (StopAndStart) + * - other values >= 0: A new dataset is started when the tag matches, and stopped, when a tag doesn't match + * + * (Note that the TriggerObserver is stateful and remembers the last tag seen, other than a stateless TriggerPredicate) + */ struct Observer { std::optional year; std::optional month; @@ -68,7 +95,7 @@ struct Observer { std::optional> last_seen; bool last_matched = false; - explicit Observer(std::optional y, std::optional m, std::optional d) : year(y), month(m), day(d) {} + explicit Observer(std::optional year_, std::optional month_, std::optional day_) : year(year_), month(month_), day(day_) {} static inline bool same(int x, std::optional other) { return other && x == *other; @@ -116,9 +143,8 @@ struct Observer { } }; -static tag_t make_tag(tag_t::index_type index, int y, int m, int d) { - tag_t::map_type map; - return tag_t{index, {{"Y", y}, {"M", m}, {"D", d}}}; +static tag_t make_tag(tag_t::index_type index, int year, int month, int day) { + return tag_t{index, {{"Y", year}, {"M", month}, {"D", day}}}; } static std::vector make_test_tags(tag_t::index_type first_index, tag_t::index_type interval) { @@ -173,6 +199,21 @@ std::string run_observer_test(std::span tags, O o) { ENABLE_REFLECTION_FOR_TEMPLATE_FULL((typename T), (fair::graph::data_sink_test::Source), out, n_samples_produced, n_samples_max, n_tag_offset, sample_rate); + +template +std::string format_list(const T &l) { + return fmt::format("[{}]", fmt::join(l, ", ")); +} + +template +bool indexes_match(const T& lhs, const U& rhs) { + auto index_match = [](const auto &l, const auto &r) { + return l.index == r.index; + }; + + return std::equal(std::begin(lhs), std::end(lhs), std::begin(rhs), std::end(rhs), index_match); +} + const boost::ut::suite DataSinkTests = [] { using namespace boost::ut; using namespace fair::graph; @@ -180,13 +221,15 @@ const boost::ut::suite DataSinkTests = [] { using namespace std::string_literals; "callback continuous mode"_test = [] { - graph flow_graph; - - static constexpr std::int32_t n_samples = 200005; + static constexpr std::int32_t n_samples = 200005; static constexpr std::size_t chunk_size = 1000; + const auto src_tags = make_test_tags(0, 1000); + + graph flow_graph; auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); auto &sink = flow_graph.make_node>(); + src.tags = std::deque(src_tags.begin(), src_tags.end()); sink.set_name("test_sink"); expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); @@ -222,7 +265,9 @@ const boost::ut::suite DataSinkTests = [] { } auto lg = std::lock_guard{m2}; - received_tags.insert(received_tags.end(), tags.begin(), tags.end()); + std::vector adjusted; + std::transform(tags.begin(), tags.end(), std::back_inserter(adjusted), [samples_seen2](const auto &tag) { return tag_t{static_cast(samples_seen2) + tag.index, tag.map}; }); + received_tags.insert(received_tags.end(), adjusted.begin(), adjusted.end()); samples_seen2 += buffer.size(); chunks_seen2++; if (chunks_seen2 < 201) { @@ -243,10 +288,9 @@ const boost::ut::suite DataSinkTests = [] { auto lg = std::lock_guard{m2}; expect(eq(chunks_seen1.load(), 201)); expect(eq(chunks_seen2, 201)); - expect(eq(sink.n_samples_consumed, n_samples)); expect(eq(samples_seen1.load(), n_samples)); expect(eq(samples_seen2, n_samples)); 
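The callback test shifts the chunk-relative tag indices by the number of samples already seen before comparing them against the source tags. A standalone sketch of that bookkeeping (toy tag type and values, not fair::graph::tag_t):

// Converting chunk-relative tag indices (as delivered per callback/poller chunk)
// into absolute stream positions.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

struct toy_tag {
    std::int64_t index;
};

int main() {
    std::size_t samples_seen = 0;
    std::vector<toy_tag> received;

    auto on_chunk = [&](std::size_t chunk_size, const std::vector<toy_tag> &chunk_tags) {
        for (const auto &tag : chunk_tags) {
            // tag.index is relative to the start of this chunk -> shift by samples_seen
            received.push_back({static_cast<std::int64_t>(samples_seen) + tag.index});
        }
        samples_seen += chunk_size;
    };

    on_chunk(1000, {{0}, {500}}); // tags land at absolute positions 0 and 500
    on_chunk(1000, {{0}});        // tag lands at absolute position 1000

    assert(received.size() == 3 && received[2].index == 1000);
}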
- expect(eq(received_tags.size(), src.tags.size())); + expect(eq(indexes_match(received_tags, src_tags), true)) << fmt::format("{} != {}", format_list(received_tags), format_list(src_tags)); }; "blocking polling continuous mode"_test = [] { @@ -280,11 +324,7 @@ const boost::ut::suite DataSinkTests = [] { })) {} } - std::vector expected(n_samples); - std::iota(expected.begin(), expected.end(), 0.0); - expect(eq(received.size(), expected.size())); - expect(eq(received, expected)); - expect(eq(poller->drop_count.load(), 0)); + return received; }); }; @@ -296,19 +336,27 @@ const boost::ut::suite DataSinkTests = [] { sink.stop(); // TODO the scheduler should call this - runner1.wait(); - runner2.wait(); - - expect(eq(sink.n_samples_consumed, n_samples)); + std::vector expected(n_samples); + std::iota(expected.begin(), expected.end(), 0.0); + + const auto received1 = runner1.get(); + const auto received2 = runner2.get(); + expect(eq(received1.size(), expected.size())); + expect(eq(received1, expected)); + expect(eq(poller1->drop_count.load(), 0)); + expect(eq(received2.size(), expected.size())); + expect(eq(received2, expected)); + expect(eq(poller2->drop_count.load(), 0)); }; "blocking polling trigger mode non-overlapping"_test = [] { constexpr std::int32_t n_samples = 200000; graph flow_graph; - auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); - src.tags = {{3000, {{"TYPE", "TRIGGER"}}}, tag_t{8000, {{"TYPE", "NO_TRIGGER"}}}, {180000, {{"TYPE", "TRIGGER"}}}}; - auto &sink = flow_graph.make_node>(); + auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); + const auto tags = std::vector{{3000, {{"TYPE", "TRIGGER"}}}, tag_t{8000, {{"TYPE", "NO_TRIGGER"}}}, {180000, {{"TYPE", "TRIGGER"}}}}; + src.tags = std::deque(tags.begin(), tags.end()); + auto &sink = flow_graph.make_node>(); sink.set_name("test_sink"); expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); @@ -318,19 +366,24 @@ const boost::ut::suite DataSinkTests = [] { return v && std::get(v->get()) == "TRIGGER"; }; - auto poller = data_sink_registry::instance().get_trigger_poller(data_sink_query::with_sink_name("test_sink"), is_trigger, 3, 2, blocking_mode::Blocking); + auto poller = data_sink_registry::instance().get_trigger_poller(data_sink_query::with_sink_name("test_sink"), is_trigger, 3, 2, blocking_mode::Blocking); expect(neq(poller, nullptr)); auto polling = std::async([poller] { - std::vector received_data; + std::vector received_data; + std::vector received_tags; bool seen_finished = false; while (!seen_finished) { seen_finished = poller->finished; - [[maybe_unused]] auto r = poller->process_one([&received_data](const auto &dataset) { + [[maybe_unused]] auto r = poller->process_one([&received_data, &received_tags](const auto &dataset) { received_data.insert(received_data.end(), dataset.signal_values.begin(), dataset.signal_values.end()); + expect(eq(dataset.timing_events.size(), 1u)) >> fatal; + expect(eq(dataset.timing_events[0].size(), 1u)); + expect(eq(dataset.timing_events[0][0].index, 3)); + received_tags.insert(received_tags.end(), dataset.timing_events[0].begin(), dataset.timing_events[0].end()); }); } - return received_data; + return std::make_tuple(received_data, received_tags); }); fair::graph::scheduler::simple sched{std::move(flow_graph)}; @@ -338,11 +391,13 @@ const boost::ut::suite DataSinkTests = [] { sink.stop(); // TODO the scheduler should call this - const auto received_data = polling.get(); + const auto &[received_data, received_tags] = 
polling.get(); + const auto expected_tags = {tags[0], tags[2]}; // triggers-only - expect(eq(sink.n_samples_consumed, n_samples)); expect(eq(received_data.size(), 10)); - expect(eq(received_data, std::vector{2997, 2998, 2999, 3000, 3001, 179997, 179998, 179999, 180000, 180001})); + expect(eq(received_data, std::vector{2997, 2998, 2999, 3000, 3001, 179997, 179998, 179999, 180000, 180001})); + expect(eq(received_tags.size(), expected_tags.size())); + expect(eq(poller->drop_count.load(), 0)); }; @@ -373,6 +428,9 @@ const boost::ut::suite DataSinkTests = [] { while (!seen_finished) { seen_finished = poller->finished; [[maybe_unused]] auto r = poller->process_one([&received_data](const auto &dataset) { + expect(eq(dataset.timing_events.size(), 1u)) >> fatal; + expect(eq(dataset.timing_events[0].size(), 1u)); + expect(eq(dataset.timing_events[0][0].index, -5000)); received_data.insert(received_data.end(), dataset.signal_values.begin(), dataset.signal_values.end()); }); } @@ -387,7 +445,6 @@ const boost::ut::suite DataSinkTests = [] { const auto received_data = poller_result.get(); - expect(eq(sink.n_samples_consumed, n_samples)); expect(eq(received_data, std::vector{8000, 185000})); expect(eq(poller->drop_count.load(), 0)); }; @@ -398,7 +455,7 @@ const boost::ut::suite DataSinkTests = [] { const std::int32_t n_samples = tags.size() * 10000 + 100000; graph flow_graph; auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); - src.tags = std::deque(tags.begin(), tags.end()); + src.tags = std::deque(tags.begin(), tags.end()); auto &sink = flow_graph.make_node>(); sink.set_name("test_sink"); @@ -468,7 +525,6 @@ const boost::ut::suite DataSinkTests = [] { for (std::size_t i = 0; i < results.size(); ++i) { expect(eq(results[i].get(), expected[i])); } - expect(eq(sink.n_samples_consumed, n_samples)); }; "blocking polling trigger mode overlapping"_test = [] { @@ -496,17 +552,22 @@ const boost::ut::suite DataSinkTests = [] { auto polling = std::async([poller] { std::vector received_data; + std::vector received_tags; bool seen_finished = false; while (!seen_finished) { // TODO make finished vs. 
pending data handling actually thread-safe seen_finished = poller->finished.load(); - while (poller->process_one([&received_data](const auto &dataset) { - expect(eq(dataset.signal_values.size(), 5000)); + while (poller->process_one([&received_data, &received_tags](const auto &dataset) { + expect(eq(dataset.signal_values.size(), 5000u) >> fatal); received_data.push_back(dataset.signal_values.front()); received_data.push_back(dataset.signal_values.back()); + expect(eq(dataset.timing_events.size(), 1u)) >> fatal; + expect(eq(dataset.timing_events[0].size(), 1u)); + expect(eq(dataset.timing_events[0][0].index, 3000)); + received_tags.insert(received_tags.end(), dataset.timing_events[0].begin(), dataset.timing_events[0].end()); })) {} } - return received_data; + return std::make_tuple(received_data, received_tags); }); fair::graph::scheduler::simple sched{std::move(flow_graph)}; @@ -514,12 +575,12 @@ const boost::ut::suite DataSinkTests = [] { sink.stop(); // TODO the scheduler should call this - const auto received_data = polling.get(); + const auto &[received_data, received_tags] = polling.get(); auto expected_start = std::vector{57000, 61999, 57001, 62000, 57002}; - expect(eq(sink.n_samples_consumed, n_samples)); - expect(eq(received_data.size(), 2 * n_triggers)); + expect(eq(poller->drop_count.load(), 0u)); + expect(eq(received_data.size(), 2 * n_triggers) >> fatal); expect(eq(std::vector(received_data.begin(), received_data.begin() + 5), expected_start)); - expect(eq(poller->drop_count.load(), 0)); + expect(eq(received_tags.size(), n_triggers)); }; "callback trigger mode overlapping"_test = [] { @@ -538,7 +599,7 @@ const boost::ut::suite DataSinkTests = [] { expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); - auto is_trigger = [](const tag_t &tag) { + auto is_trigger = [](const tag_t &) { return true; }; @@ -547,7 +608,7 @@ const boost::ut::suite DataSinkTests = [] { auto callback = [&received_data, &m](auto &&dataset) { std::lock_guard lg{m}; - expect(eq(dataset.signal_values.size(), 5000)); + expect(eq(dataset.signal_values.size(), 5000u)); received_data.push_back(dataset.signal_values.front()); received_data.push_back(dataset.signal_values.back()); }; @@ -561,7 +622,6 @@ const boost::ut::suite DataSinkTests = [] { std::lock_guard lg{m}; auto expected_start = std::vector{57000, 61999, 57001, 62000, 57002}; - expect(eq(sink.n_samples_consumed, n_samples)); expect(eq(received_data.size(), 2 * n_triggers)); expect(eq(std::vector(received_data.begin(), received_data.begin() + 5), expected_start)); }; @@ -595,7 +655,7 @@ const boost::ut::suite DataSinkTests = [] { })) {} } - expect(eq(samples_seen + poller->drop_count, n_samples)); + return samples_seen; }); fair::graph::scheduler::simple sched{std::move(flow_graph)}; @@ -603,9 +663,8 @@ const boost::ut::suite DataSinkTests = [] { sink.stop(); // TODO the scheduler should call this - polling.wait(); - - expect(eq(sink.n_samples_consumed, n_samples)); + const auto samples_seen = polling.get(); + expect(eq(samples_seen + poller->drop_count, n_samples)); }; }; From 2635a670ff8140fd06da3b2f617302c7f46ef4e3 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Wed, 21 Jun 2023 15:47:17 +0200 Subject: [PATCH 29/64] fix drop count logic --- include/data_sink.hpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index ed07ea10..3de2dc43 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -395,14 +395,13 @@ static constexpr 
std::size_t listener_buffer_size = 65536; detail::copy_span(data, std::span(write_data)); write_data.publish(write_data.size()); } else { - const auto can_write = poller->writer.available(); - auto to_write = std::min(data.size(), can_write); - poller->drop_count += data.size() - can_write; + auto to_write = std::max(data.size(), poller->writer.available()); if (to_write > 0) { auto write_data = poller->writer.reserve_output_range(to_write); detail::copy_span(data.first(to_write), std::span(write_data)); write_data.publish(write_data.size()); } + poller->drop_count += data.size() - to_write; } } } From 220376254a85625bfbe71bf97fcf50b571ec7513 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Thu, 22 Jun 2023 10:46:15 +0200 Subject: [PATCH 30/64] Continuous/polling: Pass tags, too --- include/data_sink.hpp | 74 ++++++++++++++++++++++++++++++++----------- test/qa_data_sink.cpp | 67 +++++++++++++++++++++++++-------------- 2 files changed, 99 insertions(+), 42 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index 3de2dc43..7616ac9f 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -256,15 +256,55 @@ static constexpr std::size_t listener_buffer_size = 65536; IN in; - template - struct poller_t { + + struct poller { + gr::circular_buffer buffer = gr::circular_buffer(listener_buffer_size); + decltype(buffer.new_reader()) reader = buffer.new_reader(); + decltype(buffer.new_writer()) writer = buffer.new_writer(); + gr::circular_buffer tag_buffer = gr::circular_buffer(1024); + decltype(tag_buffer.new_reader()) tag_reader = tag_buffer.new_reader(); + decltype(tag_buffer.new_writer()) tag_writer = tag_buffer.new_writer(); + std::size_t samples_read = 0; // reader thread std::atomic finished = false; std::atomic drop_count = 0; - gr::circular_buffer buffer = gr::circular_buffer(listener_buffer_size); + + template + [[nodiscard]] bool process(Handler fnc) { + const auto available = reader.available(); + if (available == 0) { + return false; + } + + const auto read_data = reader.get(available); + if constexpr (requires { fnc(std::span(), std::span()); }) { + const auto tags = tag_reader.get(); + const auto it = std::find_if_not(tags.begin(), tags.end(), [until = static_cast(samples_read + available)](const auto &tag) { return tag.index < until; }); + auto relevant_tags = std::vector(tags.begin(), it); + for (auto &t : relevant_tags) { + t.index -= static_cast(samples_read); + } + fnc(read_data, std::span(relevant_tags)); + std::ignore = tag_reader.consume(relevant_tags.size()); + } else { + std::ignore = tag_reader.consume(tag_reader.available()); + fnc(read_data); + } + + std::ignore = reader.consume(available); + samples_read += available; + return true; + } + }; + + struct dataset_poller { + gr::circular_buffer> buffer = gr::circular_buffer>(listener_buffer_size); decltype(buffer.new_reader()) reader = buffer.new_reader(); decltype(buffer.new_writer()) writer = buffer.new_writer(); - [[nodiscard]] bool process_bulk(std::invocable> auto fnc) { + std::atomic finished = false; + std::atomic drop_count = 0; + + [[nodiscard]] bool process_bulk(std::invocable>> auto fnc) { const auto available = reader.available(); if (available == 0) { return false; @@ -276,7 +316,7 @@ static constexpr std::size_t listener_buffer_size = 65536; return true; } - [[nodiscard]] bool process_one(std::invocable auto fnc) { + [[nodiscard]] bool process_one(std::invocable> auto fnc) { const auto available = reader.available(); if (available == 0) { return false; @@ -289,9 +329,6 @@ static 
constexpr std::size_t listener_buffer_size = 65536; } }; - using poller = poller_t; - using dataset_poller = poller_t>; - private: struct abstract_listener_t { virtual ~abstract_listener_t() = default; @@ -390,19 +427,20 @@ static constexpr std::size_t listener_buffer_size = 65536; return; } - if (block) { - auto write_data = poller->writer.reserve_output_range(data.size()); - detail::copy_span(data, std::span(write_data)); + const auto to_write = block ? data.size() : std::min(data.size(), poller->writer.available()); + + if (to_write > 0) { + auto write_data = poller->writer.reserve_output_range(to_write); + detail::copy_span(data.first(to_write), std::span(write_data)); write_data.publish(write_data.size()); - } else { - auto to_write = std::max(data.size(), poller->writer.available()); - if (to_write > 0) { - auto write_data = poller->writer.reserve_output_range(to_write); - detail::copy_span(data.first(to_write), std::span(write_data)); - write_data.publish(write_data.size()); + if (tag_data0) { + auto tw = poller->tag_writer.reserve_output_range(1); + tw[0] = {static_cast(samples_written), std::move(*tag_data0)}; + tw.publish(1); } - poller->drop_count += data.size() - to_write; } + poller->drop_count += data.size() - to_write; + samples_written += to_write; } } diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index eba576cf..607ead90 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -298,7 +298,9 @@ const boost::ut::suite DataSinkTests = [] { constexpr std::int32_t n_samples = 200000; graph flow_graph; + const auto tags = make_test_tags(0, 1000); auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); + src.tags = std::deque(tags.begin(), tags.end()); auto &sink = flow_graph.make_node>(); sink.set_name("test_sink"); @@ -306,30 +308,45 @@ const boost::ut::suite DataSinkTests = [] { std::atomic samples_seen = 0; - auto poller1 = data_sink_registry::instance().get_streaming_poller(data_sink_query::with_sink_name("test_sink"), blocking_mode::Blocking); - expect(neq(poller1, nullptr)); + auto poller_data_only = data_sink_registry::instance().get_streaming_poller(data_sink_query::with_sink_name("test_sink"), blocking_mode::Blocking); + expect(neq(poller_data_only, nullptr)); - auto poller2 = data_sink_registry::instance().get_streaming_poller(data_sink_query::with_sink_name("test_sink"), blocking_mode::Blocking); - expect(neq(poller2, nullptr)); + auto poller_with_tags = data_sink_registry::instance().get_streaming_poller(data_sink_query::with_sink_name("test_sink"), blocking_mode::Blocking); + expect(neq(poller_with_tags, nullptr)); - auto make_runner = [](auto poller) { - return std::async([poller] { - std::vector received; - bool seen_finished = false; - while (!seen_finished) { - // TODO make finished vs. pending data handling actually thread-safe - seen_finished = poller->finished.load(); - while (poller->process_bulk([&received](const auto &data) { - received.insert(received.end(), data.begin(), data.end()); - })) {} - } + auto runner1 = std::async([poller = poller_data_only] { + std::vector received; + bool seen_finished = false; + while (!seen_finished) { + // TODO make finished vs. 
pending data handling actually thread-safe + seen_finished = poller->finished; + while (poller->process([&received](const auto &data) { + received.insert(received.end(), data.begin(), data.end()); + })) {} + } - return received; - }); - }; + return received; + }); + + auto runner2 = std::async([poller = poller_with_tags] { + std::vector received; + std::vector received_tags; + bool seen_finished = false; + while (!seen_finished) { + // TODO make finished vs. pending data handling actually thread-safe + seen_finished = poller->finished; + while (poller->process([&received, &received_tags](const auto &data, const auto &tags_) { + auto tags = std::vector(tags_.begin(), tags_.end()); + for (auto &t : tags) { + t.index += static_cast(received.size()); + } + received_tags.insert(received_tags.end(), tags.begin(), tags.end()); + received.insert(received.end(), data.begin(), data.end()); + })) {} + } - auto runner1 = make_runner(poller1); - auto runner2 = make_runner(poller2); + return std::make_tuple(received, received_tags); + }); fair::graph::scheduler::simple sched{std::move(flow_graph)}; sched.work(); @@ -340,13 +357,15 @@ const boost::ut::suite DataSinkTests = [] { std::iota(expected.begin(), expected.end(), 0.0); const auto received1 = runner1.get(); - const auto received2 = runner2.get(); + const auto &[received2, received_tags] = runner2.get(); expect(eq(received1.size(), expected.size())); expect(eq(received1, expected)); - expect(eq(poller1->drop_count.load(), 0)); + expect(eq(poller_data_only->drop_count.load(), 0)); expect(eq(received2.size(), expected.size())); expect(eq(received2, expected)); - expect(eq(poller2->drop_count.load(), 0)); + expect(eq(received_tags.size(), tags.size())); + expect(eq(indexes_match(received_tags, tags), true)) << fmt::format("{} != {}", format_list(received_tags), format_list(tags)); + expect(eq(poller_with_tags->drop_count.load(), 0)); }; "blocking polling trigger mode non-overlapping"_test = [] { @@ -650,7 +669,7 @@ const boost::ut::suite DataSinkTests = [] { std::this_thread::sleep_for(20ms); seen_finished = poller->finished.load(); - while (poller->process_bulk([&samples_seen](const auto &data) { + while (poller->process([&samples_seen](const auto &data) { samples_seen += data.size(); })) {} } From 75d9e55910fd2818bea4d8006e22e9353e0e96dd Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Thu, 22 Jun 2023 11:03:00 +0200 Subject: [PATCH 31/64] Adapt to tag refactoring --- include/data_sink.hpp | 11 ++++++----- test/qa_data_sink.cpp | 40 +++++++++++++++++++--------------------- 2 files changed, 25 insertions(+), 26 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index 7616ac9f..fecad73b 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -375,7 +375,7 @@ static constexpr std::size_t listener_buffer_size = 65536; detail::copy_span(data.first(n), std::span(buffer).last(n)); if constexpr (callback_takes_tags) { if (tag_data0) { - tag_buffer.push_back({static_cast(buffer_fill), *tag_data0}); + tag_buffer.push_back({static_cast(buffer_fill), *tag_data0}); tag_data0.reset(); } } @@ -435,7 +435,7 @@ static constexpr std::size_t listener_buffer_size = 65536; write_data.publish(write_data.size()); if (tag_data0) { auto tw = poller->tag_writer.reserve_output_range(1); - tw[0] = {static_cast(samples_written), std::move(*tag_data0)}; + tw[0] = {static_cast(samples_written), std::move(*tag_data0)}; tw.publish(1); } } @@ -611,7 +611,7 @@ static constexpr std::size_t listener_buffer_size = 65536; if (pending_dataset) { if (obsr 
== trigger_observer_state::Stop) { - pending_dataset->timing_events[0].push_back({static_cast(pending_dataset->signal_values.size()), *tag_data0}); + pending_dataset->timing_events[0].push_back({static_cast(pending_dataset->signal_values.size()), *tag_data0}); } publish_dataset(std::move(*pending_dataset)); pending_dataset.reset(); @@ -713,7 +713,7 @@ static constexpr std::size_t listener_buffer_size = 65536; } DataSet dataset; - dataset.timing_events = {{{-static_cast(it->delay), std::move(it->tag_data)}}}; + dataset.timing_events = {{{-static_cast(it->delay), std::move(it->tag_data)}}}; dataset.signal_values = {in_data[it->pending_samples]}; publish_dataset(std::move(dataset)); @@ -811,7 +811,8 @@ static constexpr std::size_t listener_buffer_size = 65536; [[nodiscard]] work_return_t process_bulk(std::span in_data) noexcept { std::optional tagData; if (this->input_tags_present()) { - tagData = this->input_tags()[0]; + assert(this->input_tags()[0].index == 0); + tagData = this->input_tags()[0].map; } { diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index 607ead90..fe02ca42 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -10,7 +10,6 @@ #include #include -#include #include #if defined(__clang__) && __clang_major__ >= 16 @@ -41,7 +40,8 @@ struct Source : public node> { std::size_t n_tag_offset = 0; float sample_rate = 1000.0f; T next_value = {}; - std::deque tags; // must be sorted by index, only one tag per sample + std::size_t next_tag = 0; + std::vector tags; // must be sorted by index, only one tag per sample void init(const property_map &, const property_map &) { @@ -55,20 +55,18 @@ struct Source : public node> { return ret > 0 ? ret : -1; // '-1' -> DONE, produced enough samples } - [[nodiscard]] constexpr T - process_one() noexcept { - if (!tags.empty() && tags[0].index == n_samples_produced) { + T process_one() noexcept { + if (next_tag < tags.size() && tags[next_tag].index <= static_cast>(n_samples_produced)) { #if 0 - // TODO this is supposed to be the way, but the outputs tags are not processed after every process_one call, - // and with process_bulk we could only write one tag - this->output_tags()[0] = tags[0].map; + tag_t &out_tag = this->output_tags()[0]; + out_tag = tags[next_tag]; + this->forward_tags(); #else auto range = out.tagWriter().reserve_output_range(1); - range[0].index = n_samples_produced; - range[0].map = std::move(tags[0].map); + range[0] = tags[next_tag]; range.publish(1); #endif - tags.pop_front(); + next_tag++; } n_samples_produced++; @@ -143,11 +141,11 @@ struct Observer { } }; -static tag_t make_tag(tag_t::index_type index, int year, int month, int day) { +static tag_t make_tag(tag_t::signed_index_type index, int year, int month, int day) { return tag_t{index, {{"Y", year}, {"M", month}, {"D", day}}}; } -static std::vector make_test_tags(tag_t::index_type first_index, tag_t::index_type interval) { +static std::vector make_test_tags(tag_t::signed_index_type first_index, tag_t::signed_index_type interval) { std::vector tags; for (int y = 1; y <= 3; ++y) { for (int m = 1; m <= 2; ++m) { @@ -229,7 +227,7 @@ const boost::ut::suite DataSinkTests = [] { graph flow_graph; auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); auto &sink = flow_graph.make_node>(); - src.tags = std::deque(src_tags.begin(), src_tags.end()); + src.tags = src_tags; sink.set_name("test_sink"); expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); @@ -260,13 +258,13 @@ const boost::ut::suite DataSinkTests = 
[] { } for (const auto &tag : tags) { - ge(tag.index, static_cast(samples_seen2)); + ge(tag.index, static_cast(samples_seen2)); lt(tag.index, samples_seen2 + buffer.size()); } auto lg = std::lock_guard{m2}; std::vector adjusted; - std::transform(tags.begin(), tags.end(), std::back_inserter(adjusted), [samples_seen2](const auto &tag) { return tag_t{static_cast(samples_seen2) + tag.index, tag.map}; }); + std::transform(tags.begin(), tags.end(), std::back_inserter(adjusted), [samples_seen2](const auto &tag) { return tag_t{static_cast(samples_seen2) + tag.index, tag.map}; }); received_tags.insert(received_tags.end(), adjusted.begin(), adjusted.end()); samples_seen2 += buffer.size(); chunks_seen2++; @@ -300,7 +298,7 @@ const boost::ut::suite DataSinkTests = [] { graph flow_graph; const auto tags = make_test_tags(0, 1000); auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); - src.tags = std::deque(tags.begin(), tags.end()); + src.tags = tags; auto &sink = flow_graph.make_node>(); sink.set_name("test_sink"); @@ -374,7 +372,7 @@ const boost::ut::suite DataSinkTests = [] { graph flow_graph; auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); const auto tags = std::vector{{3000, {{"TYPE", "TRIGGER"}}}, tag_t{8000, {{"TYPE", "NO_TRIGGER"}}}, {180000, {{"TYPE", "TRIGGER"}}}}; - src.tags = std::deque(tags.begin(), tags.end()); + src.tags = tags; auto &sink = flow_graph.make_node>(); sink.set_name("test_sink"); @@ -474,7 +472,7 @@ const boost::ut::suite DataSinkTests = [] { const std::int32_t n_samples = tags.size() * 10000 + 100000; graph flow_graph; auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); - src.tags = std::deque(tags.begin(), tags.end()); + src.tags = tags; auto &sink = flow_graph.make_node>(); sink.set_name("test_sink"); @@ -554,7 +552,7 @@ const boost::ut::suite DataSinkTests = [] { auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); for (std::size_t i = 0; i < n_triggers; ++i) { - src.tags.push_back(tag_t{static_cast(60000 + i), {{"TYPE", "TRIGGER"}}}); + src.tags.push_back(tag_t{static_cast(60000 + i), {{"TYPE", "TRIGGER"}}}); } auto &sink = flow_graph.make_node>(); @@ -610,7 +608,7 @@ const boost::ut::suite DataSinkTests = [] { auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); for (std::size_t i = 0; i < n_triggers; ++i) { - src.tags.push_back(tag_t{static_cast(60000 + i), {{"TYPE", "TRIGGER"}}}); + src.tags.push_back(tag_t{static_cast(60000 + i), {{"TYPE", "TRIGGER"}}}); } auto &sink = flow_graph.make_node>(); From 61c61990f00a3e75eefd66ea39bd499499496af6 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Thu, 22 Jun 2023 14:03:42 +0200 Subject: [PATCH 32/64] Fix logic errors for small chunk sizes Seen when force the source to create 1 sample at a time. 
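The first data_sink.hpp hunk below clamps the per-call copy to what the incoming chunk actually holds; the accumulate-and-emit pattern it fixes can be sketched standalone (toy example with local names, not the listener class itself):

// Fixed-size chunking as done by the continuous callback listener: accumulate samples
// into a chunk buffer and emit once it is full, even when input arrives one sample at a time.
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <span>
#include <vector>

int main() {
    constexpr std::size_t chunk_size = 4;
    std::vector<int> buffer(chunk_size);
    std::size_t buffer_fill = 0;

    auto feed = [&](std::span<const int> data) {
        while (!data.empty()) {
            // the essential fix: never copy more than the input actually holds
            const auto n = std::min(data.size(), buffer.size() - buffer_fill);
            std::copy_n(data.begin(), n, buffer.begin() + static_cast<std::ptrdiff_t>(buffer_fill));
            buffer_fill += n;
            data = data.subspan(n);
            if (buffer_fill == buffer.size()) { // full chunk ready -> would invoke the callback
                std::printf("chunk: %d..%d\n", buffer.front(), buffer.back());
                buffer_fill = 0;
            }
        }
    };

    for (int i = 0; i < 10; ++i) {
        const int sample = i;
        feed(std::span<const int>(&sample, 1)); // one sample at a time, as forced in the test source
    }
}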
--- include/data_sink.hpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index fecad73b..32d9faa0 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -371,8 +371,8 @@ static constexpr std::size_t listener_buffer_size = 65536; if constexpr (has_callback) { // if there's pending data, fill buffer and send out if (buffer_fill > 0) { - const auto n = buffer.size() - buffer_fill; - detail::copy_span(data.first(n), std::span(buffer).last(n)); + const auto n = std::min(data.size(), buffer.size() - buffer_fill); + detail::copy_span(data.first(n), std::span(buffer).subspan(buffer_fill, n)); if constexpr (callback_takes_tags) { if (tag_data0) { tag_buffer.push_back({static_cast(buffer_fill), *tag_data0}); @@ -707,7 +707,7 @@ static constexpr std::size_t listener_buffer_size = 65536; auto it = pending.begin(); while (it != pending.end()) { - if (it->pending_samples > in_data.size()) { + if (it->pending_samples >= in_data.size()) { it->pending_samples -= in_data.size(); break; } From 550e4500c02d68a4cda12350fe6d9489b593e6ec Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Thu, 22 Jun 2023 14:07:26 +0200 Subject: [PATCH 33/64] Set tags in test source in the intended way --- test/qa_data_sink.cpp | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index fe02ca42..6d24c645 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -52,20 +52,19 @@ struct Source : public node> { constexpr std::int64_t available_samples(const Source &) noexcept { const auto ret = static_cast(n_samples_max - n_samples_produced); - return ret > 0 ? ret : -1; // '-1' -> DONE, produced enough samples + // forcing one sample, at a time, see below + return ret > 0 ? 
1 : -1; // '-1' -> DONE, produced enough samples } T process_one() noexcept { if (next_tag < tags.size() && tags[next_tag].index <= static_cast>(n_samples_produced)) { -#if 0 tag_t &out_tag = this->output_tags()[0]; - out_tag = tags[next_tag]; + // TODO when not enforcing single samples in available_samples, one would have to do: + // const auto base = std::max(out.streamWriter().position() + 1, tag_t::signed_index_type{0}); + // out_tag = tag_t{ tags[next_tag].index - base, tags[next_tag].map }; + // Still think there could be nicer API to set a tag from process_one() + out_tag = tag_t{ 0 , tags[next_tag].map }; this->forward_tags(); -#else - auto range = out.tagWriter().reserve_output_range(1); - range[0] = tags[next_tag]; - range.publish(1); -#endif next_tag++; } From 9a29152f2037e40f83784934a6d31aa8e36044d8 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Thu, 22 Jun 2023 14:29:26 +0200 Subject: [PATCH 34/64] Run clang-format --- include/data_sink.hpp | 427 +++++++++++++++++++++++------------------- test/qa_data_sink.cpp | 337 +++++++++++++++++---------------- 2 files changed, 398 insertions(+), 366 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index 32d9faa0..c8af9f62 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -12,35 +12,35 @@ namespace fair::graph { -enum class blocking_mode { - NonBlocking, - Blocking -}; +enum class blocking_mode { NonBlocking, Blocking }; enum class trigger_observer_state { - Start, ///< Start a new dataset - Stop, ///< Finish dataset + Start, ///< Start a new dataset + Stop, ///< Finish dataset StopAndStart, ///< Finish pending dataset, start a new one - Ignore ///< Ignore tag + Ignore ///< Ignore tag }; // TODO is the scope where want these? struct null_type {}; +// Until clang-format can handle concepts +// clang-format off template concept TriggerPredicate = requires(const T p, tag_t tag) { - {p(tag)} -> std::convertible_to; + { p(tag) } -> std::convertible_to; }; template concept TriggerObserver = requires(T o, tag_t tag) { - {o(tag)} -> std::convertible_to; + { o(tag) } -> std::convertible_to; }; template concept TriggerObserverFactory = requires(T f) { - {f()} -> TriggerObserver; + { f() } -> TriggerObserver; }; +// clang-format on template class data_sink; @@ -49,35 +49,40 @@ struct data_sink_query { std::optional sink_name; std::optional signal_name; - static data_sink_query with_signal_name(std::string_view name) { - return {{}, std::string{name}}; + static data_sink_query + with_signal_name(std::string_view name) { + return { {}, std::string{ name } }; } - static data_sink_query with_sink_name(std::string_view name) { - return {std::string{name}, {}}; + static data_sink_query + with_sink_name(std::string_view name) { + return { std::string{ name }, {} }; } }; class data_sink_registry { - std::mutex mutex; + std::mutex mutex; std::vector sinks; public: // TODO this shouldn't be a singleton but associated with the flow graph (?) 
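For orientation, a client-side sketch of how this registry is consumed, mirroring the polling tests in this series. It is not standalone: it needs the data_sink.hpp added by these patches, and the sink name and float sample type are illustrative assumptions.

// Client-side usage sketch (mirrors the tests; the tests additionally check poller != nullptr).
#include "data_sink.hpp"
#include <vector>

std::vector<float> drain_stream() {
    using namespace fair::graph;
    auto poller = data_sink_registry::instance().get_streaming_poller<float>(
            data_sink_query::with_sink_name("test_sink"), blocking_mode::Blocking);

    std::vector<float> received;
    bool seen_finished = false;
    while (!seen_finished) {
        seen_finished = poller->finished; // check before draining, as the tests do
        while (poller->process([&received](const auto &data) {
            received.insert(received.end(), data.begin(), data.end());
        })) {}
    }
    return received;
}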
- static data_sink_registry& instance() { + static data_sink_registry & + instance() { static data_sink_registry s_instance; return s_instance; } template - void register_sink(data_sink *sink) { - std::lock_guard lg{mutex}; + void + register_sink(data_sink *sink) { + std::lock_guard lg{ mutex }; sinks.push_back(sink); } template - void unregister_sink(data_sink *sink) { - std::lock_guard lg{mutex}; + void + unregister_sink(data_sink *sink) { + std::lock_guard lg{ mutex }; std::erase_if(sinks, [sink](const std::any &v) { try { return std::any_cast *>(v) == sink; @@ -88,37 +93,42 @@ class data_sink_registry { } template - std::shared_ptr::poller> get_streaming_poller(const data_sink_query &query, blocking_mode block = blocking_mode::NonBlocking) { - std::lock_guard lg{mutex}; - auto sink = find_sink(query); + std::shared_ptr::poller> + get_streaming_poller(const data_sink_query &query, blocking_mode block = blocking_mode::NonBlocking) { + std::lock_guard lg{ mutex }; + auto sink = find_sink(query); return sink ? sink->get_streaming_poller(block) : nullptr; } template - std::shared_ptr::dataset_poller> get_trigger_poller(const data_sink_query &query, P p, std::size_t pre_samples, std::size_t post_samples, blocking_mode block = blocking_mode::NonBlocking) { - std::lock_guard lg{mutex}; - auto sink = find_sink(query); + std::shared_ptr::dataset_poller> + get_trigger_poller(const data_sink_query &query, P p, std::size_t pre_samples, std::size_t post_samples, blocking_mode block = blocking_mode::NonBlocking) { + std::lock_guard lg{ mutex }; + auto sink = find_sink(query); return sink ? sink->get_trigger_poller(std::forward
(p), pre_samples, post_samples, block) : nullptr; } template - std::shared_ptr::dataset_poller> get_multiplexed_poller(const data_sink_query &query, F triggerObserverFactory, std::size_t maximum_window_size, blocking_mode block = blocking_mode::NonBlocking) { - std::lock_guard lg{mutex}; - auto sink = find_sink(query); + std::shared_ptr::dataset_poller> + get_multiplexed_poller(const data_sink_query &query, F triggerObserverFactory, std::size_t maximum_window_size, blocking_mode block = blocking_mode::NonBlocking) { + std::lock_guard lg{ mutex }; + auto sink = find_sink(query); return sink ? sink->get_multiplexed_poller(std::forward(triggerObserverFactory), maximum_window_size, block) : nullptr; } template - std::shared_ptr::dataset_poller> get_snapshot_poller(const data_sink_query &query, P p, std::chrono::nanoseconds delay, blocking_mode block = blocking_mode::NonBlocking) { - std::lock_guard lg{mutex}; - auto sink = find_sink(query); + std::shared_ptr::dataset_poller> + get_snapshot_poller(const data_sink_query &query, P p, std::chrono::nanoseconds delay, blocking_mode block = blocking_mode::NonBlocking) { + std::lock_guard lg{ mutex }; + auto sink = find_sink(query); return sink ? sink->get_snapshot_poller(std::forward
(p), delay, block) : nullptr; } template - bool register_streaming_callback(const data_sink_query &query, std::size_t max_chunk_size, Callback callback) { - std::lock_guard lg{mutex}; - auto sink = find_sink(query); + bool + register_streaming_callback(const data_sink_query &query, std::size_t max_chunk_size, Callback callback) { + std::lock_guard lg{ mutex }; + auto sink = find_sink(query); if (!sink) { return false; } @@ -128,9 +138,10 @@ class data_sink_registry { } template - bool register_trigger_callback(const data_sink_query &query, P p, std::size_t pre_samples, std::size_t post_samples, Callback callback) { - std::lock_guard lg{mutex}; - auto sink = find_sink(query); + bool + register_trigger_callback(const data_sink_query &query, P p, std::size_t pre_samples, std::size_t post_samples, Callback callback) { + std::lock_guard lg{ mutex }; + auto sink = find_sink(query); if (!sink) { return false; } @@ -140,9 +151,10 @@ class data_sink_registry { } template - bool register_multiplexed_callback(const data_sink_query &query, std::size_t maximum_window_size, Callback callback) { - std::lock_guard lg{mutex}; - auto sink = find_sink(query); + bool + register_multiplexed_callback(const data_sink_query &query, std::size_t maximum_window_size, Callback callback) { + std::lock_guard lg{ mutex }; + auto sink = find_sink(query); if (!sink) { return false; } @@ -152,9 +164,10 @@ class data_sink_registry { } template - bool register_snapshot_callback(const data_sink_query &query, P p, std::chrono::nanoseconds delay, Callback callback) { - std::lock_guard lg{mutex}; - auto sink = find_sink(query); + bool + register_snapshot_callback(const data_sink_query &query, P p, std::chrono::nanoseconds delay, Callback callback) { + std::lock_guard lg{ mutex }; + auto sink = find_sink(query); if (!sink) { return false; } @@ -165,12 +178,12 @@ class data_sink_registry { private: template - data_sink* find_sink(const data_sink_query &query) { - + data_sink * + find_sink(const data_sink_query &query) { auto matches = [&query](const std::any &v) { try { - auto sink = std::any_cast*>(v); - const auto sink_name_matches = !query.sink_name || *query.sink_name == sink->name(); + auto sink = std::any_cast *>(v); + const auto sink_name_matches = !query.sink_name || *query.sink_name == sink->name(); const auto signal_name_matches = !query.signal_name || *query.signal_name == sink->signal_name; return sink_name_matches && signal_name_matches; } catch (...) 
{ @@ -183,30 +196,32 @@ class data_sink_registry { return nullptr; } - return std::any_cast*>(*it); + return std::any_cast *>(*it); } }; namespace detail { - template - std::span find_matching_prefix(std::span s, P predicate) { - const auto nm = std::find_if_not(s.begin(), s.end(), predicate); - if (nm == s.end()) { - return s; - } - return s.first(std::distance(s.begin(), nm)); +template +std::span +find_matching_prefix(std::span s, P predicate) { + const auto nm = std::find_if_not(s.begin(), s.end(), predicate); + if (nm == s.end()) { + return s; } + return s.first(std::distance(s.begin(), nm)); +} - template - bool copy_span(std::span src, std::span dst) { - assert(src.size() <= dst.size()); - if (src.size() > dst.size()) { - return false; - } - std::copy(src.begin(), src.end(), dst.begin()); - return true; +template +bool +copy_span(std::span src, std::span dst) { + assert(src.size() <= dst.size()); + if (src.size() > dst.size()) { + return false; } + std::copy(src.begin(), src.end(), dst.begin()); + return true; } +} // namespace detail /** * @brief generic data sink for exporting arbitrary-typed streams to non-GR C++ APIs. @@ -245,7 +260,7 @@ namespace detail { */ template class data_sink : public node> { -static constexpr std::size_t listener_buffer_size = 65536; + static constexpr std::size_t listener_buffer_size = 65536; public: Annotated, Unit<"Hz">> sample_rate = 10000.f; @@ -254,22 +269,22 @@ static constexpr std::size_t listener_buffer_size = 65536; Annotated> signal_min; Annotated> signal_max; - IN in; - + IN in; struct poller { - gr::circular_buffer buffer = gr::circular_buffer(listener_buffer_size); - decltype(buffer.new_reader()) reader = buffer.new_reader(); - decltype(buffer.new_writer()) writer = buffer.new_writer(); - gr::circular_buffer tag_buffer = gr::circular_buffer(1024); - decltype(tag_buffer.new_reader()) tag_reader = tag_buffer.new_reader(); - decltype(tag_buffer.new_writer()) tag_writer = tag_buffer.new_writer(); - std::size_t samples_read = 0; // reader thread - std::atomic finished = false; - std::atomic drop_count = 0; + gr::circular_buffer buffer = gr::circular_buffer(listener_buffer_size); + decltype(buffer.new_reader()) reader = buffer.new_reader(); + decltype(buffer.new_writer()) writer = buffer.new_writer(); + gr::circular_buffer tag_buffer = gr::circular_buffer(1024); + decltype(tag_buffer.new_reader()) tag_reader = tag_buffer.new_reader(); + decltype(tag_buffer.new_writer()) tag_writer = tag_buffer.new_writer(); + std::size_t samples_read = 0; // reader thread + std::atomic finished = false; + std::atomic drop_count = 0; template - [[nodiscard]] bool process(Handler fnc) { + [[nodiscard]] bool + process(Handler fnc) { const auto available = reader.available(); if (available == 0) { return false; @@ -277,9 +292,9 @@ static constexpr std::size_t listener_buffer_size = 65536; const auto read_data = reader.get(available); if constexpr (requires { fnc(std::span(), std::span()); }) { - const auto tags = tag_reader.get(); - const auto it = std::find_if_not(tags.begin(), tags.end(), [until = static_cast(samples_read + available)](const auto &tag) { return tag.index < until; }); - auto relevant_tags = std::vector(tags.begin(), it); + const auto tags = tag_reader.get(); + const auto it = std::find_if_not(tags.begin(), tags.end(), [until = static_cast(samples_read + available)](const auto &tag) { return tag.index < until; }); + auto relevant_tags = std::vector(tags.begin(), it); for (auto &t : relevant_tags) { t.index -= static_cast(samples_read); } @@ -297,14 
+312,15 @@ static constexpr std::size_t listener_buffer_size = 65536; }; struct dataset_poller { - gr::circular_buffer> buffer = gr::circular_buffer>(listener_buffer_size); - decltype(buffer.new_reader()) reader = buffer.new_reader(); - decltype(buffer.new_writer()) writer = buffer.new_writer(); + gr::circular_buffer> buffer = gr::circular_buffer>(listener_buffer_size); + decltype(buffer.new_reader()) reader = buffer.new_reader(); + decltype(buffer.new_writer()) writer = buffer.new_writer(); - std::atomic finished = false; - std::atomic drop_count = 0; + std::atomic finished = false; + std::atomic drop_count = 0; - [[nodiscard]] bool process_bulk(std::invocable>> auto fnc) { + [[nodiscard]] bool + process_bulk(std::invocable>> auto fnc) { const auto available = reader.available(); if (available == 0) { return false; @@ -316,7 +332,8 @@ static constexpr std::size_t listener_buffer_size = 65536; return true; } - [[nodiscard]] bool process_one(std::invocable> auto fnc) { + [[nodiscard]] bool + process_one(std::invocable> auto fnc) { const auto available = reader.available(); if (available == 0) { return false; @@ -332,40 +349,41 @@ static constexpr std::size_t listener_buffer_size = 65536; private: struct abstract_listener_t { virtual ~abstract_listener_t() = default; - virtual void set_sample_rate(float) {} - virtual void process(std::span history, std::span data, std::optional tag_data0) = 0; - virtual void flush() = 0; + + virtual void + set_sample_rate(float) {} + + virtual void + process(std::span history, std::span data, std::optional tag_data0) + = 0; + virtual void + flush() = 0; }; template struct continuous_listener_t : public abstract_listener_t { - static constexpr auto has_callback = !std::is_same_v; + static constexpr auto has_callback = !std::is_same_v; static constexpr auto callback_takes_tags = std::is_invocable_v, std::span>; - bool block = false; - std::size_t samples_written = 0; + bool block = false; + std::size_t samples_written = 0; // callback-only - std::size_t buffer_fill = 0; - std::vector buffer; + std::size_t buffer_fill = 0; + std::vector buffer; std::vector tag_buffer; // polling-only std::weak_ptr polling_handler = {}; - Callback callback; + Callback callback; - explicit continuous_listener_t(std::size_t max_chunk_size, Callback c) - : buffer(max_chunk_size) - , callback{std::forward(c)} - {} + explicit continuous_listener_t(std::size_t max_chunk_size, Callback c) : buffer(max_chunk_size), callback{ std::forward(c) } {} - explicit continuous_listener_t(std::shared_ptr poller, bool do_block) - : block(do_block) - , polling_handler{std::move(poller)} - {} + explicit continuous_listener_t(std::shared_ptr poller, bool do_block) : block(do_block), polling_handler{ std::move(poller) } {} - void process(std::span, std::span data, std::optional tag_data0) override { + void + process(std::span, std::span data, std::optional tag_data0) override { using namespace fair::graph::detail; if constexpr (has_callback) { @@ -375,7 +393,7 @@ static constexpr std::size_t listener_buffer_size = 65536; detail::copy_span(data.first(n), std::span(buffer).subspan(buffer_fill, n)); if constexpr (callback_takes_tags) { if (tag_data0) { - tag_buffer.push_back({static_cast(buffer_fill), *tag_data0}); + tag_buffer.push_back({ static_cast(buffer_fill), *tag_data0 }); tag_data0.reset(); } } @@ -399,7 +417,7 @@ static constexpr std::size_t listener_buffer_size = 65536; if constexpr (callback_takes_tags) { std::vector tags; if (tag_data0) { - tags.push_back({0, std::move(*tag_data0)}); + 
tags.push_back({ 0, std::move(*tag_data0) }); tag_data0.reset(); } callback(data.first(buffer.size()), std::span(tags)); @@ -416,7 +434,7 @@ static constexpr std::size_t listener_buffer_size = 65536; buffer_fill = data.size(); if constexpr (callback_takes_tags) { if (tag_data0) { - tag_buffer.push_back({0, std::move(*tag_data0)}); + tag_buffer.push_back({ 0, std::move(*tag_data0) }); } } } @@ -435,7 +453,7 @@ static constexpr std::size_t listener_buffer_size = 65536; write_data.publish(write_data.size()); if (tag_data0) { auto tw = poller->tag_writer.reserve_output_range(1); - tw[0] = {static_cast(samples_written), std::move(*tag_data0)}; + tw[0] = { static_cast(samples_written), std::move(*tag_data0) }; tw.publish(1); } } @@ -444,7 +462,8 @@ static constexpr std::size_t listener_buffer_size = 65536; } } - void flush() override { + void + flush() override { if constexpr (has_callback) { if (buffer_fill > 0) { if constexpr (callback_takes_tags) { @@ -464,41 +483,33 @@ static constexpr std::size_t listener_buffer_size = 65536; }; struct pending_window_t { - DataSet dataset; + DataSet dataset; std::size_t pending_post_samples = 0; }; template struct trigger_listener_t : public abstract_listener_t { - bool block = false; - std::size_t pre_samples = 0; - std::size_t post_samples = 0; + bool block = false; + std::size_t pre_samples = 0; + std::size_t post_samples = 0; - P trigger_predicate = {}; - std::deque pending_trigger_windows; // triggers that still didn't receive all their data + P trigger_predicate = {}; + std::deque pending_trigger_windows; // triggers that still didn't receive all their data std::weak_ptr polling_handler = {}; - Callback callback; + Callback callback; explicit trigger_listener_t(P predicate, std::shared_ptr handler, std::size_t pre, std::size_t post, bool do_block) - : block(do_block) - , pre_samples(pre) - , post_samples(post) - , trigger_predicate(std::forward
<P>
(predicate)) - , polling_handler{std::move(handler)} - {} + : block(do_block), pre_samples(pre), post_samples(post), trigger_predicate(std::forward
<P>
(predicate)), polling_handler{ std::move(handler) } {} explicit trigger_listener_t(P predicate, std::size_t pre, std::size_t post, Callback cb) - : pre_samples(pre) - , post_samples(post) - , trigger_predicate(std::forward
<P>
(predicate)) - , callback{std::forward(cb)} - {} + : pre_samples(pre), post_samples(post), trigger_predicate(std::forward
<P>
(predicate)), callback{ std::forward(cb) } {} // TODO all the dataset-based listeners could share publish_dataset and parts of flush (closing pollers), // but if we want to use different datastructures/pass additional info, this might become moot again, so // I leave it as is for now. - inline void publish_dataset(DataSet &&data) { + inline void + publish_dataset(DataSet &&data) { if constexpr (!std::is_same_v) { callback(std::move(data)); } else { @@ -522,8 +533,9 @@ static constexpr std::size_t listener_buffer_size = 65536; } } - void process(std::span history, std::span in_data, std::optional tag_data0) override { - if (tag_data0 && trigger_predicate(tag_t{0, *tag_data0})) { + void + process(std::span history, std::span in_data, std::optional tag_data0) override { + if (tag_data0 && trigger_predicate(tag_t{ 0, *tag_data0 })) { // TODO fill dataset with metadata etc. DataSet dataset; dataset.signal_values.reserve(pre_samples + post_samples); // TODO maybe make the circ. buffer smaller but preallocate these @@ -531,8 +543,8 @@ static constexpr std::size_t listener_buffer_size = 65536; const auto pre_sample_view = history.last(std::min(pre_samples, history.size())); dataset.signal_values.insert(dataset.signal_values.end(), pre_sample_view.begin(), pre_sample_view.end()); - dataset.timing_events = {{{static_cast(pre_sample_view.size()), *tag_data0}}}; - pending_trigger_windows.push_back({.dataset = std::move(dataset), .pending_post_samples = post_samples}); + dataset.timing_events = { { { static_cast(pre_sample_view.size()), *tag_data0 } } }; + pending_trigger_windows.push_back({ .dataset = std::move(dataset), .pending_post_samples = post_samples }); } auto window = pending_trigger_windows.begin(); @@ -550,7 +562,8 @@ static constexpr std::size_t listener_buffer_size = 65536; } } - void flush() override { + void + flush() override { for (auto &window : pending_trigger_windows) { if (!window.dataset.signal_values.empty()) { publish_dataset(std::move(window.dataset)); @@ -565,18 +578,22 @@ static constexpr std::size_t listener_buffer_size = 65536; template struct multiplexed_listener_t : public abstract_listener_t { - bool block = false; - F observerFactory; - decltype(observerFactory()) observer; - std::optional> pending_dataset; - std::size_t maximum_window_size; + bool block = false; + F observerFactory; + decltype(observerFactory()) observer; + std::optional> pending_dataset; + std::size_t maximum_window_size; std::weak_ptr polling_handler = {}; - Callback callback; + Callback callback; + + explicit multiplexed_listener_t(F factory, std::size_t max_window_size, Callback cb) + : observerFactory(factory), observer(observerFactory()), maximum_window_size(max_window_size), callback(cb) {} - explicit multiplexed_listener_t(F factory, std::size_t max_window_size, Callback cb) : observerFactory(factory), observer(observerFactory()), maximum_window_size(max_window_size), callback(cb) {} - explicit multiplexed_listener_t(F factory, std::size_t max_window_size, std::shared_ptr handler, bool do_block) : block(do_block), observerFactory(factory), observer(observerFactory()), maximum_window_size(max_window_size), polling_handler{std::move(handler)} {} + explicit multiplexed_listener_t(F factory, std::size_t max_window_size, std::shared_ptr handler, bool do_block) + : block(do_block), observerFactory(factory), observer(observerFactory()), maximum_window_size(max_window_size), polling_handler{ std::move(handler) } {} - inline void publish_dataset(DataSet &&data) { + inline void + publish_dataset(DataSet 
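// --- Hedged usage sketch (illustrative, not part of this patch): ---
// a TriggerPredicate that fires on tags carrying {"TYPE", "TRIGGER"}, registered with
// 3000 pre- and 2000 post-samples, mirroring the predicates used in qa_data_sink.cpp.
// `sink` is assumed to be an existing fair::graph::data_sink<float> instance.
auto is_trigger = [](const fair::graph::tag_t &tag) {
    const auto v = tag.get("TYPE");
    return v && std::get<std::string>(v->get()) == "TRIGGER";
};
sink.register_trigger_callback(is_trigger, /*pre_samples=*/3000, /*post_samples=*/2000, [](auto &&dataset) {
    // invoked once per matching tag; dataset.signal_values holds the pre- + post-samples,
    // dataset.timing_events[0][0].index marks the trigger position inside that window.
});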
&&data) { if constexpr (!std::is_same_v) { callback(std::move(data)); } else { @@ -600,9 +617,10 @@ static constexpr std::size_t listener_buffer_size = 65536; } } - void process(std::span, std::span in_data, std::optional tag_data0) override { + void + process(std::span, std::span in_data, std::optional tag_data0) override { if (tag_data0) { - const auto obsr = observer(tag_t{0, *tag_data0}); + const auto obsr = observer(tag_t{ 0, *tag_data0 }); // TODO set proper error state instead of throwing if (obsr == trigger_observer_state::Stop || obsr == trigger_observer_state::StopAndStart) { if (obsr == trigger_observer_state::Stop && !pending_dataset) { @@ -611,7 +629,7 @@ static constexpr std::size_t listener_buffer_size = 65536; if (pending_dataset) { if (obsr == trigger_observer_state::Stop) { - pending_dataset->timing_events[0].push_back({static_cast(pending_dataset->signal_values.size()), *tag_data0}); + pending_dataset->timing_events[0].push_back({ static_cast(pending_dataset->signal_values.size()), *tag_data0 }); } publish_dataset(std::move(*pending_dataset)); pending_dataset.reset(); @@ -623,12 +641,12 @@ static constexpr std::size_t listener_buffer_size = 65536; } pending_dataset = DataSet(); pending_dataset->signal_values.reserve(maximum_window_size); // TODO might be too much? - pending_dataset->timing_events = {{{0, *tag_data0}}}; + pending_dataset->timing_events = { { { 0, *tag_data0 } } }; } } if (pending_dataset) { const auto to_write = std::min(in_data.size(), maximum_window_size - pending_dataset->signal_values.size()); - const auto view = in_data.first(to_write); + const auto view = in_data.first(to_write); pending_dataset->signal_values.insert(pending_dataset->signal_values.end(), view.begin(), view.end()); if (pending_dataset->signal_values.size() == maximum_window_size) { @@ -638,10 +656,11 @@ static constexpr std::size_t listener_buffer_size = 65536; } } - void flush() override { + void + flush() override { if (pending_dataset) { - publish_dataset(std::move(*pending_dataset)); - pending_dataset.reset(); + publish_dataset(std::move(*pending_dataset)); + pending_dataset.reset(); } if (auto p = polling_handler.lock()) { p->finished = true; @@ -651,24 +670,27 @@ static constexpr std::size_t listener_buffer_size = 65536; struct pending_snapshot { property_map tag_data; - std::size_t delay = 0; - std::size_t pending_samples = 0; + std::size_t delay = 0; + std::size_t pending_samples = 0; }; template struct snapshot_listener_t : public abstract_listener_t { - bool block = false; - std::chrono::nanoseconds time_delay; - std::size_t sample_delay = 0; - P trigger_predicate = {}; - std::deque pending; + bool block = false; + std::chrono::nanoseconds time_delay; + std::size_t sample_delay = 0; + P trigger_predicate = {}; + std::deque pending; std::weak_ptr polling_handler = {}; - Callback callback; + Callback callback; + + explicit snapshot_listener_t(P p, std::chrono::nanoseconds delay, std::shared_ptr poller, bool do_block) + : block(do_block), time_delay(delay), trigger_predicate(std::forward
<P>
(p)), polling_handler{ std::move(poller) } {} - explicit snapshot_listener_t(P p, std::chrono::nanoseconds delay, std::shared_ptr poller, bool do_block) : block(do_block), time_delay(delay), trigger_predicate(std::forward
<P>
(p)), polling_handler{std::move(poller)} {} explicit snapshot_listener_t(P p, std::chrono::nanoseconds delay, Callback cb) : trigger_predicate(std::forward
<P>
(p)), time_delay(std::forward(cb)) {} - inline void publish_dataset(DataSet &&data) { + inline void + publish_dataset(DataSet &&data) { if constexpr (!std::is_same_v) { callback(std::move(data)); } else { @@ -692,14 +714,16 @@ static constexpr std::size_t listener_buffer_size = 65536; } } - void set_sample_rate(float r) override { + void + set_sample_rate(float r) override { sample_delay = std::round(std::chrono::duration_cast>(time_delay).count() * r); // TODO do we need to update the requested_samples of pending here? (considering both old and new time_delay) } - void process(std::span, std::span in_data, std::optional tag_data0) override { - if (tag_data0 && trigger_predicate({0, *tag_data0})) { - auto new_pending = pending_snapshot{*tag_data0, sample_delay, sample_delay}; + void + process(std::span, std::span in_data, std::optional tag_data0) override { + if (tag_data0 && trigger_predicate({ 0, *tag_data0 })) { + auto new_pending = pending_snapshot{ *tag_data0, sample_delay, sample_delay }; // make sure pending is sorted by number of pending_samples (insertion might be not at end if sample rate decreased; TODO unless we adapt them in set_sample_rate, see there) auto rit = std::find_if(pending.rbegin(), pending.rend(), [delay = sample_delay](const auto &other) { return other.pending_samples < delay; }); pending.insert(rit.base(), std::move(new_pending)); @@ -713,15 +737,16 @@ static constexpr std::size_t listener_buffer_size = 65536; } DataSet dataset; - dataset.timing_events = {{{-static_cast(it->delay), std::move(it->tag_data)}}}; - dataset.signal_values = {in_data[it->pending_samples]}; + dataset.timing_events = { { { -static_cast(it->delay), std::move(it->tag_data) } } }; + dataset.signal_values = { in_data[it->pending_samples] }; publish_dataset(std::move(dataset)); it = pending.erase(it); } } - void flush() override { + void + flush() override { pending.clear(); if (auto p = polling_handler.lock()) { p->finished = true; @@ -730,29 +755,27 @@ static constexpr std::size_t listener_buffer_size = 65536; }; std::deque> listeners; - std::mutex listener_mutex; + std::mutex listener_mutex; public: - data_sink() { - data_sink_registry::instance().register_sink(this); - } + data_sink() { data_sink_registry::instance().register_sink(this); } - ~data_sink() { - data_sink_registry::instance().unregister_sink(this); - } + ~data_sink() { data_sink_registry::instance().unregister_sink(this); } - std::shared_ptr get_streaming_poller(blocking_mode block_mode = blocking_mode::NonBlocking) { + std::shared_ptr + get_streaming_poller(blocking_mode block_mode = blocking_mode::NonBlocking) { std::lock_guard lg(listener_mutex); - const auto block = block_mode == blocking_mode::Blocking; - auto handler = std::make_shared(); + const auto block = block_mode == blocking_mode::Blocking; + auto handler = std::make_shared(); add_listener(std::make_unique>(handler, block), block); return handler; } template - std::shared_ptr get_trigger_poller(TriggerPredicate p, std::size_t pre_samples, std::size_t post_samples, blocking_mode block_mode = blocking_mode::NonBlocking) { - const auto block = block_mode == blocking_mode::Blocking; - auto handler = std::make_shared(); + std::shared_ptr + get_trigger_poller(TriggerPredicate p, std::size_t pre_samples, std::size_t post_samples, blocking_mode block_mode = blocking_mode::NonBlocking) { + const auto block = block_mode == blocking_mode::Blocking; + auto handler = std::make_shared(); std::lock_guard lg(listener_mutex); add_listener(std::make_unique>(std::forward(p), 
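// --- Worked example for set_sample_rate() above (illustrative, not part of this patch): ---
// with time_delay = 500 ms and sample_rate = 10 kHz (the value the tests assume),
//   sample_delay = round(0.5 s * 10000 samples/s) = 5000 samples,
// i.e. the snapshot listener grabs the single sample 5000 samples after the matching tag,
// which is what the "blocking polling snapshot mode" test below relies on.
// Hedged request sketch (`sink` and `is_trigger` as in the earlier sketches):
auto snapshot_poller = sink.get_snapshot_poller(is_trigger, std::chrono::milliseconds{ 500 },
                                                fair::graph::blocking_mode::Blocking);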
handler, pre_samples, post_samples, block), block); ensure_history_size(pre_samples); @@ -760,55 +783,63 @@ static constexpr std::size_t listener_buffer_size = 65536; } template - std::shared_ptr get_multiplexed_poller(F triggerObserverFactory, std::size_t maximum_window_size, blocking_mode block_mode = blocking_mode::NonBlocking) { + std::shared_ptr + get_multiplexed_poller(F triggerObserverFactory, std::size_t maximum_window_size, blocking_mode block_mode = blocking_mode::NonBlocking) { std::lock_guard lg(listener_mutex); - const auto block = block_mode == blocking_mode::Blocking; - auto handler = std::make_shared(); + const auto block = block_mode == blocking_mode::Blocking; + auto handler = std::make_shared(); add_listener(std::make_unique>(std::move(triggerObserverFactory), maximum_window_size, handler, block), block); return handler; } template - std::shared_ptr get_snapshot_poller(P p, std::chrono::nanoseconds delay, blocking_mode block_mode = blocking_mode::NonBlocking) { - const auto block = block_mode == blocking_mode::Blocking; - auto handler = std::make_shared(); + std::shared_ptr + get_snapshot_poller(P p, std::chrono::nanoseconds delay, blocking_mode block_mode = blocking_mode::NonBlocking) { + const auto block = block_mode == blocking_mode::Blocking; + auto handler = std::make_shared(); std::lock_guard lg(listener_mutex); add_listener(std::make_unique>(std::forward
<P>
(p), delay, handler, block), block); return handler; } template - void register_streaming_callback(std::size_t max_chunk_size, Callback callback) { + void + register_streaming_callback(std::size_t max_chunk_size, Callback callback) { add_listener(std::make_unique>(max_chunk_size, std::forward(callback)), false); } template - void register_trigger_callback(P p, std::size_t pre_samples, std::size_t post_samples, Callback callback) { + void + register_trigger_callback(P p, std::size_t pre_samples, std::size_t post_samples, Callback callback) { add_listener(std::make_unique>(std::forward
<P>
(p), pre_samples, post_samples, std::forward(callback)), false); ensure_history_size(pre_samples); } template - void register_multiplexed_callback(F triggerObserverFactory, std::size_t maximum_window_size, Callback callback) { + void + register_multiplexed_callback(F triggerObserverFactory, std::size_t maximum_window_size, Callback callback) { std::lock_guard lg(listener_mutex); add_listener(std::make_unique(std::move(triggerObserverFactory), maximum_window_size, std::forward(callback)), false); } template - void register_snapshot_callback(P p, std::chrono::nanoseconds delay, Callback callback) { + void + register_snapshot_callback(P p, std::chrono::nanoseconds delay, Callback callback) { std::lock_guard lg(listener_mutex); add_listener(std::make_unique(std::forward
<P>
(p), delay, std::forward(callback)), false); } // TODO this code should be called at the end of graph processing - void stop() noexcept { + void + stop() noexcept { std::lock_guard lg(listener_mutex); for (auto &listener : listeners) { listener->flush(); } } - [[nodiscard]] work_return_t process_bulk(std::span in_data) noexcept { + [[nodiscard]] work_return_t + process_bulk(std::span in_data) noexcept { std::optional tagData; if (this->input_tags_present()) { assert(this->input_tags()[0].index == 0); @@ -817,7 +848,7 @@ static constexpr std::size_t listener_buffer_size = 65536; { std::lock_guard lg(listener_mutex); - const auto history_view = history.get_span(0); + const auto history_view = history.get_span(0); for (auto &listener : listeners) { listener->process(history_view, in_data, tagData); } @@ -833,7 +864,8 @@ static constexpr std::size_t listener_buffer_size = 65536; private: gr::history_buffer history = gr::history_buffer(1); - void ensure_history_size(std::size_t new_size) { + void + ensure_history_size(std::size_t new_size) { // TODO transitional, do not reallocate/copy, but create a shared buffer with size N, // and a per-listener history buffer where more than N samples is needed. auto new_history = gr::history_buffer(std::max(new_size, history.capacity())); @@ -841,7 +873,8 @@ static constexpr std::size_t listener_buffer_size = 65536; std::swap(history, new_history); } - void add_listener(std::unique_ptr&& l, bool block) { + void + add_listener(std::unique_ptr &&l, bool block) { l->set_sample_rate(sample_rate); // TODO also call when sample_rate changes if (block) { listeners.push_back(std::move(l)); @@ -851,7 +884,7 @@ static constexpr std::size_t listener_buffer_size = 65536; } }; -} +} // namespace fair::graph ENABLE_REFLECTION_FOR_TEMPLATE_FULL((typename T), (fair::graph::data_sink), in, sample_rate); diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index 6d24c645..96df1bc0 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -21,9 +21,14 @@ auto boost::ut::cfg = boost::ut::runner struct fmt::formatter { template - constexpr auto parse(ParseContext &ctx) { return ctx.begin(); } + constexpr auto + parse(ParseContext &ctx) { + return ctx.begin(); + } + template - constexpr auto format(const fair::graph::tag_t &tag, FormatContext &ctx) const { + constexpr auto + format(const fair::graph::tag_t &tag, FormatContext &ctx) const { return fmt::format_to(ctx.out(), "{}", tag.index); } }; @@ -34,13 +39,13 @@ static constexpr std::int32_t n_samples = 200000; template struct Source : public node> { - OUT out; - std::int32_t n_samples_produced = 0; - std::int32_t n_samples_max = 1024; - std::size_t n_tag_offset = 0; - float sample_rate = 1000.0f; - T next_value = {}; - std::size_t next_tag = 0; + OUT out; + std::int32_t n_samples_produced = 0; + std::int32_t n_samples_max = 1024; + std::size_t n_tag_offset = 0; + float sample_rate = 1000.0f; + T next_value = {}; + std::size_t next_tag = 0; std::vector tags; // must be sorted by index, only one tag per sample void @@ -56,14 +61,15 @@ struct Source : public node> { return ret > 0 ? 
1 : -1; // '-1' -> DONE, produced enough samples } - T process_one() noexcept { + T + process_one() noexcept { if (next_tag < tags.size() && tags[next_tag].index <= static_cast>(n_samples_produced)) { tag_t &out_tag = this->output_tags()[0]; // TODO when not enforcing single samples in available_samples, one would have to do: // const auto base = std::max(out.streamWriter().position() + 1, tag_t::signed_index_type{0}); // out_tag = tag_t{ tags[next_tag].index - base, tags[next_tag].map }; // Still think there could be nicer API to set a tag from process_one() - out_tag = tag_t{ 0 , tags[next_tag].map }; + out_tag = tag_t{ 0, tags[next_tag].map }; this->forward_tags(); next_tag++; } @@ -86,22 +92,26 @@ struct Source : public node> { * (Note that the TriggerObserver is stateful and remembers the last tag seen, other than a stateless TriggerPredicate) */ struct Observer { - std::optional year; - std::optional month; - std::optional day; + std::optional year; + std::optional month; + std::optional day; std::optional> last_seen; - bool last_matched = false; + bool last_matched = false; explicit Observer(std::optional year_, std::optional month_, std::optional day_) : year(year_), month(month_), day(day_) {} - static inline bool same(int x, std::optional other) { + static inline bool + same(int x, std::optional other) { return other && x == *other; } - static inline bool changed(int x, std::optional other) { + + static inline bool + changed(int x, std::optional other) { return !same(x, other); } - trigger_observer_state operator()(const tag_t &tag) { + trigger_observer_state + operator()(const tag_t &tag) { const auto ty = tag.get("Y"); const auto tm = tag.get("M"); const auto td = tag.get("D"); @@ -109,22 +119,22 @@ struct Observer { return trigger_observer_state::Ignore; } - const auto tup = std::make_tuple(std::get(ty->get()), std::get(tm->get()), std::get(td->get())); - const auto &[y, m, d] = tup; - const auto ly = last_seen ? std::optional(std::get<0>(*last_seen)) : std::nullopt; - const auto lm = last_seen ? std::optional(std::get<1>(*last_seen)) : std::nullopt; - const auto ld = last_seen ? std::optional(std::get<2>(*last_seen)) : std::nullopt; + const auto tup = std::make_tuple(std::get(ty->get()), std::get(tm->get()), std::get(td->get())); + const auto &[y, m, d] = tup; + const auto ly = last_seen ? std::optional(std::get<0>(*last_seen)) : std::nullopt; + const auto lm = last_seen ? std::optional(std::get<1>(*last_seen)) : std::nullopt; + const auto ld = last_seen ? 
std::optional(std::get<2>(*last_seen)) : std::nullopt; - const auto year_restart = year && *year == -1 && changed(y, ly); - const auto year_matches = !year || *year == -1 || same(y, year); - const auto month_restart = month && *month == -1 && changed(m, lm); - const auto month_matches = !month || *month == -1 || same(m, month); - const auto day_restart = day && *day == -1 && changed(d, ld); - const auto day_matches = !day || *day == -1 || same(d, day); - const auto matches = year_matches && month_matches && day_matches; - const auto restart = year_restart || month_restart || day_restart; + const auto year_restart = year && *year == -1 && changed(y, ly); + const auto year_matches = !year || *year == -1 || same(y, year); + const auto month_restart = month && *month == -1 && changed(m, lm); + const auto month_matches = !month || *month == -1 || same(m, month); + const auto day_restart = day && *day == -1 && changed(d, ld); + const auto day_matches = !day || *day == -1 || same(d, day); + const auto matches = year_matches && month_matches && day_matches; + const auto restart = year_restart || month_restart || day_restart; - trigger_observer_state r = trigger_observer_state::Ignore; + trigger_observer_state r = trigger_observer_state::Ignore; if (last_matched && !matches) { r = trigger_observer_state::Stop; @@ -134,17 +144,19 @@ struct Observer { r = trigger_observer_state::StopAndStart; } - last_seen = tup; + last_seen = tup; last_matched = matches; return r; } }; -static tag_t make_tag(tag_t::signed_index_type index, int year, int month, int day) { - return tag_t{index, {{"Y", year}, {"M", month}, {"D", day}}}; +static tag_t +make_tag(tag_t::signed_index_type index, int year, int month, int day) { + return tag_t{ index, { { "Y", year }, { "M", month }, { "D", day } } }; } -static std::vector make_test_tags(tag_t::signed_index_type first_index, tag_t::signed_index_type interval) { +static std::vector +make_test_tags(tag_t::signed_index_type first_index, tag_t::signed_index_type interval) { std::vector tags; for (int y = 1; y <= 3; ++y) { for (int m = 1; m <= 2; ++m) { @@ -157,8 +169,9 @@ static std::vector make_test_tags(tag_t::signed_index_type first_index, t return tags; } -static std::string to_ascii_art(std::span states) { - bool started = false; +static std::string +to_ascii_art(std::span states) { + bool started = false; std::string r; for (auto s : states) { switch (s) { @@ -174,17 +187,16 @@ static std::string to_ascii_art(std::span states) { r += started ? "||#" : "|#"; started = true; break; - case trigger_observer_state::Ignore: - r += started ? "#" : "_"; - break; + case trigger_observer_state::Ignore: r += started ? 
"#" : "_"; break; } }; return r; } template -std::string run_observer_test(std::span tags, O o) { - std::vector r; +std::string +run_observer_test(std::span tags, O o) { + std::vector r; r.reserve(tags.size()); for (const auto &tag : tags) { r.push_back(o(tag)); @@ -196,17 +208,16 @@ std::string run_observer_test(std::span tags, O o) { ENABLE_REFLECTION_FOR_TEMPLATE_FULL((typename T), (fair::graph::data_sink_test::Source), out, n_samples_produced, n_samples_max, n_tag_offset, sample_rate); - template -std::string format_list(const T &l) { +std::string +format_list(const T &l) { return fmt::format("[{}]", fmt::join(l, ", ")); } template -bool indexes_match(const T& lhs, const U& rhs) { - auto index_match = [](const auto &l, const auto &r) { - return l.index == r.index; - }; +bool +indexes_match(const T &lhs, const U &rhs) { + auto index_match = [](const auto &l, const auto &r) { return l.index == r.index; }; return std::equal(std::begin(lhs), std::end(lhs), std::begin(rhs), std::end(rhs), index_match); } @@ -218,22 +229,22 @@ const boost::ut::suite DataSinkTests = [] { using namespace std::string_literals; "callback continuous mode"_test = [] { - static constexpr std::int32_t n_samples = 200005; - static constexpr std::size_t chunk_size = 1000; + static constexpr std::int32_t n_samples = 200005; + static constexpr std::size_t chunk_size = 1000; - const auto src_tags = make_test_tags(0, 1000); + const auto src_tags = make_test_tags(0, 1000); - graph flow_graph; - auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); - auto &sink = flow_graph.make_node>(); - src.tags = src_tags; + graph flow_graph; + auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); + auto &sink = flow_graph.make_node>(); + src.tags = src_tags; sink.set_name("test_sink"); expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); std::atomic samples_seen1 = 0; - std::atomic chunks_seen1 = 0; - auto callback = [&samples_seen1, &chunks_seen1](std::span buffer) { + std::atomic chunks_seen1 = 0; + auto callback = [&samples_seen1, &chunks_seen1](std::span buffer) { for (std::size_t i = 0; i < buffer.size(); ++i) { expect(eq(buffer[i], static_cast(samples_seen1 + i))); } @@ -247,11 +258,11 @@ const boost::ut::suite DataSinkTests = [] { } }; - std::mutex m2; - std::size_t samples_seen2 = 0; - std::size_t chunks_seen2 = 0; + std::mutex m2; + std::size_t samples_seen2 = 0; + std::size_t chunks_seen2 = 0; std::vector received_tags; - auto callback_with_tags = [&samples_seen2, &chunks_seen2, &m2, &received_tags](std::span buffer, std::span tags) { + auto callback_with_tags = [&samples_seen2, &chunks_seen2, &m2, &received_tags](std::span buffer, std::span tags) { for (std::size_t i = 0; i < buffer.size(); ++i) { expect(eq(buffer[i], static_cast(samples_seen2 + i))); } @@ -261,9 +272,11 @@ const boost::ut::suite DataSinkTests = [] { lt(tag.index, samples_seen2 + buffer.size()); } - auto lg = std::lock_guard{m2}; + auto lg = std::lock_guard{ m2 }; std::vector adjusted; - std::transform(tags.begin(), tags.end(), std::back_inserter(adjusted), [samples_seen2](const auto &tag) { return tag_t{static_cast(samples_seen2) + tag.index, tag.map}; }); + std::transform(tags.begin(), tags.end(), std::back_inserter(adjusted), [samples_seen2](const auto &tag) { + return tag_t{ static_cast(samples_seen2) + tag.index, tag.map }; + }); received_tags.insert(received_tags.end(), adjusted.begin(), adjusted.end()); samples_seen2 += buffer.size(); chunks_seen2++; @@ -277,12 +290,12 @@ const 
boost::ut::suite DataSinkTests = [] { expect(data_sink_registry::instance().register_streaming_callback(data_sink_query::with_sink_name("test_sink"), chunk_size, callback)); expect(data_sink_registry::instance().register_streaming_callback(data_sink_query::with_sink_name("test_sink"), chunk_size, callback_with_tags)); - fair::graph::scheduler::simple sched{std::move(flow_graph)}; + fair::graph::scheduler::simple sched{ std::move(flow_graph) }; sched.work(); sink.stop(); // TODO the scheduler should call this - auto lg = std::lock_guard{m2}; + auto lg = std::lock_guard{ m2 }; expect(eq(chunks_seen1.load(), 201)); expect(eq(chunks_seen2, 201)); expect(eq(samples_seen1.load(), n_samples)); @@ -291,61 +304,60 @@ const boost::ut::suite DataSinkTests = [] { }; "blocking polling continuous mode"_test = [] { - constexpr std::int32_t n_samples = 200000; - graph flow_graph; - const auto tags = make_test_tags(0, 1000); - auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); - src.tags = tags; - auto &sink = flow_graph.make_node>(); + graph flow_graph; + const auto tags = make_test_tags(0, 1000); + auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); + src.tags = tags; + auto &sink = flow_graph.make_node>(); sink.set_name("test_sink"); expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); - std::atomic samples_seen = 0; + std::atomic samples_seen = 0; - auto poller_data_only = data_sink_registry::instance().get_streaming_poller(data_sink_query::with_sink_name("test_sink"), blocking_mode::Blocking); + auto poller_data_only = data_sink_registry::instance().get_streaming_poller(data_sink_query::with_sink_name("test_sink"), blocking_mode::Blocking); expect(neq(poller_data_only, nullptr)); auto poller_with_tags = data_sink_registry::instance().get_streaming_poller(data_sink_query::with_sink_name("test_sink"), blocking_mode::Blocking); expect(neq(poller_with_tags, nullptr)); - auto runner1 = std::async([poller = poller_data_only] { + auto runner1 = std::async([poller = poller_data_only] { std::vector received; - bool seen_finished = false; + bool seen_finished = false; while (!seen_finished) { // TODO make finished vs. pending data handling actually thread-safe seen_finished = poller->finished; - while (poller->process([&received](const auto &data) { - received.insert(received.end(), data.begin(), data.end()); - })) {} + while (poller->process([&received](const auto &data) { received.insert(received.end(), data.begin(), data.end()); })) { + } } return received; }); - auto runner2 = std::async([poller = poller_with_tags] { + auto runner2 = std::async([poller = poller_with_tags] { std::vector received; std::vector received_tags; - bool seen_finished = false; + bool seen_finished = false; while (!seen_finished) { // TODO make finished vs. 
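// --- Hedged note on the polling pattern used in these tests (not part of this patch): ---
// the runners read poller->finished *before* draining, so a chunk published in between is
// still picked up on the next iteration; checking finished only after draining could miss
// the final chunk. The surrounding TODO notes this is still not fully thread-safe. Pattern:
//
//     bool seen_finished = false;
//     while (!seen_finished) {
//         seen_finished = poller->finished;                                   // read flag first
//         while (poller->process([](const auto &chunk) { /* consume */ })) {} // then drain
//     }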
pending data handling actually thread-safe seen_finished = poller->finished; while (poller->process([&received, &received_tags](const auto &data, const auto &tags_) { - auto tags = std::vector(tags_.begin(), tags_.end()); + auto tags = std::vector(tags_.begin(), tags_.end()); for (auto &t : tags) { t.index += static_cast(received.size()); } received_tags.insert(received_tags.end(), tags.begin(), tags.end()); received.insert(received.end(), data.begin(), data.end()); - })) {} + })) { + } } return std::make_tuple(received, received_tags); }); - fair::graph::scheduler::simple sched{std::move(flow_graph)}; + fair::graph::scheduler::simple sched{ std::move(flow_graph) }; sched.work(); sink.stop(); // TODO the scheduler should call this @@ -353,7 +365,7 @@ const boost::ut::suite DataSinkTests = [] { std::vector expected(n_samples); std::iota(expected.begin(), expected.end(), 0.0); - const auto received1 = runner1.get(); + const auto received1 = runner1.get(); const auto &[received2, received_tags] = runner2.get(); expect(eq(received1.size(), expected.size())); expect(eq(received1, expected)); @@ -368,11 +380,11 @@ const boost::ut::suite DataSinkTests = [] { "blocking polling trigger mode non-overlapping"_test = [] { constexpr std::int32_t n_samples = 200000; - graph flow_graph; - auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); - const auto tags = std::vector{{3000, {{"TYPE", "TRIGGER"}}}, tag_t{8000, {{"TYPE", "NO_TRIGGER"}}}, {180000, {{"TYPE", "TRIGGER"}}}}; - src.tags = tags; - auto &sink = flow_graph.make_node>(); + graph flow_graph; + auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); + const auto tags = std::vector{ { 3000, { { "TYPE", "TRIGGER" } } }, tag_t{ 8000, { { "TYPE", "NO_TRIGGER" } } }, { 180000, { { "TYPE", "TRIGGER" } } } }; + src.tags = tags; + auto &sink = flow_graph.make_node>(); sink.set_name("test_sink"); expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); @@ -385,12 +397,12 @@ const boost::ut::suite DataSinkTests = [] { auto poller = data_sink_registry::instance().get_trigger_poller(data_sink_query::with_sink_name("test_sink"), is_trigger, 3, 2, blocking_mode::Blocking); expect(neq(poller, nullptr)); - auto polling = std::async([poller] { + auto polling = std::async([poller] { std::vector received_data; - std::vector received_tags; - bool seen_finished = false; + std::vector received_tags; + bool seen_finished = false; while (!seen_finished) { - seen_finished = poller->finished; + seen_finished = poller->finished; [[maybe_unused]] auto r = poller->process_one([&received_data, &received_tags](const auto &dataset) { received_data.insert(received_data.end(), dataset.signal_values.begin(), dataset.signal_values.end()); expect(eq(dataset.timing_events.size(), 1u)) >> fatal; @@ -402,16 +414,16 @@ const boost::ut::suite DataSinkTests = [] { return std::make_tuple(received_data, received_tags); }); - fair::graph::scheduler::simple sched{std::move(flow_graph)}; + fair::graph::scheduler::simple sched{ std::move(flow_graph) }; sched.work(); sink.stop(); // TODO the scheduler should call this const auto &[received_data, received_tags] = polling.get(); - const auto expected_tags = {tags[0], tags[2]}; // triggers-only + const auto expected_tags = { tags[0], tags[2] }; // triggers-only expect(eq(received_data.size(), 10)); - expect(eq(received_data, std::vector{2997, 2998, 2999, 3000, 3001, 179997, 179998, 179999, 180000, 180001})); + expect(eq(received_data, std::vector{ 2997, 2998, 2999, 3000, 3001, 
179997, 179998, 179999, 180000, 180001 })); expect(eq(received_tags.size(), expected_tags.size())); expect(eq(poller->drop_count.load(), 0)); @@ -420,10 +432,10 @@ const boost::ut::suite DataSinkTests = [] { "blocking polling snapshot mode"_test = [] { constexpr std::int32_t n_samples = 200000; - graph flow_graph; - auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); - src.tags = {{3000, {{"TYPE", "TRIGGER"}}}, tag_t{8000, {{"TYPE", "NO_TRIGGER"}}}, {180000, {{"TYPE", "TRIGGER"}}}}; - auto &sink = flow_graph.make_node>(); + graph flow_graph; + auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); + src.tags = { { 3000, { { "TYPE", "TRIGGER" } } }, tag_t{ 8000, { { "TYPE", "NO_TRIGGER" } } }, { 180000, { { "TYPE", "TRIGGER" } } } }; + auto &sink = flow_graph.make_node>(); sink.set_name("test_sink"); expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); @@ -433,16 +445,16 @@ const boost::ut::suite DataSinkTests = [] { return v && std::get(v->get()) == "TRIGGER"; }; - const auto delay = std::chrono::milliseconds{500}; // sample rate 10000 -> 5000 samples - auto poller = data_sink_registry::instance().get_snapshot_poller(data_sink_query::with_sink_name("test_sink"), is_trigger, delay, blocking_mode::Blocking); + const auto delay = std::chrono::milliseconds{ 500 }; // sample rate 10000 -> 5000 samples + auto poller = data_sink_registry::instance().get_snapshot_poller(data_sink_query::with_sink_name("test_sink"), is_trigger, delay, blocking_mode::Blocking); expect(neq(poller, nullptr)); - auto poller_result = std::async([poller] { + auto poller_result = std::async([poller] { std::vector received_data; - bool seen_finished = false; + bool seen_finished = false; while (!seen_finished) { - seen_finished = poller->finished; + seen_finished = poller->finished; [[maybe_unused]] auto r = poller->process_one([&received_data](const auto &dataset) { expect(eq(dataset.timing_events.size(), 1u)) >> fatal; expect(eq(dataset.timing_events[0].size(), 1u)); @@ -454,25 +466,25 @@ const boost::ut::suite DataSinkTests = [] { return received_data; }); - fair::graph::scheduler::simple sched{std::move(flow_graph)}; + fair::graph::scheduler::simple sched{ std::move(flow_graph) }; sched.work(); sink.stop(); // TODO the scheduler should call this const auto received_data = poller_result.get(); - expect(eq(received_data, std::vector{8000, 185000})); + expect(eq(received_data, std::vector{ 8000, 185000 })); expect(eq(poller->drop_count.load(), 0)); }; "blocking polling multiplexed mode"_test = [] { - const auto tags = make_test_tags(0, 10000); + const auto tags = make_test_tags(0, 10000); const std::int32_t n_samples = tags.size() * 10000 + 100000; - graph flow_graph; - auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); - src.tags = tags; - auto &sink = flow_graph.make_node>(); + graph flow_graph; + auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); + src.tags = tags; + auto &sink = flow_graph.make_node>(); sink.set_name("test_sink"); expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); @@ -488,25 +500,15 @@ const boost::ut::suite DataSinkTests = [] { expect(eq(run_observer_test(t, Observer({}, {}, 1)), "|#|__|#|__|#|__|#|__|#|__|#|__"s)); } - auto observer_factory = [](std::optional y, std::optional m, std::optional d) { - return [y, m, d]() { - return Observer(y, m, d); - }; - }; - const auto factories = std::array{observer_factory({}, -1, {}), - observer_factory(-1, {}, {}), 
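// --- Hedged sketch of a custom trigger observer (illustrative, not part of this patch): ---
// get_multiplexed_poller()/register_multiplexed_callback() take a *factory* so that each
// listener owns its own (potentially stateful) observer. A minimal stateless example that
// opens a window on {"MODE", "ON"} tags and closes it on any other MODE value; the tag key
// and values are made up, `sink` is assumed to be an existing data_sink<float>:
auto minimal_observer_factory = [] {
    return [](const fair::graph::tag_t &tag) {
        const auto mode = tag.get("MODE");
        if (!mode) {
            return fair::graph::trigger_observer_state::Ignore;
        }
        return std::get<std::string>(mode->get()) == "ON" ? fair::graph::trigger_observer_state::Start
                                                          : fair::graph::trigger_observer_state::Stop;
    };
};
auto mux_poller = sink.get_multiplexed_poller(minimal_observer_factory, /*maximum_window_size=*/100000);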
- observer_factory(1, {}, {}), - observer_factory(1, {}, 2), - observer_factory({}, {}, 1)}; + auto observer_factory = [](std::optional y, std::optional m, std::optional d) { return [y, m, d]() { return Observer(y, m, d); }; }; + const auto factories = std::array{ observer_factory({}, -1, {}), observer_factory(-1, {}, {}), observer_factory(1, {}, {}), observer_factory(1, {}, 2), observer_factory({}, {}, 1) }; // Following the patterns above, where each #/_ is 10000 samples - const auto expected = std::array, factories.size()>{{ - {0, 29999, 30000, 59999, 60000, 89999, 90000, 119999, 120000, 149999, 150000, 249999}, - {0, 59999, 60000, 119999, 120000, 219999}, - {0, 59999}, - {10000, 19999, 40000, 49999}, - {0, 9999, 30000, 39999, 60000, 69999, 90000, 99999, 120000, 129999, 150000, 159999} - }}; + const auto expected = std::array, factories.size()>{ { { 0, 29999, 30000, 59999, 60000, 89999, 90000, 119999, 120000, 149999, 150000, 249999 }, + { 0, 59999, 60000, 119999, 120000, 219999 }, + { 0, 59999 }, + { 10000, 19999, 40000, 49999 }, + { 0, 9999, 30000, 39999, 60000, 69999, 90000, 99999, 120000, 129999, 150000, 159999 } } }; std::vector::dataset_poller>> pollers; for (const auto &f : factories) { @@ -520,20 +522,21 @@ const boost::ut::suite DataSinkTests = [] { for (std::size_t i = 0; i < pollers.size(); ++i) { auto f = std::async([poller = pollers[i]] { std::vector ranges; - bool seen_finished = false; + bool seen_finished = false; while (!seen_finished) { seen_finished = poller->finished.load(); while (poller->process_one([&ranges](const auto &dataset) { ranges.push_back(dataset.signal_values.front()); ranges.push_back(dataset.signal_values.back()); - })) {} + })) { + } } return ranges; }); results.push_back(std::move(f)); } - fair::graph::scheduler::simple sched{std::move(flow_graph)}; + fair::graph::scheduler::simple sched{ std::move(flow_graph) }; sched.work(); sink.stop(); // TODO the scheduler should call this @@ -544,14 +547,14 @@ const boost::ut::suite DataSinkTests = [] { }; "blocking polling trigger mode overlapping"_test = [] { - constexpr std::int32_t n_samples = 2000000; - constexpr std::size_t n_triggers = 5000; + constexpr std::int32_t n_samples = 2000000; + constexpr std::size_t n_triggers = 5000; - graph flow_graph; - auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); + graph flow_graph; + auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); for (std::size_t i = 0; i < n_triggers; ++i) { - src.tags.push_back(tag_t{static_cast(60000 + i), {{"TYPE", "TRIGGER"}}}); + src.tags.push_back(tag_t{ static_cast(60000 + i), { { "TYPE", "TRIGGER" } } }); } auto &sink = flow_graph.make_node>(); @@ -559,17 +562,15 @@ const boost::ut::suite DataSinkTests = [] { expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); - auto is_trigger = [](const tag_t &tag) { - return true; - }; + auto is_trigger = [](const tag_t &tag) { return true; }; - auto poller = data_sink_registry::instance().get_trigger_poller(data_sink_query::with_sink_name("test_sink"), is_trigger, 3000, 2000, blocking_mode::Blocking); + auto poller = data_sink_registry::instance().get_trigger_poller(data_sink_query::with_sink_name("test_sink"), is_trigger, 3000, 2000, blocking_mode::Blocking); expect(neq(poller, nullptr)); - auto polling = std::async([poller] { + auto polling = std::async([poller] { std::vector received_data; std::vector received_tags; - bool seen_finished = false; + bool seen_finished = false; while (!seen_finished) { // TODO make finished 
vs. pending data handling actually thread-safe seen_finished = poller->finished.load(); @@ -581,18 +582,19 @@ const boost::ut::suite DataSinkTests = [] { expect(eq(dataset.timing_events[0].size(), 1u)); expect(eq(dataset.timing_events[0][0].index, 3000)); received_tags.insert(received_tags.end(), dataset.timing_events[0].begin(), dataset.timing_events[0].end()); - })) {} + })) { + } } return std::make_tuple(received_data, received_tags); }); - fair::graph::scheduler::simple sched{std::move(flow_graph)}; + fair::graph::scheduler::simple sched{ std::move(flow_graph) }; sched.work(); sink.stop(); // TODO the scheduler should call this const auto &[received_data, received_tags] = polling.get(); - auto expected_start = std::vector{57000, 61999, 57001, 62000, 57002}; + auto expected_start = std::vector{ 57000, 61999, 57001, 62000, 57002 }; expect(eq(poller->drop_count.load(), 0u)); expect(eq(received_data.size(), 2 * n_triggers) >> fatal); expect(eq(std::vector(received_data.begin(), received_data.begin() + 5), expected_start)); @@ -600,14 +602,14 @@ const boost::ut::suite DataSinkTests = [] { }; "callback trigger mode overlapping"_test = [] { - constexpr std::int32_t n_samples = 2000000; - constexpr std::size_t n_triggers = 5000; + constexpr std::int32_t n_samples = 2000000; + constexpr std::size_t n_triggers = 5000; - graph flow_graph; - auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); + graph flow_graph; + auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); for (std::size_t i = 0; i < n_triggers; ++i) { - src.tags.push_back(tag_t{static_cast(60000 + i), {{"TYPE", "TRIGGER"}}}); + src.tags.push_back(tag_t{ static_cast(60000 + i), { { "TYPE", "TRIGGER" } } }); } auto &sink = flow_graph.make_node>(); @@ -615,15 +617,13 @@ const boost::ut::suite DataSinkTests = [] { expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); - auto is_trigger = [](const tag_t &) { - return true; - }; + auto is_trigger = [](const tag_t &) { return true; }; - std::mutex m; + std::mutex m; std::vector received_data; - auto callback = [&received_data, &m](auto &&dataset) { - std::lock_guard lg{m}; + auto callback = [&received_data, &m](auto &&dataset) { + std::lock_guard lg{ m }; expect(eq(dataset.signal_values.size(), 5000u)); received_data.push_back(dataset.signal_values.front()); received_data.push_back(dataset.signal_values.back()); @@ -631,20 +631,20 @@ const boost::ut::suite DataSinkTests = [] { data_sink_registry::instance().register_trigger_callback(data_sink_query::with_sink_name("test_sink"), is_trigger, 3000, 2000, callback); - fair::graph::scheduler::simple sched{std::move(flow_graph)}; + fair::graph::scheduler::simple sched{ std::move(flow_graph) }; sched.work(); sink.stop(); // TODO the scheduler should call this - std::lock_guard lg{m}; - auto expected_start = std::vector{57000, 61999, 57001, 62000, 57002}; + std::lock_guard lg{ m }; + auto expected_start = std::vector{ 57000, 61999, 57001, 62000, 57002 }; expect(eq(received_data.size(), 2 * n_triggers)); expect(eq(std::vector(received_data.begin(), received_data.begin() + 5), expected_start)); }; "non-blocking polling continuous mode"_test = [] { graph flow_graph; - auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); + auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); auto &sink = flow_graph.make_node>(); sink.set_name("test_sink"); @@ -656,25 +656,24 @@ const boost::ut::suite DataSinkTests = [] { auto poller = 
data_sink_registry::instance().get_streaming_poller(data_sink_query::with_sink_name("test_sink")); expect(neq(poller, nullptr)); - auto polling = std::async([poller] { + auto polling = std::async([poller] { expect(neq(poller, nullptr)); - std::size_t samples_seen = 0; - bool seen_finished = false; + std::size_t samples_seen = 0; + bool seen_finished = false; while (!seen_finished) { // TODO make finished vs. pending data handling actually thread-safe using namespace std::chrono_literals; std::this_thread::sleep_for(20ms); seen_finished = poller->finished.load(); - while (poller->process([&samples_seen](const auto &data) { - samples_seen += data.size(); - })) {} + while (poller->process([&samples_seen](const auto &data) { samples_seen += data.size(); })) { + } } return samples_seen; }); - fair::graph::scheduler::simple sched{std::move(flow_graph)}; + fair::graph::scheduler::simple sched{ std::move(flow_graph) }; sched.work(); sink.stop(); // TODO the scheduler should call this From 2a1d12a3d5a0488d65642b55fe5b746f8f3ad0b8 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Thu, 22 Jun 2023 20:38:50 +0200 Subject: [PATCH 35/64] continuous/poller: Write tags before data Tags without data is checked for, data without tags can't. --- include/data_sink.hpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index c8af9f62..a5c32f44 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -448,14 +448,14 @@ class data_sink : public node> { const auto to_write = block ? data.size() : std::min(data.size(), poller->writer.available()); if (to_write > 0) { - auto write_data = poller->writer.reserve_output_range(to_write); - detail::copy_span(data.first(to_write), std::span(write_data)); - write_data.publish(write_data.size()); if (tag_data0) { auto tw = poller->tag_writer.reserve_output_range(1); tw[0] = { static_cast(samples_written), std::move(*tag_data0) }; tw.publish(1); } + auto write_data = poller->writer.reserve_output_range(to_write); + detail::copy_span(data.first(to_write), std::span(write_data)); + write_data.publish(write_data.size()); } poller->drop_count += data.size() - to_write; samples_written += to_write; From 1034cadfdc850045b883e4bf3bbbc99437736947 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Fri, 23 Jun 2023 15:54:38 +0200 Subject: [PATCH 36/64] Speed up test (most relevant in debug mode) --- test/qa_data_sink.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index 96df1bc0..ee61c343 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -547,8 +547,8 @@ const boost::ut::suite DataSinkTests = [] { }; "blocking polling trigger mode overlapping"_test = [] { - constexpr std::int32_t n_samples = 2000000; - constexpr std::size_t n_triggers = 5000; + constexpr std::int32_t n_samples = 150000; + constexpr std::size_t n_triggers = 300; graph flow_graph; auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); @@ -602,8 +602,8 @@ const boost::ut::suite DataSinkTests = [] { }; "callback trigger mode overlapping"_test = [] { - constexpr std::int32_t n_samples = 2000000; - constexpr std::size_t n_triggers = 5000; + constexpr std::int32_t n_samples = 150000; + constexpr std::size_t n_triggers = 300; graph flow_graph; auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); From ae697959f69f1dd2b303e6575b7ae55e45995985 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Fri, 23 Jun 2023 16:03:12 
+0200 Subject: [PATCH 37/64] No need to recreate history if capacity is sufficient... --- include/data_sink.hpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index a5c32f44..14b22dc2 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -866,6 +866,9 @@ class data_sink : public node> { void ensure_history_size(std::size_t new_size) { + if (new_size <= history.capacity()) { + return; + } // TODO transitional, do not reallocate/copy, but create a shared buffer with size N, // and a per-listener history buffer where more than N samples is needed. auto new_history = gr::history_buffer(std::max(new_size, history.capacity())); From 0ca59e54ee35ef1898d9149ecedffd52c9dae0d5 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Mon, 26 Jun 2023 10:16:05 +0200 Subject: [PATCH 38/64] Clean up order and naming Prefix private member variables with "_". Reorder class members to adhere to this order: - private state - public interface - private implementation (nested impl classes, methods) --- include/data_sink.hpp | 334 +++++++++++++++++++++--------------------- 1 file changed, 166 insertions(+), 168 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index 14b22dc2..4ea3daa6 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -61,8 +61,8 @@ struct data_sink_query { }; class data_sink_registry { - std::mutex mutex; - std::vector sinks; + std::mutex _mutex; + std::vector _sinks; public: // TODO this shouldn't be a singleton but associated with the flow graph (?) @@ -75,15 +75,15 @@ class data_sink_registry { template void register_sink(data_sink *sink) { - std::lock_guard lg{ mutex }; - sinks.push_back(sink); + std::lock_guard lg{ _mutex }; + _sinks.push_back(sink); } template void unregister_sink(data_sink *sink) { - std::lock_guard lg{ mutex }; - std::erase_if(sinks, [sink](const std::any &v) { + std::lock_guard lg{ _mutex }; + std::erase_if(_sinks, [sink](const std::any &v) { try { return std::any_cast *>(v) == sink; } catch (...) { @@ -95,7 +95,7 @@ class data_sink_registry { template std::shared_ptr::poller> get_streaming_poller(const data_sink_query &query, blocking_mode block = blocking_mode::NonBlocking) { - std::lock_guard lg{ mutex }; + std::lock_guard lg{ _mutex }; auto sink = find_sink(query); return sink ? sink->get_streaming_poller(block) : nullptr; } @@ -103,7 +103,7 @@ class data_sink_registry { template std::shared_ptr::dataset_poller> get_trigger_poller(const data_sink_query &query, P p, std::size_t pre_samples, std::size_t post_samples, blocking_mode block = blocking_mode::NonBlocking) { - std::lock_guard lg{ mutex }; + std::lock_guard lg{ _mutex }; auto sink = find_sink(query); return sink ? sink->get_trigger_poller(std::forward
<P>
(p), pre_samples, post_samples, block) : nullptr; } @@ -111,7 +111,7 @@ class data_sink_registry { template std::shared_ptr::dataset_poller> get_multiplexed_poller(const data_sink_query &query, F triggerObserverFactory, std::size_t maximum_window_size, blocking_mode block = blocking_mode::NonBlocking) { - std::lock_guard lg{ mutex }; + std::lock_guard lg{ _mutex }; auto sink = find_sink(query); return sink ? sink->get_multiplexed_poller(std::forward(triggerObserverFactory), maximum_window_size, block) : nullptr; } @@ -119,7 +119,7 @@ class data_sink_registry { template std::shared_ptr::dataset_poller> get_snapshot_poller(const data_sink_query &query, P p, std::chrono::nanoseconds delay, blocking_mode block = blocking_mode::NonBlocking) { - std::lock_guard lg{ mutex }; + std::lock_guard lg{ _mutex }; auto sink = find_sink(query); return sink ? sink->get_snapshot_poller(std::forward
<P>
(p), delay, block) : nullptr; } @@ -127,7 +127,7 @@ class data_sink_registry { template bool register_streaming_callback(const data_sink_query &query, std::size_t max_chunk_size, Callback callback) { - std::lock_guard lg{ mutex }; + std::lock_guard lg{ _mutex }; auto sink = find_sink(query); if (!sink) { return false; @@ -140,7 +140,7 @@ class data_sink_registry { template bool register_trigger_callback(const data_sink_query &query, P p, std::size_t pre_samples, std::size_t post_samples, Callback callback) { - std::lock_guard lg{ mutex }; + std::lock_guard lg{ _mutex }; auto sink = find_sink(query); if (!sink) { return false; @@ -153,7 +153,7 @@ class data_sink_registry { template bool register_multiplexed_callback(const data_sink_query &query, std::size_t maximum_window_size, Callback callback) { - std::lock_guard lg{ mutex }; + std::lock_guard lg{ _mutex }; auto sink = find_sink(query); if (!sink) { return false; @@ -166,7 +166,7 @@ class data_sink_registry { template bool register_snapshot_callback(const data_sink_query &query, P p, std::chrono::nanoseconds delay, Callback callback) { - std::lock_guard lg{ mutex }; + std::lock_guard lg{ _mutex }; auto sink = find_sink(query); if (!sink) { return false; @@ -191,8 +191,8 @@ class data_sink_registry { } }; - const auto it = std::find_if(sinks.begin(), sinks.end(), matches); - if (it == sinks.end()) { + const auto it = std::find_if(_sinks.begin(), _sinks.end(), matches); + if (it == _sinks.end()) { return nullptr; } @@ -260,7 +260,12 @@ copy_span(std::span src, std::span dst) { */ template class data_sink : public node> { - static constexpr std::size_t listener_buffer_size = 65536; + struct abstract_listener; + + static constexpr std::size_t _listener_buffer_size = 65536; + std::deque> _listeners; + std::mutex _listener_mutex; + gr::history_buffer _history = gr::history_buffer(1); public: Annotated, Unit<"Hz">> sample_rate = 10000.f; @@ -269,10 +274,10 @@ class data_sink : public node> { Annotated> signal_min; Annotated> signal_max; - IN in; + IN in; struct poller { - gr::circular_buffer buffer = gr::circular_buffer(listener_buffer_size); + gr::circular_buffer buffer = gr::circular_buffer(_listener_buffer_size); decltype(buffer.new_reader()) reader = buffer.new_reader(); decltype(buffer.new_writer()) writer = buffer.new_writer(); gr::circular_buffer tag_buffer = gr::circular_buffer(1024); @@ -312,7 +317,7 @@ class data_sink : public node> { }; struct dataset_poller { - gr::circular_buffer> buffer = gr::circular_buffer>(listener_buffer_size); + gr::circular_buffer> buffer = gr::circular_buffer>(_listener_buffer_size); decltype(buffer.new_reader()) reader = buffer.new_reader(); decltype(buffer.new_writer()) writer = buffer.new_writer(); @@ -346,9 +351,134 @@ class data_sink : public node> { } }; + data_sink() { data_sink_registry::instance().register_sink(this); } + + ~data_sink() { data_sink_registry::instance().unregister_sink(this); } + + std::shared_ptr + get_streaming_poller(blocking_mode block_mode = blocking_mode::NonBlocking) { + std::lock_guard lg(_listener_mutex); + const auto block = block_mode == blocking_mode::Blocking; + auto handler = std::make_shared(); + add_listener(std::make_unique>(handler, block), block); + return handler; + } + + template + std::shared_ptr + get_trigger_poller(TriggerPredicate p, std::size_t pre_samples, std::size_t post_samples, blocking_mode block_mode = blocking_mode::NonBlocking) { + const auto block = block_mode == blocking_mode::Blocking; + auto handler = std::make_shared(); + std::lock_guard 
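// --- Hedged registry usage sketch (mirrors qa_data_sink.cpp, not part of this patch): ---
// sinks register themselves in their constructor, so clients can look them up by name via
// data_sink_query; only with_sink_name() is exercised by the tests, other query flavours
// are not assumed here. Lookups that find no matching sink yield nullptr / false.
auto stream_poller = fair::graph::data_sink_registry::instance().get_streaming_poller<float>(
        fair::graph::data_sink_query::with_sink_name("test_sink"), fair::graph::blocking_mode::Blocking);
const bool registered = fair::graph::data_sink_registry::instance().register_streaming_callback<float>(
        fair::graph::data_sink_query::with_sink_name("test_sink"), /*max_chunk_size=*/1000,
        [](std::span<const float> chunk) { /* consume chunk */ });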
lg(_listener_mutex); + add_listener(std::make_unique>(std::forward(p), handler, pre_samples, post_samples, block), block); + ensure_history_size(pre_samples); + return handler; + } + + template + std::shared_ptr + get_multiplexed_poller(F triggerObserverFactory, std::size_t maximum_window_size, blocking_mode block_mode = blocking_mode::NonBlocking) { + std::lock_guard lg(_listener_mutex); + const auto block = block_mode == blocking_mode::Blocking; + auto handler = std::make_shared(); + add_listener(std::make_unique>(std::move(triggerObserverFactory), maximum_window_size, handler, block), block); + return handler; + } + + template + std::shared_ptr + get_snapshot_poller(P p, std::chrono::nanoseconds delay, blocking_mode block_mode = blocking_mode::NonBlocking) { + const auto block = block_mode == blocking_mode::Blocking; + auto handler = std::make_shared(); + std::lock_guard lg(_listener_mutex); + add_listener(std::make_unique>(std::forward
<P>
(p), delay, handler, block), block); + return handler; + } + + template + void + register_streaming_callback(std::size_t max_chunk_size, Callback callback) { + add_listener(std::make_unique>(max_chunk_size, std::forward(callback)), false); + } + + template + void + register_trigger_callback(P p, std::size_t pre_samples, std::size_t post_samples, Callback callback) { + add_listener(std::make_unique>(std::forward
<P>
(p), pre_samples, post_samples, std::forward(callback)), false); + ensure_history_size(pre_samples); + } + + template + void + register_multiplexed_callback(F triggerObserverFactory, std::size_t maximum_window_size, Callback callback) { + std::lock_guard lg(_listener_mutex); + add_listener(std::make_unique(std::move(triggerObserverFactory), maximum_window_size, std::forward(callback)), false); + } + + template + void + register_snapshot_callback(P p, std::chrono::nanoseconds delay, Callback callback) { + std::lock_guard lg(_listener_mutex); + add_listener(std::make_unique(std::forward
<P>
(p), delay, std::forward(callback)), false); + } + + // TODO this code should be called at the end of graph processing + void + stop() noexcept { + std::lock_guard lg(_listener_mutex); + for (auto &listener : _listeners) { + listener->flush(); + } + } + + [[nodiscard]] work_return_t + process_bulk(std::span in_data) noexcept { + std::optional tagData; + if (this->input_tags_present()) { + assert(this->input_tags()[0].index == 0); + tagData = this->input_tags()[0].map; + } + + { + std::lock_guard lg(_listener_mutex); // TODO review/profile if a lock-free data structure should be used here + const auto history_view = _history.get_span(0); + for (auto &listener : _listeners) { + listener->process(history_view, in_data, tagData); + } + + // store potential pre-samples for triggers at the beginning of the next chunk + const auto to_write = std::min(in_data.size(), _history.capacity()); + _history.push_back_bulk(in_data.last(to_write)); + } + + return work_return_t::OK; + } + private: - struct abstract_listener_t { - virtual ~abstract_listener_t() = default; + void + ensure_history_size(std::size_t new_size) { + if (new_size <= _history.capacity()) { + return; + } + // TODO transitional, do not reallocate/copy, but create a shared buffer with size N, + // and a per-listener history buffer where more than N samples is needed. + auto new_history = gr::history_buffer(std::max(new_size, _history.capacity())); + new_history.push_back_bulk(_history.begin(), _history.end()); + std::swap(_history, new_history); + } + + void + add_listener(std::unique_ptr &&l, bool block) { + l->set_sample_rate(sample_rate); // TODO also call when sample_rate changes + if (block) { + _listeners.push_back(std::move(l)); + } else { + _listeners.push_front(std::move(l)); + } + } + + struct abstract_listener { + virtual ~abstract_listener() = default; virtual void set_sample_rate(float) {} @@ -361,7 +491,7 @@ class data_sink : public node> { }; template - struct continuous_listener_t : public abstract_listener_t { + struct continuous_listener : public abstract_listener { static constexpr auto has_callback = !std::is_same_v; static constexpr auto callback_takes_tags = std::is_invocable_v, std::span>; @@ -378,9 +508,9 @@ class data_sink : public node> { Callback callback; - explicit continuous_listener_t(std::size_t max_chunk_size, Callback c) : buffer(max_chunk_size), callback{ std::forward(c) } {} + explicit continuous_listener(std::size_t max_chunk_size, Callback c) : buffer(max_chunk_size), callback{ std::forward(c) } {} - explicit continuous_listener_t(std::shared_ptr poller, bool do_block) : block(do_block), polling_handler{ std::move(poller) } {} + explicit continuous_listener(std::shared_ptr poller, bool do_block) : block(do_block), polling_handler{ std::move(poller) } {} void process(std::span, std::span data, std::optional tag_data0) override { @@ -482,27 +612,27 @@ class data_sink : public node> { } }; - struct pending_window_t { + struct pending_window { DataSet dataset; std::size_t pending_post_samples = 0; }; template - struct trigger_listener_t : public abstract_listener_t { + struct trigger_listener : public abstract_listener { bool block = false; std::size_t pre_samples = 0; std::size_t post_samples = 0; P trigger_predicate = {}; - std::deque pending_trigger_windows; // triggers that still didn't receive all their data + std::deque pending_trigger_windows; // triggers that still didn't receive all their data std::weak_ptr polling_handler = {}; Callback callback; - explicit trigger_listener_t(P predicate, 
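// --- Hedged usage sketch for stop() (mirrors the tests, not part of this patch): ---
// until the scheduler flushes sinks itself (see the TODO above), stop() is called manually
// after processing so that listeners flush partial data and pollers observe finished == true.
// `flow_graph` and `sink` are assumed to be set up as in qa_data_sink.cpp.
fair::graph::scheduler::simple sched{ std::move(flow_graph) };
sched.work();
sink.stop(); // flush all listeners, mark their pollers as finished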
std::shared_ptr handler, std::size_t pre, std::size_t post, bool do_block) + explicit trigger_listener(P predicate, std::shared_ptr handler, std::size_t pre, std::size_t post, bool do_block) : block(do_block), pre_samples(pre), post_samples(post), trigger_predicate(std::forward

(predicate)), polling_handler{ std::move(handler) } {} - explicit trigger_listener_t(P predicate, std::size_t pre, std::size_t post, Callback cb) + explicit trigger_listener(P predicate, std::size_t pre, std::size_t post, Callback cb) : pre_samples(pre), post_samples(post), trigger_predicate(std::forward

(predicate)), callback{ std::forward(cb) } {} // TODO all the dataset-based listeners could share publish_dataset and parts of flush (closing pollers), @@ -577,7 +707,7 @@ class data_sink : public node> { }; template - struct multiplexed_listener_t : public abstract_listener_t { + struct multiplexed_listener : public abstract_listener { bool block = false; F observerFactory; decltype(observerFactory()) observer; @@ -586,10 +716,10 @@ class data_sink : public node> { std::weak_ptr polling_handler = {}; Callback callback; - explicit multiplexed_listener_t(F factory, std::size_t max_window_size, Callback cb) + explicit multiplexed_listener(F factory, std::size_t max_window_size, Callback cb) : observerFactory(factory), observer(observerFactory()), maximum_window_size(max_window_size), callback(cb) {} - explicit multiplexed_listener_t(F factory, std::size_t max_window_size, std::shared_ptr handler, bool do_block) + explicit multiplexed_listener(F factory, std::size_t max_window_size, std::shared_ptr handler, bool do_block) : block(do_block), observerFactory(factory), observer(observerFactory()), maximum_window_size(max_window_size), polling_handler{ std::move(handler) } {} inline void @@ -675,7 +805,7 @@ class data_sink : public node> { }; template - struct snapshot_listener_t : public abstract_listener_t { + struct snapshot_listener : public abstract_listener { bool block = false; std::chrono::nanoseconds time_delay; std::size_t sample_delay = 0; @@ -684,10 +814,10 @@ class data_sink : public node> { std::weak_ptr polling_handler = {}; Callback callback; - explicit snapshot_listener_t(P p, std::chrono::nanoseconds delay, std::shared_ptr poller, bool do_block) + explicit snapshot_listener(P p, std::chrono::nanoseconds delay, std::shared_ptr poller, bool do_block) : block(do_block), time_delay(delay), trigger_predicate(std::forward

(p)), polling_handler{ std::move(poller) } {} - explicit snapshot_listener_t(P p, std::chrono::nanoseconds delay, Callback cb) : trigger_predicate(std::forward

(p)), time_delay(std::forward(cb)) {} + explicit snapshot_listener(P p, std::chrono::nanoseconds delay, Callback cb) : trigger_predicate(std::forward

(p)), time_delay(std::forward(cb)) {} inline void publish_dataset(DataSet &&data) { @@ -753,138 +883,6 @@ class data_sink : public node> { } } }; - - std::deque> listeners; - std::mutex listener_mutex; - -public: - data_sink() { data_sink_registry::instance().register_sink(this); } - - ~data_sink() { data_sink_registry::instance().unregister_sink(this); } - - std::shared_ptr - get_streaming_poller(blocking_mode block_mode = blocking_mode::NonBlocking) { - std::lock_guard lg(listener_mutex); - const auto block = block_mode == blocking_mode::Blocking; - auto handler = std::make_shared(); - add_listener(std::make_unique>(handler, block), block); - return handler; - } - - template - std::shared_ptr - get_trigger_poller(TriggerPredicate p, std::size_t pre_samples, std::size_t post_samples, blocking_mode block_mode = blocking_mode::NonBlocking) { - const auto block = block_mode == blocking_mode::Blocking; - auto handler = std::make_shared(); - std::lock_guard lg(listener_mutex); - add_listener(std::make_unique>(std::forward(p), handler, pre_samples, post_samples, block), block); - ensure_history_size(pre_samples); - return handler; - } - - template - std::shared_ptr - get_multiplexed_poller(F triggerObserverFactory, std::size_t maximum_window_size, blocking_mode block_mode = blocking_mode::NonBlocking) { - std::lock_guard lg(listener_mutex); - const auto block = block_mode == blocking_mode::Blocking; - auto handler = std::make_shared(); - add_listener(std::make_unique>(std::move(triggerObserverFactory), maximum_window_size, handler, block), block); - return handler; - } - - template - std::shared_ptr - get_snapshot_poller(P p, std::chrono::nanoseconds delay, blocking_mode block_mode = blocking_mode::NonBlocking) { - const auto block = block_mode == blocking_mode::Blocking; - auto handler = std::make_shared(); - std::lock_guard lg(listener_mutex); - add_listener(std::make_unique>(std::forward

(p), delay, handler, block), block); - return handler; - } - - template - void - register_streaming_callback(std::size_t max_chunk_size, Callback callback) { - add_listener(std::make_unique>(max_chunk_size, std::forward(callback)), false); - } - - template - void - register_trigger_callback(P p, std::size_t pre_samples, std::size_t post_samples, Callback callback) { - add_listener(std::make_unique>(std::forward

(p), pre_samples, post_samples, std::forward(callback)), false); - ensure_history_size(pre_samples); - } - - template - void - register_multiplexed_callback(F triggerObserverFactory, std::size_t maximum_window_size, Callback callback) { - std::lock_guard lg(listener_mutex); - add_listener(std::make_unique(std::move(triggerObserverFactory), maximum_window_size, std::forward(callback)), false); - } - - template - void - register_snapshot_callback(P p, std::chrono::nanoseconds delay, Callback callback) { - std::lock_guard lg(listener_mutex); - add_listener(std::make_unique(std::forward

(p), delay, std::forward(callback)), false); - } - - // TODO this code should be called at the end of graph processing - void - stop() noexcept { - std::lock_guard lg(listener_mutex); - for (auto &listener : listeners) { - listener->flush(); - } - } - - [[nodiscard]] work_return_t - process_bulk(std::span in_data) noexcept { - std::optional tagData; - if (this->input_tags_present()) { - assert(this->input_tags()[0].index == 0); - tagData = this->input_tags()[0].map; - } - - { - std::lock_guard lg(listener_mutex); - const auto history_view = history.get_span(0); - for (auto &listener : listeners) { - listener->process(history_view, in_data, tagData); - } - - // store potential pre-samples for triggers at the beginning of the next chunk - const auto to_write = std::min(in_data.size(), history.capacity()); - history.push_back_bulk(in_data.last(to_write)); - } - - return work_return_t::OK; - } - -private: - gr::history_buffer history = gr::history_buffer(1); - - void - ensure_history_size(std::size_t new_size) { - if (new_size <= history.capacity()) { - return; - } - // TODO transitional, do not reallocate/copy, but create a shared buffer with size N, - // and a per-listener history buffer where more than N samples is needed. - auto new_history = gr::history_buffer(std::max(new_size, history.capacity())); - new_history.push_back_bulk(history.begin(), history.end()); - std::swap(history, new_history); - } - - void - add_listener(std::unique_ptr &&l, bool block) { - l->set_sample_rate(sample_rate); // TODO also call when sample_rate changes - if (block) { - listeners.push_back(std::move(l)); - } else { - listeners.push_front(std::move(l)); - } - } }; } // namespace fair::graph From 298f58337164b4b53b4fe95b0a39ec1ec10ae0d8 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Mon, 26 Jun 2023 10:44:34 +0200 Subject: [PATCH 39/64] try to build with emscripten --- include/data_sink.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index 4ea3daa6..8608fc13 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -673,7 +673,7 @@ class data_sink : public node> { const auto pre_sample_view = history.last(std::min(pre_samples, history.size())); dataset.signal_values.insert(dataset.signal_values.end(), pre_sample_view.begin(), pre_sample_view.end()); - dataset.timing_events = { { { static_cast(pre_sample_view.size()), *tag_data0 } } }; + dataset.timing_events = { { { static_cast(pre_sample_view.size()), *tag_data0 } } }; pending_trigger_windows.push_back({ .dataset = std::move(dataset), .pending_post_samples = post_samples }); } From 2dfe377006e81b68cdc903fe47cbcbc4ac1a20ed Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Mon, 26 Jun 2023 11:09:33 +0200 Subject: [PATCH 40/64] Delete expired listeners from list If a poller has been dropped, delete the listener from the list in the next iteration. 
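A rough client-side sketch of the intended behaviour (illustrative only; the sink
name, query helper and poller API are taken from the tests elsewhere in this
series, not introduced by this patch):

    {
        auto poller = data_sink_registry::instance().get_streaming_poller<float>(
                data_sink_query::with_sink_name("test_sink"), blocking_mode::Blocking);
        // drain whatever samples are currently available
        while (poller->process([](const auto &data) { /* consume samples */ })) {
        }
    }   // last shared_ptr to the poller goes out of scope here
    // On the next processing iteration the sink can no longer lock the listener's
    // weak_ptr, marks the listener expired, and std::erase_if() removes it.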
--- include/data_sink.hpp | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index 8608fc13..15c7c319 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -442,6 +442,7 @@ class data_sink : public node> { { std::lock_guard lg(_listener_mutex); // TODO review/profile if a lock-free data structure should be used here const auto history_view = _history.get_span(0); + std::erase_if(_listeners, [](const auto &l) { return l->expired; }); for (auto &listener : _listeners) { listener->process(history_view, in_data, tagData); } @@ -478,8 +479,12 @@ class data_sink : public node> { } struct abstract_listener { + bool expired = false; + virtual ~abstract_listener() = default; + void set_expired() { expired = true; } + virtual void set_sample_rate(float) {} @@ -571,7 +576,7 @@ class data_sink : public node> { } else { auto poller = polling_handler.lock(); if (!poller) { - // TODO someone remove this listener from the list + this->set_expired(); return; } @@ -645,6 +650,7 @@ class data_sink : public node> { } else { auto poller = polling_handler.lock(); if (!poller) { + this->set_expired(); return; } @@ -729,6 +735,7 @@ class data_sink : public node> { } else { auto poller = polling_handler.lock(); if (!poller) { + this->set_expired(); return; } @@ -826,6 +833,7 @@ class data_sink : public node> { } else { auto poller = polling_handler.lock(); if (!poller) { + this->set_expired(); return; } From 040b9ef04638f8e535daf6bdcba4d991dd2900ec Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Mon, 26 Jun 2023 14:25:28 +0200 Subject: [PATCH 41/64] Fix some of Ralph's findings --- include/data_sink.hpp | 21 ++++++++++++++++----- test/qa_data_sink.cpp | 8 ++++---- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index 15c7c319..04f6072e 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -31,6 +31,17 @@ concept TriggerPredicate = requires(const T p, tag_t tag) { { p(tag) } -> std::convertible_to; }; +/** + * For the 'Multiplexed' acquisition mode: Stateful object checking all incoming tags to control which data should be sent + * to the listener. + * + * A new dataset is started when the observer returns @c Start or @c StopAndStart. + * A dataset is closed and sent when @c Stop or @StopAndStart is returned. + * + * The observer can rely on being called with each incoming tag exactly once, in the order they arrive. 
+ * + * @see trigger_observer_state + */ template concept TriggerObserver = requires(T o, tag_t tag) { { o(tag) } -> std::convertible_to; @@ -470,7 +481,7 @@ class data_sink : public node> { void add_listener(std::unique_ptr &&l, bool block) { - l->set_sample_rate(sample_rate); // TODO also call when sample_rate changes + l->apply_sample_rate(sample_rate); // TODO also call when sample_rate changes if (block) { _listeners.push_back(std::move(l)); } else { @@ -486,7 +497,7 @@ class data_sink : public node> { void set_expired() { expired = true; } virtual void - set_sample_rate(float) {} + apply_sample_rate(float) {} virtual void process(std::span history, std::span data, std::optional tag_data0) @@ -853,8 +864,8 @@ class data_sink : public node> { } void - set_sample_rate(float r) override { - sample_delay = std::round(std::chrono::duration_cast>(time_delay).count() * r); + apply_sample_rate(float rateHz) override { + sample_delay = std::round(std::chrono::duration_cast>(time_delay).count() * rateHz); // TODO do we need to update the requested_samples of pending here? (considering both old and new time_delay) } @@ -895,6 +906,6 @@ class data_sink : public node> { } // namespace fair::graph -ENABLE_REFLECTION_FOR_TEMPLATE_FULL((typename T), (fair::graph::data_sink), in, sample_rate); +ENABLE_REFLECTION_FOR_TEMPLATE_FULL((typename T), (fair::graph::data_sink), in, sample_rate, signal_name, signal_unit, signal_min, signal_max); #endif diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index ee61c343..745305aa 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -112,9 +112,9 @@ struct Observer { trigger_observer_state operator()(const tag_t &tag) { - const auto ty = tag.get("Y"); - const auto tm = tag.get("M"); - const auto td = tag.get("D"); + const auto ty = tag.get("YEAR"); + const auto tm = tag.get("MONTH"); + const auto td = tag.get("DAY"); if (!ty || !tm || !td) { return trigger_observer_state::Ignore; } @@ -152,7 +152,7 @@ struct Observer { static tag_t make_tag(tag_t::signed_index_type index, int year, int month, int day) { - return tag_t{ index, { { "Y", year }, { "M", month }, { "D", day } } }; + return tag_t{ index, { { "YEAR", year }, { "MONTH", month }, { "DAY", day } } }; } static std::vector From b4bd1dc2f80c4c1c8ca37470dd2624ea0668e523 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Mon, 26 Jun 2023 14:50:39 +0200 Subject: [PATCH 42/64] More docs, make blocking the default --- include/data_sink.hpp | 35 +++++++++++++++++++++++++++-------- 1 file changed, 27 insertions(+), 8 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index 04f6072e..bc93611b 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -40,6 +40,25 @@ concept TriggerPredicate = requires(const T p, tag_t tag) { * * The observer can rely on being called with each incoming tag exactly once, in the order they arrive. * + * Example: + * + * @code + * // Observer observing three possible tag values, "green", "yellow", "red". 
+ * // starting a dataset when seeing "green", stopping on "red", starting a new dataset on "yellow" + * struct color_observer { + * trigger_observer_state operator()(const tag_t &tag) { + * if (tag == green || tag == yellow) { + * return trigger_observer_state::StopAndStart; + * } + * if (tag == red) { + * return trigger_observer_state::Stop; + * } + * + * return trigger_observer_state::Ignore; + * } + * }; + * @endcode + * * @see trigger_observer_state */ template @@ -105,7 +124,7 @@ class data_sink_registry { template std::shared_ptr::poller> - get_streaming_poller(const data_sink_query &query, blocking_mode block = blocking_mode::NonBlocking) { + get_streaming_poller(const data_sink_query &query, blocking_mode block = blocking_mode::Blocking) { std::lock_guard lg{ _mutex }; auto sink = find_sink(query); return sink ? sink->get_streaming_poller(block) : nullptr; @@ -113,7 +132,7 @@ class data_sink_registry { template std::shared_ptr::dataset_poller> - get_trigger_poller(const data_sink_query &query, P p, std::size_t pre_samples, std::size_t post_samples, blocking_mode block = blocking_mode::NonBlocking) { + get_trigger_poller(const data_sink_query &query, P p, std::size_t pre_samples, std::size_t post_samples, blocking_mode block = blocking_mode::Blocking) { std::lock_guard lg{ _mutex }; auto sink = find_sink(query); return sink ? sink->get_trigger_poller(std::forward

(p), pre_samples, post_samples, block) : nullptr; @@ -121,7 +140,7 @@ class data_sink_registry { template std::shared_ptr::dataset_poller> - get_multiplexed_poller(const data_sink_query &query, F triggerObserverFactory, std::size_t maximum_window_size, blocking_mode block = blocking_mode::NonBlocking) { + get_multiplexed_poller(const data_sink_query &query, F triggerObserverFactory, std::size_t maximum_window_size, blocking_mode block = blocking_mode::Blocking) { std::lock_guard lg{ _mutex }; auto sink = find_sink(query); return sink ? sink->get_multiplexed_poller(std::forward(triggerObserverFactory), maximum_window_size, block) : nullptr; @@ -129,7 +148,7 @@ class data_sink_registry { template std::shared_ptr::dataset_poller> - get_snapshot_poller(const data_sink_query &query, P p, std::chrono::nanoseconds delay, blocking_mode block = blocking_mode::NonBlocking) { + get_snapshot_poller(const data_sink_query &query, P p, std::chrono::nanoseconds delay, blocking_mode block = blocking_mode::Blocking) { std::lock_guard lg{ _mutex }; auto sink = find_sink(query); return sink ? sink->get_snapshot_poller(std::forward

(p), delay, block) : nullptr; @@ -367,7 +386,7 @@ class data_sink : public node> { ~data_sink() { data_sink_registry::instance().unregister_sink(this); } std::shared_ptr - get_streaming_poller(blocking_mode block_mode = blocking_mode::NonBlocking) { + get_streaming_poller(blocking_mode block_mode = blocking_mode::Blocking) { std::lock_guard lg(_listener_mutex); const auto block = block_mode == blocking_mode::Blocking; auto handler = std::make_shared(); @@ -377,7 +396,7 @@ class data_sink : public node> { template std::shared_ptr - get_trigger_poller(TriggerPredicate p, std::size_t pre_samples, std::size_t post_samples, blocking_mode block_mode = blocking_mode::NonBlocking) { + get_trigger_poller(TriggerPredicate p, std::size_t pre_samples, std::size_t post_samples, blocking_mode block_mode = blocking_mode::Blocking) { const auto block = block_mode == blocking_mode::Blocking; auto handler = std::make_shared(); std::lock_guard lg(_listener_mutex); @@ -388,7 +407,7 @@ class data_sink : public node> { template std::shared_ptr - get_multiplexed_poller(F triggerObserverFactory, std::size_t maximum_window_size, blocking_mode block_mode = blocking_mode::NonBlocking) { + get_multiplexed_poller(F triggerObserverFactory, std::size_t maximum_window_size, blocking_mode block_mode = blocking_mode::Blocking) { std::lock_guard lg(_listener_mutex); const auto block = block_mode == blocking_mode::Blocking; auto handler = std::make_shared(); @@ -398,7 +417,7 @@ class data_sink : public node> { template std::shared_ptr - get_snapshot_poller(P p, std::chrono::nanoseconds delay, blocking_mode block_mode = blocking_mode::NonBlocking) { + get_snapshot_poller(P p, std::chrono::nanoseconds delay, blocking_mode block_mode = blocking_mode::Blocking) { const auto block = block_mode == blocking_mode::Blocking; auto handler = std::make_shared(); std::lock_guard lg(_listener_mutex); From 7b1784df3c24cba19848b82a4b67a9aa2a3eeabd Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Tue, 27 Jun 2023 11:01:38 +0200 Subject: [PATCH 43/64] Adapt to Ivan's comments --- include/data_sink.hpp | 34 ++++++++++++++++------------------ include/utils.hpp | 2 ++ test/qa_data_sink.cpp | 22 +++++++++++----------- 3 files changed, 29 insertions(+), 29 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index bc93611b..db7324bf 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -21,9 +21,6 @@ enum class trigger_observer_state { Ignore ///< Ignore tag }; -// TODO is the scope where want these? -struct null_type {}; - // Until clang-format can handle concepts // clang-format off template @@ -76,16 +73,16 @@ template class data_sink; struct data_sink_query { - std::optional sink_name; - std::optional signal_name; + std::optional _sink_name; + std::optional _signal_name; static data_sink_query - with_signal_name(std::string_view name) { + signal_name(std::string_view name) { return { {}, std::string{ name } }; } static data_sink_query - with_sink_name(std::string_view name) { + sink_name(std::string_view name) { return { std::string{ name }, {} }; } }; @@ -96,6 +93,7 @@ class data_sink_registry { public: // TODO this shouldn't be a singleton but associated with the flow graph (?) 
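    // Usage sketch (illustrative only, mirroring the query helpers renamed in this
    // patch; poller type and template syntax as used elsewhere in this series):
    //   auto query  = data_sink_query::sink_name("test_sink");
    //   auto poller = data_sink_registry::instance().get_streaming_poller<float>(query);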
+ // TODO reconsider mutex usage when moving to the graph static data_sink_registry & instance() { static data_sink_registry s_instance; @@ -213,8 +211,8 @@ class data_sink_registry { auto matches = [&query](const std::any &v) { try { auto sink = std::any_cast *>(v); - const auto sink_name_matches = !query.sink_name || *query.sink_name == sink->name(); - const auto signal_name_matches = !query.signal_name || *query.signal_name == sink->signal_name; + const auto sink_name_matches = !query._sink_name || *query._sink_name == sink->name(); + const auto signal_name_matches = !query._signal_name || *query._signal_name == sink->signal_name; return sink_name_matches && signal_name_matches; } catch (...) { return false; @@ -390,7 +388,7 @@ class data_sink : public node> { std::lock_guard lg(_listener_mutex); const auto block = block_mode == blocking_mode::Blocking; auto handler = std::make_shared(); - add_listener(std::make_unique>(handler, block), block); + add_listener(std::make_unique>(handler, block), block); return handler; } @@ -400,7 +398,7 @@ class data_sink : public node> { const auto block = block_mode == blocking_mode::Blocking; auto handler = std::make_shared(); std::lock_guard lg(_listener_mutex); - add_listener(std::make_unique>(std::forward(p), handler, pre_samples, post_samples, block), block); + add_listener(std::make_unique>(std::forward(p), handler, pre_samples, post_samples, block), block); ensure_history_size(pre_samples); return handler; } @@ -411,7 +409,7 @@ class data_sink : public node> { std::lock_guard lg(_listener_mutex); const auto block = block_mode == blocking_mode::Blocking; auto handler = std::make_shared(); - add_listener(std::make_unique>(std::move(triggerObserverFactory), maximum_window_size, handler, block), block); + add_listener(std::make_unique>(std::move(triggerObserverFactory), maximum_window_size, handler, block), block); return handler; } @@ -421,7 +419,7 @@ class data_sink : public node> { const auto block = block_mode == blocking_mode::Blocking; auto handler = std::make_shared(); std::lock_guard lg(_listener_mutex); - add_listener(std::make_unique>(std::forward

(p), delay, handler, block), block); + add_listener(std::make_unique>(std::forward

(p), delay, handler, block), block); return handler; } @@ -527,7 +525,7 @@ class data_sink : public node> { template struct continuous_listener : public abstract_listener { - static constexpr auto has_callback = !std::is_same_v; + static constexpr auto has_callback = !std::is_same_v; static constexpr auto callback_takes_tags = std::is_invocable_v, std::span>; bool block = false; @@ -675,7 +673,7 @@ class data_sink : public node> { // I leave it as is for now. inline void publish_dataset(DataSet &&data) { - if constexpr (!std::is_same_v) { + if constexpr (!std::is_same_v) { callback(std::move(data)); } else { auto poller = polling_handler.lock(); @@ -760,7 +758,7 @@ class data_sink : public node> { inline void publish_dataset(DataSet &&data) { - if constexpr (!std::is_same_v) { + if constexpr (!std::is_same_v) { callback(std::move(data)); } else { auto poller = polling_handler.lock(); @@ -858,7 +856,7 @@ class data_sink : public node> { inline void publish_dataset(DataSet &&data) { - if constexpr (!std::is_same_v) { + if constexpr (!std::is_same_v) { callback(std::move(data)); } else { auto poller = polling_handler.lock(); @@ -892,7 +890,7 @@ class data_sink : public node> { process(std::span, std::span in_data, std::optional tag_data0) override { if (tag_data0 && trigger_predicate({ 0, *tag_data0 })) { auto new_pending = pending_snapshot{ *tag_data0, sample_delay, sample_delay }; - // make sure pending is sorted by number of pending_samples (insertion might be not at end if sample rate decreased; TODO unless we adapt them in set_sample_rate, see there) + // make sure pending is sorted by number of pending_samples (insertion might be not at end if sample rate decreased; TODO unless we adapt them in apply_sample_rate, see there) auto rit = std::find_if(pending.rbegin(), pending.rend(), [delay = sample_delay](const auto &other) { return other.pending_samples < delay; }); pending.insert(rit.base(), std::move(new_pending)); } diff --git a/include/utils.hpp b/include/utils.hpp index 41a17ce9..7d8e0bbd 100644 --- a/include/utils.hpp +++ b/include/utils.hpp @@ -33,6 +33,8 @@ namespace fair::meta { using namespace fair::literals; +struct null_type {}; + template struct print_types; diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index 745305aa..ff336229 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -287,8 +287,8 @@ const boost::ut::suite DataSinkTests = [] { } }; - expect(data_sink_registry::instance().register_streaming_callback(data_sink_query::with_sink_name("test_sink"), chunk_size, callback)); - expect(data_sink_registry::instance().register_streaming_callback(data_sink_query::with_sink_name("test_sink"), chunk_size, callback_with_tags)); + expect(data_sink_registry::instance().register_streaming_callback(data_sink_query::sink_name("test_sink"), chunk_size, callback)); + expect(data_sink_registry::instance().register_streaming_callback(data_sink_query::sink_name("test_sink"), chunk_size, callback_with_tags)); fair::graph::scheduler::simple sched{ std::move(flow_graph) }; sched.work(); @@ -317,10 +317,10 @@ const boost::ut::suite DataSinkTests = [] { std::atomic samples_seen = 0; - auto poller_data_only = data_sink_registry::instance().get_streaming_poller(data_sink_query::with_sink_name("test_sink"), blocking_mode::Blocking); + auto poller_data_only = data_sink_registry::instance().get_streaming_poller(data_sink_query::sink_name("test_sink"), blocking_mode::Blocking); expect(neq(poller_data_only, nullptr)); - auto poller_with_tags = 
data_sink_registry::instance().get_streaming_poller(data_sink_query::with_sink_name("test_sink"), blocking_mode::Blocking); + auto poller_with_tags = data_sink_registry::instance().get_streaming_poller(data_sink_query::sink_name("test_sink"), blocking_mode::Blocking); expect(neq(poller_with_tags, nullptr)); auto runner1 = std::async([poller = poller_data_only] { @@ -394,7 +394,7 @@ const boost::ut::suite DataSinkTests = [] { return v && std::get(v->get()) == "TRIGGER"; }; - auto poller = data_sink_registry::instance().get_trigger_poller(data_sink_query::with_sink_name("test_sink"), is_trigger, 3, 2, blocking_mode::Blocking); + auto poller = data_sink_registry::instance().get_trigger_poller(data_sink_query::sink_name("test_sink"), is_trigger, 3, 2, blocking_mode::Blocking); expect(neq(poller, nullptr)); auto polling = std::async([poller] { @@ -446,7 +446,7 @@ const boost::ut::suite DataSinkTests = [] { }; const auto delay = std::chrono::milliseconds{ 500 }; // sample rate 10000 -> 5000 samples - auto poller = data_sink_registry::instance().get_snapshot_poller(data_sink_query::with_sink_name("test_sink"), is_trigger, delay, blocking_mode::Blocking); + auto poller = data_sink_registry::instance().get_snapshot_poller(data_sink_query::sink_name("test_sink"), is_trigger, delay, blocking_mode::Blocking); expect(neq(poller, nullptr)); auto poller_result = std::async([poller] { @@ -512,7 +512,7 @@ const boost::ut::suite DataSinkTests = [] { std::vector::dataset_poller>> pollers; for (const auto &f : factories) { - auto poller = data_sink_registry::instance().get_multiplexed_poller(data_sink_query::with_sink_name("test_sink"), f, 100000, blocking_mode::Blocking); + auto poller = data_sink_registry::instance().get_multiplexed_poller(data_sink_query::sink_name("test_sink"), f, 100000, blocking_mode::Blocking); expect(neq(poller, nullptr)); pollers.push_back(poller); } @@ -564,7 +564,7 @@ const boost::ut::suite DataSinkTests = [] { auto is_trigger = [](const tag_t &tag) { return true; }; - auto poller = data_sink_registry::instance().get_trigger_poller(data_sink_query::with_sink_name("test_sink"), is_trigger, 3000, 2000, blocking_mode::Blocking); + auto poller = data_sink_registry::instance().get_trigger_poller(data_sink_query::sink_name("test_sink"), is_trigger, 3000, 2000, blocking_mode::Blocking); expect(neq(poller, nullptr)); auto polling = std::async([poller] { @@ -629,7 +629,7 @@ const boost::ut::suite DataSinkTests = [] { received_data.push_back(dataset.signal_values.back()); }; - data_sink_registry::instance().register_trigger_callback(data_sink_query::with_sink_name("test_sink"), is_trigger, 3000, 2000, callback); + data_sink_registry::instance().register_trigger_callback(data_sink_query::sink_name("test_sink"), is_trigger, 3000, 2000, callback); fair::graph::scheduler::simple sched{ std::move(flow_graph) }; sched.work(); @@ -650,10 +650,10 @@ const boost::ut::suite DataSinkTests = [] { expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); - auto invalid_type_poller = data_sink_registry::instance().get_streaming_poller(data_sink_query::with_sink_name("test_sink")); + auto invalid_type_poller = data_sink_registry::instance().get_streaming_poller(data_sink_query::sink_name("test_sink")); expect(eq(invalid_type_poller, nullptr)); - auto poller = data_sink_registry::instance().get_streaming_poller(data_sink_query::with_sink_name("test_sink")); + auto poller = data_sink_registry::instance().get_streaming_poller(data_sink_query::sink_name("test_sink")); 
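        // No blocking_mode argument here: the registry defaults were switched to
        // blocking_mode::Blocking earlier in this series, so this poller blocks as well.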
expect(neq(poller, nullptr)); auto polling = std::async([poller] { From c55bf58a8eb0360d0def8f9012bf262b4e7142ea Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Tue, 27 Jun 2023 11:06:22 +0200 Subject: [PATCH 44/64] Fix ASCII art --- include/data_sink.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index db7324bf..53d62040 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -265,8 +265,8 @@ copy_span(std::span src, std::span dst) { * in0 ━╢ data sink ║ ┌──── caller ────┐ * (err0) ━╢ (opt. error) ║ │ │ * ║ ║ retrieve poller or │ (custom non-GR │ - * ━╢ :signal_name ║←--------------------→│ user code...) │ - * ━╢ :signal_unit ║ register │ │ + * ║ :signal_name ║←--------------------→│ user code...) │ + * ║ :signal_unit ║ register │ │ * ║ :... ║ callback function └───┬────────────┘ * ╚═ GR block ═╤══╝ │ * │ │ From b9b1c88bee8a5626d9328357308d4c7a5c545749 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Tue, 27 Jun 2023 11:42:47 +0200 Subject: [PATCH 45/64] apply clang-format --- include/data_sink.hpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index 53d62040..f9fc9ac4 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -507,11 +507,14 @@ class data_sink : public node> { } struct abstract_listener { - bool expired = false; + bool expired = false; virtual ~abstract_listener() = default; - void set_expired() { expired = true; } + void + set_expired() { + expired = true; + } virtual void apply_sample_rate(float) {} From 3676ecb24212a01fcaa66fb630f0584b3562f9b1 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Tue, 27 Jun 2023 11:52:27 +0200 Subject: [PATCH 46/64] Remove TriggerObserverFactory --- include/data_sink.hpp | 34 ++++++++++++++-------------------- test/qa_data_sink.cpp | 9 ++++----- 2 files changed, 18 insertions(+), 25 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index f9fc9ac4..54b75f00 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -63,10 +63,6 @@ concept TriggerObserver = requires(T o, tag_t tag) { { o(tag) } -> std::convertible_to; }; -template -concept TriggerObserverFactory = requires(T f) { - { f() } -> TriggerObserver; -}; // clang-format on template @@ -136,12 +132,12 @@ class data_sink_registry { return sink ? sink->get_trigger_poller(std::forward

(p), pre_samples, post_samples, block) : nullptr; } - template + template std::shared_ptr::dataset_poller> - get_multiplexed_poller(const data_sink_query &query, F triggerObserverFactory, std::size_t maximum_window_size, blocking_mode block = blocking_mode::Blocking) { + get_multiplexed_poller(const data_sink_query &query, O triggerObserver, std::size_t maximum_window_size, blocking_mode block = blocking_mode::Blocking) { std::lock_guard lg{ _mutex }; auto sink = find_sink(query); - return sink ? sink->get_multiplexed_poller(std::forward(triggerObserverFactory), maximum_window_size, block) : nullptr; + return sink ? sink->get_multiplexed_poller(std::forward(triggerObserver), maximum_window_size, block) : nullptr; } template @@ -403,13 +399,13 @@ class data_sink : public node> { return handler; } - template + template std::shared_ptr - get_multiplexed_poller(F triggerObserverFactory, std::size_t maximum_window_size, blocking_mode block_mode = blocking_mode::Blocking) { + get_multiplexed_poller(O triggerObserver, std::size_t maximum_window_size, blocking_mode block_mode = blocking_mode::Blocking) { std::lock_guard lg(_listener_mutex); const auto block = block_mode == blocking_mode::Blocking; auto handler = std::make_shared(); - add_listener(std::make_unique>(std::move(triggerObserverFactory), maximum_window_size, handler, block), block); + add_listener(std::make_unique>(std::move(triggerObserver), maximum_window_size, handler, block), block); return handler; } @@ -436,11 +432,11 @@ class data_sink : public node> { ensure_history_size(pre_samples); } - template + template void - register_multiplexed_callback(F triggerObserverFactory, std::size_t maximum_window_size, Callback callback) { + register_multiplexed_callback(O triggerObserver, std::size_t maximum_window_size, Callback callback) { std::lock_guard lg(_listener_mutex); - add_listener(std::make_unique(std::move(triggerObserverFactory), maximum_window_size, std::forward(callback)), false); + add_listener(std::make_unique(std::move(triggerObserver), maximum_window_size, std::forward(callback)), false); } template @@ -743,21 +739,19 @@ class data_sink : public node> { } }; - template + template struct multiplexed_listener : public abstract_listener { bool block = false; - F observerFactory; - decltype(observerFactory()) observer; + O observer; std::optional> pending_dataset; std::size_t maximum_window_size; std::weak_ptr polling_handler = {}; Callback callback; - explicit multiplexed_listener(F factory, std::size_t max_window_size, Callback cb) - : observerFactory(factory), observer(observerFactory()), maximum_window_size(max_window_size), callback(cb) {} + explicit multiplexed_listener(O observer_, std::size_t max_window_size, Callback cb) : observer(std::move(observer_)), maximum_window_size(max_window_size), callback(cb) {} - explicit multiplexed_listener(F factory, std::size_t max_window_size, std::shared_ptr handler, bool do_block) - : block(do_block), observerFactory(factory), observer(observerFactory()), maximum_window_size(max_window_size), polling_handler{ std::move(handler) } {} + explicit multiplexed_listener(O observer_, std::size_t max_window_size, std::shared_ptr handler, bool do_block) + : block(do_block), observer(std::move(observer_)), maximum_window_size(max_window_size), polling_handler{ std::move(handler) } {} inline void publish_dataset(DataSet &&data) { diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index ff336229..8753377d 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -500,19 +500,18 
@@ const boost::ut::suite DataSinkTests = [] { expect(eq(run_observer_test(t, Observer({}, {}, 1)), "|#|__|#|__|#|__|#|__|#|__|#|__"s)); } - auto observer_factory = [](std::optional y, std::optional m, std::optional d) { return [y, m, d]() { return Observer(y, m, d); }; }; - const auto factories = std::array{ observer_factory({}, -1, {}), observer_factory(-1, {}, {}), observer_factory(1, {}, {}), observer_factory(1, {}, 2), observer_factory({}, {}, 1) }; + const auto observers = std::array{ Observer({}, -1, {}), Observer(-1, {}, {}), Observer(1, {}, {}), Observer(1, {}, 2), Observer({}, {}, 1) }; // Following the patterns above, where each #/_ is 10000 samples - const auto expected = std::array, factories.size()>{ { { 0, 29999, 30000, 59999, 60000, 89999, 90000, 119999, 120000, 149999, 150000, 249999 }, + const auto expected = std::array, observers.size()>{ { { 0, 29999, 30000, 59999, 60000, 89999, 90000, 119999, 120000, 149999, 150000, 249999 }, { 0, 59999, 60000, 119999, 120000, 219999 }, { 0, 59999 }, { 10000, 19999, 40000, 49999 }, { 0, 9999, 30000, 39999, 60000, 69999, 90000, 99999, 120000, 129999, 150000, 159999 } } }; std::vector::dataset_poller>> pollers; - for (const auto &f : factories) { - auto poller = data_sink_registry::instance().get_multiplexed_poller(data_sink_query::sink_name("test_sink"), f, 100000, blocking_mode::Blocking); + for (const auto &o : observers) { + auto poller = data_sink_registry::instance().get_multiplexed_poller(data_sink_query::sink_name("test_sink"), o, 100000, blocking_mode::Blocking); expect(neq(poller, nullptr)); pollers.push_back(poller); } From 7478c87426eb635acaebae98caabf0333335016c Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Tue, 27 Jun 2023 12:19:16 +0200 Subject: [PATCH 47/64] Add concepts for callbacks --- include/data_sink.hpp | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index 54b75f00..60b6efad 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -23,6 +23,16 @@ enum class trigger_observer_state { // Until clang-format can handle concepts // clang-format off + +template +concept DataSetCallback = std::invocable>; + +/** + * Stream callback functions receive the span of data, and optionally the tags associated with it. + */ +template +concept StreamCallback = std::invocable> || std::invocable, std::span>; + template concept TriggerPredicate = requires(const T p, tag_t tag) { { p(tag) } -> std::convertible_to; @@ -148,7 +158,7 @@ class data_sink_registry { return sink ? sink->get_snapshot_poller(std::forward

(p), delay, block) : nullptr; } - template + template Callback> bool register_streaming_callback(const data_sink_query &query, std::size_t max_chunk_size, Callback callback) { std::lock_guard lg{ _mutex }; @@ -161,7 +171,7 @@ class data_sink_registry { return true; } - template + template Callback> bool register_trigger_callback(const data_sink_query &query, P p, std::size_t pre_samples, std::size_t post_samples, Callback callback) { std::lock_guard lg{ _mutex }; @@ -174,7 +184,7 @@ class data_sink_registry { return true; } - template + template Callback> bool register_multiplexed_callback(const data_sink_query &query, std::size_t maximum_window_size, Callback callback) { std::lock_guard lg{ _mutex }; @@ -187,7 +197,7 @@ class data_sink_registry { return true; } - template + template Callback> bool register_snapshot_callback(const data_sink_query &query, P p, std::chrono::nanoseconds delay, Callback callback) { std::lock_guard lg{ _mutex }; @@ -388,13 +398,13 @@ class data_sink : public node> { return handler; } - template + template std::shared_ptr - get_trigger_poller(TriggerPredicate p, std::size_t pre_samples, std::size_t post_samples, blocking_mode block_mode = blocking_mode::Blocking) { + get_trigger_poller(P p, std::size_t pre_samples, std::size_t post_samples, blocking_mode block_mode = blocking_mode::Blocking) { const auto block = block_mode == blocking_mode::Blocking; auto handler = std::make_shared(); std::lock_guard lg(_listener_mutex); - add_listener(std::make_unique>(std::forward(p), handler, pre_samples, post_samples, block), block); + add_listener(std::make_unique>(std::move(p), handler, pre_samples, post_samples, block), block); ensure_history_size(pre_samples); return handler; } @@ -419,27 +429,27 @@ class data_sink : public node> { return handler; } - template + template Callback> void register_streaming_callback(std::size_t max_chunk_size, Callback callback) { add_listener(std::make_unique>(max_chunk_size, std::forward(callback)), false); } - template + template Callback> void register_trigger_callback(P p, std::size_t pre_samples, std::size_t post_samples, Callback callback) { add_listener(std::make_unique>(std::forward

(p), pre_samples, post_samples, std::forward(callback)), false); ensure_history_size(pre_samples); } - template + template Callback> void register_multiplexed_callback(O triggerObserver, std::size_t maximum_window_size, Callback callback) { std::lock_guard lg(_listener_mutex); add_listener(std::make_unique(std::move(triggerObserver), maximum_window_size, std::forward(callback)), false); } - template + template Callback> void register_snapshot_callback(P p, std::chrono::nanoseconds delay, Callback callback) { std::lock_guard lg(_listener_mutex); From 02d4924c56a257977c3118e1862a47050bbc6192 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Tue, 27 Jun 2023 12:43:03 +0200 Subject: [PATCH 48/64] Simplify/Unify TagObserver/Predicate --- include/data_sink.hpp | 41 +++++++++++------------- test/qa_data_sink.cpp | 74 ++++++++++++++++++++----------------------- 2 files changed, 52 insertions(+), 63 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index 60b6efad..fe206a3d 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -14,11 +14,10 @@ namespace fair::graph { enum class blocking_mode { NonBlocking, Blocking }; -enum class trigger_observer_state { - Start, ///< Start a new dataset - Stop, ///< Finish dataset - StopAndStart, ///< Finish pending dataset, start a new one - Ignore ///< Ignore tag +enum class trigger_test_result { + Matching, ///< Start a new dataset + NotMatching, ///< Finish dataset + Ignore ///< Ignore tag }; // Until clang-format can handle concepts @@ -33,13 +32,17 @@ concept DataSetCallback = std::invocable>; template concept StreamCallback = std::invocable> || std::invocable, std::span>; +/** + * For the 'Triggered' and 'Snapshot' acquisition modes. + * Stateless predicate to check whether a tag matches the trigger criteria. + */ template concept TriggerPredicate = requires(const T p, tag_t tag) { - { p(tag) } -> std::convertible_to; + { p(tag) } -> std::convertible_to; }; /** - * For the 'Multiplexed' acquisition mode: Stateful object checking all incoming tags to control which data should be sent + * For the 'Multiplexed' acquisition mode: Possibly stateful object checking all incoming tags to control which data should be sent * to the listener. * * A new dataset is started when the observer returns @c Start or @c StopAndStart. @@ -55,10 +58,10 @@ concept TriggerPredicate = requires(const T p, tag_t tag) { * struct color_observer { * trigger_observer_state operator()(const tag_t &tag) { * if (tag == green || tag == yellow) { - * return trigger_observer_state::StopAndStart; + * return trigger_observer_state::Matching; * } * if (tag == red) { - * return trigger_observer_state::Stop; + * return trigger_observer_state::NotMatching; * } * * return trigger_observer_state::Ignore; @@ -70,7 +73,7 @@ concept TriggerPredicate = requires(const T p, tag_t tag) { */ template concept TriggerObserver = requires(T o, tag_t tag) { - { o(tag) } -> std::convertible_to; + { o(tag) } -> std::convertible_to; }; // clang-format on @@ -708,7 +711,7 @@ class data_sink : public node> { void process(std::span history, std::span in_data, std::optional tag_data0) override { - if (tag_data0 && trigger_predicate(tag_t{ 0, *tag_data0 })) { + if (tag_data0 && trigger_predicate(tag_t{ 0, *tag_data0 }) == trigger_test_result::Matching) { // TODO fill dataset with metadata etc. DataSet dataset; dataset.signal_values.reserve(pre_samples + post_samples); // TODO maybe make the circ. 
buffer smaller but preallocate these @@ -793,24 +796,16 @@ class data_sink : public node> { process(std::span, std::span in_data, std::optional tag_data0) override { if (tag_data0) { const auto obsr = observer(tag_t{ 0, *tag_data0 }); - // TODO set proper error state instead of throwing - if (obsr == trigger_observer_state::Stop || obsr == trigger_observer_state::StopAndStart) { - if (obsr == trigger_observer_state::Stop && !pending_dataset) { - throw std::runtime_error("multiplexed: Stop without start"); - } - + if (obsr == trigger_test_result::NotMatching || obsr == trigger_test_result::Matching) { if (pending_dataset) { - if (obsr == trigger_observer_state::Stop) { + if (obsr == trigger_test_result::NotMatching) { pending_dataset->timing_events[0].push_back({ static_cast(pending_dataset->signal_values.size()), *tag_data0 }); } publish_dataset(std::move(*pending_dataset)); pending_dataset.reset(); } } - if (obsr == trigger_observer_state::Start || obsr == trigger_observer_state::StopAndStart) { - if (obsr == trigger_observer_state::Start && pending_dataset) { - throw std::runtime_error("multiplexed: Two starts without stop"); - } + if (obsr == trigger_test_result::Matching) { pending_dataset = DataSet(); pending_dataset->signal_values.reserve(maximum_window_size); // TODO might be too much? pending_dataset->timing_events = { { { 0, *tag_data0 } } }; @@ -895,7 +890,7 @@ class data_sink : public node> { void process(std::span, std::span in_data, std::optional tag_data0) override { - if (tag_data0 && trigger_predicate({ 0, *tag_data0 })) { + if (tag_data0 && trigger_predicate({ 0, *tag_data0 }) == trigger_test_result::Matching) { auto new_pending = pending_snapshot{ *tag_data0, sample_delay, sample_delay }; // make sure pending is sorted by number of pending_samples (insertion might be not at end if sample rate decreased; TODO unless we adapt them in apply_sample_rate, see there) auto rit = std::find_if(pending.rbegin(), pending.rend(), [delay = sample_delay](const auto &other) { return other.pending_samples < delay; }); diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index 8753377d..462a2e0e 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -110,38 +110,36 @@ struct Observer { return !same(x, other); } - trigger_observer_state + trigger_test_result operator()(const tag_t &tag) { const auto ty = tag.get("YEAR"); const auto tm = tag.get("MONTH"); const auto td = tag.get("DAY"); if (!ty || !tm || !td) { - return trigger_observer_state::Ignore; + return trigger_test_result::Ignore; } - const auto tup = std::make_tuple(std::get(ty->get()), std::get(tm->get()), std::get(td->get())); - const auto &[y, m, d] = tup; - const auto ly = last_seen ? std::optional(std::get<0>(*last_seen)) : std::nullopt; - const auto lm = last_seen ? std::optional(std::get<1>(*last_seen)) : std::nullopt; - const auto ld = last_seen ? 
std::optional(std::get<2>(*last_seen)) : std::nullopt; - - const auto year_restart = year && *year == -1 && changed(y, ly); - const auto year_matches = !year || *year == -1 || same(y, year); - const auto month_restart = month && *month == -1 && changed(m, lm); - const auto month_matches = !month || *month == -1 || same(m, month); - const auto day_restart = day && *day == -1 && changed(d, ld); - const auto day_matches = !day || *day == -1 || same(d, day); - const auto matches = year_matches && month_matches && day_matches; - const auto restart = year_restart || month_restart || day_restart; - - trigger_observer_state r = trigger_observer_state::Ignore; - - if (last_matched && !matches) { - r = trigger_observer_state::Stop; - } else if (!last_matched && matches) { - r = trigger_observer_state::Start; - } else if ((!last_seen || last_matched) && matches && restart) { - r = trigger_observer_state::StopAndStart; + const auto tup = std::make_tuple(std::get(ty->get()), std::get(tm->get()), std::get(td->get())); + const auto &[y, m, d] = tup; + const auto ly = last_seen ? std::optional(std::get<0>(*last_seen)) : std::nullopt; + const auto lm = last_seen ? std::optional(std::get<1>(*last_seen)) : std::nullopt; + const auto ld = last_seen ? std::optional(std::get<2>(*last_seen)) : std::nullopt; + + const auto year_restart = year && *year == -1 && changed(y, ly); + const auto year_matches = !year || *year == -1 || same(y, year); + const auto month_restart = month && *month == -1 && changed(m, lm); + const auto month_matches = !month || *month == -1 || same(m, month); + const auto day_restart = day && *day == -1 && changed(d, ld); + const auto day_matches = !day || *day == -1 || same(d, day); + const auto matches = year_matches && month_matches && day_matches; + const auto restart = year_restart || month_restart || day_restart; + + trigger_test_result r = trigger_test_result::Ignore; + + if (!matches) { + r = trigger_test_result::NotMatching; + } else if (!last_matched || restart) { + r = trigger_test_result::Matching; } last_seen = tup; @@ -170,24 +168,20 @@ make_test_tags(tag_t::signed_index_type first_index, tag_t::signed_index_type in } static std::string -to_ascii_art(std::span states) { +to_ascii_art(std::span states) { bool started = false; std::string r; for (auto s : states) { switch (s) { - case trigger_observer_state::Start: - r += started ? "E" : "|#"; + case trigger_test_result::Matching: + r += started ? "||#" : "|#"; started = true; break; - case trigger_observer_state::Stop: - r += started ? "|_" : "E"; + case trigger_test_result::NotMatching: + r += started ? "|_" : "_"; started = false; break; - case trigger_observer_state::StopAndStart: - r += started ? "||#" : "|#"; - started = true; - break; - case trigger_observer_state::Ignore: r += started ? "#" : "_"; break; + case trigger_test_result::Ignore: r += started ? "#" : "_"; break; } }; return r; @@ -196,7 +190,7 @@ to_ascii_art(std::span states) { template std::string run_observer_test(std::span tags, O o) { - std::vector r; + std::vector r; r.reserve(tags.size()); for (const auto &tag : tags) { r.push_back(o(tag)); @@ -391,7 +385,7 @@ const boost::ut::suite DataSinkTests = [] { auto is_trigger = [](const tag_t &tag) { const auto v = tag.get("TYPE"); - return v && std::get(v->get()) == "TRIGGER"; + return v && std::get(v->get()) == "TRIGGER" ? 
trigger_test_result::Matching : trigger_test_result::Ignore; }; auto poller = data_sink_registry::instance().get_trigger_poller(data_sink_query::sink_name("test_sink"), is_trigger, 3, 2, blocking_mode::Blocking); @@ -442,7 +436,7 @@ const boost::ut::suite DataSinkTests = [] { auto is_trigger = [](const tag_t &tag) { const auto v = tag.get("TYPE"); - return v && std::get(v->get()) == "TRIGGER"; + return (v && std::get(v->get()) == "TRIGGER") ? trigger_test_result::Matching : trigger_test_result::Ignore; }; const auto delay = std::chrono::milliseconds{ 500 }; // sample rate 10000 -> 5000 samples @@ -561,7 +555,7 @@ const boost::ut::suite DataSinkTests = [] { expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); - auto is_trigger = [](const tag_t &tag) { return true; }; + auto is_trigger = [](const tag_t &tag) { return trigger_test_result::Matching; }; auto poller = data_sink_registry::instance().get_trigger_poller(data_sink_query::sink_name("test_sink"), is_trigger, 3000, 2000, blocking_mode::Blocking); expect(neq(poller, nullptr)); @@ -616,7 +610,7 @@ const boost::ut::suite DataSinkTests = [] { expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); - auto is_trigger = [](const tag_t &) { return true; }; + auto is_trigger = [](const tag_t &) { return trigger_test_result::Matching; }; std::mutex m; std::vector received_data; From 09f73aaf1e410793b107c9393b95c7df94a52a33 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Tue, 27 Jun 2023 13:35:15 +0200 Subject: [PATCH 49/64] Make sample rate configurable via settings --- include/data_sink.hpp | 17 +++++++++++++++-- test/qa_data_sink.cpp | 2 +- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index fe206a3d..a3d89f90 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -305,7 +305,7 @@ class data_sink : public node> { gr::history_buffer _history = gr::history_buffer(1); public: - Annotated, Unit<"Hz">> sample_rate = 10000.f; + Annotated, Unit<"Hz">> sample_rate = 1.f; Annotated signal_name; Annotated> signal_unit; Annotated> signal_min; @@ -392,6 +392,19 @@ class data_sink : public node> { ~data_sink() { data_sink_registry::instance().unregister_sink(this); } + void + init(const property_map & /*old_settings*/, const property_map &new_settings) { + const auto it = new_settings.find("sample_rate"); + if (it == new_settings.end()) { + return; + } + sample_rate = std::get(it->second); + std::lock_guard lg(_listener_mutex); + for (auto &l : _listeners) { + l->apply_sample_rate(sample_rate); + } + } + std::shared_ptr get_streaming_poller(blocking_mode block_mode = blocking_mode::Blocking) { std::lock_guard lg(_listener_mutex); @@ -507,7 +520,7 @@ class data_sink : public node> { void add_listener(std::unique_ptr &&l, bool block) { - l->apply_sample_rate(sample_rate); // TODO also call when sample_rate changes + l->apply_sample_rate(sample_rate); if (block) { _listeners.push_back(std::move(l)); } else { diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index 462a2e0e..9706c130 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -429,7 +429,7 @@ const boost::ut::suite DataSinkTests = [] { graph flow_graph; auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); src.tags = { { 3000, { { "TYPE", "TRIGGER" } } }, tag_t{ 8000, { { "TYPE", "NO_TRIGGER" } } }, { 180000, { { "TYPE", "TRIGGER" } } } }; - auto &sink = flow_graph.make_node>(); + auto &sink = flow_graph.make_node>({ 
{"sample_rate", 10000.f } } ); sink.set_name("test_sink"); expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); From c8e43cceeb1a64469224a495de37097365820831 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Tue, 27 Jun 2023 16:38:51 +0200 Subject: [PATCH 50/64] Add signal info handling Forward signal info from sink settings or incoming tags (settings have preference) to the datasets. --- include/data_sink.hpp | 172 +++++++++++++++++++++++++++++++----------- test/qa_data_sink.cpp | 40 ++++++++-- 2 files changed, 163 insertions(+), 49 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index a3d89f90..faf396f4 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -9,6 +9,7 @@ #include #include +#include namespace fair::graph { @@ -258,6 +259,18 @@ copy_span(std::span src, std::span dst) { std::copy(src.begin(), src.end(), dst.begin()); return true; } + +template +std::optional +get(const property_map &m, const std::string_view &key) { + const auto it = m.find(key); + if (it == m.end()) { + return {}; + } + + return std::get(it->second); +} + } // namespace detail /** @@ -302,14 +315,15 @@ class data_sink : public node> { static constexpr std::size_t _listener_buffer_size = 65536; std::deque> _listeners; std::mutex _listener_mutex; - gr::history_buffer _history = gr::history_buffer(1); + gr::history_buffer _history = gr::history_buffer(1); + bool _has_signal_info_from_settings = false; public: Annotated, Unit<"Hz">> sample_rate = 1.f; - Annotated signal_name; - Annotated> signal_unit; - Annotated> signal_min; - Annotated> signal_max; + Annotated signal_name = std::string("unknown signal"); + Annotated> signal_unit = std::string("a.u."); + Annotated> signal_min = std::numeric_limits::lowest(); + Annotated> signal_max = std::numeric_limits::max(); IN in; @@ -394,14 +408,8 @@ class data_sink : public node> { void init(const property_map & /*old_settings*/, const property_map &new_settings) { - const auto it = new_settings.find("sample_rate"); - if (it == new_settings.end()) { - return; - } - sample_rate = std::get(it->second); - std::lock_guard lg(_listener_mutex); - for (auto &l : _listeners) { - l->apply_sample_rate(sample_rate); + if (apply_signal_info(new_settings)) { + _has_signal_info_from_settings = true; } } @@ -477,7 +485,7 @@ class data_sink : public node> { stop() noexcept { std::lock_guard lg(_listener_mutex); for (auto &listener : _listeners) { - listener->flush(); + listener->stop(); } } @@ -487,6 +495,10 @@ class data_sink : public node> { if (this->input_tags_present()) { assert(this->input_tags()[0].index == 0); tagData = this->input_tags()[0].map; + // signal info from settings overrides info from tags + if (!_has_signal_info_from_settings) { + apply_signal_info(this->input_tags()[0].map); + } } { @@ -506,6 +518,62 @@ class data_sink : public node> { } private: + bool + apply_signal_info(const property_map &properties) { + try { + const auto srate = detail::get(properties, tag::SAMPLE_RATE.key()); + const auto name = detail::get(properties, tag::SIGNAL_NAME.key()); + const auto unit = detail::get(properties, tag::SIGNAL_UNIT.key()); + const auto min = detail::get(properties, tag::SIGNAL_MIN.key()); + const auto max = detail::get(properties, tag::SIGNAL_MAX.key()); + + // commit + if (srate) { + sample_rate = *srate; + } + if (name) { + signal_name = *name; + } + if (unit) { + signal_unit = *unit; + } + if (min) { + signal_min = *min; + } + if (max) { + signal_max = *max; + } + + // forward to listeners + if 
(srate || name || unit || min || max) { + const auto dstempl = make_dataset_template(); + + std::lock_guard lg{ _listener_mutex }; + for (auto &l : _listeners) { + if (srate) { + l->apply_sample_rate(sample_rate); + } + if (name || unit || min || max) { + l->set_dataset_template(dstempl); + } + } + } + return name || unit || min || max; + } catch (const std::bad_variant_access &) { + // TODO log? + return false; + } + } + + DataSet + make_dataset_template() const { + DataSet dstempl; + dstempl.signal_names = { signal_name }; + dstempl.signal_units = { signal_unit }; + dstempl.signal_ranges = { { signal_min, signal_max } }; + return dstempl; + } + void ensure_history_size(std::size_t new_size) { if (new_size <= _history.capacity()) { @@ -520,6 +588,7 @@ class data_sink : public node> { void add_listener(std::unique_ptr &&l, bool block) { + l->set_dataset_template(make_dataset_template()); l->apply_sample_rate(sample_rate); if (block) { _listeners.push_back(std::move(l)); @@ -539,13 +608,16 @@ class data_sink : public node> { } virtual void - apply_sample_rate(float) {} + apply_sample_rate(float /*sample_rate*/) {} + + virtual void + set_dataset_template(DataSet) {} virtual void process(std::span history, std::span data, std::optional tag_data0) = 0; virtual void - flush() = 0; + stop() = 0; }; template @@ -651,7 +723,7 @@ class data_sink : public node> { } void - flush() override { + stop() override { if constexpr (has_callback) { if (buffer_fill > 0) { if constexpr (callback_takes_tags) { @@ -677,10 +749,11 @@ class data_sink : public node> { template struct trigger_listener : public abstract_listener { - bool block = false; - std::size_t pre_samples = 0; - std::size_t post_samples = 0; + bool block = false; + std::size_t pre_samples = 0; + std::size_t post_samples = 0; + DataSet dataset_template; P trigger_predicate = {}; std::deque pending_trigger_windows; // triggers that still didn't receive all their data std::weak_ptr polling_handler = {}; @@ -693,9 +766,11 @@ class data_sink : public node> { explicit trigger_listener(P predicate, std::size_t pre, std::size_t post, Callback cb) : pre_samples(pre), post_samples(post), trigger_predicate(std::forward
<P>
(predicate)), callback{ std::forward(cb) } {} - // TODO all the dataset-based listeners could share publish_dataset and parts of flush (closing pollers), - // but if we want to use different datastructures/pass additional info, this might become moot again, so - // I leave it as is for now. + void + set_dataset_template(DataSet dst) override { + dataset_template = std::move(dst); + } + inline void publish_dataset(DataSet &&data) { if constexpr (!std::is_same_v) { @@ -725,8 +800,7 @@ class data_sink : public node> { void process(std::span history, std::span in_data, std::optional tag_data0) override { if (tag_data0 && trigger_predicate(tag_t{ 0, *tag_data0 }) == trigger_test_result::Matching) { - // TODO fill dataset with metadata etc. - DataSet dataset; + DataSet dataset = dataset_template; dataset.signal_values.reserve(pre_samples + post_samples); // TODO maybe make the circ. buffer smaller but preallocate these const auto pre_sample_view = history.last(std::min(pre_samples, history.size())); @@ -743,7 +817,7 @@ class data_sink : public node> { window->pending_post_samples -= post_sample_view.size(); if (window->pending_post_samples == 0) { - publish_dataset(std::move(window->dataset)); + this->publish_dataset(std::move(window->dataset)); window = pending_trigger_windows.erase(window); } else { ++window; @@ -752,10 +826,10 @@ class data_sink : public node> { } void - flush() override { + stop() override { for (auto &window : pending_trigger_windows) { if (!window.dataset.signal_values.empty()) { - publish_dataset(std::move(window.dataset)); + this->publish_dataset(std::move(window.dataset)); } } pending_trigger_windows.clear(); @@ -769,6 +843,7 @@ class data_sink : public node> { struct multiplexed_listener : public abstract_listener { bool block = false; O observer; + DataSet dataset_template; std::optional> pending_dataset; std::size_t maximum_window_size; std::weak_ptr polling_handler = {}; @@ -779,6 +854,11 @@ class data_sink : public node> { explicit multiplexed_listener(O observer_, std::size_t max_window_size, std::shared_ptr handler, bool do_block) : block(do_block), observer(std::move(observer_)), maximum_window_size(max_window_size), polling_handler{ std::move(handler) } {} + void + set_dataset_template(DataSet dst) override { + dataset_template = std::move(dst); + } + inline void publish_dataset(DataSet &&data) { if constexpr (!std::is_same_v) { @@ -814,12 +894,12 @@ class data_sink : public node> { if (obsr == trigger_test_result::NotMatching) { pending_dataset->timing_events[0].push_back({ static_cast(pending_dataset->signal_values.size()), *tag_data0 }); } - publish_dataset(std::move(*pending_dataset)); + this->publish_dataset(std::move(*pending_dataset)); pending_dataset.reset(); } } if (obsr == trigger_test_result::Matching) { - pending_dataset = DataSet(); + pending_dataset = dataset_template; pending_dataset->signal_values.reserve(maximum_window_size); // TODO might be too much? 
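                // [Editor's note: illustrative sketch, not part of this patch]
                // Every dataset now starts out as a copy of dataset_template, so the signal
                // metadata resolved by the sink travels with the published data. Assuming the
                // sink settings used in the test below (signal_name "test signal", unit "none",
                // range [0, n_samples - 1]), a published dataset would roughly satisfy:
                //
                //     assert(dataset.signal_names[0]  == "test signal");
                //     assert(dataset.signal_units[0]  == "none");
                //     assert(dataset.signal_ranges[0] == std::vector{ 0, n_samples - 1 });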
pending_dataset->timing_events = { { { 0, *tag_data0 } } }; } @@ -830,16 +910,16 @@ class data_sink : public node> { pending_dataset->signal_values.insert(pending_dataset->signal_values.end(), view.begin(), view.end()); if (pending_dataset->signal_values.size() == maximum_window_size) { - publish_dataset(std::move(*pending_dataset)); + this->publish_dataset(std::move(*pending_dataset)); pending_dataset.reset(); } } } void - flush() override { + stop() override { if (pending_dataset) { - publish_dataset(std::move(*pending_dataset)); + this->publish_dataset(std::move(*pending_dataset)); pending_dataset.reset(); } if (auto p = polling_handler.lock()) { @@ -858,7 +938,8 @@ class data_sink : public node> { struct snapshot_listener : public abstract_listener { bool block = false; std::chrono::nanoseconds time_delay; - std::size_t sample_delay = 0; + std::size_t sample_delay = 0; + DataSet dataset_template; P trigger_predicate = {}; std::deque pending; std::weak_ptr polling_handler = {}; @@ -869,6 +950,17 @@ class data_sink : public node> { explicit snapshot_listener(P p, std::chrono::nanoseconds delay, Callback cb) : trigger_predicate(std::forward
<P>
(p)), time_delay(std::forward(cb)) {} + void + set_dataset_template(DataSet dst) override { + dataset_template = std::move(dst); + } + + void + apply_sample_rate(float rateHz) override { + sample_delay = std::round(std::chrono::duration_cast>(time_delay).count() * rateHz); + // TODO do we need to update the requested_samples of pending here? (considering both old and new time_delay) + } + inline void publish_dataset(DataSet &&data) { if constexpr (!std::is_same_v) { @@ -895,12 +987,6 @@ class data_sink : public node> { } } - void - apply_sample_rate(float rateHz) override { - sample_delay = std::round(std::chrono::duration_cast>(time_delay).count() * rateHz); - // TODO do we need to update the requested_samples of pending here? (considering both old and new time_delay) - } - void process(std::span, std::span in_data, std::optional tag_data0) override { if (tag_data0 && trigger_predicate({ 0, *tag_data0 }) == trigger_test_result::Matching) { @@ -917,17 +1003,17 @@ class data_sink : public node> { break; } - DataSet dataset; + DataSet dataset = dataset_template; dataset.timing_events = { { { -static_cast(it->delay), std::move(it->tag_data) } } }; dataset.signal_values = { in_data[it->pending_samples] }; - publish_dataset(std::move(dataset)); + this->publish_dataset(std::move(dataset)); it = pending.erase(it); } } void - flush() override { + stop() override { pending.clear(); if (auto p = polling_handler.lock()) { p->finished = true; diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index 9706c130..767e01e7 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -376,9 +376,10 @@ const boost::ut::suite DataSinkTests = [] { graph flow_graph; auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); - const auto tags = std::vector{ { 3000, { { "TYPE", "TRIGGER" } } }, tag_t{ 8000, { { "TYPE", "NO_TRIGGER" } } }, { 180000, { { "TYPE", "TRIGGER" } } } }; + const auto tags = std::vector{ { 3000, { { "TYPE", "TRIGGER" } } }, { 8000, { { "TYPE", "NO_TRIGGER" } } }, { 180000, { { "TYPE", "TRIGGER" } } } }; src.tags = tags; - auto &sink = flow_graph.make_node>(); + auto &sink = flow_graph.make_node>( + { { "signal_name", "test signal" }, { "signal_unit", "none" }, { "signal_min", int32_t{ 0 } }, { "signal_max", int32_t{ n_samples - 1 } } }); sink.set_name("test_sink"); expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); @@ -399,7 +400,14 @@ const boost::ut::suite DataSinkTests = [] { seen_finished = poller->finished; [[maybe_unused]] auto r = poller->process_one([&received_data, &received_tags](const auto &dataset) { received_data.insert(received_data.end(), dataset.signal_values.begin(), dataset.signal_values.end()); - expect(eq(dataset.timing_events.size(), 1u)) >> fatal; + // signal info from sink settings + expect(eq(dataset.signal_names.size(), 1u)); + expect(eq(dataset.signal_units.size(), 1u)); + expect(eq(dataset.signal_ranges.size(), 1u)); + expect(eq(dataset.timing_events.size(), 1u)); + expect(eq(dataset.signal_names[0], "test signal"s)); + expect(eq(dataset.signal_units[0], "none"s)); + expect(eq(dataset.signal_ranges[0], std::vector{ 0, n_samples - 1 })); expect(eq(dataset.timing_events[0].size(), 1u)); expect(eq(dataset.timing_events[0][0].index, 3)); received_tags.insert(received_tags.end(), dataset.timing_events[0].begin(), dataset.timing_events[0].end()); @@ -428,8 +436,15 @@ const boost::ut::suite DataSinkTests = [] { graph flow_graph; auto &src = flow_graph.make_node>({ { "n_samples_max", n_samples } }); - src.tags 
= { { 3000, { { "TYPE", "TRIGGER" } } }, tag_t{ 8000, { { "TYPE", "NO_TRIGGER" } } }, { 180000, { { "TYPE", "TRIGGER" } } } }; - auto &sink = flow_graph.make_node>({ {"sample_rate", 10000.f } } ); + src.tags = { { 0, + { { std::string(tag::SIGNAL_NAME.key()), "test signal" }, + { std::string(tag::SIGNAL_UNIT.key()), "none" }, + { std::string(tag::SIGNAL_MIN.key()), int32_t{ 0 } }, + { std::string(tag::SIGNAL_MAX.key()), n_samples - 1 } } }, + { 3000, { { "TYPE", "TRIGGER" } } }, + { 8000, { { "TYPE", "NO_TRIGGER" } } }, + { 180000, { { "TYPE", "TRIGGER" } } } }; + auto &sink = flow_graph.make_node>({ { "sample_rate", 10000.f } }); sink.set_name("test_sink"); expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); @@ -450,7 +465,14 @@ const boost::ut::suite DataSinkTests = [] { while (!seen_finished) { seen_finished = poller->finished; [[maybe_unused]] auto r = poller->process_one([&received_data](const auto &dataset) { - expect(eq(dataset.timing_events.size(), 1u)) >> fatal; + // signal info from tags + expect(eq(dataset.signal_names.size(), 1u)); + expect(eq(dataset.signal_units.size(), 1u)); + expect(eq(dataset.signal_ranges.size(), 1u)); + expect(eq(dataset.timing_events.size(), 1u)); + expect(eq(dataset.signal_names[0], "test signal"s)); + expect(eq(dataset.signal_units[0], "none"s)); + expect(eq(dataset.signal_ranges[0], std::vector{ 0, n_samples - 1 })); expect(eq(dataset.timing_events[0].size(), 1u)); expect(eq(dataset.timing_events[0][0].index, -5000)); received_data.insert(received_data.end(), dataset.signal_values.begin(), dataset.signal_values.end()); @@ -519,6 +541,12 @@ const boost::ut::suite DataSinkTests = [] { while (!seen_finished) { seen_finished = poller->finished.load(); while (poller->process_one([&ranges](const auto &dataset) { + // default signal info, we didn't set anything + expect(eq(dataset.signal_names.size(), 1u)); + expect(eq(dataset.signal_units.size(), 1u)); + expect(eq(dataset.timing_events.size(), 1u)); + expect(eq(dataset.signal_names[0], "unknown signal"s)); + expect(eq(dataset.signal_units[0], "a.u."s)); ranges.push_back(dataset.signal_values.front()); ranges.push_back(dataset.signal_values.back()); })) { From b77ca5d3673eda0010622433d3c33566781e69fd Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Tue, 27 Jun 2023 16:46:31 +0200 Subject: [PATCH 51/64] Remove dataset_poller's process_one --- include/data_sink.hpp | 15 +------ test/qa_data_sink.cpp | 92 +++++++++++++++++++++++-------------------- 2 files changed, 51 insertions(+), 56 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index faf396f4..46003f66 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -376,7 +376,7 @@ class data_sink : public node> { std::atomic drop_count = 0; [[nodiscard]] bool - process_bulk(std::invocable>> auto fnc) { + process(std::invocable>> auto fnc) { const auto available = reader.available(); if (available == 0) { return false; @@ -387,19 +387,6 @@ class data_sink : public node> { std::ignore = reader.consume(available); return true; } - - [[nodiscard]] bool - process_one(std::invocable> auto fnc) { - const auto available = reader.available(); - if (available == 0) { - return false; - } - - const auto read_data = reader.get(1); - fnc(read_data[0]); - std::ignore = reader.consume(1); - return true; - } }; data_sink() { data_sink_registry::instance().register_sink(this); } diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index 767e01e7..c472048a 100644 --- a/test/qa_data_sink.cpp +++ 
b/test/qa_data_sink.cpp @@ -398,19 +398,21 @@ const boost::ut::suite DataSinkTests = [] { bool seen_finished = false; while (!seen_finished) { seen_finished = poller->finished; - [[maybe_unused]] auto r = poller->process_one([&received_data, &received_tags](const auto &dataset) { - received_data.insert(received_data.end(), dataset.signal_values.begin(), dataset.signal_values.end()); - // signal info from sink settings - expect(eq(dataset.signal_names.size(), 1u)); - expect(eq(dataset.signal_units.size(), 1u)); - expect(eq(dataset.signal_ranges.size(), 1u)); - expect(eq(dataset.timing_events.size(), 1u)); - expect(eq(dataset.signal_names[0], "test signal"s)); - expect(eq(dataset.signal_units[0], "none"s)); - expect(eq(dataset.signal_ranges[0], std::vector{ 0, n_samples - 1 })); - expect(eq(dataset.timing_events[0].size(), 1u)); - expect(eq(dataset.timing_events[0][0].index, 3)); - received_tags.insert(received_tags.end(), dataset.timing_events[0].begin(), dataset.timing_events[0].end()); + [[maybe_unused]] auto r = poller->process([&received_data, &received_tags](const auto &datasets) { + for (const auto &dataset : datasets) { + received_data.insert(received_data.end(), dataset.signal_values.begin(), dataset.signal_values.end()); + // signal info from sink settings + expect(eq(dataset.signal_names.size(), 1u)); + expect(eq(dataset.signal_units.size(), 1u)); + expect(eq(dataset.signal_ranges.size(), 1u)); + expect(eq(dataset.timing_events.size(), 1u)); + expect(eq(dataset.signal_names[0], "test signal"s)); + expect(eq(dataset.signal_units[0], "none"s)); + expect(eq(dataset.signal_ranges[0], std::vector{ 0, n_samples - 1 })); + expect(eq(dataset.timing_events[0].size(), 1u)); + expect(eq(dataset.timing_events[0][0].index, 3)); + received_tags.insert(received_tags.end(), dataset.timing_events[0].begin(), dataset.timing_events[0].end()); + } }); } return std::make_tuple(received_data, received_tags); @@ -464,18 +466,20 @@ const boost::ut::suite DataSinkTests = [] { bool seen_finished = false; while (!seen_finished) { seen_finished = poller->finished; - [[maybe_unused]] auto r = poller->process_one([&received_data](const auto &dataset) { - // signal info from tags - expect(eq(dataset.signal_names.size(), 1u)); - expect(eq(dataset.signal_units.size(), 1u)); - expect(eq(dataset.signal_ranges.size(), 1u)); - expect(eq(dataset.timing_events.size(), 1u)); - expect(eq(dataset.signal_names[0], "test signal"s)); - expect(eq(dataset.signal_units[0], "none"s)); - expect(eq(dataset.signal_ranges[0], std::vector{ 0, n_samples - 1 })); - expect(eq(dataset.timing_events[0].size(), 1u)); - expect(eq(dataset.timing_events[0][0].index, -5000)); - received_data.insert(received_data.end(), dataset.signal_values.begin(), dataset.signal_values.end()); + [[maybe_unused]] auto r = poller->process([&received_data](const auto &datasets) { + for (const auto &dataset : datasets) { + // signal info from tags + expect(eq(dataset.signal_names.size(), 1u)); + expect(eq(dataset.signal_units.size(), 1u)); + expect(eq(dataset.signal_ranges.size(), 1u)); + expect(eq(dataset.timing_events.size(), 1u)); + expect(eq(dataset.signal_names[0], "test signal"s)); + expect(eq(dataset.signal_units[0], "none"s)); + expect(eq(dataset.signal_ranges[0], std::vector{ 0, n_samples - 1 })); + expect(eq(dataset.timing_events[0].size(), 1u)); + expect(eq(dataset.timing_events[0][0].index, -5000)); + received_data.insert(received_data.end(), dataset.signal_values.begin(), dataset.signal_values.end()); + } }); } @@ -540,15 +544,17 @@ const 
boost::ut::suite DataSinkTests = [] { bool seen_finished = false; while (!seen_finished) { seen_finished = poller->finished.load(); - while (poller->process_one([&ranges](const auto &dataset) { - // default signal info, we didn't set anything - expect(eq(dataset.signal_names.size(), 1u)); - expect(eq(dataset.signal_units.size(), 1u)); - expect(eq(dataset.timing_events.size(), 1u)); - expect(eq(dataset.signal_names[0], "unknown signal"s)); - expect(eq(dataset.signal_units[0], "a.u."s)); - ranges.push_back(dataset.signal_values.front()); - ranges.push_back(dataset.signal_values.back()); + while (poller->process([&ranges](const auto &datasets) { + for (const auto &dataset : datasets) { + // default signal info, we didn't set anything + expect(eq(dataset.signal_names.size(), 1u)); + expect(eq(dataset.signal_units.size(), 1u)); + expect(eq(dataset.timing_events.size(), 1u)); + expect(eq(dataset.signal_names[0], "unknown signal"s)); + expect(eq(dataset.signal_units[0], "a.u."s)); + ranges.push_back(dataset.signal_values.front()); + ranges.push_back(dataset.signal_values.back()); + } })) { } } @@ -595,14 +601,16 @@ const boost::ut::suite DataSinkTests = [] { while (!seen_finished) { // TODO make finished vs. pending data handling actually thread-safe seen_finished = poller->finished.load(); - while (poller->process_one([&received_data, &received_tags](const auto &dataset) { - expect(eq(dataset.signal_values.size(), 5000u) >> fatal); - received_data.push_back(dataset.signal_values.front()); - received_data.push_back(dataset.signal_values.back()); - expect(eq(dataset.timing_events.size(), 1u)) >> fatal; - expect(eq(dataset.timing_events[0].size(), 1u)); - expect(eq(dataset.timing_events[0][0].index, 3000)); - received_tags.insert(received_tags.end(), dataset.timing_events[0].begin(), dataset.timing_events[0].end()); + while (poller->process([&received_data, &received_tags](const auto &datasets) { + for (const auto &dataset : datasets) { + expect(eq(dataset.signal_values.size(), 5000u) >> fatal); + received_data.push_back(dataset.signal_values.front()); + received_data.push_back(dataset.signal_values.back()); + expect(eq(dataset.timing_events.size(), 1u)) >> fatal; + expect(eq(dataset.timing_events[0].size(), 1u)); + expect(eq(dataset.timing_events[0][0].index, 3000)); + received_tags.insert(received_tags.end(), dataset.timing_events[0].begin(), dataset.timing_events[0].end()); + } })) { } } From 2581d60d06ea5f93d4cc0864db08f083afa8c59c Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Wed, 28 Jun 2023 10:00:21 +0200 Subject: [PATCH 52/64] remove obsolete TODOs --- test/qa_data_sink.cpp | 4 ---- 1 file changed, 4 deletions(-) diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index c472048a..e57e2ecd 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -321,7 +321,6 @@ const boost::ut::suite DataSinkTests = [] { std::vector received; bool seen_finished = false; while (!seen_finished) { - // TODO make finished vs. pending data handling actually thread-safe seen_finished = poller->finished; while (poller->process([&received](const auto &data) { received.insert(received.end(), data.begin(), data.end()); })) { } @@ -335,7 +334,6 @@ const boost::ut::suite DataSinkTests = [] { std::vector received_tags; bool seen_finished = false; while (!seen_finished) { - // TODO make finished vs. 
pending data handling actually thread-safe seen_finished = poller->finished; while (poller->process([&received, &received_tags](const auto &data, const auto &tags_) { auto tags = std::vector(tags_.begin(), tags_.end()); @@ -599,7 +597,6 @@ const boost::ut::suite DataSinkTests = [] { std::vector received_tags; bool seen_finished = false; while (!seen_finished) { - // TODO make finished vs. pending data handling actually thread-safe seen_finished = poller->finished.load(); while (poller->process([&received_data, &received_tags](const auto &datasets) { for (const auto &dataset : datasets) { @@ -690,7 +687,6 @@ const boost::ut::suite DataSinkTests = [] { std::size_t samples_seen = 0; bool seen_finished = false; while (!seen_finished) { - // TODO make finished vs. pending data handling actually thread-safe using namespace std::chrono_literals; std::this_thread::sleep_for(20ms); From c5bfc27b43fd419a25fb730e680b226ceb469299 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Wed, 28 Jun 2023 12:23:28 +0200 Subject: [PATCH 53/64] adapt to rebase --- test/qa_data_sink.cpp | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index e57e2ecd..57ff58c6 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -285,7 +285,7 @@ const boost::ut::suite DataSinkTests = [] { expect(data_sink_registry::instance().register_streaming_callback(data_sink_query::sink_name("test_sink"), chunk_size, callback_with_tags)); fair::graph::scheduler::simple sched{ std::move(flow_graph) }; - sched.work(); + sched.run_and_wait(); sink.stop(); // TODO the scheduler should call this @@ -350,7 +350,7 @@ const boost::ut::suite DataSinkTests = [] { }); fair::graph::scheduler::simple sched{ std::move(flow_graph) }; - sched.work(); + sched.run_and_wait(); sink.stop(); // TODO the scheduler should call this @@ -417,7 +417,7 @@ const boost::ut::suite DataSinkTests = [] { }); fair::graph::scheduler::simple sched{ std::move(flow_graph) }; - sched.work(); + sched.run_and_wait(); sink.stop(); // TODO the scheduler should call this @@ -485,7 +485,7 @@ const boost::ut::suite DataSinkTests = [] { }); fair::graph::scheduler::simple sched{ std::move(flow_graph) }; - sched.work(); + sched.run_and_wait(); sink.stop(); // TODO the scheduler should call this @@ -562,7 +562,7 @@ const boost::ut::suite DataSinkTests = [] { } fair::graph::scheduler::simple sched{ std::move(flow_graph) }; - sched.work(); + sched.run_and_wait(); sink.stop(); // TODO the scheduler should call this @@ -615,7 +615,7 @@ const boost::ut::suite DataSinkTests = [] { }); fair::graph::scheduler::simple sched{ std::move(flow_graph) }; - sched.work(); + sched.run_and_wait(); sink.stop(); // TODO the scheduler should call this @@ -658,7 +658,7 @@ const boost::ut::suite DataSinkTests = [] { data_sink_registry::instance().register_trigger_callback(data_sink_query::sink_name("test_sink"), is_trigger, 3000, 2000, callback); fair::graph::scheduler::simple sched{ std::move(flow_graph) }; - sched.work(); + sched.run_and_wait(); sink.stop(); // TODO the scheduler should call this @@ -699,7 +699,7 @@ const boost::ut::suite DataSinkTests = [] { }); fair::graph::scheduler::simple sched{ std::move(flow_graph) }; - sched.work(); + sched.run_and_wait(); sink.stop(); // TODO the scheduler should call this From af6eb4c0c4a146b435bdd3aa18b4064f1b061c63 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Wed, 28 Jun 2023 12:25:48 +0200 Subject: [PATCH 54/64] Test sink lookup by signal name --- 
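[Editor's note: illustrative sketch, not part of the original patch.] With this change a
client can address a registered sink either by its node name or by its configured signal
name. Assuming a data_sink<float> set up with name "test_sink" and signal_name
"test signal" (as in the test below), an is_trigger predicate as used elsewhere in the
test suite, and with the <float> template argument filled in here for readability, the two
lookup styles would be:

    // both queries should resolve to the same sink; nullptr is returned if no sink matches
    auto by_node   = data_sink_registry::instance().get_trigger_poller<float>(
            data_sink_query::sink_name("test_sink"), is_trigger, 3, 2, blocking_mode::Blocking);
    auto by_signal = data_sink_registry::instance().get_trigger_poller<float>(
            data_sink_query::signal_name("test signal"), is_trigger, 3, 2, blocking_mode::Blocking);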
test/qa_data_sink.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index 57ff58c6..02be3587 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -387,7 +387,8 @@ const boost::ut::suite DataSinkTests = [] { return v && std::get(v->get()) == "TRIGGER" ? trigger_test_result::Matching : trigger_test_result::Ignore; }; - auto poller = data_sink_registry::instance().get_trigger_poller(data_sink_query::sink_name("test_sink"), is_trigger, 3, 2, blocking_mode::Blocking); + // lookup by signal name + auto poller = data_sink_registry::instance().get_trigger_poller(data_sink_query::signal_name("test signal"), is_trigger, 3, 2, blocking_mode::Blocking); expect(neq(poller, nullptr)); auto polling = std::async([poller] { From f2b1e83a4ecb38239b1156d49032ebba5f518766 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Wed, 28 Jun 2023 13:56:35 +0200 Subject: [PATCH 55/64] Unify TriggerObserver and TriggerPredicate to TriggerMatcher --- include/data_sink.hpp | 160 +++++++++++++++++++++--------------------- test/qa_data_sink.cpp | 94 ++++++++++++------------- 2 files changed, 128 insertions(+), 126 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index 46003f66..0a3c976e 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -15,7 +15,7 @@ namespace fair::graph { enum class blocking_mode { NonBlocking, Blocking }; -enum class trigger_test_result { +enum class trigger_match_result { Matching, ///< Start a new dataset NotMatching, ///< Finish dataset Ignore ///< Ignore tag @@ -34,47 +34,50 @@ template concept StreamCallback = std::invocable> || std::invocable, std::span>; /** - * For the 'Triggered' and 'Snapshot' acquisition modes. + * Used for testing whether a tag should trigger data acquisition. + * + * For the 'Triggered' (data window) and 'Snapshot' (single sample) acquisition modes: * Stateless predicate to check whether a tag matches the trigger criteria. - */ -template -concept TriggerPredicate = requires(const T p, tag_t tag) { - { p(tag) } -> std::convertible_to; -}; - -/** + * + * @code + * auto matcher = [](const auto &tag) { + * const auto is_trigger = ...check if tag is trigger...; + * return is_trigger ? trigger_match_result::Matching : trigger_match_result::Ignore; + * }; + * @endcode + * * For the 'Multiplexed' acquisition mode: Possibly stateful object checking all incoming tags to control which data should be sent * to the listener. * - * A new dataset is started when the observer returns @c Start or @c StopAndStart. + * A new dataset is started when the matcher returns @c Start or @c StopAndStart. * A dataset is closed and sent when @c Stop or @StopAndStart is returned. * - * The observer can rely on being called with each incoming tag exactly once, in the order they arrive. + * For the multiplexed case, the matcher might be stateful and can rely on being called with each incoming tag exactly once, in the order they arrive. * * Example: * * @code - * // Observer observing three possible tag values, "green", "yellow", "red". + * // matcher observing three possible tag values, "green", "yellow", "red". 
* // starting a dataset when seeing "green", stopping on "red", starting a new dataset on "yellow" - * struct color_observer { - * trigger_observer_state operator()(const tag_t &tag) { + * struct color_matcher { + * matcher_result operator()(const tag_t &tag) { * if (tag == green || tag == yellow) { - * return trigger_observer_state::Matching; + * return trigger_match_result::Matching; * } * if (tag == red) { - * return trigger_observer_state::NotMatching; + * return trigger_match_result::NotMatching; * } * - * return trigger_observer_state::Ignore; + * return trigger_match_result::Ignore; * } * }; * @endcode * - * @see trigger_observer_state + * @see trigger_match_result */ template -concept TriggerObserver = requires(T o, tag_t tag) { - { o(tag) } -> std::convertible_to; +concept TriggerMatcher = requires(T matcher, tag_t tag) { + { matcher(tag) } -> std::convertible_to; }; // clang-format on @@ -138,28 +141,28 @@ class data_sink_registry { return sink ? sink->get_streaming_poller(block) : nullptr; } - template + template std::shared_ptr::dataset_poller> - get_trigger_poller(const data_sink_query &query, P p, std::size_t pre_samples, std::size_t post_samples, blocking_mode block = blocking_mode::Blocking) { + get_trigger_poller(const data_sink_query &query, M matcher, std::size_t pre_samples, std::size_t post_samples, blocking_mode block = blocking_mode::Blocking) { std::lock_guard lg{ _mutex }; auto sink = find_sink(query); - return sink ? sink->get_trigger_poller(std::forward
<P>
(p), pre_samples, post_samples, block) : nullptr; + return sink ? sink->get_trigger_poller(std::forward(matcher), pre_samples, post_samples, block) : nullptr; } - template + template std::shared_ptr::dataset_poller> - get_multiplexed_poller(const data_sink_query &query, O triggerObserver, std::size_t maximum_window_size, blocking_mode block = blocking_mode::Blocking) { + get_multiplexed_poller(const data_sink_query &query, M matcher, std::size_t maximum_window_size, blocking_mode block = blocking_mode::Blocking) { std::lock_guard lg{ _mutex }; auto sink = find_sink(query); - return sink ? sink->get_multiplexed_poller(std::forward(triggerObserver), maximum_window_size, block) : nullptr; + return sink ? sink->get_multiplexed_poller(std::forward(matcher), maximum_window_size, block) : nullptr; } - template + template std::shared_ptr::dataset_poller> - get_snapshot_poller(const data_sink_query &query, P p, std::chrono::nanoseconds delay, blocking_mode block = blocking_mode::Blocking) { + get_snapshot_poller(const data_sink_query &query, M matcher, std::chrono::nanoseconds delay, blocking_mode block = blocking_mode::Blocking) { std::lock_guard lg{ _mutex }; auto sink = find_sink(query); - return sink ? sink->get_snapshot_poller(std::forward
<P>
(p), delay, block) : nullptr; + return sink ? sink->get_snapshot_poller(std::forward(matcher), delay, block) : nullptr; } template Callback> @@ -175,42 +178,42 @@ class data_sink_registry { return true; } - template Callback> + template Callback, TriggerMatcher M> bool - register_trigger_callback(const data_sink_query &query, P p, std::size_t pre_samples, std::size_t post_samples, Callback callback) { + register_trigger_callback(const data_sink_query &query, M matcher, std::size_t pre_samples, std::size_t post_samples, Callback callback) { std::lock_guard lg{ _mutex }; auto sink = find_sink(query); if (!sink) { return false; } - sink->register_trigger_callback(std::forward
<P>
(p), pre_samples, post_samples, std::forward(callback)); + sink->register_trigger_callback(std::forward(matcher), pre_samples, post_samples, std::forward(callback)); return true; } - template Callback> + template Callback, TriggerMatcher M> bool - register_multiplexed_callback(const data_sink_query &query, std::size_t maximum_window_size, Callback callback) { + register_multiplexed_callback(const data_sink_query &query, M matcher, std::size_t maximum_window_size, Callback callback) { std::lock_guard lg{ _mutex }; auto sink = find_sink(query); if (!sink) { return false; } - sink->template register_multiplexed_callback(maximum_window_size, std::move(callback)); + sink->register_multiplexed_callback(std::forward(matcher), maximum_window_size, std::forward(callback)); return true; } - template Callback> + template Callback, TriggerMatcher M> bool - register_snapshot_callback(const data_sink_query &query, P p, std::chrono::nanoseconds delay, Callback callback) { + register_snapshot_callback(const data_sink_query &query, M matcher, std::chrono::nanoseconds delay, Callback callback) { std::lock_guard lg{ _mutex }; auto sink = find_sink(query); if (!sink) { return false; } - sink->template register_snapshot_callback(std::forward
<P>
(p), delay, std::forward(callback)); + sink->register_snapshot_callback(std::forward(matcher), delay, std::forward(callback)); return true; } @@ -409,62 +412,62 @@ class data_sink : public node> { return handler; } - template + template std::shared_ptr - get_trigger_poller(P p, std::size_t pre_samples, std::size_t post_samples, blocking_mode block_mode = blocking_mode::Blocking) { + get_trigger_poller(M matcher, std::size_t pre_samples, std::size_t post_samples, blocking_mode block_mode = blocking_mode::Blocking) { const auto block = block_mode == blocking_mode::Blocking; auto handler = std::make_shared(); std::lock_guard lg(_listener_mutex); - add_listener(std::make_unique>(std::move(p), handler, pre_samples, post_samples, block), block); + add_listener(std::make_unique>(std::move(matcher), handler, pre_samples, post_samples, block), block); ensure_history_size(pre_samples); return handler; } - template + template std::shared_ptr - get_multiplexed_poller(O triggerObserver, std::size_t maximum_window_size, blocking_mode block_mode = blocking_mode::Blocking) { + get_multiplexed_poller(M matcher, std::size_t maximum_window_size, blocking_mode block_mode = blocking_mode::Blocking) { std::lock_guard lg(_listener_mutex); const auto block = block_mode == blocking_mode::Blocking; auto handler = std::make_shared(); - add_listener(std::make_unique>(std::move(triggerObserver), maximum_window_size, handler, block), block); + add_listener(std::make_unique>(std::move(matcher), maximum_window_size, handler, block), block); return handler; } - template + template std::shared_ptr - get_snapshot_poller(P p, std::chrono::nanoseconds delay, blocking_mode block_mode = blocking_mode::Blocking) { + get_snapshot_poller(M matcher, std::chrono::nanoseconds delay, blocking_mode block_mode = blocking_mode::Blocking) { const auto block = block_mode == blocking_mode::Blocking; auto handler = std::make_shared(); std::lock_guard lg(_listener_mutex); - add_listener(std::make_unique>(std::forward
<P>
(p), delay, handler, block), block); + add_listener(std::make_unique>(std::move(matcher), delay, handler, block), block); return handler; } template Callback> void register_streaming_callback(std::size_t max_chunk_size, Callback callback) { - add_listener(std::make_unique>(max_chunk_size, std::forward(callback)), false); + add_listener(std::make_unique>(max_chunk_size, std::move(callback)), false); } - template Callback> + template Callback> void - register_trigger_callback(P p, std::size_t pre_samples, std::size_t post_samples, Callback callback) { - add_listener(std::make_unique>(std::forward
<P>
(p), pre_samples, post_samples, std::forward(callback)), false); + register_trigger_callback(M matcher, std::size_t pre_samples, std::size_t post_samples, Callback callback) { + add_listener(std::make_unique>(std::move(matcher), pre_samples, post_samples, std::move(callback)), false); ensure_history_size(pre_samples); } - template Callback> + template Callback> void - register_multiplexed_callback(O triggerObserver, std::size_t maximum_window_size, Callback callback) { + register_multiplexed_callback(M matcher, std::size_t maximum_window_size, Callback callback) { std::lock_guard lg(_listener_mutex); - add_listener(std::make_unique(std::move(triggerObserver), maximum_window_size, std::forward(callback)), false); + add_listener(std::make_unique>(std::move(matcher), maximum_window_size, std::move(callback)), false); } - template Callback> + template Callback> void - register_snapshot_callback(P p, std::chrono::nanoseconds delay, Callback callback) { + register_snapshot_callback(M matcher, std::chrono::nanoseconds delay, Callback callback) { std::lock_guard lg(_listener_mutex); - add_listener(std::make_unique(std::forward
<P>
(p), delay, std::forward(callback)), false); + add_listener(std::make_unique>(std::move(matcher), delay, std::move(callback)), false); } // TODO this code should be called at the end of graph processing @@ -734,24 +737,23 @@ class data_sink : public node> { std::size_t pending_post_samples = 0; }; - template + template struct trigger_listener : public abstract_listener { bool block = false; std::size_t pre_samples = 0; std::size_t post_samples = 0; DataSet dataset_template; - P trigger_predicate = {}; + M trigger_matcher = {}; std::deque pending_trigger_windows; // triggers that still didn't receive all their data std::weak_ptr polling_handler = {}; Callback callback; - explicit trigger_listener(P predicate, std::shared_ptr handler, std::size_t pre, std::size_t post, bool do_block) - : block(do_block), pre_samples(pre), post_samples(post), trigger_predicate(std::forward
<P>
(predicate)), polling_handler{ std::move(handler) } {} + explicit trigger_listener(M matcher, std::shared_ptr handler, std::size_t pre, std::size_t post, bool do_block) + : block(do_block), pre_samples(pre), post_samples(post), trigger_matcher(std::move(matcher)), polling_handler{ std::move(handler) } {} - explicit trigger_listener(P predicate, std::size_t pre, std::size_t post, Callback cb) - : pre_samples(pre), post_samples(post), trigger_predicate(std::forward
<P>
(predicate)), callback{ std::forward(cb) } {} + explicit trigger_listener(M matcher, std::size_t pre, std::size_t post, Callback cb) : pre_samples(pre), post_samples(post), trigger_matcher(std::move(matcher)), callback{ std::move(cb) } {} void set_dataset_template(DataSet dst) override { @@ -786,7 +788,7 @@ class data_sink : public node> { void process(std::span history, std::span in_data, std::optional tag_data0) override { - if (tag_data0 && trigger_predicate(tag_t{ 0, *tag_data0 }) == trigger_test_result::Matching) { + if (tag_data0 && trigger_matcher(tag_t{ 0, *tag_data0 }) == trigger_match_result::Matching) { DataSet dataset = dataset_template; dataset.signal_values.reserve(pre_samples + post_samples); // TODO maybe make the circ. buffer smaller but preallocate these @@ -826,20 +828,20 @@ class data_sink : public node> { } }; - template + template struct multiplexed_listener : public abstract_listener { bool block = false; - O observer; + M matcher; DataSet dataset_template; std::optional> pending_dataset; std::size_t maximum_window_size; std::weak_ptr polling_handler = {}; Callback callback; - explicit multiplexed_listener(O observer_, std::size_t max_window_size, Callback cb) : observer(std::move(observer_)), maximum_window_size(max_window_size), callback(cb) {} + explicit multiplexed_listener(M matcher_, std::size_t max_window_size, Callback cb) : matcher(std::move(matcher_)), maximum_window_size(max_window_size), callback(cb) {} - explicit multiplexed_listener(O observer_, std::size_t max_window_size, std::shared_ptr handler, bool do_block) - : block(do_block), observer(std::move(observer_)), maximum_window_size(max_window_size), polling_handler{ std::move(handler) } {} + explicit multiplexed_listener(M matcher_, std::size_t max_window_size, std::shared_ptr handler, bool do_block) + : block(do_block), matcher(std::move(matcher_)), maximum_window_size(max_window_size), polling_handler{ std::move(handler) } {} void set_dataset_template(DataSet dst) override { @@ -875,17 +877,17 @@ class data_sink : public node> { void process(std::span, std::span in_data, std::optional tag_data0) override { if (tag_data0) { - const auto obsr = observer(tag_t{ 0, *tag_data0 }); - if (obsr == trigger_test_result::NotMatching || obsr == trigger_test_result::Matching) { + const auto obsr = matcher(tag_t{ 0, *tag_data0 }); + if (obsr == trigger_match_result::NotMatching || obsr == trigger_match_result::Matching) { if (pending_dataset) { - if (obsr == trigger_test_result::NotMatching) { + if (obsr == trigger_match_result::NotMatching) { pending_dataset->timing_events[0].push_back({ static_cast(pending_dataset->signal_values.size()), *tag_data0 }); } this->publish_dataset(std::move(*pending_dataset)); pending_dataset.reset(); } } - if (obsr == trigger_test_result::Matching) { + if (obsr == trigger_match_result::Matching) { pending_dataset = dataset_template; pending_dataset->signal_values.reserve(maximum_window_size); // TODO might be too much? 
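        // [Editor's note: illustrative sketch, not part of this patch]
        // After the unification, triggered, snapshot and multiplexed acquisition all accept
        // the same TriggerMatcher shape; a minimal stateless matcher is just a lambda. The
        // std::string template argument of std::get is an assumption here, matching the
        // is_trigger helpers used in the tests:
        //
        //     auto is_trigger = [](const tag_t &tag) {
        //         const auto v = tag.get("TYPE");
        //         return v && std::get<std::string>(v->get()) == "TRIGGER"
        //                        ? trigger_match_result::Matching
        //                        : trigger_match_result::Ignore;
        //     };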
pending_dataset->timing_events = { { { 0, *tag_data0 } } }; @@ -921,21 +923,21 @@ class data_sink : public node> { std::size_t pending_samples = 0; }; - template + template struct snapshot_listener : public abstract_listener { bool block = false; std::chrono::nanoseconds time_delay; std::size_t sample_delay = 0; DataSet dataset_template; - P trigger_predicate = {}; + M trigger_matcher = {}; std::deque pending; std::weak_ptr polling_handler = {}; Callback callback; - explicit snapshot_listener(P p, std::chrono::nanoseconds delay, std::shared_ptr poller, bool do_block) - : block(do_block), time_delay(delay), trigger_predicate(std::forward
<P>
(p)), polling_handler{ std::move(poller) } {} + explicit snapshot_listener(M matcher, std::chrono::nanoseconds delay, std::shared_ptr poller, bool do_block) + : block(do_block), time_delay(delay), trigger_matcher(std::move(matcher)), polling_handler{ std::move(poller) } {} - explicit snapshot_listener(P p, std::chrono::nanoseconds delay, Callback cb) : trigger_predicate(std::forward
<P>
(p)), time_delay(std::forward(cb)) {} + explicit snapshot_listener(M matcher, std::chrono::nanoseconds delay, Callback cb) : trigger_matcher(std::move(matcher)), time_delay(std::move(cb)) {} void set_dataset_template(DataSet dst) override { @@ -976,7 +978,7 @@ class data_sink : public node> { void process(std::span, std::span in_data, std::optional tag_data0) override { - if (tag_data0 && trigger_predicate({ 0, *tag_data0 }) == trigger_test_result::Matching) { + if (tag_data0 && trigger_matcher({ 0, *tag_data0 }) == trigger_match_result::Matching) { auto new_pending = pending_snapshot{ *tag_data0, sample_delay, sample_delay }; // make sure pending is sorted by number of pending_samples (insertion might be not at end if sample rate decreased; TODO unless we adapt them in apply_sample_rate, see there) auto rit = std::find_if(pending.rbegin(), pending.rend(), [delay = sample_delay](const auto &other) { return other.pending_samples < delay; }); diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index 02be3587..b80adaf2 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -80,7 +80,7 @@ struct Source : public node> { }; /** - * Example tag observer (TriggerObserver implementation) for the multiplexed listener case (interleaved data). As a toy example, we use + * Example tag matcher (TriggerMatcher implementation) for the multiplexed listener case (interleaved data). As a toy example, we use * data tagged as Year/Month/Day. * * For each of year, month, day, the user can specify whether: @@ -89,16 +89,16 @@ struct Source : public node> { * - -1: Whenever a change between the previous and the current tag is observed, start a new data set (StopAndStart) * - other values >= 0: A new dataset is started when the tag matches, and stopped, when a tag doesn't match * - * (Note that the TriggerObserver is stateful and remembers the last tag seen, other than a stateless TriggerPredicate) + * (Note that this Matcher is stateful and remembers the last tag seen) */ -struct Observer { +struct Matcher { std::optional year; std::optional month; std::optional day; std::optional> last_seen; bool last_matched = false; - explicit Observer(std::optional year_, std::optional month_, std::optional day_) : year(year_), month(month_), day(day_) {} + explicit Matcher(std::optional year_, std::optional month_, std::optional day_) : year(year_), month(month_), day(day_) {} static inline bool same(int x, std::optional other) { @@ -110,36 +110,36 @@ struct Observer { return !same(x, other); } - trigger_test_result + trigger_match_result operator()(const tag_t &tag) { const auto ty = tag.get("YEAR"); const auto tm = tag.get("MONTH"); const auto td = tag.get("DAY"); if (!ty || !tm || !td) { - return trigger_test_result::Ignore; + return trigger_match_result::Ignore; } - const auto tup = std::make_tuple(std::get(ty->get()), std::get(tm->get()), std::get(td->get())); - const auto &[y, m, d] = tup; - const auto ly = last_seen ? std::optional(std::get<0>(*last_seen)) : std::nullopt; - const auto lm = last_seen ? std::optional(std::get<1>(*last_seen)) : std::nullopt; - const auto ld = last_seen ? std::optional(std::get<2>(*last_seen)) : std::nullopt; + const auto tup = std::make_tuple(std::get(ty->get()), std::get(tm->get()), std::get(td->get())); + const auto &[y, m, d] = tup; + const auto ly = last_seen ? std::optional(std::get<0>(*last_seen)) : std::nullopt; + const auto lm = last_seen ? std::optional(std::get<1>(*last_seen)) : std::nullopt; + const auto ld = last_seen ? 
std::optional(std::get<2>(*last_seen)) : std::nullopt; - const auto year_restart = year && *year == -1 && changed(y, ly); - const auto year_matches = !year || *year == -1 || same(y, year); - const auto month_restart = month && *month == -1 && changed(m, lm); - const auto month_matches = !month || *month == -1 || same(m, month); - const auto day_restart = day && *day == -1 && changed(d, ld); - const auto day_matches = !day || *day == -1 || same(d, day); - const auto matches = year_matches && month_matches && day_matches; - const auto restart = year_restart || month_restart || day_restart; + const auto year_restart = year && *year == -1 && changed(y, ly); + const auto year_matches = !year || *year == -1 || same(y, year); + const auto month_restart = month && *month == -1 && changed(m, lm); + const auto month_matches = !month || *month == -1 || same(m, month); + const auto day_restart = day && *day == -1 && changed(d, ld); + const auto day_matches = !day || *day == -1 || same(d, day); + const auto matches = year_matches && month_matches && day_matches; + const auto restart = year_restart || month_restart || day_restart; - trigger_test_result r = trigger_test_result::Ignore; + trigger_match_result r = trigger_match_result::Ignore; if (!matches) { - r = trigger_test_result::NotMatching; + r = trigger_match_result::NotMatching; } else if (!last_matched || restart) { - r = trigger_test_result::Matching; + r = trigger_match_result::Matching; } last_seen = tup; @@ -168,29 +168,29 @@ make_test_tags(tag_t::signed_index_type first_index, tag_t::signed_index_type in } static std::string -to_ascii_art(std::span states) { +to_ascii_art(std::span states) { bool started = false; std::string r; for (auto s : states) { switch (s) { - case trigger_test_result::Matching: + case trigger_match_result::Matching: r += started ? "||#" : "|#"; started = true; break; - case trigger_test_result::NotMatching: + case trigger_match_result::NotMatching: r += started ? "|_" : "_"; started = false; break; - case trigger_test_result::Ignore: r += started ? "#" : "_"; break; + case trigger_match_result::Ignore: r += started ? "#" : "_"; break; } }; return r; } -template +template std::string -run_observer_test(std::span tags, O o) { - std::vector r; +run_matcher_test(std::span tags, M o) { + std::vector r; r.reserve(tags.size()); for (const auto &tag : tags) { r.push_back(o(tag)); @@ -384,7 +384,7 @@ const boost::ut::suite DataSinkTests = [] { auto is_trigger = [](const tag_t &tag) { const auto v = tag.get("TYPE"); - return v && std::get(v->get()) == "TRIGGER" ? trigger_test_result::Matching : trigger_test_result::Ignore; + return v && std::get(v->get()) == "TRIGGER" ? trigger_match_result::Matching : trigger_match_result::Ignore; }; // lookup by signal name @@ -452,7 +452,7 @@ const boost::ut::suite DataSinkTests = [] { auto is_trigger = [](const tag_t &tag) { const auto v = tag.get("TYPE"); - return (v && std::get(v->get()) == "TRIGGER") ? trigger_test_result::Matching : trigger_test_result::Ignore; + return (v && std::get(v->get()) == "TRIGGER") ? 
trigger_match_result::Matching : trigger_match_result::Ignore; }; const auto delay = std::chrono::milliseconds{ 500 }; // sample rate 10000 -> 5000 samples @@ -511,26 +511,26 @@ const boost::ut::suite DataSinkTests = [] { { const auto t = std::span(tags); - // Test the test observer - expect(eq(run_observer_test(t, Observer({}, -1, {})), "|###||###||###||###||###||###"s)); - expect(eq(run_observer_test(t, Observer(-1, {}, {})), "|######||######||######"s)); - expect(eq(run_observer_test(t, Observer(1, {}, {})), "|######|____________"s)); - expect(eq(run_observer_test(t, Observer(1, {}, 2)), "_|#|__|#|_____________"s)); - expect(eq(run_observer_test(t, Observer({}, {}, 1)), "|#|__|#|__|#|__|#|__|#|__|#|__"s)); + // Test the test matcher + expect(eq(run_matcher_test(t, Matcher({}, -1, {})), "|###||###||###||###||###||###"s)); + expect(eq(run_matcher_test(t, Matcher(-1, {}, {})), "|######||######||######"s)); + expect(eq(run_matcher_test(t, Matcher(1, {}, {})), "|######|____________"s)); + expect(eq(run_matcher_test(t, Matcher(1, {}, 2)), "_|#|__|#|_____________"s)); + expect(eq(run_matcher_test(t, Matcher({}, {}, 1)), "|#|__|#|__|#|__|#|__|#|__|#|__"s)); } - const auto observers = std::array{ Observer({}, -1, {}), Observer(-1, {}, {}), Observer(1, {}, {}), Observer(1, {}, 2), Observer({}, {}, 1) }; + const auto matchers = std::array{ Matcher({}, -1, {}), Matcher(-1, {}, {}), Matcher(1, {}, {}), Matcher(1, {}, 2), Matcher({}, {}, 1) }; // Following the patterns above, where each #/_ is 10000 samples - const auto expected = std::array, observers.size()>{ { { 0, 29999, 30000, 59999, 60000, 89999, 90000, 119999, 120000, 149999, 150000, 249999 }, - { 0, 59999, 60000, 119999, 120000, 219999 }, - { 0, 59999 }, - { 10000, 19999, 40000, 49999 }, - { 0, 9999, 30000, 39999, 60000, 69999, 90000, 99999, 120000, 129999, 150000, 159999 } } }; + const auto expected = std::array, matchers.size()>{ { { 0, 29999, 30000, 59999, 60000, 89999, 90000, 119999, 120000, 149999, 150000, 249999 }, + { 0, 59999, 60000, 119999, 120000, 219999 }, + { 0, 59999 }, + { 10000, 19999, 40000, 49999 }, + { 0, 9999, 30000, 39999, 60000, 69999, 90000, 99999, 120000, 129999, 150000, 159999 } } }; std::vector::dataset_poller>> pollers; - for (const auto &o : observers) { - auto poller = data_sink_registry::instance().get_multiplexed_poller(data_sink_query::sink_name("test_sink"), o, 100000, blocking_mode::Blocking); + for (const auto &m : matchers) { + auto poller = data_sink_registry::instance().get_multiplexed_poller(data_sink_query::sink_name("test_sink"), m, 100000, blocking_mode::Blocking); expect(neq(poller, nullptr)); pollers.push_back(poller); } @@ -588,7 +588,7 @@ const boost::ut::suite DataSinkTests = [] { expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); - auto is_trigger = [](const tag_t &tag) { return trigger_test_result::Matching; }; + auto is_trigger = [](const tag_t &tag) { return trigger_match_result::Matching; }; auto poller = data_sink_registry::instance().get_trigger_poller(data_sink_query::sink_name("test_sink"), is_trigger, 3000, 2000, blocking_mode::Blocking); expect(neq(poller, nullptr)); @@ -644,7 +644,7 @@ const boost::ut::suite DataSinkTests = [] { expect(eq(connection_result_t::SUCCESS, flow_graph.connect<"out">(src).to<"in">(sink))); - auto is_trigger = [](const tag_t &) { return trigger_test_result::Matching; }; + auto is_trigger = [](const tag_t &) { return trigger_match_result::Matching; }; std::mutex m; std::vector received_data; From 
150304ea42475aa51ef317ceda1641188a09ed74 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Wed, 28 Jun 2023 13:58:37 +0200 Subject: [PATCH 56/64] Call stop() on destruction --- include/data_sink.hpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index 0a3c976e..75cb36b6 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -394,7 +394,10 @@ class data_sink : public node> { data_sink() { data_sink_registry::instance().register_sink(this); } - ~data_sink() { data_sink_registry::instance().unregister_sink(this); } + ~data_sink() { + stop(); + data_sink_registry::instance().unregister_sink(this); + } void init(const property_map & /*old_settings*/, const property_map &new_settings) { From 3603b6d9022ecaf376e760d6ad2fee254d512700 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Wed, 28 Jun 2023 14:00:59 +0200 Subject: [PATCH 57/64] Add TODOs --- include/data_sink.hpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index 75cb36b6..89b44a4d 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -331,6 +331,7 @@ class data_sink : public node> { IN in; struct poller { + // TODO consider whether reusing port here makes sense gr::circular_buffer buffer = gr::circular_buffer(_listener_buffer_size); decltype(buffer.new_reader()) reader = buffer.new_reader(); decltype(buffer.new_writer()) writer = buffer.new_writer(); @@ -572,7 +573,11 @@ class data_sink : public node> { if (new_size <= _history.capacity()) { return; } - // TODO transitional, do not reallocate/copy, but create a shared buffer with size N, + // TODO Important! + // - History size must be limited to avoid users causing OOM + // - History should shrink again + + // transitional, do not reallocate/copy, but create a shared buffer with size N, // and a per-listener history buffer where more than N samples is needed. 
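        // [Editor's note: illustrative sketch, not part of this patch]
        // For orientation, the shared history interacts with the pre-sample handling through
        // calls that already exist in this file, roughly:
        //
        //     registration:  ensure_history_size(pre_samples);                    // grow shared buffer
        //     work():        _history.push_back_bulk(in_data.last(to_write));     // keep newest samples
        //     on trigger:    history.last(std::min(pre_samples, history.size())); // copy pre-samples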
auto new_history = gr::history_buffer(std::max(new_size, _history.capacity())); new_history.push_back_bulk(_history.begin(), _history.end()); From 3f92af842f033a85244b941be00012dc1affa766 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Wed, 28 Jun 2023 14:03:21 +0200 Subject: [PATCH 58/64] cleanup detail --- include/data_sink.hpp | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index 89b44a4d..c343617c 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -242,18 +242,8 @@ class data_sink_registry { }; namespace detail { -template -std::span -find_matching_prefix(std::span s, P predicate) { - const auto nm = std::find_if_not(s.begin(), s.end(), predicate); - if (nm == s.end()) { - return s; - } - return s.first(std::distance(s.begin(), nm)); -} - template -bool +inline bool copy_span(std::span src, std::span dst) { assert(src.size() <= dst.size()); if (src.size() > dst.size()) { @@ -264,7 +254,7 @@ copy_span(std::span src, std::span dst) { } template -std::optional +inline std::optional get(const property_map &m, const std::string_view &key) { const auto it = m.find(key); if (it == m.end()) { From df8a52f86d20bf63eecc182a7aea1dd812d553bc Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Wed, 28 Jun 2023 14:42:35 +0200 Subject: [PATCH 59/64] Optionally pass sink to streaming callback --- include/data_sink.hpp | 53 +++++++++++++++++++++++-------------------- test/qa_data_sink.cpp | 12 +++++++--- 2 files changed, 38 insertions(+), 27 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index c343617c..ee5126aa 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -21,6 +21,9 @@ enum class trigger_match_result { Ignore ///< Ignore tag }; +template +class data_sink; + // Until clang-format can handle concepts // clang-format off @@ -28,10 +31,10 @@ template concept DataSetCallback = std::invocable>; /** - * Stream callback functions receive the span of data, and optionally the tags associated with it. + * Stream callback functions receive the span of data, with optional tags and reference to the sink. */ template -concept StreamCallback = std::invocable> || std::invocable, std::span>; +concept StreamCallback = std::invocable> || std::invocable, std::span> || std::invocable, std::span, const data_sink&>; /** * Used for testing whether a tag should trigger data acquisition. 
@@ -82,9 +85,6 @@ concept TriggerMatcher = requires(T matcher, tag_t tag) { // clang-format on -template -class data_sink; - struct data_sink_query { std::optional _sink_name; std::optional _signal_name; @@ -402,7 +402,7 @@ class data_sink : public node> { std::lock_guard lg(_listener_mutex); const auto block = block_mode == blocking_mode::Blocking; auto handler = std::make_shared(); - add_listener(std::make_unique>(handler, block), block); + add_listener(std::make_unique>(handler, block, *this), block); return handler; } @@ -440,7 +440,7 @@ class data_sink : public node> { template Callback> void register_streaming_callback(std::size_t max_chunk_size, Callback callback) { - add_listener(std::make_unique>(max_chunk_size, std::move(callback)), false); + add_listener(std::make_unique>(max_chunk_size, std::move(callback), *this), false); } template Callback> @@ -611,10 +611,12 @@ class data_sink : public node> { template struct continuous_listener : public abstract_listener { static constexpr auto has_callback = !std::is_same_v; - static constexpr auto callback_takes_tags = std::is_invocable_v, std::span>; + static constexpr auto callback_takes_tags = std::is_invocable_v, std::span> + || std::is_invocable_v, std::span, const data_sink &>; - bool block = false; - std::size_t samples_written = 0; + const data_sink &parent_sink; + bool block = false; + std::size_t samples_written = 0; // callback-only std::size_t buffer_fill = 0; @@ -626,9 +628,20 @@ class data_sink : public node> { Callback callback; - explicit continuous_listener(std::size_t max_chunk_size, Callback c) : buffer(max_chunk_size), callback{ std::forward(c) } {} + explicit continuous_listener(std::size_t max_chunk_size, Callback c, const data_sink &parent) : parent_sink(parent), buffer(max_chunk_size), callback{ std::forward(c) } {} - explicit continuous_listener(std::shared_ptr poller, bool do_block) : block(do_block), polling_handler{ std::move(poller) } {} + explicit continuous_listener(std::shared_ptr poller, bool do_block, const data_sink &parent) : parent_sink(parent), block(do_block), polling_handler{ std::move(poller) } {} + + inline void + call_callback(std::span data, std::span tags) { + if constexpr (std::is_invocable_v, std::span, const data_sink &>) { + callback(std::move(data), std::move(tags), parent_sink); + } else if constexpr (std::is_invocable_v, std::span>) { + callback(std::move(data), std::move(tags)); + } else { + callback(std::move(data)); + } + } void process(std::span, std::span data, std::optional tag_data0) override { @@ -647,11 +660,7 @@ class data_sink : public node> { } buffer_fill += n; if (buffer_fill == buffer.size()) { - if constexpr (callback_takes_tags) { - callback(std::span(buffer), std::span(tag_buffer)); - } else { - callback(std::span(buffer)); - } + call_callback(std::span(buffer), std::span(tag_buffer)); samples_written += buffer.size(); buffer_fill = 0; tag_buffer.clear(); @@ -668,7 +677,7 @@ class data_sink : public node> { tags.push_back({ 0, std::move(*tag_data0) }); tag_data0.reset(); } - callback(data.first(buffer.size()), std::span(tags)); + call_callback(data.first(buffer.size()), std::span(tags)); } else { callback(data.first(buffer.size())); } @@ -714,12 +723,8 @@ class data_sink : public node> { stop() override { if constexpr (has_callback) { if (buffer_fill > 0) { - if constexpr (callback_takes_tags) { - callback(std::span(buffer).first(buffer_fill), std::span(tag_buffer)); - tag_buffer.clear(); - } else { - callback(std::span(buffer).first(buffer_fill)); - } + 
call_callback(std::span(buffer).first(buffer_fill), std::span(tag_buffer)); + tag_buffer.clear(); buffer_fill = 0; } } else { diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index b80adaf2..1a7dd7b2 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -281,8 +281,14 @@ const boost::ut::suite DataSinkTests = [] { } }; + auto callback_with_tags_and_sink = [&sink](std::span, std::span, const data_sink &passed_sink) { + expect(eq(passed_sink.name(), "test_sink"s)); + expect(eq(sink.unique_name, passed_sink.unique_name)); + }; + expect(data_sink_registry::instance().register_streaming_callback(data_sink_query::sink_name("test_sink"), chunk_size, callback)); expect(data_sink_registry::instance().register_streaming_callback(data_sink_query::sink_name("test_sink"), chunk_size, callback_with_tags)); + expect(data_sink_registry::instance().register_streaming_callback(data_sink_query::sink_name("test_sink"), chunk_size, callback_with_tags_and_sink)); fair::graph::scheduler::simple sched{ std::move(flow_graph) }; sched.run_and_wait(); @@ -336,11 +342,11 @@ const boost::ut::suite DataSinkTests = [] { while (!seen_finished) { seen_finished = poller->finished; while (poller->process([&received, &received_tags](const auto &data, const auto &tags_) { - auto tags = std::vector(tags_.begin(), tags_.end()); - for (auto &t : tags) { + auto rtags = std::vector(tags_.begin(), tags_.end()); + for (auto &t : rtags) { t.index += static_cast(received.size()); } - received_tags.insert(received_tags.end(), tags.begin(), tags.end()); + received_tags.insert(received_tags.end(), rtags.begin(), rtags.end()); received.insert(received.end(), data.begin(), data.end()); })) { } From 9fb8158b5f7f5813c8fcaece7f3fb574895e3d87 Mon Sep 17 00:00:00 2001 From: Frank Osterfeld Date: Wed, 28 Jun 2023 16:12:08 +0200 Subject: [PATCH 60/64] Basic tests for multiplexed/callback and snapshot/callback --- include/data_sink.hpp | 2 +- test/qa_data_sink.cpp | 33 +++++++++++++++++++++++---------- 2 files changed, 24 insertions(+), 11 deletions(-) diff --git a/include/data_sink.hpp b/include/data_sink.hpp index ee5126aa..d432b8bc 100644 --- a/include/data_sink.hpp +++ b/include/data_sink.hpp @@ -940,7 +940,7 @@ class data_sink : public node> { explicit snapshot_listener(M matcher, std::chrono::nanoseconds delay, std::shared_ptr poller, bool do_block) : block(do_block), time_delay(delay), trigger_matcher(std::move(matcher)), polling_handler{ std::move(poller) } {} - explicit snapshot_listener(M matcher, std::chrono::nanoseconds delay, Callback cb) : trigger_matcher(std::move(matcher)), time_delay(std::move(cb)) {} + explicit snapshot_listener(M matcher, std::chrono::nanoseconds delay, Callback cb) : time_delay(delay), trigger_matcher(std::move(matcher)), callback(std::move(cb)) {} void set_dataset_template(DataSet dst) override { diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp index 1a7dd7b2..6c7e9b9e 100644 --- a/test/qa_data_sink.cpp +++ b/test/qa_data_sink.cpp @@ -438,7 +438,7 @@ const boost::ut::suite DataSinkTests = [] { expect(eq(poller->drop_count.load(), 0)); }; - "blocking polling snapshot mode"_test = [] { + "blocking snapshot mode"_test = [] { constexpr std::int32_t n_samples = 200000; graph flow_graph; @@ -465,6 +465,12 @@ const boost::ut::suite DataSinkTests = [] { auto poller = data_sink_registry::instance().get_snapshot_poller(data_sink_query::sink_name("test_sink"), is_trigger, delay, blocking_mode::Blocking); expect(neq(poller, nullptr)); + std::vector received_data_cb; + + auto callback = 
+        auto callback = [&received_data_cb](const auto &dataset) { received_data_cb.insert(received_data_cb.end(), dataset.signal_values.begin(), dataset.signal_values.end()); };
+
+        expect(data_sink_registry::instance().register_snapshot_callback(data_sink_query::sink_name("test_sink"), is_trigger, delay, callback));
+
        auto poller_result = std::async([poller] {
            std::vector received_data;
@@ -497,12 +503,12 @@ const boost::ut::suite DataSinkTests = [] {
        sink.stop(); // TODO the scheduler should call this
 
        const auto received_data = poller_result.get();
-
+        expect(eq(received_data_cb, received_data));
        expect(eq(received_data, std::vector{ 8000, 185000 }));
        expect(eq(poller->drop_count.load(), 0));
    };
 
-    "blocking polling multiplexed mode"_test = [] {
+    "blocking multiplexed mode"_test = [] {
        const auto tags = make_test_tags(0, 10000);
 
        const std::int32_t n_samples = tags.size() * 10000 + 100000;
@@ -533,15 +539,21 @@ const boost::ut::suite DataSinkTests = [] {
                                                         { 0, 59999 },
                                                         { 10000, 19999, 40000, 49999 },
                                                         { 0, 9999, 30000, 39999, 60000, 69999, 90000, 99999, 120000, 129999, 150000, 159999 } } };
 
-        std::vector::dataset_poller>> pollers;
+        std::array::dataset_poller>, matchers.size()> pollers;
 
-        for (const auto &m : matchers) {
-            auto poller = data_sink_registry::instance().get_multiplexed_poller(data_sink_query::sink_name("test_sink"), m, 100000, blocking_mode::Blocking);
-            expect(neq(poller, nullptr));
-            pollers.push_back(poller);
-        }
+        std::vector>> results;
+        std::array, matchers.size()> results_cb;
 
-        std::vector>> results;
+        for (std::size_t i = 0; i < results_cb.size(); ++i) {
+            auto callback = [&r = results_cb[i]](const auto &dataset) {
+                r.push_back(dataset.signal_values.front());
+                r.push_back(dataset.signal_values.back());
+            };
+            expect(eq(data_sink_registry::instance().register_multiplexed_callback(data_sink_query::sink_name("test_sink"), Matcher(matchers[i]), 100000, callback), true));
+
+            pollers[i] = data_sink_registry::instance().get_multiplexed_poller(data_sink_query::sink_name("test_sink"), Matcher(matchers[i]), 100000, blocking_mode::Blocking);
+            expect(neq(pollers[i], nullptr));
+        }
 
        for (std::size_t i = 0; i < pollers.size(); ++i) {
            auto f = std::async([poller = pollers[i]] {
@@ -575,6 +587,7 @@ const boost::ut::suite DataSinkTests = [] {
 
        for (std::size_t i = 0; i < results.size(); ++i) {
            expect(eq(results[i].get(), expected[i]));
+            expect(eq(results_cb[i], expected[i]));
        }
    };
 

From 8c299842aa043cda5dffdec61097e098bcc85b26 Mon Sep 17 00:00:00 2001
From: Frank Osterfeld
Date: Sat, 1 Jul 2023 14:23:44 +0200
Subject: [PATCH 61/64] Make test source not produce single samples

---
 test/qa_data_sink.cpp | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp
index 6c7e9b9e..488244ff 100644
--- a/test/qa_data_sink.cpp
+++ b/test/qa_data_sink.cpp
@@ -56,9 +56,19 @@ struct Source : public node> {
 
    constexpr std::int64_t
    available_samples(const Source &) noexcept {
-        const auto ret = static_cast(n_samples_max - n_samples_produced);
-        // forcing one sample, at a time, see below
-        return ret > 0 ? 1 : -1; // '-1' -> DONE, produced enough samples
+        // TODO unify with other test sources
+        // split into chunks so that we have a single tag at index 0 (or none)
+        auto ret = static_cast(n_samples_max - n_samples_produced);
+        if (next_tag < tags.size()) {
+            if (n_samples_produced < tags[next_tag].index) {
+                ret = tags[next_tag].index - n_samples_produced;
+            } else if (next_tag + 1 < tags.size()) {
+                // tag at first sample? then read up until before next tag
+                ret = tags[next_tag+1].index - n_samples_produced;
+            }
+        }
+
+        return ret > 0 ? ret : -1; // '-1' -> DONE, produced enough samples
    }
 
    T

From 7181a197c927757f4b73d74fdd7267a2c89b01fd Mon Sep 17 00:00:00 2001
From: Frank Osterfeld
Date: Sat, 1 Jul 2023 14:38:12 +0200
Subject: [PATCH 62/64] Fix off-by-one error in continuous/callback chunk-ification

---
 include/data_sink.hpp | 2 +-
 test/qa_data_sink.cpp | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/include/data_sink.hpp b/include/data_sink.hpp
index d432b8bc..141425c3 100644
--- a/include/data_sink.hpp
+++ b/include/data_sink.hpp
@@ -670,7 +670,7 @@ class data_sink : public node> {
            }
 
            // send out complete chunks directly
-            while (data.size() > buffer.size()) {
+            while (data.size() >= buffer.size()) {
                if constexpr (callback_takes_tags) {
                    std::vector tags;
                    if (tag_data0) {
diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp
index 488244ff..981405dc 100644
--- a/test/qa_data_sink.cpp
+++ b/test/qa_data_sink.cpp
@@ -272,8 +272,8 @@ const boost::ut::suite DataSinkTests = [] {
            }
 
            for (const auto &tag : tags) {
-                ge(tag.index, static_cast(samples_seen2));
-                lt(tag.index, samples_seen2 + buffer.size());
+                expect(ge(tag.index, 0));
+                expect(lt(tag.index, buffer.size()));
            }
 
            auto lg = std::lock_guard{ m2 };

From 5c1d76648afcf7296609fac22840ca94bbce7698 Mon Sep 17 00:00:00 2001
From: Frank Osterfeld
Date: Sat, 1 Jul 2023 14:59:59 +0200
Subject: [PATCH 63/64] don't copy history if none is needed

as history_buffer has a minimum size of 1, use an optional to indicate
when we don't need history at all.
---
 include/data_sink.hpp | 24 ++++++++++++++----------
 1 file changed, 14 insertions(+), 10 deletions(-)

diff --git a/include/data_sink.hpp b/include/data_sink.hpp
index 141425c3..d7d84548 100644
--- a/include/data_sink.hpp
+++ b/include/data_sink.hpp
@@ -308,7 +308,7 @@ class data_sink : public node> {
    static constexpr std::size_t _listener_buffer_size = 65536;
    std::deque> _listeners;
    std::mutex _listener_mutex;
-    gr::history_buffer _history = gr::history_buffer(1);
+    std::optional> _history;
    bool _has_signal_info_from_settings = false;
 
 public:
@@ -487,15 +487,16 @@ class data_sink : public node> {
 
        {
            std::lock_guard lg(_listener_mutex); // TODO review/profile if a lock-free data structure should be used here
-            const auto history_view = _history.get_span(0);
+            const auto history_view = _history ? _history->get_span(0) : std::span();
            std::erase_if(_listeners, [](const auto &l) { return l->expired; });
            for (auto &listener : _listeners) {
                listener->process(history_view, in_data, tagData);
            }
-
-            // store potential pre-samples for triggers at the beginning of the next chunk
-            const auto to_write = std::min(in_data.size(), _history.capacity());
-            _history.push_back_bulk(in_data.last(to_write));
+            if (_history) {
+                // store potential pre-samples for triggers at the beginning of the next chunk
+                const auto to_write = std::min(in_data.size(), _history->capacity());
+                _history->push_back_bulk(in_data.last(to_write));
+            }
        }
 
        return work_return_t::OK;
@@ -560,7 +561,8 @@ class data_sink : public node> {
 
    void
    ensure_history_size(std::size_t new_size) {
-        if (new_size <= _history.capacity()) {
+        const auto old_size = _history ? _history->capacity() : std::size_t{0};
+        if (new_size <= old_size) {
            return;
        }
        // TODO Important!
@@ -569,9 +571,11 @@ class data_sink : public node> {
        // transitional, do not reallocate/copy, but create a shared buffer with size N,
        // and a per-listener history buffer where more than N samples is needed.
-        auto new_history = gr::history_buffer(std::max(new_size, _history.capacity()));
-        new_history.push_back_bulk(_history.begin(), _history.end());
-        std::swap(_history, new_history);
+        auto new_history = gr::history_buffer(new_size);
+        if (_history) {
+            new_history.push_back_bulk(_history->begin(), _history->end());
+        }
+        _history = new_history;
    }
 
    void

From 64dfd4086d57a217324f8a51aff7790a4c219fcf Mon Sep 17 00:00:00 2001
From: Frank Osterfeld
Date: Mon, 3 Jul 2023 10:45:59 +0200
Subject: [PATCH 64/64] Fix available_samples signature

std::size_t is 32 bit in wasm, and node checks for the return value, which
must match std::size_t's size, otherwise it silently doesn't call it, which
led execution to hang in wasm.
---
 test/qa_data_sink.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/test/qa_data_sink.cpp b/test/qa_data_sink.cpp
index 981405dc..2bfc9182 100644
--- a/test/qa_data_sink.cpp
+++ b/test/qa_data_sink.cpp
@@ -54,11 +54,11 @@ struct Source : public node> {
        fair::graph::publish_tag(out, { { "n_samples_max", n_samples_max } }, n_tag_offset);
    }
 
-    constexpr std::int64_t
+    constexpr std::make_signed_t
    available_samples(const Source &) noexcept {
        // TODO unify with other test sources
        // split into chunks so that we have a single tag at index 0 (or none)
-        auto ret = static_cast(n_samples_max - n_samples_produced);
+        auto ret = static_cast>(n_samples_max - n_samples_produced);
        if (next_tag < tags.size()) {
            if (n_samples_produced < tags[next_tag].index) {
                ret = tags[next_tag].index - n_samples_produced;