diff --git a/.readthedocs.yaml b/.readthedocs.yaml
new file mode 100644
index 0000000000..385c976c77
--- /dev/null
+++ b/.readthedocs.yaml
@@ -0,0 +1,35 @@
+# Read the Docs configuration file for Sphinx projects
+# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
+
+# Required
+version: 2
+
+# Set the OS, Python version and other tools you might need
+build:
+  os: ubuntu-20.04
+  tools:
+    python: "3.8"
+    # You can also specify other tool versions:
+    # nodejs: "20"
+    # rust: "1.70"
+    # golang: "1.20"
+
+# Build documentation in the "docs/" directory with Sphinx
+sphinx:
+  configuration: docs/conf.py
+  # You can configure Sphinx to use a different builder, for instance use the dirhtml builder for simpler URLs
+  # builder: "dirhtml"
+  # Fail on all warnings to avoid broken references
+  # fail_on_warning: true
+
+# Optionally build your docs in additional formats such as PDF and ePub
+# formats:
+#   - pdf
+#   - epub
+
+# Optional but recommended, declare the Python requirements required
+# to build your documentation
+# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
+python:
+  install:
+    - requirements: docs/requirements.txt
diff --git a/ADOPTERS.MD b/ADOPTERS.MD
new file mode 100644
index 0000000000..93494da197
--- /dev/null
+++ b/ADOPTERS.MD
@@ -0,0 +1,18 @@
+# Fledge Adopters
+
+- Beckhoff - PLC Vendor
+- Dianomic - IIoT Software
+- Flir - IR/Gas Cameras
+- General Atomics - Predator Drone
+- Google - Search-ML-Cloud-TPUs
+- JEA - Energy/Water Company
+- [Motorsports.ai](http://motorsports.ai/) - Racing Digital Twins
+- Nexcom - Industrial Gateways
+- Nokia - Wireless Communications
+- OSIsoft - Data Infrastructure
+- Rovisys - Industrial SI
+- Transpara - HMI for Process Manufacturers
+- Wago - PLC Vendor
+- Zededa - VMs for IoT
+- RTE France - T&D
+- Nueman Aluminium
\ No newline at end of file
diff --git a/C/common/asset_tracking.cpp b/C/common/asset_tracking.cpp
index 4329c540d0..2f6e9dedc7 100644
--- a/C/common/asset_tracking.cpp
+++ b/C/common/asset_tracking.cpp
@@ -5,17 +5,27 @@
  *
  * Released under the Apache 2.0 Licence
  *
- * Author: Amandeep Singh Arora
+ * Author: Amandeep Singh Arora, Massimiliano Pinto
  */
 #include
 #include
+#include
 
 using namespace std;
 
 AssetTracker *AssetTracker::instance = 0;
 
+/**
+ * Worker thread entry point
+ */
+static void worker(void *arg)
+{
+	AssetTracker *tracker = (AssetTracker *)arg;
+	tracker->workerThread();
+}
+
 /**
  * Get asset tracker singleton instance for the current south service
  *
@@ -36,6 +46,83 @@ AssetTracker::AssetTracker(ManagementClient *mgtClient, string service) :
 	m_mgtClient(mgtClient), m_service(service)
 {
 	instance = this;
+	m_shutdown = false;
+	m_storageClient = NULL;
+	m_thread = new thread(worker, this);
+
+	try {
+		// Find out the name of the fledge service
+		ConfigCategory category = mgtClient->getCategory("service");
+		if (category.itemExists("name"))
+		{
+			m_fledgeName = category.getValue("name");
+		}
+	} catch (exception& ex) {
+		Logger::getLogger()->error("Unable to fetch the service category, %s", ex.what());
+	}
+
+	try {
+		// Get a handle on the storage layer
+		ServiceRecord storageRecord("Fledge Storage");
+		if (!m_mgtClient->getService(storageRecord))
+		{
+			Logger::getLogger()->fatal("Unable to find storage service");
+			return;
+		}
+		Logger::getLogger()->info("Connect to storage on %s:%d",
+				storageRecord.getAddress().c_str(),
+				storageRecord.getPort());
+
+
+		m_storageClient = new StorageClient(storageRecord.getAddress(),
+						storageRecord.getPort());
+	} catch (exception& ex) {
+		Logger::getLogger()->error("Failed to create storage client: %s", ex.what());
+	}
+
+}
+
+/**
+ * Destructor for the asset tracker. We must make sure any pending
+ * tuples are written out before the asset tracker is destroyed.
+ */
+AssetTracker::~AssetTracker()
+{
+	m_shutdown = true;
+	// Signal the worker thread to flush the queue
+	{
+		unique_lock lck(m_mutex);
+		m_cv.notify_all();
+	}
+	while (m_pending.size())
+	{
+		// Wait for pending queue to drain
+		this_thread::sleep_for(chrono::milliseconds(10));
+	}
+	if (m_thread)
+	{
+		m_thread->join();
+		delete m_thread;
+		m_thread = NULL;
+	}
+
+	if (m_storageClient)
+	{
+		delete m_storageClient;
+		m_storageClient = NULL;
+	}
+
+	for (auto& item : assetTrackerTuplesCache)
+	{
+		delete item;
+	}
+	assetTrackerTuplesCache.clear();
+
+	for (auto& store : storageAssetTrackerTuplesCache)
+	{
+		delete store.first;
+	}
+	storageAssetTrackerTuplesCache.clear();
 }
 
 /**
@@ -52,10 +139,7 @@ void AssetTracker::populateAssetTrackingCache(string /*plugin*/, string /*event*
 	std::vector& vec = m_mgtClient->getAssetTrackingTuples(m_service);
 	for (AssetTrackingTuple* & rec : vec)
 	{
-		assetTrackerTuplesCache.insert(rec);
-
-		Logger::getLogger()->debug("Added asset tracker tuple to cache: '%s'",
-			rec->assetToString().c_str());
+		assetTrackerTuplesCache.emplace(rec);
 	}
 	delete (&vec);
 }
@@ -86,6 +170,12 @@ bool AssetTracker::checkAssetTrackingCache(AssetTrackingTuple& tuple)
 	return true;
 }
 
+/**
+ * Lookup tuple in the asset tracker cache
+ *
+ * @param tuple	The tuple to lookup
+ * @return	NULL if the tuple is not in the cache or the tuple from the cache
+ */
 AssetTrackingTuple* AssetTracker::findAssetTrackingCache(AssetTrackingTuple& tuple)
 {
 	AssetTrackingTuple *ptr = &tuple;
@@ -110,15 +200,13 @@ void AssetTracker::addAssetTrackingTuple(AssetTrackingTuple& tuple)
 	std::unordered_set::const_iterator it = assetTrackerTuplesCache.find(&tuple);
 	if (it == assetTrackerTuplesCache.end())
 	{
-		bool rv = m_mgtClient->addAssetTrackingTuple(tuple.m_serviceName, tuple.m_pluginName, tuple.m_assetName, tuple.m_eventName);
-		if (rv) // insert into cache only if DB operation succeeded
-		{
-			AssetTrackingTuple *ptr = new AssetTrackingTuple(tuple);
-			assetTrackerTuplesCache.insert(ptr);
-			Logger::getLogger()->info("addAssetTrackingTuple(): Added tuple to cache: '%s'", tuple.assetToString().c_str());
-		}
-		else
-			Logger::getLogger()->error("addAssetTrackingTuple(): Failed to insert asset tracking tuple into DB: '%s'", tuple.assetToString().c_str());
+		AssetTrackingTuple *ptr = new AssetTrackingTuple(tuple);
+
+		assetTrackerTuplesCache.emplace(ptr);
+
+		queue(ptr);
+
+		Logger::getLogger()->debug("addAssetTrackingTuple(): Added tuple to cache: '%s'", tuple.assetToString().c_str());
 	}
 }
 
@@ -145,7 +233,7 @@ void AssetTracker::addAssetTrackingTuple(string plugin, string asset, string eve
 }
 
 /**
- * Return the name of the service responsible for particulr event of the named asset
+ * Return the name of the service responsible for particular event of the named asset
  *
  * @param event	The event of interest
  * @param asset	The asset we are interested in
@@ -182,3 +270,608 @@ string AssetTracker::getService(const std::string& event, const std::string& ass
 		throw runtime_error("Fetching service for asset not yet implemented");
 	}
 }
+
+/**
+ * Constructor for an asset tracking tuple table
+ */
+AssetTrackingTable::AssetTrackingTable()
+{
+}
+
+/**
+ * Destructor for asset tracking tuple table
+ */
+AssetTrackingTable::~AssetTrackingTable()
+{
+	for (auto t : m_tuples)
+	{
+		delete t.second;
+	}
+}
+
+/**
+ * Add a tuple to an asset tracking table
+ *
+ * @param tuple	Pointer to the asset tracking tuple to add
+ */
+void AssetTrackingTable::add(AssetTrackingTuple *tuple)
+{
+	auto ret = m_tuples.insert(pair(tuple->getAssetName(), tuple));
+	if (ret.second == false)
+		delete tuple;	// Already exists
+}
+
+/**
+ * Find the named asset tuple and return a pointer to the asset
+ *
+ * @param name	The name of the asset to lookup
+ * @return AssetTrackingTuple*	The matching tuple or NULL
+ */
+AssetTrackingTuple *AssetTrackingTable::find(const string& name)
+{
+	auto ret = m_tuples.find(name);
+	if (ret != m_tuples.end())
+		return ret->second;
+	return NULL;
+}
+
+/**
+ * Remove an asset tracking tuple from the table
+ */
+void AssetTrackingTable::remove(const string& name)
+{
+	auto ret = m_tuples.find(name);
+	if (ret != m_tuples.end())
+	{
+		delete ret->second;	// Free the tuple before erasing the entry
+		m_tuples.erase(ret);
+	}
+}
+
+/**
+ * Queue an asset tuple for writing to the database.
+ */
+void AssetTracker::queue(TrackingTuple *tuple)
+{
+	unique_lock lck(m_mutex);
+	m_pending.emplace(tuple);
+	m_cv.notify_all();
+}
+
+/**
+ * The worker thread that will flush any pending asset tuples to
+ * the database.
+ */
+void AssetTracker::workerThread()
+{
+	unique_lock lck(m_mutex);
+	while (m_pending.empty() && m_shutdown == false)
+	{
+		m_cv.wait_for(lck, chrono::milliseconds(500));
+		processQueue();
+	}
+	// Process any items left in the queue at shutdown
+	processQueue();
+}
+
+/**
+ * Process the queue of asset tracking tuples
+ */
+void AssetTracker::processQueue()
+{
+vector values;
+static bool warned = false;
+
+	while (!m_pending.empty())
+	{
+		// Get first element as TrackingTuple class
+		TrackingTuple *tuple = m_pending.front();
+
+		// Write the tuple - ideally we would like a bulk update here or to go direct to the
+		// database. However we need the Fledge service name for that, which is now in
+		// the member variable m_fledgeName
+
+		bool warn = warned;
+		// Call class specialised processData routine:
+		// - 1 Insert asset tracker data via Fledge API as fallback
+		// or
+		// - get values for direct DB operation
+
+		InsertValues iValue = tuple->processData(m_storageClient != NULL,
+							m_mgtClient,
+							warn,
+							m_fledgeName);
+		warned = warn;
+
+		// Bulk DB insert when queue is empty
+		if (iValue.size() > 0)
+		{
+			values.push_back(iValue);
+		}
+
+		// Remove element
+		m_pending.pop();
+	}
+
+	// Queue processed, bulk direct DB data insert could be done
+	if (m_storageClient && values.size() > 0)
+	{
+		// Bulk DB insert
+		int n_rows = m_storageClient->insertTable("asset_tracker", values);
+		if (n_rows != values.size())
+		{
+			Logger::getLogger()->warn("The asset tracker failed to insert all records %d of %d inserted",
+					n_rows, values.size());
+		}
+	}
+}
+
+/**
+ * Fetch all storage asset tracking tuples from DB and populate local cache
+ *
+ * Return the vector of deprecated asset names
+ *
+ */
+void AssetTracker::populateStorageAssetTrackingCache()
+{
+
+	try {
+		std::vector& vec =
+			(std::vector&) m_mgtClient->getStorageAssetTrackingTuples(m_service);
+
+		for (StorageAssetTrackingTuple* & rec : vec)
+		{
+			set setOfDPs = getDataPointsSet(rec->m_datapoints);
+			if (setOfDPs.size() == 0)
+			{
+				Logger::getLogger()->warn("%s:%d Datapoints unavailable for service %s ",
+						__FUNCTION__,
+						__LINE__,
+						m_service.c_str());
+			}
+			// Add item into cache
+			storageAssetTrackerTuplesCache.emplace(rec, setOfDPs);
+		}
+		delete (&vec);
+	}
+	catch (...)
+ { + Logger::getLogger()->error("%s:%d Failed to populate storage asset " \ + "tracking tuples' cache", + __FUNCTION__, + __LINE__); + return; + } + + return; +} + +//This function takes a string of datapoints in comma-separated format and returns +//set of string datapoint values +std::set AssetTracker::getDataPointsSet(std::string strDatapoints) +{ + std::set tokens; + stringstream st(strDatapoints); + std::string temp; + + while(getline(st, temp, ',')) + { + tokens.insert(temp); + } + + return tokens; +} + +/** + * Return Plugin Information in the Fledge configuration + * + * @return bool True if the plugin info could be obtained + */ +bool AssetTracker::getFledgeConfigInfo() +{ + Logger::getLogger()->error("StorageAssetTracker::getPluginInfo start"); + try { + string url = "/fledge/category/service"; + if (!m_mgtClient) + { + Logger::getLogger()->error("%s:%d, m_mgtClient Ptr is NULL", + __FUNCTION__, + __LINE__); + return false; + } + + auto res = m_mgtClient->getHttpClient()->request("GET", url.c_str()); + Document doc; + string response = res->content.string(); + doc.Parse(response.c_str()); + if (doc.HasParseError()) + { + bool httpError = (isdigit(response[0]) && + isdigit(response[1]) && + isdigit(response[2]) && + response[3]==':'); + Logger::getLogger()->error("%s fetching service record: %s\n", + httpError?"HTTP error while":"Failed to parse result of", + response.c_str()); + return false; + } + else if (doc.HasMember("message")) + { + Logger::getLogger()->error("Failed to fetch /fledge/category/service %s.", + doc["message"].GetString()); + return false; + } + else + { + Value& serviceName = doc["name"]; + if (!serviceName.IsObject()) + { + Logger::getLogger()->error("%s:%d, serviceName is not an object", + __FUNCTION__, + __LINE__); + return false; + } + + if (!serviceName.HasMember("value")) + { + Logger::getLogger()->error("%s:%d, serviceName has no member value", + __FUNCTION__, + __LINE__); + return false; + + } + Value& serviceVal = serviceName["value"]; + if ( !serviceVal.IsString()) + { + Logger::getLogger()->error("%s:%d, serviceVal is not a string", + __FUNCTION__, + __LINE__); + return false; + } + + m_fledgeName = serviceVal.GetString(); + Logger::getLogger()->error("%s:%d, m_plugin value = %s", + __FUNCTION__, + __LINE__, + m_fledgeName.c_str()); + return true; + } + + } catch (const SimpleWeb::system_error &e) { + Logger::getLogger()->error("Get service failed %s.", e.what()); + return false; + } + + return false; +} + +/** This function takes a StorageAssetTrackingTuple pointer and searches for + * it in cache, if found then returns its Deprecated status + * + * @param ptr StorageAssetTrackingTuple* , as key in cache (map) + * @return bool Deprecation status + */ +bool AssetTracker::getDeprecated(StorageAssetTrackingTuple* ptr) +{ + StorageAssetCacheMapItr it = storageAssetTrackerTuplesCache.find(ptr); + + if (it == storageAssetTrackerTuplesCache.end()) + { + Logger::getLogger()->debug("%s:%d :tuple not found in cache", + __FUNCTION__, + __LINE__); + return false; + } + else + { + return (it->first)->isDeprecated(); + } + + return false; +} + +/** + * Updates datapoints present in the arg dpSet in the cache + * + * @param dpSet set of datapoints string values to be updated in cache + * @param ptr StorageAssetTrackingTuple* , as key in cache (map) + * Retval void + */ + +void AssetTracker::updateCache(std::set dpSet, StorageAssetTrackingTuple* ptr) +{ + if(ptr == nullptr) + { + Logger::getLogger()->error("%s:%d: StorageAssetTrackingTuple should not be NULL 
pointer", + __FUNCTION__, + __LINE__); + return; + } + + StorageAssetCacheMapItr it = storageAssetTrackerTuplesCache.find(ptr); + // search for the record in cache , if not present, simply update cache and return + if (it == storageAssetTrackerTuplesCache.end()) + { + Logger::getLogger()->debug("%s:%d :tuple not found in cache '%s', ptr '%p'", + __FUNCTION__, + __LINE__, + ptr->assetToString().c_str(), + ptr); + + // Create new tuple, add it to processing queue and to cache + addStorageAssetTrackingTuple(*ptr, dpSet, true); + + return; + } + else + { + Logger::getLogger()->debug("%s:%d :tuple found in cache '%p', '%s': datapoints '%d'", + __FUNCTION__, + __LINE__, + (it->first), + (it->first)->assetToString().c_str(), + (it->second).size()); + + // record is found in cache , compare the datapoints of the argument ptr to that present in the cache + // update the cache with datapoints present in argument record but absent in cache + + std::set &cacheRecord = it->second; + unsigned int sizeOfCacheRecord = cacheRecord.size(); + + // store all the datapoints to be updated in string strDatapoints which is sent to management_client + std::string strDatapoints; + unsigned int count = 0; + for (auto itr : cacheRecord) + { + strDatapoints.append(itr); + strDatapoints.append(","); + count++; + } + + // check which datapoints are not present in cache record, and need to be updated + // in cache and db, store them in string strDatapoints, in comma-separated format + for(auto itr: dpSet) + { + if (cacheRecord.find(itr) == cacheRecord.end()) + { + strDatapoints.append(itr); + strDatapoints.append(","); + count++; + } + } + + // remove the last comma + if (strDatapoints[strDatapoints.size()-1] == ',') + { + strDatapoints.pop_back(); + } + + if (count <= sizeOfCacheRecord) + { + // No need to update as count of cache record is not getting increased + return; + } + + // Add current StorageAssetTrackingTuple to the process queue + addStorageAssetTrackingTuple(*(it->first), dpSet); + + // if update of DB successful , then update the CacheRecord + for(auto itr: dpSet) + { + if (cacheRecord.find(itr) == cacheRecord.end()) + { + cacheRecord.insert(itr); + } + } + } +} + +/** + * Add asset tracking tuple via microservice management API and in cache + * + * @param tuple New tuple to add to the queue + * @param dpSet Set of datapoints to handle + * @param addObj Create a new obj for cache and queue if true. + * Otherwise just add current tuple to processing queue. 
+ */ +void AssetTracker::addStorageAssetTrackingTuple(StorageAssetTrackingTuple& tuple, + std::set& dpSet, + bool addObj) +{ + // Create a comma separated list of datapoints + std::string strDatapoints; + unsigned int count = 0; + for (auto itr : dpSet) + { + strDatapoints.append(itr); + strDatapoints.append(","); + count++; + } + if (strDatapoints[strDatapoints.size()-1] == ',') + { + strDatapoints.pop_back(); + } + + if (addObj) + { + // Create new tuple from input one + StorageAssetTrackingTuple *ptr = new StorageAssetTrackingTuple(tuple); + + // Add new tuple to storage asset cache + storageAssetTrackerTuplesCache.emplace(ptr, dpSet); + + // Add datapoints and count needed for data insert + ptr->m_datapoints = strDatapoints; + ptr->m_maxCount = count; + + // Add new tuple to processing queue + queue(ptr); + } + else + { + // Add datapoints and count needed for data insert + tuple.m_datapoints = strDatapoints; + tuple.m_maxCount = count; + + // Just add current tuple to processing queue + queue(&tuple); + } +} + +/** + * Insert AssetTrackingTuple data via Fledge core API + * or prepare InsertValues object for direct DB operation + * + * @param storage Boolean for storage being available + * @param mgtClient ManagementClient object pointer + * @param warned Boolean ireference updated for logging operation + * @param instanceName Fledge instance name + * @return InsertValues object + */ +InsertValues AssetTrackingTuple::processData(bool storage, + ManagementClient *mgtClient, + bool &warned, + string &instanceName) +{ + InsertValues iValue; + + // Write the tuple - ideally we would like a bulk update here or to go direct to the + // database. However we need the Fledge service name passed in instanceName + if (!storage) + { + // Fall back to using interface to the core + if (!warned) + { + Logger::getLogger()->warn("Asset tracker falling back to core API"); + } + warned = true; + + mgtClient->addAssetTrackingTuple(m_serviceName, + m_pluginName, + m_assetName, + m_eventName); + } + else + { + iValue.push_back(InsertValue("asset", m_assetName)); + iValue.push_back(InsertValue("event", m_eventName)); + iValue.push_back(InsertValue("service", m_serviceName)); + iValue.push_back(InsertValue("fledge", instanceName)); + iValue.push_back(InsertValue("plugin", m_pluginName)); + } + + return iValue; +} + +/** + * Insert StorageAssetTrackingTuple data via Fledge core API + * or prepare InsertValues object for direct DB operation + * + * @param storage Boolean for storage being available + * @param mgtClient ManagementClient object pointer + * @param warned Boolean ireference updated for logging operation + * @param instanceName Fledge instance name + * @return InsertValues object + */ +InsertValues StorageAssetTrackingTuple::processData(bool storage, + ManagementClient *mgtClient, + bool &warned, + string &instanceName) +{ + InsertValues iValue; + + // Write the tuple - ideally we would like a bulk update here or to go direct to the + // database. 
However we need the Fledge service name for that, which is now in + // the member variable m_fledgeName + if (!storage) + { + // Fall back to using interface to the core + if (!warned) + { + Logger::getLogger()->warn("Storage Asset tracker falling back to core API"); + } + warned = true; + + // Insert tuple via Fledge core API + mgtClient->addStorageAssetTrackingTuple(m_serviceName, + m_pluginName, + m_assetName, + m_eventName, + false, + m_datapoints, + m_maxCount); + } + else + { + iValue.push_back(InsertValue("asset", m_assetName)); + iValue.push_back(InsertValue("event", m_eventName)); + iValue.push_back(InsertValue("service", m_serviceName)); + iValue.push_back(InsertValue("fledge", instanceName)); + iValue.push_back(InsertValue("plugin", m_pluginName)); + + // prepare JSON datapoints + string datapoints = "\""; + for ( int i = 0; i < m_datapoints.size(); ++i) + { + if (m_datapoints[i] == ',') + { + datapoints.append("\",\""); + } + else + { + datapoints.append(1,m_datapoints[i]); + } + } + datapoints.append("\""); + + Document doc; + string jsonData = "{\"count\": " + + std::to_string(m_maxCount) + + ", \"datapoints\": [" + + datapoints + "]}"; + doc.Parse(jsonData.c_str()); + iValue.push_back(InsertValue("data", doc)); + } + + return iValue; +} + +/** + * Check if a StorageAssetTrackingTuple is in cache + * + * @param tuple The StorageAssetTrackingTuple to find + * @return Pointer to found tuple or NULL + */ +StorageAssetTrackingTuple* AssetTracker::findStorageAssetTrackingCache(StorageAssetTrackingTuple& tuple) +{ + StorageAssetCacheMapItr it = storageAssetTrackerTuplesCache.find(&tuple); + + if (it == storageAssetTrackerTuplesCache.end()) + { + return NULL; + } + else + { + return it->first; + } +} + +/** + * Get stored value in the StorageAssetTrackingTuple cache for the given tuple + * + * @param tuple The StorageAssetTrackingTuple to find + * @return Pointer to found std::set result or NULL if tuble does not exist + */ +std::set* AssetTracker::getStorageAssetTrackingCacheData(StorageAssetTrackingTuple* tuple) +{ + StorageAssetCacheMapItr it = storageAssetTrackerTuplesCache.find(tuple); + + if (it == storageAssetTrackerTuplesCache.end()) + { + return NULL; + } + else + { + return &(it->second); + } +} diff --git a/C/common/audit_logger.cpp b/C/common/audit_logger.cpp new file mode 100644 index 0000000000..dca431f0e2 --- /dev/null +++ b/C/common/audit_logger.cpp @@ -0,0 +1,76 @@ +/* + * Fledge Singleton Audit Logger interface + * + * Copyright (c) 2023 Dianomic Systems + * + * Released under the Apache 2.0 Licence + * + * Author: Mark Riddoch + */ + +#include + +AuditLogger *AuditLogger::m_instance = 0; + +using namespace std; + +/** + * Constructor for an audit logger that is passed + * the management client. This must be called early in + * a service or task creation before any audit logs are + * created. 
+ * + * @param mgmt Pointer to the management client + */ +AuditLogger::AuditLogger(ManagementClient *mgmt) : m_mgmt(mgmt) +{ + m_instance = this; +} + +/** + * Destructor for an audit logger + */ +AuditLogger::~AuditLogger() +{ +} + +/** + * Get the audit logger singleton + */ +AuditLogger *AuditLogger::getLogger() +{ + if (!m_instance) + { + Logger::getLogger()->error("An attempt has been made to obtain the audit logger before it has been created."); + } + return m_instance; +} + +void AuditLogger::auditLog(const string& code, + const string& level, + const string& data) +{ + if (m_instance) + { + m_instance->audit(code, level, data); + } + else + { + Logger::getLogger()->error("An attempt has been made to log an audit event when no audit logger is available"); + Logger::getLogger()->error("Audit event is: %s, %s, %s", code.c_str(), level.c_str(), data.c_str()); + } +} + +/** + * Log an audit message + * + * @param code The audit code + * @param level The audit level + * @param data Optional data associated with the audit entry + */ +void AuditLogger::audit(const string& code, + const string& level, + const string& data) +{ + m_mgmt->addAuditEntry(code, level, data); +} diff --git a/C/common/config_category.cpp b/C/common/config_category.cpp index 3b14e60885..d2db0482e3 100644 --- a/C/common/config_category.cpp +++ b/C/common/config_category.cpp @@ -20,6 +20,7 @@ #include #include #include +#include using namespace std; @@ -44,8 +45,8 @@ ConfigCategories::ConfigCategories(const std::string& json) doc.Parse(json.c_str()); if (doc.HasParseError()) { - Logger::getLogger()->error("Configuration parse error in %s: %s at %d", json.c_str(), - GetParseError_En(doc.GetParseError()), (unsigned)doc.GetErrorOffset()); + Logger::getLogger()->error("Configuration parse error in %s: %s at %d, '%s'", json.c_str(), + GetParseError_En(doc.GetParseError()), (unsigned)doc.GetErrorOffset(), StringAround(json, (unsigned)doc.GetErrorOffset()).c_str()); throw new ConfigMalformed(); } if (doc.HasMember("categories")) @@ -140,9 +141,10 @@ ConfigCategory::ConfigCategory(const string& name, const string& json) : m_name( doc.Parse(json.c_str()); if (doc.HasParseError()) { - Logger::getLogger()->error("Configuration parse error in category '%s', %s: %s at %d", + Logger::getLogger()->error("Configuration parse error in category '%s', %s: %s at %d, '%s'", name.c_str(), json.c_str(), - GetParseError_En(doc.GetParseError()), (unsigned)doc.GetErrorOffset()); + GetParseError_En(doc.GetParseError()), (unsigned)doc.GetErrorOffset(), + StringAround(json, (unsigned)doc.GetErrorOffset())); throw new ConfigMalformed(); } @@ -468,6 +470,12 @@ string ConfigCategory::getItemAttribute(const string& itemName, return m_items[i]->m_validity; case GROUP_ATTR: return m_items[i]->m_group; + case DISPLAY_NAME_ATTR: + return m_items[i]->m_displayName; + case DEPRECATED_ATTR: + return m_items[i]->m_deprecated; + case RULE_ATTR: + return m_items[i]->m_rule; default: throw new ConfigItemAttributeNotFound(); } @@ -524,6 +532,15 @@ bool ConfigCategory::setItemAttribute(const string& itemName, case GROUP_ATTR: m_items[i]->m_group = value; return true; + case DISPLAY_NAME_ATTR: + m_items[i]->m_displayName = value; + return true; + case DEPRECATED_ATTR: + m_items[i]->m_deprecated = value; + return true; + case RULE_ATTR: + m_items[i]->m_rule = value; + return true; default: return false; } @@ -1057,6 +1074,15 @@ ConfigCategory::CategoryItem::CategoryItem(const string& name, m_group = ""; } + if (item.HasMember("rule")) + { + m_rule = 
item["rule"].GetString(); + } + else + { + m_rule = ""; + } + if (item.HasMember("options")) { const Value& options = item["options"]; @@ -1093,10 +1119,14 @@ ConfigCategory::CategoryItem::CategoryItem(const string& name, check.Parse(m_value.c_str()); if (check.HasParseError()) { + Logger::getLogger()->error("The JSON configuration item %s has a parse error: %s", + m_name.c_str(), GetParseError_En(check.GetParseError())); throw new runtime_error(GetParseError_En(check.GetParseError())); } if (!check.IsObject()) { + Logger::getLogger()->error("The JSON configuration item %s is not a valid JSON objects", + m_name.c_str()); throw new runtime_error("'value' JSON property is not an object"); } } @@ -1128,13 +1158,14 @@ ConfigCategory::CategoryItem::CategoryItem(const string& name, // Item "value" is just a string else if (item.HasMember("value") && item["value"].IsString()) { + // Get content of script type item as is + rapidjson::StringBuffer strbuf; + rapidjson::Writer writer(strbuf); + item["value"].Accept(writer); + if (m_itemType == ScriptItem || m_itemType == CodeItem) { - // Get content of script type item as is - rapidjson::StringBuffer strbuf; - rapidjson::Writer writer(strbuf); - item["value"].Accept(writer); m_value = strbuf.GetString(); if (m_value.empty()) { @@ -1143,7 +1174,8 @@ ConfigCategory::CategoryItem::CategoryItem(const string& name, } else { - m_value = item["value"].GetString(); + m_value = JSONunescape(strbuf.GetString()); + if (m_options.size() == 0) m_itemType = StringItem; else @@ -1195,10 +1227,14 @@ ConfigCategory::CategoryItem::CategoryItem(const string& name, check.Parse(m_default.c_str()); if (check.HasParseError()) { + Logger::getLogger()->error("The JSON configuration item %s has a parse error in the default value: %s", + m_name.c_str(), GetParseError_En(check.GetParseError())); throw new runtime_error(GetParseError_En(check.GetParseError())); } if (!check.IsObject()) { + Logger::getLogger()->error("The JSON configuration item %s default is not a valid JSON object", + m_name.c_str()); throw new runtime_error("'default' JSON property is not an object"); } } @@ -1231,13 +1267,14 @@ ConfigCategory::CategoryItem::CategoryItem(const string& name, // Item "default" is just a string else if (item.HasMember("default") && item["default"].IsString()) { + // Get content of script type item as is + rapidjson::StringBuffer strbuf; + rapidjson::Writer writer(strbuf); + item["default"].Accept(writer); if (m_itemType == ScriptItem || m_itemType == CodeItem) { - // Get content of script type item as is - rapidjson::StringBuffer strbuf; - rapidjson::Writer writer(strbuf); - item["default"].Accept(writer); + m_default = strbuf.GetString(); if (m_default.empty()) { m_default = "\"\""; @@ -1245,7 +1282,7 @@ ConfigCategory::CategoryItem::CategoryItem(const string& name, } else { - m_default = item["default"].GetString(); + m_default = JSONunescape(strbuf.GetString()); if (m_options.size() == 0) m_itemType = StringItem; else @@ -1339,6 +1376,7 @@ ConfigCategory::CategoryItem::CategoryItem(const CategoryItem& rhs) m_itemType = rhs.m_itemType; m_validity = rhs.m_validity; m_group = rhs.m_group; + m_rule = rhs.m_rule; } /** @@ -1424,6 +1462,11 @@ ostringstream convert; convert << ", \"validity\" : \"" << JSONescape(m_validity) << "\""; } + if (!m_rule.empty()) + { + convert << ", \"rule\" : \"" << JSONescape(m_rule) << "\""; + } + if (!m_group.empty()) { convert << ", \"group\" : \"" << m_group << "\""; @@ -1433,17 +1476,6 @@ ostringstream convert; { convert << ", \"file\" : \"" << 
m_file << "\""; } - if (m_options.size() > 0) - { - convert << ", \"options\" : [ "; - for (int i = 0; i < m_options.size(); i++) - { - if (i > 0) - convert << ","; - convert << "\"" << m_options[i] << "\""; - } - convert << "]"; - } } convert << " }"; @@ -1501,6 +1533,11 @@ ostringstream convert; convert << ", \"validity\" : \"" << JSONescape(m_validity) << "\""; } + if (!m_rule.empty()) + { + convert << ", \"rule\" : \"" << JSONescape(m_rule) << "\""; + } + if (!m_group.empty()) { convert << ", \"group\" : \"" << m_group << "\""; diff --git a/C/common/datapoint.cpp b/C/common/datapoint.cpp index 71160c379b..0dad4e3292 100644 --- a/C/common/datapoint.cpp +++ b/C/common/datapoint.cpp @@ -143,11 +143,6 @@ void DatapointValue::deleteNestedDPV() delete m_value.a; m_value.a = NULL; } - else if (m_type == T_IMAGE) - { - delete m_value.image; - m_value.a = NULL; - } else if (m_type == T_DATABUFFER) { delete m_value.dataBuffer; @@ -216,7 +211,8 @@ DatapointValue::DatapointValue(const DatapointValue& obj) Datapoint *d = *it; // Add new allocated datapoint to the vector // using copy constructor - m_value.dpa->push_back(new Datapoint(*d)); + Datapoint *dpCopy = new Datapoint(*d); + m_value.dpa->emplace_back(dpCopy); } break; @@ -353,3 +349,72 @@ int bscount = 0; } return rval; } + +/** + * Parsing a Json string + * + * @param json : string json + * @return vector of datapoints +*/ +std::vector *Datapoint::parseJson(const std::string& json) { + + rapidjson::Document document; + + const auto& parseResult = document.Parse(json.c_str()); + if (parseResult.HasParseError()) { + Logger::getLogger()->fatal("Parsing error %d (%s).", parseResult.GetParseError(), json.c_str()); + printf("Parsing error %d (%s).", parseResult.GetParseError(), json.c_str()); + return nullptr; + } + + if (!document.IsObject()) { + return nullptr; + } + return recursiveJson(document); +} + +/** + * Recursive method to convert a JSON string to a datapoint + * + * @param document : object rapidjon + * @return vector of datapoints +*/ +std::vector *Datapoint::recursiveJson(const rapidjson::Value& document) { + std::vector* p = new std::vector(); + + for (rapidjson::Value::ConstMemberIterator itr = document.MemberBegin(); itr != document.MemberEnd(); ++itr) + { + if (itr->value.IsObject()) { + std::vector * vec = recursiveJson(itr->value); + DatapointValue d(vec, true); + p->push_back(new Datapoint(itr->name.GetString(), d)); + } + else if (itr->value.IsString()) { + DatapointValue d(itr->value.GetString()); + p->push_back(new Datapoint(itr->name.GetString(), d)); + } + else if (itr->value.IsDouble()) { + DatapointValue d(itr->value.GetDouble()); + p->push_back(new Datapoint(itr->name.GetString(), d)); + } + else if (itr->value.IsNumber() && itr->value.IsInt()) { + DatapointValue d((long)itr->value.GetInt()); + p->push_back(new Datapoint(itr->name.GetString(), d)); + } + else if (itr->value.IsNumber() && itr->value.IsUint()) { + DatapointValue d((long)itr->value.GetUint()); + p->push_back(new Datapoint(itr->name.GetString(), d)); + } + else if (itr->value.IsNumber() && itr->value.IsInt64()) { + DatapointValue d((long)itr->value.GetInt64()); + p->push_back(new Datapoint(itr->name.GetString(), d)); + } + else if (itr->value.IsNumber() && itr->value.IsUint64()) { + DatapointValue d((long)itr->value.GetUint64()); + p->push_back(new Datapoint(itr->name.GetString(), d)); + } + } + + return p; +} + diff --git a/C/common/datapoint_utility.cpp b/C/common/datapoint_utility.cpp new file mode 100644 index 0000000000..36b1bc7c22 --- /dev/null +++ 
b/C/common/datapoint_utility.cpp @@ -0,0 +1,217 @@ +/* + * Datapoint utility. + * + * Copyright (c) 2020, RTE (https://www.rte-france.com) + * + * Released under the Apache 2.0 Licence + * + * Author: Yannick Marchetaux + * + */ +#include +#include + +using namespace std; + +/** + * Search a dictionary from a key + * + * @param dict : parent dictionary + * @param key : key to research + * @return vector of datapoint otherwise null pointer +*/ +DatapointUtility::Datapoints *DatapointUtility::findDictElement(Datapoints *dict, const string& key) { + return findDictOrListElement(dict, key, DatapointValue::T_DP_DICT); +} + +/** + * Search a array from a key + * + * @param dict : parent dictionary + * @param key : key to research + * @return vector of datapoint otherwise null pointer +*/ +DatapointUtility::Datapoints *DatapointUtility::findListElement(Datapoints *dict, const string& key) { + return findDictOrListElement(dict, key, DatapointValue::T_DP_LIST); +} + +/** + * Search a list or dictionary from a key + * + * @param dict : parent dictionary + * @param key : key to research + * @param type : type of data searched + * @return vector of datapoint otherwise null pointer +*/ +DatapointUtility::Datapoints *DatapointUtility::findDictOrListElement(Datapoints *dict, const string& key, DatapointValue::dataTagType type) { + Datapoint *dp = findDatapointElement(dict, key); + + if (dp == nullptr) { + return nullptr; + } + + DatapointValue& data = dp->getData(); + if (data.getType() == type) { + return data.getDpVec(); + } + + return nullptr; +} + +/** + * Search a DatapointValue from a key + * + * @param dict : parent dictionary + * @param key : key to research + * @return corresponding datapointValue otherwise null pointer +*/ +DatapointValue *DatapointUtility::findValueElement(Datapoints *dict, const string& key) { + + Datapoint *dp = findDatapointElement(dict, key); + + if (dp == nullptr) { + return nullptr; + } + + return &dp->getData(); +} + +/** + * Search a Datapoint from a key + * + * @param dict : parent dictionary + * @param key : key to research + * @return corresponding datapoint otherwise null pointer +*/ +Datapoint *DatapointUtility::findDatapointElement(Datapoints *dict, const string& key) { + if (dict == nullptr) { + return nullptr; + } + + for (Datapoint *dp : *dict) { + if (dp->getName() == key) { + return dp; + } + } + return nullptr; +} + +/** + * Search a string from a key + * + * @param dict : parent dictionary + * @param key : key to research + * @return corresponding string otherwise empty string +*/ +string DatapointUtility::findStringElement(Datapoints *dict, const string& key) { + + Datapoint *dp = findDatapointElement(dict, key); + + if (dp == nullptr) { + return ""; + } + + DatapointValue& data = dp->getData(); + const DatapointValue::dataTagType dType(data.getType()); + if (dType == DatapointValue::T_STRING) { + return data.toStringValue(); + } + + return ""; +} + +/** + * Method to delete and to free elements from a vector + * + * @param dps dict of values + * @param key key of dict +*/ +void DatapointUtility::deleteValue(Datapoints *dps, const string& key) { + for (Datapoints::iterator it = dps->begin(); it != dps->end(); it++){ + if ((*it)->getName() == key) { + delete (*it); + dps->erase(it); + break; + } + } +} + +/** + * Generate default attribute integer on Datapoint + * + * @param dps dict of values + * @param key key of dict + * @param valueDefault value attribute of dict + * @return pointer of the created datapoint + */ +Datapoint 
*DatapointUtility::createIntegerElement(Datapoints *dps, const string& key, long valueDefault) { + + deleteValue(dps, key); + + DatapointValue dv(valueDefault); + Datapoint *dp = new Datapoint(key, dv); + dps->push_back(dp); + + return dp; +} + +/** + * Generate default attribute string on Datapoint + * + * @param dps dict of values + * @param key key of dict + * @param valueDefault value attribute of dict + * @return pointer of the created datapoint + */ +Datapoint *DatapointUtility::createStringElement(Datapoints *dps, const string& key, const string& valueDefault) { + + deleteValue(dps, key); + + DatapointValue dv(valueDefault); + Datapoint *dp = new Datapoint(key, dv); + dps->push_back(dp); + + return dp; +} + +/** + * Generate default attribute dict on Datapoint + * + * @param dps dict of values + * @param key key of dict + * @param dict if the element is a dictionary + * @return pointer of the created datapoint + */ +Datapoint *DatapointUtility::createDictOrListElement(Datapoints* dps, const string& key, bool dict) { + + deleteValue(dps, key); + + Datapoints *newVec = new Datapoints; + DatapointValue dv(newVec, dict); + Datapoint *dp = new Datapoint(key, dv); + dps->push_back(dp); + + return dp; +} + +/** + * Generate default attribute dict on Datapoint + * + * @param dps dict of values + * @param key key of dict + * @return pointer of the created datapoint + */ +Datapoint *DatapointUtility::createDictElement(Datapoints* dps, const string& key) { + return createDictOrListElement(dps, key, true); +} + +/** + * Generate default attribute list on Datapoint + * + * @param dps dict of values + * @param key key of dict + * @return pointer of the created datapoint + */ +Datapoint *DatapointUtility::createListElement(Datapoints* dps, const string& key) { + return createDictOrListElement(dps, key, false); +} \ No newline at end of file diff --git a/C/common/filter_pipeline.cpp b/C/common/filter_pipeline.cpp index af78771705..d6229cc654 100755 --- a/C/common/filter_pipeline.cpp +++ b/C/common/filter_pipeline.cpp @@ -29,7 +29,7 @@ using namespace std; * @param serviceName Name of the service to which this pipeline applies */ FilterPipeline::FilterPipeline(ManagementClient* mgtClient, StorageClient& storage, string serviceName) : - mgtClient(mgtClient), storage(storage), serviceName(serviceName), m_ready(false) + mgtClient(mgtClient), storage(storage), serviceName(serviceName), m_ready(false), m_shutdown(false) { } diff --git a/C/common/include/asset_tracking.h b/C/common/include/asset_tracking.h index fd27a1f2c5..5445bfca4c 100644 --- a/C/common/include/asset_tracking.h +++ b/C/common/include/asset_tracking.h @@ -7,13 +7,34 @@ * * Released under the Apache 2.0 Licence * - * Author: Amandeep Singh Arora + * Author: Amandeep Singh Arora, Massimiliano Pinto */ #include #include +#include #include #include #include +#include +#include +#include +#include +#include + +/** + * Tracking abstract base class to be passed in the process data queue + */ +class TrackingTuple { +public: + TrackingTuple() {}; + virtual ~TrackingTuple() = default; + virtual InsertValues processData(bool storage_connected, + ManagementClient *mgtClient, + bool &warned, + std::string &instanceName) = 0; + virtual std::string assetToString() = 0; +}; + /** * The AssetTrackingTuple class is used to represent an asset @@ -21,15 +42,10 @@ * this class and pointer to this class that would be required * to create an unordered_set of this class. 
*/ -class AssetTrackingTuple { +class AssetTrackingTuple : public TrackingTuple { public: - std::string m_serviceName; - std::string m_pluginName; - std::string m_assetName; - std::string m_eventName; - - std::string assetToString() + std::string assetToString() { std::ostringstream o; o << "service:" << m_serviceName << @@ -46,7 +62,7 @@ class AssetTrackingTuple { x.m_pluginName==m_pluginName && x.m_assetName==m_assetName && x.m_eventName==m_eventName); - } + }; AssetTrackingTuple(const std::string& service, const std::string& plugin, @@ -57,18 +73,28 @@ class AssetTrackingTuple { m_pluginName(plugin), m_assetName(asset), m_eventName(event), - m_deprecated(deprecated) - {} + m_deprecated(deprecated) {} - std::string& getAssetName() { return m_assetName; }; + std::string &getAssetName() { return m_assetName; }; std::string getPluginName() { return m_pluginName;} std::string getEventName() { return m_eventName;} std::string getServiceName() { return m_serviceName;} bool isDeprecated() { return m_deprecated; }; void unDeprecate() { m_deprecated = false; }; + InsertValues processData(bool storage_connected, + ManagementClient *mgtClient, + bool &warned, + std::string &instanceName); + +public: + std::string m_serviceName; + std::string m_pluginName; + std::string m_assetName; + std::string m_eventName; + private: - bool m_deprecated; + bool m_deprecated; }; struct AssetTrackingTuplePtrEqual { @@ -98,6 +124,109 @@ namespace std }; } +class StorageAssetTrackingTuple : public TrackingTuple { +public: + StorageAssetTrackingTuple(const std::string& service, + const std::string& plugin, + const std::string& asset, + const std::string& event, + const bool& deprecated = false, + const std::string& datapoints = "", + unsigned int c = 0) : m_datapoints(datapoints), + m_maxCount(c), + m_serviceName(service), + m_pluginName(plugin), + m_assetName(asset), + m_eventName(event), + m_deprecated(deprecated) + {}; + + inline bool operator==(const StorageAssetTrackingTuple& x) const + { + return ( x.m_serviceName==m_serviceName && + x.m_pluginName==m_pluginName && + x.m_assetName==m_assetName && + x.m_eventName==m_eventName); + }; + std::string assetToString() + { + std::ostringstream o; + o << "service:" << m_serviceName << + ", plugin:" << m_pluginName << + ", asset:" << m_assetName << + ", event:" << m_eventName << + ", deprecated:" << m_deprecated << + ", m_datapoints:" << m_datapoints << + ", m_maxCount:" << m_maxCount; + return o.str(); + }; + + bool isDeprecated() { return m_deprecated; }; + + unsigned int getMaxCount() { return m_maxCount; } + std::string getDataPoints() { return m_datapoints; } + void unDeprecate() { m_deprecated = false; }; + void setDeprecate() { m_deprecated = true; }; + + InsertValues processData(bool storage, + ManagementClient *mgtClient, + bool &warned, + std::string &instanceName); + +public: + std::string m_datapoints; + unsigned int m_maxCount; + std::string m_serviceName; + std::string m_pluginName; + std::string m_assetName; + std::string m_eventName; + +private: + bool m_deprecated; +}; + +struct StorageAssetTrackingTuplePtrEqual { + bool operator()(StorageAssetTrackingTuple const* a, StorageAssetTrackingTuple const* b) const { + return *a == *b; + } +}; + +namespace std +{ + template <> + struct hash + { + size_t operator()(const StorageAssetTrackingTuple& t) const + { + return (std::hash()(t.m_serviceName + + t.m_pluginName + + t.m_assetName + + t.m_eventName)); + } + }; + + template <> + struct hash + { + size_t operator()(StorageAssetTrackingTuple* t) const + { + 
return (std::hash()(t->m_serviceName + + t->m_pluginName + + t->m_assetName + + t->m_eventName)); + } + }; +} + +typedef std::unordered_map, + std::hash, + StorageAssetTrackingTuplePtrEqual> StorageAssetCacheMap; +typedef std::unordered_map, + std::hash, + StorageAssetTrackingTuplePtrEqual>::iterator StorageAssetCacheMapItr; + class ManagementClient; /** @@ -109,14 +238,20 @@ class AssetTracker { public: AssetTracker(ManagementClient *mgtClient, std::string service); - ~AssetTracker() {} + ~AssetTracker(); static AssetTracker *getAssetTracker(); void populateAssetTrackingCache(std::string plugin, std::string event); + void populateStorageAssetTrackingCache(); bool checkAssetTrackingCache(AssetTrackingTuple& tuple); AssetTrackingTuple* findAssetTrackingCache(AssetTrackingTuple& tuple); void addAssetTrackingTuple(AssetTrackingTuple& tuple); void addAssetTrackingTuple(std::string plugin, std::string asset, std::string event); + void addStorageAssetTrackingTuple(StorageAssetTrackingTuple& tuple, + std::set& dpSet, + bool addObj = false); + StorageAssetTrackingTuple* + findStorageAssetTrackingCache(StorageAssetTrackingTuple& tuple); std::string getIngestService(const std::string& asset) { @@ -127,16 +262,52 @@ class AssetTracker { { return getService("Egress", asset); }; + void workerThread(); + + bool getDeprecated(StorageAssetTrackingTuple* ptr); + void updateCache(std::set dpSet, StorageAssetTrackingTuple* ptr); + std::set + *getStorageAssetTrackingCacheData(StorageAssetTrackingTuple* tuple); private: std::string getService(const std::string& event, const std::string& asset); + void queue(TrackingTuple *tuple); + void processQueue(); + std::set + getDataPointsSet(std::string strDatapoints); + bool getFledgeConfigInfo(); private: - static AssetTracker *instance; - ManagementClient *m_mgtClient; - std::string m_service; - std::unordered_set, AssetTrackingTuplePtrEqual> assetTrackerTuplesCache; + static AssetTracker *instance; + ManagementClient *m_mgtClient; + std::string m_service; + std::unordered_set, AssetTrackingTuplePtrEqual> + assetTrackerTuplesCache; + std::queue m_pending; // Tuples that are not yet written to the storage + std::thread *m_thread; + bool m_shutdown; + std::condition_variable m_cv; + std::mutex m_mutex; + std::string m_fledgeName; + StorageClient *m_storageClient; + StorageAssetCacheMap storageAssetTrackerTuplesCache; +}; + +/** + * A class to hold a set of asset tracking tuples that allows + * lookup by name. + */ +class AssetTrackingTable { + public: + AssetTrackingTable(); + ~AssetTrackingTable(); + void add(AssetTrackingTuple *tuple); + void remove(const std::string& name); + AssetTrackingTuple *find(const std::string& name); + private: + std::map + m_tuples; }; #endif diff --git a/C/common/include/audit_logger.h b/C/common/include/audit_logger.h new file mode 100644 index 0000000000..2f0f5b5fae --- /dev/null +++ b/C/common/include/audit_logger.h @@ -0,0 +1,39 @@ +#ifndef _AUDIT_LOGGER_H +#define _AUDIT_LOGGER_H +/* + * Fledge Singleton Audit Logger interface + * + * Copyright (c) 2023 Dianomic Systems + * + * Released under the Apache 2.0 Licence + * + * Author: Mark Riddoch + */ + +#include +#include +#include + +/** + * A singleton class for access to the audit logger within services. The + * service must create this with the maagement client before any access to it is used. 
+ */ +class AuditLogger { + public: + AuditLogger(ManagementClient *mgmt); + ~AuditLogger(); + + static AuditLogger *getLogger(); + static void auditLog(const std::string& code, + const std::string& level, + const std::string& data = ""); + + void audit(const std::string& code, + const std::string& level, + const std::string& data = ""); + + private: + static AuditLogger *m_instance; + ManagementClient *m_mgmt; +}; +#endif diff --git a/C/common/include/config_category.h b/C/common/include/config_category.h index 24aee86af1..bc87d943d0 100644 --- a/C/common/include/config_category.h +++ b/C/common/include/config_category.h @@ -126,7 +126,10 @@ class ConfigCategory { MAXIMUM_ATTR, LENGTH_ATTR, VALIDITY_ATTR, - GROUP_ATTR}; + GROUP_ATTR, + DISPLAY_NAME_ATTR, + DEPRECATED_ATTR, + RULE_ATTR}; std::string getItemAttribute(const std::string& itemName, ItemAttribute itemAttribute) const; @@ -170,10 +173,11 @@ class ConfigCategory { ItemType m_itemType; std::string m_validity; std::string m_group; + std::string m_rule; }; std::vector m_items; std::string m_name; - std::string m_parent_name; + std::string m_parent_name; std::string m_description; std::string m_displayName; diff --git a/C/common/include/datapoint.h b/C/common/include/datapoint.h index 3b0b012b45..a81183b4fb 100644 --- a/C/common/include/datapoint.h +++ b/C/common/include/datapoint.h @@ -17,6 +17,7 @@ #include #include #include +#include class Datapoint; /** @@ -139,7 +140,21 @@ class DatapointValue { */ ~DatapointValue(); - + /** + * Set the value of a datapoint, this may + * also cause the type to be changed. + * @param value An string value to set + */ + void setValue(std::string value) + { + if(m_value.str) + { + delete m_value.str; + } + m_value.str = new std::string(value); + m_type = T_STRING; + } + /** * Set the value of a datapoint, this may * also cause the type to be changed. 
@@ -347,6 +362,14 @@ class Datapoint { { return m_value; } + + /** + * Parse a json string and generates + * a corresponding datapoint vector + */ + std::vector* parseJson(const std::string& json); + std::vector* recursiveJson(const rapidjson::Value& document); + private: std::string m_name; DatapointValue m_value; diff --git a/C/common/include/datapoint_utility.h b/C/common/include/datapoint_utility.h new file mode 100644 index 0000000000..63f0ea81ac --- /dev/null +++ b/C/common/include/datapoint_utility.h @@ -0,0 +1,43 @@ +#ifndef INCLUDE_DATAPOINT_UTILITY_H_ +#define INCLUDE_DATAPOINT_UTILITY_H_ +/* + * Fledge + * + * Copyright (c) 2021 Dianomic Systems + * + * Released under the Apache 2.0 Licence + * + * Author: Yannick Marchetaux + * + */ + +#include +#include +#include "datapoint.h" +#include "reading.h" + +namespace DatapointUtility { + // Define type + using Datapoints = std::vector; + using Readings = std::vector; + + // Function for search value + Datapoints *findDictElement (Datapoints* dict, const std::string& key); + DatapointValue *findValueElement (Datapoints* dict, const std::string& key); + Datapoint *findDatapointElement (Datapoints* dict, const std::string& key); + Datapoints *findDictOrListElement (Datapoints *dict, const std::string& key, DatapointValue::dataTagType type); + Datapoints *findListElement (Datapoints *dict, const std::string& key); + std::string findStringElement (Datapoints* dict, const std::string& key); + + // delete + void deleteValue(Datapoints *dps, const std::string& key); + + // Function for create element + Datapoint *createStringElement (Datapoints *dps, const std::string& key, const std::string& valueDefault); + Datapoint *createIntegerElement (Datapoints *dps, const std::string& key, long valueDefault); + Datapoint *createDictElement (Datapoints *dps, const std::string& key); + Datapoint *createListElement (Datapoints *dps, const std::string& key); + Datapoint *createDictOrListElement (Datapoints* dps, const std::string& key, bool dict); +}; + +#endif // INCLUDE_DATAPOINT_UTILITY_H_ \ No newline at end of file diff --git a/C/common/include/filter_pipeline.h b/C/common/include/filter_pipeline.h index 0bb20f9b3e..6c3f7c4029 100644 --- a/C/common/include/filter_pipeline.h +++ b/C/common/include/filter_pipeline.h @@ -52,6 +52,8 @@ class FilterPipeline // Check FilterPipeline is ready for data ingest bool isReady() { return m_ready; }; bool hasChanged(const std::string pipeline) const { return m_pipeline != pipeline; } + bool isShuttingDown() { return m_shutdown; }; + void setShuttingDown() { m_shutdown = true; } private: PLUGIN_HANDLE loadFilterPlugin(const std::string& filterName); @@ -66,6 +68,7 @@ class FilterPipeline m_filterCategories; std::string m_pipeline; bool m_ready; + bool m_shutdown; ServiceHandler *m_serviceHandler; }; diff --git a/C/common/include/logger.h b/C/common/include/logger.h index 1cffa457a3..e64596fcb3 100755 --- a/C/common/include/logger.h +++ b/C/common/include/logger.h @@ -40,6 +40,7 @@ class Logger { std::string *format(const std::string& msg, va_list ap); static Logger *instance; std::string levelString; + int m_level; }; #endif diff --git a/C/common/include/management_client.h b/C/common/include/management_client.h index 86cd530f37..9fc8bf2123 100644 --- a/C/common/include/management_client.h +++ b/C/common/include/management_client.h @@ -30,6 +30,7 @@ using HttpServer = SimpleWeb::Server; using namespace rapidjson; class AssetTrackingTuple; +class AssetTrackingTable; class StorageAssetTrackingTuple; /** @@ -115,6 
+116,7 @@ class ManagementClient { const std::string& assetName, const std::string& event); int validateDatapoints(std::string dp1, std::string dp2); + AssetTrackingTable *getDeprecatedAssetTrackingTuples(); private: std::ostringstream m_urlbase; diff --git a/C/common/include/process.h b/C/common/include/process.h index bb54cf2bbe..e992cf7ccb 100644 --- a/C/common/include/process.h +++ b/C/common/include/process.h @@ -12,6 +12,7 @@ #include #include +#include #include /** @@ -44,6 +45,7 @@ class FledgeProcess ManagementClient* m_client; StorageClient* m_storage; Logger* m_logger; + AuditLogger* m_auditLogger; }; #endif diff --git a/C/common/include/pyruntime.h b/C/common/include/pyruntime.h index 630220e7cf..496e76371e 100644 --- a/C/common/include/pyruntime.h +++ b/C/common/include/pyruntime.h @@ -14,12 +14,15 @@ class PythonRuntime { public: static PythonRuntime *getPythonRuntime(); + static bool initialised() { return m_instance != NULL; }; + static void shutdown(); void execute(const std::string& python); PyObject *call(const std::string& name, const std::string& fmt, ...); PyObject *call(PyObject *module, const std::string& name, const std::string& fmt, ...); PyObject *importModule(const std::string& name); private: PythonRuntime(); + ~PythonRuntime(); PythonRuntime(const PythonRuntime& rhs); PythonRuntime& operator=(const PythonRuntime& rhs); void logException(const std::string& name); diff --git a/C/common/include/pythonreading.h b/C/common/include/pythonreading.h index c6c9f5a2ed..7a4297a6e0 100644 --- a/C/common/include/pythonreading.h +++ b/C/common/include/pythonreading.h @@ -20,6 +20,7 @@ class PythonReading : public Reading { public: PythonReading(PyObject *pyReading); + ~PythonReading() {}; PyObject *toPython(bool changeKeys = false, bool bytesString = false); static std::string errorMessage(); static bool isArray(PyObject *); diff --git a/C/common/include/pythonreadingset.h b/C/common/include/pythonreadingset.h index 7b97c1ae69..b91813da67 100644 --- a/C/common/include/pythonreadingset.h +++ b/C/common/include/pythonreadingset.h @@ -20,6 +20,7 @@ class PythonReadingSet : public ReadingSet { public: PythonReadingSet(PyObject *pySet); + ~PythonReadingSet() {}; PyObject *toPython(bool changeKeys = false); private: void setReadingAttr(Reading* newReading, PyObject *readingList, bool fillIfMissing); diff --git a/C/common/include/reading.h b/C/common/include/reading.h index 271053cb6f..3b61e14af1 100644 --- a/C/common/include/reading.h +++ b/C/common/include/reading.h @@ -14,6 +14,7 @@ #include #include #include +#include #define DEFAULT_DATE_TIME_FORMAT "%Y-%m-%d %H:%M:%S" #define COMBINED_DATE_STANDARD_FORMAT "%Y-%m-%dT%H:%M:%S" @@ -34,9 +35,10 @@ class Reading { Reading(const std::string& asset, Datapoint *value); Reading(const std::string& asset, std::vector values); Reading(const std::string& asset, std::vector values, const std::string& ts); + Reading(const std::string& asset, const std::string& datapoints); Reading(const Reading& orig); - ~Reading(); + ~Reading(); // This should bbe virtual void addDatapoint(Datapoint *value); Datapoint *removeDatapoint(const std::string& name); Datapoint *getDatapoint(const std::string& name) const; @@ -52,6 +54,7 @@ class Reading { const std::vector getReadingData() const { return m_values; }; // Return refrerence to Reading datapoints std::vector& getReadingData() { return m_values; }; + bool hasId() const { return m_has_id; }; unsigned long getId() const { return m_id; }; unsigned long getTimestamp() const { return (unsigned 
long)m_timestamp.tv_sec; }; unsigned long getUserTimestamp() const { return (unsigned long)m_userTimestamp.tv_sec; }; @@ -67,6 +70,7 @@ class Reading { typedef enum dateTimeFormat { FMT_DEFAULT, FMT_STANDARD, FMT_ISO8601, FMT_ISO8601MS } readingTimeFormat; + void getFormattedDateTimeStr(const time_t *tv_sec, char *date_time, readingTimeFormat dateFormat) const; // Return Reading asset time - ts time const std::string getAssetDateTime(readingTimeFormat datetimeFmt = FMT_DEFAULT, bool addMs = true) const; // Return Reading asset time - user_ts time @@ -77,6 +81,7 @@ class Reading { Reading& operator=(Reading const&); void stringToTimestamp(const std::string& timestamp, struct timeval *ts); const std::string escape(const std::string& str) const; + std::vector *JSONtoDatapoints(const rapidjson::Value& json); unsigned long m_id; bool m_has_id; std::string m_asset; diff --git a/C/common/include/reading_set.h b/C/common/include/reading_set.h index d38aed4407..2f096da0bd 100755 --- a/C/common/include/reading_set.h +++ b/C/common/include/reading_set.h @@ -28,9 +28,9 @@ class ReadingSet { ReadingSet(); ReadingSet(const std::string& json); ReadingSet(const std::vector* readings); - ~ReadingSet(); + virtual ~ReadingSet(); - unsigned long getCount() const { return m_count; }; + unsigned long getCount() const { return m_readings.size(); }; const Reading *operator[] (const unsigned int idx) { return m_readings[idx]; }; @@ -40,14 +40,20 @@ class ReadingSet { // Return the reference of readings std::vector* getAllReadingsPtr() { return &m_readings; }; + // Remove readings from reading set and return reference to readings + std::vector* moveAllReadings(); + // Delete a reading from reading set and return pointer of deleted reading + Reading* removeReading(unsigned long id); + // Return the reading id of the last data element unsigned long getLastId() const { return m_last_id; }; unsigned long getReadingId(uint32_t pos); void append(ReadingSet *); void append(ReadingSet&); - void append(const std::vector &); + void append(std::vector &); void removeAll(); void clear(); + bool copy(const ReadingSet& src); protected: unsigned long m_count; @@ -66,6 +72,7 @@ class ReadingSet { class JSONReading : public Reading { public: JSONReading(const rapidjson::Value& json); + ~JSONReading() {}; // Return the reading id unsigned long getId() const { return m_id; }; diff --git a/C/common/include/storage_asset_tracking.h b/C/common/include/storage_asset_tracking.h deleted file mode 100644 index 1fe2dc4695..0000000000 --- a/C/common/include/storage_asset_tracking.h +++ /dev/null @@ -1,121 +0,0 @@ -#ifndef _STORAGE_ASSET_TRACKING_H -#define _STORAGE_ASSET_TRACKING_H -/* - * Fledge storage asset tracking related - * - * Copyright (c) 2022 Dianomic Systems - * - * Released under the Apache 2.0 Licence - * - * Author: Ashwini Sinha - */ -#include -#include -#include -#include -#include -#include -#include - - -/** - * The StorageAssetTrackingTuple class is used to represent ai storage asset - * tracking tuple. Hash function and '==' operator are defined for - * this class and pointer to this class that would be required - * to create an unordered_set of this class. 
- */ - -class StorageAssetTrackingTuple : public AssetTrackingTuple { - -public: - std::string m_datapoints; - unsigned int m_maxCount; - - std::string assetToString() - { - std::ostringstream o; - o << AssetTrackingTuple::assetToString() << ", m_datapoints:" << m_datapoints << ", m_maxCount:" << m_maxCount; - return o.str(); - } - - unsigned int getMaxCount() { return m_maxCount; } - std::string getDataPoints() { return m_datapoints; } - - StorageAssetTrackingTuple(const std::string& service, - const std::string& plugin, - const std::string& asset, - const std::string& event, - const bool& deprecated = false, - const std::string& datapoints = "", - unsigned int c = 0) : - AssetTrackingTuple(service, plugin, asset, event, deprecated), m_datapoints(datapoints), m_maxCount(c) - {} - -private: -}; - -struct StorageAssetTrackingTuplePtrEqual { - bool operator()(StorageAssetTrackingTuple const* a, StorageAssetTrackingTuple const* b) const { - return *a == *b; - } -}; - -namespace std -{ - template <> - struct hash - { - size_t operator()(const StorageAssetTrackingTuple& t) const - { - return (std::hash()(t.m_serviceName + t.m_pluginName + t.m_assetName + t.m_eventName)); - } - }; - - template <> - struct hash - { - size_t operator()(StorageAssetTrackingTuple* t) const - { - return (std::hash()(t->m_serviceName + t->m_pluginName + t->m_assetName + t->m_eventName)); - } - }; -} - -class ManagementClient; - -typedef std::unordered_map, std::hash, StorageAssetTrackingTuplePtrEqual> StorageAssetCacheMap; - -typedef std::unordered_map, std::hash, StorageAssetTrackingTuplePtrEqual>::iterator StorageAssetCacheMapItr; - -/** - * The StorageAssetTracker class provides the asset tracking functionality. - * There are methods to populate asset tracking cache from asset_tracker DB table, - * and methods to check/add asset tracking tuples to DB and to cache - */ -class StorageAssetTracker { - -public: - StorageAssetTracker(ManagementClient *mgtClient, std::string m_service); - ~StorageAssetTracker() {} - void populateStorageAssetTrackingCache(); - StorageAssetTrackingTuple* - findStorageAssetTrackingCache(StorageAssetTrackingTuple& tuple); - void addStorageAssetTrackingTuple(StorageAssetTrackingTuple& tuple); - bool getFledgeConfigInfo(); - static StorageAssetTracker *getStorageAssetTracker(); - static void releaseStorageAssetTracker(); - void updateCache(std::set dpSet, StorageAssetTrackingTuple* ptr); - bool getDeprecated(StorageAssetTrackingTuple* ptr); - -private: - static StorageAssetTracker *instance; - ManagementClient *m_mgtClient; - std::string m_fledgeService; - std::string m_service; - std::string m_event; - std::set getDataPointsSet(std::string strDatapoints); - - StorageAssetCacheMap storageAssetTrackerTuplesCache; -}; - -#endif diff --git a/C/common/include/storage_client.h b/C/common/include/storage_client.h index 4df4b8a701..902999acc5 100644 --- a/C/common/include/storage_client.h +++ b/C/common/include/storage_client.h @@ -26,7 +26,7 @@ using HttpClient = SimpleWeb::Client; -#define STREAM_BLK_SIZE 50 // Readings to send per write call to a stream +#define STREAM_BLK_SIZE 100 // Readings to send per write call to a stream #define STREAM_THRESHOLD 25 // Switch to streamed mode above this number of readings per second // Backup values for repeated storage client exception messages @@ -49,6 +49,12 @@ class StorageClient { ResultSet *queryTable(const std::string& tablename, const Query& query); ReadingSet *queryTableToReadings(const std::string& tableName, const Query& query); int 
insertTable(const std::string& schema, const std::string& tableName, const InsertValues& values); + int insertTable(const std::string& schema, const std::string& tableName, + const std::vector& values); + int insertTable(const std::string& tableName, const std::vector& values); + + + int updateTable(const std::string& schema, const std::string& tableName, const InsertValues& values, const Where& where, const UpdateModifier *modifier = NULL); int updateTable(const std::string& schema, const std::string& tableName, const JSONProperties& json, @@ -73,6 +79,12 @@ class StorageClient { const UpdateModifier *modifier = NULL); int updateTable(const std::string& tableName, const InsertValues& values, const ExpressionValues& expressions, const Where& where, const UpdateModifier *modifier = NULL); + int updateTable(const std::string& schema, const std::string& tableName, + std::vector > &updates, const UpdateModifier *modifier); + + int updateTable(const std::string& tableName, std::vector >& updates, + const UpdateModifier *modifier = NULL); + int deleteTable(const std::string& tableName, const Query& query); bool readingAppend(Reading& reading); bool readingAppend(const std::vector & readings); @@ -86,7 +98,10 @@ class StorageClient { const std::string& callbackUrl); bool unregisterAssetNotification(const std::string& assetName, const std::string& callbackUrl); - + bool registerTableNotification(const std::string& tableName, const std::string& key, + std::vector keyValues, const std::string& operation, const std::string& callbackUrl); + bool unregisterTableNotification(const std::string& tableName, const std::string& key, + std::vector keyValues, const std::string& operation, const std::string& callbackUrl); void registerManagement(ManagementClient *mgmnt) { m_management = mgmnt; }; bool createSchema(const std::string&); diff --git a/C/common/include/string_utils.h b/C/common/include/string_utils.h index 0e0aa363f4..85956dbaaf 100644 --- a/C/common/include/string_utils.h +++ b/C/common/include/string_utils.h @@ -14,7 +14,6 @@ #include #include -using namespace std; void StringReplace(std::string& StringToManage, const std::string& StringToSearch, @@ -24,25 +23,28 @@ void StringReplaceAll(std::string& StringToManage, const std::string& StringToSearch, const std::string& StringReplacement); -string StringSlashFix(const string& stringToFix); +std::string StringSlashFix(const std::string& stringToFix); std::string evaluateParentPath(const std::string& path, char separator); std::string extractLastLevel(const std::string& path, char separator); void StringStripCRLF(std::string& StringToManage); -string StringStripWhiteSpacesAll(const std::string& original); -string StringStripWhiteSpacesExtra(const std::string& original); +std::string StringStripWhiteSpacesAll(const std::string& original); +std::string StringStripWhiteSpacesExtra(const std::string& original); void StringStripQuotes(std::string& StringToManage); -string urlEncode(const string& s); -string urlDecode(const string& s); -void StringEscapeQuotes(string& s); +std::string urlEncode(const std::string& s); +std::string urlDecode(const std::string& s); +void StringEscapeQuotes(std::string& s); char *trim(char *str); std::string StringLTrim(const std::string& str); std::string StringRTrim(const std::string& str); std::string StringTrim(const std::string& str); -bool IsRegex(const string &str); +bool IsRegex(const std::string &str); + +std::string StringAround(const std::string& str, unsigned int pos, + unsigned int after = 30, unsigned int before 
= 10); #endif diff --git a/C/common/logger.cpp b/C/common/logger.cpp index dc0126e443..1470dd38be 100755 --- a/C/common/logger.cpp +++ b/C/common/logger.cpp @@ -51,6 +51,9 @@ static char ident[80]; Logger::~Logger() { closelog(); + // Stop the getLogger() call returning a deleted instance + if (instance == this) + instance = NULL; } Logger *Logger::getLogger() @@ -72,18 +75,22 @@ void Logger::setMinLevel(const string& level) { setlogmask(LOG_UPTO(LOG_INFO)); levelString = level; + m_level = LOG_INFO; } else if (level.compare("warning") == 0) { setlogmask(LOG_UPTO(LOG_WARNING)); levelString = level; + m_level = LOG_WARNING; } else if (level.compare("debug") == 0) { setlogmask(LOG_UPTO(LOG_DEBUG)); levelString = level; + m_level = LOG_DEBUG; } else if (level.compare("error") == 0) { setlogmask(LOG_UPTO(LOG_ERR)); levelString = level; + m_level = LOG_ERR; } else { error("Request to set unsupported log level %s", level.c_str()); @@ -92,6 +99,10 @@ void Logger::setMinLevel(const string& level) void Logger::debug(const string& msg, ...) { + if (m_level == LOG_ERR || m_level == LOG_WARNING || m_level == LOG_INFO) + { + return; + } va_list args; va_start(args, msg); string *fmt = format(msg, args); @@ -111,6 +122,10 @@ void Logger::printLongString(const string& s) void Logger::info(const string& msg, ...) { + if (m_level == LOG_ERR || m_level == LOG_WARNING) + { + return; + } va_list args; va_start(args, msg); string *fmt = format(msg, args); diff --git a/C/common/management_client.cpp b/C/common/management_client.cpp index bb732fb6df..55c0ebfa79 100644 --- a/C/common/management_client.cpp +++ b/C/common/management_client.cpp @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include @@ -827,6 +826,12 @@ std::vector& ManagementClient::getAssetTrackingTuples(const throw runtime_error("Expected asset tracker tuple to be an object"); } + // Do not load "store" events as they bill be loaded by getStorageAssetTrackingTuples() + if (rec["event"].GetString() == "store") + { + continue; + } + // Note: deprecatedTimestamp NULL value is returned as "" // otherwise it's a string DATE bool deprecated = rec.HasMember("deprecatedTimestamp") && @@ -1496,6 +1501,92 @@ AssetTrackingTuple* ManagementClient::getAssetTrackingTuple(const std::string& s return tuple; } +/** + * Get the asset tracking tuples for all the deprecated assets + * + * @return A vector of pointers to AssetTrackingTuple objects allocated on heap + */ +AssetTrackingTable* ManagementClient::getDeprecatedAssetTrackingTuples() +{ + AssetTrackingTable* table = NULL; + try { + string url = "/fledge/track?deprecated=true"; + + auto res = this->getHttpClient()->request("GET", url.c_str()); + Document doc; + string response = res->content.string(); + doc.Parse(response.c_str()); + if (doc.HasParseError()) + { + bool httpError = (isdigit(response[0]) && + isdigit(response[1]) && + isdigit(response[2]) && + response[3]==':'); + m_logger->error("%s fetch asset tracking tuple: %s\n", + httpError?"HTTP error during":"Failed to parse result of", + response.c_str()); + throw new exception(); + } + else if (doc.HasMember("message")) + { + m_logger->error("Failed to fetch asset tracking tuple: %s.", + doc["message"].GetString()); + throw new exception(); + } + else + { + const rapidjson::Value& trackArray = doc["track"]; + if (trackArray.IsArray()) + { + table = new AssetTrackingTable(); + // Process every row and create the AssetTrackingTuple object + for (auto& rec : trackArray.GetArray()) + { + if (!rec.IsObject()) + { + throw 
runtime_error("Expected asset tracker tuple to be an object"); + } + + // Note: deprecatedTimestamp NULL value is returned as "" + // otherwise it's a string DATE + bool deprecated = rec.HasMember("deprecatedTimestamp") && + strlen(rec["deprecatedTimestamp"].GetString()); + + // Create a new AssetTrackingTuple object, to be freed by the caller + AssetTrackingTuple *tuple = new AssetTrackingTuple(rec["service"].GetString(), + rec["plugin"].GetString(), + rec["asset"].GetString(), + rec["event"].GetString(), + deprecated); + + m_logger->debug("Adding AssetTracker tuple for service %s: %s:%s:%s, " \ + "deprecated state is %d", + rec["service"].GetString(), + rec["plugin"].GetString(), + rec["asset"].GetString(), + rec["event"].GetString(), + deprecated); + + table->add(tuple); + } + } + else + { + throw runtime_error("Expected array of rows in asset track tuples array"); + } + + return table; + } + } catch (const SimpleWeb::system_error &e) { + m_logger->error("Fetch/parse of deprecated asset tracking tuples failed: %s.", + e.what()); + } catch (...) { + m_logger->error("Unexpected exception when retrieving asset tuples for deprecated assets"); + } + + return table; +} + /** * Return the content of the named ACL by calling the * management API of the Fledge core. @@ -1555,7 +1646,9 @@ ACL ManagementClient::getACL(const string& aclName) */ StorageAssetTrackingTuple* ManagementClient::getStorageAssetTrackingTuple(const std::string& serviceName, const std::string& assetName, - const std::string& event, const std::string& dp, const unsigned int& c) + const std::string& event, + const std::string& dp, + const unsigned int& c) { StorageAssetTrackingTuple* tuple = NULL; @@ -1663,7 +1756,9 @@ StorageAssetTrackingTuple* ManagementClient::getStorageAssetTrackingTuple(const if(validateDatapoints(dp,datapoints)) { //datapoints in db not same as in arg, continue - m_logger->debug("%s:%d :Datapoints in db not same as in arg",__FUNCTION__, __LINE__); + m_logger->debug("%s:%d :Datapoints in db not same as in arg", + __FUNCTION__, + __LINE__); continue; } @@ -1680,7 +1775,9 @@ StorageAssetTrackingTuple* ManagementClient::getStorageAssetTrackingTuple(const if ( count != c) { // count not same, continue - m_logger->debug("%s:%d :count in db not same as received in arg", __FUNCTION__, __LINE__); + m_logger->debug("%s:%d :count in db not same as received in arg", + __FUNCTION__, + __LINE__); continue; } @@ -1689,7 +1786,9 @@ StorageAssetTrackingTuple* ManagementClient::getStorageAssetTrackingTuple(const rec["plugin"].GetString(), rec["asset"].GetString(), rec["event"].GetString(), - deprecated, datapoints, count); + deprecated, + datapoints, + count); m_logger->debug("%s:%d : Adding StorageAssetTracker tuple for service %s: %s:%s:%s, " \ "deprecated state is %d, datapoints %s , count %d",__FUNCTION__, __LINE__, diff --git a/C/common/process.cpp b/C/common/process.cpp index c580d11b56..4deed3f1fc 100644 --- a/C/common/process.cpp +++ b/C/common/process.cpp @@ -144,6 +144,9 @@ FledgeProcess::FledgeProcess(int argc, char** argv) : // Connection to Fledge core microservice m_client = new ManagementClient(m_core_mngt_host, m_core_mngt_port); + // Create Audit Logger + m_auditLogger = new AuditLogger(m_client); + // Storage layer handle ServiceRecord storageInfo("Fledge Storage"); diff --git a/C/common/pyruntime.cpp b/C/common/pyruntime.cpp index 99e7ca45e7..06acd52f1f 100644 --- a/C/common/pyruntime.cpp +++ b/C/common/pyruntime.cpp @@ -44,6 +44,15 @@ PythonRuntime::PythonRuntime() PyThreadState *save = 
PyEval_SaveThread(); // Release the GIL } +/** + * Destructor + */ +PythonRuntime::~PythonRuntime() +{ + PyGILState_STATE gstate = PyGILState_Ensure(); + Py_Finalize(); +} + /** * Don't allow a copy constructor to be used */ @@ -319,3 +328,17 @@ PyObject *PythonRuntime::importModule(const string& name) PyGILState_Release(state); return module; } + +/** + * Shutdown an instance of a Python runtime if one + * has been started + */ +void PythonRuntime::shutdown() +{ + if (!m_instance) + { + return; + } + delete m_instance; + m_instance = NULL; +} diff --git a/C/common/pythonreading.cpp b/C/common/pythonreading.cpp index 7c31068f6d..678af30747 100755 --- a/C/common/pythonreading.cpp +++ b/C/common/pythonreading.cpp @@ -105,13 +105,13 @@ PythonReading::PythonReading(PyObject *pyReading) // or reading['ema'] if (PyUnicode_Check(dKey)) { - m_values.push_back(new Datapoint( + m_values.emplace_back(new Datapoint( string(PyUnicode_AsUTF8(dKey)), *dataPoint)); } else { - m_values.push_back(new Datapoint( + m_values.emplace_back(new Datapoint( string(PyBytes_AsString(dKey)), *dataPoint)); } @@ -131,9 +131,11 @@ PythonReading::PythonReading(PyObject *pyReading) { // Set id m_id = PyLong_AsUnsignedLong(id); + m_has_id = true; } else { + m_has_id = false; m_id = 0; } @@ -231,7 +233,14 @@ DatapointValue *PythonReading::getDatapointValue(PyObject *value) DatapointValue *dpv = getDatapointValue(dValue); if (dpv) { - values->push_back(new Datapoint(string(PyBytes_AsString(dKey)), *dpv)); + if (PyUnicode_Check(dKey)) + { + values->emplace_back(new Datapoint(string(PyUnicode_AsUTF8(dKey)), *dpv)); + } + else + { + values->emplace_back(new Datapoint(string(PyBytes_AsString(dKey)), *dpv)); + } // Remove temp objects delete dpv; } @@ -286,7 +295,7 @@ DatapointValue *PythonReading::getDatapointValue(PyObject *value) DatapointValue *dpv = getDatapointValue(val); if (dpv) { - values->push_back(new Datapoint(string(PyBytes_AsString(key)), *dpv)); + values->emplace_back(new Datapoint(string(PyBytes_AsString(key)), *dpv)); // Remove temp objects delete dpv; } @@ -577,9 +586,9 @@ PyObject *PythonReading::convertDatapoint(Datapoint *dp, bool bytesString) else if (dataType == DatapointValue::dataTagType::T_DP_DICT) { vector* children = dp->getData().getDpVec();; + value = PyDict_New(); for (auto child = children->begin(); child != children->end(); ++child) { - value = PyDict_New(); PyObject *childValue = convertDatapoint(*child); // Add Datapoint: key and value PyObject *key = PyUnicode_FromString((*child)->getName().c_str()); @@ -593,9 +602,9 @@ PyObject *PythonReading::convertDatapoint(Datapoint *dp, bool bytesString) { vector* children = dp->getData().getDpVec(); int i = 0; + value = PyList_New(children->size()); for (auto child = children->begin(); child != children->end(); ++child) { - value = PyList_New(children->size()); PyObject *childValue = convertDatapoint(*child); // TODO complete // Add Datapoint: key and value diff --git a/C/common/pythonreadingset.cpp b/C/common/pythonreadingset.cpp index a0d2d4c24d..fa664e1838 100755 --- a/C/common/pythonreadingset.cpp +++ b/C/common/pythonreadingset.cpp @@ -117,7 +117,6 @@ PythonReadingSet::PythonReadingSet(PyObject *set) m_readings.push_back(reading); m_count++; m_last_id = reading->getId(); - Logger::getLogger()->debug("PythonReadingSet c'tor: LIST: reading->toJSON()='%s' ", reading->toJSON().c_str()); } } else if (PyDict_Check(set)) @@ -129,7 +128,6 @@ PythonReadingSet::PythonReadingSet(PyObject *set) m_readings.push_back(reading); m_count++; m_last_id = 
reading->getId(); - Logger::getLogger()->debug("PythonReadingSet c'tor: DICT: reading->toJSON()=%s", reading->toJSON().c_str()); } } else diff --git a/C/common/reading.cpp b/C/common/reading.cpp old mode 100644 new mode 100755 index 2826ce4c42..c2b4a3bcb2 --- a/C/common/reading.cpp +++ b/C/common/reading.cpp @@ -12,11 +12,14 @@ #include #include #include +#include #include #include #include +#include using namespace std; +using namespace rapidjson; std::vector Reading::m_dateTypes = { DEFAULT_DATE_TIME_FORMAT, @@ -32,7 +35,7 @@ std::vector Reading::m_dateTypes = { * Each actual datavalue that relates to that asset is held within an * instance of a Datapoint class. */ -Reading::Reading(const string& asset, Datapoint *value) : m_asset(asset) +Reading::Reading(const string& asset, Datapoint *value) : m_asset(asset), m_has_id(false) { m_values.push_back(value); // Store seconds and microseconds @@ -48,7 +51,7 @@ Reading::Reading(const string& asset, Datapoint *value) : m_asset(asset) * Each actual datavalue that relates to that asset is held within an * instance of a Datapoint class. */ -Reading::Reading(const string& asset, vector values) : m_asset(asset) +Reading::Reading(const string& asset, vector values) : m_asset(asset), m_has_id(false) { for (auto it = values.cbegin(); it != values.cend(); it++) { @@ -67,7 +70,7 @@ Reading::Reading(const string& asset, vector values) : m_asset(asse * Each actual datavalue that relates to that asset is held within an * instance of a Datapoint class. */ -Reading::Reading(const string& asset, vector values, const string& ts) : m_asset(asset) +Reading::Reading(const string& asset, vector values, const string& ts) : m_asset(asset), m_has_id(false) { for (auto it = values.cbegin(); it != values.cend(); it++) { @@ -78,6 +81,65 @@ Reading::Reading(const string& asset, vector values, const string& m_userTimestamp = m_timestamp; } +/** + * Construct a reading with datapoints given as JSON + */ +Reading::Reading(const string& asset, const string& datapoints) : m_asset(asset), m_has_id(false) +{ + Document d; + if (d.Parse(datapoints.c_str()).HasParseError()) + { + throw runtime_error("Failed to parse reading datapoints " + datapoints); + } + for (Value::ConstMemberIterator itr = d.MemberBegin(); itr != d.MemberEnd(); ++itr) + { + string name = itr->name.GetString(); + if (itr->value.IsInt64()) + { + long v = itr->value.GetInt64(); + DatapointValue dpv(v); + m_values.push_back(new Datapoint(name, dpv)); + } + else if (itr->value.IsDouble()) + { + double v = itr->value.GetDouble(); + DatapointValue dpv(v); + m_values.push_back(new Datapoint(name, dpv)); + } + else if (itr->value.IsString()) + { + string v = itr->value.GetString(); + DatapointValue dpv(v); + m_values.push_back(new Datapoint(name, dpv)); + } + else if (itr->value.IsObject()) + { + // Map objects as nested datapoints + vector *values = JSONtoDatapoints(itr->value); + DatapointValue dpv(values, true); + m_values.push_back(new Datapoint(name, dpv)); + } + else if (itr->value.IsArray()) + { + vector arr; + for (auto& v : itr->value.GetArray()) + { + if (v.IsNumber()) + arr.emplace_back(v.GetDouble()); + else + throw runtime_error("Only numeric lists are currently supported in datapoints"); + } + + DatapointValue dpv(arr); + m_values.emplace_back(new Datapoint(name, dpv)); + } + } + // Store seconds and microseconds + gettimeofday(&m_timestamp, NULL); + // Initialise m_userTimestamp + m_userTimestamp = m_timestamp; +} + /** * Reading copy constructor */ @@ -88,7 +150,7 @@ Reading::Reading(const Reading& 
orig) : m_asset(orig.m_asset), { for (auto it = orig.m_values.cbegin(); it != orig.m_values.cend(); it++) { - m_values.push_back(new Datapoint(**it)); + m_values.emplace_back(new Datapoint(**it)); } } @@ -225,6 +287,51 @@ ostringstream convert; return convert.str(); } +/** + * Convert time since epoch to a formatted m_timestamp DataTime in UTC + * and use a cache to speed it up + * @param tv_sec Seconds since epoch + * @param date_time Buffer in which to return the formatted timestamp + * @param dateFormat Format: FMT_DEFAULT or FMT_STANDARD + */ +void Reading::getFormattedDateTimeStr(const time_t *tv_sec, char *date_time, readingTimeFormat dateFormat) const +{ + static unsigned long cached_sec_since_epoch = 0; + static char cached_date_time_str[DATE_TIME_BUFFER_LEN] = ""; + static readingTimeFormat cachedDateFormat = (readingTimeFormat) 0xff; + static std::mutex mtx; + + std::unique_lock lck(mtx); + + if(*cached_date_time_str && cached_sec_since_epoch && *tv_sec == cached_sec_since_epoch && cachedDateFormat == dateFormat) + { + strncpy(date_time, cached_date_time_str, DATE_TIME_BUFFER_LEN); + date_time[DATE_TIME_BUFFER_LEN-1] = '\0'; + return; + } + + struct tm timeinfo; + gmtime_r(tv_sec, &timeinfo); + + /** + * Build date_time with format YYYY-MM-DD HH24:MM:SS.MS+00:00 + * this is same as Python3: + * datetime.datetime.now(tz=datetime.timezone.utc) + */ + + // Create datetime with seconds + std::strftime(date_time, DATE_TIME_BUFFER_LEN, + m_dateTypes[dateFormat].c_str(), + &timeinfo); + + // update cache + strncpy(cached_date_time_str, date_time, DATE_TIME_BUFFER_LEN); + cached_date_time_str[DATE_TIME_BUFFER_LEN-1] = '\0'; + cached_sec_since_epoch = *tv_sec; + cachedDateFormat = dateFormat; +} + + /** * Return a formatted m_timestamp DataTime in UTC * @param dateFormat Format: FMT_DEFAULT or FMT_STANDARD @@ -236,20 +343,7 @@ char date_time[DATE_TIME_BUFFER_LEN]; char micro_s[10]; char assetTime[DATE_TIME_BUFFER_LEN + 20]; - // Populate tm structure - struct tm timeinfo; - gmtime_r(&m_timestamp.tv_sec, &timeinfo); - - /** - * Build date_time with format YYYY-MM-DD HH24:MM:SS.MS+00:00 - * this is same as Python3: - * datetime.datetime.now(tz=datetime.timezone.utc) - */ - - // Create datetime with seconds - std::strftime(date_time, sizeof(date_time), - m_dateTypes[dateFormat].c_str(), - &timeinfo); + getFormattedDateTimeStr(&m_timestamp.tv_sec, date_time, dateFormat); if (dateFormat != FMT_ISO8601 && addMS) { @@ -293,21 +387,8 @@ const string Reading::getAssetDateUserTime(readingTimeFormat dateFormat, bool ad char micro_s[10]; char assetTime[DATE_TIME_BUFFER_LEN + 20]; - // Populate tm structure with UTC time - struct tm timeinfo; - gmtime_r(&m_userTimestamp.tv_sec, &timeinfo); - - /** - * Build date_time with format YYYY-MM-DD HH24:MM:SS.MS+00:00 - * this is same as Python3: - * datetime.datetime.now(tz=datetime.timezone.utc) - */ - - // Create datetime with seconds - std::strftime(date_time, sizeof(date_time), - m_dateTypes[dateFormat].c_str(), - &timeinfo); - + getFormattedDateTimeStr(&m_userTimestamp.tv_sec, date_time, dateFormat); + if (dateFormat != FMT_ISO8601 && addMS) { // Add microseconds @@ -375,18 +456,47 @@ void Reading::setUserTimestamp(const string& timestamp) */ void Reading::stringToTimestamp(const string& timestamp, struct timeval *ts) { + static std::mutex mtx; + static char cached_timestamp_upto_min[32] = ""; + static unsigned long cached_sec_since_epoch = 0; + + const int timestamp_str_len_till_min = 16; + const int timestamp_str_len_till_sec = 19; + char date_time 
[DATE_TIME_BUFFER_LEN]; strcpy (date_time, timestamp.c_str()); - struct tm tm; - memset(&tm, 0, sizeof(struct tm)); - strptime(date_time, "%Y-%m-%d %H:%M:%S", &tm); - // Convert time to epoch - mktime assumes localtime so most adjust for that - ts->tv_sec = mktime(&tm); - extern long timezone; - ts->tv_sec -= timezone; - + { + lock_guard guard(mtx); + + char timestamp_sec[32]; + strncpy(timestamp_sec, date_time, timestamp_str_len_till_sec); + timestamp_sec[timestamp_str_len_till_sec] = '\0'; + if(*cached_timestamp_upto_min && cached_sec_since_epoch && (strncmp(timestamp_sec, cached_timestamp_upto_min, timestamp_str_len_till_min) == 0)) + { + // cache hit + int sec_part = strtoul(timestamp_sec+timestamp_str_len_till_min+1, NULL, 10); + ts->tv_sec = cached_sec_since_epoch + sec_part; + } + else + { + // cache miss + struct tm tm; + memset(&tm, 0, sizeof(struct tm)); + strptime(date_time, "%Y-%m-%d %H:%M:%S", &tm); + // Convert time to epoch - mktime assumes localtime so most adjust for that + ts->tv_sec = mktime(&tm); + + extern long timezone; + ts->tv_sec -= timezone; + + strncpy(cached_timestamp_upto_min, timestamp_sec, timestamp_str_len_till_min); + cached_timestamp_upto_min[timestamp_str_len_till_min] = '\0'; + cached_sec_since_epoch = ts->tv_sec - tm.tm_sec; // store only for upto-minute part + } + } + // Now process the fractional seconds const char *ptr = date_time; while (*ptr && *ptr != '.') @@ -415,12 +525,13 @@ void Reading::stringToTimestamp(const string& timestamp, struct timeval *ts) { int h, m; int sign = (*ptr == '+' ? -1 : +1); - ptr++; - sscanf(ptr, "%02d:%02d", &h, &m); + h = strtoul(ptr+1, NULL, 10); + m = strtoul(ptr+4, NULL, 10); ts->tv_sec += sign * ((3600 * h) + (60 * m)); } } + /** * Escape quotes etc to allow the string to be a property value within * a JSON document @@ -455,3 +566,58 @@ int bscount = 0; } return rval; } + +/** + * Convert a JSON Value object to a set of data points + * + * @param json The json object to convert + */ +vector *Reading::JSONtoDatapoints(const Value& json) +{ +vector *values = new vector; + + for (Value::ConstMemberIterator itr = json.MemberBegin(); itr != json.MemberEnd(); ++itr) + { + string name = itr->name.GetString(); + if (itr->value.IsInt64()) + { + long v = itr->value.GetInt64(); + DatapointValue dpv(v); + values->push_back(new Datapoint(name, dpv)); + } + else if (itr->value.IsDouble()) + { + double v = itr->value.GetDouble(); + DatapointValue dpv(v); + values->push_back(new Datapoint(name, dpv)); + } + else if (itr->value.IsString()) + { + string v = itr->value.GetString(); + DatapointValue dpv(v); + values->push_back(new Datapoint(name, dpv)); + } + else if (itr->value.IsObject()) + { + // Map objects as nested datapoints + vector *nestedValues = JSONtoDatapoints(itr->value); + DatapointValue dpv(nestedValues, true); + values->push_back(new Datapoint(name, dpv)); + } + else if (itr->value.IsArray()) + { + vector arr; + for (auto& v : itr->value.GetArray()) + { + if (v.IsNumber()) + arr.emplace_back(v.GetDouble()); + else + throw runtime_error("Only numeric lists are currently supported in datapoints"); + } + + DatapointValue dpv(arr); + values->emplace_back(new Datapoint(name, dpv)); + } + } + return values; +} diff --git a/C/common/reading_set.cpp b/C/common/reading_set.cpp index 2040d145cb..382d321bc4 100755 --- a/C/common/reading_set.cpp +++ b/C/common/reading_set.cpp @@ -54,7 +54,7 @@ ReadingSet::ReadingSet(const vector* readings) : m_last_id(0) m_count = readings->size(); for (auto it = readings->begin(); it != 
readings->end(); ++it) { - if ((*it)->getId() > m_last_id) + if ((*it)->hasId() && (*it)->getId() > m_last_id) m_last_id = (*it)->getId(); m_readings.push_back(*it); } @@ -62,7 +62,13 @@ ReadingSet::ReadingSet(const vector* readings) : m_last_id(0) /** * Construct a reading set from a JSON document returned from - * the Fledge storage service query or notification. + * the Fledge storage service query or notification. The JSON + * is parsed using the in-situ RapidJSON parser in order to + * reduce overhead on what is most likely a large JSON document. + * + * WARNING: Although the string passed in is defined as const, + * this call is destructive to this string and the contents + * of the string should not be used after making this call. * * @param json The JSON document (as string) with readings data */ @@ -70,7 +76,7 @@ ReadingSet::ReadingSet(const std::string& json) : m_last_id(0) { unsigned long rows = 0; Document doc; - doc.Parse(json.c_str()); + doc.ParseInsitu((char *)json.c_str()); // Cast away const in order to use in-situ if (doc.HasParseError()) { throw new ReadingSetException("Unable to parse results json document"); @@ -158,30 +164,44 @@ ReadingSet::~ReadingSet() /** * Append the readings in a second reading set to this reading set. * The readings are removed from the original reading set + * + * @param readings A ReadingSet to append to the current ReadingSet */ void ReadingSet::append(ReadingSet *readings) { - append(readings->getAllReadings()); + vector *vec = readings->getAllReadingsPtr(); + append(*vec); readings->clear(); } /** * Append the readings in a second reading set to this reading set. * The readings are removed from the original reading set + * + * @param readings A ReadingSet to append to the current ReadingSet */ void ReadingSet::append(ReadingSet& readings) { - append(readings.getAllReadings()); + vector *vec = readings.getAllReadingsPtr(); + append(*vec); readings.clear(); } /** - * Append a set of readings to this reading set. + * Append a set of readings to this reading set. The + * readings are not copied, but rather moved from the + * vector, with the resulting vector having the values + * removed on return. + * + * It is assumed the readings in the vector have been + * created with the new operator. + * + * @param readings A vector of Reading pointers to append to the ReadingSet */ void -ReadingSet::append(const vector& readings) +ReadingSet::append(vector& readings) { for (auto it = readings.cbegin(); it != readings.cend(); it++) { @@ -190,6 +210,92 @@ ReadingSet::append(const vector& readings) m_readings.push_back(*it); m_count++; } + readings.clear(); +} + +/** +* Deep copy a set of readings to this reading set. 
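+*
+* A minimal usage sketch of the new append/copy semantics (illustrative only,
+* assuming the readings and datapoints are allocated with new, as the append()
+* contract above requires):
+*
+*   std::vector<Reading *> batch;
+*   batch.push_back(new Reading("pump", new Datapoint("rpm", DatapointValue(1200L))));
+*
+*   ReadingSet target;
+*   target.append(batch);    // pointers are moved into target; batch is left empty
+*
+*   ReadingSet backup;
+*   backup.copy(target);     // deep copies readings and datapoints; target is unchanged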
+*/ +bool +ReadingSet::copy(const ReadingSet& src) +{ + vector readings; + bool copyResult = true; + try + { + // Iterate over all the readings in ReadingSet + for (auto const &reading : src.getAllReadings()) + { + std::string assetName = reading->getAssetName(); + std::vector dataPoints; + try + { + // Iterate over all the datapoints associated with one reading + for (auto const &dp : reading->getReadingData()) + { + std::string dataPointName = dp->getName(); + DatapointValue dv = dp->getData(); + dataPoints.emplace_back(new Datapoint(dataPointName, dv)); + + } + } + // Catch exception while copying datapoints + catch(std::bad_alloc& ex) + { + Logger::getLogger()->error("Insufficient memory, failed while copying dataPoints from ReadingSet, %s ", ex.what()); + copyResult = false; + for (auto const &dp : dataPoints) + { + delete dp; + } + dataPoints.clear(); + throw; + } + catch (std::exception& ex) + { + Logger::getLogger()->error("Unknown exception, failed while copying datapoint from ReadingSet, %s ", ex.what()); + copyResult = false; + for (auto const &dp : dataPoints) + { + delete dp; + } + dataPoints.clear(); + throw; + } + + Reading *in = new Reading(assetName, dataPoints); + readings.emplace_back(in); + } + } + // Catch exception while copying readings + catch (std::bad_alloc& ex) + { + Logger::getLogger()->error("Insufficient memory, failed while copying %d reading from ReadingSet, %s ",readings.size()+1, ex.what()); + copyResult = false; + for (auto const &r : readings) + { + delete r; + } + readings.clear(); + } + catch (std::exception& ex) + { + Logger::getLogger()->error("Unknown exception, failed while copying %d reading from ReadingSet, %s ",readings.size()+1, ex.what()); + copyResult = false; + for (auto const &r : readings) + { + delete r; + } + readings.clear(); + } + + //Append if All elements have been copied successfully + if (copyResult) + { + append(readings); + } + + return copyResult; } /** @@ -204,6 +310,8 @@ ReadingSet::removeAll() delete *it; } m_readings.clear(); + m_count = 0; + m_last_id = 0; } /** @@ -213,6 +321,39 @@ void ReadingSet::clear() { m_readings.clear(); + m_count = 0; + m_last_id = 0; +} + +/** + * Remove readings from the vector and return a reference to new vector + * containing readings* +*/ +std::vector* ReadingSet::moveAllReadings() +{ + std::vector* transferredPtr = new std::vector(std::move(m_readings)); + m_count = 0; + m_last_id = 0; + m_readings.clear(); + + return transferredPtr; +} + +/** + * Remove reading from vector based on index and return its pointer +*/ +Reading* ReadingSet::removeReading(unsigned long id) +{ + if (id >= m_readings.size()) + { + return nullptr; + } + + Reading* reading = m_readings[id]; + m_readings.erase(m_readings.begin() + id); + m_count--; + + return reading; } /** @@ -432,23 +573,28 @@ Datapoint *rval = NULL; // Number case (kNumberType): { - if (item.IsInt() || - item.IsUint() || - item.IsInt64() || - item.IsUint64()) + if (item.IsInt()) { - - DatapointValue *value; - if (item.IsInt() || item.IsUint()) - { - value = new DatapointValue((long) item.GetInt()); - } - else - { - value = new DatapointValue((long) item.GetInt64()); - } - rval = new Datapoint(name, *value); - delete value; + DatapointValue value((long)item.GetInt()); + rval = new Datapoint(name, value); + break; + } + else if (item.IsUint()) + { + DatapointValue value((long)item.GetUint()); + rval = new Datapoint(name, value); + break; + } + else if (item.IsInt64()) + { + DatapointValue value((long)item.GetInt64()); + rval = new Datapoint(name, 
value); + break; + } + else if (item.IsUint64()) + { + DatapointValue value((long)item.GetUint64()); + rval = new Datapoint(name, value); break; } else if (item.IsDouble()) diff --git a/C/common/result_set.cpp b/C/common/result_set.cpp index e29c5a4f74..953d15dd68 100644 --- a/C/common/result_set.cpp +++ b/C/common/result_set.cpp @@ -30,7 +30,7 @@ ResultSet::ResultSet(const std::string& json) { throw new ResultException("Unable to parse results json document"); } - if (doc.HasMember("count")) + if (doc.HasMember("count") && doc["count"].IsUint()) { m_rowCount = doc["count"].GetUint(); if (m_rowCount) @@ -95,7 +95,7 @@ ResultSet::ResultSet(const std::string& json) rowValue->append(new ColumnValue(string(item->value.GetString()))); break; case INT_COLUMN: - rowValue->append(new ColumnValue(item->value.GetInt())); + rowValue->append(new ColumnValue((long)(item->value.GetInt64()))); break; case NUMBER_COLUMN: rowValue->append(new ColumnValue(item->value.GetDouble())); diff --git a/C/common/storage_asset_tracking.cpp b/C/common/storage_asset_tracking.cpp deleted file mode 100644 index 3ba3ad0238..0000000000 --- a/C/common/storage_asset_tracking.cpp +++ /dev/null @@ -1,303 +0,0 @@ -/* - * Fledge Storage asset tracking related - * - * Copyright (c) 2022 Dianomic Systems - * - * Released under the Apache 2.0 Licence - * - * Author: Ashwini Sinha - */ - -#include -#include - -using namespace std; - -StorageAssetTracker *StorageAssetTracker::instance = 0; - -/** - * Get asset tracker singleton instance for the current south service - * - * @return Singleton asset tracker instance - */ -StorageAssetTracker *StorageAssetTracker::getStorageAssetTracker() -{ - return instance; -} - -/** - * Release the storage asset tracker singleton instance - * - * @return void - */ - -void StorageAssetTracker::releaseStorageAssetTracker() -{ - if (instance) - delete instance; - instance = nullptr; -} - - -/** - * AssetTracker class constructor - * - * @param mgtClient Management client object for this south service - * @param service Service name - */ -StorageAssetTracker::StorageAssetTracker(ManagementClient *mgtClient, std::string service) - : m_mgtClient(mgtClient), m_service(service), m_event("store") -{ - instance = this; -} - -/** - * Fetch all storage asset tracking tuples from DB and populate local cache - * - * Return the vector of deprecated asset names - * - */ -void StorageAssetTracker::populateStorageAssetTrackingCache() -{ - - try { - std::vector& vec = m_mgtClient->getStorageAssetTrackingTuples(m_service); - - for (StorageAssetTrackingTuple* & rec : vec) - { - set setOfDPs = getDataPointsSet(rec->m_datapoints); - if (setOfDPs.size() == 0) - { - Logger::getLogger()->warn("%s:%d Datapoints unavailable for service %s ", __FUNCTION__, __LINE__, m_service.c_str()); - } - storageAssetTrackerTuplesCache[rec] = setOfDPs; - } - delete (&vec); - } - catch (...) 
- { - Logger::getLogger()->error("%s:%d Failed to populate storage asset tracking tuples' cache", __FUNCTION__, __LINE__); - return; - } - - return; -} - -/** - * Return Plugin Information in the Fledge configuration - * - * @return bool True if the plugin info could be obtained - */ -bool StorageAssetTracker::getFledgeConfigInfo() -{ - Logger::getLogger()->error("StorageAssetTracker::getPluginInfo start"); - try { - string url = "/fledge/category/service"; - if (!m_mgtClient) - { - Logger::getLogger()->error("%s:%d, m_mgtClient Ptr is NULL", __FUNCTION__, __LINE__); - return false; - } - - auto res = m_mgtClient->getHttpClient()->request("GET", url.c_str()); - Document doc; - string response = res->content.string(); - doc.Parse(response.c_str()); - if (doc.HasParseError()) - { - bool httpError = (isdigit(response[0]) && isdigit(response[1]) && isdigit(response[2]) && response[3]==':'); - Logger::getLogger()->error("%s fetching service record: %s\n", - httpError?"HTTP error while":"Failed to parse result of", - response.c_str()); - return false; - } - else if (doc.HasMember("message")) - { - Logger::getLogger()->error("Failed to fetch /fledge/category/service %s.", - doc["message"].GetString()); - return false; - } - else - { - Value& serviceName = doc["name"]; - if (!serviceName.IsObject()) - { - Logger::getLogger()->error("%s:%d, serviceName is not an object", __FUNCTION__, __LINE__); - return false; - } - - if (!serviceName.HasMember("value")) - { - Logger::getLogger()->error("%s:%d, serviceName has no member value", __FUNCTION__, __LINE__); - return false; - - } - Value& serviceVal = serviceName["value"]; - if ( !serviceVal.IsString()) - { - Logger::getLogger()->error("%s:%d, serviceVal is not a string", __FUNCTION__, __LINE__); - return false; - } - - m_fledgeService = serviceVal.GetString(); - Logger::getLogger()->error("%s:%d, m_plugin value = %s", __FUNCTION__, __LINE__, m_fledgeService.c_str()); - return true; - } - - } catch (const SimpleWeb::system_error &e) { - Logger::getLogger()->error("Get service failed %s.", e.what()); - return false; - } - return false; -} - -/** - * Updates datapoints present in the arg dpSet in the cache - * - * @param dpSet set of datapoints string values to be updated in cache - * @param ptr StorageAssetTrackingTuple* , as key in cache (map) - * Retval void - */ - -void StorageAssetTracker::updateCache(std::set dpSet, StorageAssetTrackingTuple* ptr) -{ - if(ptr == nullptr) - { - Logger::getLogger()->error("%s:%d: StorageAssetTrackingTuple should not be NULL pointer", __FUNCTION__, __LINE__); - return; - } - - unsigned int sizeOfInputSet = dpSet.size(); - StorageAssetCacheMapItr it = storageAssetTrackerTuplesCache.find(ptr); - - // search for the record in cache , if not present, simply update cache and return - if (it == storageAssetTrackerTuplesCache.end()) - { - Logger::getLogger()->debug("%s:%d :tuple not found in cache ", __FUNCTION__, __LINE__); - storageAssetTrackerTuplesCache[ptr] = dpSet; - - std::string strDatapoints; - unsigned int count = 0; - for (auto itr : dpSet) - { - strDatapoints.append(itr); - strDatapoints.append(","); - count++; - } - if (strDatapoints[strDatapoints.size()-1] == ',') - strDatapoints.pop_back(); - - bool rv = m_mgtClient->addStorageAssetTrackingTuple(ptr->getServiceName(), ptr->getPluginName(), ptr->getAssetName(), ptr->getEventName(), false, strDatapoints, count); - if (rv) - { - storageAssetTrackerTuplesCache[ptr] = dpSet; - } - else - Logger::getLogger()->error("%s:%d: Failed to insert storage asset tracking 
tuple into DB: '%s'", __FUNCTION__, __LINE__, (ptr->getAssetName()).c_str()); - - return; - } - else - { - // record is found in cache , compare the datapoints of the argument ptr to that present in the cache - // update the cache with datapoints present in argument record but absent in cache - // - std::set &cacheRecord = it->second; - unsigned int sizeOfCacheRecord = cacheRecord.size(); - - // store all the datapoints to be updated in string strDatapoints which is sent to management_client - std::string strDatapoints; - unsigned int count = 0; - for (auto itr : cacheRecord) - { - strDatapoints.append(itr); - strDatapoints.append(","); - count++; - } - - // check which datapoints are not present in cache record, and need to be updated - // in cache and db, store them in string strDatapoints, in comma-separated format - for(auto itr: dpSet) - { - if (cacheRecord.find(itr) == cacheRecord.end()) - { - strDatapoints.append(itr); - strDatapoints.append(","); - count++; - } - } - - // remove the last comma - if (strDatapoints[strDatapoints.size()-1] == ',') - strDatapoints.pop_back(); - - if (count <= sizeOfCacheRecord) - { - // No need to update as count of cache record is not getting increased - return; - } - - // Update the DB - bool rv = m_mgtClient->addStorageAssetTrackingTuple(ptr->getServiceName(), ptr->getPluginName(), ptr->getAssetName(), ptr->getEventName(), false, strDatapoints, count); - if(rv) - { - // if update of DB successful , then update the CacheRecord - for(auto itr: dpSet) - { - if (cacheRecord.find(itr) == cacheRecord.end()) - { - cacheRecord.insert(itr); - } - } - } - else - { - // Log error if Update DB unsuccessful - Logger::getLogger()->error("%s:%d: Failed to insert storage asset tracking tuple into DB: '%s'", __FUNCTION__, __LINE__, (ptr->getAssetName()).c_str()); - - } - } -} - -//This function takes a string of datapoints in comma-separated format and returns -//set of string datapoint values -std::set StorageAssetTracker::getDataPointsSet(std::string strDatapoints) -{ - - std::set tokens; - stringstream st(strDatapoints); - std::string temp; - - while(getline(st, temp, ',')) - { - tokens.insert(temp); - } - - return tokens; -} - - -/** This function takes a StorageAssetTrackingTuple pointer and searches for - * it in cache, if found then returns its Deprecated status - * - * @param ptr StorageAssetTrackingTuple* , as key in cache (map) - * Retval bool Deprecation status - */ - - -bool StorageAssetTracker::getDeprecated(StorageAssetTrackingTuple* ptr) -{ - StorageAssetCacheMapItr it = storageAssetTrackerTuplesCache.find(ptr); - - if (it == storageAssetTrackerTuplesCache.end()) - { - Logger::getLogger()->debug("%s:%d :tuple not found in cache ", __FUNCTION__, __LINE__); - return false; - } - else - { - return (it->first)->isDeprecated(); - } - - return false; -} diff --git a/C/common/storage_client.cpp b/C/common/storage_client.cpp old mode 100755 new mode 100644 index 28732a4fe0..d4b6cdc505 --- a/C/common/storage_client.cpp +++ b/C/common/storage_client.cpp @@ -27,7 +27,10 @@ #define EXCEPTION_BUFFER_SIZE 120 -#define INSTRUMENT 0 +#define INSTRUMENT 0 +// Streaming is currently disabled due to an issue that causes the stream to +// hang after a period. 
Set the followign to 1 in order to enable streaming +#define ENABLE_STREAMING 0 #if INSTRUMENT #include @@ -160,8 +163,8 @@ bool StorageClient::readingAppend(const vector& readings) double timeSpan = dur.tv_sec + ((double)dur.tv_usec / 1000000); double rate = (double)readings.size() / timeSpan; // Stream functionality disabled - // if (rate > STREAM_THRESHOLD) - if (0) +#if ENABLE_STREAMING + if (rate > STREAM_THRESHOLD) { m_logger->info("Reading rate %.1f readings per second above threshold, attmempting to switch to stream mode", rate); if (openStream()) @@ -171,6 +174,7 @@ bool StorageClient::readingAppend(const vector& readings) } m_logger->warn("Failed to switch to streaming mode"); } +#endif static HttpClient *httpClient = this->getHttpClient(); // to initialize m_seqnum_map[thread_id] for this thread try { std::thread::id thread_id = std::this_thread::get_id(); @@ -320,7 +324,7 @@ ReadingSet *StorageClient::readingFetch(const unsigned long readingId, const uns { ostringstream resultPayload; resultPayload << res->content.rdbuf(); - ReadingSet *result = new ReadingSet(resultPayload.str().c_str()); + ReadingSet *result = new ReadingSet(resultPayload.str()); return result; } ostringstream resultPayload; @@ -1151,7 +1155,7 @@ void StorageClient::handleUnexpectedResponse(const char *operation, const string /** * Standard logging method for all interactions * - * @param operation The operation beign undertaken + * @param operation The operation being undertaken * @param responseCode The HTTP response code * @param payload The payload in the response message */ @@ -1259,6 +1263,125 @@ bool StorageClient::unregisterAssetNotification(const string& assetName, return false; } +/** + * Register interest for a table + * + * @param tableName The table name to register for notification + * @param tableKey The key of interest in the table + * @param tableKeyValues The key values of interest + * @param tableOperation The table operation of interest (insert/update/delete) + * @param callbackUrl The callback URL to send change data + * @return True on success, false otherwise. 
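+ *
+ * A hypothetical call, sketching the intended use (the table name, key, value
+ * and callback URL below are examples only, not values used by Fledge itself):
+ *
+ *   StorageClient storage(address, port);
+ *   std::vector<std::string> keyValues = { "MySouthService" };
+ *   storage.registerTableNotification("configuration", "key", keyValues,
+ *                                     "update", "http://localhost:8081/callback");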
+ */ +bool StorageClient::registerTableNotification(const string& tableName, const string& key, std::vector keyValues, + const string& operation, const string& callbackUrl) +{ + try + { + ostringstream keyValuesStr; + for (auto & s : keyValues) + { + keyValuesStr << "\"" << s << "\""; + if (&s != &keyValues.back()) + keyValuesStr << ", "; + } + + ostringstream convert; + + convert << "{ "; + convert << "\"url\" : \"" << callbackUrl << "\", "; + convert << "\"key\" : \"" << key << "\", "; + convert << "\"values\" : [" << keyValuesStr.str() << "], "; + convert << "\"operation\" : \"" << operation << "\" "; + convert << "}"; + + auto res = this->getHttpClient()->request("POST", + "/storage/table/interest/" + urlEncode(tableName), + convert.str()); + if (res->status_code.compare("200 OK") == 0) + { + return true; + } + ostringstream resultPayload; + resultPayload << res->content.rdbuf(); + handleUnexpectedResponse("Register table", + tableName, + res->status_code, + resultPayload.str()); + m_logger->error("POST /storage/table/interest/%s: %s", + urlEncode(tableName).c_str(), res->status_code.c_str()); + + return false; + } catch (exception& ex) + { + handleException(ex, "register table '%s'", tableName.c_str()); + } + return false; +} + +/** + * Unregister interest for a table name + * + * @param tableName The table name to unregister interest in + * @param tableKey The key of interest in the table + * @param tableKeyValues The key values of interest + * @param tableOperation The table operation of interest (insert/update/delete) + * @param callbackUrl The callback URL to send change data + * @return True on success, false otherwise. + */ +bool StorageClient::unregisterTableNotification(const string& tableName, const string& key, std::vector keyValues, + const string& operation, const string& callbackUrl) +{ + try + { + ostringstream keyValuesStr; + for (auto & s : keyValues) + { + keyValuesStr << "\"" << s << "\""; + if (&s != &keyValues.back()) + keyValuesStr << ", "; + } + + ostringstream convert; + + convert << "{ "; + convert << "\"url\" : \"" << callbackUrl << "\", "; + convert << "\"key\" : \"" << key << "\", "; + convert << "\"values\" : [" << keyValuesStr.str() << "], "; + convert << "\"operation\" : \"" << operation << "\" "; + convert << "}"; + + auto res = this->getHttpClient()->request("DELETE", + "/storage/table/interest/" + urlEncode(tableName), + convert.str()); + if (res->status_code.compare("200 OK") == 0) + { + return true; + } + ostringstream resultPayload; + resultPayload << res->content.rdbuf(); + handleUnexpectedResponse("Unregister table", + tableName, + res->status_code, + resultPayload.str()); + m_logger->error("DELETE /storage/table/interest/%s: %s", + urlEncode(tableName).c_str(), res->status_code.c_str()); + + return false; + } catch (exception& ex) + { + handleException(ex, "unregister table '%s'", tableName.c_str()); + } + return false; +} + +/* + * Attempt to open a streaming connection to the storage service. We use a REST API + * call to create the stream. If successful this call will return a port and a token + * to use when sending data via the stream. + * + * @return bool Return true if the stream was setup + */ bool StorageClient::openStream() { try { @@ -1339,6 +1462,33 @@ bool StorageClient::openStream() /** * Stream a set of readings to the storage service. 
* + * The stream uses a TCP connection to the storage system; it sends + * blocks of readings to the storage engine and bypasses the usual + * JSON conversion and, importantly, parsing on the storage system + * side. + * + * A block of readings is introduced by a block header, the block + * header contains a magic number, the block number and the count + * of the number of readings in a block. + * + * Each reading within the block is preceded by a reading header + * that contains a magic number, a reading number within the block, + * the length of the asset name for the reading, the length of the + * payload within the reading. The reading itself follows the header + * and consists of the timestamp as a binary timeval structure, the name + * of the asset, including the null terminator. If the asset name length + * is 0 then no asset name is sent and the name of the asset is the same + * as the previous asset in the block. Following this the payload is included. + * + * Each block is sent to the storage layer in a number of chunks rather + * than a single write per block. The implementation makes use of the + * Linux scatter/gather IO calls to reduce the number of copies of data + * that are required. + * + * Currently there is no acknowledgement handling as TCP is used as the underlying + * transport and the TCP acknowledgement is assumed to be a good enough + * indication of delivery. + * * TODO Deal with acknowledgements, add error checking/recovery * * @param readings The readings to stream @@ -1349,7 +1499,7 @@ bool StorageClient::streamReadings(const std::vector & readings) RDSBlockHeader blkhdr; RDSReadingHeader rdhdrs[STREAM_BLK_SIZE]; register RDSReadingHeader *phdr; -struct { const void *iov_base; size_t iov_len;} iovs[STREAM_BLK_SIZE * 4], *iovp; +struct iovec iovs[STREAM_BLK_SIZE * 4], *iovp; string payloads[STREAM_BLK_SIZE]; struct timeval tm[STREAM_BLK_SIZE]; ssize_t n, length = 0; @@ -1358,6 +1508,7 @@ string lastAsset; if (!m_streaming) { + m_logger->warn("Attempt to send data via a storage stream when streaming is not setup"); return false; } @@ -1373,7 +1524,7 @@ string lastAsset; { if (errno == EPIPE || errno == ECONNRESET) { - Logger::getLogger()->warn("Storage service has closed stream unexpectedly"); + Logger::getLogger()->error("Storage service has closed stream unexpectedly"); m_streaming = false; } else @@ -1385,7 +1536,7 @@ string lastAsset; /* * Use the writev scatter/gather interface to send the reading headers and reading data. - * We sent chunks of data in order to allow the parallel sendign and unpacking process + * We send chunks of data in order to allow the parallel sending and unpacking process * at the two ends. The chunk size is STREAM_BLK_SIZE readings. 
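 *
 * As a rough illustration of the framing described in the function header above
 * (the field names are illustrative, not the actual RDSBlockHeader and
 * RDSReadingHeader definitions):
 *
 *   struct BlockHeader   { uint32_t magic; uint32_t blockNo;   uint32_t readingCount; };
 *   struct ReadingHeader { uint32_t magic; uint32_t readingNo; uint32_t assetLength;
 *                          uint32_t payloadLength; };
 *
 * Each reading header is followed by a binary timeval, the asset name plus its
 * null terminator (omitted when assetLength is 0) and the JSON datapoints payload.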
*/ iovp = iovs; @@ -1408,7 +1559,7 @@ string lastAsset; phdr->assetLength = assetCode.length() + 1; } - // Alwayts generate the JSON variant of the data points and send + // Always generate the JSON variant of the data points and send payloads[offset] = readings[i]->getDatapointsJSON(); phdr->payloadLength = payloads[offset].length() + 1; @@ -1428,14 +1579,14 @@ string lastAsset; // If the asset code has changed than add that if (phdr->assetLength) { - iovp->iov_base = readings[i]->getAssetName().c_str(); + iovp->iov_base = (void *)(readings[i]->getAssetName().c_str()); // Cast away const due to iovec definition iovp->iov_len = phdr->assetLength; length += iovp->iov_len; iovp++; } // Add the data points themselves - iovp->iov_base = payloads[offset].c_str(); + iovp->iov_base = (void *)(payloads[offset].c_str()); // Cast away const due to iovec definition iovp->iov_len = phdr->payloadLength; length += iovp->iov_len; iovp++; @@ -1443,23 +1594,31 @@ string lastAsset; offset++; if (offset == STREAM_BLK_SIZE - 1) { + if (iovp - iovs > STREAM_BLK_SIZE * 4) + Logger::getLogger()->error("Too many iov blocks %d", iovp - iovs); + // Send a chunk of readings in the block n = writev(m_stream, (const iovec *)iovs, iovp - iovs); - if (n < length) + if (n == -1) { if (errno == EPIPE || errno == ECONNRESET) { Logger::getLogger()->error("Stream has been closed by the storage service"); m_streaming = false; } - else - { - Logger::getLogger()->error("Write of block short, %d < %d: %s", + Logger::getLogger()->error("Write of block %d filed: %s", + m_readingBlock - 1, strerror(errno)); + return false; + } + else if (n < length) + { + Logger::getLogger()->error("Write of block short, %d < %d: %s", n, length, strerror(errno)); - } return false; } else if (n > length) + { Logger::getLogger()->fatal("Long write %d < %d", length, n); + } offset = 0; length = 0; iovp = iovs; @@ -1470,25 +1629,33 @@ string lastAsset; phdr++; } } + if (length) // Remaining data to be sent to finish the block { - if ((n = writev(m_stream, (const iovec *)iovs, iovp - iovs)) < length) + n = writev(m_stream, (const iovec *)iovs, iovp - iovs); + if (n == -1) { if (errno == EPIPE || errno == ECONNRESET) { Logger::getLogger()->error("Stream has been closed by the storage service"); m_streaming = false; } - else - { - Logger::getLogger()->error("Write of block short, %d < %d: %s", + Logger::getLogger()->error("Write of block %d filed: %s", + m_readingBlock - 1, strerror(errno)); + return false; + } + else if (n < length) + { + Logger::getLogger()->error("Write of block short, %d < %d: %s", n, length, strerror(errno)); - } return false; } else if (n > length) + { Logger::getLogger()->fatal("Long write %d < %d", length, n); + } } + Logger::getLogger()->info("Written block of %d readings via streaming connection", readings.size()); return true; } @@ -1575,3 +1742,164 @@ bool StorageClient::createSchema(const std::string& payload) } return false; } + +/** + * Update data into an arbitrary table + * + * @param schema The name of the schema into which data will be added + * @param tableName The name of the table into which data will be added + * @param updates The values and condition pairs to update in the table + * @param modifier Optional update modifier + * @return int The number of rows updated + */ +int StorageClient::updateTable(const string& schema, const string& tableName, std::vector >& updates, const UpdateModifier *modifier) +{ + static HttpClient *httpClient = this->getHttpClient(); // to initialize m_seqnum_map[thread_id] for this 
thread + try { + std::thread::id thread_id = std::this_thread::get_id(); + ostringstream ss; + sto_mtx_client_map.lock(); + m_seqnum_map[thread_id].fetch_add(1); + ss << m_pid << "#" << thread_id << "_" << m_seqnum_map[thread_id].load(); + sto_mtx_client_map.unlock(); + + SimpleWeb::CaseInsensitiveMultimap headers = {{"SeqNum", ss.str()}}; + + ostringstream convert; + convert << "{ \"updates\" : [ "; + + for (vector>::const_iterator it = updates.cbegin(); + it != updates.cend(); ++it) + { + if (it != updates.cbegin()) + { + convert << ", "; + } + convert << "{ "; + if (modifier) + { + convert << "\"modifiers\" : [ \"" << modifier->toJSON() << "\" ], "; + } + convert << "\"where\" : "; + convert << it->second->toJSON(); + convert << ", \"values\" : "; + convert << " { " << it->first->toJSON() << " } "; + convert << " }"; + } + convert << " ] }"; + + char url[128]; + snprintf(url, sizeof(url), "/storage/schema/%s/table/%s", schema.c_str(), tableName.c_str()); + auto res = this->getHttpClient()->request("PUT", url, convert.str(), headers); + + if (res->status_code.compare("200 OK") == 0) + { + ostringstream resultPayload; + resultPayload << res->content.rdbuf(); + Document doc; + doc.Parse(resultPayload.str().c_str()); + if (doc.HasParseError()) + { + m_logger->info("PUT result %s.", res->status_code.c_str()); + m_logger->error("Failed to parse result of updateTable. %s", + GetParseError_En(doc.GetParseError())); + return -1; + } + else if (doc.HasMember("message")) + { + m_logger->error("Failed to update table data: %s", + doc["message"].GetString()); + return -1; + } + return doc["rows_affected"].GetInt(); + } + ostringstream resultPayload; + resultPayload << res->content.rdbuf(); + handleUnexpectedResponse("Update table", tableName, res->status_code, resultPayload.str()); + } catch (exception& ex) { + handleException(ex, "update table %s", tableName.c_str()); + throw; + } + return -1; +} + +/** + * Update data into an arbitrary table + * + * @param tableName The name of the table into which data will be added + * @param updates The values to insert into the table + * @param modifier Optional storage modifier + * @return int The number of rows updated + */ + +int StorageClient::updateTable(const string& tableName, std::vector >& updates, const UpdateModifier *modifier) +{ + return updateTable(DEFAULT_SCHEMA, tableName, updates, modifier); +} + +/** + * Insert data into an arbitrary table + * + * @param tableName The name of the table into which data will be added + * @param values The values to insert into the table + * @return int The number of rows inserted + */ +int StorageClient::insertTable(const string& tableName, const std::vector& values) +{ + return insertTable(DEFAULT_SCHEMA, tableName, values); +} +/** + * Insert data into an arbitrary table + * + * @param schema The name of the schema to insert into + * @param tableName The name of the table into which data will be added + * @param values The values to insert into the table + * @return int The number of rows inserted + */ +int StorageClient::insertTable(const string& schema, const string& tableName, const std::vector& values) +{ + try { + ostringstream convert; + convert << "{ \"inserts\": [" ; + for (std::vector::const_iterator it = values.cbegin(); + it != values.cend(); ++it) + { + if (it != values.cbegin()) + { + convert << ", "; + } + convert << it->toJSON() ; + } + convert << "]}"; + + char url[1000]; + snprintf(url, sizeof(url), "/storage/schema/%s/table/%s", schema.c_str(), tableName.c_str()); + + auto res = 
this->getHttpClient()->request("POST", url, convert.str()); + ostringstream resultPayload; + resultPayload << res->content.rdbuf(); + if (res->status_code.compare("200 OK") == 0 || res->status_code.compare("201 Created") == 0) + { + + Document doc; + doc.Parse(resultPayload.str().c_str()); + if (doc.HasParseError()) + { + m_logger->info("POST result %s.", res->status_code.c_str()); + m_logger->error("Failed to parse result of insertTable. %s. Document is %s", + GetParseError_En(doc.GetParseError()), + resultPayload.str().c_str()); + return -1; + } + else if (doc.HasMember("rows_affected")) + { + return doc["rows_affected"].GetInt(); + } + } + handleUnexpectedResponse("Insert table", res->status_code, resultPayload.str()); + } catch (exception& ex) { + handleException(ex, "insert into table %s", tableName.c_str()); + throw; + } + return 0; +} diff --git a/C/common/string_utils.cpp b/C/common/string_utils.cpp index a6e82d7957..49ae4da234 100644 --- a/C/common/string_utils.cpp +++ b/C/common/string_utils.cpp @@ -445,3 +445,20 @@ bool IsRegex(const string &str) { return (nChar != 0); } + +/** + * Return a new string that extracts from the passed in string either side + * of a position within the string. + * + * @param str The string to return a portion of + * @param pos The position around which to extract a portion + * @param after The number of characters after the position to return, defaults to 30 if omitted + * @param before The number of characters before the position to return, defaults to 10 + */ +std::string StringAround(const std::string& str, unsigned int pos, + unsigned int after, unsigned int before) +{ + size_t start = pos > before ? (pos - before) : 0; + size_t len = before + after; + return str.substr(start, len); +} diff --git a/C/plugins/common/include/http_sender.h b/C/plugins/common/include/http_sender.h index 810dec3d70..c00d343cbd 100644 --- a/C/plugins/common/include/http_sender.h +++ b/C/plugins/common/include/http_sender.h @@ -77,4 +77,44 @@ class BadRequest : public std::exception { private: std::string m_errmsg; }; + +/** + * Unauthorized exception + */ +class Unauthorized : public std::exception { + public: + // Constructor with parameter + Unauthorized (const std::string& serverReply) + { + m_errmsg = serverReply; + }; + + virtual const char *what() const throw() + { + return m_errmsg.c_str(); + } + + private: + std::string m_errmsg; +}; + +/** + * Conflict exception + */ +class Conflict : public std::exception { + public: + // Constructor with parameter + Conflict (const std::string& serverReply) + { + m_errmsg = serverReply; + }; + + virtual const char *what() const throw() + { + return m_errmsg.c_str(); + } + + private: + std::string m_errmsg; +}; #endif diff --git a/C/plugins/common/libcurl_https.cpp b/C/plugins/common/libcurl_https.cpp index 722950857b..f1cc6eb582 100644 --- a/C/plugins/common/libcurl_https.cpp +++ b/C/plugins/common/libcurl_https.cpp @@ -443,6 +443,14 @@ int LibcurlHttps::sendRequest( { throw BadRequest(errorMessage); } + else if (httpCode == 401) + { + throw Unauthorized(errorMessage); + } + else if (httpCode == 409) + { + throw Conflict(errorMessage); + } else if (httpCode >= 401) { string errorMessageHTTP; diff --git a/C/plugins/common/piwebapi.cpp b/C/plugins/common/piwebapi.cpp index a79f777da7..f29ffd6b5b 100644 --- a/C/plugins/common/piwebapi.cpp +++ b/C/plugins/common/piwebapi.cpp @@ -150,6 +150,14 @@ int PIWebAPI::GetVersion(const string& host, string &version, bool logMessage) } httpCode = (int) 
SimpleWeb::StatusCode::client_error_bad_request; } + catch (const Unauthorized& ex) + { + if (logMessage) + { + Logger::getLogger()->error("The PI Web API server at %s has rejected our request due to an authentication issue. Please check the authentication method and credentials are correctly configured.", host.c_str()); + } + httpCode = (int) SimpleWeb::StatusCode::client_error_unauthorized; + } catch (exception &ex) { if (logMessage) diff --git a/C/plugins/common/simple_http.cpp b/C/plugins/common/simple_http.cpp index d9d55d31f2..4c76a0c28c 100644 --- a/C/plugins/common/simple_http.cpp +++ b/C/plugins/common/simple_http.cpp @@ -237,7 +237,15 @@ int SimpleHttp::sendRequest( { throw BadRequest(response); } - else if (http_code >= 401) + else if (http_code == 401) + { + throw Unauthorized(response); + } + else if (http_code == 409) + { + throw Conflict(response); + } + else if (http_code > 401) { std::stringstream error_message; error_message << "HTTP code |" << to_string(http_code) << "| HTTP error |" << response << "|"; diff --git a/C/plugins/common/simple_https.cpp b/C/plugins/common/simple_https.cpp index 372937c88d..35c2584c49 100644 --- a/C/plugins/common/simple_https.cpp +++ b/C/plugins/common/simple_https.cpp @@ -13,6 +13,7 @@ #include #include #include +#include #define VERBOSE_LOG 0 @@ -245,9 +246,18 @@ int SimpleHttps::sendRequest( { throw BadRequest(response); } - else if (http_code >= 401) + else if (http_code == 401) + { + throw Unauthorized(response); + } + else if (http_code == 409) + { + throw Conflict(response); + } + else if (http_code > 401) { std::stringstream error_message; + StringReplace(response, "\r\n", ""); error_message << "HTTP code |" << to_string(http_code) << "| HTTP error |" << response << "|"; throw runtime_error(error_message.str()); diff --git a/C/plugins/north/OMF/OMFError.cpp b/C/plugins/north/OMF/OMFError.cpp new file mode 100644 index 0000000000..a97f222b11 --- /dev/null +++ b/C/plugins/north/OMF/OMFError.cpp @@ -0,0 +1,155 @@ +/* + * Fledge OSIsoft OMF interface to PI Server. 
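/*
 * Illustrative sketch only (not part of the change above): how a caller of
 * HttpSender::sendRequest() can handle the new Unauthorized (HTTP 401) and
 * Conflict (HTTP 409) exceptions alongside the existing BadRequest. The
 * variables sender, path, header and payload are assumed to exist in the
 * calling code.
 */
	try {
		int res = sender.sendRequest("POST", path, header, payload);
		if (res < 200 || res > 299)
		{
			// Other non-success codes are reported via the return value
			Logger::getLogger()->error("HTTP error %d returned by %s", res, path.c_str());
		}
	} catch (const Unauthorized& e) {
		// HTTP 401: the authentication method or credentials are wrong; retrying will not help
		Logger::getLogger()->error("Request rejected as unauthorized: %s", e.what());
	} catch (const Conflict& e) {
		// HTTP 409: the endpoint reports a conflict, e.g. a resource that already exists
		Logger::getLogger()->warn("Request reported a conflict: %s", e.what());
	} catch (const BadRequest& e) {
		// HTTP 400: the payload was malformed or rejected
		Logger::getLogger()->error("Bad request: %s", e.what());
	} catch (const std::exception& e) {
		// Any other HTTP error surfaces as a std::runtime_error
		Logger::getLogger()->error("Request failed: %s", e.what());
	}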
+ * + * Copyright (c) 2023 Dianomic Systems + * + * Released under the Apache 2.0 Licence + * + * Author: Mark Riddoch + */ + +#include +#include +#include +#include +#include +#include "string_utils.h" + +#include +#include +#include + +#include +#include + +#include + +using namespace std; +using namespace rapidjson; + +/** + * Constructor + */ +OMFError::OMFError(const string& json) : m_messageCount(0) +{ + char *p = (char *)json.c_str(); + + FILE *fp = fopen("/tmp/error", "a"); + fprintf(fp, "%s\n\n", p); + fclose(fp); + + while (*p && *p != '{') + p++; + Document doc; + if (doc.ParseInsitu(p).HasParseError()) + { + Logger::getLogger()->error("Unable to parse response from OMF endpoint: %s", + GetParseError_En(doc.GetParseError())); + Logger::getLogger()->error("Error response was: %s", json.c_str()); + } + else if (doc.HasMember("Messages") && doc["Messages"].IsArray()) + { + const Value& messages = doc["Messages"].GetArray(); + m_messageCount = messages.Size(); + + for (Value::ConstValueIterator a = messages.Begin(); a != messages.End(); a++) + { + const Value& msg = *a; + if (msg.HasMember("Events") && msg["Events"].IsArray()) + { + const Value& events = msg["Events"]; + const Value& event = events[0]; + string message, reason, severity; + if (event.HasMember("Severity") && event["Severity"].IsString()) + { + severity = event["Severity"].GetString(); + if (severity.compare("Error") == 0) + { + m_hasErrors = true; + } + } + if (event.HasMember("EventInfo") && event["EventInfo"].IsObject()) + { + const Value& eventInfo = event["EventInfo"]; + if (eventInfo.HasMember("Message") && eventInfo["Message"].IsString()) + { + message = eventInfo["Message"].GetString(); + } + if (eventInfo.HasMember("Reason") && eventInfo["Reason"].IsString()) + { + reason = eventInfo["Reason"].GetString(); + } + + } + m_messages.push_back(Message(severity, message, reason)); + } + } + } +} + +/** + * Destructor for the error class + */ +OMFError::~OMFError() +{ +} + +/** + * Return the number of messages within the error report + */ +unsigned int OMFError::messageCount() +{ + return m_messageCount; +} + +/** + * Return the error message for the given message + * + * @param offset The error within the report to return + * @return string The event message + */ +string OMFError::getMessage(unsigned int offset) +{ +string rval; + + if (offset < m_messageCount) + { + rval = m_messages[offset].getMessage(); + } + return rval; +} + +/** + * Return the error reason for the given message + * + * @param offset The error within the report to return + * @return string The event reason + */ +string OMFError::getEventReason(unsigned int offset) +{ +string rval; + + if (offset < m_messageCount) + { + rval = m_messages[offset].getReason(); + } + return rval; +} + +/** + * Get the event severity for a given message + * + * @param offset The message to examine + * @return string The event severity + */ +string OMFError::getEventSeverity(unsigned int offset) +{ +string rval; + + if (offset < m_messageCount) + { + rval = m_messages[offset].getSeverity(); + } + return rval; +} + diff --git a/C/plugins/north/OMF/include/basetypes.h b/C/plugins/north/OMF/include/basetypes.h index df6e33ee6a..48395acfae 100644 --- a/C/plugins/north/OMF/include/basetypes.h +++ b/C/plugins/north/OMF/include/basetypes.h @@ -52,7 +52,7 @@ static const char *baseOMFTypes = QUOTE( "properties":{ "Integer16":{ "type":["integer","null"], - "format":"int16", + "format":"int16" }, "Time":{ "type":"string", @@ -68,7 +68,7 @@ static const char *baseOMFTypes = 
QUOTE( "properties":{ "Integer32":{ "type":["integer","null"], - "format":"int32", + "format":"int32" }, "Time":{ "type":"string", @@ -84,7 +84,7 @@ static const char *baseOMFTypes = QUOTE( "properties":{ "Integer64":{ "type":["integer","null"], - "format":"int64", + "format":"int64" }, "Time":{ "type":"string", @@ -100,7 +100,7 @@ static const char *baseOMFTypes = QUOTE( "properties":{ "UInteger16":{ "type":["integer","null"], - "format":"uint16", + "format":"uint16" }, "Time":{ "type":"string", @@ -116,7 +116,7 @@ static const char *baseOMFTypes = QUOTE( "properties":{ "UInteger32":{ "type":["integer","null"], - "format":"uint32", + "format":"uint32" }, "Time":{ "type":"string", @@ -132,7 +132,7 @@ static const char *baseOMFTypes = QUOTE( "properties":{ "UInteger64":{ "type":["integer","null"], - "format":"uint64", + "format":"uint64" }, "Time":{ "type":"string", diff --git a/C/plugins/north/OMF/include/omf.h b/C/plugins/north/OMF/include/omf.h index a7995fde90..f3081d0bb6 100644 --- a/C/plugins/north/OMF/include/omf.h +++ b/C/plugins/north/OMF/include/omf.h @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -95,12 +96,14 @@ class OMF * Constructor: * pass server URL path, OMF_type_id and producerToken. */ - OMF(HttpSender& sender, + OMF(const std::string& name, + HttpSender& sender, const std::string& path, const long typeId, const std::string& producerToken); - OMF(HttpSender& sender, + OMF(const std::string& name, + HttpSender& sender, const std::string& path, std::map& types, const std::string& producerToken); @@ -122,6 +125,11 @@ class OMF } }; + void setSender(HttpSender& sender) + { + m_sender = sender; + }; + /** * Send data to PI Server passing a vector of readings. * @@ -223,7 +231,7 @@ class OMF bool getAFMapEmptyMetadata() const { return m_AFMapEmptyMetadata; }; bool getConnected() const { return m_connected; }; - void setConnected(const bool connectionStatus) { m_connected = connectionStatus; }; + void setConnected(const bool connectionStatus); void setLegacyMode(bool legacy) { m_legacy = legacy; }; @@ -233,6 +241,7 @@ class OMF static std::string variableValueHandle(const Reading& reading, std::string &AFHierarchy); static bool extractVariable(string &strToHandle, string &variable, string &value, string &defaultValue); + static void reportAsset(const string& asset, const string& level, const string& msg); private: /** @@ -350,6 +359,7 @@ class OMF // string createAFLinks(Reading &reading, OMFHints *hints); + private: // Use for the evaluation of the OMFDataTypes.typesShort union t_typeCount { @@ -478,27 +488,43 @@ class OMF * The container for this asset and data point has been sent in * this session. */ - std::map + std::unordered_map m_containerSent; /** * The data message for this asset and data point has been sent in * this session. */ - std::map + std::unordered_map m_assetSent; /** * The link for this asset and data point has been sent in * this session. */ - std::map + std::unordered_map m_linkSent; /** * Force the data to be sent using the legacy, complex OMF types */ bool m_legacy; + + /** + * Assets that have been logged as having errors. This prevents us + * from flooding the logs with reports for the same asset. 
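+	 * The vector is static, so the set of reported assets is shared by every
+	 * OMF instance within the service and persists for the lifetime of the process.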
+	 */
+	static std::vector<std::string>
+				m_reportedAssets;
+	/**
+	 * Service name
+	 */
+	const std::string	m_name;
+
+	/**
+	 * Have base types been sent to the PI Server
+	 */
+	bool			m_baseTypesSent;
 };
 
 /**
diff --git a/C/plugins/north/OMF/include/omferror.h b/C/plugins/north/OMF/include/omferror.h
new file mode 100644
index 0000000000..debfa506c0
--- /dev/null
+++ b/C/plugins/north/OMF/include/omferror.h
@@ -0,0 +1,57 @@
+#ifndef _OMFERROR_H
+#define _OMFERROR_H
+/*
+ * Fledge OSIsoft OMF interface to PI Server.
+ *
+ * Copyright (c) 2023 Dianomic Systems
+ *
+ * Released under the Apache 2.0 Licence
+ *
+ * Author: Mark Riddoch
+ */
+
+#include 
+#include 
+#include 
+
+/**
+ * An encapsulation of an error return from an OMF call.
+ * The class parses the JSON response and gives access to portions of that JSON response.
+ */
+class OMFError {
+	public:
+		OMFError(const std::string& json);
+		~OMFError();
+
+		unsigned int	messageCount();
+		std::string	getMessage(unsigned int offset);
+		std::string	getEventReason(unsigned int offset);
+		std::string	getEventSeverity(unsigned int offset);
+		/**
+		 * The error report contains at least one error level event
+		 */
+		bool		hasErrors() { return m_hasErrors; };
+	private:
+		unsigned int	m_messageCount;
+		class Message {
+			public:
+				Message(const std::string& severity,
+						const std::string& message,
+						const std::string& reason) :
+					m_severity(severity),
+					m_message(message),
+					m_reason(reason)
+				{
+				};
+				std::string	getSeverity() { return m_severity; };
+				std::string	getMessage() { return m_message; };
+				std::string	getReason() { return m_reason; };
+			private:
+				std::string	m_severity;
+				std::string	m_message;
+				std::string	m_reason;
+		};
+		std::vector<Message>	m_messages;
+		bool		m_hasErrors;
+};
+#endif
diff --git a/C/plugins/north/OMF/include/omfinfo.h b/C/plugins/north/OMF/include/omfinfo.h
new file mode 100644
index 0000000000..d2d2aeae45
--- /dev/null
+++ b/C/plugins/north/OMF/include/omfinfo.h
@@ -0,0 +1,183 @@
+#ifndef _OMFINFO_H
+#define _OMFINFO_H
+/*
+ * Fledge OSIsoft OMF interface to PI Server.
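/*
 * Illustrative sketch only (not part of the change above): typical use of the
 * OMFError class declared in omferror.h. The response text would normally come
 * from HttpSender::getHTTPResponse() after a BadRequest or Conflict exception
 * has been caught; m_sender is assumed to be the HttpSender in use.
 */
	OMFError error(m_sender.getHTTPResponse());
	if (error.hasErrors())
	{
		Logger::getLogger()->warn("The OMF endpoint returned %d messages", error.messageCount());
		for (unsigned int i = 0; i < error.messageCount(); i++)
		{
			Logger::getLogger()->warn("Message %d: %s, %s, %s", i,
					error.getEventSeverity(i).c_str(),
					error.getMessage(i).c_str(),
					error.getEventReason(i).c_str());
		}
	}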
+ * + * Copyright (c) 2023 Dianomic Systems + * + * Released under the Apache 2.0 Licence + * + * Author: Mark Riddoch + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "rapidjson/writer.h" +#include "rapidjson/stringbuffer.h" +#include "json_utils.h" +#include "libcurl_https.h" +#include "utils.h" +#include "string_utils.h" +#include + +#include "crypto.hpp" + +#define PLUGIN_NAME "OMF" +#define TYPE_ID_KEY "type-id" +#define SENT_TYPES_KEY "sentDataTypes" +#define DATA_KEY "dataTypes" +#define DATA_KEY_SHORT "dataTypesShort" +#define DATA_KEY_HINT "hintChecksum" +#define NAMING_SCHEME "namingScheme" +#define AFH_HASH "afhHash" +#define AF_HIERARCHY "afHierarchy" +#define AF_HIERARCHY_ORIG "afHierarchyOrig" + + +#define PROPERTY_TYPE "type" +#define PROPERTY_NUMBER "number" +#define PROPERTY_STRING "string" + +#define ENDPOINT_URL_PI_WEB_API "https://HOST_PLACEHOLDER:PORT_PLACEHOLDER/piwebapi/omf" +#define ENDPOINT_URL_CR "https://HOST_PLACEHOLDER:PORT_PLACEHOLDER/ingress/messages" +#define ENDPOINT_URL_OCS "https://REGION_PLACEHOLDER.osisoft.com:PORT_PLACEHOLDER/api/v1/tenants/TENANT_ID_PLACEHOLDER/Namespaces/NAMESPACE_ID_PLACEHOLDER/omf" +#define ENDPOINT_URL_ADH "https://REGION_PLACEHOLDER.datahub.connect.aveva.com:PORT_PLACEHOLDER/api/v1/Tenants/TENANT_ID_PLACEHOLDER/Namespaces/NAMESPACE_ID_PLACEHOLDER/omf" + +#define ENDPOINT_URL_EDS "http://localhost:PORT_PLACEHOLDER/api/v1/tenants/default/namespaces/default/omf" + + +enum OMF_ENDPOINT_PORT { + ENDPOINT_PORT_PIWEB_API=443, + ENDPOINT_PORT_CR=5460, + ENDPOINT_PORT_OCS=443, + ENDPOINT_PORT_EDS=5590, + ENDPOINT_PORT_ADH=443 +}; + +/** + * Plugin specific default configuration + */ + +#define NOT_BLOCKING_ERRORS_DEFAULT QUOTE( \ + { \ + "errors400" : [ \ + "Redefinition of the type with the same ID is not allowed", \ + "Invalid value type for the property", \ + "Property does not exist in the type definition", \ + "Container is not defined", \ + "Unable to find the property of the container of type" \ + ] \ + } \ +) + +#define NOT_BLOCKING_ERRORS_DEFAULT_PI_WEB_API QUOTE( \ + { \ + "EventInfo" : [ \ + "The specified value is outside the allowable range" \ + ] \ + } \ +) + +#define AF_HIERARCHY_RULES QUOTE( \ + { \ + } \ +) + +/** + * A class that holds the configuration information for the OMF plugin. 
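/*
 * Illustrative sketch only: how the URL templates defined above are resolved
 * into a concrete endpoint URL. StringReplace() is the helper from
 * string_utils.h used for this in omfinfo.cpp; the host name shown is an
 * assumed example.
 */
	string url = ENDPOINT_URL_PI_WEB_API;	// "https://HOST_PLACEHOLDER:PORT_PLACEHOLDER/piwebapi/omf"
	StringReplace(url, "HOST_PLACEHOLDER", "pi-server.example.com");
	StringReplace(url, "PORT_PLACEHOLDER", "443");	// ENDPOINT_PORT_PIWEB_API
	// url is now "https://pi-server.example.com:443/piwebapi/omf"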
+ * + * Note this is the first stage of refactoring the OMF pluigns and represents + * the CONNECTOR_INFO structure of original plugin as a class + */ +class OMFInformation { + public: + OMFInformation(ConfigCategory* configData); + ~OMFInformation(); + void start(const std::string& storedData); + uint32_t send(const vector& readings); + std::string saveData(); + private: + void loadSentDataTypes(rapidjson::Document& JSONData); + long getMaxTypeId(); + int PIWebAPIGetVersion(bool logMessage = true); + int EDSGetVersion(); + void SetOMFVersion(); + std::string OCSRetrieveAuthToken(); + OMF_ENDPOINT identifyPIServerEndpoint(); + std::string saveSentDataTypes(); + unsigned long calcTypeShort(const std::string& dataTypes); + void ParseProductVersion(std::string &versionString, int *major, int *minor); + std::string ParseEDSProductInformation(std::string json); + std::string AuthBasicCredentialsGenerate(std::string& userId, std::string& password); + void AuthKerberosSetup(std::string& keytabEnv, std::string& keytabFileName); + double GetElapsedTime(struct timeval *startTime); + bool IsPIWebAPIConnected(); + + private: + Logger *m_logger; + HttpSender *m_sender; // HTTPS connection + OMF *m_omf; // OMF data protocol + bool m_sendFullStructure; // It sends the minimum OMF structural messages to load data into PI Data Archive if disabled + bool m_compression; // whether to compress readings' data + string m_protocol; // http / https + string m_hostAndPort; // hostname:port for SimpleHttps + unsigned int m_retrySleepTime; // Seconds between each retry + unsigned int m_maxRetry; // Max number of retries in the communication + unsigned int m_timeout; // connect and operation timeout + string m_path; // PI Server application path + long m_typeId; // OMF protocol type-id prefix + string m_producerToken; // PI Server connector token + string m_formatNumber; // OMF protocol Number format + string m_formatInteger; // OMF protocol Integer format + OMF_ENDPOINT m_PIServerEndpoint; // Defines which End point should be used for the communication + NAMINGSCHEME_ENDPOINT + m_NamingScheme; // Define how the object names should be generated - https://fledge-iot.readthedocs.io/en/latest/OMF.html#naming-scheme + string m_DefaultAFLocation; // 1st hierarchy in Asset Framework, PI Web API only. + string m_AFMap; // Defines a set of rules to address where assets should be placed in the AF hierarchy. + // https://fledge-iot.readthedocs.io/en/latest/OMF.html#asset-framework-hierarchy-rules + + string m_prefixAFAsset; // Prefix to generate unique asset id + string m_PIWebAPIProductTitle; + string m_RestServerVersion; + string m_PIWebAPIAuthMethod; // Authentication method to be used with the PI Web API. + string m_PIWebAPICredentials; // Credentials is the base64 encoding of id and password joined by a single colon (:) + string m_KerberosKeytab; // Kerberos authentication keytab file + // stores the environment variable value about the keytab file path + // to allow the environment to persist for all the execution of the plugin + // + // Note : A keytab is a file containing pairs of Kerberos principals + // and encrypted keys (which are derived from the Kerberos password). + // You can use a keytab file to authenticate to various remote systems + // using Kerberos without entering a password. 
+ + string m_OCSNamespace; // OCS configurations + string m_OCSTenantId; + string m_OCSClientId; + string m_OCSClientSecret; + string m_OCSToken; + + vector> + m_staticData; // Static data + // Errors considered not blocking in the communication with the PI Server + std::vector + m_notBlockingErrors; + // Per asset DataTypes + std::map + m_assetsDataTypes; + string m_omfversion; + bool m_legacy; + string m_name; + bool m_connected; +}; +#endif diff --git a/C/plugins/north/OMF/include/omflinkeddata.h b/C/plugins/north/OMF/include/omflinkeddata.h index c2e174a962..bf12ac4e6b 100644 --- a/C/plugins/north/OMF/include/omflinkeddata.h +++ b/C/plugins/north/OMF/include/omflinkeddata.h @@ -34,9 +34,9 @@ class OMFLinkedData { public: - OMFLinkedData( std::map *containerSent, - std::map *assetSent, - std::map *linkSent, + OMFLinkedData( std::unordered_map *containerSent, + std::unordered_map *assetSent, + std::unordered_map *linkSent, const OMF_ENDPOINT PIServerEndpoint = ENDPOINT_CR) : m_containerSent(containerSent), m_assetSent(assetSent), @@ -55,7 +55,8 @@ class OMFLinkedData m_integerFormat = integerFormat; }; private: - std::string sendContainer(std::string& link, Datapoint *dp, const std::string& format, OMFHints * hints); + std::string getBaseType(Datapoint *dp, const std::string& format); + void sendContainer(std::string& link, Datapoint *dp, OMFHints * hints, const std::string& baseType); bool isTypeSupported(DatapointValue& dataPoint) { switch (dataPoint.getType()) @@ -72,21 +73,24 @@ class OMFLinkedData private: /** * The container for this asset and data point has been sent in - * this session. + * this session. The key is the asset followed by the datapoint name + * with a '.' delimiter between. The value is the base type used, a + * container will be sent if the base type changes. */ - std::map *m_containerSent; + std::unordered_map *m_containerSent; /** - * The data message for this asset and data point has been sent in - * this session. + * The data message for this asset has been sent in + * this session. The key is the asset name. The value is always true. */ - std::map *m_assetSent; + std::unordered_map *m_assetSent; /** * The link for this asset and data point has been sent in - * this session. + * this session. key is the asset followed by the datapoint name + * with a '.' delimiter between. The value is always true. */ - std::map *m_linkSent; + std::unordered_map *m_linkSent; /** * The endpoint to which we are sending data @@ -100,5 +104,6 @@ class OMFLinkedData std::string m_containers; std::string m_doubleFormat; std::string m_integerFormat; + }; #endif diff --git a/C/plugins/north/OMF/linkdata.cpp b/C/plugins/north/OMF/linkdata.cpp index f3f8d0d9ec..edc4771389 100644 --- a/C/plugins/north/OMF/linkdata.cpp +++ b/C/plugins/north/OMF/linkdata.cpp @@ -20,10 +20,45 @@ #include #include +#include #include +#include + +/** + * In order to cut down on the number of string copies made whilst building + * the OMF message for a reading we reseeve a number of bytes in a string and + * each time we get close to filling the string we reserve mode. The value below + * defines the increment we use to grow the string reservation. 
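+ * For example, the reservation starts at 200 bytes (2 * RESERVE_INCREMENT) and
+ * grows to 300, 400, ... whenever fewer than RESERVE_INCREMENT / 2 bytes remain,
+ * so a reading with many data points triggers a handful of reservations rather
+ * than repeated reallocation and copying as the message is appended to.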
+ */ +#define RESERVE_INCREMENT 100 using namespace std; + +/** + * Create a comma-separated string of all Datapoint names in a Reading + * + * @param reading Reading + * @return Datapoint names in the Reading + */ +static std::string DataPointNamesAsString(const Reading& reading) +{ + std::string dataPointNames; + + for (Datapoint *datapoint : reading.getReadingData()) + { + dataPointNames.append(datapoint->getName()); + dataPointNames.append(","); + } + + if (dataPointNames.size() > 0) + { + dataPointNames.resize(dataPointNames.size() - 1); // remove trailing comma + } + + return dataPointNames; +} + /** * OMFLinkedData constructor, generates the OMF message containing the data * @@ -36,6 +71,8 @@ string OMFLinkedData::processReading(const Reading& reading, const string& AFHi { string outData; bool changed; + int reserved = RESERVE_INCREMENT * 2; + outData.reserve(reserved); string assetName = reading.getAssetName(); @@ -47,13 +84,17 @@ string OMFLinkedData::processReading(const Reading& reading, const string& AFHi { if (typeid(**it) == typeid(OMFTagNameHint)) { - assetName = (*it)->getHint(); - Logger::getLogger()->info("Using OMF TagName hint: %s", assetName.c_str()); + string hintValue = (*it)->getHint(); + Logger::getLogger()->info("Using OMF TagName hint: %s for asset %s", + hintValue.c_str(), assetName.c_str()); + assetName = hintValue; } if (typeid(**it) == typeid(OMFTagHint)) { - assetName = (*it)->getHint(); - Logger::getLogger()->info("Using OMF Tag hint: %s", assetName.c_str()); + string hintValue = (*it)->getHint(); + Logger::getLogger()->info("Using OMF Tag hint: %s for asset %s", + hintValue.c_str(), assetName.c_str()); + assetName = hintValue; } } } @@ -61,12 +102,14 @@ string OMFLinkedData::processReading(const Reading& reading, const string& AFHi // Get reading data const vector data = reading.getReadingData(); - unsigned long skipDatapoints = 0; + vector skippedDatapoints; - Logger::getLogger()->info("Processing %s with new OMF method", assetName.c_str()); + Logger::getLogger()->debug("Processing %s (%s) using Linked Types", assetName.c_str(), DataPointNamesAsString(reading).c_str()); + + assetName = OMF::ApplyPIServerNamingRulesObj(assetName, NULL); bool needDelim = false; - if (m_assetSent->find(assetName) == m_assetSent->end()) + if (m_assetSent->count(assetName) == 0) { // Send the data message to create the asset instance outData.append("{ \"typeid\":\"FledgeAsset\", \"values\":[ { \"AssetId\":\""); @@ -83,15 +126,22 @@ string OMFLinkedData::processReading(const Reading& reading, const string& AFHi */ for (vector::const_iterator it = data.begin(); it != data.end(); ++it) { - string dpName = (*it)->getName(); + Datapoint *dp = *it; + if (reserved - outData.size() < RESERVE_INCREMENT / 2) + { + reserved += RESERVE_INCREMENT; + outData.reserve(reserved); + } + string dpName = dp->getName(); if (dpName.compare(OMF_HINT) == 0) { // Don't send the OMF Hint to the PI Server continue; } - if (!isTypeSupported((*it)->getData())) + dpName = OMF::ApplyPIServerNamingRulesObj(dpName, NULL); + if (!isTypeSupported(dp->getData())) { - skipDatapoints++;; + skippedDatapoints.push_back(dpName); continue; } else @@ -126,20 +176,34 @@ string OMFLinkedData::processReading(const Reading& reading, const string& AFHi // Create the link for the asset if not already created string link = assetName + "." 
+ dpName; - string baseType; + string baseType = getBaseType(dp, format); auto container = m_containerSent->find(link); if (container == m_containerSent->end()) { - baseType = sendContainer(link, *it, format, hints); + sendContainer(link, dp, hints, baseType); m_containerSent->insert(pair(link, baseType)); } - else + else if (baseType.compare(container->second) != 0) { - baseType = container->second; + if (container->second.compare(0, 6, "Double") == 0 && + (baseType.compare(0, 7, "Integer") == 0 + || baseType.compare(0, 8, "UInteger") == 0)) + { + string msg = "Asset " + assetName + " data point " + dpName + + " conversion from floating point to integer is being ignored"; + OMF::reportAsset(assetName, "warn", msg); + baseType = container->second; + } + else + { + sendContainer(link, dp, hints, baseType); + (*m_containerSent)[link] = baseType; + } } if (baseType.empty()) { // Type is not supported, skip the datapoint + skippedDatapoints.push_back(dpName); continue; } if (m_linkSent->find(link) == m_linkSent->end()) @@ -163,27 +227,43 @@ string OMFLinkedData::processReading(const Reading& reading, const string& AFHi // Base type we are using for this data point outData.append("\"" + baseType + "\": "); // Add datapoint Value - outData.append((*it)->getData().toString()); + outData.append(dp->getData().toString()); outData.append(", "); // Append Z to getAssetDateTime(FMT_STANDARD) outData.append("\"Time\": \"" + reading.getAssetDateUserTime(Reading::FMT_STANDARD) + "Z" + "\""); outData.append("} ] }"); } } - Logger::getLogger()->debug("Created data messasges %s", outData.c_str()); + if (skippedDatapoints.size() > 0) + { + string points; + for (string& dp : skippedDatapoints) + { + if (!points.empty()) + points.append(", "); + points.append(dp); + } + auto pos = points.find_last_of(","); + if (pos != string::npos) + { + points.replace(pos, 1, " and"); + } + string assetName = reading.getAssetName(); + string msg = "The asset " + assetName + " had a number of datapoints, " + points + " that are not supported by OMF and have been omitted"; + OMF::reportAsset(assetName, "warn", msg); + } + Logger::getLogger()->debug("Created data messages %s", outData.c_str()); return outData; } /** - * Send the container message for the linked datapoint + * Calculate the base type we need to link the container * - * @param linkName The name to use for the container * @param dp The datapoint to process * @param format The format to use based on a hint, this may be empty - * @param hints Hints related to this asset * @return The base type linked in the container */ -string OMFLinkedData::sendContainer(string& linkName, Datapoint *dp, const string& format, OMFHints * hints) +string OMFLinkedData::getBaseType(Datapoint *dp, const string& format) { string baseType; switch (dp->getData().getType()) @@ -226,11 +306,24 @@ string OMFLinkedData::sendContainer(string& linkName, Datapoint *dp, const strin break; } default: - Logger::getLogger()->error("Unsupported type %s", dp->getData().getTypeStr()); + Logger::getLogger()->error("Unsupported type %s for the data point %s", dp->getData().getTypeStr(), + dp->getName().c_str()); // Not supported return baseType; } + return baseType; +} +/** + * Send the container message for the linked datapoint + * + * @param linkName The name to use for the container + * @param dp The datapoint to process + * @param hints Hints related to this asset + * @param baseType The baseType we will use + */ +void OMFLinkedData::sendContainer(string& linkName, Datapoint *dp, OMFHints * hints, 
const string& baseType) +{ string dataSource = "Fledge"; string uom, minimum, maximum, interpolation; bool propertyOverrides = false; @@ -328,8 +421,6 @@ string OMFLinkedData::sendContainer(string& linkName, Datapoint *dp, const strin if (! m_containers.empty()) m_containers += ","; m_containers.append(container); - - return baseType; } /** @@ -355,28 +446,55 @@ bool OMFLinkedData::flushContainers(HttpSender& sender, const string& path, vect payload); if ( ! (res >= 200 && res <= 299) ) { - Logger::getLogger()->error("Sending containers, HTTP code %d - %s %s", + Logger::getLogger()->error("An error occurred sending the container data. HTTP code %d - %s %s", res, sender.getHostPort().c_str(), - path.c_str()); + sender.getHTTPResponse().c_str()); return false; } } // Exception raised for HTTP 400 Bad Request catch (const BadRequest& e) { + OMFError error(sender.getHTTPResponse()); + if (error.hasErrors()) + { + Logger::getLogger()->warn("The OMF endpoint reported a bad request when sending containers: %d messages", + error.messageCount()); + for (unsigned int i = 0; i < error.messageCount(); i++) + { + Logger::getLogger()->warn("Message %d: %s, %s, %s", + i, error.getEventSeverity(i).c_str(), error.getMessage(i).c_str(), error.getEventReason(i).c_str()); + } + } - Logger::getLogger()->warn("Sending containers, not blocking issue: %s - %s %s", - e.what(), - sender.getHostPort().c_str(), - path.c_str()); + return error.hasErrors(); + } + catch (const Conflict& e) + { + OMFError error(sender.getHTTPResponse()); + // The following is possibly too verbose + if (error.hasErrors()) + { + Logger::getLogger()->warn("The OMF endpoint reported a conflict when sending containers: %d messages", + error.messageCount()); + for (unsigned int i = 0; i < error.messageCount(); i++) + { + string severity = error.getEventSeverity(i); + if (severity.compare("Error") == 0) + { + Logger::getLogger()->warn("Message %d: %s, %s, %s", + i, error.getEventSeverity(i).c_str(), error.getMessage(i).c_str(), error.getEventReason(i).c_str()); + } + } + } - return false; + return error.hasErrors(); } catch (const std::exception& e) { - Logger::getLogger()->error("Sending containers, %s - %s %s", + Logger::getLogger()->error("An exception occurred when sending container information the OMF endpoint, %s - %s %s", e.what(), sender.getHostPort().c_str(), path.c_str()); diff --git a/C/plugins/north/OMF/omf.cpp b/C/plugins/north/OMF/omf.cpp index 80036d92af..c4f0e4cf06 100644 --- a/C/plugins/north/OMF/omf.cpp +++ b/C/plugins/north/OMF/omf.cpp @@ -32,11 +32,14 @@ #include #include +#include +#include using namespace std; using namespace rapidjson; static bool isTypeSupported(DatapointValue& dataPoint); +vector OMF::m_reportedAssets; // 1 enable performance tracking #define INSTRUMENT 0 @@ -223,7 +226,8 @@ const string& OMFData::OMFdataVal() const /** * OMF constructor */ -OMF::OMF(HttpSender& sender, +OMF::OMF(const string& name, + HttpSender& sender, const string& path, const long id, const string& token) : @@ -231,7 +235,10 @@ OMF::OMF(HttpSender& sender, m_typeId(id), m_producerToken(token), m_sender(sender), - m_legacy(false) + m_legacy(false), + m_name(name), + m_baseTypesSent(false), + m_linkedProperties(true) { m_lastError = false; m_changeTypeId = false; @@ -243,14 +250,18 @@ OMF::OMF(HttpSender& sender, * OMF constructor with per asset data types */ -OMF::OMF(HttpSender& sender, +OMF::OMF(const string& name, + HttpSender& sender, const string& path, map& types, const string& token) : m_path(path), m_OMFDataTypes(&types), 
m_producerToken(token), - m_sender(sender) + m_sender(sender), + m_name(name), + m_baseTypesSent(false), + m_linkedProperties(true) { // Get starting type-id sequence or set the default value auto it = (*m_OMFDataTypes).find(FAKE_ASSET_KEY); @@ -350,6 +361,7 @@ bool OMF::sendDataTypes(const Reading& row, OMFHints *hints) // Build an HTTPS POST with 'resType' headers // and 'typeData' JSON payload // Then get HTTPS POST ret code and return 0 to client on error + string assetName = row.getAssetName(); try { res = m_sender.sendRequest("POST", @@ -358,39 +370,55 @@ bool OMF::sendDataTypes(const Reading& row, OMFHints *hints) typeData); if ( ! (res >= 200 && res <= 299) ) { - Logger::getLogger()->error("Sending JSON dataType message 'Type', HTTP code %d - %s %s", - res, - m_sender.getHostPort().c_str(), - m_path.c_str()); + string msg = "An error occurred sending the dataType message for the asset " + assetName; + msg.append(". HTTP error code " + to_string(res)); + reportAsset(assetName, "error", msg); return false; } } // Exception raised for HTTP 400 Bad Request catch (const BadRequest& e) { + OMFError error(m_sender.getHTTPResponse()); + // FIXME The following is too verbose + if (error.hasErrors()) + { + Logger::getLogger()->warn("The OMF endpoint reported a bad request when sending data types: %d messages", + error.messageCount()); + for (unsigned int i = 0; i < error.messageCount(); i++) + Logger::getLogger()->warn("Message %d: %s, %s, %s", + i, error.getEventSeverity(i).c_str(), error.getMessage(i).c_str(), error.getEventReason(i).c_str()); + } + if (OMF::isDataTypeError(e.what())) { // Data type error: force type-id change m_changeTypeId = true; } - string errorMsg = errorMessageHandler(e.what()); + string errorMsg = errorMessageHandler(e.what()); - Logger::getLogger()->warn("Sending dataType message 'Type', not blocking issue: %s %s - %s %s", - (m_changeTypeId ? "Data Type " : "" ), - errorMsg.c_str(), - m_sender.getHostPort().c_str(), - m_path.c_str()); + string msg = "An error occurred sending the dataType message for the asset " + assetName + + ". " + errorMsg; + if (m_changeTypeId) + { + msg.append(". A data type change will take place to try to resolve this error"); + } + reportAsset(assetName, "error", msg); return false; } + catch (const Unauthorized& e) + { + Logger::getLogger()->error("OMF endpoint reported we are not authorized, please check configuration of the authentication method and credentials"); + return false; + } catch (const std::exception& e) { string errorMsg = errorMessageHandler(e.what()); - Logger::getLogger()->error("Sending dataType message 'Type', %s - %s %s", - errorMsg.c_str(), - m_sender.getHostPort().c_str(), - m_path.c_str()); + string msg = "An error occurred sending the dataType message for the asset " + assetName + + ". " + errorMsg; + reportAsset(assetName, "error", msg); m_connected = false; return false; } @@ -411,17 +439,28 @@ bool OMF::sendDataTypes(const Reading& row, OMFHints *hints) typeContainer); if ( ! (res >= 200 && res <= 299) ) { - Logger::getLogger()->error("Sending JSON dataType message 'Container' " - "- error: HTTP code |%d| - %s %s", - res, - m_sender.getHostPort().c_str(), - m_path.c_str() ); + string msg = "An error occurred sending the dataType container message for the asset " + assetName; + msg.append(". 
HTTP error code " + to_string(res)); + reportAsset(assetName, "error", msg); return false; } } // Exception raised for HTTP 400 Bad Request catch (const BadRequest& e) { + OMFError error(m_sender.getHTTPResponse()); + // FIXME The following is too verbose + if (error.hasErrors()) + { + Logger::getLogger()->warn("The OMF endpoint reported a bad request when sending data type containers : %d messages", + error.messageCount()); + for (unsigned int i = 0; i < error.messageCount(); i++) + { + Logger::getLogger()->warn("Message %d: %s, %s, %s", + i, error.getEventSeverity(i).c_str(), error.getMessage(i).c_str(), error.getEventReason(i).c_str()); + } + } + if (OMF::isDataTypeError(e.what())) { // Data type error: force type-id change @@ -429,29 +468,33 @@ bool OMF::sendDataTypes(const Reading& row, OMFHints *hints) } string errorMsg = errorMessageHandler(e.what()); - Logger::getLogger()->warn("Sending JSON dataType message 'Container' " - "not blocking issue: |%s| - %s - %s %s", - (m_changeTypeId ? "Data Type " : "" ), - errorMsg.c_str(), - m_sender.getHostPort().c_str(), - m_path.c_str() ); + string msg = "An error occurred sending the dataType container message for the asset " + assetName + + ". " + errorMsg; + if (m_changeTypeId) + { + msg.append(". A data type change will take place to try to resolve this error"); + } + reportAsset(assetName, "error", msg); + return false; + } + catch (const Unauthorized& e) + { + Logger::getLogger()->error("OMF endpoint reported we are not authorized, please check configuration of the authentication method and credentials"); return false; } catch (const std::exception& e) { string errorMsg = errorMessageHandler(e.what()); - Logger::getLogger()->error("Sending JSON dataType message 'Container' - %s - %s %s", - errorMsg.c_str(), - m_sender.getHostPort().c_str(), - m_path.c_str()); + string msg = "An error occurred sending the dataType message for the asset " + assetName + + ". " + errorMsg; + reportAsset(assetName, "error", msg); m_connected = false; return false; } - if (m_sendFullStructure) { - - + if (m_sendFullStructure) + { // Create header for Static data vector> resStaticData = OMF::createMessageHeader("Data"); // Create data for Static Data message @@ -468,17 +511,28 @@ bool OMF::sendDataTypes(const Reading& row, OMFHints *hints) typeStaticData); if ( ! (res >= 200 && res <= 299) ) { - Logger::getLogger()->error("Sending JSON dataType message 'StaticData' " - "- error: HTTP code |%d| - %s %s", - res, - m_sender.getHostPort().c_str(), - m_path.c_str() ); + string msg = "An error occurred sending the StaticData dataType message for the asset " + assetName; + msg.append(". 
HTTP error code " + to_string(res)); + reportAsset(assetName, "warn", msg); return false; } } // Exception raised fof HTTP 400 Bad Request catch (const BadRequest& e) { + OMFError error(m_sender.getHTTPResponse()); + // FIXME The following is too verbose + if (error.hasErrors()) + { + Logger::getLogger()->warn("The OMF endpoint reported a bad request when sending Static dataType: %d messages", + error.messageCount()); + for (unsigned int i = 0; i < error.messageCount(); i++) + { + Logger::getLogger()->warn("Message %d: %s, %s, %s", + i, error.getEventSeverity(i).c_str(), error.getMessage(i).c_str(), error.getEventReason(i).c_str()); + } + } + if (OMF::isDataTypeError(e.what())) { // Data type error: force type-id change @@ -486,31 +540,27 @@ bool OMF::sendDataTypes(const Reading& row, OMFHints *hints) } string errorMsg = errorMessageHandler(e.what()); - Logger::getLogger()->warn("Sending JSON dataType message 'StaticData'" - "not blocking issue: |%s| - %s - %s %s", - (m_changeTypeId ? "Data Type " : "" ), - errorMsg.c_str(), - m_sender.getHostPort().c_str(), - m_path.c_str() ); + string msg = "An error occurred sending the dataType staticData message for the asset " + assetName + + ". " + errorMsg; + if (m_changeTypeId) + { + msg.append(". A data type change will take place to try to resolve this error"); + } + reportAsset(assetName, "warn", msg); return false; } catch (const std::exception& e) { string errorMsg = errorMessageHandler(e.what()); - Logger::getLogger()->error("Sending JSON dataType message 'StaticData'" - "- generic error: %s - %s %s", - errorMsg.c_str(), - m_sender.getHostPort().c_str(), - m_path.c_str() ); + string msg = "An error occurred sending the dataType staticData message for the asset " + assetName + + ". " + errorMsg; + reportAsset(assetName, "debug", msg); m_connected = false; return false; } - } - if (m_sendFullStructure) - { // Create header for Link data vector> resLinkData = OMF::createMessageHeader("Data"); @@ -540,13 +590,6 @@ bool OMF::sendDataTypes(const Reading& row, OMFHints *hints) objectPrefix = prefix; } - Logger::getLogger()->debug("%s - assetName :%s: AFHierarchy :%s: prefix :%s: objectPrefix :%s: AFHierarchyLevel :%s: ", __FUNCTION__ - ,assetName.c_str() - , AFHierarchy.c_str() - , prefix.c_str() - , objectPrefix.c_str() - , AFHierarchyLevel.c_str() ); - // Create data for Static Data message string typeLinkData = OMF::createLinkData(row, AFHierarchyLevel, prefix, objectPrefix, hints, true); string payload = "[" + typeLinkData + "]"; @@ -562,47 +605,60 @@ bool OMF::sendDataTypes(const Reading& row, OMFHints *hints) payload); if (!(res >= 200 && res <= 299)) { - Logger::getLogger()->error("Sending JSON dataType message 'Data' (lynk) - error: HTTP code |%d| - %s %s", - res, - m_sender.getHostPort().c_str(), - m_path.c_str()); + string msg = "An error occurred sending the link dataType message for the asset " + assetName; + msg.append(". 
HTTP error code " + to_string(res)); + reportAsset(assetName, "warn", msg); return false; } } - // Exception raised fof HTTP 400 Bad Request + // Exception raised for HTTP 400 Bad Request catch (const BadRequest &e) { + OMFError error(m_sender.getHTTPResponse()); + // FIXME The following is too verbose + if (error.hasErrors()) + { + Logger::getLogger()->warn("The OMF endpoint reported a bad request when sending link types: %d messages", + error.messageCount()); + for (unsigned int i = 0; i < error.messageCount(); i++) + { + Logger::getLogger()->warn("Message %d: %s, %s, %s", + i, error.getEventSeverity(i).c_str(), + error.getMessage(i).c_str(), error.getEventReason(i).c_str()); + } + } + if (OMF::isDataTypeError(e.what())) { // Data type error: force type-id change m_changeTypeId = true; } string errorMsg = errorMessageHandler(e.what()); + string msg = "An error occurred sending the dataType link message for the asset " + assetName + + ". " + errorMsg; + if (m_changeTypeId) + { + msg.append(". A data type change will take place to try to resolve this error"); + } + reportAsset(assetName, "warn", msg); - Logger::getLogger()->warn("Sending JSON dataType message 'Data' (lynk) " - "not blocking issue: |%s| - %s - %s %s", - (m_changeTypeId ? "Data Type " : ""), - errorMsg.c_str(), - m_sender.getHostPort().c_str(), - m_path.c_str() ); return false; } catch (const std::exception &e) { string errorMsg = errorMessageHandler(e.what()); - Logger::getLogger()->error("Sending JSON dataType message 'Data' (lynk) " - "- generic error: %s - %s %s", - errorMsg.c_str(), - m_sender.getHostPort().c_str(), - m_path.c_str() ); + string msg = "An error occurred sending the dataType staticData message for the asset " + assetName + + ". " + errorMsg; + reportAsset(assetName, "debug", msg); return false; } } } else { - Logger::getLogger()->error("AF hiererachy is not defined for the asset Name |%s|", assetName.c_str()); + string msg("AF hierarchy is not defined for the asset " + assetName); + reportAsset(assetName, "warn", msg); } } // All data types sent: success @@ -635,6 +691,17 @@ bool OMF::AFHierarchySendMessage(const string& msgType, string& jsonData, const } catch (const BadRequest& ex) { + OMFError error(m_sender.getHTTPResponse()); + // FIXME The following is too verbose + Logger::getLogger()->warn("The OMF endpoint reported a bad request when sending AF hierarchy: %d messages", + error.messageCount()); + for (unsigned int i = 0; i < error.messageCount(); i++) + { + Logger::getLogger()->warn("Message %d: %s, %s, %s", + i, error.getEventSeverity(i).c_str(), error.getMessage(i).c_str(), + error.getEventReason(i).c_str()); + } + success = false; errorMessage = ex.what(); } @@ -1037,15 +1104,23 @@ uint32_t OMF::sendToServer(const vector& readings, gettimeofday(&start, NULL); #endif - if (m_linkedProperties) + if (m_linkedProperties && m_baseTypesSent == false) { if (!sendBaseTypes()) { - Logger::getLogger()->error("Unable to send base types, linked assets will not be sent"); + Logger::getLogger()->error("Unable to send base types, linked assets will not be sent. 
The system will fall back to using complex types."); m_linkedProperties = false; } + else + { + m_baseTypesSent = true; + } } + // TODO We do not need the superset stuff if we are using linked data types, + // this would save us interating over the dat aan extra time and reduce our + // memory footprint + // // Create a superset of all the datapoints for each assetName // the superset[assetName] is then passed to routines which handles // creation of OMF data types. This is used for the initial type @@ -1264,8 +1339,8 @@ uint32_t OMF::sendToServer(const vector& readings, } } - if (m_sendFullStructure) { - + if (m_sendFullStructure) + { // The AF hierarchy is created/recreated if an OMF type message is sent // it sends the hierarchy once if (sendDataTypes and ! AFHierarchySent) @@ -1327,7 +1402,7 @@ uint32_t OMF::sendToServer(const vector& readings, auto asset_sent = m_assetSent.find(m_assetName); // Send data for this reading using the new mechanism outData = linkedData.processReading(*reading, AFHierarchyPrefix, hints); - if (asset_sent == m_assetSent.end()) + if (m_sendFullStructure && asset_sent == m_assetSent.end()) { // If the hierarchy has not already been sent then send it if (! AFHierarchySent) @@ -1392,7 +1467,8 @@ uint32_t OMF::sendToServer(const vector& readings, */ // Create header for Readings data - vector> readingData = OMF::createMessageHeader("Data"); + std::string action = (this->m_OMFVersion.compare("1.2") == 0) ? "update" : "create"; + vector> readingData = OMF::createMessageHeader("Data", action); if (compression) readingData.push_back(pair("compression", "gzip")); @@ -1442,7 +1518,7 @@ uint32_t OMF::sendToServer(const vector& readings, timersub(&t5, &t4, &tm); timeT5 = tm.tv_sec + ((double)tm.tv_usec / 1000000); - Logger::getLogger()->debug("Timing seconds - thread :%s: - superSet :%6.3f: - Loop :%6.3f: - compress :%6.3f: - send data :%6.3f: - readings |%d| - msg size |%d| - msg size compressed |%d| ", + Logger::getLogger()->warn("Timing seconds - thread :%s: - superSet :%6.3f: - Loop :%6.3f: - compress :%6.3f: - send data :%6.3f: - readings |%d| - msg size |%d| - msg size compressed |%d| ", threadId.str().c_str(), timeT1, timeT2, @@ -1462,6 +1538,19 @@ uint32_t OMF::sendToServer(const vector& readings, // Exception raised for HTTP 400 Bad Request catch (const BadRequest& e) { + OMFError error(m_sender.getHTTPResponse()); + // FIXME The following is too verbose + if (error.hasErrors()) + { + Logger::getLogger()->warn("The OMF endpoint reported a bad request when sending data: %d messages", + error.messageCount()); + for (unsigned int i = 0; i < error.messageCount(); i++) + { + Logger::getLogger()->warn("Message %d: %s, %s, %s", + i, error.getEventSeverity(i).c_str(), error.getMessage(i).c_str(), error.getEventReason(i).c_str()); + } + } + if (OMF::isDataTypeError(e.what())) { // Some assets have invalid or redefined data type @@ -1798,8 +1887,8 @@ const std::string OMF::createTypeData(const Reading& reading, OMFHints *hints) string tData="["; - if (m_sendFullStructure) { - + if (m_sendFullStructure) + { // Add the Static data part tData.append("{ \"type\": \"object\", \"properties\": { "); for (auto it = m_staticData->cbegin(); it != m_staticData->cend(); ++it) @@ -4620,7 +4709,7 @@ std::string OMF::ApplyPIServerNamingRulesPath(const std::string &objName, bool * /** * Send the base types that we use to define all the data point values * - * @return true If the data types were sent correctly. Otherwsie false. + * @return true If the data types were sent correctly. 
Otherwise false. */ bool OMF::sendBaseTypes() { @@ -4647,6 +4736,19 @@ bool OMF::sendBaseTypes() // Exception raised for HTTP 400 Bad Request catch (const BadRequest& e) { + OMFError error(m_sender.getHTTPResponse()); + // FIXME The following is too verbose + if (error.hasErrors()) + { + Logger::getLogger()->warn("The OMF endpoint reported a bad request when sending base types: %d messages", + error.messageCount()); + for (unsigned int i = 0; i < error.messageCount(); i++) + { + Logger::getLogger()->warn("Message %d: %s, %s, %s", + i, error.getEventSeverity(i).c_str(), error.getMessage(i).c_str(), error.getEventReason(i).c_str()); + } + } + if (OMF::isDataTypeError(e.what())) { // Data type error: force type-id change @@ -4673,14 +4775,14 @@ bool OMF::sendBaseTypes() m_connected = false; return false; } - Logger::getLogger()->info("Base types successully sent"); + Logger::getLogger()->debug("Base types successfully sent"); return true; } /** * Create the messages to link the asset into the right place in the AF structure * - * @param reading The reading beign sent + * @param reading The reading being sent * @param hints OMF Hints for this reading */ string OMF::createAFLinks(Reading& reading, OMFHints *hints) @@ -4734,3 +4836,51 @@ string AFDataMessage; } return AFDataMessage; } + +/** + * Report an error related to an asset if the asset has not already been reported + * + * @param asset The asset name + * @param level The level to log the message at + * @param msg The message to log + */ +void OMF::reportAsset(const string& asset, const string& level, const string& msg) +{ + if (std::find(m_reportedAssets.begin(), m_reportedAssets.end(), asset) == m_reportedAssets.end()) + { + m_reportedAssets.push_back(asset); + if (level.compare("error") == 0) + Logger::getLogger()->error(msg); + else if (level.compare("warn") == 0) + Logger::getLogger()->warn(msg); + else if (level.compare("fatal") == 0) + Logger::getLogger()->fatal(msg); + else if (level.compare("info") == 0) + Logger::getLogger()->info(msg); + else + Logger::getLogger()->debug(msg); + } +} + +/** + * Set the connection state + * + * @param connectionStatus The target connection status + */ +void OMF::setConnected(const bool connectionStatus) +{ + if (connectionStatus != m_connected) + { + // Send an audit event for the change of state + string data = "{ \"plugin\" : \"OMF\", \"service\" : \"" + m_name + "\" }"; + if (!connectionStatus) + { + AuditLogger::auditLog("NHDWN", "ERROR", data); + } + else + { + AuditLogger::auditLog("NHAVL", "INFORMATION", data); + } + } + m_connected = connectionStatus; +} diff --git a/C/plugins/north/OMF/omfinfo.cpp b/C/plugins/north/OMF/omfinfo.cpp new file mode 100644 index 0000000000..f9efe8b01f --- /dev/null +++ b/C/plugins/north/OMF/omfinfo.cpp @@ -0,0 +1,1395 @@ +/* + * Fledge OSIsoft OMF interface to PI Server. 
+ * + * Copyright (c) 2023 Dianomic Systems + * + * Released under the Apache 2.0 Licence + * + * Author: Mark Riddoch + */ +#include + +using namespace std; +using namespace rapidjson; +using namespace SimpleWeb; + +/** + * Constructor for the OMFInformation class + */ +OMFInformation::OMFInformation(ConfigCategory *config) : m_sender(NULL), m_omf(NULL), m_connected(false) +{ + + m_logger = Logger::getLogger(); + m_name = config->getName(); + + int endpointPort = 0; + + // PIServerEndpoint handling + string PIServerEndpoint = config->getValue("PIServerEndpoint"); + string ADHRegions = config->getValue("ADHRegions"); + string ServerHostname = config->getValue("ServerHostname"); + if (gethostbyname(ServerHostname.c_str()) == NULL) + { + Logger::getLogger()->warn("Unable to resolve server hostname '%s'. This should be a valid hostname or IP Address.", ServerHostname.c_str()); + } + string ServerPort = config->getValue("ServerPort"); + string url; + string NamingScheme = config->getValue("NamingScheme"); + + // Translate the PIServerEndpoint configuration + if(PIServerEndpoint.compare("PI Web API") == 0) + { + Logger::getLogger()->debug("PI-Server end point manually selected - PI Web API "); + m_PIServerEndpoint = ENDPOINT_PIWEB_API; + url = ENDPOINT_URL_PI_WEB_API; + endpointPort = ENDPOINT_PORT_PIWEB_API; + } + else if(PIServerEndpoint.compare("Connector Relay") == 0) + { + Logger::getLogger()->debug("PI-Server end point manually selected - Connector Relay "); + m_PIServerEndpoint = ENDPOINT_CR; + url = ENDPOINT_URL_CR; + endpointPort = ENDPOINT_PORT_CR; + } + else if(PIServerEndpoint.compare("AVEVA Data Hub") == 0) + { + Logger::getLogger()->debug("End point manually selected - AVEVA Data Hub"); + m_PIServerEndpoint = ENDPOINT_ADH; + url = ENDPOINT_URL_ADH; + std::string region = "uswe"; + if(ADHRegions.compare("EU-West") == 0) + region = "euno"; + else if(ADHRegions.compare("Australia") == 0) + region = "auea"; + StringReplace(url, "REGION_PLACEHOLDER", region); + endpointPort = ENDPOINT_PORT_ADH; + } + else if(PIServerEndpoint.compare("OSIsoft Cloud Services") == 0) + { + Logger::getLogger()->debug("End point manually selected - OSIsoft Cloud Services"); + m_PIServerEndpoint = ENDPOINT_OCS; + url = ENDPOINT_URL_OCS; + std::string region = "dat-b"; + if(ADHRegions.compare("EU-West") == 0) + region = "dat-d"; + else if(ADHRegions.compare("Australia") == 0) + Logger::getLogger()->error("OSIsoft Cloud Services are not hosted in Australia"); + StringReplace(url, "REGION_PLACEHOLDER", region); + endpointPort = ENDPOINT_PORT_OCS; + } + else if(PIServerEndpoint.compare("Edge Data Store") == 0) + { + Logger::getLogger()->debug("End point manually selected - Edge Data Store"); + m_PIServerEndpoint = ENDPOINT_EDS; + url = ENDPOINT_URL_EDS; + endpointPort = ENDPOINT_PORT_EDS; + } + ServerPort = (ServerPort.compare("0") == 0) ? to_string(endpointPort) : ServerPort; + + if (endpointPort == ENDPOINT_PORT_PIWEB_API) { + + // Use SendFullStructure ? 
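+		// SendFullStructure controls whether the structural OMF messages
+		// (AF hierarchy, static data and link messages) are sent or only the
+		// minimum needed to load data into the PI Data Archive. It is only
+		// meaningful for the PI Web API endpoint; every other endpoint type
+		// is treated as if it were enabled.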
+ string fullStr = config->getValue("SendFullStructure"); + + if (fullStr == "True" || fullStr == "true" || fullStr == "TRUE") + m_sendFullStructure = true; + else + m_sendFullStructure = false; + } else { + m_sendFullStructure = true; + } + + unsigned int retrySleepTime = atoi(config->getValue("OMFRetrySleepTime").c_str()); + unsigned int maxRetry = atoi(config->getValue("OMFMaxRetry").c_str()); + unsigned int timeout = atoi(config->getValue("OMFHttpTimeout").c_str()); + + string producerToken = config->getValue("producerToken"); + + string formatNumber = config->getValue("formatNumber"); + string formatInteger = config->getValue("formatInteger"); + string DefaultAFLocation = config->getValue("DefaultAFLocation"); + string AFMap = config->getValue("AFMap"); + + string PIWebAPIAuthMethod = config->getValue("PIWebAPIAuthenticationMethod"); + string PIWebAPIUserId = config->getValue("PIWebAPIUserId"); + string PIWebAPIPassword = config->getValue("PIWebAPIPassword"); + string KerberosKeytabFileName = config->getValue("PIWebAPIKerberosKeytabFileName"); + + // OCS configurations + string OCSNamespace = config->getValue("OCSNamespace"); + string OCSTenantId = config->getValue("OCSTenantId"); + string OCSClientId = config->getValue("OCSClientId"); + string OCSClientSecret = config->getValue("OCSClientSecret"); + + StringReplace(url, "HOST_PLACEHOLDER", ServerHostname); + StringReplace(url, "PORT_PLACEHOLDER", ServerPort); + + // TENANT_ID_PLACEHOLDER and NAMESPACE_ID_PLACEHOLDER, if present, will be replaced with the values of OCSTenantId and OCSNamespace + StringReplace(url, "TENANT_ID_PLACEHOLDER", OCSTenantId); + StringReplace(url, "NAMESPACE_ID_PLACEHOLDER", OCSNamespace); + + /** + * Extract host, port, path from URL + */ + size_t findProtocol = url.find_first_of(":"); + string protocol = url.substr(0, findProtocol); + + string tmpUrl = url.substr(findProtocol + 3); + size_t findPort = tmpUrl.find_first_of(":"); + string hostName = tmpUrl.substr(0, findPort); + + size_t findPath = tmpUrl.find_first_of("/"); + string port = tmpUrl.substr(findPort + 1, findPath - findPort - 1); + string path = tmpUrl.substr(findPath); + + string hostAndPort(hostName + ":" + port); + + // Set configuration fields + m_protocol = protocol; + m_hostAndPort = hostAndPort; + m_path = path; + m_retrySleepTime = retrySleepTime; + m_maxRetry = maxRetry; + m_timeout = timeout; + m_typeId = TYPE_ID_DEFAULT; + m_producerToken = producerToken; + m_formatNumber = formatNumber; + m_formatInteger = formatInteger; + m_DefaultAFLocation = DefaultAFLocation; + m_AFMap = AFMap; + + // OCS configurations + OCSNamespace = OCSNamespace; + OCSTenantId = OCSTenantId; + OCSClientId = OCSClientId; + OCSClientSecret = OCSClientSecret; + + // PI Web API end-point - evaluates the authentication method requested + if (m_PIServerEndpoint == ENDPOINT_PIWEB_API) + { + if (PIWebAPIAuthMethod.compare("anonymous") == 0) + { + Logger::getLogger()->debug("PI Web API end-point - anonymous authentication"); + m_PIWebAPIAuthMethod = "a"; + } + else if (PIWebAPIAuthMethod.compare("basic") == 0) + { + Logger::getLogger()->debug("PI Web API end-point - basic authentication"); + m_PIWebAPIAuthMethod = "b"; + m_PIWebAPICredentials = AuthBasicCredentialsGenerate(PIWebAPIUserId, PIWebAPIPassword); + } + else if (PIWebAPIAuthMethod.compare("kerberos") == 0) + { + Logger::getLogger()->debug("PI Web API end-point - kerberos authentication"); + m_PIWebAPIAuthMethod = "k"; + AuthKerberosSetup(m_KerberosKeytab, KerberosKeytabFileName); + } + else + { + 
Logger::getLogger()->error("Invalid authentication method for PI Web API :%s: ", PIWebAPIAuthMethod.c_str()); + } + } + else + { + // For all other endpoint types, set PI Web API authentication to 'anonymous.' + // This prevents the HttpSender from inserting PI Web API authentication headers. + m_PIWebAPIAuthMethod = "a"; + } + + // Use compression ? + string compr = config->getValue("compression"); + if (compr == "True" || compr == "true" || compr == "TRUE") + m_compression = true; + else + m_compression = false; + + // Set the list of errors considered not blocking in the communication + // with the PI Server + if (m_PIServerEndpoint == ENDPOINT_PIWEB_API) + { + JSONStringToVectorString(m_notBlockingErrors, + config->getValue("PIWebAPInotBlockingErrors"), + string("EventInfo")); + } + else + { + JSONStringToVectorString(m_notBlockingErrors, + config->getValue("notBlockingErrors"), + string("errors400")); + } + /** + * Add static data + * Split the string up into each pair + */ + string staticData = config->getValue("StaticData"); + size_t pos = 0; + size_t start = 0; + do { + pos = staticData.find(",", start); + string item = staticData.substr(start, pos); + start = pos + 1; + size_t pos2 = 0; + if ((pos2 = item.find(":")) != string::npos) + { + string name = item.substr(0, pos2); + while (name[0] == ' ') + name = name.substr(1); + string value = item.substr(pos2 + 1); + while (value[0] == ' ') + value = value.substr(1); + pair sData = make_pair(name, value); + m_staticData.push_back(sData); + } + } while (pos != string::npos); + + { + // NamingScheme handling + if(NamingScheme.compare("Concise") == 0) + { + m_NamingScheme = NAMINGSCHEME_CONCISE; + } + else if(NamingScheme.compare("Use Type Suffix") == 0) + { + m_NamingScheme = NAMINGSCHEME_SUFFIX; + } + else if(NamingScheme.compare("Use Attribute Hash") == 0) + { + m_NamingScheme = NAMINGSCHEME_HASH; + } + else if(NamingScheme.compare("Backward compatibility") == 0) + { + m_NamingScheme = NAMINGSCHEME_COMPATIBILITY; + } + Logger::getLogger()->debug("End point naming scheme :%s: ", NamingScheme.c_str() ); + + } + + // Fetch legacy OMF type option + string legacy = config->getValue("Legacy"); + if (legacy == "True" || legacy == "true" || legacy == "TRUE") + m_legacy = true; + else + m_legacy = false; + +} + +/** + * Destructor for the OMFInformation class. 
+ */ +OMFInformation::~OMFInformation() +{ + if (m_sender) + delete m_sender; + if (m_omf) + delete m_omf; + // TODO cleanup the allocated member variables +} + +/** + * The plugin start entry point has been called + * + * @param storedData The data that has been persisted by a previous execution + * of the plugin + */ +void OMFInformation::start(const string& storedData) +{ + + m_logger->info("Host: %s", m_hostAndPort.c_str()); + if ((m_PIServerEndpoint == ENDPOINT_OCS) || (m_PIServerEndpoint == ENDPOINT_ADH)) + { + m_logger->info("Namespace: %s", m_OCSNamespace.c_str()); + } + + // Parse JSON plugin_data + Document JSONData; + JSONData.Parse(storedData.c_str()); + if (JSONData.HasParseError()) + { + m_logger->error("%s plugin error: failure parsing " + "plugin data JSON object '%s'", + PLUGIN_NAME, + storedData.c_str()); + } + else if (JSONData.HasMember(TYPE_ID_KEY) && + (JSONData[TYPE_ID_KEY].IsString() || + JSONData[TYPE_ID_KEY].IsNumber())) + { + // Update type-id in PLUGIN_HANDLE object + if (JSONData[TYPE_ID_KEY].IsNumber()) + { + m_typeId = JSONData[TYPE_ID_KEY].GetInt(); + } + else + { + m_typeId = atol(JSONData[TYPE_ID_KEY].GetString()); + } + } + + // Load sentdataTypes + loadSentDataTypes(JSONData); + + // Log default type-id + if (m_assetsDataTypes.size() == 1 && + m_assetsDataTypes.find(FAKE_ASSET_KEY) != m_assetsDataTypes.end()) + { + // Only one value: we have the FAKE_ASSET_KEY and no other data + Logger::getLogger()->info("%s plugin is using global OMF prefix %s=%d", + PLUGIN_NAME, + TYPE_ID_KEY, + m_typeId); + } + else + { + Logger::getLogger()->info("%s plugin is using per asset OMF prefix %s=%d " + "(max value found)", + PLUGIN_NAME, + TYPE_ID_KEY, + getMaxTypeId()); + } + + // Retrieve the PI Web API Version + m_connected = true; + if (m_PIServerEndpoint == ENDPOINT_PIWEB_API) + { + int httpCode = PIWebAPIGetVersion(); + if (httpCode >= 200 && httpCode < 400) + { + SetOMFVersion(); + Logger::getLogger()->info("%s connected to %s OMF Version: %s", + m_RestServerVersion.c_str(), m_hostAndPort.c_str(), m_omfversion.c_str()); + m_connected = true; + } + else + { + m_connected = false; + } + } + else if (m_PIServerEndpoint == ENDPOINT_EDS) + { + EDSGetVersion(); + SetOMFVersion(); + Logger::getLogger()->info("Edge Data Store %s OMF Version: %s", m_RestServerVersion.c_str(), m_omfversion.c_str()); + } + else + { + SetOMFVersion(); + Logger::getLogger()->info("OMF Version: %s", m_omfversion.c_str()); + } +} + +/** + * Send data to the OMF endpoint + * + * @param readings The block of readings to send + * @return uint32_t The number of readings sent + */ +uint32_t OMFInformation::send(const vector& readings) +{ +#if INSTRUMENT + struct timeval startTime; + gettimeofday(&startTime, NULL); +#endif + string version; + + // Check if the endpoint is PI Web API and if the PI Web API server is available + if (!IsPIWebAPIConnected()) + { + // Error already reported by IsPIWebAPIConnected + return 0; + } + + if (m_sender && m_connected == false) + { + // TODO Make the info when reconnection has been proved to work + Logger::getLogger()->warn("Connection failed creating a new sender"); + delete m_sender; + m_sender = NULL; + } + + if (!m_sender) + { + /** + * Select the transport library based on the authentication method and transport encryption + * requirements. + * + * LibcurlHttps is used to integrate Kerberos as the SimpleHttp does not support it + * the Libcurl integration implements only HTTPS not HTTP currently. 
We use SimpleHttp or + * SimpleHttps, as appropriate for the URL given, if not using Kerberos + * + * + * The handler is allocated using "Hostname : port", connect_timeout and request_timeout. + * Default is no timeout + */ + if (m_PIWebAPIAuthMethod.compare("k") == 0) + { + m_sender = new LibcurlHttps(m_hostAndPort, + m_timeout, + m_timeout, + m_retrySleepTime, + m_maxRetry); + } + else + { + if (m_protocol.compare("http") == 0) + { + m_sender = new SimpleHttp(m_hostAndPort, + m_timeout, + m_timeout, + m_retrySleepTime, + m_maxRetry); + } + else + { + m_sender = new SimpleHttps(m_hostAndPort, + m_timeout, + m_timeout, + m_retrySleepTime, + m_maxRetry); + } + } + + m_sender->setAuthMethod (m_PIWebAPIAuthMethod); + m_sender->setAuthBasicCredentials(m_PIWebAPICredentials); + + // OCS configurations + m_sender->setOCSNamespace (m_OCSNamespace); + m_sender->setOCSTenantId (m_OCSTenantId); + m_sender->setOCSClientId (m_OCSClientId); + m_sender->setOCSClientSecret (m_OCSClientSecret); + + if (m_omf) + { + // Created a new sender after a connection failure + m_omf->setSender(*m_sender); + } + } + + // OCS or ADH - retrieves the authentication token + // It is retrieved at every send as it can expire and the configuration is only in OCS and ADH + if (m_PIServerEndpoint == ENDPOINT_OCS || m_PIServerEndpoint == ENDPOINT_ADH) + { + m_OCSToken = OCSRetrieveAuthToken(); + m_sender->setOCSToken (m_OCSToken); + } + + // Allocate the OMF class that implements the PI Server data protocol + if (!m_omf) + { + m_omf = new OMF(m_name, *m_sender, m_path, m_assetsDataTypes, + m_producerToken); + + m_omf->setConnected(m_connected); + m_omf->setSendFullStructure(m_sendFullStructure); + + // Set PIServerEndpoint configuration + m_omf->setNamingScheme(m_NamingScheme); + m_omf->setPIServerEndpoint(m_PIServerEndpoint); + m_omf->setDefaultAFLocation(m_DefaultAFLocation); + m_omf->setAFMap(m_AFMap); + + m_omf->setOMFVersion(m_omfversion); + + // Generates the prefix to have unique asset_id across different levels of hierarchies + string AFHierarchyLevel; + m_omf->generateAFHierarchyPrefixLevel(m_DefaultAFLocation, m_prefixAFAsset, AFHierarchyLevel); + + m_omf->setPrefixAFAsset(m_prefixAFAsset); + + // Set OMF FormatTypes + m_omf->setFormatType(OMF_TYPE_FLOAT, + m_formatNumber); + m_omf->setFormatType(OMF_TYPE_INTEGER, + m_formatInteger); + + m_omf->setStaticData(&m_staticData); + m_omf->setNotBlockingErrors(m_notBlockingErrors); + + if (m_omfversion == "1.1" || m_omfversion == "1.0") + { + Logger::getLogger()->info("Setting LegacyType to be true for OMF Version '%s'. This will force use old style complex types. 
", m_omfversion.c_str()); + m_omf->setLegacyMode(true); + } + else + { + m_omf->setLegacyMode(m_legacy); + } + } + // Send the readings data to the PI Server + uint32_t ret = m_omf->sendToServer(readings, m_compression); + + // Detect typeId change in OMF class + if (m_omf->getTypeId() != m_typeId) + { + // Update typeId in plugin handle + m_typeId = m_omf->getTypeId(); + // Log change + Logger::getLogger()->info("%s plugin: a new OMF global %s (%d) has been created.", + PLUGIN_NAME, + TYPE_ID_KEY, + m_typeId); + } + + // Write a warning if the connection to PI Web API has been lost + bool updatedConnected = m_omf->getConnected(); + if (m_PIServerEndpoint == ENDPOINT_PIWEB_API && m_connected && !updatedConnected) + { + Logger::getLogger()->warn("Connection to PI Web API at %s has been lost", m_hostAndPort.c_str()); + } + m_connected = updatedConnected; + +#if INSTRUMENT + Logger::getLogger()->debug("plugin_send elapsed time: %6.3f seconds, NumValues: %u", GetElapsedTime(&startTime), ret); +#endif + + // Return sent data ret code + return ret; +} + +/** + * Return the data to be persisted + * @return string The data to persist + */ +string OMFInformation::saveData() +{ +#if INSTRUMENT + struct timeval startTime; + gettimeofday(&startTime, NULL); +#endif + // Create save data + std::ostringstream saveData; + saveData << "{"; + + // Add sent data types + string typesData = saveSentDataTypes(); + if (!typesData.empty()) + { + // Save datatypes + saveData << typesData; + } + else + { + // Just save type-id + saveData << "\"" << TYPE_ID_KEY << "\": " << to_string(m_typeId); + } + + saveData << "}"; + + // Log saving the plugin configuration + Logger::getLogger()->debug("%s plugin: saving plugin_data '%s'", + PLUGIN_NAME, + saveData.str().c_str()); + + +#if INSTRUMENT + // For debugging: write plugin's JSON data to a file + string jsonFilePath = getDataDir() + string("/logs/OMFSaveData.json"); + ofstream f(jsonFilePath.c_str(), ios_base::trunc); + f << saveData.str(); + f.close(); + + Logger::getLogger()->debug("plugin_shutdown elapsed time: %6.3f seconds", GetElapsedTime(&startTime)); +#endif + + // Return current plugin data to save + return saveData.str(); +} + + +/** + * Load stored data types (already sent to PI server) + * + * Each element, the assetName, has type-id and datatype for each datapoint + * + * If no data exists in the plugin_data table, then a map entry + * with FAKE_ASSET_KEY is made in order to set the start type-id + * sequence with default value set to 1: + * all new created OMF dataTypes have type-id prefix set to the value of 1. + * + * If data like {"type-id": 14} or {"type-id": "14" } is found, a map entry + * with FAKE_ASSET_KEY is made and the start type-id sequence value is set + * to the found value, i.e. 14: + * all new created OMF dataTypes have type-id prefix set to the value of 14. + * + * If proper per asset types data is loaded, the FAKE_ASSET_KEY is not set: + * all new created OMF dataTypes have type-id prefix set to the value of 1 + * while existing (loaded) OMF dataTypes will keep their type-id values. 
+ * + * @param JSONData The JSON document containing all saved data + */ +void OMFInformation::loadSentDataTypes(Document& JSONData) +{ + if (JSONData.HasMember(SENT_TYPES_KEY) && + JSONData[SENT_TYPES_KEY].IsArray()) + { + const Value& cachedTypes = JSONData[SENT_TYPES_KEY]; + for (Value::ConstValueIterator it = cachedTypes.Begin(); + it != cachedTypes.End(); + ++it) + { + if (!it->IsObject()) + { + Logger::getLogger()->warn("%s plugin: current element in '%s' " \ + "property is not an object, ignoring it", + PLUGIN_NAME, + SENT_TYPES_KEY); + continue; + } + + for (Value::ConstMemberIterator itr = it->MemberBegin(); + itr != it->MemberEnd(); + ++itr) + { + string key = itr->name.GetString(); + const Value& cachedValue = itr->value; + + // Add typeId and dataTypes to the in memory cache + long typeId; + if (cachedValue.HasMember(TYPE_ID_KEY) && + cachedValue[TYPE_ID_KEY].IsNumber()) + { + typeId = cachedValue[TYPE_ID_KEY].GetInt(); + } + else + { + Logger::getLogger()->warn("%s plugin: current element '%s'" \ + " doesn't have '%s' property, ignoring it", + PLUGIN_NAME, + key.c_str(), + TYPE_ID_KEY); + continue; + } + + long NamingScheme; + if (cachedValue.HasMember(NAMING_SCHEME) && + cachedValue[NAMING_SCHEME].IsNumber()) + { + NamingScheme = cachedValue[NAMING_SCHEME].GetInt(); + } + else + { + Logger::getLogger()->warn("%s plugin: current element '%s'" \ + " doesn't have '%s' property, handling naming scheme in compatibility mode", + PLUGIN_NAME, + key.c_str(), + NAMING_SCHEME); + NamingScheme = NAMINGSCHEME_COMPATIBILITY; + } + + string AFHHash; + if (cachedValue.HasMember(AFH_HASH) && + cachedValue[AFH_HASH].IsString()) + { + AFHHash = cachedValue[AFH_HASH].GetString(); + } + else + { + Logger::getLogger()->warn("%s plugin: current element '%s'" \ + " doesn't have '%s' property", + PLUGIN_NAME, + key.c_str(), + AFH_HASH); + AFHHash = ""; + } + + string AFHierarchy; + if (cachedValue.HasMember(AF_HIERARCHY) && + cachedValue[AF_HIERARCHY].IsString()) + { + AFHierarchy = cachedValue[AF_HIERARCHY].GetString(); + } + else + { + Logger::getLogger()->warn("%s plugin: current element '%s'" \ + " doesn't have '%s' property", + PLUGIN_NAME, + key.c_str(), + AF_HIERARCHY); + AFHierarchy = ""; + } + + string AFHierarchyOrig; + if (cachedValue.HasMember(AF_HIERARCHY_ORIG) && + cachedValue[AF_HIERARCHY_ORIG].IsString()) + { + AFHierarchyOrig = cachedValue[AF_HIERARCHY_ORIG].GetString(); + } + else + { + Logger::getLogger()->warn("%s plugin: current element '%s'" \ + " doesn't have '%s' property", + PLUGIN_NAME, + key.c_str(), + AF_HIERARCHY_ORIG); + AFHierarchyOrig = ""; + } + + string dataTypes; + if (cachedValue.HasMember(DATA_KEY) && + cachedValue[DATA_KEY].IsObject()) + { + StringBuffer buffer; + Writer writer(buffer); + const Value& types = cachedValue[DATA_KEY]; + types.Accept(writer); + dataTypes = buffer.GetString(); + } + else + { + Logger::getLogger()->warn("%s plugin: current element '%s'" \ + " doesn't have '%s' property, ignoring it", + PLUGIN_NAME, + key.c_str(), + DATA_KEY); + + continue; + } + + unsigned long dataTypesShort; + if (cachedValue.HasMember(DATA_KEY_SHORT) && + cachedValue[DATA_KEY_SHORT].IsString()) + { + string strDataTypesShort = cachedValue[DATA_KEY_SHORT].GetString(); + // The information are stored as string in hexadecimal format + dataTypesShort = stoi (strDataTypesShort,nullptr,16); + } + else + { + dataTypesShort = calcTypeShort(dataTypes); + if (dataTypesShort == 0) + { + Logger::getLogger()->warn("%s plugin: current element '%s'" \ + " doesn't have '%s' 
property", + PLUGIN_NAME, + key.c_str(), + DATA_KEY_SHORT); + } + else + { + Logger::getLogger()->warn("%s plugin: current element '%s'" \ + " doesn't have '%s' property, calculated '0x%X'", + PLUGIN_NAME, + key.c_str(), + DATA_KEY_SHORT, + dataTypesShort); + } + } + unsigned short hintChecksum = 0; + if (cachedValue.HasMember(DATA_KEY_HINT) && + cachedValue[DATA_KEY_HINT].IsString()) + { + string strHint = cachedValue[DATA_KEY_HINT].GetString(); + // The information are stored as string in hexadecimal format + hintChecksum = stoi (strHint,nullptr,16); + } + OMFDataTypes dataType; + dataType.typeId = typeId; + dataType.types = dataTypes; + dataType.typesShort = dataTypesShort; + dataType.hintChkSum = hintChecksum; + dataType.namingScheme = NamingScheme; + dataType.afhHash = AFHHash; + dataType.afHierarchy = AFHierarchy; + dataType.afHierarchyOrig = AFHierarchyOrig; + + Logger::getLogger()->debug("%s - AFHHash :%s: AFHierarchy :%s: AFHierarchyOrig :%s: ", __FUNCTION__, AFHHash.c_str(), AFHierarchy.c_str() , AFHierarchyOrig.c_str() ); + + + Logger::getLogger()->debug("%s - NamingScheme :%ld: ", __FUNCTION__,NamingScheme ); + + // Add data into the map + m_assetsDataTypes[key] = dataType; + } + } + } + else + { + // There is no stored data when plugin starts first time + if (JSONData.MemberBegin() != JSONData.MemberEnd()) + { + Logger::getLogger()->warn("Persisted data is not of the correct format, ignoring"); + } + + OMFDataTypes dataType; + dataType.typeId = m_typeId; + dataType.types = "{}"; + + // Add default data into the map + m_assetsDataTypes[FAKE_ASSET_KEY] = dataType; + } +} + + + +/** + * Return the maximum value of type-id, among all entries in the map + * + * If the array is empty the m_typeId is returned. + * + * @return The maximum value of type-id found + */ +long OMFInformation::getMaxTypeId() +{ + long maxId = m_typeId; + for (auto it = m_assetsDataTypes.begin(); + it != m_assetsDataTypes.end(); + ++it) + { + if ((*it).second.typeId > maxId) + { + maxId = (*it).second.typeId; + } + } + return maxId; +} + +/** + * Calls the PI Web API to retrieve the version + * + * @param logMessage If true, log error messages (default: true) + * @return httpCode HTTP response code + */ +int OMFInformation::PIWebAPIGetVersion(bool logMessage) +{ + PIWebAPI *_PIWebAPI; + + _PIWebAPI = new PIWebAPI(); + + // Set requested authentication + _PIWebAPI->setAuthMethod (m_PIWebAPIAuthMethod); + _PIWebAPI->setAuthBasicCredentials(m_PIWebAPICredentials); + + int httpCode = _PIWebAPI->GetVersion(m_hostAndPort, m_RestServerVersion, logMessage); + delete _PIWebAPI; + + return httpCode; +} + + + +/** + * Calls the Edge Data Store product information endpoint to get the EDS version + * + * @return HttpCode REST response code + */ +int OMFInformation::EDSGetVersion() +{ + int res; + + HttpSender *endPoint = new SimpleHttp(m_hostAndPort, + m_timeout, + m_timeout, + m_retrySleepTime, + m_maxRetry); + + try + { + string path = "http://" + m_hostAndPort + "/api/v1/diagnostics/productinformation"; + vector> headers; + m_RestServerVersion.clear(); + + res = endPoint->sendRequest("GET", path, headers, std::string("")); + if (res >= 200 && res <= 299) + { + m_RestServerVersion = ParseEDSProductInformation(endPoint->getHTTPResponse()); + } + } + catch (const BadRequest &ex) + { + Logger::getLogger()->error("Edge Data Store productinformation BadRequest exception: %s", ex.what()); + res = 400; + } + catch (const std::exception &ex) + { + Logger::getLogger()->error("Edge Data Store productinformation exception: %s", 
ex.what()); + res = 400; + } + catch (...) + { + Logger::getLogger()->error("Edge Data Store productinformation generic exception"); + res = 400; + } + + delete endPoint; + return res; +} + +/** + * Set the supported OMF Version for the OMF endpoint + */ +void OMFInformation::SetOMFVersion() +{ + switch (m_PIServerEndpoint) + { + case ENDPOINT_PIWEB_API: + if (m_RestServerVersion.find("2019") != std::string::npos) + { + m_omfversion = "1.0"; + } + else if (m_RestServerVersion.find("2020") != std::string::npos) + { + m_omfversion = "1.1"; + } + else if (m_RestServerVersion.find("2021") != std::string::npos) + { + m_omfversion = "1.2"; + } + else + { + m_omfversion = "1.2"; + } + break; + case ENDPOINT_EDS: + // Edge Data Store versions with supported OMF versions: + // EDS 2020 (1.0.0.609) OMF 1.0, 1.1 + // EDS 2023 (1.1.1.46) OMF 1.0, 1.1, 1.2 + // EDS 2023 Patch 1 (1.1.3.2) OMF 1.0, 1.1, 1.2 + { + int major = 0; + int minor = 0; + ParseProductVersion(m_RestServerVersion, &major, &minor); + if ((major > 1) || (major == 1 && minor > 0)) + { + m_omfversion = "1.2"; + } + else + { + m_omfversion = EDS_OMF_VERSION; + } + } + break; + case ENDPOINT_CR: + m_omfversion = CR_OMF_VERSION; + break; + case ENDPOINT_OCS: + case ENDPOINT_ADH: + default: + m_omfversion = "1.2"; // assume cloud service OMF endpoint types support OMF 1.2 + break; + } +} + +/** + * Calls the OCS API to retrieve the authentication token + * + * @return token Authorization token + */ +string OMFInformation::OCSRetrieveAuthToken() +{ + string token; + OCS *ocs; + + if (m_PIServerEndpoint == ENDPOINT_OCS) + ocs = new OCS(); + else if (m_PIServerEndpoint == ENDPOINT_ADH) + ocs = new OCS(true); + + token = ocs->retrieveToken(m_OCSClientId , m_OCSClientSecret); + + delete ocs; + + return token; +} + +/** + * Evaluate if the endpoint is a PI Web API or a Connector Relay. + * + * @return OMF_ENDPOINT values + */ +OMF_ENDPOINT OMFInformation::identifyPIServerEndpoint() +{ + OMF_ENDPOINT PIServerEndpoint; + + HttpSender *endPoint; + vector> header; + int httpCode; + + + if (m_PIWebAPIAuthMethod.compare("k") == 0) + { + endPoint = new LibcurlHttps(m_hostAndPort, + m_timeout, + m_timeout, + m_retrySleepTime, + m_maxRetry); + } + else + { + endPoint = new SimpleHttps(m_hostAndPort, + m_timeout, + m_timeout, + m_retrySleepTime, + m_maxRetry); + } + + // Set requested authentication + endPoint->setAuthMethod (m_PIWebAPIAuthMethod); + endPoint->setAuthBasicCredentials(m_PIWebAPICredentials); + + try + { + httpCode = endPoint->sendRequest("GET", + m_path, + header, + ""); + + if (httpCode >= 200 && httpCode <= 399) + { + PIServerEndpoint = ENDPOINT_PIWEB_API; + if (m_PIWebAPIAuthMethod == "b") + Logger::getLogger()->debug("PI Web API end-point basic authorization granted"); + } + else + { + PIServerEndpoint = ENDPOINT_CR; + } + + } + catch (exception &ex) + { + Logger::getLogger()->warn("PI-Server end-point discovery encountered the error :%s: " + "trying selecting the Connector Relay as an end-point", ex.what()); + PIServerEndpoint = ENDPOINT_CR; + } + + delete endPoint; + + return (PIServerEndpoint); +} + + +/** + * Return a JSON string with the dataTypes to save in plugin_data + * + * Note: the entry with FAKE_ASSET_KEY is never saved. 
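+ *
+ * Note: as a side effect, any FAKE_ASSET_KEY entry is removed from the
+ * in-memory map and its type-id is copied into m_typeId before the
+ * remaining entries are serialised.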
+ * + * @return The string with JSON data + */ +string OMFInformation::saveSentDataTypes() +{ + string ret; + std::ostringstream newData; + + auto it = m_assetsDataTypes.find(FAKE_ASSET_KEY); + if (it != m_assetsDataTypes.end()) + { + // Set typeId in FAKE_ASSET_KEY + m_typeId = (*it).second.typeId; + // Remove the entry + m_assetsDataTypes.erase(it); + } + + + unsigned long tSize = m_assetsDataTypes.size(); + if (tSize) + { + + // Prepare output data (skip empty data types) + newData << "\"" << SENT_TYPES_KEY << "\" : ["; + + bool pendingSeparator = false; + for (auto it = m_assetsDataTypes.begin(); + it != m_assetsDataTypes.end(); + ++it) + { + if (((*it).second).types.compare("{}") != 0) + { + newData << (pendingSeparator ? ", " : ""); + newData << "{\"" << (*it).first << "\" : {\"" << TYPE_ID_KEY << + "\": " << to_string(((*it).second).typeId); + + // The information should be stored as string in hexadecimal format + std::stringstream tmpStream; + tmpStream << std::hex << ((*it).second).typesShort; + std::string typesShort = tmpStream.str(); + + newData << ", \"" << DATA_KEY_SHORT << "\": \"0x" << typesShort << "\""; + std::stringstream hintStream; + hintStream << std::hex << ((*it).second).hintChkSum; + std::string hintChecksum = hintStream.str(); + newData << ", \"" << DATA_KEY_HINT << "\": \"0x" << hintChecksum << "\""; + + long NamingScheme; + NamingScheme = ((*it).second).namingScheme; + newData << ", \"" << NAMING_SCHEME << "\": " << to_string(NamingScheme) << ""; + + string AFHHash; + AFHHash = ((*it).second).afhHash; + newData << ", \"" << AFH_HASH << "\": \"" << AFHHash << "\""; + + string AFHierarchy; + AFHierarchy = ((*it).second).afHierarchy; + newData << ", \"" << AF_HIERARCHY << "\": \"" << AFHierarchy << "\""; + + string AFHierarchyOrig; + AFHierarchyOrig = ((*it).second).afHierarchyOrig; + newData << ", \"" << AF_HIERARCHY_ORIG << "\": \"" << AFHierarchyOrig << "\""; + + Logger::getLogger()->debug("%s - AFHHash :%s: AFHierarchy :%s: AFHierarchyOrig :%s:", __FUNCTION__, AFHHash.c_str(), AFHierarchy.c_str(), AFHierarchyOrig.c_str() ); + Logger::getLogger()->debug("%s - NamingScheme :%ld: ", __FUNCTION__,NamingScheme ); + + newData << ", \"" << DATA_KEY << "\": " << + (((*it).second).types.empty() ? "{}" : ((*it).second).types) << + "}}"; + pendingSeparator = true; + } + } + + tSize = m_assetsDataTypes.size(); + if (!tSize) + { + // DataTypes map is empty + return ret; + } + + newData << "]"; + + ret = newData.str(); + } + + return ret; +} + + +/** + * Calculate the TypeShort in the case it is missing loading type definition + * + * Generate a 64 bit number containing a set of counts, + * number of datapoints in an asset and the number of datapoint of each type we support. 
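+ *
+ * For example (illustrative only): an asset with two "number" datapoints and
+ * one "string" datapoint gives tTotal=3, tFloat=2 and tString=1, which packs
+ * into the value 0x10203 on a little-endian build, tTotal occupying the
+ * least significant byte.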
+ * + */ +unsigned long OMFInformation::calcTypeShort(const string& dataTypes) +{ + union t_typeCount { + struct + { + unsigned char tTotal; + unsigned char tFloat; + unsigned char tString; + unsigned char spare0; + + unsigned char spare1; + unsigned char spare2; + unsigned char spare3; + unsigned char spare4; + } cnt; + unsigned long valueLong = 0; + + } typeCount; + + Document JSONData; + JSONData.Parse(dataTypes.c_str()); + + if (JSONData.HasParseError()) + { + Logger::getLogger()->error("calcTypeShort - unable to calculate TypeShort on :%s: ", dataTypes.c_str()); + return (0); + } + + for (Value::ConstMemberIterator it = JSONData.MemberBegin(); it != JSONData.MemberEnd(); ++it) + { + + string key = it->name.GetString(); + const Value& value = it->value; + + if (value.HasMember(PROPERTY_TYPE) && value[PROPERTY_TYPE].IsString()) + { + string type =value[PROPERTY_TYPE].GetString(); + + // Integer is handled as float in the OMF integration + if (type.compare(PROPERTY_NUMBER) == 0) + { + typeCount.cnt.tFloat++; + } else if (type.compare(PROPERTY_STRING) == 0) + { + typeCount.cnt.tString++; + } else { + + Logger::getLogger()->error("calcTypeShort - unrecognized type :%s: ", type.c_str()); + } + typeCount.cnt.tTotal++; + } + else + { + Logger::getLogger()->error("calcTypeShort - unable to extract the type for :%s: ", key.c_str()); + return (0); + } + } + + return typeCount.valueLong; +} + +/** + * Finds major and minor product version numbers in a version string + * + * @param versionString Version string of the form x.x.x.x where x's are integers + * @param major Major product version returned (first digit) + * @param minor Minor product version returned (second digit) + */ +void OMFInformation::ParseProductVersion(std::string &versionString, int *major, int *minor) +{ + *major = 0; + *minor = 0; + size_t last = 0; + size_t next = versionString.find(".", last); + if (next != string::npos) + { + *major = atoi(versionString.substr(last, next - last).c_str()); + last = next + 1; + next = versionString.find(".", last); + if (next != string::npos) + { + *minor = atoi(versionString.substr(last, next - last).c_str()); + } + } +} + +/** + * Parses the Edge Data Store version string from the /productinformation REST response. + * Note that the response format differs between EDS 2020 and EDS 2023. + * + * @param json REST response from /api/v1/diagnostics/productinformation + * @return version Edge Data Store version string + */ +std::string OMFInformation::ParseEDSProductInformation(std::string json) +{ + std::string version; + + Document doc; + + if (!doc.Parse(json.c_str()).HasParseError()) + { + try + { + if (doc.HasMember("Edge Data Store")) // EDS 2020 response + { + const rapidjson::Value &EDS = doc["Edge Data Store"]; + version = EDS.GetString(); + } + else if (doc.HasMember("Product Version")) // EDS 2023 response + { + const rapidjson::Value &EDS = doc["Product Version"]; + version = EDS.GetString(); + } + } + catch (...) 
+ { + } + } + + Logger::getLogger()->debug("Edge Data Store Version: %s JSON: %s", version.c_str(), json.c_str()); + return version; +} + +/** + * Generate the credentials for the basic authentication + * encoding user id and password joined by a single colon (:) using base64 + * + * @param userId User id to be used for the generation of the credentials + * @param password Password to be used for the generation of the credentials + * @return credentials to be used with the basic authentication + */ +string OMFInformation::AuthBasicCredentialsGenerate(string& userId, string& password) +{ + string Credentials; + + Credentials = Crypto::Base64::encode(userId + ":" + password); + + return (Credentials); +} + +/** + * Configures for Kerberos authentication : + * - set the environment KRB5_CLIENT_KTNAME to the position containing the + * Kerberos keys, the keytab file. + * + * @param out keytabEnv string containing the command to set the + * KRB5_CLIENT_KTNAME environment variable + * @param keytabFileName File name of the keytab file + * + */ +void OMFInformation::AuthKerberosSetup(string& keytabEnv, string& keytabFileName) +{ + string fledgeData = getDataDir (); + string keytabFullPath = fledgeData + "/etc/kerberos" + "/" + keytabFileName; + + keytabEnv = "KRB5_CLIENT_KTNAME=" + keytabFullPath; + putenv((char *) keytabEnv.c_str()); + + if (access(keytabFullPath.c_str(), F_OK) != 0) + { + Logger::getLogger()->error("Kerberos authentication not possible, the keytab file :%s: is missing.", keytabFullPath.c_str()); + } + +} + +/** + * Calculate elapsed time in seconds + * + * @param startTime Start time of the interval to be evaluated + * @return Elapsed time in seconds + */ +double OMFInformation::GetElapsedTime(struct timeval *startTime) +{ + struct timeval endTime, diff; + gettimeofday(&endTime, NULL); + timersub(&endTime, startTime, &diff); + return diff.tv_sec + ((double)diff.tv_usec / 1000000); +} + +/** + * Check if the PI Web API server is available by reading the product version + * + * @return Connection status + */ +bool OMFInformation::IsPIWebAPIConnected() +{ + static std::chrono::steady_clock::time_point nextCheck; + static bool reported = false; // Has the state been reported yet + static bool reportedState; // What was the last reported state + + if (!m_connected && m_PIServerEndpoint == ENDPOINT_PIWEB_API) + { + std::chrono::steady_clock::time_point now = std::chrono::steady_clock::now(); + + if (now >= nextCheck) + { + int httpCode = PIWebAPIGetVersion(false); + if (httpCode >= 400) + { + m_connected = false; + now = std::chrono::steady_clock::now(); + nextCheck = now + std::chrono::seconds(60); + Logger::getLogger()->debug("PI Web API %s is not available. HTTP Code: %d", m_hostAndPort.c_str(), httpCode); + if (reported == false || reportedState == true) + { + reportedState = false; + reported = true; + Logger::getLogger()->error("The PI Web API service %s is not available", + m_hostAndPort.c_str()); + } + } + else + { + m_connected = true; + SetOMFVersion(); + Logger::getLogger()->info("%s reconnected to %s OMF Version: %s", + m_RestServerVersion.c_str(), m_hostAndPort.c_str(), m_omfversion.c_str()); + if (reported == true || reportedState == false) + { + reportedState = true; + reported = true; + Logger::getLogger()->warn("The PI Web API service %s has become available", + m_hostAndPort.c_str()); + } + } + } + } + else + { + // Endpoints other than PI Web API fail quickly when they are unavailable + // so there is no need to check their status in advance. 
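+		// Note that this branch is also taken when a PI Web API endpoint is
+		// already connected, in which case m_connected simply remains true.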
+ m_connected = true; + } + + return m_connected; +} diff --git a/C/plugins/north/OMF/plugin.cpp b/C/plugins/north/OMF/plugin.cpp index 30bc0bb99b..6d2d543c71 100755 --- a/C/plugins/north/OMF/plugin.cpp +++ b/C/plugins/north/OMF/plugin.cpp @@ -41,7 +41,6 @@ #include #include #include -#include #include #include #include @@ -56,7 +55,7 @@ #include "utils.h" #include "string_utils.h" #include - +#include #include "crypto.hpp" @@ -67,70 +66,6 @@ using namespace std; using namespace rapidjson; using namespace SimpleWeb; - -#define PLUGIN_NAME "OMF" -#define TYPE_ID_KEY "type-id" -#define SENT_TYPES_KEY "sentDataTypes" -#define DATA_KEY "dataTypes" -#define DATA_KEY_SHORT "dataTypesShort" -#define DATA_KEY_HINT "hintChecksum" -#define NAMING_SCHEME "namingScheme" -#define AFH_HASH "afhHash" -#define AF_HIERARCHY "afHierarchy" -#define AF_HIERARCHY_ORIG "afHierarchyOrig" - - -#define PROPERTY_TYPE "type" -#define PROPERTY_NUMBER "number" -#define PROPERTY_STRING "string" - - -#define ENDPOINT_URL_PI_WEB_API "https://HOST_PLACEHOLDER:PORT_PLACEHOLDER/piwebapi/omf" -#define ENDPOINT_URL_CR "https://HOST_PLACEHOLDER:PORT_PLACEHOLDER/ingress/messages" -#define ENDPOINT_URL_OCS "https://dat-b.osisoft.com:PORT_PLACEHOLDER/api/v1/tenants/TENANT_ID_PLACEHOLDER/Namespaces/NAMESPACE_ID_PLACEHOLDER/omf" -#define ENDPOINT_URL_ADH "https://REGION_PLACEHOLDER.datahub.connect.aveva.com:PORT_PLACEHOLDER/api/v1/Tenants/TENANT_ID_PLACEHOLDER/Namespaces/NAMESPACE_ID_PLACEHOLDER/omf" - -#define ENDPOINT_URL_EDS "http://localhost:PORT_PLACEHOLDER/api/v1/tenants/default/namespaces/default/omf" - -static bool s_connected = true; // if true, access to PI Web API is working - -enum OMF_ENDPOINT_PORT { - ENDPOINT_PORT_PIWEB_API=443, - ENDPOINT_PORT_CR=5460, - ENDPOINT_PORT_OCS=443, - ENDPOINT_PORT_EDS=5590, - ENDPOINT_PORT_ADH=443 -}; - -/** - * Plugin specific default configuration - */ - -#define NOT_BLOCKING_ERRORS_DEFAULT QUOTE( \ - { \ - "errors400" : [ \ - "Redefinition of the type with the same ID is not allowed", \ - "Invalid value type for the property", \ - "Property does not exist in the type definition", \ - "Container is not defined", \ - "Unable to find the property of the container of type" \ - ] \ - } \ -) - -#define NOT_BLOCKING_ERRORS_DEFAULT_PI_WEB_API QUOTE( \ - { \ - "EventInfo" : [ \ - "The specified value is outside the allowable range" \ - ] \ - } \ -) - -#define AF_HIERARCHY_RULES QUOTE( \ - { \ - } \ -) - /* * Note that the properties "group" is used to group related items, these will appear in different tabs, * using the group name, in the GUI. 
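 *
 * For illustration, a hypothetical item (not part of this plugin's real
 * configuration) that uses both "group" and "validity" could be declared as:
 *
 *   "ExampleCloudOption": {
 *       "description": "Example option only shown for cloud endpoints",
 *       "type": "boolean",
 *       "default": "false",
 *       "group": "Cloud",
 *       "displayName": "Example Cloud Option",
 *       "validity": "PIServerEndpoint == \"AVEVA Data Hub\""
 *   },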
@@ -162,14 +97,15 @@ const char *PLUGIN_DEFAULT_CONFIG_INFO = QUOTE( "displayName": "Endpoint" }, "ADHRegions": { - "description": "AVEVA Data Hub region", - "type": "enumeration", - "options":["US-West", "EU-West", "Australia"], - "default": "US-West", - "order": "2", - "displayName": "ADH Region", - "validity" : "PIServerEndpoint == \"AVEVA Data Hub\"" - }, + "description": "AVEVA Data Hub or OSIsoft Cloud Services region", + "type": "enumeration", + "options":["US-West", "EU-West", "Australia"], + "default": "US-West", + "order": "2", + "group" : "Cloud", + "displayName": "Cloud Service Region", + "validity" : "PIServerEndpoint == \"AVEVA Data Hub\" || PIServerEndpoint == \"OSIsoft Cloud Services\"" + }, "SendFullStructure": { "description": "It sends the minimum OMF structural messages to load data into Data Archive if disabled", "type": "boolean", @@ -403,75 +339,6 @@ const char *PLUGIN_DEFAULT_CONFIG_INFO = QUOTE( // "default": "{\"pipeline\": [\"DeltaFilter\"]}" -/** - * Historian PI Server connector info - */ -typedef struct -{ - HttpSender *sender; // HTTPS connection - OMF *omf; // OMF data protocol - bool sendFullStructure; // It sends the minimum OMF structural messages to load data into Data Archive if disabled - bool compression; // whether to compress readings' data - string protocol; // http / https - string hostAndPort; // hostname:port for SimpleHttps - unsigned int retrySleepTime; // Seconds between each retry - unsigned int maxRetry; // Max number of retries in the communication - unsigned int timeout; // connect and operation timeout - string path; // PI Server application path - long typeId; // OMF protocol type-id prefix - string producerToken; // PI Server connector token - string formatNumber; // OMF protocol Number format - string formatInteger; // OMF protocol Integer format - OMF_ENDPOINT PIServerEndpoint; // Defines which End point should be used for the communication - NAMINGSCHEME_ENDPOINT NamingScheme; // Define how the object names should be generated - https://fledge-iot.readthedocs.io/en/latest/OMF.html#naming-scheme - string DefaultAFLocation; // 1st hierarchy in Asset Framework, PI Web API only. - string AFMap; // Defines a set of rules to address where assets should be placed in the AF hierarchy. - // https://fledge-iot.readthedocs.io/en/latest/OMF.html#asset-framework-hierarchy-rules - - string prefixAFAsset; // Prefix to generate unique asste id - string PIWebAPIProductTitle; - string PIWebAPIVersion; - string PIWebAPIAuthMethod; // Authentication method to be used with the PI Web API. - string PIWebAPICredentials; // Credentials is the base64 encoding of id and password joined by a single colon (:) - string KerberosKeytab; // Kerberos authentication keytab file - // stores the environment variable value about the keytab file path - // to allow the environment to persist for all the execution of the plugin - // - // Note : A keytab is a file containing pairs of Kerberos principals - // and encrypted keys (which are derived from the Kerberos password). - // You can use a keytab file to authenticate to various remote systems - // using Kerberos without entering a password. 
- - string OCSNamespace; // OCS configurations - string OCSTenantId; - string OCSClientId; - string OCSClientSecret; - string OCSToken; - - vector> - staticData; // Static data - // Errors considered not blocking in the communication with the PI Server - std::vector - notBlockingErrors; - // Per asset DataTypes - std::map - assetsDataTypes; - string omfversion; - bool legacy; -} CONNECTOR_INFO; - -unsigned long calcTypeShort (const string& dataTypes); -string saveSentDataTypes (CONNECTOR_INFO* connInfo); -void loadSentDataTypes (CONNECTOR_INFO* connInfo, Document& JSONData); -long getMaxTypeId (CONNECTOR_INFO* connInfo); -OMF_ENDPOINT identifyPIServerEndpoint (CONNECTOR_INFO* connInfo); -string AuthBasicCredentialsGenerate (string& userId, string& password); -void AuthKerberosSetup (string& keytabFile, string& keytabFileName); -string OCSRetrieveAuthToken (CONNECTOR_INFO* connInfo); -int PIWebAPIGetVersion (CONNECTOR_INFO* connInfo, std::string &version, bool logMessage = true); -double GetElapsedTime (struct timeval *startTime); -bool IsPIWebAPIConnected (CONNECTOR_INFO* connInfo, std::string& version); - /** * Return the information about this plugin @@ -519,251 +386,12 @@ PLUGIN_HANDLE plugin_init(ConfigCategory* configData) * Handle the PI Server parameters here */ // Allocate connector struct - CONNECTOR_INFO *connInfo = new CONNECTOR_INFO; - - // PIServerEndpoint handling - string PIServerEndpoint = configData->getValue("PIServerEndpoint"); - string ADHRegions = configData->getValue("ADHRegions"); - string ServerHostname = configData->getValue("ServerHostname"); - string ServerPort = configData->getValue("ServerPort"); - string url; - string NamingScheme = configData->getValue("NamingScheme"); - { - // Translate the PIServerEndpoint configuration - if(PIServerEndpoint.compare("PI Web API") == 0) - { - Logger::getLogger()->debug("PI-Server end point manually selected - PI Web API "); - connInfo->PIServerEndpoint = ENDPOINT_PIWEB_API; - url = ENDPOINT_URL_PI_WEB_API; - endpointPort = ENDPOINT_PORT_PIWEB_API; - } - else if(PIServerEndpoint.compare("Connector Relay") == 0) - { - Logger::getLogger()->debug("PI-Server end point manually selected - Connector Relay "); - connInfo->PIServerEndpoint = ENDPOINT_CR; - url = ENDPOINT_URL_CR; - endpointPort = ENDPOINT_PORT_CR; - } - else if(PIServerEndpoint.compare("AVEVA Data Hub") == 0) - { - Logger::getLogger()->debug("End point manually selected - AVEVA Data Hub"); - connInfo->PIServerEndpoint = ENDPOINT_ADH; - url = ENDPOINT_URL_ADH; - std::string region = "uswe"; - if(ADHRegions.compare("EU-West") == 0) - region = "euno"; - else if(ADHRegions.compare("Australia") == 0) - region = "auea"; - StringReplace(url, "REGION_PLACEHOLDER", region); - endpointPort = ENDPOINT_PORT_ADH; - } - else if(PIServerEndpoint.compare("OSIsoft Cloud Services") == 0) - { - Logger::getLogger()->debug("End point manually selected - OSIsoft Cloud Services"); - connInfo->PIServerEndpoint = ENDPOINT_OCS; - url = ENDPOINT_URL_OCS; - endpointPort = ENDPOINT_PORT_OCS; - } - else if(PIServerEndpoint.compare("Edge Data Store") == 0) - { - Logger::getLogger()->debug("End point manually selected - Edge Data Store"); - connInfo->PIServerEndpoint = ENDPOINT_EDS; - url = ENDPOINT_URL_EDS; - endpointPort = ENDPOINT_PORT_EDS; - } - ServerPort = (ServerPort.compare("0") == 0) ? to_string(endpointPort) : ServerPort; - } - - if (endpointPort == ENDPOINT_PORT_PIWEB_API) { - - // Use SendFullStructure ? 
- string fullStr = configData->getValue("SendFullStructure"); - - if (fullStr == "True" || fullStr == "true" || fullStr == "TRUE") - connInfo->sendFullStructure = true; - else - connInfo->sendFullStructure = false; - } else { - connInfo->sendFullStructure = true; - } - - unsigned int retrySleepTime = atoi(configData->getValue("OMFRetrySleepTime").c_str()); - unsigned int maxRetry = atoi(configData->getValue("OMFMaxRetry").c_str()); - unsigned int timeout = atoi(configData->getValue("OMFHttpTimeout").c_str()); - - string producerToken = configData->getValue("producerToken"); - - string formatNumber = configData->getValue("formatNumber"); - string formatInteger = configData->getValue("formatInteger"); - string DefaultAFLocation = configData->getValue("DefaultAFLocation"); - string AFMap = configData->getValue("AFMap"); - - string PIWebAPIAuthMethod = configData->getValue("PIWebAPIAuthenticationMethod"); - string PIWebAPIUserId = configData->getValue("PIWebAPIUserId"); - string PIWebAPIPassword = configData->getValue("PIWebAPIPassword"); - string KerberosKeytabFileName = configData->getValue("PIWebAPIKerberosKeytabFileName"); - - // OCS configurations - string OCSNamespace = configData->getValue("OCSNamespace"); - string OCSTenantId = configData->getValue("OCSTenantId"); - string OCSClientId = configData->getValue("OCSClientId"); - string OCSClientSecret = configData->getValue("OCSClientSecret"); - - StringReplace(url, "HOST_PLACEHOLDER", ServerHostname); - StringReplace(url, "PORT_PLACEHOLDER", ServerPort); - - // TENANT_ID_PLACEHOLDER and NAMESPACE_ID_PLACEHOLDER, if present, will be replaced with the values of OCSTenantId and OCSNamespace - StringReplace(url, "TENANT_ID_PLACEHOLDER", OCSTenantId); - StringReplace(url, "NAMESPACE_ID_PLACEHOLDER", OCSNamespace); - - /** - * Extract host, port, path from URL - */ - size_t findProtocol = url.find_first_of(":"); - string protocol = url.substr(0, findProtocol); - - string tmpUrl = url.substr(findProtocol + 3); - size_t findPort = tmpUrl.find_first_of(":"); - string hostName = tmpUrl.substr(0, findPort); - - size_t findPath = tmpUrl.find_first_of("/"); - string port = tmpUrl.substr(findPort + 1, findPath - findPort - 1); - string path = tmpUrl.substr(findPath); - - string hostAndPort(hostName + ":" + port); - - // Set configuration fields - connInfo->protocol = protocol; - connInfo->hostAndPort = hostAndPort; - connInfo->path = path; - connInfo->retrySleepTime = retrySleepTime; - connInfo->maxRetry = maxRetry; - connInfo->timeout = timeout; - connInfo->typeId = TYPE_ID_DEFAULT; - connInfo->producerToken = producerToken; - connInfo->formatNumber = formatNumber; - connInfo->formatInteger = formatInteger; - connInfo->DefaultAFLocation = DefaultAFLocation; - connInfo->AFMap = AFMap; - - // OCS configurations - connInfo->OCSNamespace = OCSNamespace; - connInfo->OCSTenantId = OCSTenantId; - connInfo->OCSClientId = OCSClientId; - connInfo->OCSClientSecret = OCSClientSecret; - - // PI Web API end-point - evaluates the authentication method requested - if (PIWebAPIAuthMethod.compare("anonymous") == 0) - { - Logger::getLogger()->debug("PI Web API end-point - anonymous authentication"); - connInfo->PIWebAPIAuthMethod = "a"; - } - else if (PIWebAPIAuthMethod.compare("basic") == 0) - { - Logger::getLogger()->debug("PI Web API end-point - basic authentication"); - connInfo->PIWebAPIAuthMethod = "b"; - connInfo->PIWebAPICredentials = AuthBasicCredentialsGenerate(PIWebAPIUserId, PIWebAPIPassword); - } - else if (PIWebAPIAuthMethod.compare("kerberos") == 0) - { - 
Logger::getLogger()->debug("PI Web API end-point - kerberos authentication"); - connInfo->PIWebAPIAuthMethod = "k"; - AuthKerberosSetup(connInfo->KerberosKeytab, KerberosKeytabFileName); - } - else - { - Logger::getLogger()->error("Invalid authentication method for PI Web API :%s: ", PIWebAPIAuthMethod.c_str()); - } - - // Use compression ? - string compr = configData->getValue("compression"); - if (compr == "True" || compr == "true" || compr == "TRUE") - connInfo->compression = true; - else - connInfo->compression = false; - - // Set the list of errors considered not blocking in the communication - // with the PI Server - if (connInfo->PIServerEndpoint == ENDPOINT_PIWEB_API) - { - JSONStringToVectorString(connInfo->notBlockingErrors , - configData->getValue("PIWebAPInotBlockingErrors"), - std::string("EventInfo")); - } - else - { - JSONStringToVectorString(connInfo->notBlockingErrors , - configData->getValue("notBlockingErrors"), - std::string("errors400")); - } - /** - * Add static data - * Split the string up into each pair - */ - string staticData = configData->getValue("StaticData"); - size_t pos = 0; - size_t start = 0; - do { - pos = staticData.find(",", start); - string item = staticData.substr(start, pos); - start = pos + 1; - size_t pos2 = 0; - if ((pos2 = item.find(":")) != string::npos) - { - string name = item.substr(0, pos2); - while (name[0] == ' ') - name = name.substr(1); - string value = item.substr(pos2 + 1); - while (value[0] == ' ') - value = value.substr(1); - pair sData = make_pair(name, value); - connInfo->staticData.push_back(sData); - } - } while (pos != string::npos); - - { - // NamingScheme handling - if(NamingScheme.compare("Concise") == 0) - { - connInfo->NamingScheme = NAMINGSCHEME_CONCISE; - } - else if(NamingScheme.compare("Use Type Suffix") == 0) - { - connInfo->NamingScheme = NAMINGSCHEME_SUFFIX; - } - else if(NamingScheme.compare("Use Attribute Hash") == 0) - { - connInfo->NamingScheme = NAMINGSCHEME_HASH; - } - else if(NamingScheme.compare("Backward compatibility") == 0) - { - connInfo->NamingScheme = NAMINGSCHEME_COMPATIBILITY; - } - Logger::getLogger()->debug("End point naming scheme :%s: ", NamingScheme.c_str() ); - - } - - // Fetch legacy OMF type option - string legacy = configData->getValue("Legacy"); - if (legacy == "True" || legacy == "true" || legacy == "TRUE") - connInfo->legacy = true; - else - connInfo->legacy = false; - -#if VERBOSE_LOG - // Log plugin configuration - Logger::getLogger()->info("%s plugin configured: URL=%s, " - "producerToken=%s, compression=%s", - PLUGIN_NAME, - url.c_str(), - producerToken.c_str(), - connInfo->compression ? 
"True" : "False"); -#endif + OMFInformation *info = new OMFInformation(configData); #if INSTRUMENT Logger::getLogger()->debug("plugin_init elapsed time: %6.3f seconds", GetElapsedTime(&startTime)); #endif - return (PLUGIN_HANDLE)connInfo; + return (PLUGIN_HANDLE)info; } @@ -788,70 +416,9 @@ void plugin_start(const PLUGIN_HANDLE handle, #endif Logger* logger = Logger::getLogger(); - CONNECTOR_INFO* connInfo = (CONNECTOR_INFO *)handle; - - // Parse JSON plugin_data - Document JSONData; - JSONData.Parse(storedData.c_str()); - if (JSONData.HasParseError()) - { - logger->error("%s plugin error: failure parsing " - "plugin data JSON object '%s'", - PLUGIN_NAME, - storedData.c_str()); - } - else if (JSONData.HasMember(TYPE_ID_KEY) && - (JSONData[TYPE_ID_KEY].IsString() || - JSONData[TYPE_ID_KEY].IsNumber())) - { - // Update type-id in PLUGIN_HANDLE object - if (JSONData[TYPE_ID_KEY].IsNumber()) - { - connInfo->typeId = JSONData[TYPE_ID_KEY].GetInt(); - } - else - { - connInfo->typeId = atol(JSONData[TYPE_ID_KEY].GetString()); - } - } - - // Load sentdataTypes - loadSentDataTypes(connInfo, JSONData); - - // Log default type-id - if (connInfo->assetsDataTypes.size() == 1 && - connInfo->assetsDataTypes.find(FAKE_ASSET_KEY) != connInfo->assetsDataTypes.end()) - { - // Only one value: we have the FAKE_ASSET_KEY and no other data - Logger::getLogger()->info("%s plugin is using global OMF prefix %s=%d", - PLUGIN_NAME, - TYPE_ID_KEY, - connInfo->typeId); - } - else - { - Logger::getLogger()->info("%s plugin is using per asset OMF prefix %s=%d " - "(max value found)", - PLUGIN_NAME, - TYPE_ID_KEY, - getMaxTypeId(connInfo)); - } + OMFInformation *info = (OMFInformation *)handle; + info->start(storedData); - // Retrieve the PI Web API Version - s_connected = true; - if (connInfo->PIServerEndpoint == ENDPOINT_PIWEB_API) - { - int httpCode = PIWebAPIGetVersion(connInfo, connInfo->PIWebAPIVersion); - if (httpCode >= 200 && httpCode < 400) - { - Logger::getLogger()->info("%s connected to %s" ,connInfo->PIWebAPIVersion.c_str(), connInfo->hostAndPort.c_str()); - s_connected = true; - } - else - { - s_connected = false; - } - } #if INSTRUMENT Logger::getLogger()->debug("plugin_start elapsed time: %6.3f seconds", GetElapsedTime(&startTime)); @@ -864,188 +431,8 @@ void plugin_start(const PLUGIN_HANDLE handle, uint32_t plugin_send(const PLUGIN_HANDLE handle, const vector& readings) { -#if INSTRUMENT - struct timeval startTime; - gettimeofday(&startTime, NULL); -#endif - CONNECTOR_INFO* connInfo = (CONNECTOR_INFO *)handle; - string version; - - // Check if the endpoint is PI Web API and if the PI Web API server is available - if (!IsPIWebAPIConnected(connInfo, version)) - { - Logger::getLogger()->fatal("OMF Endpoint is not available"); - return 0; - } - // FIXME - The above call is not working. Investigate why? 
FOGL-7293 - - // Above call does not always populate version - if (version.empty()) - { - PIWebAPIGetVersion(connInfo, version, false); - } - - Logger::getLogger()->info("Version is '%s'", version.c_str()); - - // Until we know better assume OMF 1.2 as this is the base base point - // to give us the flexible type support we need - connInfo->omfversion = "1.2"; - if (version.find("2019") != std::string::npos) - { - connInfo->omfversion = "1.0"; - } - else if (version.find("2020") != std::string::npos) - { - connInfo->omfversion = "1.1"; - } - else if (version.find("2021") != std::string::npos) - { - connInfo->omfversion = "1.2"; - } - Logger::getLogger()->info("Using OMF Version '%s'", connInfo->omfversion.c_str()); - /** - * Select the transport library based on the authentication method and transport encryption - * requirements. - * - * LibcurlHttps is used to integrate Kerberos as the SimpleHttp does not support it - * the Libcurl integration implements only HTTPS not HTTP currently. We use SimpleHttp or - * SimpleHttps, as appropriate for the URL given, if not using Kerberos - * - * - * The handler is allocated using "Hostname : port", connect_timeout and request_timeout. - * Default is no timeout - */ - if (connInfo->PIWebAPIAuthMethod.compare("k") == 0) - { - connInfo->sender = new LibcurlHttps(connInfo->hostAndPort, - connInfo->timeout, - connInfo->timeout, - connInfo->retrySleepTime, - connInfo->maxRetry); - } - else - { - if (connInfo->protocol.compare("http") == 0) - { - connInfo->sender = new SimpleHttp(connInfo->hostAndPort, - connInfo->timeout, - connInfo->timeout, - connInfo->retrySleepTime, - connInfo->maxRetry); - } - else - { - connInfo->sender = new SimpleHttps(connInfo->hostAndPort, - connInfo->timeout, - connInfo->timeout, - connInfo->retrySleepTime, - connInfo->maxRetry); - } - } - - connInfo->sender->setAuthMethod (connInfo->PIWebAPIAuthMethod); - connInfo->sender->setAuthBasicCredentials(connInfo->PIWebAPICredentials); - - // OCS configurations - connInfo->sender->setOCSNamespace (connInfo->OCSNamespace); - connInfo->sender->setOCSTenantId (connInfo->OCSTenantId); - connInfo->sender->setOCSClientId (connInfo->OCSClientId); - connInfo->sender->setOCSClientSecret (connInfo->OCSClientSecret); - - // OCS or ADH - retrieves the authentication token - // It is retrieved at every send as it can expire and the configuration is only in OCS and ADH - if (connInfo->PIServerEndpoint == ENDPOINT_OCS || connInfo->PIServerEndpoint == ENDPOINT_ADH) - { - connInfo->OCSToken = OCSRetrieveAuthToken(connInfo); - connInfo->sender->setOCSToken (connInfo->OCSToken); - } - - // Allocate the OMF class that implements the PI Server data protocol - connInfo->omf = new OMF(*connInfo->sender, - connInfo->path, - connInfo->assetsDataTypes, - connInfo->producerToken); - - connInfo->omf->setConnected(s_connected); - connInfo->omf->setSendFullStructure(connInfo->sendFullStructure); - - // Set PIServerEndpoint configuration - connInfo->omf->setNamingScheme(connInfo->NamingScheme); - connInfo->omf->setPIServerEndpoint(connInfo->PIServerEndpoint); - connInfo->omf->setDefaultAFLocation(connInfo->DefaultAFLocation); - connInfo->omf->setAFMap(connInfo->AFMap); -#ifdef EDS_OMF_VERSION - if (connInfo->PIServerEndpoint == ENDPOINT_EDS) - { - connInfo->omfversion = EDS_OMF_VERSION; - } -#endif - - // Version for Connector Relay is 1.0 only. 
- if (connInfo->PIServerEndpoint == ENDPOINT_CR) - { - connInfo->omfversion = CR_OMF_VERSION; - } - - connInfo->omf->setOMFVersion(connInfo->omfversion); - - // Generates the prefix to have unique asset_id across different levels of hierarchies - string AFHierarchyLevel; - connInfo->omf->generateAFHierarchyPrefixLevel(connInfo->DefaultAFLocation, connInfo->prefixAFAsset, AFHierarchyLevel); - - connInfo->omf->setPrefixAFAsset(connInfo->prefixAFAsset); - - // Set OMF FormatTypes - connInfo->omf->setFormatType(OMF_TYPE_FLOAT, - connInfo->formatNumber); - connInfo->omf->setFormatType(OMF_TYPE_INTEGER, - connInfo->formatInteger); - - connInfo->omf->setStaticData(&connInfo->staticData); - connInfo->omf->setNotBlockingErrors(connInfo->notBlockingErrors); - - if (connInfo->omfversion == "1.1" || connInfo->omfversion == "1.0") { - Logger::getLogger()->info("Setting LegacyType to be true for OMF Version '%s'. This will force use old style complex types. ", connInfo->omfversion.c_str()); - connInfo->omf->setLegacyMode(true); - } - else - { - connInfo->omf->setLegacyMode(connInfo->legacy); - } - // Send the readings data to the PI Server - uint32_t ret = connInfo->omf->sendToServer(readings, - connInfo->compression); - - // Detect typeId change in OMF class - if (connInfo->omf->getTypeId() != connInfo->typeId) - { - // Update typeId in plugin handle - connInfo->typeId = connInfo->omf->getTypeId(); - // Log change - Logger::getLogger()->info("%s plugin: a new OMF global %s (%d) has been created.", - PLUGIN_NAME, - TYPE_ID_KEY, - connInfo->typeId); - } - - // Write a warning if the connection to PI Web API has been lost - bool updatedConnected = connInfo->omf->getConnected(); - if (connInfo->PIServerEndpoint == ENDPOINT_PIWEB_API && s_connected && !updatedConnected) - { - Logger::getLogger()->warn("Connection to PI Web API at %s has been lost", connInfo->hostAndPort.c_str()); - } - s_connected = updatedConnected; - - // Delete objects - delete connInfo->sender; - delete connInfo->omf; - -#if INSTRUMENT - Logger::getLogger()->debug("plugin_send elapsed time: %6.3f seconds, NumValues: %u", GetElapsedTime(&startTime), ret); -#endif - - // Return sent data ret code - return ret; + OMFInformation *info = (OMFInformation *)handle; + return info->send(readings); } /** @@ -1061,672 +448,13 @@ uint32_t plugin_send(const PLUGIN_HANDLE handle, */ string plugin_shutdown(PLUGIN_HANDLE handle) { -#if INSTRUMENT - struct timeval startTime; - gettimeofday(&startTime, NULL); -#endif - // Delete the handle - CONNECTOR_INFO* connInfo = (CONNECTOR_INFO *) handle; - - // Create save data - std::ostringstream saveData; - saveData << "{"; - - // Add sent data types - string typesData = saveSentDataTypes(connInfo); - if (!typesData.empty()) - { - // Save datatypes - saveData << typesData; - } - else - { - // Just save type-id - saveData << "\"" << TYPE_ID_KEY << "\": " << to_string(connInfo->typeId); - } - - saveData << "}"; - - // Log saving the plugin configuration - Logger::getLogger()->debug("%s plugin: saving plugin_data '%s'", - PLUGIN_NAME, - saveData.str().c_str()); - - // Delete plugin handle - delete connInfo; - -#if INSTRUMENT - // For debugging: write plugin's JSON data to a file - string jsonFilePath = getDataDir() + string("/logs/OMFSaveData.json"); - ofstream f(jsonFilePath.c_str(), ios_base::trunc); - f << saveData.str(); - f.close(); + OMFInformation *info = (OMFInformation *) handle; - Logger::getLogger()->debug("plugin_shutdown elapsed time: %6.3f seconds", GetElapsedTime(&startTime)); -#endif - - // Return 
current plugin data to save - return saveData.str(); + string rval = info->saveData(); + delete info; + return rval; } // End of extern "C" }; - -/** - * Return a JSON string with the dataTypes to save in plugin_data - * - * Note: the entry with FAKE_ASSET_KEY is never saved. - * - * @param connInfo The CONNECTOR_INFO data structure - * @return The string with JSON data - */ -string saveSentDataTypes(CONNECTOR_INFO* connInfo) -{ - string ret; - std::ostringstream newData; - - auto it = connInfo->assetsDataTypes.find(FAKE_ASSET_KEY); - if (it != connInfo->assetsDataTypes.end()) - { - // Set typeId in FAKE_ASSET_KEY - connInfo->typeId = (*it).second.typeId; - // Remove the entry - connInfo->assetsDataTypes.erase(it); - } - - - unsigned long tSize = connInfo->assetsDataTypes.size(); - if (tSize) - { - - // Prepare output data (skip empty data types) - newData << "\"" << SENT_TYPES_KEY << "\" : ["; - - bool pendingSeparator = false; - for (auto it = connInfo->assetsDataTypes.begin(); - it != connInfo->assetsDataTypes.end(); - ++it) - { - if (((*it).second).types.compare("{}") != 0) - { - newData << (pendingSeparator ? ", " : ""); - newData << "{\"" << (*it).first << "\" : {\"" << TYPE_ID_KEY << - "\": " << to_string(((*it).second).typeId); - - // The information should be stored as string in hexadecimal format - std::stringstream tmpStream; - tmpStream << std::hex << ((*it).second).typesShort; - std::string typesShort = tmpStream.str(); - - newData << ", \"" << DATA_KEY_SHORT << "\": \"0x" << typesShort << "\""; - std::stringstream hintStream; - hintStream << std::hex << ((*it).second).hintChkSum; - std::string hintChecksum = hintStream.str(); - newData << ", \"" << DATA_KEY_HINT << "\": \"0x" << hintChecksum << "\""; - - long NamingScheme; - NamingScheme = ((*it).second).namingScheme; - newData << ", \"" << NAMING_SCHEME << "\": " << to_string(NamingScheme) << ""; - - string AFHHash; - AFHHash = ((*it).second).afhHash; - newData << ", \"" << AFH_HASH << "\": \"" << AFHHash << "\""; - - string AFHierarchy; - AFHierarchy = ((*it).second).afHierarchy; - newData << ", \"" << AF_HIERARCHY << "\": \"" << AFHierarchy << "\""; - - string AFHierarchyOrig; - AFHierarchyOrig = ((*it).second).afHierarchyOrig; - newData << ", \"" << AF_HIERARCHY_ORIG << "\": \"" << AFHierarchyOrig << "\""; - - Logger::getLogger()->debug("%s - AFHHash :%s: AFHierarchy :%s: AFHierarchyOrig :%s:", __FUNCTION__, AFHHash.c_str(), AFHierarchy.c_str(), AFHierarchyOrig.c_str() ); - Logger::getLogger()->debug("%s - NamingScheme :%ld: ", __FUNCTION__,NamingScheme ); - - newData << ", \"" << DATA_KEY << "\": " << - (((*it).second).types.empty() ? "{}" : ((*it).second).types) << - "}}"; - pendingSeparator = true; - } - } - - tSize = connInfo->assetsDataTypes.size(); - if (!tSize) - { - // DataTypes map is empty - return ret; - } - - newData << "]"; - - ret = newData.str(); - } - - return ret; -} - - -/** - * Calculate the TypeShort in the case it is missing loading type definition - * - * Generate a 64 bit number containing a set of counts, - * number of datapoints in an asset and the number of datapoint of each type we support. 
- * - */ -unsigned long calcTypeShort(const string& dataTypes) -{ - union t_typeCount { - struct - { - unsigned char tTotal; - unsigned char tFloat; - unsigned char tString; - unsigned char spare0; - - unsigned char spare1; - unsigned char spare2; - unsigned char spare3; - unsigned char spare4; - } cnt; - unsigned long valueLong = 0; - - } typeCount; - - Document JSONData; - JSONData.Parse(dataTypes.c_str()); - - if (JSONData.HasParseError()) - { - Logger::getLogger()->error("calcTypeShort - unable to calculate TypeShort on :%s: ", dataTypes.c_str()); - return (0); - } - - for (Value::ConstMemberIterator it = JSONData.MemberBegin(); it != JSONData.MemberEnd(); ++it) - { - - string key = it->name.GetString(); - const Value& value = it->value; - - if (value.HasMember(PROPERTY_TYPE) && value[PROPERTY_TYPE].IsString()) - { - string type =value[PROPERTY_TYPE].GetString(); - - // Integer is handled as float in the OMF integration - if (type.compare(PROPERTY_NUMBER) == 0) - { - typeCount.cnt.tFloat++; - } else if (type.compare(PROPERTY_STRING) == 0) - { - typeCount.cnt.tString++; - } else { - - Logger::getLogger()->error("calcTypeShort - unrecognized type :%s: ", type.c_str()); - } - typeCount.cnt.tTotal++; - } - else - { - Logger::getLogger()->error("calcTypeShort - unable to extract the type for :%s: ", key.c_str()); - return (0); - } - } - - return typeCount.valueLong; -} - - -/** - * Load stored data types (already sent to PI server) - * - * Each element, the assetName, has type-id and datatype for each datapoint - * - * If no data exists in the plugin_data table, then a map entry - * with FAKE_ASSET_KEY is made in order to set the start type-id - * sequence with default value set to 1: - * all new created OMF dataTypes have type-id prefix set to the value of 1. - * - * If data like {"type-id": 14} or {"type-id": "14" } is found, a map entry - * with FAKE_ASSET_KEY is made and the start type-id sequence value is set - * to the found value, i.e. 14: - * all new created OMF dataTypes have type-id prefix set to the value of 14. - * - * If proper per asset types data is loaded, the FAKE_ASSET_KEY is not set: - * all new created OMF dataTypes have type-id prefix set to the value of 1 - * while existing (loaded) OMF dataTypes will keep their type-id values. 
- * - * @param connInfo The CONNECTOR_INFO data structure - * @param JSONData The JSON document containing all saved data - */ -void loadSentDataTypes(CONNECTOR_INFO* connInfo, - Document& JSONData) -{ - if (JSONData.HasMember(SENT_TYPES_KEY) && - JSONData[SENT_TYPES_KEY].IsArray()) - { - const Value& cachedTypes = JSONData[SENT_TYPES_KEY]; - for (Value::ConstValueIterator it = cachedTypes.Begin(); - it != cachedTypes.End(); - ++it) - { - if (!it->IsObject()) - { - Logger::getLogger()->warn("%s plugin: current element in '%s' " \ - "property is not an object, ignoring it", - PLUGIN_NAME, - SENT_TYPES_KEY); - continue; - } - - for (Value::ConstMemberIterator itr = it->MemberBegin(); - itr != it->MemberEnd(); - ++itr) - { - string key = itr->name.GetString(); - const Value& cachedValue = itr->value; - - // Add typeId and dataTypes to the in memory cache - long typeId; - if (cachedValue.HasMember(TYPE_ID_KEY) && - cachedValue[TYPE_ID_KEY].IsNumber()) - { - typeId = cachedValue[TYPE_ID_KEY].GetInt(); - } - else - { - Logger::getLogger()->warn("%s plugin: current element '%s'" \ - " doesn't have '%s' property, ignoring it", - PLUGIN_NAME, - key.c_str(), - TYPE_ID_KEY); - continue; - } - - long NamingScheme; - if (cachedValue.HasMember(NAMING_SCHEME) && - cachedValue[NAMING_SCHEME].IsNumber()) - { - NamingScheme = cachedValue[NAMING_SCHEME].GetInt(); - } - else - { - Logger::getLogger()->warn("%s plugin: current element '%s'" \ - " doesn't have '%s' property, handling naming scheme in compatibility mode", - PLUGIN_NAME, - key.c_str(), - NAMING_SCHEME); - NamingScheme = NAMINGSCHEME_COMPATIBILITY; - } - - string AFHHash; - if (cachedValue.HasMember(AFH_HASH) && - cachedValue[AFH_HASH].IsString()) - { - AFHHash = cachedValue[AFH_HASH].GetString(); - } - else - { - Logger::getLogger()->warn("%s plugin: current element '%s'" \ - " doesn't have '%s' property", - PLUGIN_NAME, - key.c_str(), - AFH_HASH); - AFHHash = ""; - } - - string AFHierarchy; - if (cachedValue.HasMember(AF_HIERARCHY) && - cachedValue[AF_HIERARCHY].IsString()) - { - AFHierarchy = cachedValue[AF_HIERARCHY].GetString(); - } - else - { - Logger::getLogger()->warn("%s plugin: current element '%s'" \ - " doesn't have '%s' property", - PLUGIN_NAME, - key.c_str(), - AF_HIERARCHY); - AFHierarchy = ""; - } - - string AFHierarchyOrig; - if (cachedValue.HasMember(AF_HIERARCHY_ORIG) && - cachedValue[AF_HIERARCHY_ORIG].IsString()) - { - AFHierarchyOrig = cachedValue[AF_HIERARCHY_ORIG].GetString(); - } - else - { - Logger::getLogger()->warn("%s plugin: current element '%s'" \ - " doesn't have '%s' property", - PLUGIN_NAME, - key.c_str(), - AF_HIERARCHY_ORIG); - AFHierarchyOrig = ""; - } - - string dataTypes; - if (cachedValue.HasMember(DATA_KEY) && - cachedValue[DATA_KEY].IsObject()) - { - StringBuffer buffer; - Writer writer(buffer); - const Value& types = cachedValue[DATA_KEY]; - types.Accept(writer); - dataTypes = buffer.GetString(); - } - else - { - Logger::getLogger()->warn("%s plugin: current element '%s'" \ - " doesn't have '%s' property, ignoring it", - PLUGIN_NAME, - key.c_str(), - DATA_KEY); - - continue; - } - - unsigned long dataTypesShort; - if (cachedValue.HasMember(DATA_KEY_SHORT) && - cachedValue[DATA_KEY_SHORT].IsString()) - { - string strDataTypesShort = cachedValue[DATA_KEY_SHORT].GetString(); - // The information are stored as string in hexadecimal format - dataTypesShort = stoi (strDataTypesShort,nullptr,16); - } - else - { - dataTypesShort = calcTypeShort(dataTypes); - if (dataTypesShort == 0) - { - 
Logger::getLogger()->warn("%s plugin: current element '%s'" \ - " doesn't have '%s' property", - PLUGIN_NAME, - key.c_str(), - DATA_KEY_SHORT); - } - else - { - Logger::getLogger()->warn("%s plugin: current element '%s'" \ - " doesn't have '%s' property, calculated '0x%X'", - PLUGIN_NAME, - key.c_str(), - DATA_KEY_SHORT, - dataTypesShort); - } - } - unsigned short hintChecksum = 0; - if (cachedValue.HasMember(DATA_KEY_HINT) && - cachedValue[DATA_KEY_HINT].IsString()) - { - string strHint = cachedValue[DATA_KEY_HINT].GetString(); - // The information are stored as string in hexadecimal format - hintChecksum = stoi (strHint,nullptr,16); - } - OMFDataTypes dataType; - dataType.typeId = typeId; - dataType.types = dataTypes; - dataType.typesShort = dataTypesShort; - dataType.hintChkSum = hintChecksum; - dataType.namingScheme = NamingScheme; - dataType.afhHash = AFHHash; - dataType.afHierarchy = AFHierarchy; - dataType.afHierarchyOrig = AFHierarchyOrig; - - Logger::getLogger()->debug("%s - AFHHash :%s: AFHierarchy :%s: AFHierarchyOrig :%s: ", __FUNCTION__, AFHHash.c_str(), AFHierarchy.c_str() , AFHierarchyOrig.c_str() ); - - - Logger::getLogger()->debug("%s - NamingScheme :%ld: ", __FUNCTION__,NamingScheme ); - - // Add data into the map - connInfo->assetsDataTypes[key] = dataType; - } - } - } - else - { - Logger::getLogger()->warn("Persisted data is not of the correct format, ignoring"); - OMFDataTypes dataType; - dataType.typeId = connInfo->typeId; - dataType.types = "{}"; - - // Add default data into the map - connInfo->assetsDataTypes[FAKE_ASSET_KEY] = dataType; - } -} - -/** - * Return the maximum value of type-id, among all entries in the map - * - * If the array is empty the connInfo->typeId is returned. - * - * @param connInfo The CONNECTOR_INFO data structure - * @return The maximum value of type-id found - */ -long getMaxTypeId(CONNECTOR_INFO* connInfo) -{ - long maxId = connInfo->typeId; - for (auto it = connInfo->assetsDataTypes.begin(); - it != connInfo->assetsDataTypes.end(); - ++it) - { - if ((*it).second.typeId > maxId) - { - maxId = (*it).second.typeId; - } - } - return maxId; -} - -/** - * Calls the PI Web API to retrieve the version - * - * @param connInfo The CONNECTOR_INFO data structure - * @param version Returned version string - * @param logMessage If true, log error messages (default: true) - * @return httpCode HTTP response code - */ -int PIWebAPIGetVersion(CONNECTOR_INFO* connInfo, std::string &version, bool logMessage) -{ - PIWebAPI *_PIWebAPI; - - _PIWebAPI = new PIWebAPI(); - - // Set requested authentication - _PIWebAPI->setAuthMethod (connInfo->PIWebAPIAuthMethod); - _PIWebAPI->setAuthBasicCredentials(connInfo->PIWebAPICredentials); - - int httpCode = _PIWebAPI->GetVersion(connInfo->hostAndPort, version, logMessage); - delete _PIWebAPI; - - return httpCode; -} - -/** - * Calls the OCS API to retrieve the authentication token - * - * @param connInfo The CONNECTOR_INFO data structure - * @return token Authorization token - */ -string OCSRetrieveAuthToken(CONNECTOR_INFO* connInfo) -{ - string token; - OCS *ocs; - - if (connInfo->PIServerEndpoint == ENDPOINT_OCS) - ocs = new OCS(); - else if (connInfo->PIServerEndpoint == ENDPOINT_ADH) - ocs = new OCS(true); - - token = ocs->retrieveToken(connInfo->OCSClientId , connInfo->OCSClientSecret); - - delete ocs; - - return token; -} - -/** - * Evaluate if the endpoint is a PI Web API or a Connector Relay. 
- * - * @param connInfo The CONNECTOR_INFO data structure - * @return OMF_ENDPOINT values - */ -OMF_ENDPOINT identifyPIServerEndpoint(CONNECTOR_INFO* connInfo) -{ - OMF_ENDPOINT PIServerEndpoint; - - HttpSender *endPoint; - vector> header; - int httpCode; - - - if (connInfo->PIWebAPIAuthMethod.compare("k") == 0) - { - endPoint = new LibcurlHttps(connInfo->hostAndPort, - connInfo->timeout, - connInfo->timeout, - connInfo->retrySleepTime, - connInfo->maxRetry); - } - else - { - endPoint = new SimpleHttps(connInfo->hostAndPort, - connInfo->timeout, - connInfo->timeout, - connInfo->retrySleepTime, - connInfo->maxRetry); - } - - // Set requested authentication - endPoint->setAuthMethod (connInfo->PIWebAPIAuthMethod); - endPoint->setAuthBasicCredentials(connInfo->PIWebAPICredentials); - - try - { - httpCode = endPoint->sendRequest("GET", - connInfo->path, - header, - ""); - - if (httpCode >= 200 && httpCode <= 399) - { - PIServerEndpoint = ENDPOINT_PIWEB_API; - if (connInfo->PIWebAPIAuthMethod == "b") - Logger::getLogger()->debug("PI Web API end-point basic authorization granted"); - } - else - { - PIServerEndpoint = ENDPOINT_CR; - } - - } - catch (exception &ex) - { - Logger::getLogger()->warn("PI-Server end-point discovery encountered the error :%s: " - "trying selecting the Connector Relay as an end-point", ex.what()); - PIServerEndpoint = ENDPOINT_CR; - } - - delete endPoint; - - return (PIServerEndpoint); -} - -/** - * Generate the credentials for the basic authentication - * encoding user id and password joined by a single colon (:) using base64 - * - * @param userId User id to be used for the generation of the credentials - * @param password Password to be used for the generation of the credentials - * @return credentials to be used with the basic authentication - */ -string AuthBasicCredentialsGenerate(string& userId, string& password) -{ - string Credentials; - - Credentials = Crypto::Base64::encode(userId + ":" + password); - - return (Credentials); -} - -/** - * Configures for Kerberos authentication : - * - set the environment KRB5_CLIENT_KTNAME to the position containing the - * Kerberos keys, the keytab file. 
- * - * @param out keytabEnv string containing the command to set the - * KRB5_CLIENT_KTNAME environment variable - * @param keytabFileName File name of the keytab file - * - */ -void AuthKerberosSetup(string& keytabEnv, string& keytabFileName) -{ - string fledgeData = getDataDir (); - string keytabFullPath = fledgeData + "/etc/kerberos" + "/" + keytabFileName; - - keytabEnv = "KRB5_CLIENT_KTNAME=" + keytabFullPath; - putenv((char *) keytabEnv.c_str()); - - if (access(keytabFullPath.c_str(), F_OK) != 0) - { - Logger::getLogger()->error("Kerberos authentication not possible, the keytab file :%s: is missing.", keytabFullPath.c_str()); - } - -} - -/** - * Calculate elapsed time in seconds - * - * @param startTime Start time of the interval to be evaluated - * @return Elapsed time in seconds - */ -double GetElapsedTime(struct timeval *startTime) -{ - struct timeval endTime, diff; - gettimeofday(&endTime, NULL); - timersub(&endTime, startTime, &diff); - return diff.tv_sec + ((double)diff.tv_usec / 1000000); -} - -/** - * Check if the PI Web API server is available by reading the product version - * - * @param connInfo The CONNECTOR_INFO data structure - * @param version Returned version string - * @return Connection status - */ -bool IsPIWebAPIConnected(CONNECTOR_INFO* connInfo, std::string& version) -{ - static std::chrono::steady_clock::time_point nextCheck; - - if (!s_connected && connInfo->PIServerEndpoint == ENDPOINT_PIWEB_API) - { - std::chrono::steady_clock::time_point now = std::chrono::steady_clock::now(); - - if (now >= nextCheck) - { - int httpCode = PIWebAPIGetVersion(connInfo, version, false); - if (httpCode >= 500) - { - s_connected = false; - now = std::chrono::steady_clock::now(); - nextCheck = now + std::chrono::seconds(60); - Logger::getLogger()->debug("PI Web API %s is not available. HTTP Code: %d", connInfo->hostAndPort.c_str(), httpCode); - } - else - { - s_connected = true; - Logger::getLogger()->info("%s reconnected to %s", version.c_str(), connInfo->hostAndPort.c_str()); - } - } - } - else - { - // Endpoints other than PI Web API fail quickly when they are unavailable - // so there is no need to check their status in advance. 
- s_connected = true; - } - - return s_connected; -} diff --git a/C/plugins/storage/common/include/sql_buffer.h b/C/plugins/storage/common/include/sql_buffer.h index 5a5d023aae..ea1142651f 100644 --- a/C/plugins/storage/common/include/sql_buffer.h +++ b/C/plugins/storage/common/include/sql_buffer.h @@ -47,6 +47,7 @@ class SQLBuffer { void append(const std::string&); void quote(const std::string&); const char *coalesce(); + void clear(); private: std::list buffers; diff --git a/C/plugins/storage/common/sql_buffer.cpp b/C/plugins/storage/common/sql_buffer.cpp index ad08e71885..7261032c39 100644 --- a/C/plugins/storage/common/sql_buffer.cpp +++ b/C/plugins/storage/common/sql_buffer.cpp @@ -36,6 +36,19 @@ SQLBuffer::~SQLBuffer() } } +/** + * Clear all the buffers from the SQLBuffer and allow it to be reused + */ +void SQLBuffer::clear() +{ + for (list::iterator it = buffers.begin(); it != buffers.end(); ++it) + { + delete *it; + } + buffers.clear(); + buffers.push_front(new SQLBuffer::Buffer()); +} + /** * Append a character to a buffer * diff --git a/C/plugins/storage/postgres/connection.cpp b/C/plugins/storage/postgres/connection.cpp index baa6656e93..837fe36b6d 100644 --- a/C/plugins/storage/postgres/connection.cpp +++ b/C/plugins/storage/postgres/connection.cpp @@ -28,6 +28,8 @@ #include #include +#include "json_utils.h" + #include #include #include @@ -327,7 +329,7 @@ bool Connection::aggregateQuery(const Value& payload, string& resultSet) /** * Create a database connection */ -Connection::Connection() +Connection::Connection() : m_maxReadingRows(INSERT_ROW_LIMIT) { const char *defaultConninfo = "dbname = fledge"; char *connInfo = NULL; @@ -1234,8 +1236,11 @@ SQLBuffer sql; } else { + StringBuffer buffer; + Writer writer(buffer); + value.Accept(writer); sql.append("'\""); - sql.append(escape_double_quotes(escape(str))); + sql.append(escape_double_quotes(escape(JSONunescape(buffer.GetString())))); sql.append("\"'"); } } @@ -1511,12 +1516,10 @@ bool add_row = false; return -1; } - sql.append("INSERT INTO fledge.readings ( user_ts, asset_code, reading ) VALUES "); - if (!doc.HasMember("readings")) { raiseError("appendReadings", "Payload is missing a readings array"); - return -1; + return -1; } Value &rdings = doc["readings"]; if (!rdings.IsArray()) @@ -1524,8 +1527,33 @@ bool add_row = false; raiseError("appendReadings", "Payload is missing the readings array"); return -1; } + + const char *head = "INSERT INTO fledge.readings ( user_ts, asset_code, reading ) VALUES "; + sql.append(head); + + int count = 0; for (Value::ConstValueIterator itr = rdings.Begin(); itr != rdings.End(); ++itr) { + if (count == m_maxReadingRows) + { + sql.append(';'); + + const char *query = sql.coalesce(); + logSQL("ReadingsAppend", query); + PGresult *res = PQexec(dbConnection, query); + delete[] query; + if (PQresultStatus(res) != PGRES_COMMAND_OK) + { + raiseError("appendReadings", PQerrorMessage(dbConnection)); + PQclear(res); + return -1; + } + PQclear(res); + + sql.clear(); + sql.append(head); + count = 0; + } if (!itr->IsObject()) { raiseError("appendReadings", @@ -1533,12 +1561,18 @@ bool add_row = false; return -1; } add_row = true; + const char *asset_code = (*itr)["asset_code"].GetString(); + if (strlen(asset_code) == 0) + { + Logger::getLogger()->warn("Postgres appendReadings - empty asset code value, row is ignored"); + continue; + } const char *str = (*itr)["user_ts"].GetString(); // Check if the string is a function if (isFunction(str)) { - if (row) + if (count) sql.append(", ("); else 
sql.append('('); @@ -1555,7 +1589,7 @@ bool add_row = false; } else { - if (row) + if (count) { sql.append(", ("); } @@ -1573,10 +1607,11 @@ bool add_row = false; if (add_row) { row++; + count++; // Handles - asset_code sql.append(",\'"); - sql.append((*itr)["asset_code"].GetString()); + sql.append(asset_code); sql.append("', '"); // Handles - reading @@ -1589,21 +1624,35 @@ bool add_row = false; sql.append(')'); } } + + if (count == 0) + { + // No rows in final block + return 0; + } sql.append(';'); const char *query = sql.coalesce(); - logSQL("ReadingsAppend", query); - PGresult *res = PQexec(dbConnection, query); - delete[] query; - if (PQresultStatus(res) == PGRES_COMMAND_OK) + if (row > 0) { + logSQL("ReadingsAppend", query); + PGresult *res = PQexec(dbConnection, query); + delete[] query; + if (PQresultStatus(res) == PGRES_COMMAND_OK) + { + PQclear(res); + return atoi(PQcmdTuples(res)); + } + raiseError("appendReadings", PQerrorMessage(dbConnection)); PQclear(res); - return atoi(PQcmdTuples(res)); + return -1; + } + else + { + delete[] query; + return 0; } - raiseError("appendReadings", PQerrorMessage(dbConnection)); - PQclear(res); - return -1; } /** @@ -1663,7 +1712,9 @@ unsigned int Connection::purgeReadings(unsigned long age, unsigned int flags, u result = "{ \"removed\" : 0, "; result += " \"unsentPurged\" : 0, "; result += " \"unsentRetained\" : 0, "; - result += " \"readings\" : 0 }"; + result += " \"readings\" : 0, "; + result += " \"method\" : \"age\", "; + result += " \"duration\" : 0 }"; logger->info("Purge starting..."); gettimeofday(&startTv, NULL); @@ -1879,20 +1930,21 @@ unsigned int Connection::purgeReadings(unsigned long age, unsigned int flags, u ostringstream convert; + unsigned long duration; + gettimeofday(&endTv, NULL); + duration = (1000000 * (endTv.tv_sec - startTv.tv_sec)) + endTv.tv_usec - startTv.tv_usec; + convert << "{ \"removed\" : " << deletedRows << ", "; convert << " \"unsentPurged\" : " << unsentPurged << ", "; convert << " \"unsentRetained\" : " << unsentRetained << ", "; - convert << " \"readings\" : " << numReadings << " }"; + convert << " \"readings\" : " << numReadings << ", "; + convert << " \"method\" : \"age\", "; + convert << " \"duration\" : " << duration << " }"; result = convert.str(); - { // Timing - unsigned long duration; - gettimeofday(&endTv, NULL); - duration = (1000000 * (endTv.tv_sec - startTv.tv_sec)) + endTv.tv_usec - startTv.tv_usec; - duration = duration / 1000; // milliseconds - logger->info("Purge process complete in %d blocks in %ld milliseconds", blocks, duration); - } + duration = duration / 1000; // milliseconds + logger->info("Purge process complete in %d blocks in %ld milliseconds", blocks, duration); Logger::getLogger()->debug("%s - age :%lu: flag_retain :%x: sent :%lu: result :%s:", __FUNCTION__, age, flags, flag_retain, result.c_str() ); @@ -1964,6 +2016,7 @@ unsigned int Connection::purgeReadingsByRows(unsigned long rows, unsigned long rowcount, minId, maxId; unsigned long rowsAffectedLastComand; unsigned long deletePoint; + struct timeval startTv, endTv; string sqlCommand; bool flag_retain; @@ -1972,6 +2025,7 @@ unsigned int Connection::purgeReadingsByRows(unsigned long rows, Logger *logger = Logger::getLogger(); + gettimeofday(&startTv, NULL); flag_retain = false; if ( (flags & STORAGE_PURGE_RETAIN_ANY) || (flags & STORAGE_PURGE_RETAIN_ALL) ) @@ -2064,12 +2118,17 @@ unsigned int Connection::purgeReadingsByRows(unsigned long rows, unsentRetained = numReadings - rows; } + gettimeofday(&endTv, NULL); + unsigned long 
duration = (1000000 * (endTv.tv_sec - startTv.tv_sec)) + endTv.tv_usec - startTv.tv_usec;
+
 	ostringstream convert;
 
 	convert << "{ \"removed\" : " << deletedRows << ", ";
 	convert << " \"unsentPurged\" : " << unsentPurged << ", ";
 	convert << " \"unsentRetained\" : " << unsentRetained << ", ";
-	convert << " \"readings\" : " << numReadings << " }";
+	convert << " \"readings\" : " << numReadings << ", ";
+	convert << " \"method\" : \"rows\", ";
+	convert << " \"duration\" : " << duration << " }";
 
 	result = convert.str();
 
diff --git a/C/plugins/storage/postgres/connection_manager.cpp b/C/plugins/storage/postgres/connection_manager.cpp
index 6a1c7ddc86..025f168033 100644
--- a/C/plugins/storage/postgres/connection_manager.cpp
+++ b/C/plugins/storage/postgres/connection_manager.cpp
@@ -60,6 +60,7 @@ void ConnectionManager::growPool(unsigned int delta)
 	{
 		Connection *conn = new Connection();
 		conn->setTrace(m_logSQL);
+		conn->setMaxReadingRows(m_maxReadingRows);
 		idleLock.lock();
 		idle.push_back(conn);
 		idleLock.unlock();
@@ -108,6 +109,8 @@ Connection *conn = 0;
 	if (idle.empty())
 	{
 		conn = new Connection();
+		conn->setTrace(m_logSQL);
+		conn->setMaxReadingRows(m_maxReadingRows);
 	}
 	else
 	{
diff --git a/C/plugins/storage/postgres/include/connection.h b/C/plugins/storage/postgres/include/connection.h
index 3d0ef33adb..1a99ed9b2c 100644
--- a/C/plugins/storage/postgres/include/connection.h
+++ b/C/plugins/storage/postgres/include/connection.h
@@ -21,7 +21,13 @@
 #define STORAGE_PURGE_RETAIN_ANY 0x0001U
 #define STORAGE_PURGE_RETAIN_ALL 0x0002U
-#define STORAGE_PURGE_SIZE	0x0004U
+#define STORAGE_PURGE_SIZE	0x0004U
+
+/**
+ * Maximum number of readings to insert in a single
+ * insert statement
+ */
+#define INSERT_ROW_LIMIT	5000
 
 class Connection {
 	public:
@@ -53,6 +59,10 @@ class Connection {
 				const std::string &name, std::string &resultSet);
 		unsigned int	purgeReadingsAsset(const std::string& asset);
+		void		setMaxReadingRows(long rows)
+				{
+					m_maxReadingRows = rows;
+				}
 	private:
 		bool		m_logSQL;
@@ -75,6 +85,7 @@ class Connection {
 		std::string	getIndexName(std::string s);
 		bool		checkValidDataType(const std::string &s);
+		long		m_maxReadingRows;
 
 		typedef	struct{
diff --git a/C/plugins/storage/postgres/include/connection_manager.h b/C/plugins/storage/postgres/include/connection_manager.h
index 6b2009c4c4..8c6fbf1a62 100644
--- a/C/plugins/storage/postgres/include/connection_manager.h
+++ b/C/plugins/storage/postgres/include/connection_manager.h
@@ -32,6 +32,10 @@ class ConnectionManager {
 		{
 			return &lastError;
 		}
+		void		setMaxReadingRows(long rows)
+				{
+					m_maxReadingRows = rows;
+				}
 
 	private:
 		ConnectionManager();
@@ -43,6 +47,7 @@ class ConnectionManager {
 		std::mutex	errorLock;
 		PLUGIN_ERROR	lastError;
 		bool		m_logSQL;
+		long		m_maxReadingRows;
 };
 
 #endif
diff --git a/C/plugins/storage/postgres/plugin.cpp b/C/plugins/storage/postgres/plugin.cpp
index 06212cfae3..bdd0902b43 100644
--- a/C/plugins/storage/postgres/plugin.cpp
+++ b/C/plugins/storage/postgres/plugin.cpp
@@ -22,6 +22,7 @@
 #include 
 #include 
 #include 
+#include 
 
 using namespace std;
 using namespace rapidjson;
@@ -43,6 +44,13 @@ const char *default_config = QUOTE({
 		"default" : "5",
 		"displayName" : "Pool Size",
 		"order" : "1"
+		},
+	"maxReadingRows" : {
+		"description" : "The maximum number of readings to insert in a single statement",
+		"type" : "integer",
+		"default" : "5000",
+		"displayName" : "Max. Insert Rows",
+		"order" : "2"
 		}
 	});
 
@@ -71,11 +79,23 @@ PLUGIN_INFORMATION *plugin_info()
 * In the case of Postgres we also get a pool of connections
 * to use. 
*/ -PLUGIN_HANDLE plugin_init() +PLUGIN_HANDLE plugin_init(ConfigCategory *category) { ConnectionManager *manager = ConnectionManager::getInstance(); +long poolSize = 5, maxReadingRows = 5000; - manager->growPool(5); + if (category->itemExists("poolSize")) + { + poolSize = strtol(category->getValue("poolSize").c_str(), NULL, 10); + } + if (category->itemExists("maxReadingRows")) + { + long val = strtol(category->getValue("maxReadingRows").c_str(), NULL, 10); + if (val > 0) + maxReadingRows = val; + } + manager->setMaxReadingRows(maxReadingRows); + manager->growPool(poolSize); return manager; } diff --git a/C/plugins/storage/sqlite/CMakeLists.txt b/C/plugins/storage/sqlite/CMakeLists.txt index 180d56b356..4a997a02a0 100644 --- a/C/plugins/storage/sqlite/CMakeLists.txt +++ b/C/plugins/storage/sqlite/CMakeLists.txt @@ -6,8 +6,8 @@ set(CMAKE_CXX_FLAGS_DEBUG "-O0 -ggdb") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") set(STORAGE_COMMON_LIB storage-common-lib) -# Path of compiled libsqlite3.a and .h files: /tmp/sqlite3-pkg/src -set(FLEDGE_SQLITE3_LIBS "/tmp/sqlite3-pkg/src" CACHE INTERNAL "") +# Path of compiled sqlite3 file: /usr/local/bin +set(FLEDGE_SQLITE3_LIBS "/usr/local/bin" CACHE INTERNAL "") # Find source files file(GLOB SOURCES ./common/*.cpp ./schema/*.cpp *.cpp) diff --git a/C/plugins/storage/sqlite/common/connection.cpp b/C/plugins/storage/sqlite/common/connection.cpp index 21f9b61de5..989e607c31 100644 --- a/C/plugins/storage/sqlite/common/connection.cpp +++ b/C/plugins/storage/sqlite/common/connection.cpp @@ -7,10 +7,11 @@ * * Author: Massimiliano Pinto */ +#include #include #include -#include #include +#include #include "readings_catalogue.h" @@ -112,7 +113,7 @@ bool Connection::getNow(string& Now) string nowSqlCMD = "SELECT " SQLITE3_NOW_READING; - int rc = SQLexec(dbHandle, + int rc = SQLexec(dbHandle, "now", nowSqlCMD.c_str(), dateCallback, nowDate, @@ -206,8 +207,16 @@ bool Connection::applyColumnDateTimeFormat(sqlite3_stmt *pStmt, // Column metadata found and column datatype is "pzDataType" formatStmt = string("SELECT strftime('"); formatStmt += string(F_DATEH24_MS); - formatStmt += "', '" + string((char *) sqlite3_column_text(pStmt, i)); - formatStmt += "')"; + + string columnText ((char *) sqlite3_column_text(pStmt, i)); + if (columnText.find("strftime") != string::npos) + { + formatStmt += "', " + columnText + ")"; + } + else + { + formatStmt += "', '" + columnText + "')"; + } apply_format = true; @@ -236,7 +245,7 @@ bool Connection::applyColumnDateTimeFormat(sqlite3_stmt *pStmt, char formattedData[100] = ""; // Exec the format SQL - int rc = SQLexec(dbHandle, + int rc = SQLexec(dbHandle, "date", formatStmt.c_str(), dateCallback, formattedData, @@ -473,7 +482,7 @@ Connection::Connection() const char* dbErrMsg = sqlite3_errmsg(dbHandle); const char* errMsg = "Failed to open the SQLite3 database"; - Logger::getLogger()->error("%s '%s': %s", + logger->error("%s '%s': %s", dbErrMsg, dbPath.c_str(), dbErrMsg); @@ -497,7 +506,7 @@ Connection::Connection() if (rc != SQLITE_OK) { string errMsg = "Failed to set WAL from the fledge DB - " DB_CONFIGURATION; - Logger::getLogger()->error("%s : error %s", + logger->error("%s : error %s", DB_CONFIGURATION, zErrMsg); connectErrorTime = time(0); @@ -516,7 +525,7 @@ Connection::Connection() const char *sqlStmt = attachDb.coalesce(); // Exec the statement - rc = SQLexec(dbHandle, + rc = SQLexec(dbHandle, "database", sqlStmt, NULL, NULL, @@ -526,7 +535,7 @@ Connection::Connection() if (rc != SQLITE_OK) { const char* errMsg = "Failed to 
attach 'fledge' database in";
-		Logger::getLogger()->error("%s '%s': error %s",
+		logger->error("%s '%s': error %s",
 				errMsg,
 				sqlStmt,
 				zErrMsg);
@@ -537,14 +546,21 @@ Connection::Connection()
 	}
 	else
 	{
-		Logger::getLogger()->info("Connected to SQLite3 database: %s",
+		logger->info("Connected to SQLite3 database: %s",
 			dbPath.c_str());
 	}
 	//Release sqlStmt buffer
 	delete[] sqlStmt;
 
 	// Attach readings database - readings_1
+	if (access(dbPathReadings.c_str(), R_OK) != 0) {
+		logger->info("No readings database, assuming separate readings plugin is available");
+		m_noReadings = true;
+	}
+	else
+	{
+		m_noReadings = false;
 	SQLBuffer attachReadingsDb;
 	attachReadingsDb.append("ATTACH DATABASE '");
 	attachReadingsDb.append(dbPathReadings + "' AS readings_1;");
@@ -552,7 +568,7 @@ Connection::Connection()
 	const char *sqlReadingsStmt = attachReadingsDb.coalesce();
 
 	// Exec the statement
-	rc = SQLexec(dbHandle,
+	rc = SQLexec(dbHandle, "database",
 		     sqlReadingsStmt,
 		     NULL,
 		     NULL,
@@ -562,7 +578,7 @@ Connection::Connection()
 	if (rc != SQLITE_OK)
 	{
 		const char* errMsg = "Failed to attach 'readings' database in";
-		Logger::getLogger()->error("%s '%s': error %s",
+		logger->error("%s '%s': error %s",
 				errMsg,
 				sqlReadingsStmt,
 				zErrMsg);
@@ -573,7 +589,7 @@ Connection::Connection()
 	}
 	else
 	{
-		Logger::getLogger()->info("Connected to SQLite3 database: %s",
+		logger->info("Connected to SQLite3 database: %s",
 			dbPath.c_str());
 	}
 	//Release sqlStmt buffer
@@ -591,22 +607,35 @@ Connection::Connection()
 			sqlite3_free(zErrMsg);
 		}
+
+		ReadingsCatalogue *catalogue = ReadingsCatalogue::getInstance();
+		catalogue->createReadingsOverflowTable(dbHandle, 1);
 		}
 	}
 
+	if (!m_noReadings)
 	{
 		// Attach all the defined/used databases
 		ReadingsCatalogue *readCat = ReadingsCatalogue::getInstance();
 		if ( !readCat->connectionAttachAllDbs(dbHandle) )
 		{
-			const char* errMsg = "Failed to attach all the dbs to the connection :%X:'readings' database in";
-			Logger::getLogger()->error("%s '%s': error %s", errMsg, dbHandle);
+			const char* errMsg = "Failed to attach all the databases to the connection";
+			logger->error(errMsg);
 			connectErrorTime = time(0);
 			sqlite3_close_v2(dbHandle);
+			throw new runtime_error(errMsg);
+		}
+		else
+		{
+			logger->info("Attached all %d readings databases to connection", readCat->getReadingsCount());
 		}
 	}
+	else
+	{
+		logger->info("Connection will not attach to readings tables");
+	}
 
 	m_schemaManager = SchemaManager::getInstance();
 }
@@ -1056,7 +1085,7 @@ vector asset_codes;
 	if (document.HasMember("join"))
 	{
-		if (!jsonWhereClause(document["where"], sql, asset_codes, true, "t0."))
+		if (!jsonWhereClause(document["where"], sql, asset_codes, false, "t0."))
 		{
 			return false;
 		}
@@ -1101,7 +1130,7 @@ vector asset_codes;
 	}
 	else if (document.HasMember("where"))
 	{
-		if (!jsonWhereClause(document["where"], sql, asset_codes, true))
+		if (!jsonWhereClause(document["where"], sql, asset_codes, false))
 		{
 			raiseError("retrieve", "Failed to add where clause");
 			return false;
 		}
@@ -1132,7 +1161,7 @@ vector asset_codes;
 	const char *query = sql.coalesce();
 	char *zErrMsg = NULL;
 	int rc;
-	sqlite3_stmt *stmt;
+	sqlite3_stmt *stmt = NULL;
 
 	logSQL("CommonRetrive", query);
@@ -1144,6 +1173,10 @@ vector asset_codes;
 		raiseError("retrieve", sqlite3_errmsg(dbHandle));
 		Logger::getLogger()->error("SQL statement: %s", query);
 		delete[] query;
+		if (stmt)
+		{
+			sqlite3_finalize(stmt);
+		}
 		return false;
 	}
@@ -1180,9 +1213,10 @@ vector asset_codes;
  */
 int Connection::insert(const string& schema, const string& table, const string& data)
 {
-SQLBuffer	sql;
 Document 
document; ostringstream convert; +sqlite3_stmt *stmt = NULL; +int rc; std::size_t arr = data.find("inserts"); if (!m_schemaManager->exists(dbHandle, schema)) @@ -1217,13 +1251,11 @@ std::size_t arr = data.find("inserts"); return -1; } - // Start a trabsaction - sql.append("BEGIN TRANSACTION;"); - // Number of inserts int ins = 0; - - // Iterate through insert array + int failedInsertCount = 0; + + // Generate sql query for prepared statement for (Value::ConstValueIterator iter = inserts.Begin(); iter != inserts.End(); ++iter) @@ -1235,138 +1267,164 @@ std::size_t arr = data.find("inserts"); return -1; } - int col = 0; - SQLBuffer values; - - sql.append("INSERT INTO "); - sql.append(schema); - sql.append('.'); - sql.append(table); - sql.append(" ("); - - for (Value::ConstMemberIterator itr = (*iter).MemberBegin(); - itr != (*iter).MemberEnd(); - ++itr) { - // Append column name - if (col) + int col = 0; + SQLBuffer sql; + SQLBuffer values; + sql.append("INSERT INTO " + schema + "." + table + " ("); + + for (Value::ConstMemberIterator itr = (*iter).MemberBegin(); + itr != (*iter).MemberEnd(); + ++itr) { - sql.append(", "); + // Append column name + if (col) + { + sql.append(", "); + } + sql.append(itr->name.GetString()); + col++; } - sql.append(itr->name.GetString()); - - // Append column value - if (col) + + sql.append(") VALUES ("); + for ( auto i = 0 ; i < col; i++ ) { - values.append(", "); + if (i) + { + sql.append(","); + } + sql.append("?"); } - if (itr->value.IsString()) + sql.append(");"); + + const char *query = sql.coalesce(); + + rc = sqlite3_prepare_v2(dbHandle, query, -1, &stmt, NULL); + if (rc != SQLITE_OK) { - const char *str = itr->value.GetString(); - if (strcmp(str, "now()") == 0) + if (stmt) { - values.append(SQLITE3_NOW); + sqlite3_finalize(stmt); } - else + raiseError("insert", sqlite3_errmsg(dbHandle)); + Logger::getLogger()->error("SQL statement: %s", query); + delete[] query; + return -1; + } + delete[] query; + + // Bind columns with prepared sql query + int columID = 1; + for (Value::ConstMemberIterator itr = (*iter).MemberBegin(); + itr != (*iter).MemberEnd(); + ++itr) + { + + if (itr->value.IsString()) { - values.append('\''); - values.append(escape(str)); - values.append('\''); + const char *str = itr->value.GetString(); + if (strcmp(str, "now()") == 0) + { + sqlite3_bind_text(stmt, columID, SQLITE3_NOW, -1, SQLITE_TRANSIENT); + } + else + { + sqlite3_bind_text(stmt, columID, escape(str).c_str(), -1, SQLITE_TRANSIENT); + } } + else if (itr->value.IsDouble()) { + sqlite3_bind_double(stmt, columID,itr->value.GetDouble()); + } + + else if (itr->value.IsInt64()) + { + sqlite3_bind_int(stmt, columID,(long)itr->value.GetInt64()); + } + + else if (itr->value.IsInt()) + { + sqlite3_bind_int(stmt, columID,itr->value.GetInt()); + } + + else if (itr->value.IsObject()) + { + StringBuffer buffer; + Writer writer(buffer); + itr->value.Accept(writer); + sqlite3_bind_text(stmt, columID, buffer.GetString(), -1, SQLITE_TRANSIENT); + } + columID++ ; } - else if (itr->value.IsDouble()) - values.append(itr->value.GetDouble()); - else if (itr->value.IsInt64()) - values.append((long)itr->value.GetInt64()); - else if (itr->value.IsInt()) - values.append(itr->value.GetInt()); - else if (itr->value.IsObject()) + + if (sqlite3_exec(dbHandle, "BEGIN TRANSACTION", NULL, NULL, NULL) != SQLITE_OK) { - StringBuffer buffer; - Writer writer(buffer); - itr->value.Accept(writer); - values.append('\''); - values.append(escape(buffer.GetString())); - values.append('\''); + if (stmt) + { + 
sqlite3_clear_bindings(stmt); + sqlite3_reset(stmt); + sqlite3_finalize(stmt); + } + raiseError("insert", sqlite3_errmsg(dbHandle)); + return -1; } - col++; - } - sql.append(") VALUES ("); - const char *vals = values.coalesce(); - sql.append(vals); - delete[] vals; - sql.append(");"); + m_writeAccessOngoing.fetch_add(1); + + int sqlite3_resut = SQLstep(stmt); + + m_writeAccessOngoing.fetch_sub(1); + + if (sqlite3_resut != SQLITE_DONE) + { + failedInsertCount++; + raiseError("insert", sqlite3_errmsg(dbHandle)); + Logger::getLogger()->error("SQL statement: %s", sqlite3_expanded_sql(stmt)); + + // transaction is still open, do rollback + if (sqlite3_get_autocommit(dbHandle) == 0) + { + rc = sqlite3_exec(dbHandle,"ROLLBACK TRANSACTION;",NULL,NULL,NULL); + if (rc != SQLITE_OK) + { + raiseError("insert rollback", sqlite3_errmsg(dbHandle)); + } + + } + } + sqlite3_clear_bindings(stmt); + sqlite3_reset(stmt); + + + if (sqlite3_resut == SQLITE_DONE && sqlite3_exec(dbHandle, "COMMIT TRANSACTION", NULL, NULL, NULL) != SQLITE_OK) + { + if (stmt) + { + sqlite3_finalize(stmt); + } + raiseError("insert", sqlite3_errmsg(dbHandle)); + return -1; + } + sqlite3_finalize(stmt); + } // Increment row count ins++; + } - sql.append("COMMIT TRANSACTION;"); - const char *query = sql.coalesce(); - logSQL("CommonInsert", query); - char *zErrMsg = NULL; - int rc; - - // Exec INSERT statement: no callback, no result set - m_writeAccessOngoing.fetch_add(1); - rc = SQLexec(dbHandle, - query, - NULL, - NULL, - &zErrMsg); - m_writeAccessOngoing.fetch_sub(1); if (m_writeAccessOngoing == 0) db_cv.notify_all(); - // Check exec result - if (rc != SQLITE_OK ) + if (failedInsertCount) { - raiseError("insert", zErrMsg); - Logger::getLogger()->error("SQL statement: %s", query); - sqlite3_free(zErrMsg); - - // transaction is still open, do rollback - if (sqlite3_get_autocommit(dbHandle) == 0) - { - rc = SQLexec(dbHandle, - "ROLLBACK TRANSACTION;", - NULL, - NULL, - &zErrMsg); - if (rc != SQLITE_OK) - { - raiseError("insert rollback", zErrMsg); - sqlite3_free(zErrMsg); - } - } - - Logger::getLogger()->error("SQL statement: %s", query); - // Release memory for 'query' var - delete[] query; - - // Failure - return -1; + char buf[100]; + snprintf(buf, sizeof(buf), + "Not all inserts into table '%s.%s' within transaction succeeded", + schema.c_str(), table.c_str()); + raiseError("insert", buf); } - else - { - // Release memory for 'query' var - delete[] query; - - int insert = sqlite3_changes(dbHandle); - if (insert == 0) - { - char buf[100]; - snprintf(buf, sizeof(buf), - "Not all inserts into table '%s.%s' within transaction succeeded", - schema.c_str(), table.c_str()); - raiseError("insert", buf); - } - - // Return the status - return (insert ? ins : -1); - } + return (!failedInsertCount ? 
ins : -1); } #endif @@ -1732,7 +1790,7 @@ bool allowZero = false; // Exec the UPDATE statement: no callback, no result set m_writeAccessOngoing.fetch_add(1); - rc = SQLexec(dbHandle, + rc = SQLexec(dbHandle, table, query, NULL, NULL, @@ -1748,7 +1806,7 @@ bool allowZero = false; sqlite3_free(zErrMsg); if (sqlite3_get_autocommit(dbHandle)==0) // transaction is still open, do rollback { - rc=SQLexec(dbHandle, + rc=SQLexec(dbHandle, table, "ROLLBACK TRANSACTION;", NULL, NULL, @@ -3052,15 +3110,21 @@ void Connection::logSQL(const char *tag, const char *stmt) * @param cbArg Callback 1st argument * @param errmsg Locaiton to write error message */ -int Connection::SQLexec(sqlite3 *db, const char *sql, int (*callback)(void*,int,char**,char**), +int Connection::SQLexec(sqlite3 *db, const string& table, const char *sql, int (*callback)(void*,int,char**,char**), void *cbArg, char **errmsg) { int retries = 0, rc; + *errmsg = NULL; do { #if DO_PROFILE ProfileItem *prof = new ProfileItem(sql); #endif + if (*errmsg) + { + sqlite3_free(*errmsg); + *errmsg = NULL; + } rc = sqlite3_exec(db, sql, callback, cbArg, errmsg); #if DO_PROFILE prof->complete(); @@ -3093,7 +3157,7 @@ int retries = 0, rc; { int rc2; char *zErrMsg = NULL; - rc2=SQLexec(db, + rc2=SQLexec(db, table, "ROLLBACK TRANSACTION;", NULL, NULL, @@ -3128,22 +3192,31 @@ int retries = 0, rc; if (rc == SQLITE_LOCKED) { - Logger::getLogger()->error("Database still locked after maximum retries"); + Logger::getLogger()->error("Database still locked after maximum retries, executing %s operation on %s", operation(sql).c_str(), table.c_str()); } if (rc == SQLITE_BUSY) { - Logger::getLogger()->error("Database still busy after maximum retries"); + Logger::getLogger()->error("Database still busy after maximum retries, executing %s operation on %s", operation(sql).c_str(), table.c_str()); } if (rc != SQLITE_OK) { - Logger::getLogger()->error("Database error after maximum retries - dbHandle :%X:", this->getDbHandle()); + Logger::getLogger()->error("Database error after maximum retries, executing %s operation on %s", operation(sql).c_str(), table.c_str()); } return rc; } #endif +/** + * Execute a step command on a prepared statement but add the ability to retry on error. + * + * It is assumed that binding has already taken place and that those bound + * vaiables are maintained for all retries. 
+ * + * @param statement The prepared statement to step + * @return int The status of the final sqlite3_step that was issued + */ int Connection::SQLstep(sqlite3_stmt *statement) { int retries = 0, rc; @@ -3152,6 +3225,10 @@ int retries = 0, rc; #if DO_PROFILE ProfileItem *prof = new ProfileItem(sqlite3_sql(statement)); #endif + if (retries) + { + sqlite3_reset(statement); + } rc = sqlite3_step(statement); #if DO_PROFILE prof->complete(); @@ -3257,7 +3334,7 @@ vector asset_codes; // Exec the DELETE statement: no callback, no result set m_writeAccessOngoing.fetch_add(1); - rc = SQLexec(dbHandle, + rc = SQLexec(dbHandle, table, query, NULL, NULL, @@ -3305,7 +3382,7 @@ int Connection::create_table_snapshot(const string& table, const string& id) logSQL("CreateTableSnapshot", query.c_str()); char* zErrMsg = NULL; - int rc = SQLexec(dbHandle, + int rc = SQLexec(dbHandle, table, query.c_str(), NULL, NULL, @@ -3343,7 +3420,7 @@ int Connection::load_table_snapshot(const string& table, const string& id) logSQL("LoadTableSnapshot", query.c_str()); char* zErrMsg = NULL; - int rc = SQLexec(dbHandle, + int rc = SQLexec(dbHandle, table, query.c_str(), NULL, NULL, @@ -3362,7 +3439,7 @@ int Connection::load_table_snapshot(const string& table, const string& id) // transaction is still open, do rollback if (sqlite3_get_autocommit(dbHandle) == 0) { - rc = SQLexec(dbHandle, + rc = SQLexec(dbHandle, table, "ROLLBACK TRANSACTION;", NULL, NULL, @@ -3392,7 +3469,7 @@ int Connection::delete_table_snapshot(const string& table, const string& id) logSQL("DeleteTableSnapshot", query.c_str()); char* zErrMsg = NULL; - int rc = SQLexec(dbHandle, + int rc = SQLexec(dbHandle, table, query.c_str(), NULL, NULL, @@ -3435,7 +3512,7 @@ SQLBuffer sql; const char *query = sql.coalesce(); char *zErrMsg = NULL; int rc; - sqlite3_stmt *stmt; + sqlite3_stmt *stmt = NULL; logSQL("GetTableSnapshots", query); @@ -3446,6 +3523,8 @@ SQLBuffer sql; { raiseError("get_table_snapshots", sqlite3_errmsg(dbHandle)); Logger::getLogger()->error("SQL statement: %s", query); + if (stmt) + sqlite3_finalize(stmt); delete[] query; return false; } @@ -3754,7 +3833,7 @@ bool Connection::vacuum() { char* zErrMsg = NULL; // Exec the statement - int rc = SQLexec(dbHandle, "VACUUM;", NULL, NULL, &zErrMsg); + int rc = SQLexec(dbHandle, "", "VACUUM;", NULL, NULL, &zErrMsg); // Check result if (rc != SQLITE_OK) @@ -3772,3 +3851,20 @@ bool Connection::vacuum() return true; } #endif + +/** + * Return the first word in a SQL statement, ie the operation that is beign executed. + * + * @param sql The complete SQL statement + * @return string The operation + */ +string Connection::operation(const char *sql) +{ + const char *p1 = sql; + char buf[40], *p2 = buf; + while (*p1 && !isspace(*p1) && p2 - buf < 40) + *p2++ = *p1++; + *p2 = '\0'; + return string(buf); + +} diff --git a/C/plugins/storage/sqlite/common/connection_manager.cpp b/C/plugins/storage/sqlite/common/connection_manager.cpp index ee417de40f..d841322ac5 100644 --- a/C/plugins/storage/sqlite/common/connection_manager.cpp +++ b/C/plugins/storage/sqlite/common/connection_manager.cpp @@ -9,9 +9,12 @@ */ #include #include +#include +#include #include #include +#include #include ConnectionManager *ConnectionManager::instance = 0; @@ -28,7 +31,9 @@ static void managerBackground(void *arg) /** * Default constructor for the connection manager. 
*/ -ConnectionManager::ConnectionManager() : m_shutdown(false), m_vacuumInterval(6 * 60 * 60) +ConnectionManager::ConnectionManager() : m_shutdown(false), + m_vacuumInterval(6 * 60 * 60), + m_attachedDatabases(0) { lastError.message = NULL; lastError.entryPoint = NULL; @@ -37,6 +42,11 @@ ConnectionManager::ConnectionManager() : m_shutdown(false), m_vacuumInterval(6 * else m_trace = false; m_background = new std::thread(managerBackground, this); + + struct rlimit lim; + getrlimit(RLIMIT_NOFILE, &lim); + m_descriptorLimit = lim.rlim_cur; + } @@ -73,14 +83,39 @@ ConnectionManager *ConnectionManager::getInstance() */ void ConnectionManager::growPool(unsigned int delta) { + int poolSize = idle.size() + inUse.size(); + + if ((delta + poolSize) * m_attachedDatabases * NO_DESCRIPTORS_PER_DB + > (DESCRIPTOR_THRESHOLD * m_descriptorLimit) / 100) + { + Logger::getLogger()->warn("Request to grow database connection pool rejected" + " due to excessive file descriptor usage"); + return; + } + int failures = 0; while (delta-- > 0) { - Connection *conn = new Connection(); - if (m_trace) - conn->setTrace(true); + try { + Connection *conn = new Connection(); + if (m_trace) + conn->setTrace(true); + idleLock.lock(); + idle.push_back(conn); + idleLock.unlock(); + } catch (...) { + failures++; + } + } + if (failures > 0) + { idleLock.lock(); - idle.push_back(conn); + int idleCount = idle.size(); idleLock.unlock(); + inUseLock.lock(); + int inUseCount = inUse.size(); + inUseLock.unlock(); + Logger::getLogger()->warn("Connection pool growth restricted due to failure to create %d connections, %d idle connections & %d connection in use currently", failures, idleCount, inUseCount); + noConnectionsDiagnostic(); } } @@ -125,7 +160,13 @@ Connection *conn = 0; idleLock.lock(); if (idle.empty()) { - conn = new Connection(); + try { + conn = new Connection(); + } catch (...) { + conn = NULL; + Logger::getLogger()->error("Failed to create database connection to allocate"); + noConnectionsDiagnostic(); + } } else { @@ -156,6 +197,14 @@ bool ConnectionManager::attachNewDb(std::string &path, std::string &alias) bool result; char *zErrMsg = NULL; + int poolSize = idle.size() + inUse.size(); + if (poolSize * m_attachedDatabases * NO_DESCRIPTORS_PER_DB + > (DESCRIPTOR_THRESHOLD * m_descriptorLimit) / 100) + { + Logger::getLogger()->warn("Request to attach new database rejected" + " due to excessive file descriptor usage"); + return false; + } result = true; sqlCmd = "ATTACH DATABASE '" + path + "' AS " + alias + ";"; @@ -164,48 +213,49 @@ bool ConnectionManager::attachNewDb(std::string &path, std::string &alias) inUseLock.lock(); // attach the DB to all idle connections + for (auto conn : idle) { + dbHandle = conn->getDbHandle(); + rc = SQLExec (dbHandle, sqlCmd.c_str(), &zErrMsg); + if (rc != SQLITE_OK) + { + Logger::getLogger()->error("attachNewDb - It was not possible to attach the db :%s: to an idle connection, error :%s:", path.c_str(), zErrMsg); + sqlite3_free(zErrMsg); + result = false; + // TODO We are potentially left in an inconsistant state with the new database + // attached to some connections but not all. 
+ break; + } + - for ( auto conn : idle) { + Logger::getLogger()->debug("attachNewDb idle dbHandle :%X: sqlCmd :%s: ", dbHandle, sqlCmd.c_str()); + } + + if (result) + { + // attach the DB to all inUse connections + for (auto conn : inUse) + { dbHandle = conn->getDbHandle(); rc = SQLExec (dbHandle, sqlCmd.c_str(), &zErrMsg); if (rc != SQLITE_OK) { - Logger::getLogger()->error("attachNewDb - It was not possible to attach the db :%s: to an idle connection, error :%s:", path.c_str(), zErrMsg); + Logger::getLogger()->error("attachNewDb - It was not possible to attach the db :%s: to an inUse connection, error :%s:", path.c_str() ,zErrMsg); sqlite3_free(zErrMsg); result = false; + // TODO We are potentially left in an inconsistant state with the new + // database attached to some connections but not all. break; } - Logger::getLogger()->debug("attachNewDb idle dbHandle :%X: sqlCmd :%s: ", dbHandle, sqlCmd.c_str()); - + Logger::getLogger()->debug("attachNewDb inUse dbHandle :%X: sqlCmd :%s: ", dbHandle, sqlCmd.c_str()); } } + m_attachedDatabases++; - if (result) - { - // attach the DB to all inUse connections - { - - for ( auto conn : inUse) { - - dbHandle = conn->getDbHandle(); - rc = SQLExec (dbHandle, sqlCmd.c_str(), &zErrMsg); - if (rc != SQLITE_OK) - { - Logger::getLogger()->error("attachNewDb - It was not possible to attach the db :%s: to an inUse connection, error :%s:", path.c_str() ,zErrMsg); - sqlite3_free(zErrMsg); - result = false; - break; - } - - Logger::getLogger()->debug("attachNewDb inUse dbHandle :%X: sqlCmd :%s: ", dbHandle, sqlCmd.c_str()); - } - } - } - idleLock.unlock(); inUseLock.unlock(); + idleLock.unlock(); return (result); } @@ -231,44 +281,41 @@ bool ConnectionManager::detachNewDb(std::string &alias) inUseLock.lock(); // attach the DB to all idle connections + for (auto conn : idle) { - for ( auto conn : idle) { + dbHandle = conn->getDbHandle(); + rc = SQLExec (dbHandle, sqlCmd.c_str(), &zErrMsg); + if (rc != SQLITE_OK) + { + Logger::getLogger()->error("detachNewDb - It was not possible to detach the db :%s: from an idle connection, error :%s:", alias.c_str(), zErrMsg); + sqlite3_free(zErrMsg); + result = false; + break; + } + Logger::getLogger()->debug("detachNewDb - idle dbHandle :%X: sqlCmd :%s: ", dbHandle, sqlCmd.c_str()); + } + if (result) + { + // attach the DB to all inUse connections + for (auto conn : inUse) + { dbHandle = conn->getDbHandle(); rc = SQLExec (dbHandle, sqlCmd.c_str(), &zErrMsg); if (rc != SQLITE_OK) { - Logger::getLogger()->error("detachNewDb - It was not possible to detach the db :%s: from an idle connection, error :%s:", alias.c_str(), zErrMsg); + Logger::getLogger()->error("detachNewDb - It was not possible to detach the db :%s: from an inUse connection, error :%s:", alias.c_str() ,zErrMsg); sqlite3_free(zErrMsg); result = false; break; } - Logger::getLogger()->debug("detachNewDb - idle dbHandle :%X: sqlCmd :%s: ", dbHandle, sqlCmd.c_str()); + Logger::getLogger()->debug("detachNewDb - inUse dbHandle :%X: sqlCmd :%s: ", dbHandle, sqlCmd.c_str()); } } + m_attachedDatabases--; - if (result) - { - // attach the DB to all inUse connections - { - - for ( auto conn : inUse) { - - dbHandle = conn->getDbHandle(); - rc = SQLExec (dbHandle, sqlCmd.c_str(), &zErrMsg); - if (rc != SQLITE_OK) - { - Logger::getLogger()->error("detachNewDb - It was not possible to detach the db :%s: from an inUse connection, error :%s:", alias.c_str() ,zErrMsg); - sqlite3_free(zErrMsg); - result = false; - break; - } - Logger::getLogger()->debug("detachNewDb - inUse dbHandle 
:%X: sqlCmd :%s: ", dbHandle, sqlCmd.c_str()); - } - } - } - idleLock.unlock(); inUseLock.unlock(); + idleLock.unlock(); return (result); } @@ -289,28 +336,33 @@ bool ConnectionManager::attachRequestNewDb(int newDbId, sqlite3 *dbHandle) bool result; char *zErrMsg = NULL; + int poolSize = idle.size() + inUse.size(); + if (poolSize * m_attachedDatabases * NO_DESCRIPTORS_PER_DB + > (DESCRIPTOR_THRESHOLD * m_descriptorLimit) / 100) + { + Logger::getLogger()->warn("Request to attach nwe database rejected" + " due to excessive file descriptor usage"); + return false; + } result = true; idleLock.lock(); inUseLock.lock(); // attach the DB to all idle connections + for (auto conn : idle) { + if (dbHandle == conn->getDbHandle()) + { + Logger::getLogger()->debug("attachRequestNewDb - idle skipped dbHandle :%X: sqlCmd :%s: ", conn->getDbHandle(), sqlCmd.c_str()); - for ( auto conn : idle) { - - if (dbHandle == conn->getDbHandle()) - { - Logger::getLogger()->debug("attachRequestNewDb - idle skipped dbHandle :%X: sqlCmd :%s: ", conn->getDbHandle(), sqlCmd.c_str()); - - } else - { - conn->setUsedDbId(newDbId); - - Logger::getLogger()->debug("attachRequestNewDb - idle, dbHandle :%X: sqlCmd :%s: ", conn->getDbHandle(), sqlCmd.c_str()); - } + } else + { + conn->setUsedDbId(newDbId); + Logger::getLogger()->debug("attachRequestNewDb - idle, dbHandle :%X: sqlCmd :%s: ", conn->getDbHandle(), sqlCmd.c_str()); } + } if (result) @@ -318,7 +370,7 @@ bool ConnectionManager::attachRequestNewDb(int newDbId, sqlite3 *dbHandle) // attach the DB to all inUse connections { - for ( auto conn : inUse) { + for (auto conn : inUse) { if (dbHandle == conn->getDbHandle()) { @@ -332,8 +384,10 @@ bool ConnectionManager::attachRequestNewDb(int newDbId, sqlite3 *dbHandle) } } } - idleLock.unlock(); + m_attachedDatabases++; + inUseLock.unlock(); + idleLock.unlock(); return (result); } @@ -347,6 +401,9 @@ bool ConnectionManager::attachRequestNewDb(int newDbId, sqlite3 *dbHandle) */ void ConnectionManager::release(Connection *conn) { +#if TRACK_CONNECTION_USER + conn->clearUsage(); +#endif inUseLock.lock(); inUse.remove(conn); inUseLock.unlock(); @@ -391,6 +448,11 @@ int ConnectionManager::SQLExec(sqlite3 *dbHandle, const char *sqlCmd, char **err } else { + if (*errMsg) + { + sqlite3_free(*errMsg); + *errMsg = NULL; + } rc = sqlite3_exec(dbHandle, sqlCmd, NULL, NULL, errMsg); Logger::getLogger()->debug("SQLExec: rc :%d: ", rc); } @@ -446,3 +508,40 @@ void ConnectionManager::background() } } } + +/** + * Determine if we can allow another database to be created and attached to all the + * connections. + * + * @return True if we can create anotehr database. 
+ */
+bool ConnectionManager::allowMoreDatabases()
+{
+	// Allow for a couple of user defined schemas as well as the fledge database
+	if (m_attachedDatabases + 4 > ReadingsCatalogue::getInstance()->getMaxAttached())
+	{
+		return false;
+	}
+	int poolSize = idle.size() + inUse.size();
+	if (poolSize * (m_attachedDatabases + 1) * NO_DESCRIPTORS_PER_DB
+			> (DESCRIPTOR_THRESHOLD * m_descriptorLimit) / 100)
+	{
+		return false;
+	}
+	return true;
+}
+
+void ConnectionManager::noConnectionsDiagnostic()
+{
+#if TRACK_CONNECTION_USER
+	Logger *logger = Logger::getLogger();
+
+	inUseLock.lock();
+	logger->warn("There are %d connections in use currently", inUse.size());
+	for (auto conn : inUse)
+	{
+		logger->warn("    Connection in use by %s", conn->getUsage().c_str());
+	}
+	inUseLock.unlock();
+#endif
+}
diff --git a/C/plugins/storage/sqlite/common/include/connection.h b/C/plugins/storage/sqlite/common/include/connection.h
index 62cef54a34..899bc30a96 100644
--- a/C/plugins/storage/sqlite/common/include/connection.h
+++ b/C/plugins/storage/sqlite/common/include/connection.h
@@ -21,28 +21,13 @@
 #include 
 #include 
 
-#define _DB_NAME "/fledge.db"
-#define READINGS_DB_NAME_BASE "readings"
+#define TRACK_CONNECTION_USER	0	// Set to 1 to get diagnostics about connection pool use
+
 #define READINGS_DB_FILE_NAME "/" READINGS_DB_NAME_BASE "_1.db"
 #define READINGS_DB READINGS_DB_NAME_BASE "_1"
 #define READINGS_TABLE "readings"
 #define READINGS_TABLE_MEM READINGS_TABLE "_1"
 
-#define LEN_BUFFER_DATE 100
-#define F_TIMEH24_S "%H:%M:%S"
-#define F_DATEH24_S "%Y-%m-%d %H:%M:%S"
-#define F_DATEH24_M "%Y-%m-%d %H:%M"
-#define F_DATEH24_H "%Y-%m-%d %H"
-// This is the default datetime format in Fledge: 2018-05-03 18:15:00.622
-#define F_DATEH24_MS "%Y-%m-%d %H:%M:%f"
-// Format up to seconds
-#define F_DATEH24_SEC "%Y-%m-%d %H:%M:%S"
-#define SQLITE3_NOW "strftime('%Y-%m-%d %H:%M:%f', 'now', 'localtime')"
-// The default precision is milliseconds, it adds microseconds and timezone
-#define SQLITE3_NOW_READING "strftime('%Y-%m-%d %H:%M:%f000+00:00', 'now')"
-#define SQLITE3_FLEDGE_DATETIME_TYPE "DATETIME"
-
-#define DB_CONFIGURATION "PRAGMA busy_timeout = 5000; PRAGMA cache_size = -4000; PRAGMA journal_mode = WAL; PRAGMA secure_delete = off; PRAGMA journal_size_limit = 4096000;"
 
 // Set plugin name for log messages
 #ifndef PLUGIN_LOG_NAME
@@ -160,9 +145,15 @@ class Connection {
 		void		shutdownAppendReadings();
 		unsigned int	purgeReadingsAsset(const std::string& asset);
 		bool		vacuum();
+		bool		supportsReadings() { return ! 
m_noReadings; }; +#if TRACK_CONNECTION_USER + void setUsage(std::string usage) { m_usage = usage; }; + void clearUsage() { m_usage = ""; }; + std::string getUsage() { return m_usage; }; +#endif private: - + std::string operation(const char *sql); std::vector m_NewDbIdList; // Newly created databases that should be attached @@ -170,7 +161,7 @@ class Connection { int m_queuing; std::mutex m_qMutex; int SQLPrepare(sqlite3 *dbHandle, const char *sqlCmd, sqlite3_stmt **readingsStmt); - int SQLexec(sqlite3 *db, const char *sql, + int SQLexec(sqlite3 *db, const std::string& table, const char *sql, int (*callback)(void*,int,char**,char**), void *cbArg, char **errmsg); @@ -213,6 +204,10 @@ class Connection { bool selectColumns(const rapidjson::Value& document, SQLBuffer& sql, int level); bool appendTables(const std::string& schema, const rapidjson::Value& document, SQLBuffer& sql, int level); bool processJoinQueryWhereClause(const rapidjson::Value& query, SQLBuffer& sql, std::vector &asset_codes, int level); + bool m_noReadings; +#if TRACK_CONNECTION_USER + std::string m_usage; +#endif }; #endif diff --git a/C/plugins/storage/sqlite/common/include/connection_manager.h b/C/plugins/storage/sqlite/common/include/connection_manager.h index 2d33cc81fe..7a448ab8ca 100644 --- a/C/plugins/storage/sqlite/common/include/connection_manager.h +++ b/C/plugins/storage/sqlite/common/include/connection_manager.h @@ -17,6 +17,9 @@ #include #include +#define NO_DESCRIPTORS_PER_DB 3 // 3 deascriptors per database when using WAL mode +#define DESCRIPTOR_THRESHOLD 75 // Percentage of descriptors that can be used on database connections + class Connection; /** @@ -30,7 +33,7 @@ class ConnectionManager { Connection *allocate(); bool attachNewDb(std::string &path, std::string &alias); bool attachRequestNewDb(int newDbId, sqlite3 *dbHandle); - bool detachNewDb(std::string &alias); + bool detachNewDb(std::string &alias); void release(Connection *); void shutdown(); void setError(const char *, const char *, bool); @@ -39,16 +42,20 @@ class ConnectionManager { return &lastError; } void background(); - void setVacuumInterval(long hours) { - m_vacuumInterval = 60 * 60 * hours; - }; + void setVacuumInterval(long hours) + { + m_vacuumInterval = 60 * 60 * hours; + }; + bool allowMoreDatabases(); protected: ConnectionManager(); private: static ConnectionManager *instance; - int SQLExec(sqlite3 *dbHandle, const char *sqlCmd, char **errMsg); + int SQLExec(sqlite3 *dbHandle, const char *sqlCmd, + char **errMsg); + void noConnectionsDiagnostic(); protected: std::list idle; @@ -61,6 +68,8 @@ class ConnectionManager { bool m_shutdown; std::thread *m_background; long m_vacuumInterval; + unsigned int m_descriptorLimit; + unsigned int m_attachedDatabases; }; #endif diff --git a/C/plugins/storage/sqlite/common/include/readings_catalogue.h b/C/plugins/storage/sqlite/common/include/readings_catalogue.h index 0bddc55863..4562bb066f 100644 --- a/C/plugins/storage/sqlite/common/include/readings_catalogue.h +++ b/C/plugins/storage/sqlite/common/include/readings_catalogue.h @@ -13,6 +13,8 @@ #include "connection.h" #include +#define OVERFLOW_TABLE_ID 0 // Table ID to use for the overflow table + /** * This class handles per thread started transaction boundaries: */ @@ -131,24 +133,29 @@ class ReadingsCatalogue { void preallocateReadingsTables(int dbId); bool loadAssetReadingCatalogue(); + bool loadEmptyAssetReadingCatalogue(bool clean = true); bool latestDbUpdate(sqlite3 *dbHandle, int newDbId); - void preallocateNewDbsRange(int dbIdStart, int 
dbIdEnd); - bool getEmptyReadingTableReference(tyReadingReference& emptyTableReference); + int preallocateNewDbsRange(int dbIdStart, int dbIdEnd); + tyReadingReference getEmptyReadingTableReference(std::string& asset); tyReadingReference getReadingReference(Connection *connection, const char *asset_code); bool attachDbsToAllConnections(); std::string sqlConstructMultiDb(std::string &sqlCmdBase, std::vector &assetCodes, bool considerExclusion=false); + std::string sqlConstructOverflow(std::string &sqlCmdBase, std::vector &assetCodes, bool considerExclusion=false, bool groupBy = false); int purgeAllReadings(sqlite3 *dbHandle, const char *sqlCmdBase, char **errMsg = NULL, unsigned long *rowsAffected = NULL); bool connectionAttachAllDbs(sqlite3 *dbHandle); bool connectionAttachDbList(sqlite3 *dbHandle, std::vector &dbIdList); - bool attachDb(sqlite3 *dbHandle, std::string &path, std::string &alias); + bool attachDb(sqlite3 *dbHandle, std::string &path, std::string &alias, int dbId); void detachDb(sqlite3 *dbHandle, std::string &alias); void setUsedDbId(int dbId); int extractReadingsIdFromName(std::string tableName); int extractDbIdFromName(std::string tableName); int SQLExec(sqlite3 *dbHandle, const char *sqlCmd, char **errMsg = NULL); + bool createReadingsOverflowTable(sqlite3 *dbHandle, int dbId); + int getMaxAttached() { return m_attachLimit; }; + private: STORAGE_CONFIGURATION m_storageConfigCurrent; // The current configuration of the multiple readings @@ -176,7 +183,7 @@ class ReadingsCatalogue { } tyReadingsAvailable; - ReadingsCatalogue(){}; + ReadingsCatalogue(); bool createNewDB(sqlite3 *dbHandle, int newDbId, int startId, NEW_DB_OPERATION attachAllDb); int getUsedTablesDbId(int dbId); @@ -199,7 +206,7 @@ class ReadingsCatalogue { void dbsRemove(int startId, int endId); void storeReadingsConfiguration (sqlite3 *dbHandle); ACTION changesLogicDBs(int dbIdCurrent , int dbIdLast, int nDbPreallocateCurrent, int nDbPreallocateRequest, int nDbLeftFreeBeforeAllocate); - ACTION changesLogicTables(int maxUsed ,int Current, int Request); + ACTION changesLogicTables(int maxUsed ,int Current, int Request); int retrieveDbIdFromTableId(int tableId); void configChangeAddDb(sqlite3 *dbHandle); @@ -211,18 +218,30 @@ class ReadingsCatalogue { void dropReadingsTables(sqlite3 *dbHandle, int dbId, int idStart, int idEnd); - int m_dbIdCurrent; // Current database in use - int m_dbIdLast; // Last database available not already in use - int m_dbNAvailable; // Number of databases available - std::vector m_dbIdList; // Databases already created but not in use + int m_dbIdCurrent; // Current database in use + int m_dbIdLast; // Last database available not already in use + int m_dbNAvailable; // Number of databases available + std::vector + m_dbIdList; // Databases already created but not in use - std::atomic m_ReadingsGlobalId; // Global row id shared among all the readings table - int m_nReadingsAvailable = 0; // Number of readings tables available + std::atomic + m_ReadingsGlobalId; // Global row id shared among all the readings table + int + m_nReadingsAvailable = 0; // Number of readings tables available std::map > m_AssetReadingCatalogue={ // In memory structure to identify in which database/table an asset is stored // asset_code - reading Table Id, Db Id // {"", ,{1 ,1 }} }; + std::map > m_EmptyAssetReadingCatalogue={ // In memory structure to identify in which database/table an asset is empty + // asset_code - reading Table Id, Db Id + // {"", ,{1 ,1 }} + }; + int m_nextOverflow; // The next database to 
use for overflow assets + int m_attachLimit; + int m_maxOverflowUsed; + int m_compounds; // Max number of compound statements + std::mutex m_emptyReadingTableMutex; public: TransactionBoundary m_tx; diff --git a/C/plugins/storage/sqlite/common/include/common.h b/C/plugins/storage/sqlite/common/include/sqlite_common.h similarity index 56% rename from C/plugins/storage/sqlite/common/include/common.h rename to C/plugins/storage/sqlite/common/include/sqlite_common.h index a6c168dd9e..c55db37077 100644 --- a/C/plugins/storage/sqlite/common/include/common.h +++ b/C/plugins/storage/sqlite/common/include/sqlite_common.h @@ -23,6 +23,29 @@ #include #include +#define _DB_NAME "/fledge.db" +#define READINGS_DB_NAME_BASE "readings" + +#define DB_CONFIGURATION "PRAGMA busy_timeout = 5000; PRAGMA cache_size = -4000; PRAGMA journal_mode = WAL; PRAGMA secure_delete = off; PRAGMA journal_size_limit = 4096000;" + +#define LEN_BUFFER_DATE 100 +#define F_TIMEH24_S "%H:%M:%S" +#define F_DATEH24_S "%Y-%m-%d %H:%M:%S" +#define F_DATEH24_M "%Y-%m-%d %H:%M" +#define F_DATEH24_H "%Y-%m-%d %H" +// This is the default datetime format in Fledge: 2018-05-03 18:15:00.622 +#define F_DATEH24_MS "%Y-%m-%d %H:%M:%f" +// Format up to seconds +#define F_DATEH24_SEC "%Y-%m-%d %H:%M:%S" +#define SQLITE3_NOW "strftime('%Y-%m-%d %H:%M:%f', 'now', 'localtime')" +// The default precision is milliseconds, it adds microseconds and timezone +#define SQLITE3_NOW_READING "strftime('%Y-%m-%d %H:%M:%f000+00:00', 'now')" +#define SQLITE3_FLEDGE_DATETIME_TYPE "DATETIME" + +#define STORAGE_PURGE_RETAIN_ANY 0x0001U +#define STORAGE_PURGE_RETAIN_ALL 0x0002U +#define STORAGE_PURGE_SIZE 0x0004U + static std::map sqliteDateFormat = { {"HH24:MI:SS", F_TIMEH24_S}, diff --git a/C/plugins/storage/sqlite/common/readings.cpp b/C/plugins/storage/sqlite/common/readings.cpp index d729baca6e..71b214cb0c 100644 --- a/C/plugins/storage/sqlite/common/readings.cpp +++ b/C/plugins/storage/sqlite/common/readings.cpp @@ -9,9 +9,9 @@ */ #include +#include #include #include -#include #include #include #include @@ -416,6 +416,12 @@ int Connection::readingStream(ReadingStream **readings, bool commit) int sqlite3_resut; int rowNumber = -1; + if (m_noReadings) + { + Logger::getLogger()->error("Attempt to stream readings to plugin that has no storage for readings"); + return 0; + } + ostringstream threadId; threadId << std::this_thread::get_id(); ReadingsCatalogue *readCatalogue = ReadingsCatalogue::getInstance(); @@ -495,7 +501,7 @@ int Connection::readingStream(ReadingStream **readings, bool commit) { if (!formatDate(formatted_date, sizeof(formatted_date), user_ts)) { - raiseError("appendReadings", "Invalid date |%s|", user_ts); + raiseError("appendReadings", "Invalid date '%s'", user_ts); add_row = false; } else @@ -533,7 +539,7 @@ int Connection::readingStream(ReadingStream **readings, bool commit) sleep_time_ms = PREP_CMD_RETRY_BASE + (random() % PREP_CMD_RETRY_BACKOFF); retries++; - Logger::getLogger()->info("SQLITE_LOCKED - record :%d: - retry number :%d: sleep time ms :%d:",i, retries, sleep_time_ms); + Logger::getLogger()->info("SQLITE_LOCKED - record %d - retry number %d sleep time ms %d",i, retries, sleep_time_ms); std::this_thread::sleep_for(std::chrono::milliseconds(sleep_time_ms)); } @@ -545,7 +551,7 @@ int Connection::readingStream(ReadingStream **readings, bool commit) sleep_time_ms = PREP_CMD_RETRY_BASE + (random() % PREP_CMD_RETRY_BACKOFF); retries++; - Logger::getLogger()->info("SQLITE_BUSY - thread :%s: - record :%d: - retry number :%d: sleep time ms 
:%d:", threadId.str().c_str() ,i , retries, sleep_time_ms); + Logger::getLogger()->info("SQLITE_BUSY - thread '%s' - record %d - retry number %d sleep time ms %d", threadId.str().c_str() ,i , retries, sleep_time_ms); std::this_thread::sleep_for(std::chrono::milliseconds(sleep_time_ms)); } @@ -561,7 +567,7 @@ int Connection::readingStream(ReadingStream **readings, bool commit) else { raiseError("appendReadings", - "Inserting a row into SQLIte using a prepared command - asset_code :%s: error :%s: reading :%s: ", + "Inserting a row into SQLIte using a prepared command - asset_code '%s' error '%s' reading '%s' ", asset_code, sqlite3_errmsg(dbHandle), reading.c_str()); @@ -577,7 +583,7 @@ int Connection::readingStream(ReadingStream **readings, bool commit) } catch (exception e) { - raiseError("appendReadings", "Inserting a row into SQLIte using a prepared command - error :%s:", e.what()); + raiseError("appendReadings", "Inserting a row into SQLIte using a prepared command - error '%s'", e.what()); sqlite3_exec(dbHandle, "ROLLBACK TRANSACTION", NULL, NULL, NULL); m_streamOpenTransaction = true; @@ -593,7 +599,7 @@ int Connection::readingStream(ReadingStream **readings, bool commit) sqlite3_resut = sqlite3_exec(dbHandle, "END TRANSACTION", NULL, NULL, NULL); if (sqlite3_resut != SQLITE_OK) { - raiseError("appendReadings", "Executing the commit of the transaction - error :%s:", sqlite3_errmsg(dbHandle)); + raiseError("appendReadings", "Executing the commit of the transaction - error '%s'", sqlite3_errmsg(dbHandle)); rowNumber = -1; } m_streamOpenTransaction = true; @@ -603,7 +609,7 @@ int Connection::readingStream(ReadingStream **readings, bool commit) { if (sqlite3_finalize(stmt) != SQLITE_OK) { - raiseError("appendReadings","freeing SQLite in memory structure - error :%s:", sqlite3_errmsg(dbHandle)); + raiseError("appendReadings","freeing SQLite in memory structure - error '%s'", sqlite3_errmsg(dbHandle)); } } @@ -621,7 +627,7 @@ int Connection::readingStream(ReadingStream **readings, bool commit) timersub(&t2, &t1, &tm); timeT2 = tm.tv_sec + ((double)tm.tv_usec / 1000000); - Logger::getLogger()->debug("readingStream row count :%d:", rowNumber); + Logger::getLogger()->debug("readingStream row count %d", rowNumber); Logger::getLogger()->debug("readingStream Timing - stream handling %.3f seconds - commit/finalize %.3f seconds", timeT1, @@ -644,20 +650,21 @@ void Connection::setUsedDbId(int dbId) { /** * Wait until all the threads executing the appendReadings are shutted down */ -void Connection::shutdownAppendReadings() { +void Connection::shutdownAppendReadings() +{ ostringstream threadId; threadId << std::this_thread::get_id(); - Logger::getLogger()->debug("%s - thread Id :%s: appendReadings shutting down started", __FUNCTION__, threadId.str().c_str()); + Logger::getLogger()->debug("%s - thread Id '%s' appendReadings shutting down started", __FUNCTION__, threadId.str().c_str()); m_shutdown=true; while (m_appendCount > 0) { - Logger::getLogger()->debug("%s - thread Id :%s: waiting threads to shut down, count :%d: ", __FUNCTION__, threadId.str().c_str(), int(m_appendCount)); + Logger::getLogger()->debug("%s - thread Id '%s' waiting threads to shut down, count %d ", __FUNCTION__, threadId.str().c_str(), int(m_appendCount)); std::this_thread::sleep_for(std::chrono::milliseconds(150)); } - Logger::getLogger()->debug("%s - thread Id :%s: appendReadings shutting down ended", __FUNCTION__, threadId.str().c_str()); + Logger::getLogger()->debug("%s - thread Id '%s' appendReadings shutting down ended", 
__FUNCTION__, threadId.str().c_str()); } @@ -696,39 +703,43 @@ int stmtArraySize; std::thread::id tid = std::this_thread::get_id(); ostringstream threadId; + if (m_noReadings) + { + Logger::getLogger()->error("Attempt to append readings to plugin that has no storage for readings"); + return 0; + } + threadId << tid; { if (m_shutdown) { - Logger::getLogger()->debug("%s - thread Id :%s: plugin is shutting down, operation cancelled", __FUNCTION__, threadId.str().c_str()); + Logger::getLogger()->debug("%s - thread Id '%s' plugin is shutting down, operation cancelled", __FUNCTION__, threadId.str().c_str()); return -1; } m_appendCount++; - Logger::getLogger()->debug("%s - thread Id :%s: operation started , threads count :%d: ", __FUNCTION__, threadId.str().c_str(), int(m_appendCount) ); + Logger::getLogger()->debug("%s - thread Id '%s' operation started , threads count %d ", __FUNCTION__, threadId.str().c_str(), int(m_appendCount) ); } ReadingsCatalogue *readCatalogue = ReadingsCatalogue::getInstance(); - { - // Attaches the needed databases if the queue is not empty - AttachDbSync *attachSync = AttachDbSync::getInstance(); - attachSync->lock(); + // Attaches the needed databases if the queue is not empty + AttachDbSync *attachSync = AttachDbSync::getInstance(); + attachSync->lock(); - if ( ! m_NewDbIdList.empty()) - { - readCatalogue->connectionAttachDbList(this->getDbHandle(), m_NewDbIdList); - } - attachSync->unlock(); + if ( ! m_NewDbIdList.empty()) + { + readCatalogue->connectionAttachDbList(this->getDbHandle(), m_NewDbIdList); } + attachSync->unlock(); stmtArraySize = readCatalogue->getReadingPosition(0, 0); vector readingsStmt(stmtArraySize + 1, nullptr); #if INSTRUMENT - Logger::getLogger()->debug("appendReadings start thread :%s:", threadId.str().c_str()); + Logger::getLogger()->debug("appendReadings start thread '%s'", threadId.str().c_str()); struct timeval start, t1, t2, t3, t4, t5; #endif @@ -810,19 +821,25 @@ ostringstream threadId; // Handles - asset_code asset_code = (*itr)["asset_code"].GetString(); + if (strlen(asset_code) == 0) + { + Logger::getLogger()->warn("Sqlite appendReadings - empty asset code value, row ignored."); + stmt = NULL; + } + //# A different asset is managed respect the previous one - if (lastAsset.compare(asset_code)!= 0) + if (strlen(asset_code) && lastAsset.compare(asset_code) != 0) { ReadingsCatalogue::tyReadingReference ref; ref = readCatalogue->getReadingReference(this, asset_code); readingsId = ref.tableId; - Logger::getLogger()->debug("tyReadingReference :%s: :%d: :%d: ", asset_code, ref.dbId, ref.tableId); + Logger::getLogger()->debug("tyReadingReference '%s' %d %d ", asset_code, ref.dbId, ref.tableId); if (readingsId == -1) { - Logger::getLogger()->warn("appendReadings - It was not possible to insert the row for the asset_code :%s: into the readings, row ignored.", asset_code); + Logger::getLogger()->warn("appendReadings - It was not possible to insert the row for the asset_code '%s' into the readings, row ignored.", asset_code); stmt = NULL; } else @@ -832,14 +849,14 @@ ostringstream threadId; nReadings = readCatalogue->getReadingsCount(); idxReadings = readCatalogue->getReadingPosition(ref.dbId, ref.tableId); - Logger::getLogger()->debug("tyReadingReference :%s: :%d: :%d: idxReadings :%d:", asset_code, ref.dbId, ref.tableId, idxReadings); + Logger::getLogger()->debug("tyReadingReference '%s' %d %d idxReadings %d", asset_code, ref.dbId, ref.tableId, idxReadings); if (idxReadings >= stmtArraySize) { stmtArraySize = idxReadings + 1; 
readingsStmt.resize(stmtArraySize, nullptr); - Logger::getLogger()->debug("appendReadings: thread :%s: resize size :%d: idx :%d: ", threadId.str().c_str(), stmtArraySize, readingsId); + Logger::getLogger()->debug("appendReadings: thread '%s' resize size %d idx %d ", threadId.str().c_str(), stmtArraySize, readingsId); } if (readingsStmt[idxReadings] == nullptr) @@ -847,10 +864,18 @@ ostringstream threadId; string dbName = readCatalogue->generateDbName(ref.dbId); string dbReadingsName = readCatalogue->generateReadingsName(ref.dbId, readingsId); - sql_cmd = "INSERT INTO " + dbName + "." + dbReadingsName + " ( id, user_ts, reading ) VALUES (?,?,?)"; + if (readingsId == 0) + { + // Overflow table + sql_cmd = "INSERT INTO " + dbName + ".readings_" + to_string(ref.dbId) + "_overflow ( id, asset_code, user_ts, reading ) VALUES (?,'" + asset_code + "',?,?)"; + } + else + { + sql_cmd = "INSERT INTO " + dbName + "." + dbReadingsName + " ( id, user_ts, reading ) VALUES (?,?,?)"; + } rc = SQLPrepare(dbHandle, sql_cmd.c_str(), &readingsStmt[idxReadings]); - Logger::getLogger()->debug("tyReadingReference sql_cmd :%s: :%s: :%d: :%d: ", sql_cmd.c_str(), asset_code, ref.dbId, ref.tableId); + Logger::getLogger()->debug("tyReadingReference sql_cmd '%s' '%s' %d %d ", sql_cmd.c_str(), asset_code, ref.dbId, ref.tableId); if (rc != SQLITE_OK) { @@ -929,9 +954,9 @@ ostringstream threadId; if (retries >= LOG_AFTER_NERRORS) { Logger::getLogger()->warn("appendReadings - %s - " \ - "asset_code :%s: readingsId :%d: " \ - "thread :%s: dbHandle :%X: record " \ - ":%d: retry number :%d: sleep time ms :%d:error :%s:", + "asset_code '%s' readingsId %d " \ + "thread '%s' dbHandle %X record " \ + "%d retry number %d sleep time ms %derror '%s'", msgError.c_str(), asset_code, readingsId, @@ -959,17 +984,34 @@ ostringstream threadId; { raiseError("appendReadings","Inserting a row into " \ "SQLIte using a prepared command - asset_code " \ - ":%s: error :%s: reading :%s: dbHandle :%X:", + "'%s' error '%s' reading '%s' dbHandle %X", asset_code, sqlite3_errmsg(dbHandle), reading.c_str(), dbHandle); + sqlite3_clear_bindings(stmt); + sqlite3_reset(stmt); + sqlite3_exec(dbHandle, "ROLLBACK TRANSACTION", NULL, NULL, NULL); m_appendCount--; // Clear transaction boundary for this thread readCatalogue->m_tx.ClearThreadTransaction(tid); + + // Finalize sqlite structures + for (auto &item : readingsStmt) + { + if(item != nullptr) + { + + if (sqlite3_finalize(item) != SQLITE_OK) + { + raiseError("appendReadings","freeing SQLite in memory structure - error '%s'", sqlite3_errmsg(dbHandle)); + } + } + + } return -1; } } @@ -980,7 +1022,7 @@ ostringstream threadId; if (sqlite3_resut != SQLITE_OK) { raiseError("appendReadings", - "Executing the commit of the transaction :%s:", + "Executing the commit of the transaction '%s'", sqlite3_errmsg(dbHandle)); row = -1; } @@ -1003,7 +1045,7 @@ ostringstream threadId; if (sqlite3_finalize(item) != SQLITE_OK) { - raiseError("appendReadings","freeing SQLite in memory structure - error :%s:", sqlite3_errmsg(dbHandle)); + raiseError("appendReadings","freeing SQLite in memory structure - error '%s'", sqlite3_errmsg(dbHandle)); } } @@ -1026,7 +1068,7 @@ ostringstream threadId; timersub(&t3, &t2, &tm); timeT3 = tm.tv_sec + ((double)tm.tv_usec / 1000000); - Logger::getLogger()->debug("appendReadings end thread :%s: buffer :%10lu: count :%5d: JSON :%6.3f: inserts :%6.3f: finalize :%6.3f:", + Logger::getLogger()->debug("appendReadings end thread '%s' buffer :%10lu: count :%5d: JSON :%6.3f: inserts :%6.3f: finalize 
:%6.3f:",
 			threadId.str().c_str(),
 			strlen(readings),
 			row,
@@ -1070,6 +1112,12 @@ unsigned int minGlobalId;
 unsigned int idWindow;
 unsigned long rowsCount;
+	if (m_noReadings)
+	{
+		Logger::getLogger()->error("Attempt to fetch readings to plugin that has no storage for readings");
+		return false;
+	}
+
 ostringstream threadId;
 threadId << std::this_thread::get_id();
 ReadingsCatalogue *readCatalogue = ReadingsCatalogue::getInstance();
@@ -1100,55 +1148,68 @@ unsigned long rowsCount;
 	}
 	// Generate a single SQL statement that using a set of UNION considers all the readings table in handling
+	// SQL - start
+	sql_cmd = R"(
+		SELECT
+			id,
+			asset_code,
+			reading,
+			strftime('%Y-%m-%d %H:%M:%S', user_ts, 'utc') ||
+			substr(user_ts, instr(user_ts, '.'), 7) AS user_ts,
+			strftime('%Y-%m-%d %H:%M:%f', ts, 'utc') AS ts
+		FROM
+		(
+	)";
+
+	// SQL - union of all the readings tables
+	string sql_cmd_base;
+	string sql_cmd_tmp;
+	// Would like to add a LIMIT on each sub-query in the union all, however SQLITE
+	// does not support this. Note we can not use id + blocksize as this fails if we
+	// have holes in the id space
+	sql_cmd_base = " SELECT id, \"_assetcode_\" asset_code, reading, user_ts, ts " \
+		"FROM _dbname_._tablename_ WHERE id >= " +
+		to_string(id) + " ";
+
+	// Check for any uncommitted transactions:
+	// fetch the minimum reading id among all per thread transactions
+	// and use it as a boundary limit.
+	// If no pending transactions just use current global reading id as limit
+	unsigned long safe_id = readCatalogue->m_tx.GetMinReadingId();
+	if (safe_id)
 	{
-		// SQL - start
-		sql_cmd = R"(
-			SELECT
-				id,
-				asset_code,
-				reading,
-				strftime('%Y-%m-%d %H:%M:%S', user_ts, 'utc') ||
-				substr(user_ts, instr(user_ts, '.'), 7) AS user_ts,
-				strftime('%Y-%m-%d %H:%M:%f', ts, 'utc') AS ts
-			FROM
-			(
-		)";
+		sql_cmd_base += "AND id < " + to_string(safe_id) + " ";
+	}
+	else
+	{
+		sql_cmd_base += "AND id < " + to_string(readCatalogue->getGlobalId()) + " ";
+	}
-		// SQL - union of all the readings tables
-		string sql_cmd_base;
-		string sql_cmd_tmp;
-		// Would like to add a LIMIT on each sub-query in the union all, however SQLITE
-		// does not support this. Note we can not use id + blocksize as this fail if we
-		// have holes in the id space
-		sql_cmd_base = " SELECT id, \"_assetcode_\" asset_code, reading, user_ts, ts " \
-			"FROM _dbname_._tablename_ WHERE id >= " +
-			to_string(id) + " ";
-
-		// Check for any uncommitted transactions:
-		// fetch the minimum reading id among all per thread transactions
-		// an use it as a boundary limit.
-		// If no pending transactions just use current global reading id as limit
-		unsigned long safe_id = readCatalogue->m_tx.GetMinReadingId();
-		if (safe_id)
-		{
-			sql_cmd_base += "AND id < " + to_string(safe_id) + " ";
-		}
-		else
-		{
-			sql_cmd_base += "AND id < " + to_string(readCatalogue->getGlobalId()) + " ";
-		}
+	sql_cmd_tmp = readCatalogue->sqlConstructMultiDb(sql_cmd_base, asset_codes);
+	sql_cmd += sql_cmd_tmp;
-		sql_cmd_tmp = readCatalogue->sqlConstructMultiDb(sql_cmd_base, asset_codes);
-		sql_cmd += sql_cmd_tmp;
+	// Now add in the overflow tables
+	sql_cmd_base = " SELECT id, asset_code, reading, user_ts, ts " \
+		"FROM _dbname_._tablename_ WHERE id >= " +
+		to_string(id) + " ";
+	if (safe_id)
+	{
+		sql_cmd_base += "AND id < " + to_string(safe_id) + " ";
+	}
+	else
+	{
+		sql_cmd_base += "AND id < " + to_string(readCatalogue->getGlobalId()) + " ";
+	}
-		// SQL - end
-		sql_cmd += R"(
-			) as tb
-			ORDER BY id ASC
-			LIMIT
-		)" + to_string(blksize);
+	sql_cmd_tmp = readCatalogue->sqlConstructOverflow(sql_cmd_base, asset_codes);
+	sql_cmd += sql_cmd_tmp;
-	}
+	// SQL - end
+	sql_cmd += R"(
+		) as tb
+		ORDER BY id ASC
+		LIMIT
+	)" + to_string(blksize);
 	logSQL("ReadingsFetch", sql_cmd.c_str());
@@ -1256,7 +1317,8 @@ unsigned long rowsCount;
 /**
  * Perform a query against the readings table
  *
- * retrieveReadings, used by the API, returns timestamp in localtime.
+ * retrieveReadings, used by the API, returns timestamp in utc unless
+ * otherwise requested.
 *
 */
 bool Connection::retrieveReadings(const string& condition, string& resultSet)
@@ -1272,18 +1334,26 @@ SQLBuffer jsonConstraintsExt;
 SQLBuffer jsonConstraints;
 bool isAggregate = false;
 bool isOptAggregate = false;
+const char *timezone = "utc";
 string modifierExt;
 string modifierInt;
 vector asset_codes;
+	if (m_noReadings)
+	{
+		Logger::getLogger()->error("Attempt to retrieve readings to plugin that has no storage for readings");
+		return false;
+	}
+
 ostringstream threadId;
 threadId << std::this_thread::get_id();
 ReadingsCatalogue *readCatalogue = ReadingsCatalogue::getInstance();
+
 	if (readCatalogue)
 	{
-		// Attaches the needed databases if the queue is not empty
+		// Attaches the required databases if the queue is not empty
 		AttachDbSync *attachSync = AttachDbSync::getInstance();
 		attachSync->lock();
@@ -1293,6 +1363,10 @@ vector asset_codes;
 		}
 		attachSync->unlock();
 	}
+	else
+	{
+		Logger::getLogger()->error("Readings catalogue not available");
+	}
 	try {
 		if (dbHandle == NULL)
@@ -1312,9 +1386,9 @@ vector asset_codes;
 				id,
 				asset_code,
 				reading,
-				strftime(')" F_DATEH24_SEC R"(', user_ts, 'localtime') ||
+				strftime(')" F_DATEH24_SEC R"(', user_ts, 'utc') ||
 				substr(user_ts, instr(user_ts, '.'), 7) AS user_ts,
-				strftime(')" F_DATEH24_MS R"(', ts, 'localtime') AS ts
+				strftime(')" F_DATEH24_MS R"(', ts, 'utc') AS ts
 			FROM
 			(
 		)";
@@ -1325,6 +1399,10 @@ vector asset_codes;
 			sql_cmd_tmp = readCat->sqlConstructMultiDb(sql_cmd_base, asset_codes);
 			sql_cmd += sql_cmd_tmp;
+			sql_cmd_base = " SELECT id, asset_code, reading, user_ts, ts FROM _dbname_._tablename_ ";
+			sql_cmd_tmp = readCatalogue->sqlConstructOverflow(sql_cmd_base, asset_codes);
+			sql_cmd += sql_cmd_tmp;
+
 			// SQL - end
 			sql_cmd += R"(
 				) as tb;
@@ -1339,6 +1417,11 @@ vector asset_codes;
 			return false;
 		}
+		if (document.HasMember("timezone") && document["timezone"].IsString())
+		{
+			timezone = document["timezone"].GetString();
+		}
+
 		// timebucket aggregate all datapoints
 		if (aggregateAll(document))
 		{
@@ -1394,14 +1477,18 @@ vector asset_codes;
 			if (strcmp(itr->GetString() ,"user_ts") == 0)
 			{
 				// 
Display without TZ expression and microseconds also - sql.append(" strftime('" F_DATEH24_SEC "', user_ts, 'localtime') "); + sql.append(" strftime('" F_DATEH24_SEC "', user_ts, '"); + sql.append(timezone); + sql.append("') "); sql.append(" || substr(user_ts, instr(user_ts, '.'), 7) "); sql.append(" as user_ts "); } else if (strcmp(itr->GetString() ,"ts") == 0) { // Display without TZ expression and microseconds also - sql.append(" strftime('" F_DATEH24_MS "', ts, 'localtime') "); + sql.append(" strftime('" F_DATEH24_MS "', ts, '"); + sql.append(timezone); + sql.append("') "); sql.append(" as ts "); } else @@ -1513,7 +1600,9 @@ vector asset_codes; { // Extract milliseconds and microseconds for the user_ts fields - sql.append("strftime('" F_DATEH24_SEC "', user_ts, 'localtime') "); + sql.append("strftime('" F_DATEH24_SEC "', user_ts, '"); + sql.append(timezone); + sql.append("') "); sql.append(" || substr(user_ts, instr(user_ts, '.'), 7) "); if (! itr->HasMember("alias")) { @@ -1525,7 +1614,9 @@ vector asset_codes; { sql.append("strftime('" F_DATEH24_MS "', "); sql.append((*itr)["column"].GetString()); - sql.append(", 'localtime')"); + sql.append(", '"); + sql.append(timezone); + sql.append("')"); if (! itr->HasMember("alias")) { sql.append(" AS "); @@ -1568,16 +1659,11 @@ vector asset_codes; sql.append(' '); } - const char *sql_cmd = R"( - id, - asset_code, - reading, - strftime(')" F_DATEH24_SEC R"(', user_ts, 'localtime') || - substr(user_ts, instr(user_ts, '.'), 7) AS user_ts, - strftime(')" F_DATEH24_MS R"(', ts, 'localtime') AS ts - FROM )"; - - sql.append(sql_cmd); + sql.append("id, asset_code, reading, strftime('" F_DATEH24_SEC "', user_ts, '"); + sql.append(timezone); + sql.append("') || substr(user_ts, instr(user_ts, '.'), 7) AS user_ts, strftime('" F_DATEH24_MS "', ts, '"); + sql.append(timezone); + sql.append("') AS ts FROM "); } { @@ -1597,6 +1683,7 @@ vector asset_codes; // SQL - union of all the readings tables string sql_cmd_base; + string sql_cmd_overflow_base; string sql_cmd_tmp; // Specific optimization for the count operation @@ -1614,17 +1701,23 @@ vector asset_codes; sql_cmd_base += ", asset_code"; sql_cmd_base += ", id, reading, user_ts, ts "; + sql_cmd_overflow_base = sql_cmd_base; StringReplaceAll (sql_cmd_base, "asset_code", " \"_assetcode_\" .assetcode. 
"); sql_cmd_base += " FROM _dbname_._tablename_ "; + sql_cmd_overflow_base += " FROM _dbname_._tablename_ "; delete[] queryTmp; } else { sql_cmd_base = " SELECT ROWID, id, \"_assetcode_\" asset_code, reading, user_ts, ts FROM _dbname_._tablename_ "; + sql_cmd_overflow_base = " SELECT ROWID, id, asset_code, reading, user_ts, ts FROM _dbname_._tablename_ "; } sql_cmd_tmp = readCat->sqlConstructMultiDb(sql_cmd_base, asset_codes); sql_cmd += sql_cmd_tmp; + + sql_cmd_tmp = readCatalogue->sqlConstructOverflow(sql_cmd_overflow_base, asset_codes, false, isOptAggregate); + sql_cmd += sql_cmd_tmp; // SQL - end sql_cmd += R"( @@ -1677,7 +1770,6 @@ vector asset_codes; sql.append(';'); const char *query = sql.coalesce(); - char *zErrMsg = NULL; int rc; sqlite3_stmt *stmt; @@ -1733,9 +1825,15 @@ unsigned long rowidLimit = 0, minrowidLimit = 0, maxrowidLimit = 0, rowidMin; struct timeval startTv, endTv; int blocks = 0; bool flag_retain; - +char *zErrMsg = NULL; vector assetCodes; + if (m_noReadings) + { + Logger::getLogger()->error("Attempt to purge readings from plugin that has no storage for readings"); + return 0; + } + Logger *logger = Logger::getLogger(); ostringstream threadId; @@ -1760,13 +1858,17 @@ vector assetCodes; { flag_retain = true; } - Logger::getLogger()->debug("%s - flags :%X: flag_retain :%d: sent :%ld:", __FUNCTION__, flags, flag_retain, sent); + + + Logger::getLogger()->debug("%s - flags %X flag_retain %d sent :%ld:", __FUNCTION__, flags, flag_retain, sent); // Prepare empty result result = "{ \"removed\" : 0, "; result += " \"unsentPurged\" : 0, "; result += " \"unsentRetained\" : 0, "; - result += " \"readings\" : 0 }"; + result += " \"readings\" : 0, "; + result += " \"method\" : \"rows\", "; + result += " \"duration\" : 0 }"; logger->info("Purge starting..."); gettimeofday(&startTv, NULL); @@ -1776,93 +1878,80 @@ vector assetCodes; * This provents us looping in the purge process if new readings become * eligible for purging at a rate that is faster than we can purge them. 
*/ - { - char *zErrMsg = NULL; - int rc; - - string sql_cmd; - // Generate a single SQL statement that using a set of UNION considers all the readings table in handling - { - // SQL - start - sql_cmd = R"( - SELECT MAX(rowid) - FROM - ( - )"; - - // SQL - union of all the readings tables - string sql_cmd_base; - string sql_cmd_tmp; - sql_cmd_base = " SELECT MAX(rowid) rowid FROM _dbname_._tablename_ "; - ReadingsCatalogue *readCat = ReadingsCatalogue::getInstance(); - sql_cmd_tmp = readCat->sqlConstructMultiDb(sql_cmd_base, assetCodes); - sql_cmd += sql_cmd_tmp; - - // SQL - end - sql_cmd += R"( - ) as readings_1 - )"; - } - rc = SQLexec(dbHandle, - sql_cmd.c_str(), - rowidCallback, - &rowidLimit, - &zErrMsg); + string sql_cmd; + string sql_cmd_tmp; + // Generate a single SQL statement that using a set of UNION considers all the readings table in handling + // SQL - start + sql_cmd = R"( + SELECT MAX(rowid) + FROM + ( + )"; + + // SQL - union of all the readings tables + string sql_cmd_base = " SELECT MAX(rowid) rowid FROM _dbname_._tablename_ "; + ReadingsCatalogue *readCat = ReadingsCatalogue::getInstance(); + sql_cmd_tmp = readCat->sqlConstructMultiDb(sql_cmd_base, assetCodes); + sql_cmd += sql_cmd_tmp; + sql_cmd_tmp = readCat->sqlConstructOverflow(sql_cmd_base, assetCodes); + sql_cmd += sql_cmd_tmp; + + // SQL - end + sql_cmd += R"( + ) as readings_1 + )"; + + int rc = SQLexec(dbHandle, "readings", + sql_cmd.c_str(), + rowidCallback, + &rowidLimit, + &zErrMsg); - if (rc != SQLITE_OK) - { - raiseError("purge - phase 0, fetching rowid limit ", zErrMsg); - sqlite3_free(zErrMsg); - return 0; - } - maxrowidLimit = rowidLimit; + if (rc != SQLITE_OK) + { + raiseError("purge - phase 0, fetching rowid limit ", zErrMsg); + sqlite3_free(zErrMsg); + return 0; } + maxrowidLimit = rowidLimit; Logger::getLogger()->debug("purgeReadings rowidLimit %lu", rowidLimit); - { - char *zErrMsg = NULL; - int rc; - string sql_cmd; - // Generate a single SQL statement that using a set of UNION considers all the readings table in handling - { - // SQL - start - sql_cmd = R"( - SELECT MIN(rowid) - FROM - ( - )"; - - // SQL - union of all the readings tables - string sql_cmd_base; - string sql_cmd_tmp; - sql_cmd_base = " SELECT MIN(rowid) rowid FROM _dbname_._tablename_ "; - ReadingsCatalogue *readCat = ReadingsCatalogue::getInstance(); - sql_cmd_tmp = readCat->sqlConstructMultiDb(sql_cmd_base, assetCodes, true); - sql_cmd += sql_cmd_tmp; - - // SQL - end - sql_cmd += R"( - ) as readings_1 - )"; - } - - Logger::getLogger()->debug("%s - SELECT MIN - :%s:", __FUNCTION__, sql_cmd.c_str() ); - - rc = SQLexec(dbHandle, - sql_cmd.c_str(), - rowidCallback, - &minrowidLimit, - &zErrMsg); + // Generate a single SQL statement that using a set of UNION considers all the readings table in handling + // SQL - start + sql_cmd = R"( + SELECT MIN(rowid) + FROM + ( + )"; + + // SQL - union of all the readings tables + sql_cmd_base = " SELECT MIN(rowid) rowid FROM _dbname_._tablename_ "; + sql_cmd_tmp = readCat->sqlConstructMultiDb(sql_cmd_base, assetCodes, true); + sql_cmd += sql_cmd_tmp; + sql_cmd_tmp = readCat->sqlConstructOverflow(sql_cmd_base, assetCodes, true); + sql_cmd += sql_cmd_tmp; + + // SQL - end + sql_cmd += R"( + ) as readings_1 + )"; + + Logger::getLogger()->debug("%s - SELECT MIN - '%s'", __FUNCTION__, sql_cmd.c_str() ); + + rc = SQLexec(dbHandle, "readings", + sql_cmd.c_str(), + rowidCallback, + &minrowidLimit, + &zErrMsg); - if (rc != SQLITE_OK) - { - raiseError("purge - phaase 0, fetching minrowid limit ", 
zErrMsg);
-		sqlite3_free(zErrMsg);
-		return 0;
-	}
+	if (rc != SQLITE_OK)
+	{
+		raiseError("purge - phase 0, fetching minrowid limit ", zErrMsg);
+		sqlite3_free(zErrMsg);
+		return 0;
 	}
 	Logger::getLogger()->debug("purgeReadings minrowidLimit %lu", minrowidLimit);
@@ -1875,28 +1964,27 @@ vector assetCodes;
 	 */
 	string sql_cmd;
 	// Generate a single SQL statement that using a set of UNION considers all the readings table in handling
-	{
-		// SQL - start
-		sql_cmd = R"(
-			SELECT (strftime('%s','now', 'utc') - strftime('%s', MIN(user_ts)))/360
-			FROM
-			(
-		)";
-
-		// SQL - union of all the readings tables
-		string sql_cmd_base;
-		string sql_cmd_tmp;
-		sql_cmd_base = " SELECT MIN(user_ts) user_ts FROM _dbname_._tablename_ WHERE rowid <= " + to_string(rowidLimit);
-		ReadingsCatalogue *readCat = ReadingsCatalogue::getInstance();
-		sql_cmd_tmp = readCat->sqlConstructMultiDb(sql_cmd_base, assetCodes, true);
-		sql_cmd += sql_cmd_tmp;
+	// SQL - start
+	sql_cmd = R"(
+		SELECT (strftime('%s','now', 'utc') - strftime('%s', MIN(user_ts)))/360
+		FROM
+		(
+	)";
-		// SQL - end
-		sql_cmd += R"(
-			) as readings_1
-		)";
+	// SQL - union of all the readings tables
+	string sql_cmd_base;
+	string sql_cmd_tmp;
+	sql_cmd_base = " SELECT MIN(user_ts) user_ts FROM _dbname_._tablename_ WHERE rowid <= " + to_string(rowidLimit);
+	ReadingsCatalogue *readCat = ReadingsCatalogue::getInstance();
+	sql_cmd_tmp = readCat->sqlConstructMultiDb(sql_cmd_base, assetCodes, true);
+	sql_cmd += sql_cmd_tmp;
+	sql_cmd_tmp = readCat->sqlConstructOverflow(sql_cmd_base, assetCodes, true);
+	sql_cmd += sql_cmd_tmp;
-	}
+	// SQL - end
+	sql_cmd += R"(
+		) as readings_1
+	)";
 	SQLBuffer oldest;
 	oldest.append(sql_cmd);
@@ -1908,7 +1996,7 @@ vector assetCodes;
 	int purge_readings = 0;
 	// Exec query and get result in 'purge_readings' via 'selectCallback'
-	rc = SQLexec(dbHandle,
+	rc = SQLexec(dbHandle, "readings",
 		     query,
 		     selectCallback,
 		     &purge_readings,
@@ -1927,7 +2015,7 @@ vector assetCodes;
 		return 0;
 	}
-	Logger::getLogger()->debug("purgeReadings purge_readings :%d: age :%d:", purge_readings, age);
+	Logger::getLogger()->debug("purgeReadings purge_readings %d age %d", purge_readings, age);
 	}
 	Logger::getLogger()->debug("%s - rowidLimit :%lu: maxrowidLimit :%lu: maxrowidLimit :%lu: age :%lu:", __FUNCTION__, rowidLimit, maxrowidLimit, minrowidLimit, age);
@@ -1985,6 +2073,8 @@ vector assetCodes;
 			ReadingsCatalogue *readCat = ReadingsCatalogue::getInstance();
 			sql_cmd_tmp = readCat->sqlConstructMultiDb(sql_cmd_base, assetCodes);
 			sql_cmd += sql_cmd_tmp;
+			sql_cmd_tmp = readCat->sqlConstructOverflow(sql_cmd_base, assetCodes);
+			sql_cmd += sql_cmd_tmp;
 			// SQL - end
 			sql_cmd += R"(
@@ -1998,7 +2088,7 @@ vector assetCodes;
 			sqlBuffer.append(';');
 			const char *query = sqlBuffer.coalesce();
-			rc = SQLexec(dbHandle,
+			rc = SQLexec(dbHandle, "readings",
 				     query,
 				     rowidCallback,
 				     &midRowId,
@@ -2069,6 +2159,8 @@ vector assetCodes;
 			ReadingsCatalogue *readCat = ReadingsCatalogue::getInstance();
 			sql_cmd_tmp = readCat->sqlConstructMultiDb(sql_cmd_base, assetCodes);
 			sql_cmd += sql_cmd_tmp;
+			sql_cmd_tmp = readCat->sqlConstructOverflow(sql_cmd_base, assetCodes);
+			sql_cmd += sql_cmd_tmp;
 			// SQL - end
 			sql_cmd += R"(
@@ -2082,7 +2174,7 @@ vector assetCodes;
 		idBuffer.append(';');
 		const char *idQuery = idBuffer.coalesce();
-		rc = SQLexec(dbHandle,
+		rc = SQLexec(dbHandle, "readings",
 			     idQuery,
 			     rowidCallback,
 			     &lastPurgedId,
@@ -2105,7 +2197,7 @@ vector assetCodes;
 			unsentPurged = unsent;
 		}
-		Logger::getLogger()->debug("%s - lastPurgedId :%d: unsentPurged :%ld:",__FUNCTION__, 
lastPurgedId, unsentPurged); + Logger::getLogger()->debug("%s - lastPurgedId %d unsentPurged :%ld:",__FUNCTION__, lastPurgedId, unsentPurged); } if (m_writeAccessOngoing) { @@ -2116,13 +2208,11 @@ vector assetCodes; } unsigned int deletedRows = 0; - char *zErrMsg = NULL; + zErrMsg = NULL; unsigned long rowsAffected; unsigned int totTime=0, prevBlocks=0, prevTotTime=0; logger->info("Purge about to delete readings # %ld to %ld", rowidMin, rowidLimit); - ReadingsCatalogue *readCat = ReadingsCatalogue::getInstance(); - while (rowidMin < rowidLimit) { blocks++; @@ -2149,7 +2239,7 @@ vector assetCodes; rc = readCat->purgeAllReadings(dbHandle, query ,&zErrMsg, &rowsAffected); END_TIME; - logger->debug("%s - DELETE sql :%s: rowsAffected :%ld:", __FUNCTION__, query ,rowsAffected); + logger->debug("%s - DELETE sql '%s' rowsAffected :%ld:", __FUNCTION__, query ,rowsAffected); // Release memory for 'query' var delete[] query; @@ -2210,20 +2300,23 @@ vector assetCodes; unsentPurged = deletedRows; } + gettimeofday(&endTv, NULL); + unsigned long duration = (1000000 * (endTv.tv_sec - startTv.tv_sec)) + endTv.tv_usec - startTv.tv_usec; + ostringstream convert; convert << "{ \"removed\" : " << deletedRows << ", "; convert << " \"unsentPurged\" : " << unsentPurged << ", "; convert << " \"unsentRetained\" : " << unsentRetained << ", "; - convert << " \"readings\" : " << numReadings << " }"; + convert << " \"readings\" : " << numReadings << ", "; + convert << " \"method\" : \"age\", "; + convert << " \"duration\" : " << duration << " }"; result = convert.str(); - gettimeofday(&endTv, NULL); - unsigned long duration = (1000000 * (endTv.tv_sec - startTv.tv_sec)) + endTv.tv_usec - startTv.tv_usec; logger->info("Purge process complete in %d blocks in %lduS", blocks, duration); - Logger::getLogger()->debug("%s - age :%lu: flag_retain :%x: sent :%lu: result :%s:", __FUNCTION__, age, flags, flag_retain, result.c_str() ); + Logger::getLogger()->debug("%s - age :%lu: flag_retain :%x: sent :%lu: result '%s'", __FUNCTION__, age, flags, flag_retain, result.c_str() ); return deletedRows; } @@ -2244,6 +2337,7 @@ unsigned long limit = 0; string sql_cmd; vector assetCodes; bool flag_retain; +struct timeval startTv, endTv; // rowidCallback expects unsigned long @@ -2255,6 +2349,13 @@ bool flag_retain; Logger *logger = Logger::getLogger(); + if (m_noReadings) + { + logger->error("Attempt to purge readings from plugin that has no storage for readings"); + return 0; + } + + gettimeofday(&startTv, NULL); ostringstream threadId; threadId << std::this_thread::get_id(); ReadingsCatalogue *readCatalogue = ReadingsCatalogue::getInstance(); @@ -2278,7 +2379,7 @@ bool flag_retain; { flag_retain = true; } - Logger::getLogger()->debug("%s - flags :%X: flag_retain :%d: sent :%ld:", __FUNCTION__, flags, flag_retain, sent); + Logger::getLogger()->debug("%s - flags %X flag_retain %d sent :%ld:", __FUNCTION__, flags, flag_retain, sent); logger->info("Purge by Rows called"); @@ -2311,6 +2412,8 @@ bool flag_retain; ReadingsCatalogue *readCat = ReadingsCatalogue::getInstance(); sql_cmd_tmp = readCat->sqlConstructMultiDb(sql_cmd_base, assetCodes); sql_cmd += sql_cmd_tmp; + sql_cmd_tmp = readCat->sqlConstructOverflow(sql_cmd_base, assetCodes); + sql_cmd += sql_cmd_tmp; // SQL - end sql_cmd += R"( @@ -2318,7 +2421,7 @@ bool flag_retain; )"; } - rc = SQLexec(dbHandle, + rc = SQLexec(dbHandle, "readings", sql_cmd.c_str(), rowidCallback, &rowcount, @@ -2350,6 +2453,8 @@ bool flag_retain; ReadingsCatalogue *readCat = ReadingsCatalogue::getInstance(); 
sql_cmd_tmp = readCat->sqlConstructMultiDb(sql_cmd_base, assetCodes); sql_cmd += sql_cmd_tmp; + sql_cmd_tmp = readCat->sqlConstructOverflow(sql_cmd_base, assetCodes); + sql_cmd += sql_cmd_tmp; // SQL - end sql_cmd += R"( @@ -2358,7 +2463,7 @@ bool flag_retain; } - rc = SQLexec(dbHandle, + rc = SQLexec(dbHandle, "readings", sql_cmd.c_str(), rowidCallback, &maxId, @@ -2400,16 +2505,18 @@ bool flag_retain; ReadingsCatalogue *readCat = ReadingsCatalogue::getInstance(); sql_cmd_tmp = readCat->sqlConstructMultiDb(sql_cmd_base, assetCodes, true); sql_cmd += sql_cmd_tmp; + sql_cmd_tmp = readCat->sqlConstructOverflow(sql_cmd_base, assetCodes, true); + sql_cmd += sql_cmd_tmp; // SQL - end sql_cmd += R"( ) as readings_1 )"; - logger->debug("%s - SELECT MIN - sql_cmd :%s: ", __FUNCTION__, sql_cmd.c_str() ); + logger->debug("%s - SELECT MIN - sql_cmd '%s' ", __FUNCTION__, sql_cmd.c_str() ); } - rc = SQLexec(dbHandle, + rc = SQLexec(dbHandle, "readings", sql_cmd.c_str(), rowidCallback, &minId, @@ -2453,7 +2560,7 @@ bool flag_retain; // Exec DELETE query: no callback, no resultset rc = readCat->purgeAllReadings(dbHandle, query ,&zErrMsg, &rowsAffected); - logger->debug(" %s - DELETE - query :%s: rowsAffected :%ld:", __FUNCTION__, query ,rowsAffected); + logger->debug(" %s - DELETE - query '%s' rowsAffected :%ld:", __FUNCTION__, query ,rowsAffected); deletedRows += rowsAffected; numReadings -= rowsAffected; @@ -2483,17 +2590,21 @@ bool flag_retain; unsentRetained = numReadings - rows; } + gettimeofday(&endTv, NULL); + unsigned long duration = (1000000 * (endTv.tv_sec - startTv.tv_sec)) + endTv.tv_usec - startTv.tv_usec; ostringstream convert; convert << "{ \"removed\" : " << deletedRows << ", "; convert << " \"unsentPurged\" : " << unsentPurged << ", "; convert << " \"unsentRetained\" : " << unsentRetained << ", "; - convert << " \"readings\" : " << numReadings << " }"; + convert << " \"readings\" : " << numReadings << ", "; + convert << " \"method\" : \"rows\", "; + convert << " \"duration\" : " << duration << " }"; result = convert.str(); - Logger::getLogger()->debug("%s - Purge by Rows complete - rows :%lu: flag :%x: sent :%lu: numReadings :%lu: rowsAffected :%u: result :%s:", __FUNCTION__, rows, flags, sent, numReadings, rowsAffected, result.c_str() ); + Logger::getLogger()->debug("%s - Purge by Rows complete - rows :%lu: flag :%x: sent :%lu: numReadings :%lu: rowsAffected :%u: result '%s'", __FUNCTION__, rows, flags, sent, numReadings, rowsAffected, result.c_str() ); return deletedRows; } @@ -2515,7 +2626,7 @@ int Connection::SQLPrepare(sqlite3 *dbHandle, const char *sqlCmd, sqlite3_stmt * { if (retries >= LOG_AFTER_NERRORS){ - Logger::getLogger()->warn("SQLPrepare - error :%s: dbHandle :%X: sqlCmd :%s: retry :%d: of :%d:", + Logger::getLogger()->warn("SQLPrepare - error '%s' dbHandle %X sqlCmd '%s' retry %d of %d", sqlite3_errmsg(dbHandle), dbHandle, sqlCmd, @@ -2551,6 +2662,10 @@ char *zErrMsg = NULL; int rc; sqlite3_stmt *stmt; + if (m_noReadings) + { + return 0; + } ReadingsCatalogue *readCat = ReadingsCatalogue::getInstance(); if (readCat == NULL) { @@ -2608,7 +2723,7 @@ sqlite3_stmt *stmt; sqlite3_free(zErrMsg); return 0; } - + readCat->loadEmptyAssetReadingCatalogue(); // Get numbwer of affected rows return (unsigned int)sqlite3_changes(dbHandle); } diff --git a/C/plugins/storage/sqlite/common/readings_catalogue.cpp b/C/plugins/storage/sqlite/common/readings_catalogue.cpp index 61952824d7..5427a27e4f 100644 --- a/C/plugins/storage/sqlite/common/readings_catalogue.cpp +++ 
b/C/plugins/storage/sqlite/common/readings_catalogue.cpp
@@ -17,7 +17,7 @@
 #include
 #include
 #include
-#include
+#include
 #include "readings_catalogue.h"
 #include
@@ -28,8 +28,22 @@ using namespace rapidjson;
 #define LOG_TX_BOUNDARIES 0
 /**
- * Logs an error
+ * Constructor
 *
+ * This is never explicitly called as the ReadingsCatalogue is a
+ * singleton class.
+ */
+ReadingsCatalogue::ReadingsCatalogue() : m_nextOverflow(1), m_maxOverflowUsed(0)
+{
+}
+
+/**
+ * Logs an error. A variable argument function that
+ * uses a printf format string to log an error message with the
+ * associated operation.
+ *
+ * @param operation The operation in progress
+ * @param reason A printf format string with the error message text
 */
 void ReadingsCatalogue::raiseError(const char *operation, const char *reason, ...)
 {
@@ -39,7 +53,7 @@ void ReadingsCatalogue::raiseError(const char *operation, const char *reason, ..
 	va_start(ap, reason);
 	vsnprintf(tmpbuf, sizeof(tmpbuf), reason, ap);
 	va_end(ap);
-	Logger::getLogger()->error("ReadingsCatalogues error: %s", tmpbuf);
+	Logger::getLogger()->error("ReadingsCatalogues: %s during operation %s", tmpbuf, operation);
 }
 /**
@@ -59,43 +73,41 @@ bool ReadingsCatalogue::configurationRetrieve(sqlite3 *dbHandle)
 sqlite3_stmt *stmt;
 	// Retrieves the global_id from thd DB
-	{
-		sql_cmd = " SELECT global_id, db_id_Last, n_readings_per_db, n_db_preallocate FROM " READINGS_DB ".configuration_readings ";
+	sql_cmd = " SELECT global_id, db_id_Last, n_readings_per_db, n_db_preallocate FROM " READINGS_DB ".configuration_readings ";
-		if (sqlite3_prepare_v2(dbHandle,sql_cmd.c_str(),-1, &stmt,NULL) != SQLITE_OK)
-		{
-			raiseError("configurationRetrieve", sqlite3_errmsg(dbHandle));
-			return false;
-		}
+	if (sqlite3_prepare_v2(dbHandle,sql_cmd.c_str(),-1, &stmt,NULL) != SQLITE_OK)
+	{
+		raiseError("configurationRetrieve", sqlite3_errmsg(dbHandle));
+		return false;
+	}
-		if (SQLStep(stmt) != SQLITE_ROW)
-		{
-			m_ReadingsGlobalId = 1;
-			m_dbIdLast = 0;
+	if (SQLStep(stmt) != SQLITE_ROW)
+	{
+		m_ReadingsGlobalId = 1;
+		m_dbIdLast = 0;
-			m_storageConfigCurrent.nReadingsPerDb = m_storageConfigApi.nReadingsPerDb;
-			m_storageConfigCurrent.nDbPreallocate = m_storageConfigApi.nDbPreallocate;
+		m_storageConfigCurrent.nReadingsPerDb = m_storageConfigApi.nReadingsPerDb;
+		m_storageConfigCurrent.nDbPreallocate = m_storageConfigApi.nDbPreallocate;
-			sql_cmd = " INSERT INTO " READINGS_DB ".configuration_readings VALUES (" + to_string(m_ReadingsGlobalId) + ","
-				+ to_string(m_dbIdLast) + ","
-				+ to_string(m_storageConfigCurrent.nReadingsPerDb) + ","
-				+ to_string(m_storageConfigCurrent.nDbPreallocate) + ")";
-			if (SQLExec(dbHandle, sql_cmd.c_str()) != SQLITE_OK)
-			{
-				raiseError("configurationRetrieve", sqlite3_errmsg(dbHandle));
-				return false;
-			}
-		}
-		else
+		sql_cmd = " INSERT INTO " READINGS_DB ".configuration_readings VALUES (" + to_string(m_ReadingsGlobalId) + ","
+			+ to_string(m_dbIdLast) + ","
+			+ to_string(m_storageConfigCurrent.nReadingsPerDb) + ","
+			+ to_string(m_storageConfigCurrent.nDbPreallocate) + ")";
+		if (SQLExec(dbHandle, sql_cmd.c_str()) != SQLITE_OK)
 		{
-			nCols = sqlite3_column_count(stmt);
-			m_ReadingsGlobalId = sqlite3_column_int(stmt, 0);
-			m_dbIdLast = sqlite3_column_int(stmt, 1);
-			m_storageConfigCurrent.nReadingsPerDb = sqlite3_column_int(stmt, 2);
-			m_storageConfigCurrent.nDbPreallocate = sqlite3_column_int(stmt, 3);
+			raiseError("configurationRetrieve", sqlite3_errmsg(dbHandle));
+			return false;
 		}
 	}
-	Logger::getLogger()->debug("configurationRetrieve: ReadingsGlobalId 
:%d: dbIdLast :%d: ", (int) m_ReadingsGlobalId, m_dbIdLast); + else + { + nCols = sqlite3_column_count(stmt); + m_ReadingsGlobalId = sqlite3_column_int(stmt, 0); + m_dbIdLast = sqlite3_column_int(stmt, 1); + m_storageConfigCurrent.nReadingsPerDb = sqlite3_column_int(stmt, 2); + m_storageConfigCurrent.nDbPreallocate = sqlite3_column_int(stmt, 3); + } + Logger::getLogger()->debug("configurationRetrieve: ReadingsGlobalId %d dbIdLast %d ", (int) m_ReadingsGlobalId, m_dbIdLast); sqlite3_finalize(stmt); @@ -122,42 +134,46 @@ bool ReadingsCatalogue::evaluateGlobalId () ConnectionManager *manager = ConnectionManager::getInstance(); Connection *connection = manager->allocate(); +#if TRACK_CONNECTION_USER + string usage = "Evaluate Global ID"; + connection->setUsage(usage); +#endif dbHandle = connection->getDbHandle(); // Retrieves the global_id from thd DB - { - sql_cmd = " SELECT global_id FROM " READINGS_DB ".configuration_readings "; + sql_cmd = " SELECT global_id FROM " READINGS_DB ".configuration_readings "; - if (sqlite3_prepare_v2(dbHandle,sql_cmd.c_str(),-1, &stmt,NULL) != SQLITE_OK) - { - raiseError("evaluateGlobalId", sqlite3_errmsg(dbHandle)); - return false; - } + if (sqlite3_prepare_v2(dbHandle,sql_cmd.c_str(),-1, &stmt,NULL) != SQLITE_OK) + { + raiseError("evaluateGlobalId", sqlite3_errmsg(dbHandle)); + manager->release(connection); + return false; + } - if (SQLStep(stmt) != SQLITE_ROW) - { - m_ReadingsGlobalId = 1; + if (SQLStep(stmt) != SQLITE_ROW) + { + m_ReadingsGlobalId = 1; - sql_cmd = " INSERT INTO " READINGS_DB ".configuration_readings VALUES (" + to_string(m_ReadingsGlobalId) + "," - + "0" + "," - + to_string(m_storageConfigApi.nReadingsPerDb) + "," - + to_string(m_storageConfigApi.nDbPreallocate) + ")"; + sql_cmd = " INSERT INTO " READINGS_DB ".configuration_readings VALUES (" + to_string(m_ReadingsGlobalId) + "," + + "0" + "," + + to_string(m_storageConfigApi.nReadingsPerDb) + "," + + to_string(m_storageConfigApi.nDbPreallocate) + ")"; - if (SQLExec(dbHandle, sql_cmd.c_str()) != SQLITE_OK) - { - raiseError("evaluateGlobalId", sqlite3_errmsg(dbHandle)); - return false; - } - } - else + if (SQLExec(dbHandle, sql_cmd.c_str()) != SQLITE_OK) { - nCols = sqlite3_column_count(stmt); - m_ReadingsGlobalId = sqlite3_column_int(stmt, 0); + raiseError("evaluateGlobalId", sqlite3_errmsg(dbHandle)); + manager->release(connection); + return false; } } + else + { + nCols = sqlite3_column_count(stmt); + m_ReadingsGlobalId = sqlite3_column_int(stmt, 0); + } id = m_ReadingsGlobalId; - Logger::getLogger()->debug("evaluateGlobalId - global id from the DB :%d:", id); + Logger::getLogger()->debug("evaluateGlobalId - global id from the DB %d", id); if (m_ReadingsGlobalId == -1) { @@ -165,18 +181,17 @@ bool ReadingsCatalogue::evaluateGlobalId () } id = m_ReadingsGlobalId; - Logger::getLogger()->debug("evaluateGlobalId - global id from the DB :%d:", id); + Logger::getLogger()->debug("evaluateGlobalId - global id from the DB %d", id); // Set the global_id in the DB to -1 to force a calculation at the restart // in case the shutdown is not executed and the proper value stored - { - sql_cmd = " UPDATE " READINGS_DB ".configuration_readings SET global_id=-1;"; + sql_cmd = " UPDATE " READINGS_DB ".configuration_readings SET global_id=-1;"; - if (SQLExec(dbHandle, sql_cmd.c_str()) != SQLITE_OK) - { - raiseError("evaluateGlobalId", sqlite3_errmsg(dbHandle)); - return false; - } + if (SQLExec(dbHandle, sql_cmd.c_str()) != SQLITE_OK) + { + raiseError("evaluateGlobalId", sqlite3_errmsg(dbHandle)); + 
manager->release(connection); + return false; } sqlite3_finalize(stmt); @@ -200,11 +215,15 @@ bool ReadingsCatalogue::storeGlobalId () int i; i = m_ReadingsGlobalId; - Logger::getLogger()->debug("storeGlobalId m_globalId :%d: ", i); + Logger::getLogger()->debug("storeGlobalId m_globalId %d ", i); ConnectionManager *manager = ConnectionManager::getInstance(); Connection *connection = manager->allocate(); +#if TRACK_CONNECTION_USER + string usage = "Store Global ID"; + connection->setUsage(usage); +#endif dbHandle = connection->getDbHandle(); sql_cmd = " UPDATE " READINGS_DB ".configuration_readings SET global_id=" + to_string(m_ReadingsGlobalId); @@ -212,6 +231,7 @@ bool ReadingsCatalogue::storeGlobalId () if (SQLExec(dbHandle, sql_cmd.c_str()) != SQLITE_OK) { raiseError("storeGlobalId", sqlite3_errmsg(dbHandle)); + manager->release(connection); return false; } @@ -240,24 +260,25 @@ int ReadingsCatalogue::calculateGlobalId (sqlite3 *dbHandle) id = 1; // Prepare the sql command to calculate the global id from the rows in the DB - { - sql_cmd = R"( - SELECT - max(id) id - FROM - ( - )"; + sql_cmd = R"( + SELECT + max(id) id + FROM + ( + )"; - bool firstRow = true; - if (m_AssetReadingCatalogue.empty()) - { - string dbReadingsName = generateReadingsName(1, 1); + bool firstRow = true; + if (m_AssetReadingCatalogue.empty()) + { + string dbReadingsName = generateReadingsName(1, 1); - sql_cmd += " SELECT max(id) id FROM " READINGS_DB "." + dbReadingsName + " "; - } - else + sql_cmd += " SELECT max(id) id FROM " READINGS_DB "." + dbReadingsName + " "; + } + else + { + for (auto &item : m_AssetReadingCatalogue) { - for (auto &item : m_AssetReadingCatalogue) + if (item.second.first != 0) { if (!firstRow) { @@ -271,8 +292,20 @@ int ReadingsCatalogue::calculateGlobalId (sqlite3 *dbHandle) firstRow = false; } } - sql_cmd += ") AS tb"; + // Now add overflow tables + for (int i = 1; i <= m_maxOverflowUsed; i++) + { + if (!firstRow) + { + sql_cmd += " UNION "; + } + dbName = generateDbName(i); + dbReadingsName = generateReadingsName(i, 0); + sql_cmd += " SELECT max(id) id FROM " + dbName + "." + dbReadingsName + " "; + firstRow = false; + } } + sql_cmd += ") AS tb"; if (sqlite3_prepare_v2(dbHandle,sql_cmd.c_str(),-1, &stmt,NULL) != SQLITE_OK) @@ -283,6 +316,7 @@ int ReadingsCatalogue::calculateGlobalId (sqlite3 *dbHandle) if (SQLStep(stmt) != SQLITE_ROW) { + raiseError("calculateGlobalId SQLStep", sqlite3_errmsg(dbHandle)); id = 1; } else @@ -293,7 +327,7 @@ int ReadingsCatalogue::calculateGlobalId (sqlite3 *dbHandle) id++; } - Logger::getLogger()->debug("calculateGlobalId - global id evaluated :%d:", id); + Logger::getLogger()->debug("calculateGlobalId - global id evaluated %d", id); sqlite3_finalize(stmt); return (id); @@ -337,15 +371,30 @@ int ReadingsCatalogue::getMinGlobalId (sqlite3 *dbHandle) else { for (auto &item : m_AssetReadingCatalogue) + { + if (item.second.first != 0) + { + if (!firstRow) + { + sql_cmd += " UNION "; + } + + dbName = generateDbName(item.second.second); + dbReadingsName = generateReadingsName(item.second.second, item.second.first); + + sql_cmd += " SELECT min(id) id FROM " + dbName + "." 
+ dbReadingsName + " "; + firstRow = false; + } + } + // Now add overflow tables + for (int i = 1; i <= m_maxOverflowUsed; i++) { if (!firstRow) { sql_cmd += " UNION "; } - - dbName = generateDbName(item.second.second); - dbReadingsName = generateReadingsName(item.second.second, item.second.first); - + dbName = generateDbName(i); + dbReadingsName = generateReadingsName(i, 0); sql_cmd += " SELECT min(id) id FROM " + dbName + "." + dbReadingsName + " "; firstRow = false; } @@ -353,7 +402,6 @@ int ReadingsCatalogue::getMinGlobalId (sqlite3 *dbHandle) sql_cmd += ") AS tb"; } - if (sqlite3_prepare_v2(dbHandle,sql_cmd.c_str(),-1, &stmt,NULL) != SQLITE_OK) { raiseError(__FUNCTION__, sqlite3_errmsg(dbHandle)); @@ -370,7 +418,7 @@ int ReadingsCatalogue::getMinGlobalId (sqlite3 *dbHandle) id = sqlite3_column_int(stmt, 0); } - Logger::getLogger()->debug("%s - global id evaluated :%d:", __FUNCTION__, id); + Logger::getLogger()->debug("%s - global id evaluated %d", __FUNCTION__, id); sqlite3_finalize(stmt); @@ -395,6 +443,10 @@ bool ReadingsCatalogue::loadAssetReadingCatalogue() ConnectionManager *manager = ConnectionManager::getInstance(); Connection *connection = manager->allocate(); +#if TRACK_CONNECTION_USER + string usage = "Load Asset Reading Catalogue"; + connection->setUsage(usage); +#endif dbHandle = connection->getDbHandle(); // loads readings catalog from the db @@ -412,6 +464,7 @@ bool ReadingsCatalogue::loadAssetReadingCatalogue() if (sqlite3_prepare_v2(dbHandle,sql_cmd,-1, &stmt,NULL) != SQLITE_OK) { raiseError("retrieve asset_reading_catalogue", sqlite3_errmsg(dbHandle)); + manager->release(connection); return false; } else @@ -428,11 +481,15 @@ bool ReadingsCatalogue::loadAssetReadingCatalogue() if (dbId > maxDbID) maxDbID = dbId; - Logger::getLogger()->debug("loadAssetReadingCatalogue - thread :%s: reading Id :%d: dbId :%d: asset name :%s: max db Id :%d:", threadId.str().c_str(), tableId, dbId, asset_name, maxDbID); + Logger::getLogger()->debug("loadAssetReadingCatalogue - thread '%s' reading Id %d dbId %d asset name '%s' max db Id %d", threadId.str().c_str(), tableId, dbId, asset_name, maxDbID); auto newItem = make_pair(tableId,dbId); auto newMapValue = make_pair(asset_name,newItem); m_AssetReadingCatalogue.insert(newMapValue); + if (tableId == 0 && dbId > m_maxOverflowUsed) // Overflow + { + m_maxOverflowUsed = dbId; + } } @@ -441,7 +498,7 @@ bool ReadingsCatalogue::loadAssetReadingCatalogue() manager->release(connection); m_dbIdCurrent = maxDbID; - Logger::getLogger()->debug("loadAssetReadingCatalogue maxdb :%d:", m_dbIdCurrent); + Logger::getLogger()->debug("loadAssetReadingCatalogue maxdb %d", m_dbIdCurrent); return true; } @@ -450,23 +507,24 @@ bool ReadingsCatalogue::loadAssetReadingCatalogue() * Add the newly create db to the list * */ -void ReadingsCatalogue::setUsedDbId(int dbId) { - +void ReadingsCatalogue::setUsedDbId(int dbId) +{ m_dbIdList.push_back(dbId); } /** - * Preallocate all the needed database: + * Preallocate all the required databases: * * - Initial stage - creates the databases requested by the preallocation * - Following runs - attaches all the databases already created * */ -void ReadingsCatalogue::prepareAllDbs() { +void ReadingsCatalogue::prepareAllDbs() +{ int dbId, dbIdStart, dbIdEnd; - Logger::getLogger()->debug("prepareAllDbs - dbIdCurrent :%d: dbIdLast :%d: nDbPreallocate :%d:", m_dbIdCurrent, m_dbIdLast, m_storageConfigCurrent.nDbPreallocate); + Logger::getLogger()->debug("prepareAllDbs - dbIdCurrent %d dbIdLast %d nDbPreallocate %d", m_dbIdCurrent, 
m_dbIdLast, m_storageConfigCurrent.nDbPreallocate);
 	if (m_dbIdLast == 0)
 	{
@@ -476,10 +534,13 @@ void ReadingsCatalogue::prepareAllDbs() {
 		dbIdStart = 2;
 		dbIdEnd = dbIdStart + m_storageConfigCurrent.nDbPreallocate - 2;
-		preallocateNewDbsRange(dbIdStart, dbIdEnd);
-
-		m_dbIdLast = dbIdEnd;
-	} else
+		int created = preallocateNewDbsRange(dbIdStart, dbIdEnd);
+		if (created)
+		{
+			m_dbIdLast = dbIdStart + created - 1;
+		}
+	}
+	else
 	{
 		Logger::getLogger()->debug("prepareAllDbs - following runs");
@@ -493,7 +554,7 @@ void ReadingsCatalogue::prepareAllDbs() {
 	m_dbNAvailable = (m_dbIdLast - m_dbIdCurrent) - m_storageConfigCurrent.nDbLeftFreeBeforeAllocate;
-	Logger::getLogger()->debug("prepareAllDbs - dbNAvailable :%d:", m_dbNAvailable);
+	Logger::getLogger()->debug("prepareAllDbs - dbNAvailable %d", m_dbNAvailable);
 }
 /**
@@ -501,33 +562,46 @@ void ReadingsCatalogue::prepareAllDbs() {
 *
 * @param dbIdStart Range of the database to create
 * @param dbIdEnd Range of the database to create
+ * @return int The number of databases created
 *
 */
-void ReadingsCatalogue::preallocateNewDbsRange(int dbIdStart, int dbIdEnd) {
+int ReadingsCatalogue::preallocateNewDbsRange(int dbIdStart, int dbIdEnd) {
 	int dbId;
 	int startReadingsId;
 	tyReadingsAvailable readingsAvailable;
+	int created = 0;
-	Logger::getLogger()->debug("preallocateNewDbsRange - Id start :%d: Id end :%d: ", dbIdStart, dbIdEnd);
+	Logger::getLogger()->debug("preallocateNewDbsRange - Id start %d Id end %d ", dbIdStart, dbIdEnd);
 	for (dbId = dbIdStart; dbId <= dbIdEnd; dbId++)
 	{
 		readingsAvailable = evaluateLastReadingAvailable(NULL, dbId - 1);
 		startReadingsId = 1;
-		createNewDB(NULL, dbId, startReadingsId, NEW_DB_ATTACH_ALL);
+		if (!createNewDB(NULL, dbId, startReadingsId, NEW_DB_ATTACH_ALL))
+		{
+			Logger::getLogger()->error("Failed to preallocate all databases, terminated after creating %d databases", created);
+			break;
+		}
+		else
+		{
+			created++;
+		}
-		Logger::getLogger()->debug("preallocateNewDbsRange - db created :%d: startReadingsIdOnDB :%d:", dbId, startReadingsId);
+		Logger::getLogger()->debug("preallocateNewDbsRange - db created %d startReadingsIdOnDB %d", dbId, startReadingsId);
 	}
+	return created;
 }
 /**
- * Generates a list of all the used databases
+ * Generates a list of all the used databases. Note this list does not include
+ * the first database, readings_1, only the others that have been added.
* * @param dbIdList returned by reference, the list databases in use * */ -void ReadingsCatalogue::getAllDbs(vector &dbIdList) { +void ReadingsCatalogue::getAllDbs(vector &dbIdList) +{ int dbId; @@ -541,7 +615,7 @@ void ReadingsCatalogue::getAllDbs(vector &dbIdList) { if (std::find(dbIdList.begin(), dbIdList.end(), dbId) == dbIdList.end() ) { dbIdList.push_back(dbId); - Logger::getLogger()->debug("getAllDbs DB :%d:", dbId); + Logger::getLogger()->debug("getAllDbs DB %d", dbId); } } @@ -554,7 +628,7 @@ void ReadingsCatalogue::getAllDbs(vector &dbIdList) { if (std::find(dbIdList.begin(), dbIdList.end(), dbId) == dbIdList.end() ) { dbIdList.push_back(dbId); - Logger::getLogger()->debug("getAllDbs DB created :%d:", dbId); + Logger::getLogger()->debug("getAllDbs DB created %d", dbId); } } @@ -562,7 +636,7 @@ void ReadingsCatalogue::getAllDbs(vector &dbIdList) { } /** - * Retrieve the list of newly created db + * Retrieve the list of newly created databases * * @param dbIdList returned by reference, the list of new databases * @@ -576,7 +650,7 @@ void ReadingsCatalogue::getNewDbs(vector &dbIdList) { if (std::find(dbIdList.begin(), dbIdList.end(), dbId) == dbIdList.end() ) { dbIdList.push_back(dbId); - Logger::getLogger()->debug("getNewDbs - dbId :%d:", dbId); + Logger::getLogger()->debug("getNewDbs - dbId %d", dbId); } } @@ -584,7 +658,8 @@ void ReadingsCatalogue::getNewDbs(vector &dbIdList) { } /** - * Enable WAL on the provided database file + * Enable WAL mode on the given database file. This method will open and then + * close the database and does not use any existing connection. * * @param dbPathReadings Database path for which the WAL must be enabled * @@ -594,7 +669,7 @@ bool ReadingsCatalogue::enableWAL(string &dbPathReadings) { int rc; sqlite3 *dbHandle; - Logger::getLogger()->debug("enableWAL on :%s:", dbPathReadings.c_str()); + Logger::getLogger()->debug("enableWAL on '%s'", dbPathReadings.c_str()); rc = sqlite3_open(dbPathReadings.c_str(), &dbHandle); if(rc != SQLITE_OK) @@ -617,33 +692,42 @@ bool ReadingsCatalogue::enableWAL(string &dbPathReadings) { } /** - * Attach a database to all the connections, idle and inuse + * Attach a database to the database connection passed to the call * * @param dbHandle Database connection to use for the operations * @param path path of the database to attach * @param alias alias to be assigned to the attached database + * @param id the database ID */ -bool ReadingsCatalogue::attachDb(sqlite3 *dbHandle, std::string &path, std::string &alias) +bool ReadingsCatalogue::attachDb(sqlite3 *dbHandle, std::string &path, std::string &alias, int id) { - int rc; - std::string sqlCmd; - bool result; - char *zErrMsg = NULL; - - result = true; +int rc; +string sqlCmd; +bool result = true; +char *zErrMsg = NULL; sqlCmd = "ATTACH DATABASE '" + path + "' AS " + alias + ";"; - Logger::getLogger()->debug("attachDb - path :%s: alias :%s: cmd :%s:" , path.c_str(), alias.c_str() , sqlCmd.c_str() ); + Logger::getLogger()->debug("attachDb - path '%s' alias '%s' cmd '%s'" , path.c_str(), alias.c_str() , sqlCmd.c_str() ); rc = SQLExec (dbHandle, sqlCmd.c_str(), &zErrMsg); if (rc != SQLITE_OK) { - Logger::getLogger()->error("attachDb - It was not possible to attach the db :%s: to the connection :%X:, error :%s:", path.c_str(), dbHandle, zErrMsg); + Logger::getLogger()->error("Failed to attach the db '%s' to the connection %X, error '%s'", path.c_str(), dbHandle, zErrMsg); sqlite3_free(zErrMsg); result = false; } - return (result); + // See if the overflow table exists and if 
not create it + // This is a workaround as the schema update mechanism can't cope + // with multiple readings tables + sqlCmd = "select count(*) from " + alias + ".readings_overflow;"; + rc = SQLExec(dbHandle, sqlCmd.c_str(), &zErrMsg); + if (rc != SQLITE_OK) + { + createReadingsOverflowTable(dbHandle, id); + } + + return result; } /** @@ -660,11 +744,11 @@ void ReadingsCatalogue::detachDb(sqlite3 *dbHandle, std::string &alias) sqlCmd = "DETACH DATABASE " + alias + ";"; - Logger::getLogger()->debug("%s - db :%s: cmd :%s:" ,__FUNCTION__, alias.c_str() , sqlCmd.c_str() ); + Logger::getLogger()->debug("%s - db '%s' cmd '%s'" ,__FUNCTION__, alias.c_str() , sqlCmd.c_str() ); rc = SQLExec (dbHandle, sqlCmd.c_str(), &zErrMsg); if (rc != SQLITE_OK) { - Logger::getLogger()->error("%s - It was not possible to detach the db :%s: from the connection :%X:, error :%s:", __FUNCTION__, alias.c_str(), dbHandle, zErrMsg); + Logger::getLogger()->error("%s - It was not possible to detach the db '%s' from the connection %X, error '%s'", __FUNCTION__, alias.c_str(), dbHandle, zErrMsg); sqlite3_free(zErrMsg); } } @@ -688,25 +772,27 @@ bool ReadingsCatalogue::connectionAttachDbList(sqlite3 *dbHandle, vector &d result = true; - Logger::getLogger()->debug("connectionAttachDbList - start dbHandle :%X:" ,dbHandle); + Logger::getLogger()->debug("connectionAttachDbList - start dbHandle %X" ,dbHandle); - while (!dbIdList.empty()) + while (result && !dbIdList.empty()) { item = dbIdList.back(); dbPathReadings = generateDbFilePah(item); dbAlias = generateDbAlias(item); - Logger::getLogger()->debug("connectionAttachDbList - dbHandle :%X: dbId :%d: path :%s: alias :%s:",dbHandle, item, dbPathReadings.c_str(), dbAlias.c_str()); + Logger::getLogger()->debug( + "connectionAttachDbList - dbHandle %X dbId %d path %s alias %s", + dbHandle, item, dbPathReadings.c_str(), dbAlias.c_str()); - result = attachDb(dbHandle, dbPathReadings, dbAlias); + result = attachDb(dbHandle, dbPathReadings, dbAlias, item); dbIdList.pop_back(); } - Logger::getLogger()->debug("connectionAttachDbList - end dbHandle :%X:" ,dbHandle); + Logger::getLogger()->debug("connectionAttachDbList - end dbHandle %X" ,dbHandle); - return (result); + return result; } @@ -734,13 +820,16 @@ bool ReadingsCatalogue::connectionAttachAllDbs(sqlite3 *dbHandle) dbPathReadings = generateDbFilePah(item); dbAlias = generateDbAlias(item); - result = attachDb(dbHandle, dbPathReadings, dbAlias); + result = attachDb(dbHandle, dbPathReadings, dbAlias, item); if (! result) + { + Logger::getLogger()->error("Unable to attach all databases to the connection"); break; + } - Logger::getLogger()->debug("connectionAttachAllDbs - dbId :%d: path :%s: alias :%s:", item, dbPathReadings.c_str(), dbAlias.c_str()); + Logger::getLogger()->debug("connectionAttachAllDbs - dbId %d path %s alias %s", item, dbPathReadings.c_str(), dbAlias.c_str()); } - return (result); + return result; } @@ -762,10 +851,14 @@ bool ReadingsCatalogue::attachDbsToAllConnections() ConnectionManager *manager = ConnectionManager::getInstance(); Connection *connection = manager->allocate(); +#if TRACK_CONNECTION_USER + string usage = "Attach DBs to all connections"; + connection->setUsage(usage); +#endif getAllDbs(dbIdList); - for(int item : dbIdList) + for (int item : dbIdList) { dbPathReadings = generateDbFilePah(item); dbAlias = generateDbAlias(item); @@ -776,7 +869,7 @@ bool ReadingsCatalogue::attachDbsToAllConnections() if (! 
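
The attach path above now also checks for the per-database overflow table and creates it when missing, since the schema update mechanism cannot loop over every readings database. A compressed sketch of that attach-then-probe pattern is below, using the raw sqlite3 API; the table name is simplified to a single `readings_overflow` (the real code generates per-database names such as `readings_2_overflow`) and `createOverflowTable` is a cut-down stand-in for `createReadingsOverflowTable()`.

```cpp
#include <sqlite3.h>
#include <string>
#include <cstdio>

// Cut-down stand-in for createReadingsOverflowTable(): table only, no indexes.
static bool createOverflowTable(sqlite3 *db, const std::string& alias)
{
	std::string ddl = "CREATE TABLE IF NOT EXISTS " + alias + ".readings_overflow ("
			  "id INTEGER PRIMARY KEY AUTOINCREMENT,"
			  "asset_code TEXT NOT NULL,"
			  "reading JSON NOT NULL DEFAULT '{}');";
	return sqlite3_exec(db, ddl.c_str(), NULL, NULL, NULL) == SQLITE_OK;
}

// Attach a database file under an alias, then make sure its overflow table exists.
static bool attachWithOverflow(sqlite3 *db, const std::string& path, const std::string& alias)
{
	std::string attach = "ATTACH DATABASE '" + path + "' AS " + alias + ";";
	if (sqlite3_exec(db, attach.c_str(), NULL, NULL, NULL) != SQLITE_OK)
	{
		fprintf(stderr, "attach failed: %s\n", sqlite3_errmsg(db));
		return false;
	}
	// Probe the overflow table; if the query fails the table is assumed missing.
	std::string probe = "SELECT count(*) FROM " + alias + ".readings_overflow;";
	if (sqlite3_exec(db, probe.c_str(), NULL, NULL, NULL) != SQLITE_OK)
	{
		return createOverflowTable(db, alias);
	}
	return true;
}

int main()
{
	sqlite3 *db;
	if (sqlite3_open(":memory:", &db) != SQLITE_OK) return 1;
	bool ok = attachWithOverflow(db, "/tmp/readings_2.db", "readings_2");
	printf("attach %s\n", ok ? "ok" : "failed");
	sqlite3_close(db);
	return 0;
}
```
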
result) break; - Logger::getLogger()->debug("attachDbsToAllConnections - dbId :%d: path :%s: alias :%s:", item, dbPathReadings.c_str(), dbAlias.c_str()); + Logger::getLogger()->debug("attachDbsToAllConnections - dbId %d path '%s' alias '%s'", item, dbPathReadings.c_str(), dbAlias.c_str()); } manager->release(connection); @@ -796,9 +889,22 @@ void ReadingsCatalogue::multipleReadingsInit(STORAGE_CONFIGURATION &storageConfi ConnectionManager *manager = ConnectionManager::getInstance(); Connection *connection = manager->allocate(); +#if TRACK_CONNECTION_USER + string usage = "Multiple readings init"; + connection->setUsage(usage); +#endif + if (! connection->supportsReadings()) + { + manager->release(connection); + return; + } dbHandle = connection->getDbHandle(); - + // Enquire for the attached database limit + m_attachLimit = sqlite3_limit(dbHandle, SQLITE_LIMIT_ATTACHED, -1); + Logger::getLogger()->info("The version of SQLite can support %d attached databases", + m_attachLimit); + m_compounds = sqlite3_limit(dbHandle, SQLITE_LIMIT_COMPOUND_SELECT, -1); if (storageConfig.nDbLeftFreeBeforeAllocate < 1) { @@ -826,14 +932,14 @@ void ReadingsCatalogue::multipleReadingsInit(STORAGE_CONFIGURATION &storageConfi loadAssetReadingCatalogue(); preallocateReadingsTables(1); // on the first database - Logger::getLogger()->debug("nReadingsPerDb :%d:", m_storageConfigCurrent.nReadingsPerDb); - Logger::getLogger()->debug("nDbPreallocate :%d:", m_storageConfigCurrent.nDbPreallocate); + Logger::getLogger()->debug("nReadingsPerDb %d", m_storageConfigCurrent.nReadingsPerDb); + Logger::getLogger()->debug("nDbPreallocate %d", m_storageConfigCurrent.nDbPreallocate); prepareAllDbs(); applyStorageConfigChanges(dbHandle); - Logger::getLogger()->debug("multipleReadingsInit - dbIdCurrent :%d: dbIdLast :%d: nDbPreallocate current :%d: requested :%d:", + Logger::getLogger()->debug("multipleReadingsInit - dbIdCurrent %d dbIdLast %d nDbPreallocate current %d requested %d", m_dbIdCurrent, m_dbIdLast, m_storageConfigCurrent.nDbPreallocate, @@ -845,10 +951,12 @@ void ReadingsCatalogue::multipleReadingsInit(STORAGE_CONFIGURATION &storageConfi preallocateReadingsTables(0); // on the last database evaluateGlobalId(); + std::thread th(&ReadingsCatalogue::loadEmptyAssetReadingCatalogue,this,true); + th.detach(); } catch (exception& e) { - Logger::getLogger()->error("It is not possible to initialize the multiple readings handling, error :%s: ", e.what()); + Logger::getLogger()->error("It is not possible to initialize the multiple readings handling, error '%s' ", e.what()); } manager->release(connection); @@ -866,13 +974,13 @@ void ReadingsCatalogue::storeReadingsConfiguration (sqlite3 *dbHandle) string errMsg; string sql_cmd; - Logger::getLogger()->debug("storeReadingsConfiguration - nReadingsPerDb :%d: nDbPreallocate :%d:", m_storageConfigCurrent.nReadingsPerDb , m_storageConfigCurrent.nDbPreallocate); + Logger::getLogger()->debug("storeReadingsConfiguration - nReadingsPerDb %d nDbPreallocate %d", m_storageConfigCurrent.nReadingsPerDb , m_storageConfigCurrent.nDbPreallocate); sql_cmd = " UPDATE " READINGS_DB ".configuration_readings SET n_readings_per_db=" + to_string(m_storageConfigCurrent.nReadingsPerDb) + "," + "n_db_preallocate=" + to_string(m_storageConfigCurrent.nDbPreallocate) + "," + "db_id_Last=" + to_string(m_dbIdLast) + ";"; - Logger::getLogger()->debug("sql_cmd :%s:", sql_cmd.c_str()); + Logger::getLogger()->debug("sql_cmd '%s'", sql_cmd.c_str()); if (SQLExec(dbHandle, sql_cmd.c_str()) != SQLITE_OK) { @@ -900,13 
+1008,14 @@ void ReadingsCatalogue::configChangeAddDb(sqlite3 *dbHandle) startId = m_dbIdLast +1; endId = m_storageConfigApi.nDbPreallocate; - Logger::getLogger()->debug("configChangeAddDb - dbIdCurrent :%d: dbIdLast :%d: nDbPreallocate current :%d: requested :%d:", + Logger::getLogger()->debug("configChangeAddDb - dbIdCurrent %d dbIdLast %d nDbPreallocate current %d requested %d", m_dbIdCurrent, m_dbIdLast, m_storageConfigCurrent.nDbPreallocate, m_storageConfigApi.nDbPreallocate); - Logger::getLogger()->debug("configChangeAddDb - Id start :%d: Id end :%d: ", startId, endId); + Logger::getLogger()->debug("configChangeAddDb - Id start %d Id end %d ", startId, endId); + int created = 0; try { @@ -925,16 +1034,20 @@ void ReadingsCatalogue::configChangeAddDb(sqlite3 *dbHandle) errMsg = "Unable to add a new database"; throw runtime_error(errMsg.c_str()); } - Logger::getLogger()->debug("configChangeAddDb - db created :%d: startReadingsIdOnDB :%d:", dbId, startReadingsId); + else + { + created++; + } + Logger::getLogger()->debug("configChangeAddDb - db created %d startReadingsIdOnDB %d", dbId, startReadingsId); } } catch (exception& e) { - Logger::getLogger()->error("It is not possible to add the requested databases, error :%s: - removing created databases", e.what()); + Logger::getLogger()->error("It is not possible to add the requested databases, error '%s' - removing created databases", e.what()); dbsRemove(startId , endId); } - m_dbIdLast = m_storageConfigApi.nDbPreallocate; + m_dbIdLast = startId + created - 1; m_storageConfigCurrent.nDbPreallocate = m_storageConfigApi.nDbPreallocate; m_dbNAvailable = (m_dbIdLast - m_dbIdCurrent) - m_storageConfigCurrent.nDbLeftFreeBeforeAllocate; } @@ -956,14 +1069,14 @@ void ReadingsCatalogue::configChangeRemoveDb(sqlite3 *dbHandle) ConnectionManager *manager = ConnectionManager::getInstance(); - Logger::getLogger()->debug("configChangeRemoveDb - dbIdCurrent :%d: dbIdLast :%d: nDbPreallocate current :%d: requested :%d:", + Logger::getLogger()->debug("configChangeRemoveDb - dbIdCurrent %d dbIdLast %d nDbPreallocate current %d requested %d", m_dbIdCurrent, m_dbIdLast, m_storageConfigCurrent.nDbPreallocate, m_storageConfigApi.nDbPreallocate); - Logger::getLogger()->debug("configChangeRemoveDb - Id start :%d: Id end :%d: ", m_dbIdCurrent, m_storageConfigApi.nDbPreallocate); + Logger::getLogger()->debug("configChangeRemoveDb - Id start %d Id end %d ", m_dbIdCurrent, m_storageConfigApi.nDbPreallocate); dbsRemove(m_storageConfigApi.nDbPreallocate + 1, m_dbIdLast); @@ -990,7 +1103,7 @@ void ReadingsCatalogue::configChangeAddTables(sqlite3 *dbHandle, int startId, in nTables = endId - startId +1; - Logger::getLogger()->debug("%s - startId :%d: endId :%d: nTables :%d:", + Logger::getLogger()->debug("%s - startId %d endId %d nTables %d", __FUNCTION__, startId, endId, @@ -998,7 +1111,7 @@ void ReadingsCatalogue::configChangeAddTables(sqlite3 *dbHandle, int startId, in for (dbId = 1; dbId <= m_dbIdLast ; dbId++ ) { - Logger::getLogger()->debug("%s - configChangeAddTables - dbId :%d: startId :%d: nTables :%d:", + Logger::getLogger()->debug("%s - configChangeAddTables - dbId %d startId %d nTables %d", __FUNCTION__, dbId, startId, @@ -1010,7 +1123,7 @@ void ReadingsCatalogue::configChangeAddTables(sqlite3 *dbHandle, int startId, in maxReadingUsed = calcMaxReadingUsed(); m_nReadingsAvailable = m_storageConfigCurrent.nReadingsPerDb - maxReadingUsed; - Logger::getLogger()->debug("%s - maxReadingUsed :%d: nReadingsPerDb :%d: m_nReadingsAvailable :%d:", + 
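
The initialisation path introduced above asks SQLite itself how many databases a connection may attach and how many SELECTs a compound statement may contain, rather than hard-coding those limits. A standalone sketch of that enquiry, under the assumption that only the current limits are wanted (passing -1 leaves them unchanged):

```cpp
#include <sqlite3.h>
#include <cstdio>

int main()
{
	sqlite3 *db;
	if (sqlite3_open(":memory:", &db) != SQLITE_OK) return 1;

	// A negative new value leaves the limit untouched and returns the current
	// setting, which is how multipleReadingsInit() discovers both limits.
	int attachLimit   = sqlite3_limit(db, SQLITE_LIMIT_ATTACHED, -1);
	int compoundLimit = sqlite3_limit(db, SQLITE_LIMIT_COMPOUND_SELECT, -1);

	printf("Attached databases supported: %d\n", attachLimit);
	printf("Max SELECTs in a compound statement: %d\n", compoundLimit);

	sqlite3_close(db);
	return 0;
}
```
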
Logger::getLogger()->debug("%s - maxReadingUsed %d nReadingsPerDb %d m_nReadingsAvailable %d", __FUNCTION__, maxReadingUsed, m_storageConfigCurrent.nReadingsPerDb, @@ -1030,14 +1143,14 @@ void ReadingsCatalogue::configChangeRemoveTables(sqlite3 *dbHandle, int startId, int dbId; int maxReadingUsed; - Logger::getLogger()->debug("%s - startId :%d: endId :%d:", + Logger::getLogger()->debug("%s - startId %d endId %d", __FUNCTION__, startId, endId); for (dbId = 1; dbId <= m_dbIdLast ; dbId++ ) { - Logger::getLogger()->debug("%s - configChangeRemoveTables - dbId :%d: startId :%d: endId :%d:", + Logger::getLogger()->debug("%s - configChangeRemoveTables - dbId %d startId %d endId %d", __FUNCTION__, dbId, startId, @@ -1049,7 +1162,7 @@ void ReadingsCatalogue::configChangeRemoveTables(sqlite3 *dbHandle, int startId, maxReadingUsed = calcMaxReadingUsed(); m_nReadingsAvailable = m_storageConfigCurrent.nReadingsPerDb - maxReadingUsed; - Logger::getLogger()->debug("%s - maxReadingUsed :%d: nReadingsPerDb :%d: m_nReadingsAvailable :%d:", + Logger::getLogger()->debug("%s - maxReadingUsed %d nReadingsPerDb %d m_nReadingsAvailable %d", __FUNCTION__, maxReadingUsed, m_storageConfigCurrent.nReadingsPerDb, @@ -1076,7 +1189,7 @@ void ReadingsCatalogue::dropReadingsTables(sqlite3 *dbHandle, int dbId, int idS int idx; bool newConnection; - Logger::getLogger()->debug("%s - dropping tales on database id :%d:form id :%d: to :%d:", __FUNCTION__, dbId, idStart, idEnd); + Logger::getLogger()->debug("%s - dropping tables on database id %d from id %d to %d", __FUNCTION__, dbId, idStart, idEnd); dbName = generateDbName(dbId); @@ -1125,14 +1238,14 @@ void ReadingsCatalogue::dbsRemove(int startId, int endId) ConnectionManager *manager = ConnectionManager::getInstance(); - Logger::getLogger()->debug("dbsRemove - startId :%d: endId :%d:", startId, endId); + Logger::getLogger()->debug("dbsRemove - startId %d endId %d", startId, endId); for (dbId = startId; dbId <= endId; dbId++) { dbAlias = generateDbAlias(dbId); dbPath = generateDbFilePah(dbId); - Logger::getLogger()->debug("dbsRemove - db alias :%s: db path :%s:", dbAlias.c_str(), dbPath.c_str()); + Logger::getLogger()->debug("dbsRemove - db alias '%s' db path '%s'", dbAlias.c_str(), dbPath.c_str()); manager->detachNewDb(dbAlias); dbFileDelete(dbPath); @@ -1150,7 +1263,7 @@ void ReadingsCatalogue::dbFileDelete(string dbPath) string errMsg; bool success; - Logger::getLogger()->debug("dbFileDelete - db path :%s:", dbPath.c_str()); + Logger::getLogger()->debug("dbFileDelete - db path '%s'", dbPath.c_str()); if (remove (dbPath.c_str()) !=0) { @@ -1173,7 +1286,7 @@ bool ReadingsCatalogue::applyStorageConfigChanges(sqlite3 *dbHandle) configChanged = false; - Logger::getLogger()->debug("applyStorageConfigChanges - dbIdCurrent :%d: dbIdLast :%d: nDbPreallocate current :%d: requested :%d: nDbLeftFreeBeforeAllocate :%d:", + Logger::getLogger()->debug("applyStorageConfigChanges - dbIdCurrent %d dbIdLast %d nDbPreallocate current %d requested %d nDbLeftFreeBeforeAllocate %d", m_dbIdCurrent, m_dbIdLast, m_storageConfigCurrent.nDbPreallocate, @@ -1196,17 +1309,17 @@ bool ReadingsCatalogue::applyStorageConfigChanges(sqlite3 *dbHandle) { if (operation == ACTION_DB_ADD) { - Logger::getLogger()->debug("applyStorageConfigChanges - parameters nDbPreallocate changed, adding more databases from :%d: to :%d:", m_dbIdLast, m_storageConfigApi.nDbPreallocate); + Logger::getLogger()->debug("applyStorageConfigChanges - parameters nDbPreallocate changed, adding more databases from %d to %d", m_dbIdLast, 
m_storageConfigApi.nDbPreallocate); configChanged = true; configChangeAddDb(dbHandle); } else if (operation == ACTION_INVALID) { - Logger::getLogger()->warn("applyStorageConfigChanges: parameter nDbPreallocate changed, but it is not possible to apply the change as there are already data stored in the database id :%d:, use a larger value", m_dbIdCurrent); + Logger::getLogger()->warn("applyStorageConfigChanges: parameter nDbPreallocate changed, but it is not possible to apply the change as there are already data stored in the database id %d, use a larger value", m_dbIdCurrent); } else if (operation == ACTION_DB_REMOVE) { - Logger::getLogger()->debug("applyStorageConfigChanges - parameters nDbPreallocate changed, removing databases from :%d: to :%d:", m_storageConfigApi.nDbPreallocate, m_dbIdLast); + Logger::getLogger()->debug("applyStorageConfigChanges - parameters nDbPreallocate changed, removing databases from %d to %d", m_storageConfigApi.nDbPreallocate, m_dbIdLast); configChanged = true; configChangeRemoveDb(dbHandle); } else @@ -1226,7 +1339,7 @@ bool ReadingsCatalogue::applyStorageConfigChanges(sqlite3 *dbHandle) m_storageConfigCurrent.nReadingsPerDb, m_storageConfigApi.nReadingsPerDb); - Logger::getLogger()->debug("%s - maxReadingUsed :%d: Current :%d: Requested :%d:", + Logger::getLogger()->debug("%s - maxReadingUsed %d Current %d Requested %d", __FUNCTION__, maxReadingUsed, m_storageConfigCurrent.nReadingsPerDb, @@ -1241,13 +1354,13 @@ bool ReadingsCatalogue::applyStorageConfigChanges(sqlite3 *dbHandle) startId = m_storageConfigCurrent.nReadingsPerDb +1; endId = m_storageConfigApi.nReadingsPerDb; - Logger::getLogger()->debug("applyStorageConfigChanges - parameters nReadingsPerDb changed, adding more tables from :%d: to :%d:", startId, endId); + Logger::getLogger()->debug("applyStorageConfigChanges - parameters nReadingsPerDb changed, adding more tables from %d to %d", startId, endId); configChanged = true; configChangeAddTables(dbHandle, startId, endId); } else if (operation == ACTION_INVALID) { - Logger::getLogger()->warn("applyStorageConfigChanges: parameter nReadingsPerDb changed, but it is not possible to apply the change as there are already data stored in the table id :%d:, use a larger value", maxReadingUsed); + Logger::getLogger()->warn("applyStorageConfigChanges: parameter nReadingsPerDb changed, but it is not possible to apply the change as there are already data stored in the table id %d, use a larger value", maxReadingUsed); } else if (operation == ACTION_TB_REMOVE) { @@ -1256,7 +1369,7 @@ bool ReadingsCatalogue::applyStorageConfigChanges(sqlite3 *dbHandle) startId = m_storageConfigApi.nReadingsPerDb +1; endId = m_storageConfigCurrent.nReadingsPerDb; - Logger::getLogger()->debug("applyStorageConfigChanges - parameters nReadingsPerDb changed, removing tables from :%d: to :%d:", m_storageConfigApi.nReadingsPerDb +1, m_storageConfigCurrent.nReadingsPerDb); + Logger::getLogger()->debug("applyStorageConfigChanges - parameters nReadingsPerDb changed, removing tables from %d to %d", m_storageConfigApi.nReadingsPerDb +1, m_storageConfigCurrent.nReadingsPerDb); configChanged = true; configChangeRemoveTables(dbHandle, startId, endId); } else @@ -1273,7 +1386,7 @@ bool ReadingsCatalogue::applyStorageConfigChanges(sqlite3 *dbHandle) } catch (exception& e) { - Logger::getLogger()->error("It is not possible to apply the chnages to the multi readings handling, error :%s: ", e.what()); + Logger::getLogger()->error("It is not possible to apply the changes to the multi readings handling, 
error '%s' ", e.what()); } return configChanged; @@ -1287,11 +1400,10 @@ bool ReadingsCatalogue::applyStorageConfigChanges(sqlite3 *dbHandle) */ int ReadingsCatalogue::calcMaxReadingUsed() { - int maxReading; - maxReading = 0; - - for (auto &item : m_AssetReadingCatalogue) { + int maxReading = 0; + for (auto &item : m_AssetReadingCatalogue) + { if (item.second.first > maxReading) maxReading = item.second.first; } @@ -1313,7 +1425,7 @@ ReadingsCatalogue::ACTION ReadingsCatalogue::changesLogicTables(int maxUsed ,in { ACTION operation; - Logger::getLogger()->debug("%s - maxUsed :%d: Request :%d: Request current :%d:", + Logger::getLogger()->debug("%s - maxUsed %d Request %d Request current %d", __FUNCTION__, maxUsed, Current, @@ -1378,7 +1490,7 @@ ReadingsCatalogue::ACTION ReadingsCatalogue::changesLogicDBs(int dbIdCurrent , i /** - * Creates all the needed readings tables considering the tables already defined in the database + * Creates all the required readings tables considering the tables already defined in the database * and the number of tables to have on each database. * * @param dbId Database Id in which the table must be created @@ -1411,13 +1523,12 @@ void ReadingsCatalogue::preallocateReadingsTables(int dbId) startId = 2; else startId = 1; - createReadingsTables(NULL, dbId, startId, readingsToCreate); } m_nReadingsAvailable = readingsToAllocate - getUsedTablesDbId(dbId); - Logger::getLogger()->debug("preallocateReadingsTables - dbId :%d: nReadingsAvailable :%d: lastReadingsCreated :%d: tableCount :%d:", m_dbIdCurrent, m_nReadingsAvailable, readingsAvailable.lastReadings, readingsAvailable.tableCount); + Logger::getLogger()->debug("preallocateReadingsTables - dbId %d nReadingsAvailable %d lastReadingsCreated %d tableCount %d", m_dbIdCurrent, m_nReadingsAvailable, readingsAvailable.lastReadings, readingsAvailable.tableCount); } /** @@ -1467,7 +1578,7 @@ bool ReadingsCatalogue::latestDbUpdate(sqlite3 *dbHandle, int newDbId) { string sql_cmd; - Logger::getLogger()->debug("latestDbUpdate - dbHandle :%X: newDbId :%d:", dbHandle, newDbId); + Logger::getLogger()->debug("latestDbUpdate - dbHandle %X newDbId %d", dbHandle, newDbId); { sql_cmd = " UPDATE " READINGS_DB ".configuration_readings SET db_id_Last=" + to_string(newDbId) + ";"; @@ -1515,32 +1626,40 @@ bool ReadingsCatalogue::createNewDB(sqlite3 *dbHandle, int newDbId, int startId ConnectionManager *manager = ConnectionManager::getInstance(); + // Are there enough descriptors available to create another database + if (!manager->allowMoreDatabases()) + { + return false; + } + if (dbHandle == NULL) { connection = manager->allocate(); +#if TRACK_CONNECTION_USER + string usage = "Create New database"; + connection->setUsage(usage); +#endif dbHandle = connection->getDbHandle(); connAllocated = true; } // Creates the DB data file - { - dbPathReadings = generateDbFilePah(newDbId); + dbPathReadings = generateDbFilePah(newDbId); - dbAlreadyPresent = false; - if(stat(dbPathReadings.c_str(),&st) == 0) - { - Logger::getLogger()->info("createNewDB - database file :%s: already present, creation skipped " , dbPathReadings.c_str() ); - dbAlreadyPresent = true; - } - else - { - Logger::getLogger()->debug("createNewDB - new database created :%s:", dbPathReadings.c_str()); - } - enableWAL(dbPathReadings); + dbAlreadyPresent = false; + if(stat(dbPathReadings.c_str(),&st) == 0) + { + Logger::getLogger()->info("createNewDB - database file '%s' already present, creation skipped " , dbPathReadings.c_str() ); + dbAlreadyPresent = true; + } + else + { + 
Logger::getLogger()->debug("createNewDB - new database created '%s'", dbPathReadings.c_str()); + } + enableWAL(dbPathReadings); - latestDbUpdate(dbHandle, newDbId); + latestDbUpdate(dbHandle, newDbId); - } readingsToAllocate = getNReadingsAllocate(); readingsToCreate = readingsToAllocate; @@ -1556,13 +1675,13 @@ bool ReadingsCatalogue::createNewDB(sqlite3 *dbHandle, int newDbId, int startId { Logger::getLogger()->debug("createNewDB - attach single"); - result = attachDb(dbHandle, dbPathReadings, dbAlias); + result = attachDb(dbHandle, dbPathReadings, dbAlias, newDbId); result = manager->attachRequestNewDb(newDbId, dbHandle); } else if (attachAllDb == NEW_DB_DETACH) { Logger::getLogger()->debug("createNewDB - attach"); - result = attachDb(dbHandle, dbPathReadings, dbAlias); + result = attachDb(dbHandle, dbPathReadings, dbAlias, newDbId); } if (result) @@ -1575,14 +1694,14 @@ bool ReadingsCatalogue::createNewDB(sqlite3 *dbHandle, int newDbId, int startId if (readingsAvailable.lastReadings == -1) { - Logger::getLogger()->error("createNewDB - database file :%s: is already present but it is not possible to evaluate the readings table already present" , dbPathReadings.c_str() ); + Logger::getLogger()->error("createNewDB - database file '%s' is already present but it is not possible to evaluate the readings table already present" , dbPathReadings.c_str() ); result = false; } else { readingsToCreate = readingsToAllocate - readingsAvailable.tableCount; startId = readingsAvailable.lastReadings +1; - Logger::getLogger()->info("createNewDB - database file :%s: is already present, creating readings tables - from id :%d: n :%d: " , dbPathReadings.c_str(), startId, readingsToCreate); + Logger::getLogger()->info("createNewDB - database file '%s' is already present, creating readings tables - from id %d n %d " , dbPathReadings.c_str(), startId, readingsToCreate); } } @@ -1591,11 +1710,14 @@ bool ReadingsCatalogue::createNewDB(sqlite3 *dbHandle, int newDbId, int startId startId = 1; createReadingsTables(dbHandle, newDbId ,startId, readingsToCreate); - Logger::getLogger()->info("createNewDB - database file :%s: created readings table - from id :%d: n :%d: " , dbPathReadings.c_str(), startId, readingsToCreate); + Logger::getLogger()->info("createNewDB - database file '%s' created readings table - from id %d n %d " , dbPathReadings.c_str(), startId, readingsToCreate); } m_nReadingsAvailable = readingsToAllocate; } + // Create the overflow table in the new database + createReadingsOverflowTable(dbHandle, newDbId); + if (attachAllDb == NEW_DB_DETACH) { Logger::getLogger()->debug("createNewDB - deattach"); @@ -1638,11 +1760,15 @@ bool ReadingsCatalogue::createReadingsTables(sqlite3 *dbHandle, int dbId, int i if (dbHandle == NULL) { connection = manager->allocate(); +#if TRACK_CONNECTION_USER + string usage = "Create Readings Tables"; + connection->setUsage(usage); +#endif dbHandle = connection->getDbHandle(); newConnection = true; } - logger->info("Creating :%d: readings table in advance starting id :%d:", nTables, idStartFrom); + logger->info("Creating %d readings table in advance starting id %d", nTables, idStartFrom); dbName = generateDbName(dbId); @@ -1652,7 +1778,7 @@ bool ReadingsCatalogue::createReadingsTables(sqlite3 *dbHandle, int dbId, int i dbReadingsName = generateReadingsName(dbId, tableId); createReadings = R"( - CREATE TABLE )" + dbName + "." + dbReadingsName + R"( ( + CREATE TABLE IF NOT EXISTS )" + dbName + "." 
+ dbReadingsName + R"( ( id INTEGER PRIMARY KEY AUTOINCREMENT, reading JSON NOT NULL DEFAULT '{}', user_ts DATETIME DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f+00:00', 'NOW')), @@ -1661,15 +1787,19 @@ bool ReadingsCatalogue::createReadingsTables(sqlite3 *dbHandle, int dbId, int i )"; createReadingsIdx = R"( - CREATE INDEX )" + dbName + "." + dbReadingsName + R"(_ix3 ON )" + dbReadingsName + R"( (user_ts); + CREATE INDEX IF NOT EXISTS )" + dbName + "." + dbReadingsName + R"(_ix3 ON )" + dbReadingsName + R"( (user_ts); )"; - logger->info(" Creating table :%s: sql cmd :%s:", dbReadingsName.c_str(), createReadings.c_str()); + logger->info(" Creating table '%s' sql cmd '%s'", dbReadingsName.c_str(), createReadings.c_str()); rc = SQLExec(dbHandle, createReadings.c_str()); if (rc != SQLITE_OK) { raiseError("createReadingsTables", sqlite3_errmsg(dbHandle)); + if (newConnection) + { + manager->release(connection); + } return false; } @@ -1677,6 +1807,10 @@ bool ReadingsCatalogue::createReadingsTables(sqlite3 *dbHandle, int dbId, int i if (rc != SQLITE_OK) { raiseError("createReadingsTables", sqlite3_errmsg(dbHandle)); + if (newConnection) + { + manager->release(connection); + } return false; } } @@ -1688,6 +1822,82 @@ bool ReadingsCatalogue::createReadingsTables(sqlite3 *dbHandle, int dbId, int i return true; } +/** + * Create the overflow reading tables in the given database id + * + * We should only do this once when we upgrade to the version with an + * overflow table. Although this should ideally be done in the schema + * update script we can't do this as we can not loop over all the + * databases in that script. + * + * @param dbHandle Database connection to use for the operation + * + */ +bool ReadingsCatalogue::createReadingsOverflowTable(sqlite3 *dbHandle, int dbId) +{ + string dbReadingsName; + + Logger *logger = Logger::getLogger(); + + ConnectionManager *manager = ConnectionManager::getInstance(); + + string dbName = generateDbName(dbId); + logger->info("Creating reading overflow table for database '%s'", dbName.c_str()); + + dbReadingsName = string(READINGS_TABLE) + "_" + to_string(dbId); + dbReadingsName.append("_overflow"); + + string createReadings = R"( + CREATE TABLE IF NOT EXISTS )" + dbName + "." + dbReadingsName + R"( ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + asset_code CHARACTER varying(50) NOT NULL, + reading JSON NOT NULL DEFAULT '{}', + user_ts DATETIME DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f+00:00', 'NOW')), + ts DATETIME DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f+00:00', 'NOW')) + ); + )"; + + string createReadingsIdx1 = R"( + CREATE INDEX IF NOT EXISTS )" + dbName + "." + dbReadingsName + R"(_ix1 ON )" + dbReadingsName + R"( (asset_code, user_ts desc); + )"; + string createReadingsIdx2 = R"( + CREATE INDEX IF NOT EXISTS )" + dbName + "." + dbReadingsName + R"(_ix2 ON )" + dbReadingsName + R"( (asset_code); + )"; + string createReadingsIdx3 = R"( + CREATE INDEX IF NOT EXISTS )" + dbName + "." 
+ dbReadingsName + R"(_ix3 ON )" + dbReadingsName + R"( (user_ts); + )"; + + logger->info(" Creating table '%s' sql cmd '%s'", dbReadingsName.c_str(), createReadings.c_str()); + + int rc = SQLExec(dbHandle, createReadings.c_str()); + if (rc != SQLITE_OK) + { + raiseError("creating overflow table", sqlite3_errmsg(dbHandle)); + return false; + } + + rc = SQLExec(dbHandle, createReadingsIdx1.c_str()); + if (rc != SQLITE_OK) + { + raiseError("creating overflow table index 1", sqlite3_errmsg(dbHandle)); + return false; + } + rc = SQLExec(dbHandle, createReadingsIdx2.c_str()); + if (rc != SQLITE_OK) + { + raiseError("creating overflow table index 2", sqlite3_errmsg(dbHandle)); + return false; + } + rc = SQLExec(dbHandle, createReadingsIdx3.c_str()); + if (rc != SQLITE_OK) + { + raiseError("creating overflow table index 3", sqlite3_errmsg(dbHandle)); + return false; + } + + return true; +} + /** * Evaluates the latest reading table defined in the provided database id looking at sqlite_master, the SQLite repository * @@ -1710,18 +1920,19 @@ ReadingsCatalogue::tyReadingsAvailable ReadingsCatalogue::evaluateLastReadingAv tyReadingsAvailable readingsAvailable; Connection *connection; - bool connAllocated; - - connAllocated = false; + bool connAllocated = false; vector readingsId(getNReadingsAvailable(), 0); ConnectionManager *manager = ConnectionManager::getInstance(); - if (dbHandle == NULL) { connection = manager->allocate(); +#if TRACK_CONNECTION_USER + string usage = "Evaluate last reading available"; + connection->setUsage(usage); +#endif dbHandle = connection->getDbHandle(); connAllocated = true; } @@ -1750,14 +1961,17 @@ ReadingsCatalogue::tyReadingsAvailable ReadingsCatalogue::evaluateLastReadingAv nCols = sqlite3_column_count(stmt); tableName = (char *)sqlite3_column_text(stmt, 0); - id = extractReadingsIdFromName(tableName); + if (tableName.find_first_of("overflow") == string::npos) + { + id = extractReadingsIdFromName(tableName); - if (id > readingsAvailable.lastReadings) - readingsAvailable.lastReadings = id; + if (id > readingsAvailable.lastReadings) + readingsAvailable.lastReadings = id; - readingsAvailable.tableCount++; + readingsAvailable.tableCount++; + } } - Logger::getLogger()->debug("evaluateLastReadingAvailable - tableName :%s: lastReadings :%d:", tableName.c_str(), readingsAvailable.lastReadings); + Logger::getLogger()->debug("evaluateLastReadingAvailable - tableName '%s' lastReadings %d", tableName.c_str(), readingsAvailable.lastReadings); sqlite3_finalize(stmt); } @@ -1825,18 +2039,19 @@ ReadingsCatalogue::tyReadingReference ReadingsCatalogue::getReadingReference(Co auto item = m_AssetReadingCatalogue.find(asset_code); if (item != m_AssetReadingCatalogue.end()) { - //# An asset already managed + //# The asset is already allocated to a table ref.tableId = item->second.first; ref.dbId = item->second.second; } else { - Logger::getLogger()->debug("getReadingReference - before lock dbHandle :%X: threadId :%s:", dbHandle, threadId.str().c_str() ); + Logger::getLogger()->debug("getReadingReference - before lock dbHandle %X threadId '%s'", dbHandle, threadId.str().c_str() ); AttachDbSync *attachSync = AttachDbSync::getInstance(); attachSync->lock(); + ReadingsCatalogue::tyReadingReference emptyTableReference = {-1, -1}; - + std::string emptyAsset = {}; auto item = m_AssetReadingCatalogue.find(asset_code); if (item != m_AssetReadingCatalogue.end()) { @@ -1845,106 +2060,151 @@ ReadingsCatalogue::tyReadingReference ReadingsCatalogue::getReadingReference(Co } else { - //# Allocate a new 
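
Unlike the per-asset `readings_<db>_<table>` tables, the overflow table created above keeps an `asset_code` column because it mixes readings from many assets. A hedged sketch of what an insert into such a table might look like; the table name, database handling (no attached alias here) and values are illustrative and do not reproduce the plugin's actual append path.

```cpp
#include <sqlite3.h>
#include <string>
#include <cstdio>

// Illustrative insert into an overflow table; the real plugin prefixes the attached
// database alias (e.g. readings_2.readings_2_overflow) and batches appends.
static bool insertOverflowReading(sqlite3 *db, const std::string& table,
				  const std::string& assetCode, const std::string& readingJson)
{
	std::string sql = "INSERT INTO " + table + " (asset_code, reading) VALUES (?, ?);";
	sqlite3_stmt *stmt;
	if (sqlite3_prepare_v2(db, sql.c_str(), -1, &stmt, NULL) != SQLITE_OK)
		return false;
	sqlite3_bind_text(stmt, 1, assetCode.c_str(), -1, SQLITE_TRANSIENT);
	sqlite3_bind_text(stmt, 2, readingJson.c_str(), -1, SQLITE_TRANSIENT);
	bool ok = (sqlite3_step(stmt) == SQLITE_DONE);
	sqlite3_finalize(stmt);
	return ok;
}

int main()
{
	sqlite3 *db;
	if (sqlite3_open(":memory:", &db) != SQLITE_OK) return 1;
	// Cut-down version of the overflow DDL above, just enough for the demo insert.
	sqlite3_exec(db, "CREATE TABLE readings_1_overflow ("
			 "id INTEGER PRIMARY KEY AUTOINCREMENT,"
			 "asset_code TEXT NOT NULL,"
			 "reading JSON NOT NULL DEFAULT '{}');", NULL, NULL, NULL);
	bool ok = insertOverflowReading(db, "readings_1_overflow", "pump1", "{\"flow\": 1.3}");
	printf("insert %s\n", ok ? "ok" : "failed");
	sqlite3_close(db);
	return 0;
}
```
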
block of readings table - if (! isReadingAvailable () ) + + if (! isReadingAvailable ()) { - Logger::getLogger()->debug("getReadingReference - allocate a new db, dbNAvailable :%d:", m_dbNAvailable); - - if (m_dbNAvailable > 0) + // No Reading table available... Get empty reading table + emptyTableReference = getEmptyReadingTableReference(emptyAsset); + if ( !emptyAsset.empty() ) { - // DBs already created are available - m_dbIdCurrent++; - m_dbNAvailable--; - m_nReadingsAvailable = getNReadingsAllocate(); - - Logger::getLogger()->debug("getReadingReference - allocate a new db, db already available - dbIdCurrent :%d: dbIdLast :%d: dbNAvailable :%d: nReadingsAvailable :%d: ", m_dbIdCurrent, m_dbIdLast, m_dbNAvailable, m_nReadingsAvailable); + ref = emptyTableReference; } - else + else { - // Allocates new DBs - int dbId, dbIdStart, dbIdEnd; + //# Allocate a new block of readings table + Logger::getLogger()->debug("Allocating a new db form the preallocated tables. %d preallocated tables available.", m_dbNAvailable); - dbIdStart = m_dbIdLast +1; - dbIdEnd = m_dbIdLast + m_storageConfigCurrent.nDbToAllocate; - - Logger::getLogger()->debug("getReadingReference - allocate a new db - create new db - dbIdCurrent :%d: dbIdStart :%d: dbIdEnd :%d:", m_dbIdCurrent, dbIdStart, dbIdEnd); + if (m_dbNAvailable > 0) + { + // DBs already pre-allocated are available + m_dbIdCurrent++; + m_dbNAvailable--; + m_nReadingsAvailable = getNReadingsAllocate(); - for (dbId = dbIdStart; dbId <= dbIdEnd; dbId++) + Logger::getLogger()->debug("Allocate dbIdCurrent %d dbIdLast %d dbNAvailable %d nReadingsAvailable %d ", m_dbIdCurrent, m_dbIdLast, m_dbNAvailable, m_nReadingsAvailable); + } + else { - readingsAvailable = evaluateLastReadingAvailable(dbHandle, dbId - 1); + // There are no pre-allocated databases available + // Allocates new DBs + int dbId, dbIdStart, dbIdEnd, allocated = 0; - startReadingsId = 1; + dbIdStart = m_dbIdLast +1; + dbIdEnd = m_dbIdLast + m_storageConfigCurrent.nDbToAllocate; - if (!getEmptyReadingTableReference(emptyTableReference)) + Logger::getLogger()->debug("getReadingReference - allocate a new db - create new db - dbIdCurrent %d dbIdStart %d dbIdEnd %d", m_dbIdCurrent, dbIdStart, dbIdEnd); + + for (dbId = dbIdStart; dbId <= dbIdEnd; dbId++) { + readingsAvailable = evaluateLastReadingAvailable(dbHandle, dbId - 1); + + startReadingsId = 1; + success = createNewDB(dbHandle, dbId, startReadingsId, NEW_DB_ATTACH_REQUEST); if (success) { - Logger::getLogger()->debug("getReadingReference - allocate a new db - create new dbs - dbId :%d: startReadingsIdOnDB :%d:", dbId, startReadingsId); + Logger::getLogger()->debug("getReadingReference - allocate a new db - create new dbs - dbId %d startReadingsIdOnDB %d", dbId, startReadingsId); + allocated++; } + else + { + break; + } + } + if (allocated) + { + m_dbIdLast += allocated; + m_dbIdCurrent++; + m_dbNAvailable += (allocated - 1); } - } - m_dbIdLast = dbIdEnd; - m_dbIdCurrent++; - m_dbNAvailable = (m_dbIdLast - m_dbIdCurrent) - m_storageConfigCurrent.nDbLeftFreeBeforeAllocate; - } - ref.tableId = -1; - ref.dbId = -1; + ref.tableId = -1; + ref.dbId = -1; + } + } if (success) + // Associate a reading table to the asset { - // Associate a reading table to the asset + // Associate the asset to the reading_id + if (emptyAsset.empty()) { - // Associate the asset to the reading_id - { - if (emptyTableReference.tableId > 0) - { - ref.tableId = emptyTableReference.tableId; - ref.dbId = emptyTableReference.dbId; - } - else - { - ref.tableId = 
getMaxReadingsId(m_dbIdCurrent) + 1; - ref.dbId = m_dbIdCurrent; - } - - auto newItem = make_pair(ref.tableId, ref.dbId); - auto newMapValue = make_pair(asset_code, newItem); - m_AssetReadingCatalogue.insert(newMapValue); - } + ref.tableId = getMaxReadingsId(m_dbIdCurrent) + 1; + ref.dbId = m_dbIdCurrent; + } - Logger::getLogger()->debug("getReadingReference - allocate a new reading table for the asset :%s: db Id :%d: readings Id :%d: ", asset_code, ref.dbId, ref.tableId); + { + m_EmptyAssetReadingCatalogue.erase(emptyAsset); + m_AssetReadingCatalogue.erase(emptyAsset); + auto newItem = make_pair(ref.tableId, ref.dbId); + auto newMapValue = make_pair(asset_code, newItem); + m_AssetReadingCatalogue.insert(newMapValue); + } - // Allocate the table in the reading catalogue - { - if (emptyTableReference.tableId > 0) - { + // Allocate the table in the reading catalogue + if (emptyAsset.empty()) + { + sql_cmd = + "INSERT INTO " READINGS_DB ".asset_reading_catalogue (table_id, db_id, asset_code) VALUES (" + + to_string(ref.tableId) + "," + + to_string(ref.dbId) + "," + + "\"" + asset_code + "\")"; + + Logger::getLogger()->debug("getReadingReference - allocate a new reading table for the asset '%s' db Id %d readings Id %d ", asset_code, ref.dbId, ref.tableId); - sql_cmd = " UPDATE " READINGS_DB ".asset_reading_catalogue SET asset_code ='" + string(asset_code) + "'" + - " WHERE db_id = " + to_string(emptyTableReference.dbId) + " AND table_id = " + to_string(emptyTableReference.tableId) + ";"; - } - else - { - sql_cmd = - "INSERT INTO " READINGS_DB ".asset_reading_catalogue (table_id, db_id, asset_code) VALUES (" - + to_string(ref.tableId) + "," - + to_string(ref.dbId) + "," - + "\"" + asset_code + "\")"; - } + } + else + { + sql_cmd = " UPDATE " READINGS_DB ".asset_reading_catalogue SET asset_code ='" + string(asset_code) + "'" + + " WHERE db_id = " + to_string(ref.dbId) + " AND table_id = " + to_string(ref.tableId) + ";"; - rc = SQLExec(dbHandle, sql_cmd.c_str()); - if (rc != SQLITE_OK) - { - msg = string(sqlite3_errmsg(dbHandle)) + " asset :" + asset_code + ":"; - raiseError("asset_reading_catalogue update", msg.c_str()); - } + Logger::getLogger()->debug("getReadingReference - Use empty table %readings_%d_%d: ",ref.dbId,ref.tableId); + } + + { + rc = SQLExec(dbHandle, sql_cmd.c_str()); + if (rc != SQLITE_OK) + { + msg = string(sqlite3_errmsg(dbHandle)) + " asset :" + asset_code + ":"; + raiseError("asset_reading_catalogue update", msg.c_str()); + } + + if (emptyAsset.empty()) + { allocateReadingAvailable(); } + + } + } + else + { + // Assign to overflow + Logger::getLogger()->info("Assign asset %s to the overflow table", asset_code); + auto newItem = make_pair(0, m_nextOverflow); + auto newMapValue = make_pair(asset_code, newItem); + m_AssetReadingCatalogue.insert(newMapValue); + sql_cmd = + "INSERT INTO " READINGS_DB ".asset_reading_catalogue (table_id, db_id, asset_code) VALUES ( 0," + + to_string(m_nextOverflow) + "," + + "\"" + asset_code + "\")"; + rc = SQLExec(dbHandle, sql_cmd.c_str()); + if (rc != SQLITE_OK) + { + msg = string(sqlite3_errmsg(dbHandle)) + " asset :" + asset_code + ":"; + raiseError("asset_reading_catalogue update", msg.c_str()); + } + ref.tableId = 0; + ref.dbId = m_nextOverflow; + if (m_nextOverflow > m_maxOverflowUsed) + { + m_maxOverflowUsed = m_nextOverflow; } + m_nextOverflow++; + if (m_nextOverflow > m_dbIdLast) + m_nextOverflow = 1; } } @@ -1956,52 +2216,91 @@ ReadingsCatalogue::tyReadingReference ReadingsCatalogue::getReadingReference(Co } /** - * Get Empty Reading 
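
When no dedicated table and no empty table can be found, the code above parks the asset in an overflow table, cycling through the overflow tables of the existing databases and wrapping back to database 1 after the last one. A minimal sketch of that round-robin assignment, with the member variables reduced to a small struct and the database count assumed:

```cpp
#include <cstdio>

// Sketch of the round-robin overflow assignment in getReadingReference():
// overflow assets always get table id 0 and a rotating database id.
struct OverflowAllocator
{
	int nextOverflow = 1;		// corresponds to m_nextOverflow
	int maxOverflowUsed = 0;	// corresponds to m_maxOverflowUsed
	int dbIdLast = 3;		// corresponds to m_dbIdLast (3 databases assumed)

	// Returns the database id whose overflow table should take the next asset.
	int assign()
	{
		int dbId = nextOverflow;
		if (dbId > maxOverflowUsed)
			maxOverflowUsed = dbId;
		if (++nextOverflow > dbIdLast)
			nextOverflow = 1;
		return dbId;
	}
};

int main()
{
	OverflowAllocator alloc;
	for (int i = 0; i < 5; i++)
		printf("asset %d -> readings_%d_overflow\n", i, alloc.assign());
	return 0;
}
```
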
Table - * - * @param emptyTableReference An empty reading table reference to be used for the given asset_code - * @return True of success, false on any error + * Loads the empty reading table catalogue * */ -bool ReadingsCatalogue::getEmptyReadingTableReference(tyReadingReference &emptyTableReference) +bool ReadingsCatalogue::loadEmptyAssetReadingCatalogue(bool clean) { - bool isEmptyTableAvailable = false; + std::lock_guard guard(m_emptyReadingTableMutex); sqlite3 *dbHandle; string sql_cmd; sqlite3_stmt *stmt; + ConnectionManager *manager = ConnectionManager::getInstance(); + + if (clean) + { + m_EmptyAssetReadingCatalogue.clear(); + } - // Disable functionality temporarily to avoid regression - return false; + // Do not populate m_EmptyAssetReadingCatalogue if data is already there + if (m_EmptyAssetReadingCatalogue.size()) + { + return true; + } - ConnectionManager *manager = ConnectionManager::getInstance(); Connection *connection = manager->allocate(); +#if TRACK_CONNECTION_USER + string usage = "Load empty sset reading catalogue"; + connection->setUsage(usage); +#endif dbHandle = connection->getDbHandle(); - for (auto &item : m_AssetReadingCatalogue) { - int tableId = item.second.first; - int dbId = item.second.second; - sql_cmd = "SELECT COUNT(*) FROM (SELECT 0 FROM readings_" + to_string(dbId) + ".readings_" + to_string(dbId) + "_" + to_string(tableId) + " LIMIT 1)"; + string asset_name = item.first; // Asset + int tableId = item.second.first; // tableId; + int dbId = item.second.second; // dbId; - if (sqlite3_prepare_v2(dbHandle, sql_cmd.c_str(), -1, &stmt, NULL) != SQLITE_OK) + if (tableId > 0) { - raiseError("getEmptyReadingTableReference", sqlite3_errmsg(dbHandle)); - return false; - } + + sql_cmd = "SELECT COUNT(*) FROM readings_" + to_string(dbId) + ".readings_" + to_string(dbId) + "_" + to_string(tableId) + " ;"; + if (sqlite3_prepare_v2(dbHandle, sql_cmd.c_str(), -1, &stmt, NULL) != SQLITE_OK) + { + sqlite3_finalize(stmt); + continue; + } - if (SQLStep(stmt) == SQLITE_ROW) - { - if (sqlite3_column_int(stmt, 0) == 0) + if (SQLStep(stmt) == SQLITE_ROW) { - isEmptyTableAvailable = true; - emptyTableReference.dbId = dbId; - emptyTableReference.tableId = tableId; + if (sqlite3_column_int(stmt, 0) == 0) + { + auto newItem = make_pair(tableId,dbId); + auto newMapValue = make_pair(asset_name,newItem); + m_EmptyAssetReadingCatalogue.insert(newMapValue); + + } } + sqlite3_finalize(stmt); } - sqlite3_finalize(stmt); + } - manager->release(connection); - return isEmptyTableAvailable; + return true; +} + +/** + * Get Empty Reading Table + * + * @param asset emptyAsset, copies value of asset for which empty table is found + * @return the reading id associated to the provided empty table + */ +ReadingsCatalogue::tyReadingReference ReadingsCatalogue::getEmptyReadingTableReference(std::string& asset) +{ + ReadingsCatalogue::tyReadingReference emptyTableReference = {-1, -1}; + if (m_EmptyAssetReadingCatalogue.size() == 0) + { + loadEmptyAssetReadingCatalogue(); + } + + auto it = m_EmptyAssetReadingCatalogue.begin(); + if (it != m_EmptyAssetReadingCatalogue.end()) + { + asset = it->first; + emptyTableReference.tableId = it->second.first; + emptyTableReference.dbId = it->second.second; + } + + return emptyTableReference; } /** @@ -2015,11 +2314,10 @@ int ReadingsCatalogue::getMaxReadingsId(int dbId) { int maxId = 0; - for (auto &item : m_AssetReadingCatalogue) { - - if (item.second.second == dbId ) - if (item.second.first > maxId) - maxId = item.second.first; + for (auto &item : 
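
The empty-table catalogue built above boils down to probing each per-asset table for a zero row count so it can later be handed to a new asset instead of allocating a fresh table. A small helper sketch of that probe, assuming the readings databases are already attached under `readings_<dbId>` aliases; it is a fragment, not the plugin's actual method.

```cpp
#include <sqlite3.h>
#include <string>

// Returns true only when the table exists and currently holds no rows;
// any failure is treated conservatively as "not reusable".
static bool isReadingsTableEmpty(sqlite3 *db, int dbId, int tableId)
{
	std::string sql = "SELECT COUNT(*) FROM readings_" + std::to_string(dbId) +
			  ".readings_" + std::to_string(dbId) + "_" + std::to_string(tableId) + ";";
	sqlite3_stmt *stmt;
	if (sqlite3_prepare_v2(db, sql.c_str(), -1, &stmt, NULL) != SQLITE_OK)
		return false;
	bool empty = false;
	if (sqlite3_step(stmt) == SQLITE_ROW)
		empty = (sqlite3_column_int(stmt, 0) == 0);
	sqlite3_finalize(stmt);
	return empty;
}
```
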
m_AssetReadingCatalogue) + { + if (item.second.second == dbId && item.second.first > maxId) + maxId = item.second.first; } return (maxId); @@ -2072,9 +2370,9 @@ int ReadingsCatalogue::getUsedTablesDbId(int dbId) { int count = 0; - for (auto &item : m_AssetReadingCatalogue) { - - if (item.second.second == dbId) + for (auto &item : m_AssetReadingCatalogue) + { + if (item.second.first != 0 && item.second.second == dbId) count++; } @@ -2136,7 +2434,7 @@ int ReadingsCatalogue::purgeAllReadings(sqlite3 *dbHandle, const char *sqlCmdBa rc = SQLExec(dbHandle, sqlCmdTmp.c_str(), zErrMsg); - Logger::getLogger()->debug("purgeAllReadings: rc :%d: cmd :%s:", rc ,sqlCmdTmp.c_str() ); + Logger::getLogger()->debug("purgeAllReadings: rc %d cmd '%s'", rc ,sqlCmdTmp.c_str() ); if (rc != SQLITE_OK) { @@ -2151,6 +2449,8 @@ int ReadingsCatalogue::purgeAllReadings(sqlite3 *dbHandle, const char *sqlCmdBa } } + std::thread th(&ReadingsCatalogue::loadEmptyAssetReadingCatalogue,this,false); + th.detach(); return(rc); } @@ -2209,6 +2509,12 @@ string ReadingsCatalogue::sqlConstructMultiDb(string &sqlCmdBase, vector &assetCodes, bool considerExclusion, bool groupBy) +{ + string dbReadingsName; + string dbName; + string sqlCmdTmp; + string sqlCmd; + + string assetCode; + bool addTable; + bool addedOne; + + for (int dbId = 1; dbId <= m_maxOverflowUsed; dbId++) + { + dbReadingsName = generateReadingsName(dbId, 0); + sqlCmdTmp = sqlCmdBase; + + sqlCmd += " UNION ALL "; + + dbName = generateDbName(dbId); + + StringReplaceAll (sqlCmdTmp, ".assetcode.", "asset_code"); + StringReplaceAll(sqlCmdTmp, "_dbname_", dbName); + StringReplaceAll(sqlCmdTmp, "_tablename_", dbReadingsName); + sqlCmd += sqlCmdTmp; + if (! assetCodes.empty()) + { + sqlCmd += " WHERE "; + bool first = true; + for (auto& code : assetCodes) + { + if (!first) + { + sqlCmd += " or "; + first = false; + } + sqlCmd += "asset_code = \'"; + sqlCmd += code; + sqlCmd += "\'"; + } + } + + if (groupBy) + { + sqlCmd += " GROUP By asset_code"; + } + } + + return sqlCmd; } /** - * Generates a SQLIte db alis from the database id + * Generates a SQLite db alias from the database id * * @param dbId Database id for which the alias must be generated * @return Generated alias @@ -2298,49 +2664,60 @@ string ReadingsCatalogue::generateDbFileName(int dbId) * Extracts the readings id from the table name * * @param tableName Table name from which the id must be extracted - * @return Extracted reading id + * @return Extracted reading id or -1 on error * */ int ReadingsCatalogue::extractReadingsIdFromName(string tableName) { int dbId; - int tableId; + int tableId = -1; string dbIdTableId; - dbIdTableId = tableName.substr (tableName.find('_') + 1); + try { + dbIdTableId = tableName.substr (tableName.find('_') + 1); - tableId = stoi(dbIdTableId.substr (dbIdTableId.find('_') + 1)); + tableId = stoi(dbIdTableId.substr (dbIdTableId.find('_') + 1)); - dbId = stoi(dbIdTableId.substr (0, dbIdTableId.find('_') )); + dbId = stoi(dbIdTableId.substr (0, dbIdTableId.find('_') )); + } catch (exception &e) { + Logger::getLogger()->fatal("extractReadingsIdFromName: exception on table %s, %s", + tableName.c_str(), e.what()); + } - return(tableId); + return tableId; } /** * Extract the database id from the table name * * @param tableName Table name from which the database id must be extracted - * @return Extracted database id + * @return Extracted database id or -1 on error * */ int ReadingsCatalogue::extractDbIdFromName(string tableName) { - int dbId; + int dbId = -1; int tableId; string dbIdTableId; - 
dbIdTableId = tableName.substr (tableName.find('_') + 1); + try { + dbIdTableId = tableName.substr (tableName.find('_') + 1); - tableId = stoi(dbIdTableId.substr (dbIdTableId.find('_') + 1)); + tableId = stoi(dbIdTableId.substr (dbIdTableId.find('_') + 1)); - dbId = stoi(dbIdTableId.substr (0, dbIdTableId.find('_') )); + dbId = stoi(dbIdTableId.substr (0, dbIdTableId.find('_') )); + } catch (exception &e) { + Logger::getLogger()->fatal("extractReadingsIdFromName: exception on table %s, %s", + tableName.c_str(), e.what()); + } - return(dbId); + return dbId; } /** * Generates the name of the reading table from the given table id as: - * Prefix + db Id + reading Id + * Prefix + db Id + reading Id. If the tableId is 0 then this is a + * reference to the overflow table * * @param dbId Database id to use for the generation of the table name * @param tableId Table id to use for the generation of the table name @@ -2352,12 +2729,19 @@ string ReadingsCatalogue::generateReadingsName(int dbId, int tableId) string tableName; if (dbId == -1) - dbId = retrieveDbIdFromTableId (tableId); + dbId = retrieveDbIdFromTableId(tableId); - tableName = READINGS_TABLE "_" + to_string(dbId) + "_" + to_string(tableId); - Logger::getLogger()->debug("%s - dbId :%d: tableId :%d: table name :%s: ", __FUNCTION__, dbId, tableId, tableName.c_str()); + if (tableId == 0) // Overflow table + { + tableName = READINGS_TABLE "_" + to_string(dbId) + "_overflow"; + } + else + { + tableName = READINGS_TABLE "_" + to_string(dbId) + "_" + to_string(tableId); + } + Logger::getLogger()->debug("%s - dbId %d tableId %d table name '%s' ", __FUNCTION__, dbId, tableId, tableName.c_str()); - return (tableName); + return tableName; } /** @@ -2423,25 +2807,27 @@ int ReadingsCatalogue::SQLExec(sqlite3 *dbHandle, const char *sqlCmd, char **err { int retries = 0, rc; - Logger::getLogger()->debug("SQLExec: cmd :%s: ", sqlCmd); + if (errMsg) + { + *errMsg = NULL; + } + Logger::getLogger()->debug("SQLExec: cmd '%s' ", sqlCmd); do { - if (errMsg == NULL) - { - rc = sqlite3_exec(dbHandle, sqlCmd, NULL, NULL, NULL); - } - else + if (errMsg && *errMsg) { - rc = sqlite3_exec(dbHandle, sqlCmd, NULL, NULL, errMsg); - Logger::getLogger()->debug("SQLExec: rc :%d: ", rc); + sqlite3_free(*errMsg); + *errMsg = NULL; } + rc = sqlite3_exec(dbHandle, sqlCmd, NULL, NULL, errMsg); + Logger::getLogger()->debug("SQLExec: rc %d ", rc); retries++; if (rc == SQLITE_LOCKED || rc == SQLITE_BUSY) { int interval = (retries * RETRY_BACKOFF); usleep(interval); // sleep retries milliseconds - if (retries > 5) Logger::getLogger()->info("SQLExec - error :%s: retry %d of %d, rc=%s, DB connection @ %p, slept for %d msecs", + if (retries > 5) Logger::getLogger()->info("SQLExec - error '%s' retry %d of %d, rc=%s, DB connection @ %p, slept for %d msecs", sqlite3_errmsg(dbHandle), retries, MAX_RETRIES, (rc==SQLITE_LOCKED)?"SQLITE_LOCKED":"SQLITE_BUSY", this, interval); } } while (retries < MAX_RETRIES && (rc == SQLITE_LOCKED || rc == SQLITE_BUSY)); diff --git a/C/plugins/storage/sqlite/plugin.cpp b/C/plugins/storage/sqlite/plugin.cpp index c4c2b32cde..506e3ab1cf 100644 --- a/C/plugins/storage/sqlite/plugin.cpp +++ b/C/plugins/storage/sqlite/plugin.cpp @@ -7,7 +7,7 @@ * * Author: Massimiliano Pinto */ -#include +#include #include #include #include @@ -41,14 +41,18 @@ const char *default_config = QUOTE({ "poolSize" : { "description" : "The number of connections to create in the intial pool of connections", "type" : "integer", + "minimum": "1", + "maximum": "10", "default" : "5", 
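
The `generateReadingsName()` change earlier in this hunk reserves table id 0 as the marker for the per-database overflow table; every other id maps to a numbered readings table. A tiny sketch of that naming rule, with the `readings_` prefix assumed to match `READINGS_TABLE`:

```cpp
#include <string>
#include <cstdio>

// Table id 0 denotes the overflow table of the given database; anything else
// is a normal per-asset readings table.
static std::string readingsTableName(int dbId, int tableId)
{
	if (tableId == 0)
		return "readings_" + std::to_string(dbId) + "_overflow";
	return "readings_" + std::to_string(dbId) + "_" + std::to_string(tableId);
}

int main()
{
	printf("%s\n", readingsTableName(2, 7).c_str());	// readings_2_7
	printf("%s\n", readingsTableName(2, 0).c_str());	// readings_2_overflow
	return 0;
}
```
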
"displayName" : "Pool Size", "order" : "1" }, "nReadingsPerDb" : { - "description" : "The number of readings tables in each database that is created", + "description" : "The number of unique assets tables to maintain in each database that is created", "type" : "integer", + "minimum": "1", "default" : "15", + "maximum": "00", "displayName" : "No. Readings per database", "order" : "2" }, @@ -56,6 +60,8 @@ const char *default_config = QUOTE({ "description" : "Number of databases to allocate in advance. NOTE: SQLite has a default maximum of 10 attachable databases", "type" : "integer", "default" : "3", + "minimum": "1", + "maximum" : "10", "displayName" : "No. databases to allocate in advance", "order" : "3" }, @@ -63,6 +69,8 @@ const char *default_config = QUOTE({ "description" : "Allocate new databases when the number of free databases drops below this value", "type" : "integer", "default" : "1", + "minimum": "1", + "maximum": "10", "displayName" : "Database allocation threshold", "order" : "4" }, @@ -70,6 +78,8 @@ const char *default_config = QUOTE({ "description" : "The number of databases to create whenever the number of available databases drops below the allocation threshold", "type" : "integer", "default" : "2", + "minimum" : "1", + "maximum" : "10", "displayName" : "Database allocation size", "order" : "5" }, @@ -182,6 +192,10 @@ int plugin_common_insert(PLUGIN_HANDLE handle, char *schema, char *table, char * ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); +#if TRACK_CONNECTION_USER + string usage = "Insert into " + string(table); + connection->setUsage(usage); +#endif int result = connection->insert(std::string(schema), std::string(table), std::string(data)); manager->release(connection); return result; @@ -196,6 +210,10 @@ ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); std::string results; +#if TRACK_CONNECTION_USER + string usage = "Retrieve from " + string(table); + connection->setUsage(usage); +#endif bool rval = connection->retrieve(std::string(schema), std::string(table), std::string(query), results); manager->release(connection); if (rval) @@ -213,6 +231,11 @@ int plugin_common_update(PLUGIN_HANDLE handle, char *schema, char *table, char * ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); +#if TRACK_CONNECTION_USER + string usage = "Update " + string(table); + connection->setUsage(usage); +#endif + int result = connection->update(std::string(schema), std::string(table), std::string(data)); manager->release(connection); return result; @@ -226,6 +249,10 @@ int plugin_common_delete(PLUGIN_HANDLE handle, char *schema, char *table, char * ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); +#if TRACK_CONNECTION_USER + string usage = "Delete from " + string(table); + connection->setUsage(usage); +#endif int result = connection->deleteRows(std::string(schema), std::string(table), std::string(condition)); manager->release(connection); return result; @@ -239,6 +266,10 @@ int plugin_reading_append(PLUGIN_HANDLE handle, char *readings) ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); +#if TRACK_CONNECTION_USER + string usage = "Reading append"; + connection->setUsage(usage); +#endif int result = connection->appendReadings(readings); manager->release(connection); return result;; @@ -253,6 +284,11 @@ int 
plugin_readingStream(PLUGIN_HANDLE handle, ReadingStream **readings, bool co ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); +#if TRACK_CONNECTION_USER + string usage = "Reading Stream"; + connection->setUsage(usage); +#endif + result = connection->readingStream(readings, commit); manager->release(connection); @@ -268,6 +304,11 @@ ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); std::string resultSet; +#if TRACK_CONNECTION_USER + string usage = "Fetch readings"; + connection->setUsage(usage); +#endif + connection->fetchReadings(id, blksize, resultSet); manager->release(connection); return strdup(resultSet.c_str()); @@ -282,6 +323,11 @@ ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); std::string results; +#if TRACK_CONNECTION_USER + string usage = "Reading retrieve"; + connection->setUsage(usage); +#endif + connection->retrieveReadings(std::string(condition), results); manager->release(connection); return strdup(results.c_str()); @@ -297,6 +343,10 @@ Connection *connection = manager->allocate(); std::string results; unsigned long age, size; +#if TRACK_CONNECTION_USER + string usage = "Purge"; + connection->setUsage(usage); +#endif if (flags & STORAGE_PURGE_SIZE) { (void)connection->purgeReadingsByRows(param, flags, sent, results); @@ -337,10 +387,18 @@ bool plugin_shutdown(PLUGIN_HANDLE handle) ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); - connection->shutdownAppendReadings(); - ReadingsCatalogue *readCat = ReadingsCatalogue::getInstance(); - readCat->storeGlobalId(); +#if TRACK_CONNECTION_USER + string usage = "Shutdown"; + connection->setUsage(usage); +#endif + if (connection->supportsReadings()) + { + connection->shutdownAppendReadings(); + + ReadingsCatalogue *readCat = ReadingsCatalogue::getInstance(); + readCat->storeGlobalId(); + } manager->release(connection); manager->shutdown(); @@ -365,6 +423,10 @@ int plugin_create_table_snapshot(PLUGIN_HANDLE handle, ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); +#if TRACK_CONNECTION_USER + string usage = "Snapshot " + string(table); + connection->setUsage(usage); +#endif int result = connection->create_table_snapshot(std::string(table), std::string(id)); manager->release(connection); @@ -386,6 +448,10 @@ int plugin_load_table_snapshot(PLUGIN_HANDLE handle, ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); +#if TRACK_CONNECTION_USER + string usage = "Load snapshot " + string(table); + connection->setUsage(usage); +#endif int result = connection->load_table_snapshot(std::string(table), std::string(id)); manager->release(connection); @@ -408,6 +474,10 @@ int plugin_delete_table_snapshot(PLUGIN_HANDLE handle, ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); +#if TRACK_CONNECTION_USER + string usage = "Delete snapshot " + string(table); + connection->setUsage(usage); +#endif int result = connection->delete_table_snapshot(std::string(table), std::string(id)); manager->release(connection); @@ -429,6 +499,10 @@ ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); std::string results; +#if TRACK_CONNECTION_USER + string usage = "Get table snapshots" + string(table); + connection->setUsage(usage); +#endif bool rval = 
connection->get_table_snapshots(std::string(table), results); manager->release(connection); @@ -450,6 +524,10 @@ int plugin_createSchema(PLUGIN_HANDLE handle, char *definition) ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); +#if TRACK_CONNECTION_USER + string usage = "Create schema"; + connection->setUsage(usage); +#endif int result = connection->createSchema(std::string(definition)); manager->release(connection); return result; @@ -463,6 +541,10 @@ unsigned int plugin_reading_purge_asset(PLUGIN_HANDLE handle, char *asset) ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); +#if TRACK_CONNECTION_USER + string usage = "Purge asset "; + connection->setUsage(usage); +#endif unsigned int deleted = connection->purgeReadingsAsset(asset); manager->release(connection); return deleted; diff --git a/C/plugins/storage/sqlitelb/CMakeLists.txt b/C/plugins/storage/sqlitelb/CMakeLists.txt index 4bfcf9f4e9..3b27455980 100644 --- a/C/plugins/storage/sqlitelb/CMakeLists.txt +++ b/C/plugins/storage/sqlitelb/CMakeLists.txt @@ -6,8 +6,8 @@ set(CMAKE_CXX_FLAGS_DEBUG "-O0 -ggdb") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") set(STORAGE_COMMON_LIB storage-common-lib) -# Path of compiled libsqlite3.a and .h files: /tmp/sqlite3-pkg/src -set(FLEDGE_SQLITE3_LIBS "/tmp/sqlite3-pkg/src" CACHE INTERNAL "") +# Path of compiled sqlite3 file: /usr/local/bin +set(FLEDGE_SQLITE3_LIBS "/usr/local/bin" CACHE INTERNAL "") # Find source files file(GLOB SOURCES ./common/*.cpp ../sqlite/schema/*.cpp *.cpp) @@ -16,6 +16,7 @@ file(GLOB SOURCES ./common/*.cpp ../sqlite/schema/*.cpp *.cpp) include_directories(./include) include_directories(./common/include) include_directories(../sqlite/schema/include) +include_directories(../sqlite/common/include) include_directories(../../../common/include) include_directories(../../../services/common/include) include_directories(../common/include) diff --git a/C/plugins/storage/sqlitelb/common/connection.cpp b/C/plugins/storage/sqlitelb/common/connection.cpp index b5848f16cc..f4b5f8b9b0 100644 --- a/C/plugins/storage/sqlitelb/common/connection.cpp +++ b/C/plugins/storage/sqlitelb/common/connection.cpp @@ -9,7 +9,7 @@ */ #include #include -#include +#include #include #ifndef MEMORY_READING_PLUGIN #include @@ -119,7 +119,7 @@ bool Connection::getNow(string& Now) string nowSqlCMD = "SELECT " SQLITE3_NOW_READING; - int rc = SQLexec(dbHandle, + int rc = SQLexec(dbHandle, "now", nowSqlCMD.c_str(), dateCallback, nowDate, @@ -243,7 +243,7 @@ bool Connection::applyColumnDateTimeFormat(sqlite3_stmt *pStmt, char formattedData[100] = ""; // Exec the format SQL - int rc = SQLexec(dbHandle, + int rc = SQLexec(dbHandle, "date", formatStmt.c_str(), dateCallback, formattedData, @@ -524,8 +524,9 @@ Connection::Connection() const char *sqlStmt = attachDb.coalesce(); + zErrMsg = NULL; // Exec the statement - rc = SQLexec(dbHandle, + rc = SQLexec(dbHandle, "database", sqlStmt, NULL, NULL, @@ -551,6 +552,24 @@ Connection::Connection() } //Release sqlStmt buffer delete[] sqlStmt; + + bool initialiseReadings = false; + if (access(dbPathReadings.c_str(), R_OK) == -1) + { + sqlite3 *dbHandle; + // Readings do not exist so set flag to initialise + rc = sqlite3_open(dbPathReadings.c_str(), &dbHandle); + if(rc != SQLITE_OK) + { + } + else + { + // Enables the WAL feature + rc = sqlite3_exec(dbHandle, DB_CONFIGURATION, NULL, NULL, NULL); + } + sqlite3_close(dbHandle); + initialiseReadings = true; + } // Attach 
readings database SQLBuffer attachReadingsDb; @@ -560,7 +579,7 @@ Connection::Connection() const char *sqlReadingsStmt = attachReadingsDb.coalesce(); // Exec the statement - rc = SQLexec(dbHandle, + rc = SQLexec(dbHandle, "database", sqlReadingsStmt, NULL, NULL, @@ -587,6 +606,151 @@ Connection::Connection() //Release sqlStmt buffer delete[] sqlReadingsStmt; + if (initialiseReadings) + { + // Would really like to run an external script here, but until we have that + // worked out we have the SQL needed to create the table and indexes + + // Need to initialise the readings + SQLBuffer initReadings; + initReadings.append("CREATE TABLE readings.readings ("); + initReadings.append("id INTEGER PRIMARY KEY AUTOINCREMENT,"); + initReadings.append("asset_code character varying(50) NOT NULL,"); + initReadings.append("reading JSON NOT NULL DEFAULT '{}',"); + initReadings.append("user_ts DATETIME DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f+00:00', 'NOW')),"); + initReadings.append("ts DATETIME DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f+00:00', 'NOW'))"); + initReadings.append(");"); + + const char *sqlReadingsStmt = initReadings.coalesce(); + + // Exec the statement + zErrMsg = NULL; + rc = SQLexec(dbHandle, "readings creation", + sqlReadingsStmt, + NULL, + NULL, + &zErrMsg); + + // Check result + if (rc != SQLITE_OK) + { + const char* errMsg = "Failed to create 'readings' table, "; + Logger::getLogger()->error("%s '%s': error %s", + errMsg, + sqlReadingsStmt, + zErrMsg); + connectErrorTime = time(0); + + sqlite3_free(zErrMsg); + sqlite3_close_v2(dbHandle); + } + else + { + Logger::getLogger()->info("Initialised readings database"); + } + //Release sqlStmt buffer + delete[] sqlReadingsStmt; + + SQLBuffer index1; + index1.append("CREATE INDEX readings.fki_readings_fk1 ON readings (asset_code, user_ts desc);"); + + const char *sqlIndex1Stmt = index1.coalesce(); + + // Exec the statement + zErrMsg = NULL; + rc = SQLexec(dbHandle, "readings creation", + sqlIndex1Stmt, + NULL, + NULL, + &zErrMsg); + + // Check result + if (rc != SQLITE_OK) + { + const char* errMsg = "Failed to create 'readings' index, "; + Logger::getLogger()->error("%s '%s': error %s", + errMsg, + sqlIndex1Stmt, + zErrMsg); + connectErrorTime = time(0); + + sqlite3_free(zErrMsg); + sqlite3_close_v2(dbHandle); + } + else + { + Logger::getLogger()->info("Initialised readings database"); + } + //Release sqlStmt buffer + delete[] sqlIndex1Stmt; + + SQLBuffer index2; + index2.append("CREATE INDEX readings.readings_ix2 ON readings (asset_code);"); + + const char *sqlIndex2Stmt = index2.coalesce(); + + // Exec the statement + zErrMsg = NULL; + rc = SQLexec(dbHandle, "readings creation", + sqlIndex2Stmt, + NULL, + NULL, + &zErrMsg); + + // Check result + if (rc != SQLITE_OK) + { + const char* errMsg = "Failed to create 'readings' index, "; + Logger::getLogger()->error("%s '%s': error %s", + errMsg, + sqlIndex2Stmt, + zErrMsg); + connectErrorTime = time(0); + + sqlite3_free(zErrMsg); + sqlite3_close_v2(dbHandle); + } + else + { + Logger::getLogger()->info("Initialised readings database"); + } + //Release sqlStmt buffer + delete[] sqlIndex2Stmt; + + SQLBuffer index3; + index3.append("CREATE INDEX readings.readings_ix3 ON readings (user_ts);"); + + const char *sqlIndex3Stmt = index3.coalesce(); + + // Exec the statement + zErrMsg = NULL; + rc = SQLexec(dbHandle, "readings creation", + sqlIndex3Stmt, + NULL, + NULL, + &zErrMsg); + + // Check result + if (rc != SQLITE_OK) + { + const char* errMsg = "Failed to create 'readings' index, "; + 
Logger::getLogger()->error("%s '%s': error %s", + errMsg, + sqlIndex3Stmt, + zErrMsg); + connectErrorTime = time(0); + + sqlite3_free(zErrMsg); + sqlite3_close_v2(dbHandle); + } + else + { + Logger::getLogger()->info("Initialised readings database"); + } + //Release sqlStmt buffer + delete[] sqlIndex3Stmt; + } + // Enable the WAL for the readings DB rc = sqlite3_exec(dbHandle, dbConfiguration.c_str(),NULL, NULL, &zErrMsg); if (rc != SQLITE_OK) @@ -1101,17 +1265,15 @@ int Connection::insert(const std::string& schema, const std::string& table, const std::string& data) { -SQLBuffer sql; Document document; ostringstream convert; +sqlite3_stmt *stmt; +int rc; std::size_t arr = data.find("inserts"); if (!m_schemaManager->exists(dbHandle, schema)) { - raiseError("insert", - "Schema %s does not exist, unable to insert into table %s", - schema.c_str(), - table.c_str()); + raiseError("insert", "Schema %s does not exist, unable to insert into table %s", schema.c_str(), table.c_str()); return false; } @@ -1141,13 +1303,11 @@ std::size_t arr = data.find("inserts"); return -1; } - // Start a trabsaction - sql.append("BEGIN TRANSACTION;"); - // Number of inserts int ins = 0; - - // Iterate through insert array + int failedInsertCount = 0; + + // Generate sql query for prepared statement for (Value::ConstValueIterator iter = inserts.Begin(); iter != inserts.End(); ++iter) @@ -1159,138 +1319,153 @@ std::size_t arr = data.find("inserts"); return -1; } - int col = 0; - SQLBuffer values; - - sql.append("INSERT INTO "); - sql.append(schema); - sql.append('.'); - sql.append(table); - sql.append(" ("); - - for (Value::ConstMemberIterator itr = (*iter).MemberBegin(); - itr != (*iter).MemberEnd(); - ++itr) { - // Append column name - if (col) + int col = 0; + SQLBuffer sql; + SQLBuffer values; + sql.append("INSERT INTO " + schema + "." 
+ table + " ("); + + for (Value::ConstMemberIterator itr = (*iter).MemberBegin(); + itr != (*iter).MemberEnd(); + ++itr) { - sql.append(", "); + // Append column name + if (col) + { + sql.append(", "); + } + sql.append(itr->name.GetString()); + col++; } - sql.append(itr->name.GetString()); - - // Append column value - if (col) + + sql.append(") VALUES ("); + for ( auto i = 0 ; i < col; i++ ) + { + if (i) + { + sql.append(","); + } + sql.append("?"); + } + sql.append(");"); + + const char *query = sql.coalesce(); + + rc = sqlite3_prepare_v2(dbHandle, query, -1, &stmt, NULL); + if (rc != SQLITE_OK) { - values.append(", "); + raiseError("insert", sqlite3_errmsg(dbHandle)); + Logger::getLogger()->error("SQL statement: %s", query); + return -1; } - if (itr->value.IsString()) + + // Bind columns with prepared sql query + int columID = 1; + for (Value::ConstMemberIterator itr = (*iter).MemberBegin(); + itr != (*iter).MemberEnd(); + ++itr) { - const char *str = itr->value.GetString(); - if (strcmp(str, "now()") == 0) + + if (itr->value.IsString()) + { + const char *str = itr->value.GetString(); + if (strcmp(str, "now()") == 0) + { + sqlite3_bind_text(stmt, columID, SQLITE3_NOW, -1, SQLITE_TRANSIENT); + } + else + { + sqlite3_bind_text(stmt, columID, escape(str).c_str(), -1, SQLITE_TRANSIENT); + } + } + else if (itr->value.IsDouble()) { + sqlite3_bind_double(stmt, columID,itr->value.GetDouble()); + } + + else if (itr->value.IsInt64()) { - values.append(SQLITE3_NOW); + sqlite3_bind_int(stmt, columID,(long)itr->value.GetInt64()); } - else + + else if (itr->value.IsInt()) { - values.append('\''); - values.append(escape(str)); - values.append('\''); + sqlite3_bind_int(stmt, columID,itr->value.GetInt()); } + + else if (itr->value.IsObject()) + { + StringBuffer buffer; + Writer writer(buffer); + itr->value.Accept(writer); + sqlite3_bind_text(stmt, columID, buffer.GetString(), -1, SQLITE_TRANSIENT); + } + columID++ ; + } + + if (sqlite3_exec(dbHandle, "BEGIN TRANSACTION", NULL, NULL, NULL) != SQLITE_OK) + { + raiseError("insert", sqlite3_errmsg(dbHandle)); + return -1; } - else if (itr->value.IsDouble()) - values.append(itr->value.GetDouble()); - else if (itr->value.IsInt64()) - values.append((long)itr->value.GetInt64()); - else if (itr->value.IsInt()) - values.append(itr->value.GetInt()); - else if (itr->value.IsObject()) - { - StringBuffer buffer; - Writer writer(buffer); - itr->value.Accept(writer); - values.append('\''); - values.append(escape(buffer.GetString())); - values.append('\''); - } - col++; - } - sql.append(") VALUES ("); - const char *vals = values.coalesce(); - sql.append(vals); - delete[] vals; - sql.append(");"); + m_writeAccessOngoing.fetch_add(1); + + int sqlite3_resut = SQLstep(stmt); + + m_writeAccessOngoing.fetch_sub(1); + + if (sqlite3_resut == SQLITE_DONE) + { + sqlite3_clear_bindings(stmt); + sqlite3_reset(stmt); + } + else + { + failedInsertCount++; + raiseError("insert", sqlite3_errmsg(dbHandle)); + Logger::getLogger()->error("SQL statement: %s", sqlite3_expanded_sql(stmt)); + + // transaction is still open, do rollback + if (sqlite3_get_autocommit(dbHandle) == 0) + { + rc = sqlite3_exec(dbHandle,"ROLLBACK TRANSACTION;",NULL,NULL,NULL); + if (rc != SQLITE_OK) + { + raiseError("insert rollback", sqlite3_errmsg(dbHandle)); + } + + } + } + + + if (sqlite3_exec(dbHandle, "COMMIT TRANSACTION", NULL, NULL, NULL) != SQLITE_OK) + { + raiseError("insert", sqlite3_errmsg(dbHandle)); + return -1; + } + + delete[] query; + } // Increment row count ins++; + } - sql.append("COMMIT 
TRANSACTION;"); + sqlite3_finalize(stmt); - const char *query = sql.coalesce(); - logSQL("CommonInsert", query); - char *zErrMsg = NULL; - int rc; - - // Exec INSERT statement: no callback, no result set - m_writeAccessOngoing.fetch_add(1); - rc = SQLexec(dbHandle, - query, - NULL, - NULL, - &zErrMsg); - m_writeAccessOngoing.fetch_sub(1); if (m_writeAccessOngoing == 0) db_cv.notify_all(); - // Check exec result - if (rc != SQLITE_OK ) + if (failedInsertCount) { - raiseError("insert", zErrMsg); - Logger::getLogger()->error("SQL statement: %s", query); - sqlite3_free(zErrMsg); - - // transaction is still open, do rollback - if (sqlite3_get_autocommit(dbHandle) == 0) - { - rc = SQLexec(dbHandle, - "ROLLBACK TRANSACTION;", - NULL, - NULL, - &zErrMsg); - if (rc != SQLITE_OK) - { - raiseError("insert rollback", zErrMsg); - sqlite3_free(zErrMsg); - } - } - - Logger::getLogger()->error("SQL statement: %s", query); - // Release memory for 'query' var - delete[] query; - - // Failure - return -1; + char buf[100]; + snprintf(buf, sizeof(buf), + "Not all inserts into table '%s.%s' within transaction succeeded", + schema.c_str(), table.c_str()); + raiseError("insert", buf); } - else - { - // Release memory for 'query' var - delete[] query; - - int insert = sqlite3_changes(dbHandle); - - if (insert == 0) - { - char buf[100]; - snprintf(buf, sizeof(buf), - "Not all inserts within transaction '%s.%s' succeeded", - schema.c_str(), table.c_str()); - raiseError("insert", buf); - } - // Return the status - return (insert ? ins : -1); - } + return (!failedInsertCount ? ins : -1); } #endif @@ -1657,7 +1832,7 @@ bool allowZero = false; // Exec the UPDATE statement: no callback, no result set m_writeAccessOngoing.fetch_add(1); - rc = SQLexec(dbHandle, + rc = SQLexec(dbHandle, table, query, NULL, NULL, @@ -1673,7 +1848,7 @@ bool allowZero = false; sqlite3_free(zErrMsg); if (sqlite3_get_autocommit(dbHandle)==0) // transaction is still open, do rollback { - rc=SQLexec(dbHandle, + rc=SQLexec(dbHandle, table, "ROLLBACK TRANSACTION;", NULL, NULL, @@ -2910,7 +3085,7 @@ void Connection::logSQL(const char *tag, const char *stmt) * @param cbArg Callback 1st argument * @param errmsg Locaiton to write error message */ -int Connection::SQLexec(sqlite3 *db, const char *sql, int (*callback)(void*,int,char**,char**), +int Connection::SQLexec(sqlite3 *db, const string& table, const char *sql, int (*callback)(void*,int,char**,char**), void *cbArg, char **errmsg) { int retries = 0, rc; @@ -2954,7 +3129,7 @@ int interval; { int rc2; char *zErrMsg = NULL; - rc2=SQLexec(db, + rc2=SQLexec(db, table, "ROLLBACK TRANSACTION;", NULL, NULL, @@ -2996,11 +3171,11 @@ int interval; if (rc == SQLITE_LOCKED) { - Logger::getLogger()->error("Database still locked after maximum retries"); + Logger::getLogger()->error("Database still locked after maximum retries, executing %s operation on %s", operation(sql).c_str(), table.c_str()); } if (rc == SQLITE_BUSY) { - Logger::getLogger()->error("Database still busy after maximum retries"); + Logger::getLogger()->error("Database still busy after maximum retries, executing %s operation on %s", operation(sql).c_str(), table.c_str()); } return rc; @@ -3133,7 +3308,7 @@ SQLBuffer sql; // Exec the DELETE statement: no callback, no result set m_writeAccessOngoing.fetch_add(1); - rc = SQLexec(dbHandle, + rc = SQLexec(dbHandle, table, query, NULL, NULL, @@ -3181,7 +3356,7 @@ int Connection::create_table_snapshot(const string& table, const string& id) logSQL("CreateTableSnapshot", query.c_str()); char* zErrMsg = 
NULL; - int rc = SQLexec(dbHandle, + int rc = SQLexec(dbHandle, table, query.c_str(), NULL, NULL, @@ -3219,7 +3394,7 @@ int Connection::load_table_snapshot(const string& table, const string& id) logSQL("LoadTableSnapshot", query.c_str()); char* zErrMsg = NULL; - int rc = SQLexec(dbHandle, + int rc = SQLexec(dbHandle, table, query.c_str(), NULL, NULL, @@ -3238,7 +3413,7 @@ int Connection::load_table_snapshot(const string& table, const string& id) // transaction is still open, do rollback if (sqlite3_get_autocommit(dbHandle) == 0) { - rc = SQLexec(dbHandle, + rc = SQLexec(dbHandle, table, "ROLLBACK TRANSACTION;", NULL, NULL, @@ -3268,7 +3443,7 @@ int Connection::delete_table_snapshot(const string& table, const string& id) logSQL("DeleteTableSnapshot", query.c_str()); char* zErrMsg = NULL; - int rc = SQLexec(dbHandle, + int rc = SQLexec(dbHandle, table, query.c_str(), NULL, NULL, @@ -3371,7 +3546,7 @@ bool Connection::vacuum() { char* zErrMsg = NULL; // Exec the statement - int rc = SQLexec(dbHandle, "VACUUM;", NULL, NULL, &zErrMsg); + int rc = SQLexec(dbHandle, "database", "VACUUM;", NULL, NULL, &zErrMsg); // Check result if (rc != SQLITE_OK) @@ -3389,3 +3564,20 @@ bool Connection::vacuum() return true; } #endif + +/* + * Return the first word in a SQL statement, ie the operation that is being executed. + * + * @param sql The complete SQL statement + * @return string The operation + */ +string Connection::operation(const char *sql) +{ + const char *p1 = sql; + char buf[40], *p2 = buf; + while (*p1 && !isspace(*p1) && p2 - buf < 40) + *p2++ = *p1++; + *p2 = '\0'; + return string(buf); + +} diff --git a/C/plugins/storage/sqlitelb/common/include/common.h b/C/plugins/storage/sqlitelb/common/include/common.h deleted file mode 100644 index a6c168dd9e..0000000000 --- a/C/plugins/storage/sqlitelb/common/include/common.h +++ /dev/null @@ -1,39 +0,0 @@ -#ifndef _COMMON_CONNECTION_H -#define _COMMON_CONNECTION_H - -#include -#include -#include -#include "rapidjson/document.h" -#include "rapidjson/writer.h" -#include "rapidjson/stringbuffer.h" -#include "rapidjson/error/error.h" -#include "rapidjson/error/en.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static std::map sqliteDateFormat = { - {"HH24:MI:SS", - F_TIMEH24_S}, - {"YYYY-MM-DD HH24:MI:SS.MS", - F_DATEH24_MS}, - {"YYYY-MM-DD HH24:MI:SS", - F_DATEH24_S}, - {"YYYY-MM-DD HH24:MI", - F_DATEH24_M}, - {"YYYY-MM-DD HH24", - F_DATEH24_H}, - {"", ""} - }; -#endif diff --git a/C/plugins/storage/sqlitelb/common/include/connection.h b/C/plugins/storage/sqlitelb/common/include/connection.h index 448790797b..703b457b2d 100644 --- a/C/plugins/storage/sqlitelb/common/include/connection.h +++ b/C/plugins/storage/sqlitelb/common/include/connection.h @@ -27,9 +27,9 @@ #define READINGS_TABLE "readings" #define READINGS_TABLE_MEM READINGS_TABLE -#define MAX_RETRIES 80 // Maximum no. of retries when a lock is encountered -#define RETRY_BACKOFF 100 // Multipler to backoff DB retry on lock -#define RETRY_BACKOFF_EXEC 1000 // Multipler to backoff DB retry on lock +#define MAX_RETRIES 80 // Maximum no. 
of retries when a lock is encountered +#define RETRY_BACKOFF 100 // Multiplier to backoff DB retry on lock +#define RETRY_BACKOFF_EXEC 1000 // Multiplier to backoff DB retry on lock #define LEN_BUFFER_DATE 100 #define F_TIMEH24_S "%H:%M:%S" @@ -119,6 +119,10 @@ class Connection { bool getNow(std::string& Now); unsigned int purgeReadingsAsset(const std::string& asset); bool vacuum(); +#ifdef MEMORY_READING_PLUGIN + bool loadDatabase(const std::string& filname); + bool saveDatabase(const std::string& filname); +#endif private: #ifndef MEMORY_READING_PLUGIN @@ -127,7 +131,8 @@ class Connection { bool m_streamOpenTransaction; int m_queuing; std::mutex m_qMutex; - int SQLexec(sqlite3 *db, const char *sql, + std::string operation(const char *sql); + int SQLexec(sqlite3 *db, const std::string& table, const char *sql, int (*callback)(void*,int,char**,char**), void *cbArg, char **errmsg); int SQLstep(sqlite3_stmt *statement); diff --git a/C/plugins/storage/sqlitelb/common/include/connection_manager.h b/C/plugins/storage/sqlitelb/common/include/connection_manager.h index eda1fb06db..bba0ee42b9 100644 --- a/C/plugins/storage/sqlitelb/common/include/connection_manager.h +++ b/C/plugins/storage/sqlitelb/common/include/connection_manager.h @@ -37,7 +37,13 @@ class ConnectionManager { void setVacuumInterval(long hours) { m_vacuumInterval = 60 * 60 * hours; }; - + void setPersist(bool persist, const std::string& filename = "") + { + m_persist = persist; + m_filename = filename; + } + bool persist() { return m_persist; }; + std::string filename() { return m_filename; }; protected: ConnectionManager(); @@ -54,6 +60,8 @@ class ConnectionManager { bool m_shutdown; std::thread *m_background; long m_vacuumInterval; + bool m_persist; + std::string m_filename; }; #endif diff --git a/C/plugins/storage/sqlitelb/common/readings.cpp b/C/plugins/storage/sqlitelb/common/readings.cpp index e0adef63fa..5620dbbf40 100644 --- a/C/plugins/storage/sqlitelb/common/readings.cpp +++ b/C/plugins/storage/sqlitelb/common/readings.cpp @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include @@ -22,8 +22,28 @@ #include #endif +/* + * The number of readings to insert in a single prepared statement + */ +#define APPEND_BATCH_SIZE 100 + +/* + * JSON parsing requires a lot of memory allocation, which is slow and causes + * bottlenecks with thread synchronisation. RapidJSON supports in-situ parsing + * whereby it will reuse the storage of the string it is parsing to store the + * keys and string values of the parsed JSON. This is destructive on the buffer. + * However it can be quicker to make a copy of the raw string and then do in-situ + * parsing on that copy of the string. + * See http://rapidjson.org/md_doc_dom.html#InSituParsing + * + * Define a threshold length for the append readings to switch to using in-situ + * parsing of the JSON to save on memory allocation overheads. Define as 0 to + * disable the in-situ parsing.
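 *
 * A minimal sketch of the copy-then-parse-in-situ flow described above
 * (illustrative only; the names follow those used in appendReadings() below):
 *
 *     int len = strlen(readings) + 1;
 *     char *readingsCopy = (char *)malloc(len);      // RapidJSON rewrites this buffer in place
 *     memcpy(readingsCopy, readings, len);
 *     ParseResult ok = doc.ParseInsitu(readingsCopy);
 *     // ... use doc; its string values now point into readingsCopy ...
 *     free(readingsCopy);                            // free only once doc is no longer used
 *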
+ */ +#define INSITU_THRESHOLD 10240 + // Decode stream data -#define RDS_USER_TIMESTAMP(stream, x) stream[x]->userTs +#define RDS_USER_TIMESTAMP(stream, x) stream[x]->userTs #define RDS_ASSET_CODE(stream, x) stream[x]->assetCode #define RDS_PAYLOAD(stream, x) &(stream[x]->assetCode[0]) + stream[x]->assetCodeLength @@ -392,15 +412,23 @@ int Connection::readingStream(ReadingStream **readings, bool commit) int sleep_time_ms = 0; // SQLite related - sqlite3_stmt *stmt; + sqlite3_stmt *stmt, *batch_stmt; int sqlite3_resut; int rowNumber = -1; - + #if INSTRUMENT struct timeval start, t1, t2, t3, t4, t5; #endif - const char *sql_cmd = "INSERT INTO " READINGS_DB_NAME_BASE ".readings ( asset_code, reading, user_ts ) VALUES (?,?,?)"; + const char *sql_cmd = "INSERT INTO " READINGS_DB_NAME_BASE ".readings ( user_ts, asset_code, reading ) VALUES (?,?,?)"; + string cmd = sql_cmd; + for (int i = 0; i < APPEND_BATCH_SIZE - 1; i++) + { + cmd.append(", (?,?,?)"); + } + + sqlite3_prepare_v2(dbHandle, sql_cmd, strlen(sql_cmd), &stmt, NULL); + sqlite3_prepare_v2(dbHandle, cmd.c_str(), cmd.length(), &batch_stmt, NULL); if (sqlite3_prepare_v2(dbHandle, sql_cmd, strlen(sql_cmd), &stmt, NULL) != SQLITE_OK) { @@ -427,24 +455,144 @@ int Connection::readingStream(ReadingStream **readings, bool commit) gettimeofday(&start, NULL); #endif + int nReadings; + for (nReadings = 0; readings[nReadings]; nReadings++); + try { - for (i = 0; readings[i]; i++) + + unsigned int nBatches = nReadings / APPEND_BATCH_SIZE; + int curReading = 0; + for (int batch = 0; batch < nBatches; batch++) + { + int varNo = 1; + for (int readingNo = 0; readingNo < APPEND_BATCH_SIZE; readingNo++) + { + add_row = true; + + // Handles - asset_code + asset_code = RDS_ASSET_CODE(readings, curReading); + + // Handles - reading + payload = RDS_PAYLOAD(readings, curReading); + reading = escape(payload); + + // Handles - user_ts + memset(&timeinfo, 0, sizeof(struct tm)); + gmtime_r(&RDS_USER_TIMESTAMP(readings, curReading).tv_sec, &timeinfo); + std::strftime(ts, sizeof(ts), "%Y-%m-%d %H:%M:%S", &timeinfo); + snprintf(micro_s, sizeof(micro_s), ".%06lu", RDS_USER_TIMESTAMP(readings, curReading).tv_usec); + + formatted_date[0] = {0}; + strncat(ts, micro_s, 10); + user_ts = ts; + if (strcmp(user_ts, "now()") == 0) + { + getNow(now); + user_ts = now.c_str(); + } + else + { + if (!formatDate(formatted_date, sizeof(formatted_date), user_ts)) + { + raiseError("streamReadings", "Invalid date |%s|", user_ts); + add_row = false; + } + else + { + user_ts = formatted_date; + } + } + + if (add_row) + { + if (batch_stmt != NULL) + { + sqlite3_bind_text(batch_stmt, varNo++, user_ts, -1, SQLITE_STATIC); + sqlite3_bind_text(batch_stmt, varNo++, asset_code, -1, SQLITE_STATIC); + sqlite3_bind_text(batch_stmt, varNo++, reading.c_str(), -1, SQLITE_STATIC); + } + } + curReading++; + } + // Write the batch + + retries = 0; + sleep_time_ms = 0; + + // Retry mechanism in case SQLlite DB is locked + do { + // Insert the row using a lock to ensure one insert at time + { + m_writeAccessOngoing.fetch_add(1); + //unique_lock lck(db_mutex); + + sqlite3_resut = sqlite3_step(batch_stmt); + + m_writeAccessOngoing.fetch_sub(1); + //db_cv.notify_all(); + } + + if (sqlite3_resut == SQLITE_LOCKED ) + { + sleep_time_ms = PREP_CMD_RETRY_BASE + (random() % PREP_CMD_RETRY_BACKOFF); + retries++; + + Logger::getLogger()->info("SQLITE_LOCKED - record :%d: - retry number :%d: sleep time ms :%d:",i, retries, sleep_time_ms); + + std::this_thread::sleep_for(std::chrono::milliseconds(sleep_time_ms)); + } 
+ if (sqlite3_resut == SQLITE_BUSY) + { + ostringstream threadId; + threadId << std::this_thread::get_id(); + + sleep_time_ms = PREP_CMD_RETRY_BASE + (random() % PREP_CMD_RETRY_BACKOFF); + retries++; + + Logger::getLogger()->info("SQLITE_BUSY - thread :%s: - record :%d: - retry number :%d: sleep time ms :%d:", threadId.str().c_str() ,i , retries, sleep_time_ms); + + std::this_thread::sleep_for(std::chrono::milliseconds(sleep_time_ms)); + } + } while (retries < PREP_CMD_MAX_RETRIES && (sqlite3_resut == SQLITE_LOCKED || sqlite3_resut == SQLITE_BUSY)); + + if (sqlite3_resut == SQLITE_DONE) + { + rowNumber++; + + sqlite3_clear_bindings(batch_stmt); + sqlite3_reset(batch_stmt); + } + else + { + raiseError("streamReadings", + "Inserting a row into SQLite using a prepared command - asset_code :%s: error :%s: reading :%s: ", + asset_code, + sqlite3_errmsg(dbHandle), + reading.c_str()); + + sqlite3_exec(dbHandle, "ROLLBACK TRANSACTION", NULL, NULL, NULL); + m_streamOpenTransaction = true; + return -1; + } + } + + while (readings[curReading]) { add_row = true; // Handles - asset_code - asset_code = RDS_ASSET_CODE(readings, i); + asset_code = RDS_ASSET_CODE(readings, curReading); // Handles - reading - payload = RDS_PAYLOAD(readings, i); + payload = RDS_PAYLOAD(readings, curReading); reading = escape(payload); // Handles - user_ts memset(&timeinfo, 0, sizeof(struct tm)); - gmtime_r(&RDS_USER_TIMESTAMP(readings, i).tv_sec, &timeinfo); + gmtime_r(&RDS_USER_TIMESTAMP(readings, curReading).tv_sec, &timeinfo); std::strftime(ts, sizeof(ts), "%Y-%m-%d %H:%M:%S", &timeinfo); - snprintf(micro_s, sizeof(micro_s), ".%06lu", RDS_USER_TIMESTAMP(readings, i).tv_usec); + snprintf(micro_s, sizeof(micro_s), ".%06lu", RDS_USER_TIMESTAMP(readings, curReading).tv_usec); formatted_date[0] = {0}; strncat(ts, micro_s, 10); @@ -458,7 +606,7 @@ int Connection::readingStream(ReadingStream **readings, bool commit) { if (!formatDate(formatted_date, sizeof(formatted_date), user_ts)) { - raiseError("appendReadings", "Invalid date |%s|", user_ts); + raiseError("streamReadings", "Invalid date |%s|", user_ts); add_row = false; } else @@ -469,78 +617,79 @@ int Connection::readingStream(ReadingStream **readings, bool commit) if (add_row) { - if (stmt != NULL) + if (batch_stmt != NULL) { - sqlite3_bind_text(stmt, 1, asset_code, -1, SQLITE_STATIC); - sqlite3_bind_text(stmt, 2, reading.c_str(), -1, SQLITE_STATIC); - sqlite3_bind_text(stmt, 3, user_ts, -1, SQLITE_STATIC); - - retries =0; - sleep_time_ms = 0; + sqlite3_bind_text(stmt, 1, user_ts, -1, SQLITE_STATIC); + sqlite3_bind_text(stmt, 2, asset_code, -1, SQLITE_STATIC); + sqlite3_bind_text(stmt, 3, reading.c_str(), -1, SQLITE_STATIC); + } + } + curReading++; - // Retry mechanism in case SQLlite DB is locked - do { - // Insert the row using a lock to ensure one insert at time - { - m_writeAccessOngoing.fetch_add(1); - //unique_lock lck(db_mutex); - sqlite3_resut = sqlite3_step(stmt); + retries =0; + sleep_time_ms = 0; - m_writeAccessOngoing.fetch_sub(1); - //db_cv.notify_all(); - } + // Retry mechanism in case SQLlite DB is locked + do { + // Insert the row using a lock to ensure one insert at time + { + m_writeAccessOngoing.fetch_add(1); + //unique_lock lck(db_mutex); - if (sqlite3_resut == SQLITE_LOCKED ) - { - sleep_time_ms = PREP_CMD_RETRY_BASE + (random() % PREP_CMD_RETRY_BACKOFF); - retries++; + sqlite3_resut = sqlite3_step(stmt); - Logger::getLogger()->info("SQLITE_LOCKED - record :%d: - retry number :%d: sleep time ms :%d:",i, retries, sleep_time_ms); + 
m_writeAccessOngoing.fetch_sub(1); + //db_cv.notify_all(); + } - std::this_thread::sleep_for(std::chrono::milliseconds(sleep_time_ms)); - } - if (sqlite3_resut == SQLITE_BUSY) - { - ostringstream threadId; - threadId << std::this_thread::get_id(); + if (sqlite3_resut == SQLITE_LOCKED ) + { + sleep_time_ms = PREP_CMD_RETRY_BASE + (random() % PREP_CMD_RETRY_BACKOFF); + retries++; - sleep_time_ms = PREP_CMD_RETRY_BASE + (random() % PREP_CMD_RETRY_BACKOFF); - retries++; + Logger::getLogger()->info("SQLITE_LOCKED - record :%d: - retry number :%d: sleep time ms :%d:",i, retries, sleep_time_ms); - Logger::getLogger()->info("SQLITE_BUSY - thread :%s: - record :%d: - retry number :%d: sleep time ms :%d:", threadId.str().c_str() ,i , retries, sleep_time_ms); + std::this_thread::sleep_for(std::chrono::milliseconds(sleep_time_ms)); + } + if (sqlite3_resut == SQLITE_BUSY) + { + ostringstream threadId; + threadId << std::this_thread::get_id(); - std::this_thread::sleep_for(std::chrono::milliseconds(sleep_time_ms)); - } - } while (retries < PREP_CMD_MAX_RETRIES && (sqlite3_resut == SQLITE_LOCKED || sqlite3_resut == SQLITE_BUSY)); + sleep_time_ms = PREP_CMD_RETRY_BASE + (random() % PREP_CMD_RETRY_BACKOFF); + retries++; - if (sqlite3_resut == SQLITE_DONE) - { - rowNumber++; + Logger::getLogger()->info("SQLITE_BUSY - thread :%s: - record :%d: - retry number :%d: sleep time ms :%d:", threadId.str().c_str() ,i , retries, sleep_time_ms); - sqlite3_clear_bindings(stmt); - sqlite3_reset(stmt); - } - else - { - raiseError("appendReadings", - "Inserting a row into SQLIte using a prepared command - asset_code :%s: error :%s: reading :%s: ", - asset_code, - sqlite3_errmsg(dbHandle), - reading.c_str()); - - sqlite3_exec(dbHandle, "ROLLBACK TRANSACTION", NULL, NULL, NULL); - m_streamOpenTransaction = true; - return -1; - } + std::this_thread::sleep_for(std::chrono::milliseconds(sleep_time_ms)); } + } while (retries < PREP_CMD_MAX_RETRIES && (sqlite3_resut == SQLITE_LOCKED || sqlite3_resut == SQLITE_BUSY)); + + if (sqlite3_resut == SQLITE_DONE) + { + rowNumber++; + + sqlite3_clear_bindings(stmt); + sqlite3_reset(stmt); + } + else + { + raiseError("streamReadings", + "Inserting a row into SQLite using a prepared command - asset_code :%s: error :%s: reading :%s: ", + asset_code, + sqlite3_errmsg(dbHandle), + reading.c_str()); + + sqlite3_exec(dbHandle, "ROLLBACK TRANSACTION", NULL, NULL, NULL); + m_streamOpenTransaction = true; + return -1; } } - rowNumber = i; - + rowNumber = curReading; } catch (exception e) { - raiseError("appendReadings", "Inserting a row into SQLIte using a prepared command - error :%s:", e.what()); + raiseError("appendReadings", "Inserting a row into SQLite using a prepared statement - error :%s:", e.what()); sqlite3_exec(dbHandle, "ROLLBACK TRANSACTION", NULL, NULL, NULL); m_streamOpenTransaction = true; @@ -584,12 +733,9 @@ int Connection::readingStream(ReadingStream **readings, bool commit) timersub(&t2, &t1, &tm); timeT2 = tm.tv_sec + ((double)tm.tv_usec / 1000000); - Logger::getLogger()->debug("readingStream row count :%d:", rowNumber); - Logger::getLogger()->debug("readingStream Timing - stream handling %.3f seconds - commit/finalize %.3f seconds", - timeT1, - timeT2 - ); + Logger::getLogger()->warn("readingStream Timing with %d rows - stream handling %.3f seconds - commit/finalize %.3f seconds", + rowNumber, timeT1, timeT2); #endif return rowNumber; @@ -609,7 +755,7 @@ bool add_row = false; const char *user_ts; const char *asset_code; string reading; -sqlite3_stmt *stmt; +sqlite3_stmt *stmt, 
*batch_stmt; int sqlite3_resut; string now; @@ -621,7 +767,7 @@ int sleep_time_ms = 0; threadId << std::this_thread::get_id(); #if INSTRUMENT - Logger::getLogger()->debug("appendReadings start thread :%s:", threadId.str().c_str()); + Logger::getLogger()->warn("appendReadings start thread :%s:", threadId.str().c_str()); struct timeval start, t1, t2, t3, t4, t5; #endif @@ -630,43 +776,218 @@ int sleep_time_ms = 0; gettimeofday(&start, NULL); #endif - ParseResult ok = doc.Parse(readings); + int len = strlen(readings) + 1; + char *readingsCopy = NULL; + ParseResult ok; +#if INSITU_THRESHOLD + if (len > INSITU_THRESHOLD) + { + readingsCopy = (char *)malloc(len); + memcpy(readingsCopy, readings, len); + ok = doc.ParseInsitu(readingsCopy); + } + else +#endif + { + ok = doc.Parse(readings); + } if (!ok) { raiseError("appendReadings", GetParseError_En(doc.GetParseError())); + if (readingsCopy) + { + free(readingsCopy); + } return -1; } if (!doc.HasMember("readings")) { raiseError("appendReadings", "Payload is missing a readings array"); + if (readingsCopy) + { + free(readingsCopy); + } return -1; } Value &readingsValue = doc["readings"]; if (!readingsValue.IsArray()) { raiseError("appendReadings", "Payload is missing the readings array"); + if (readingsCopy) + { + free(readingsCopy); + } return -1; } const char *sql_cmd="INSERT INTO " READINGS_DB_NAME_BASE ".readings ( user_ts, asset_code, reading ) VALUES (?,?,?)"; + string cmd = sql_cmd; + for (int i = 0; i < APPEND_BATCH_SIZE - 1; i++) + { + cmd.append(", (?,?,?)"); + } sqlite3_prepare_v2(dbHandle, sql_cmd, strlen(sql_cmd), &stmt, NULL); + sqlite3_prepare_v2(dbHandle, cmd.c_str(), cmd.length(), &batch_stmt, NULL); { m_writeAccessOngoing.fetch_add(1); //unique_lock lck(db_mutex); sqlite3_exec(dbHandle, "BEGIN TRANSACTION", NULL, NULL, NULL); #if INSTRUMENT - gettimeofday(&t1, NULL); + gettimeofday(&t1, NULL); #endif - for (Value::ConstValueIterator itr = readingsValue.Begin(); itr != readingsValue.End(); ++itr) + Value::ConstValueIterator itr = readingsValue.Begin(); + SizeType nReadings = readingsValue.Size(); + unsigned int nBatches = nReadings / APPEND_BATCH_SIZE; + Logger::getLogger()->debug("Write %d readings in %d batches of %d", nReadings, nBatches, APPEND_BATCH_SIZE); + for (int batch = 0; batch < nBatches; batch++) + { + int varNo = 1; + for (int readingNo = 0; readingNo < APPEND_BATCH_SIZE; readingNo++) + { + if (!itr->IsObject()) + { + char err[132]; + snprintf(err, sizeof(err), + "Each reading in the readings array must be an object. Reading %d of batch %d", readingNo, batch); + raiseError("appendReadings",err); + sqlite3_exec(dbHandle, "ROLLBACK TRANSACTION;", NULL, NULL, NULL); + if (readingsCopy) + { + free(readingsCopy); + } + return -1; + } + + add_row = true; + + // Handles - user_ts + char formatted_date[LEN_BUFFER_DATE] = {0}; + user_ts = (*itr)["user_ts"].GetString(); + if (strcmp(user_ts, "now()") == 0) + { + getNow(now); + user_ts = now.c_str(); + } + else + { + if (! 
formatDate(formatted_date, sizeof(formatted_date), user_ts) ) + { + raiseError("appendReadings", "Invalid date |%s|", user_ts); + add_row = false; + } + else + { + user_ts = formatted_date; + } + } + + if (add_row) + { + // Handles - asset_code + asset_code = (*itr)["asset_code"].GetString(); + + if (strlen(asset_code) == 0) + { + Logger::getLogger()->warn("Sqlitelb appendReadings - empty asset code value, row is ignored"); + itr++; + continue; + } + // Handles - reading + StringBuffer buffer; + Writer writer(buffer); + (*itr)["reading"].Accept(writer); + reading = escape(buffer.GetString()); + + if (stmt != NULL) + { + + sqlite3_bind_text(batch_stmt, varNo++, user_ts, -1, SQLITE_TRANSIENT); + sqlite3_bind_text(batch_stmt, varNo++, asset_code, -1, SQLITE_TRANSIENT); + sqlite3_bind_text(batch_stmt, varNo++, reading.c_str(), -1, SQLITE_TRANSIENT); + } + } + + itr++; + if (itr == readingsValue.End()) + break; + } + + + retries =0; + sleep_time_ms = 0; + + // Retry mechanism in case SQLlite DB is locked + do { + // Insert the row using a lock to ensure one insert at time + { + + sqlite3_resut = sqlite3_step(batch_stmt); + + } + if (sqlite3_resut == SQLITE_LOCKED ) + { + sleep_time_ms = PREP_CMD_RETRY_BASE + (random() % PREP_CMD_RETRY_BACKOFF); + retries++; + + Logger::getLogger()->info("SQLITE_LOCKED - record :%d: - retry number :%d: sleep time ms :%d:" ,row ,retries ,sleep_time_ms); + + std::this_thread::sleep_for(std::chrono::milliseconds(sleep_time_ms)); + } + if (sqlite3_resut == SQLITE_BUSY) + { + ostringstream threadId; + threadId << std::this_thread::get_id(); + + sleep_time_ms = PREP_CMD_RETRY_BASE + (random() % PREP_CMD_RETRY_BACKOFF); + retries++; + + Logger::getLogger()->info("SQLITE_BUSY - thread :%s: - record :%d: - retry number :%d: sleep time ms :%d:", threadId.str().c_str() ,row, retries, sleep_time_ms); + + std::this_thread::sleep_for(std::chrono::milliseconds(sleep_time_ms)); + } + } while (retries < PREP_CMD_MAX_RETRIES && (sqlite3_resut == SQLITE_LOCKED || sqlite3_resut == SQLITE_BUSY)); + + if (sqlite3_resut == SQLITE_DONE) + { + row += APPEND_BATCH_SIZE; + + sqlite3_clear_bindings(batch_stmt); + sqlite3_reset(batch_stmt); + } + else + { + raiseError("appendReadings","Inserting a row into SQLite using a prepared command - asset_code :%s: error :%s: reading :%s: ", + asset_code, + sqlite3_errmsg(dbHandle), + reading.c_str()); + + sqlite3_exec(dbHandle, "ROLLBACK TRANSACTION", NULL, NULL, NULL); + if (readingsCopy) + { + free(readingsCopy); + } + return -1; + } + + + } + + Logger::getLogger()->debug("Now do the remaining readings"); + // Do individual inserts for the remainder of the readings + while (itr != readingsValue.End()) { if (!itr->IsObject()) { raiseError("appendReadings","Each reading in the readings array must be an object"); sqlite3_exec(dbHandle, "ROLLBACK TRANSACTION;", NULL, NULL, NULL); + if (readingsCopy) + { + free(readingsCopy); + } return -1; } @@ -698,6 +1019,13 @@ int sleep_time_ms = 0; // Handles - asset_code asset_code = (*itr)["asset_code"].GetString(); + if (strlen(asset_code) == 0) + { + Logger::getLogger()->warn("Sqlitelb appendReadings - empty asset code value, row is ignored"); + itr++; + continue; + } + // Handles - reading StringBuffer buffer; Writer writer(buffer); @@ -706,9 +1034,9 @@ int sleep_time_ms = 0; if(stmt != NULL) { - sqlite3_bind_text(stmt, 1, user_ts ,-1, SQLITE_STATIC); - sqlite3_bind_text(stmt, 2, asset_code ,-1, SQLITE_STATIC); - sqlite3_bind_text(stmt, 3, reading.c_str(), -1, SQLITE_STATIC); + sqlite3_bind_text(stmt, 1, 
user_ts ,-1, SQLITE_TRANSIENT); + sqlite3_bind_text(stmt, 2, asset_code ,-1, SQLITE_TRANSIENT); + sqlite3_bind_text(stmt, 3, reading.c_str(), -1, SQLITE_TRANSIENT); retries =0; sleep_time_ms = 0; @@ -753,16 +1081,21 @@ int sleep_time_ms = 0; } else { - raiseError("appendReadings","Inserting a row into SQLIte using a prepared command - asset_code :%s: error :%s: reading :%s: ", + raiseError("appendReadings","Inserting a row into SQLite using a prepared command - asset_code :%s: error :%s: reading :%s: ", asset_code, sqlite3_errmsg(dbHandle), reading.c_str()); sqlite3_exec(dbHandle, "ROLLBACK TRANSACTION", NULL, NULL, NULL); + if (readingsCopy) + { + free(readingsCopy); + } return -1; } } } + itr++; } sqlite3_resut = sqlite3_exec(dbHandle, "END TRANSACTION", NULL, NULL, NULL); @@ -787,6 +1120,10 @@ int sleep_time_ms = 0; } } + if (readingsCopy) + { + free(readingsCopy); + } #if INSTRUMENT gettimeofday(&t3, NULL); #endif @@ -804,7 +1141,7 @@ int sleep_time_ms = 0; timersub(&t3, &t2, &tm); timeT3 = tm.tv_sec + ((double)tm.tv_usec / 1000000); - Logger::getLogger()->debug("appendReadings end thread :%s: buffer :%10lu: count :%5d: JSON :%6.3f: inserts :%6.3f: finalize :%6.3f:", + Logger::getLogger()->warn("appendReadings end thread :%s: buffer :%10lu: count :%5d: JSON :%6.3f: inserts :%6.3f: finalize :%6.3f:", threadId.str().c_str(), strlen(readings), row, @@ -904,7 +1241,8 @@ int retrieve; /** * Perform a query against the readings table * - * retrieveReadings, used by the API, returns timestamp in localtime. + * retrieveReadings, used by the API, returns timestamp in utc unless + * otherwise requested. * */ bool Connection::retrieveReadings(const string& condition, string& resultSet) @@ -915,6 +1253,7 @@ SQLBuffer sql; // Extra constraints to add to where clause SQLBuffer jsonConstraints; bool isAggregate = false; +const char *timezone = "utc"; try { if (dbHandle == NULL) @@ -930,7 +1269,7 @@ bool isAggregate = false; id, asset_code, reading, - strftime(')" F_DATEH24_SEC R"(', user_ts, 'localtime') || + strftime(')" F_DATEH24_SEC R"(', user_ts, 'utc') || substr(user_ts, instr(user_ts, '.'), 7) AS user_ts, strftime(')" F_DATEH24_MS R"(', ts, 'localtime') AS ts FROM )" READINGS_DB_NAME_BASE R"(.readings)"; @@ -945,6 +1284,11 @@ bool isAggregate = false; return false; } + if (document.HasMember("timezone") && document["timezone"].IsString()) + { + timezone = document["timezone"].GetString(); + } + // timebucket aggregate all datapoints if (aggregateAll(document)) { @@ -990,14 +1334,18 @@ bool isAggregate = false; if (strcmp(itr->GetString() ,"user_ts") == 0) { // Display without TZ expression and microseconds also - sql.append(" strftime('" F_DATEH24_SEC "', user_ts, 'localtime') "); + sql.append(" strftime('" F_DATEH24_SEC "', user_ts, '"); + sql.append(timezone); + sql.append("') "); sql.append(" || substr(user_ts, instr(user_ts, '.'), 7) "); sql.append(" as user_ts "); } else if (strcmp(itr->GetString() ,"ts") == 0) { // Display without TZ expression and microseconds also - sql.append(" strftime('" F_DATEH24_MS "', ts, 'localtime') "); + sql.append(" strftime('" F_DATEH24_MS "', ts, '"); + sql.append(timezone); + sql.append("') "); sql.append(" as ts "); } else @@ -1109,7 +1457,9 @@ bool isAggregate = false; { // Extract milliseconds and microseconds for the user_ts fields - sql.append("strftime('" F_DATEH24_SEC "', user_ts, 'localtime') "); + sql.append("strftime('" F_DATEH24_SEC "', user_ts, '"); + sql.append(timezone); + sql.append("') "); sql.append(" || substr(user_ts, instr(user_ts, '.'), 
7) "); if (! itr->HasMember("alias")) { @@ -1121,7 +1471,9 @@ bool isAggregate = false; { sql.append("strftime('" F_DATEH24_MS "', "); sql.append((*itr)["column"].GetString()); - sql.append(", 'localtime')"); + sql.append(", '"); + sql.append(timezone); + sql.append("')"); if (! itr->HasMember("alias")) { sql.append(" AS "); @@ -1164,16 +1516,13 @@ bool isAggregate = false; sql.append(' '); } - const char *sql_cmd = R"( - id, - asset_code, - reading, - strftime(')" F_DATEH24_SEC R"(', user_ts, 'localtime') || - substr(user_ts, instr(user_ts, '.'), 7) AS user_ts, - strftime(')" F_DATEH24_MS R"(', ts, 'localtime') AS ts - FROM )" READINGS_DB_NAME_BASE R"(.)"; + sql.append("id, asset_code, reading, strftime('" F_DATEH24_SEC "', user_ts, '"); + sql.append(timezone); + sql.append("') || substr(user_ts, instr(user_ts, '.'), 7) AS user_ts,"); + sql.append("strftime('" F_DATEH24_MS "', ts, '"); + sql.append(timezone); + sql.append("') AS ts FROM " READINGS_DB_NAME_BASE "."); - sql.append(sql_cmd); } sql.append("readings"); if (document.HasMember("where")) @@ -1285,20 +1634,22 @@ unsigned int Connection::purgeReadings(unsigned long age, result = "{ \"removed\" : 0, "; result += " \"unsentPurged\" : 0, "; result += " \"unsentRetained\" : 0, "; - result += " \"readings\" : 0 }"; + result += " \"readings\" : 0, "; + result += " \"method\" : \"time\", "; + result += " \"duration\" : 0 }"; logger->info("Purge starting..."); gettimeofday(&startTv, NULL); /* * We fetch the current rowid and limit the purge process to work on just * those rows present in the database when the purge process started. - * This provents us looping in the purge process if new readings become + * This prevents us looping in the purge process if new readings become * eligible for purging at a rate that is faster than we can purge them. */ { char *zErrMsg = NULL; int rc; - rc = SQLexec(dbHandle, + rc = SQLexec(dbHandle, "readings", "select max(rowid) from " READINGS_DB_NAME_BASE "." READINGS_TABLE ";", rowidCallback, &rowidLimit, @@ -1316,7 +1667,7 @@ unsigned int Connection::purgeReadings(unsigned long age, { char *zErrMsg = NULL; int rc; - rc = SQLexec(dbHandle, + rc = SQLexec(dbHandle, "readings", "select min(rowid) from " READINGS_DB_NAME_BASE "." READINGS_TABLE ";", rowidCallback, &minrowidLimit, @@ -1346,7 +1697,7 @@ unsigned int Connection::purgeReadings(unsigned long age, int purge_readings = 0; // Exec query and get result in 'purge_readings' via 'selectCallback' - rc = SQLexec(dbHandle, + rc = SQLexec(dbHandle, "readings", query, selectCallback, &purge_readings, @@ -1391,7 +1742,8 @@ unsigned int Connection::purgeReadings(unsigned long age, } unsigned long m=l; - + sqlite3_stmt *idStmt; + bool isMinId = false; while (l <= r) { unsigned long midRowId = 0; @@ -1401,26 +1753,29 @@ unsigned int Connection::purgeReadings(unsigned long age, // e.g. select id from readings where rowid = 219867307 AND user_ts < datetime('now' , '-24 hours', 'utc'); SQLBuffer sqlBuffer; - sqlBuffer.append("select id from " READINGS_DB_NAME_BASE "." READINGS_TABLE " where rowid = "); - sqlBuffer.append(m); - sqlBuffer.append(" AND user_ts < datetime('now' , '-"); - sqlBuffer.append(age); + sqlBuffer.append("select id from " READINGS_DB_NAME_BASE "." 
READINGS_TABLE " where rowid = ?"); + sqlBuffer.append(" AND user_ts < datetime('now' , '-?"); sqlBuffer.append(" hours');"); + const char *query = sqlBuffer.coalesce(); + rc = sqlite3_prepare_v2(dbHandle, query, -1, &idStmt, NULL); + + sqlite3_bind_int(idStmt, 1,(unsigned long) m); + sqlite3_bind_int(idStmt, 2,(unsigned long) age); - rc = SQLexec(dbHandle, - query, - rowidCallback, - &midRowId, - &zErrMsg); - + if (SQLstep(idStmt) == SQLITE_ROW) + { + midRowId = sqlite3_column_int(idStmt, 0); + isMinId = true; + } delete[] query; + sqlite3_clear_bindings(idStmt); + sqlite3_reset(idStmt); - if (rc != SQLITE_OK) + if (rc == SQLITE_ERROR) { - raiseError("purge - phase 1, fetching midRowId ", zErrMsg); - sqlite3_free(zErrMsg); + raiseError("purge - phase 1, fetching midRowId ", sqlite3_errmsg(dbHandle)); return 0; } @@ -1439,6 +1794,11 @@ unsigned int Connection::purgeReadings(unsigned long age, } } + if(isMinId) + { + sqlite3_finalize(idStmt); + } + rowidLimit = m; { // Fix the value of rowidLimit @@ -1453,7 +1813,7 @@ unsigned int Connection::purgeReadings(unsigned long age, sqlBuffer.append(" hours');"); const char *query = sqlBuffer.coalesce(); - rc = SQLexec(dbHandle, + rc = SQLexec(dbHandle, "readings", query, rowidCallback, &rowidLimit, @@ -1489,7 +1849,7 @@ unsigned int Connection::purgeReadings(unsigned long age, idBuffer.append(rowidLimit); idBuffer.append(';'); const char *idQuery = idBuffer.coalesce(); - rc = SQLexec(dbHandle, + rc = SQLexec(dbHandle, "readings", idQuery, rowidCallback, &lastPurgedId, @@ -1512,18 +1872,22 @@ unsigned int Connection::purgeReadings(unsigned long age, unsentPurged = unsent; } } +#if 0 if (m_writeAccessOngoing) { while (m_writeAccessOngoing) { - std::this_thread::sleep_for(std::chrono::milliseconds(100)); + logger->warn("Yielding for another write access"); + std::this_thread::yield(); } } +#endif unsigned int deletedRows = 0; - char *zErrMsg = NULL; unsigned int rowsAffected, totTime=0, prevBlocks=0, prevTotTime=0; logger->info("Purge about to delete readings # %ld to %ld", rowidMin, rowidLimit); + sqlite3_stmt *stmt; + bool rowsAvailableToPurge = false; while (rowidMin < rowidLimit) { blocks++; @@ -1532,50 +1896,60 @@ unsigned int Connection::purgeReadings(unsigned long age, { rowidMin = rowidLimit; } - SQLBuffer sql; - sql.append("DELETE FROM " READINGS_DB_NAME_BASE "." READINGS_TABLE " WHERE rowid <= "); - sql.append(rowidMin); - sql.append(';'); - const char *query = sql.coalesce(); - logSQL("ReadingsPurge", query); - + int rc; + { + SQLBuffer sql; + sql.append("DELETE FROM " READINGS_DB_NAME_BASE "." READINGS_TABLE " WHERE rowid <= ? 
;"); + const char *query = sql.coalesce(); + + rc = sqlite3_prepare_v2(dbHandle, query, strlen(query), &stmt, NULL); + if (rc != SQLITE_OK) + { + raiseError("purgeReadings", sqlite3_errmsg(dbHandle)); + Logger::getLogger()->error("SQL statement: %s", query); + return 0; + } + delete[] query; + } + sqlite3_bind_int(stmt, 1,(unsigned long) rowidMin); + rowsAvailableToPurge = true; { //unique_lock lck(db_mutex); // if (m_writeAccessOngoing) db_cv.wait(lck); START_TIME; // Exec DELETE query: no callback, no resultset - rc = SQLexec(dbHandle, - query, - NULL, - NULL, - &zErrMsg); + rc = SQLstep(stmt); + END_TIME; + + logSQL("ReadingsPurge", sqlite3_expanded_sql(stmt)); - logger->debug("%s - DELETE - query :%s: rowsAffected :%ld:", __FUNCTION__, query ,rowsAffected); - - // Release memory for 'query' var - delete[] query; + logger->debug("%s - DELETE - query :%s: rowsAffected :%ld:", __FUNCTION__, sqlite3_expanded_sql(stmt) ,rowsAffected); totTime += usecs; if(usecs>150000) { - std::this_thread::sleep_for(std::chrono::milliseconds(100+usecs/10000)); + std::this_thread::yield(); // Give other threads a chance to run } } - - if (rc != SQLITE_OK) + if (rc == SQLITE_DONE) { - raiseError("purge - phase 3", zErrMsg); - sqlite3_free(zErrMsg); + sqlite3_clear_bindings(stmt); + sqlite3_reset(stmt); + } + else + { + raiseError("purge - phase 3", sqlite3_errmsg(dbHandle)); return 0; } - + // Get db changes rowsAffected = sqlite3_changes(dbHandle); deletedRows += rowsAffected; + logger->debug("Purge delete block #%d with %d readings", blocks, rowsAffected); if(blocks % RECALC_PURGE_BLOCK_SIZE_NUM_BLOCKS == 0) @@ -1601,11 +1975,16 @@ unsigned int Connection::purgeReadings(unsigned long age, purgeBlockSize = MAX_PURGE_DELETE_BLOCK_SIZE; logger->debug("Changed purgeBlockSize to %d", purgeBlockSize); } - std::this_thread::sleep_for(std::chrono::milliseconds(100)); + std::this_thread::yield(); // Give other threads a chance to run } //Logger::getLogger()->debug("Purge delete block #%d with %d readings", blocks, rowsAffected); } while (rowidMin < rowidLimit); - + + if (rowsAvailableToPurge) + { + sqlite3_finalize(stmt); + } + unsentRetained = maxrowidLimit - rowidLimit; numReadings = maxrowidLimit +1 - minrowidLimit - deletedRows; @@ -1615,19 +1994,22 @@ unsigned int Connection::purgeReadings(unsigned long age, unsentPurged = deletedRows; } + gettimeofday(&endTv, NULL); + unsigned long duration = (1000000 * (endTv.tv_sec - startTv.tv_sec)) + endTv.tv_usec - startTv.tv_usec; + ostringstream convert; convert << "{ \"removed\" : " << deletedRows << ", "; convert << " \"unsentPurged\" : " << unsentPurged << ", "; convert << " \"unsentRetained\" : " << unsentRetained << ", "; - convert << " \"readings\" : " << numReadings << " }"; + convert << " \"readings\" : " << numReadings << ", "; + convert << " \"method\" : \"time\", "; + convert << " \"duration\" : " << duration << " }"; result = convert.str(); //logger->debug("Purge result=%s", result.c_str()); - gettimeofday(&endTv, NULL); - unsigned long duration = (1000000 * (endTv.tv_sec - startTv.tv_sec)) + endTv.tv_usec - startTv.tv_usec; logger->info("Purge process complete in %d blocks in %lduS", blocks, duration); Logger::getLogger()->debug("%s - age :%lu: flag_retain :%x: sent :%lu: result :%s:", __FUNCTION__, age, flags, flag_retain, result.c_str() ); @@ -1651,10 +2033,12 @@ unsigned int Connection::purgeReadingsByRows(unsigned long rows, unsigned long rowsAffected; unsigned long deletePoint; bool flag_retain; + struct timeval startTv, endTv; Logger *logger = 
Logger::getLogger(); + gettimeofday(&startTv, NULL); flag_retain = false; if ( (flags & STORAGE_PURGE_RETAIN_ANY) || (flags & STORAGE_PURGE_RETAIN_ALL) ) @@ -1674,7 +2058,9 @@ unsigned int Connection::purgeReadingsByRows(unsigned long rows, char *zErrMsg = NULL; int rc; - rc = SQLexec(dbHandle, + sqlite3_stmt *stmt; + sqlite3_stmt *idStmt; + rc = SQLexec(dbHandle, "readings", "select count(rowid) from " READINGS_DB_NAME_BASE "." READINGS_TABLE ";", rowidCallback, &rowcount, @@ -1687,7 +2073,7 @@ unsigned int Connection::purgeReadingsByRows(unsigned long rows, return 0; } - rc = SQLexec(dbHandle, + rc = SQLexec(dbHandle, "readings", "select max(id) from " READINGS_DB_NAME_BASE "." READINGS_TABLE ";", rowidCallback, &maxId, @@ -1703,24 +2089,59 @@ unsigned int Connection::purgeReadingsByRows(unsigned long rows, numReadings = rowcount; rowsAffected = 0; deletedRows = 0; + bool rowsAvailableToPurge = true; + + // Create the prepared statements + SQLBuffer sqlBuffer; + sqlBuffer.append("select min(id) from " READINGS_DB_NAME_BASE "." READINGS_TABLE ";"); + const char *idquery = sqlBuffer.coalesce(); + + rc = sqlite3_prepare_v2(dbHandle, idquery, -1, &idStmt, NULL); + if (rc != SQLITE_OK) + { + raiseError("purgeReadingsByRows", sqlite3_errmsg(dbHandle)); + Logger::getLogger()->error("SQL statement: %s", idquery); + delete[] idquery; + return 0; + } + delete[] idquery; + + SQLBuffer sql; + sql.append("delete from " READINGS_DB_NAME_BASE "." READINGS_TABLE " where id <= ? ;"); + const char *delquery = sql.coalesce(); + + rc = sqlite3_prepare_v2(dbHandle, delquery, strlen(delquery), &stmt, NULL); + + if (rc != SQLITE_OK) + { + raiseError("purgeReadingsByRows", sqlite3_errmsg(dbHandle)); + Logger::getLogger()->error("SQL statement: %s", delquery); + delete[] delquery; + return 0; + } + delete[] delquery; do { if (rowcount <= rows) { logger->info("Row count %d is less than required rows %d", rowcount, rows); + rowsAvailableToPurge = false; break; } - rc = SQLexec(dbHandle, - "select min(id) from " READINGS_DB_NAME_BASE "." READINGS_TABLE ";", - rowidCallback, - &minId, - &zErrMsg); + if (SQLstep(idStmt) == SQLITE_ROW) + { + minId = sqlite3_column_int(idStmt, 0); + } - if (rc != SQLITE_OK) + + sqlite3_clear_bindings(idStmt); + sqlite3_reset(idStmt); + + if (rc == SQLITE_ERROR) { - raiseError("purge - phaase 0, fetching minimum id", zErrMsg); + raiseError("purge - phaase 0, fetching minimum id", sqlite3_errmsg(dbHandle)); sqlite3_free(zErrMsg); return 0; } @@ -1737,27 +2158,27 @@ unsigned int Connection::purgeReadingsByRows(unsigned long rows, deletePoint = limit; } } - SQLBuffer sql; - - logger->info("RowCount %lu, Max Id %lu, min Id %lu, delete point %lu", rowcount, maxId, minId, deletePoint); - - sql.append("delete from " READINGS_DB_NAME_BASE "." 
READINGS_TABLE " where id <= "); - sql.append(deletePoint); - const char *query = sql.coalesce(); + { - //unique_lock lck(db_mutex); -// if (m_writeAccessOngoing) db_cv.wait(lck); + logger->info("RowCount %lu, Max Id %lu, min Id %lu, delete point %lu", rowcount, maxId, minId, deletePoint); + + } + sqlite3_bind_int(stmt, 1,(unsigned long) deletePoint); + { // Exec DELETE query: no callback, no resultset - rc = SQLexec(dbHandle, query, NULL, NULL, &zErrMsg); + rc = SQLstep(stmt); + if (rc == SQLITE_DONE) + { + sqlite3_clear_bindings(stmt); + sqlite3_reset(stmt); + } rowsAffected = sqlite3_changes(dbHandle); deletedRows += rowsAffected; numReadings -= rowsAffected; rowcount -= rowsAffected; - // Release memory for 'query' var - delete[] query; logger->debug("Deleted %lu rows", rowsAffected); if (rowsAffected == 0) { @@ -1772,22 +2193,32 @@ unsigned int Connection::purgeReadingsByRows(unsigned long rows, unsentPurged += rowsAffected; } } - std::this_thread::sleep_for(std::chrono::milliseconds(1)); + std::this_thread::yield(); // Give other threads a chane to run } while (rowcount > rows); - + if (rowsAvailableToPurge) + { + sqlite3_finalize(idStmt); + sqlite3_finalize(stmt); + } + if (limit) { unsentRetained = numReadings - rows; } + gettimeofday(&endTv, NULL); + unsigned long duration = (1000000 * (endTv.tv_sec - startTv.tv_sec)) + endTv.tv_usec - startTv.tv_usec; + ostringstream convert; convert << "{ \"removed\" : " << deletedRows << ", "; convert << " \"unsentPurged\" : " << unsentPurged << ", "; convert << " \"unsentRetained\" : " << unsentRetained << ", "; - convert << " \"readings\" : " << numReadings << " }"; + convert << " \"readings\" : " << numReadings << ", "; + convert << " \"method\" : \"rows\", "; + convert << " \"duration\" : " << duration << " }"; result = convert.str(); @@ -1824,17 +2255,19 @@ unsigned int rowsAffected = 0; logSQL("ReadingsAssetPurge", query); +#if 0 if (m_writeAccessOngoing) { while (m_writeAccessOngoing) { - std::this_thread::sleep_for(std::chrono::milliseconds(100)); + std::this_thread::yield(); } } +#endif START_TIME; // Exec DELETE query: no callback, no resultset - rc = SQLexec(dbHandle, + rc = SQLexec(dbHandle, "readings", query, NULL, NULL, diff --git a/C/plugins/storage/sqlitelb/plugin.cpp b/C/plugins/storage/sqlitelb/plugin.cpp index 10a4a1a973..87dfe8f9b6 100644 --- a/C/plugins/storage/sqlitelb/plugin.cpp +++ b/C/plugins/storage/sqlitelb/plugin.cpp @@ -7,7 +7,7 @@ * * Author: Massimiliano Pinto */ -#include +#include #include #include #include diff --git a/C/plugins/storage/sqlitememory/CMakeLists.txt b/C/plugins/storage/sqlitememory/CMakeLists.txt index b3495f16f1..e5d9b3f135 100644 --- a/C/plugins/storage/sqlitememory/CMakeLists.txt +++ b/C/plugins/storage/sqlitememory/CMakeLists.txt @@ -6,8 +6,8 @@ set(CMAKE_CXX_FLAGS_DEBUG "-O0 -ggdb") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") set(STORAGE_COMMON_LIB storage-common-lib) -# Path of compiled libsqlite3.a and .h files: /tmp/sqlite3-pkg/src -set(FLEDGE_SQLITE3_LIBS "/tmp/sqlite3-pkg/src" CACHE INTERNAL "") +# Path of compiled sqlite3 file: /usr/local/bin +set(FLEDGE_SQLITE3_LIBS "/usr/local/bin" CACHE INTERNAL "") # Find source files # Add sqlitelb plugin common files @@ -23,6 +23,7 @@ include_directories(../../../thirdparty/rapidjson/include) # Add sqlitelb plugin header files include_directories(../sqlitelb/include) include_directories(../sqlitelb/common/include) +include_directories(../sqlite/common/include) link_directories(${PROJECT_BINARY_DIR}/../../../lib) diff --git 
a/C/plugins/storage/sqlitememory/connection.cpp b/C/plugins/storage/sqlitememory/connection.cpp index 608fa75fbd..c4757f6568 100644 --- a/C/plugins/storage/sqlitememory/connection.cpp +++ b/C/plugins/storage/sqlitememory/connection.cpp @@ -10,7 +10,9 @@ #include #include -#include +#include +#include +#include /** * SQLite3 storage plugin for Fledge @@ -50,6 +52,8 @@ Connection::Connection() ");"; const char * createReadingsFk = "CREATE INDEX fki_" READINGS_TABLE_MEM "_fk1 ON " READINGS_TABLE_MEM " (asset_code);"; + const char * createReadingsIdx1 = "CREATE INDEX ix1_" READINGS_TABLE_MEM " ON " READINGS_TABLE_MEM " (asset_code, user_ts desc);"; + const char * createReadingsIdx2 = "CREATE INDEX ix2_" READINGS_TABLE_MEM " ON " READINGS_TABLE_MEM " (user_ts);"; // Allow usage of URI for filename sqlite3_config(SQLITE_CONFIG_URI, 1); @@ -99,6 +103,18 @@ Connection::Connection() NULL, NULL); + // Idx1 + rc = sqlite3_exec(dbHandle, + createReadingsIdx1, + NULL, + NULL, + NULL); + // Idx2 + rc = sqlite3_exec(dbHandle, + createReadingsIdx2, + NULL, + NULL, + NULL); } } @@ -111,3 +127,78 @@ bool Connection::vacuum() { return true; } + +/** + * Load the in memory database from a file backup + * + * @param filename The name of the file to restore from + * @return bool Success or failure of the backup + */ +bool Connection::loadDatabase(const string& filename) +{ +int rc; +sqlite3 *file; +sqlite3_backup *backup; + + string pathname = getDataDir() + "/"; + pathname.append(filename); + pathname.append(".db"); + if (access(pathname.c_str(), R_OK) != 0) + { + Logger::getLogger()->warn("Persisted database %s does not exist", + pathname.c_str()); + return false; + } + if ((rc = sqlite3_open(pathname.c_str(), &file)) == SQLITE_OK) + { + if (backup = sqlite3_backup_init(dbHandle, READINGS_TABLE_MEM, file, "main")) + { + (void)sqlite3_backup_step(backup, -1); + (void)sqlite3_backup_finish(backup); + Logger::getLogger()->info("Reloaded persisted data to in-memory database"); + } + rc = sqlite3_errcode(dbHandle); + + (void)sqlite3_close(file); + } + return rc == SQLITE_OK; +} + +/** + * Backup the in memory database to a file + * + * @param filename The name of the file to backup to + * @return bool Success or failure of the backup + */ +bool Connection::saveDatabase(const string& filename) +{ +int rc; +sqlite3 *file; +sqlite3_backup *backup; + + string pathname = getDataDir() + "/"; + pathname.append(filename); + pathname.append(".db"); + unlink(pathname.c_str()); + if ((rc = sqlite3_open(pathname.c_str(), &file)) == SQLITE_OK) + { + if (backup = sqlite3_backup_init(file, "main", dbHandle, READINGS_TABLE_MEM)) + { + rc = sqlite3_backup_step(backup, -1); + (void)sqlite3_backup_finish(backup); + Logger::getLogger()->info("Persisted data from in-memory database to %s", pathname.c_str()); + } + rc = sqlite3_errcode(file); + if (rc != SQLITE_OK) + { + Logger::getLogger()->warn("Persisting in-memory database failed: %s", sqlite3_errmsg(file)); + } + + (void)sqlite3_close(file); + } + else + { + Logger::getLogger()->warn("Failed to open database %s to persist in-memory data", pathname.c_str()); + } + return rc == SQLITE_OK; +} diff --git a/C/plugins/storage/sqlitememory/include/connection.h b/C/plugins/storage/sqlitememory/include/connection.h index 974c4d7821..44fc47d3f4 100644 --- a/C/plugins/storage/sqlitememory/include/connection.h +++ b/C/plugins/storage/sqlitememory/include/connection.h @@ -15,6 +15,11 @@ #include #include +WARNING: THIS FILE IS NOT USED + +#define READINGS_TABLE "readings" +#define 
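loadDatabase() and saveDatabase() above persist the in-memory readings database with the SQLite online backup API. The sketch below shows the same copy in isolation; it simplifies both schema names to "main", whereas the plugin passes the name of its attached in-memory schema on one side, and the file path is purely illustrative.

#include <sqlite3.h>
#include <cstdio>

// Copy every page of src into dst using the SQLite online backup API.
static bool copyDatabase(sqlite3 *dst, sqlite3 *src)
{
	sqlite3_backup *b = sqlite3_backup_init(dst, "main", src, "main");
	if (b == NULL)
		return false;
	(void)sqlite3_backup_step(b, -1);	// -1 copies all remaining pages
	(void)sqlite3_backup_finish(b);
	return sqlite3_errcode(dst) == SQLITE_OK;
}

int main()
{
	sqlite3 *mem, *file;
	sqlite3_open(":memory:", &mem);
	sqlite3_exec(mem, "CREATE TABLE readings_demo(v); INSERT INTO readings_demo VALUES (42);",
			 NULL, NULL, NULL);
	sqlite3_open("/tmp/inmemory_demo.db", &file);	// illustrative backup file
	bool ok = copyDatabase(file, mem);		// persist: memory -> file
	printf("persist %s\n", ok ? "ok" : "failed");
	sqlite3_close(file);
	sqlite3_close(mem);
	return 0;
}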
READINGS_TABLE_MEM READINGS_TABLE "_1" + class Connection { public: Connection(); @@ -30,7 +35,9 @@ class Connection { void setTrace(bool flag) { m_logSQL = flag; }; static bool formatDate(char *formatted_date, size_t formatted_date_size, const char *date); unsigned int purgeReadingsAsset(const std::string& asset); - bool vacuum() { return true; }; + bool vacuum(); + bool loadDatabase(const std::string& filname); + bool saveDatabase(const std::string& filname); private: int SQLexec(sqlite3 *db, const char *sql, int (*callback)(void*,int,char**,char**), diff --git a/C/plugins/storage/sqlitememory/include/connection_manager.h b/C/plugins/storage/sqlitememory/include/connection_manager.h index b2e527235b..24cbf04c04 100644 --- a/C/plugins/storage/sqlitememory/include/connection_manager.h +++ b/C/plugins/storage/sqlitememory/include/connection_manager.h @@ -32,6 +32,13 @@ class MemConnectionManager { { return &lastError; } + void setPersist(bool persist, const std::string& filename = "") + { + m_persist = persist; + m_filename = filename; + } + bool persist() { return m_persist; }; + std::string filename() { return m_filename; }; private: MemConnectionManager(); @@ -43,6 +50,8 @@ class MemConnectionManager { std::mutex errorLock; PLUGIN_ERROR lastError; bool m_trace; + bool m_persist; + std::string m_filename; }; #endif diff --git a/C/plugins/storage/sqlitememory/plugin.cpp b/C/plugins/storage/sqlitememory/plugin.cpp index f05a6d4a5d..f44b443148 100644 --- a/C/plugins/storage/sqlitememory/plugin.cpp +++ b/C/plugins/storage/sqlitememory/plugin.cpp @@ -7,7 +7,7 @@ * * Author: Massimiliano Pinto */ - +#include #include #include #include @@ -24,7 +24,7 @@ #include #include #include -#include +#include using namespace std; using namespace rapidjson; @@ -41,6 +41,21 @@ const char *default_config = QUOTE({ "default" : "5", "displayName" : "Pool Size", "order" : "1" + }, + "filename" : { + "description" : "The name of the file to which the in-memory database should be persisted", + "type" : "string", + "default" : "inmemory", + "displayName" : "Persist File", + "order" : "3", + "validity": "persist == \"true\"" + }, + "persist" : { + "description" : "Enable the persistence of the in-memory database between executions", + "type" : "boolean", + "default" : "false", + "displayName" : "Persist Data", + "order" : "2" } }); @@ -82,6 +97,27 @@ int poolSize = 5; poolSize = strtol(category->getValue("poolSize").c_str(), NULL, 10); } manager->growPool(poolSize); + if (category->itemExists("persist")) + { + string p = category->getValue("persist"); + if (p.compare("true") == 0 && category->itemExists("filename")) + { + manager->setPersist(true, category->getValue("filename")); + } + else + { + manager->setPersist(false); + } + } + else + { + manager->setPersist(false); + } + if (manager->persist()) + { + Connection *connection = manager->allocate(); + connection->loadDatabase(manager->filename()); + } return manager; } /** @@ -97,6 +133,21 @@ Connection *connection = manager->allocate(); return result;; } +/** + * Append a stream of readings to the readings buffer + */ +int plugin_readingStream(PLUGIN_HANDLE handle, ReadingStream **readings, bool commit) +{ + int result = 0; + ConnectionManager *manager = (ConnectionManager *)handle; + Connection *connection = manager->allocate(); + + result = connection->readingStream(readings, commit); + + manager->release(connection); + return result;; +} + /** * Fetch a block of readings from the readings buffer */ @@ -174,6 +225,11 @@ bool plugin_shutdown(PLUGIN_HANDLE 
handle) { ConnectionManager *manager = (ConnectionManager *)handle; + if (manager->persist()) + { + Connection *connection = manager->allocate(); + connection->saveDatabase(manager->filename()); + } manager->shutdown(); return true; } diff --git a/C/plugins/utils/get_plugin_info.cpp b/C/plugins/utils/get_plugin_info.cpp index cae8eb4bbb..22c83ae886 100644 --- a/C/plugins/utils/get_plugin_info.cpp +++ b/C/plugins/utils/get_plugin_info.cpp @@ -41,14 +41,15 @@ int main(int argc, char *argv[]) exit(1); } + openlog("Fledge PluginInfo", LOG_PID|LOG_CONS, LOG_USER); + setlogmask(LOG_UPTO(LOG_WARNING)); + if (access(argv[1], F_OK|R_OK) != 0) { - fprintf(stderr, "Unable to access library file '%s', exiting...\n", argv[1]); + syslog(LOG_ERR, "Unable to access library file '%s', exiting...\n", argv[1]); exit(2); } - openlog(argv[0], LOG_PID|LOG_CONS, LOG_USER); - setlogmask(LOG_UPTO(LOG_WARNING)); if ((hndl = dlopen(argv[1], RTLD_GLOBAL|RTLD_LAZY)) != NULL) { @@ -56,7 +57,7 @@ int main(int argc, char *argv[]) if (infoEntry == NULL) { // Unable to find plugin_info entry point - fprintf(stderr, "Plugin library %s does not support %s function : %s\n", argv[1], routine, dlerror()); + syslog(LOG_ERR, "Plugin library %s does not support %s function : %s\n", argv[1], routine, dlerror()); dlclose(hndl); closelog(); exit(3); @@ -66,7 +67,7 @@ int main(int argc, char *argv[]) } else { - fprintf(stderr, "dlopen failed: %s\n", dlerror()); + syslog(LOG_ERR, "dlopen failed: %s\n", dlerror()); } closelog(); diff --git a/C/services/common/include/binary_plugin_handle.h b/C/services/common/include/binary_plugin_handle.h index e690621a68..277593ba4d 100644 --- a/C/services/common/include/binary_plugin_handle.h +++ b/C/services/common/include/binary_plugin_handle.h @@ -24,14 +24,26 @@ class BinaryPluginHandle : public PluginHandle public: // for the Storage plugin BinaryPluginHandle(const char *name, const char *path, tPluginType type) { + dlerror(); // Clear the existing error handle = dlopen(path, RTLD_LAZY); + if (!handle) + { + Logger::getLogger()->error("Unable to load storage plugin %s, %s", + name, dlerror()); + } Logger::getLogger()->debug("%s - storage plugin / RTLD_LAZY - name :%s: path :%s:", __FUNCTION__, name, path); } // for all the others plugins BinaryPluginHandle(const char *name, const char *path) { + dlerror(); // Clear the existing error handle = dlopen(path, RTLD_LAZY|RTLD_GLOBAL); + if (!handle) + { + Logger::getLogger()->error("Unable to load plugin %s, %s", + name, dlerror()); + } Logger::getLogger()->debug("%s - other plugin / RTLD_LAZY|RTLD_GLOBAL - name :%s: path :%s:", __FUNCTION__, name, path); } diff --git a/C/services/common/include/perfmonitors.h b/C/services/common/include/perfmonitors.h new file mode 100644 index 0000000000..8ed24699f8 --- /dev/null +++ b/C/services/common/include/perfmonitors.h @@ -0,0 +1,66 @@ +#ifndef _PERFMONITOR_H +#define _PERFMONITOR_H +/* + * Fledge performance monitor + * + * Copyright (c) 2023 Dianomic Systems + * + * Released under the Apache 2.0 Licence + * + * Author: Mark Riddoch + */ +#include +#include +#include +#include +#include +#include + +class PerfMon { + public: + PerfMon(const std::string& name); + void addValue(long value); + int getValues(InsertValues& values); + private: + std::string m_name; + long m_average; + long m_min; + long m_max; + int m_samples; + std::mutex m_mutex; +}; +/** + * Class to handle the performance monitors + */ +class PerformanceMonitor { + public: + PerformanceMonitor(const std::string& service, StorageClient 
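BinaryPluginHandle now clears any stale dlerror() state before dlopen() and logs the loader's reason when the plugin fails to load. A standalone sketch of that pattern, with a hypothetical library path:

#include <dlfcn.h>
#include <cstdio>

int main(int argc, char *argv[])
{
	const char *path = argc > 1 ? argv[1] : "./libexample-plugin.so";	// illustrative path
	dlerror();					// clear any existing error state first
	void *handle = dlopen(path, RTLD_LAZY | RTLD_GLOBAL);
	if (!handle)
	{
		fprintf(stderr, "Unable to load plugin %s, %s\n", path, dlerror());
		return 1;
	}
	dlclose(handle);
	return 0;
}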
*storage); + ~PerformanceMonitor(); + /** + * Collect a performance monitor + * + * @param name Name of the monitor + * @param calue Value of the monitor + */ + inline void collect(const std::string& name, long value) + { + if (m_collecting) + { + doCollection(name, value); + } + }; + void setCollecting(bool state); + void writeThread(); + private: + void doCollection(const std::string& name, long value); + private: + std::string m_service; + StorageClient *m_storage; + std::thread *m_thread; + bool m_collecting; + std::unordered_map + m_monitors; + std::condition_variable m_cv; + std::mutex m_mutex; +}; +#endif diff --git a/C/services/common/include/plugin_manager.h b/C/services/common/include/plugin_manager.h index 33b58769d2..a53293e4c8 100755 --- a/C/services/common/include/plugin_manager.h +++ b/C/services/common/include/plugin_manager.h @@ -58,6 +58,7 @@ class PluginManager { private: PluginManager(); + std::string findPlugin(std::string name, std::string _type, std::string _plugin_path, PLUGIN_TYPE type); private: std::list plugins; @@ -69,7 +70,7 @@ class PluginManager { std::map pluginHandleMap; Logger* logger; - tPluginType m_pluginType; + tPluginType m_pluginType; }; #endif diff --git a/C/services/common/include/service_handler.h b/C/services/common/include/service_handler.h index 5899d347bd..2075315ef5 100644 --- a/C/services/common/include/service_handler.h +++ b/C/services/common/include/service_handler.h @@ -35,6 +35,8 @@ class ServiceHandler class ServiceAuthHandler : public ServiceHandler { public: + ServiceAuthHandler() : m_refreshThread(NULL), m_refreshRunning(true) {}; + virtual ~ServiceAuthHandler() { if (m_refreshThread) { m_refreshRunning = false; m_refreshThread->join(); delete m_refreshThread; } }; std::string& getName() { return m_name; }; std::string& getType() { return m_type; }; bool createSecurityCategories(ManagementClient* mgtClient, bool dryRun); @@ -105,6 +107,8 @@ class ServiceAuthHandler : public ServiceHandler ConfigCategory m_security; // Service ACL ACL m_service_acl; + std::thread *m_refreshThread; + bool m_refreshRunning; }; #endif diff --git a/C/services/common/management_api.cpp b/C/services/common/management_api.cpp index 0249012e4f..fbaffe7887 100644 --- a/C/services/common/management_api.cpp +++ b/C/services/common/management_api.cpp @@ -195,13 +195,25 @@ void ManagementApi::configChange(shared_ptr response, shar { ostringstream convert; string responsePayload; -string category, items, payload; - - payload = request->content.string(); - ConfigCategoryChange conf(payload); - ConfigHandler *handler = ConfigHandler::getInstance(NULL); - handler->configChange(conf.getName(), conf.itemsToJSON(true)); - convert << "{ \"message\" ; \"Config change accepted\" }"; +string payload; + + try + { + payload = request->content.string(); + ConfigCategoryChange conf(payload); + ConfigHandler *handler = ConfigHandler::getInstance(NULL); + handler->configChange(conf.getName(), conf.itemsToJSON(true)); + convert << "{ \"message\" : \"Config change accepted\" }"; + } + catch(const std::exception& e) + { + convert << "{ \"exception\" : \"" << e.what() << "\" }"; + } + catch(...) 
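The ServiceAuthHandler change above gives the bearer-token refresh thread an owner: the destructor clears a running flag and joins the thread before freeing it. The toy class below mirrors that shape; it uses std::atomic<bool> for the flag, which is a hardening choice of this sketch rather than what the patch itself does with a plain bool.

#include <thread>
#include <atomic>
#include <chrono>
#include <cstdio>

// Toy owner of a background thread: the flag is cleared and the thread
// joined in the destructor, mirroring the m_refreshRunning/m_refreshThread idea.
class RefresherOwner {
	public:
		RefresherOwner() : m_thread(NULL), m_running(true)
		{
			m_thread = new std::thread([this]() {
				while (m_running)
					std::this_thread::sleep_for(std::chrono::milliseconds(50));
			});
		}
		~RefresherOwner()
		{
			if (m_thread)
			{
				m_running = false;	// ask the worker to stop
				m_thread->join();	// wait for it before freeing
				delete m_thread;
			}
		}
	private:
		std::thread		*m_thread;
		std::atomic<bool>	m_running;
};

int main()
{
	RefresherOwner owner;
	std::this_thread::sleep_for(std::chrono::milliseconds(120));
	printf("shutting down cleanly\n");
	return 0;
}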
+ { + convert << "{ \"exception\" : \"generic\" }"; + } + responsePayload = convert.str(); respond(response, responsePayload); } diff --git a/C/services/common/perfmonitor.cpp b/C/services/common/perfmonitor.cpp new file mode 100644 index 0000000000..4f40cfc79e --- /dev/null +++ b/C/services/common/perfmonitor.cpp @@ -0,0 +1,183 @@ +/* + * Fledge storage service client + * + * Copyright (c) 2023 Dianomic Systems + * + * Released under the Apache 2.0 Licence + * + * Author: Mark Riddoch + */ +#include +#include + +using namespace std; + +/** + * Constructor for an individual performance monitor + * + * @param name The name of the performance monitor + */ +PerfMon::PerfMon(const string& name) : m_name(name), m_samples(0) +{ +} + +/** + * Collect a new value for the performance monitor + * + * @param value The new value + */ +void PerfMon::addValue(long value) +{ + lock_guard guard(m_mutex); + if (m_samples) + { + if (value < m_min) + m_min = value; + else if (value > m_max) + m_max = value; + m_average = ((m_samples * m_average) + value) / (m_samples + 1); + m_samples++; + } + else + { + m_min = value; + m_max = value; + m_average = value; + m_samples = 1; + } +} + +/** + * Return the performance values to insert + * + */ +int PerfMon::getValues(InsertValues& values) +{ + lock_guard guard(m_mutex); + if (m_samples == 0) + return 0; + values.push_back(InsertValue("minimum", m_min)); + values.push_back(InsertValue("maximum", m_max)); + values.push_back(InsertValue("average", m_average)); + values.push_back(InsertValue("samples", m_samples)); + m_min = 0; + m_max = 0; + m_average = 0; + int samples = m_samples; + m_samples = 0; + return samples; +} + +/** + * Constructor for the performance monitors + * + * @param service The name of the service + * @param storage Point to the storage client class for the service + */ +PerformanceMonitor::PerformanceMonitor(const string& service, StorageClient *storage) : + m_service(service), m_storage(storage), m_collecting(false), m_thread(NULL) +{ +} + +/** + * Destructor for the performance monitor + */ +PerformanceMonitor::~PerformanceMonitor() +{ + if (m_collecting) + { + setCollecting(false); + } + // Write thread has now been stopped or + // was never running + for (const auto& it : m_monitors) + { + string name = it.first; + PerfMon *mon = it.second; + delete mon; + } +} + +/** + * Monitor thread entry point + * + * @param perfMon The perforamnce monitore class + */ +static void monitorThread(PerformanceMonitor *perfMon) +{ + perfMon->writeThread(); +} + +/** + * Set the collection state of the performance monitors + * + * @param state The required collection state + */ +void PerformanceMonitor::setCollecting(bool state) +{ + m_collecting = state; + if (m_collecting && m_thread == NULL) + { + // Start the thread to write the monitors to the database + m_thread = new thread(monitorThread, this); + } + else if (m_collecting == false && m_thread) + { + // Stop the thread to write the monitors to the database + m_cv.notify_all(); + m_thread->join(); + delete m_thread; + m_thread = NULL; + } +} + +/** + * Add a new value to the named performance monitor + * + * @param name The name of the performance monitor + * @param value The value to add + */ +void PerformanceMonitor::doCollection(const string& name, long value) +{ + PerfMon *mon; + auto it = m_monitors.find(name); + if (it == m_monitors.end()) + { + // Create a new monitor + mon = new PerfMon(name); + m_monitors[name] = mon; + } + else + { + mon = it->second; + } + mon->addValue(value); +} + +/** + * 
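PerfMon::addValue keeps only a running minimum, maximum and mean, so no sample history is stored between flushes. The mean is updated incrementally as avg' = (n * avg + v) / (n + 1) in long integer arithmetic; the tiny worked example below shows the update and the truncation that integer division introduces.

#include <cstdio>

// Incremental mean with integer arithmetic, as used for the per-monitor average:
// avg' = (n * avg + v) / (n + 1)
int main()
{
	long average = 0;
	int  samples = 0;
	long values[] = { 10, 20, 40, 10 };
	for (long v : values)
	{
		if (samples)
			average = ((samples * average) + v) / (samples + 1);
		else
			average = v;
		samples++;
		printf("after %d samples average=%ld\n", samples, average);
	}
	return 0;	// prints 10, 15, 23, 19 (integer division truncates)
}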
The thread that runs to write database values + */ +void PerformanceMonitor::writeThread() +{ + while (m_collecting) + { + unique_lock lk(m_mutex); + m_cv.wait_for(lk, chrono::seconds(60)); + if (m_collecting) + { + // Write to the database + for (const auto& it : m_monitors) + { + string name = it.first; + PerfMon *mon = it.second; + InsertValues values; + if (mon->getValues(values) > 0) + { + values.push_back(InsertValue("service", m_service)); + values.push_back(InsertValue("monitor", name)); + m_storage->insertTable("monitors", values); + } + } + } + } +} diff --git a/C/services/common/plugin_manager.cpp b/C/services/common/plugin_manager.cpp index 0a3ad194b8..9b27590dc2 100755 --- a/C/services/common/plugin_manager.cpp +++ b/C/services/common/plugin_manager.cpp @@ -28,6 +28,7 @@ #include "rapidjson/error/en.h" #include #include +#include using namespace std; using namespace rapidjson; @@ -77,7 +78,9 @@ void updateJsonPluginConfig(PLUGIN_INFORMATION *info, string json_plugin_name, s doc.Parse(json_plugin_defaults.c_str()); if (doc.HasParseError()) { - logger->error("doc JSON parsing failed"); + logger->error("Parse error in plugin '%s' defaults: %s at %d '%s'", json_plugin_name.c_str(), + GetParseError_En(doc.GetParseError()), (unsigned)doc.GetErrorOffset(), + StringAround(json_plugin_defaults, (unsigned)doc.GetErrorOffset())); return; } @@ -85,7 +88,9 @@ void updateJsonPluginConfig(PLUGIN_INFORMATION *info, string json_plugin_name, s docBase.Parse(info->config); if (docBase.HasParseError()) { - logger->error("docBase JSON parsing failed"); + logger->error("Parse error in plugin '%s' information defaults: %s at %d '%s'", json_plugin_name.c_str(), + GetParseError_En(doc.GetParseError()), (unsigned)doc.GetErrorOffset(), + StringAround(info->config, (unsigned)doc.GetErrorOffset())); return; } @@ -176,7 +181,9 @@ void updateJsonPluginConfig(PLUGIN_INFORMATION *info, string json_plugin_name, s doc2.Parse(info->config); if (doc2.HasParseError()) { - logger->error("doc2 JSON parsing failed"); + logger->error("Parse error in information returned from plugin: %s at %d '%s'", + GetParseError_En(doc2.GetParseError()), (unsigned)doc2.GetErrorOffset(), + StringAround(info->config, (unsigned)doc2.GetErrorOffset())); } if (doc2.HasMember("plugin")) { @@ -208,7 +215,7 @@ void updateJsonPluginConfig(PLUGIN_INFORMATION *info, string json_plugin_name, s * @param type The plugin type * @return string The absolute path of plugin */ -string findPlugin(string name, string _type, string _plugin_path, PLUGIN_TYPE type) +string PluginManager::findPlugin(string name, string _type, string _plugin_path, PLUGIN_TYPE type) { if (type != BINARY_PLUGIN && type != PYTHON_PLUGIN && type != JSON_PLUGIN) { diff --git a/C/services/common/service_security.cpp b/C/services/common/service_security.cpp index 7836695e36..3504ce7db8 100644 --- a/C/services/common/service_security.cpp +++ b/C/services/common/service_security.cpp @@ -12,6 +12,8 @@ #define TO_STRING_(...) #__VA_ARGS__ #define QUOTE(...) 
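The plugin_manager.cpp hunks above replace the bare "JSON parsing failed" messages with RapidJSON's error description and offset. A minimal sketch of that reporting pattern follows; it omits the Fledge StringAround() helper that the patch also uses to quote the text around the failure point.

#include <rapidjson/document.h>
#include <rapidjson/error/en.h>
#include <cstdio>

int main()
{
	const char *badJson = "{ \"plugin\" : { \"description\" : }";	// deliberately broken
	rapidjson::Document doc;
	doc.Parse(badJson);
	if (doc.HasParseError())
	{
		fprintf(stderr, "Parse error: %s at offset %u\n",
			rapidjson::GetParseError_En(doc.GetParseError()),
			(unsigned)doc.GetErrorOffset());
		return 1;
	}
	return 0;
}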
TO_STRING(__VA_ARGS__) +#define DELTA_SECONDS_BEFORE_TOKEN_EXPIRATION 120 + using namespace std; using HttpServer = SimpleWeb::Server; @@ -93,7 +95,7 @@ bool ServiceAuthHandler::createSecurityCategories(ManagementClient* mgtClient, b // Start thread for automatic bearer token refresh, before expiration if (this->getType() != "Southbound" && dryRun == false) { - new thread(bearer_token_refresh_thread, this); + m_refreshThread = new thread(bearer_token_refresh_thread, this); } return true; @@ -593,9 +595,9 @@ void ServiceAuthHandler::refreshBearerToken() // While server is running get bearer token // and sleeps for a few secods. - // When expires_in - 10 seconds is done + // When expires_in - DELTA_SECONDS_BEFORE_TOKEN_EXPIRATION seconds is done // then get new token and sleep again - while (this->isRunning()) + while (m_refreshRunning) { if (k >= max_retries) { @@ -604,7 +606,7 @@ void ServiceAuthHandler::refreshBearerToken() Logger::getLogger()->error(msg.c_str()); // Shutdown service - if (this->isRunning()) + if (m_refreshRunning) { Logger::getLogger()->warn("Service is being shut down " \ "due to bearer token refresh error"); @@ -644,7 +646,7 @@ void ServiceAuthHandler::refreshBearerToken() current_token = bToken.token(); // Token exists and it is valid, get expiration time - expires_in = bToken.getExpiration() - time(NULL) - 10; + expires_in = bToken.getExpiration() - time(NULL) - DELTA_SECONDS_BEFORE_TOKEN_EXPIRATION; Logger::getLogger()->debug("Bearer token refresh will be called in " "%ld seconds, service '%s'", @@ -663,7 +665,7 @@ void ServiceAuthHandler::refreshBearerToken() // A shutdown maybe is set, since last check: check it now // refresh_token core API endpoint - if (!this->isRunning()) + if (!m_refreshRunning) { Logger::getLogger()->info("Service is being shut down: " \ "refresh thread does not call " \ diff --git a/C/services/filter-plugin-interfaces/python/filter_ingest_pymodule/ingest_callback_pymodule.cpp b/C/services/filter-plugin-interfaces/python/filter_ingest_pymodule/ingest_callback_pymodule.cpp index 6b78e44437..d8c4cd0e6a 100755 --- a/C/services/filter-plugin-interfaces/python/filter_ingest_pymodule/ingest_callback_pymodule.cpp +++ b/C/services/filter-plugin-interfaces/python/filter_ingest_pymodule/ingest_callback_pymodule.cpp @@ -19,7 +19,7 @@ extern "C" { typedef void (*INGEST_CB_DATA)(void *, PythonReadingSet *); -static void filter_plugin_ingest_fn(PyObject *ingest_callback, +static void filter_plugin_async_ingest_fn(PyObject *ingest_callback, PyObject *ingest_obj_ref_data, PyObject *readingsObj); @@ -50,7 +50,7 @@ static PyObject *filter_ingest_callback(PyObject *self, PyObject *args) } // Invoke callback routine - filter_plugin_ingest_fn(callback, + filter_plugin_async_ingest_fn(callback, ingestData, readingList); @@ -106,7 +106,7 @@ PyInit_filter_ingest(void) * @param ingest_obj_ref_data Object parameter for callback routine * @param readingsObj Readongs data as PyObject */ -void filter_plugin_ingest_fn(PyObject *ingest_callback, +void filter_plugin_async_ingest_fn(PyObject *ingest_callback, PyObject *ingest_obj_ref_data, PyObject *readingsObj) { @@ -115,21 +115,17 @@ void filter_plugin_ingest_fn(PyObject *ingest_callback, readingsObj == NULL) { Logger::getLogger()->error("PyC interface error: " - "filter_plugin_ingest_fn: " + "%s: " "filter_ingest_callback=%p, " "ingest_obj_ref_data=%p, " "readingsObj=%p", + __FUNCTION__, ingest_callback, ingest_obj_ref_data, readingsObj); return; } - - PyObject* objectsRepresentation = PyObject_Repr(readingsObj); - const 
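The refresh thread now schedules the next token renewal DELTA_SECONDS_BEFORE_TOKEN_EXPIRATION (120) seconds before expiry instead of 10. The helper below is only an illustration of that arithmetic; the one-second floor is an added guard of this sketch, and the surrounding retry and shutdown logic from refreshBearerToken() is elided.

#include <ctime>
#include <cstdio>

#define DELTA_SECONDS_BEFORE_TOKEN_EXPIRATION 120

// Sleep until shortly before the bearer token expires, never less than a small
// floor so a token that is already close to expiry still gets refreshed.
static long secondsUntilRefresh(time_t tokenExpiration)
{
	long expires_in = (long)(tokenExpiration - time(NULL)) - DELTA_SECONDS_BEFORE_TOKEN_EXPIRATION;
	if (expires_in < 1)
		expires_in = 1;
	return expires_in;
}

int main()
{
	time_t expiry = time(NULL) + 3600;	// token valid for one hour (illustrative)
	printf("refresh in %ld seconds\n", secondsUntilRefresh(expiry));
	return 0;
}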
char* s = PyUnicode_AsUTF8(objectsRepresentation); - Logger::getLogger()->debug("filter_plugin_ingest_fn:L%d : Py2C: filtered readings=%s", __LINE__, s); - Py_CLEAR(objectsRepresentation); - + PythonReadingSet *pyReadingSet = NULL; // Check we have a list of readings @@ -171,6 +167,7 @@ void filter_plugin_ingest_fn(PyObject *ingest_callback, // Get ingest object parameter void *data = PyCapsule_GetPointer(ingest_obj_ref_data, NULL); + Logger::getLogger()->debug("%s:%d: cb function at address %p", __FUNCTION__, __LINE__, *cb); // Invoke callback method for ReadingSet filter ingestion (*cb)(data, pyReadingSet); } diff --git a/C/services/filter-plugin-interfaces/python/python_plugin_interface.cpp b/C/services/filter-plugin-interfaces/python/python_plugin_interface.cpp index b3ac285f53..cdd3cb37b3 100755 --- a/C/services/filter-plugin-interfaces/python/python_plugin_interface.cpp +++ b/C/services/filter-plugin-interfaces/python/python_plugin_interface.cpp @@ -262,7 +262,7 @@ void filter_plugin_ingest_fn(PLUGIN_HANDLE handle, READINGSET *data) } Logger::getLogger()->debug("C2Py: filter_plugin_ingest_fn():L%d: data->getCount()=%d", __LINE__, data->getCount()); - + // Create a readingList of readings to be filtered PythonReadingSet *pyReadingSet = (PythonReadingSet *) data; PyObject* readingsList = pyReadingSet->toPython(); @@ -272,9 +272,6 @@ void filter_plugin_ingest_fn(PLUGIN_HANDLE handle, READINGSET *data) handle, readingsList); Py_CLEAR(pFunc); - // Remove input data - delete (ReadingSet *)data; - data = NULL; // Handle returned data if (!pReturn) @@ -285,6 +282,45 @@ void filter_plugin_ingest_fn(PLUGIN_HANDLE handle, READINGSET *data) logErrorMessage(); } + data->removeAll(); + delete data; + +#if 0 + PythonReadingSet *filteredReadingSet = NULL; + if (pReturn) + { + // Check we have a list of readings + if (PyList_Check(readingsList)) + { + try + { + // Create ReadingSet from Python reading list + filteredReadingSet = new PythonReadingSet(readingsList); + + // Remove input data + data->removeAll(); + + // Append filtered readings; append will empty the passed reading set as well + data->append(filteredReadingSet); + + delete filteredReadingSet; + filteredReadingSet = NULL; + } + catch (std::exception e) + { + Logger::getLogger()->warn("Unable to create a PythonReadingSet, error: %s", e.what()); + filteredReadingSet = NULL; + } + } + else + { + Logger::getLogger()->error("Filter did not return a Python List " + "but object type %s", + Py_TYPE(readingsList)->tp_name); + } + } +#endif + // Remove readings to dict Py_CLEAR(readingsList); // Remove CallFunction result diff --git a/C/services/north-plugin-interfaces/python/python_plugin_interface.cpp b/C/services/north-plugin-interfaces/python/python_plugin_interface.cpp old mode 100644 new mode 100755 index be8e6c3e36..758921612c --- a/C/services/north-plugin-interfaces/python/python_plugin_interface.cpp +++ b/C/services/north-plugin-interfaces/python/python_plugin_interface.cpp @@ -414,29 +414,21 @@ uint32_t plugin_send_fn(PLUGIN_HANDLE handle, const std::vector& read return numReadingsSent; } - // Create a dict of readings - // 1 create empty ReadingSet - ReadingSet set; + // 1. create a ReadingSet + ReadingSet set(&readings); - // 2 append all input readings: - // Note: the readings elements are pointers - set.append(readings); - - // 3 create a PythonReadingSet object + // 2. create a PythonReadingSet object PythonReadingSet *pyReadingSet = (PythonReadingSet *) &set; - // 4 create PyObject + // 3. 
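Both Python interface changes here are about who frees the Reading objects: the filter path now empties and deletes the input set itself after conversion, while the north send path wraps the caller's readings and clears the wrapper so the caller can still use them. The toy classes below only illustrate that "clear without delete" hand-off; they are not the Fledge ReadingSet API.

#include <vector>
#include <cstdio>

struct Item { int v; };

// Toy container that normally owns its pointers; clear() lets the caller keep
// ownership so the container's destructor deletes nothing.
class PtrSet {
	public:
		explicit PtrSet(std::vector<Item *> *items) : m_items(*items) {}
		~PtrSet() { for (Item *i : m_items) delete i; }
		void clear() { m_items.clear(); }	// relinquish ownership, delete nothing
	private:
		std::vector<Item *> m_items;
};

int main()
{
	std::vector<Item *> readings = { new Item{1}, new Item{2} };
	{
		PtrSet set(&readings);	// wrap the caller's pointers for the duration of the call
		// ... hand the set to the plugin here ...
		set.clear();		// the caller still needs the readings afterwards
	}
	for (Item *i : readings) { printf("%d\n", i->v); delete i; }
	return 0;
}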
create PyObject PyObject* readingsList = pyReadingSet->toPython(true); numReadingsSent = call_plugin_send_coroutine(pFunc, handle, readingsList); Logger::getLogger()->debug("C2Py: plugin_send_fn():L%d: filtered readings sent %d", __LINE__, numReadingsSent); - - // Remove all elements in readings vector - // without freeing them as the reagings pointers - // will be be freed by the caller of plugin_send_fn - set.clear(); + + set.clear(); // to avoid deletion of contained Reading objects; they are subsequently accessed in calling function DataSender::send() // Remove python object Py_CLEAR(readingsList); diff --git a/C/services/north/data_load.cpp b/C/services/north/data_load.cpp index b7fa2d3cc0..77567dad13 100755 --- a/C/services/north/data_load.cpp +++ b/C/services/north/data_load.cpp @@ -26,7 +26,7 @@ static void threadMain(void *arg) */ DataLoad::DataLoad(const string& name, long streamId, StorageClient *storage) : m_name(name), m_streamId(streamId), m_storage(storage), m_shutdown(false), - m_readRequest(0), m_dataSource(SourceReadings), m_pipeline(NULL) + m_readRequest(0), m_dataSource(SourceReadings), m_pipeline(NULL), m_perfMonitor(NULL) { m_blockSize = DEFAULT_BLOCK_SIZE; @@ -52,11 +52,20 @@ DataLoad::~DataLoad() m_cv.notify_all(); m_fetchCV.notify_all(); m_thread->join(); + delete m_thread; if (m_pipeline) { m_pipeline->cleanupFilters(m_name); delete m_pipeline; } + // Clear out the queue of readings + unique_lock lck(m_qMutex); // Should not need to do this + while (! m_queue.empty()) + { + ReadingSet *readings = m_queue.front(); + delete readings; + m_queue.pop_front(); + } Logger::getLogger()->info("Data load shutdown complete"); } @@ -148,6 +157,7 @@ void DataLoad::triggerRead(unsigned int blockSize) void DataLoad::readBlock(unsigned int blockSize) { ReadingSet *readings = NULL; +int n_waits = 0; do { @@ -181,11 +191,21 @@ ReadingSet *readings = NULL; } if (readings && readings->getCount()) { - Logger::getLogger()->debug("DataLoad::readBlock(): Got %d readings from storage client", readings->getCount()); + Logger::getLogger()->debug("DataLoad::readBlock(): Got %d readings from storage client", readings->getCount()); m_lastFetched = readings->getLastId(); bufferReadings(readings); + if (m_perfMonitor) + { + m_perfMonitor->collect("No of waits for data", n_waits); + m_perfMonitor->collect("Block utilisation %", (readings->getCount() * 100) / blockSize); + } return; } + else if (readings) + { + // Delete the empty readings set + delete readings; + } else { // Logger::getLogger()->debug("DataLoad::readBlock(): No readings available"); @@ -194,6 +214,7 @@ ReadingSet *readings = NULL; { // TODO improve this this_thread::sleep_for(chrono::milliseconds(250)); + n_waits++; } } while (m_shutdown == false); } @@ -293,7 +314,9 @@ unsigned long DataLoad::getLastSentId() // Get column value ResultSet::ColumnValue* theVal = row->getColumn("last_object"); // Set found id - return (unsigned long)theVal->getInteger(); + unsigned long rval = (unsigned long)theVal->getInteger(); + delete lastObjectId; + return rval; } } // Free result set @@ -329,6 +352,15 @@ void DataLoad::bufferReadings(ReadingSet *readings) } unique_lock lck(m_qMutex); m_queue.push_back(readings); + if (m_perfMonitor) + { + m_perfMonitor->collect("Readings added to buffer", readings->getCount()); + m_perfMonitor->collect("Reading sets buffered", m_queue.size()); + long i = 0; + for (auto& set : m_queue) + i += set->getCount(); + m_perfMonitor->collect("Total readings buffered", i); + } Logger::getLogger()->debug("Buffered %d 
readings for north processing", readings->getCount()); m_fetchCV.notify_all(); } @@ -356,7 +388,10 @@ ReadingSet *DataLoad::fetchReadings(bool wait) } ReadingSet *rval = m_queue.front(); m_queue.pop_front(); - triggerRead(m_blockSize); + if (m_queue.size() < 5) // Read another block if we have less than 5 already queued + { + triggerRead(m_blockSize); + } return rval; } @@ -598,13 +633,18 @@ void DataLoad::configChange(const string& category, const string& newConfig) { /** * The category that has changed is the one for the north service itself. - * The only item that concerns us here is the filter item that defines - * the filter pipeline. We extract that item and check to see if it defines - * a pipeline that is different to the one we currently have. + * The only items that concerns us here is the filter item that defines + * the filter pipeline and the data source. If the item is the filter pipeline + * we extract that item and check to see if it defines a pipeline that is + * different to the one we currently have. * - * If it is we destroy the current pipeline and create a new one. + * If it is the filter pipeline we destroy the current pipeline and create a new one. */ ConfigCategory config("tmp", newConfig); + if (config.itemExists("source")) + { + setDataSource(config.getValue("source")); + } string newPipeline = ""; if (config.itemExists("filter")) { diff --git a/C/services/north/data_send.cpp b/C/services/north/data_send.cpp index 9fc48e39a8..b6ad804052 100755 --- a/C/services/north/data_send.cpp +++ b/C/services/north/data_send.cpp @@ -29,7 +29,7 @@ static void startSenderThread(void *data) * Constructor for the data sending class */ DataSender::DataSender(NorthPlugin *plugin, DataLoad *loader, NorthService *service) : - m_plugin(plugin), m_loader(loader), m_service(service), m_shutdown(false), m_paused(false) + m_plugin(plugin), m_loader(loader), m_service(service), m_shutdown(false), m_paused(false), m_perfMonitor(NULL) { m_logger = Logger::getLogger(); @@ -106,6 +106,11 @@ void DataSender::sendThread() readings = NULL; } } + if (readings) + { + // Rremove any readings we had failed to send before shutting down + delete readings; + } m_logger->info("Sending thread shutdown"); } @@ -118,9 +123,15 @@ void DataSender::sendThread() unsigned long DataSender::send(ReadingSet *readings) { blockPause(); + uint32_t to_send = readings->getCount(); uint32_t sent = m_plugin->send(readings->getAllReadings()); releasePause(); unsigned long lastSent = readings->getReadingId(sent); + if (m_perfMonitor) + { + m_perfMonitor->collect("Readings sent", sent); + m_perfMonitor->collect("Percentage readings sent", (100 * sent) / to_send); + } if (sent > 0) { diff --git a/C/services/north/include/data_load.h b/C/services/north/include/data_load.h index 389a16dd48..31cf5b8816 100644 --- a/C/services/north/include/data_load.h +++ b/C/services/north/include/data_load.h @@ -10,6 +10,7 @@ #include #include #include +#include #define DEFAULT_BLOCK_SIZE 100 @@ -47,6 +48,7 @@ class DataLoad : public ServiceHandler { { m_blockSize = blockSize; }; + void setPerfMonitor(PerformanceMonitor *perfMonitor) { m_perfMonitor = perfMonitor; }; private: void readBlock(unsigned int blockSize); @@ -77,5 +79,6 @@ class DataLoad : public ServiceHandler { FilterPipeline *m_pipeline; std::mutex m_pipelineMutex; unsigned long m_blockSize; + PerformanceMonitor *m_perfMonitor; }; #endif diff --git a/C/services/north/include/data_sender.h b/C/services/north/include/data_sender.h index ffd28ae76d..ef850a0871 100644 --- 
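fetchReadings() above now only triggers another storage read when fewer than five reading sets are already buffered, which bounds how far the loader runs ahead of the sender. The sketch below shows that prefetch gate in isolation; the threshold of 5 mirrors the hard-coded check, everything else is illustrative.

#include <deque>
#include <cstdio>

static const size_t PREFETCH_THRESHOLD = 5;	// mirrors the hard-coded 5 in fetchReadings()

struct Block { int id; };

static void triggerRead(std::deque<Block>& queue, int& next)
{
	queue.push_back(Block{ next++ });	// stand-in for the storage fetch
}

int main()
{
	std::deque<Block> queue;
	int next = 0;
	for (int i = 0; i < 8; i++)
		triggerRead(queue, next);	// initial fill
	while (!queue.empty())
	{
		Block b = queue.front();
		queue.pop_front();
		if (queue.size() < PREFETCH_THRESHOLD && next < 12)
			triggerRead(queue, next);	// refill only when running low
		printf("sent block %d, %zu queued\n", b.id, queue.size());
	}
	return 0;
}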
a/C/services/north/include/data_sender.h +++ b/C/services/north/include/data_sender.h @@ -7,6 +7,7 @@ #include #include #include +#include class DataLoad; class NorthService; @@ -19,6 +20,7 @@ class DataSender { void updatePlugin(NorthPlugin *plugin) { m_plugin = plugin; }; void pause(); void release(); + void setPerfMonitor(PerformanceMonitor *perfMonitor) { m_perfMonitor = perfMonitor; }; private: unsigned long send(ReadingSet *readings); void blockPause(); @@ -34,6 +36,7 @@ class DataSender { bool m_sending; std::mutex m_pauseMutex; std::condition_variable m_pauseCV; + PerformanceMonitor *m_perfMonitor; }; #endif diff --git a/C/services/north/include/north_service.h b/C/services/north/include/north_service.h index b841921354..03c12ff936 100644 --- a/C/services/north/include/north_service.h +++ b/C/services/north/include/north_service.h @@ -18,6 +18,8 @@ #include #include #include +#include +#include #define SERVICE_NAME "Fledge North" @@ -33,7 +35,7 @@ class NorthService : public ServiceAuthHandler { public: NorthService(const std::string& name, const std::string& token = ""); - ~NorthService(); + virtual ~NorthService(); void start(std::string& coreAddress, unsigned short corePort); void stop(); @@ -58,6 +60,7 @@ class NorthService : public ServiceAuthHandler { void createConfigCategories(DefaultConfigCategory configCategory, std::string parent_name,std::string current_name); void restartPlugin(); private: + std::string controlSource(); bool sendToService(const std::string& southService, const std::string& name, const std::string& value); bool sendToDispatcher(const std::string& path, const std::string& payload); DataLoad *m_dataLoad; @@ -78,5 +81,7 @@ class NorthService : public ServiceAuthHandler { bool m_allowControl; bool m_dryRun; bool m_requestRestart; + AuditLogger *m_auditLogger; + PerformanceMonitor *m_perfMonitor; }; #endif diff --git a/C/services/north/north.cpp b/C/services/north/north.cpp index 1645c3c52b..e9c81fb15e 100755 --- a/C/services/north/north.cpp +++ b/C/services/north/north.cpp @@ -35,6 +35,7 @@ #include #include #include +#include #define SERVICE_TYPE "Northbound" @@ -188,6 +189,8 @@ bool dryRun = false; } Logger::getLogger()->setMinLevel(logLevel); service->start(coreAddress, corePort); + + delete service; return 0; } @@ -276,6 +279,9 @@ int size; */ NorthService::NorthService(const string& myName, const string& token) : m_dataLoad(NULL), + m_dataSender(NULL), + northPlugin(NULL), + m_assetTracker(NULL), m_shutdown(false), m_storage(NULL), m_pluginData(NULL), @@ -283,7 +289,9 @@ NorthService::NorthService(const string& myName, const string& token) : m_token(token), m_allowControl(true), m_dryRun(false), - m_requestRestart() + m_requestRestart(), + m_auditLogger(NULL), + m_perfMonitor(NULL) { m_name = myName; logger = new Logger(myName); @@ -295,8 +303,25 @@ NorthService::NorthService(const string& myName, const string& token) : */ NorthService::~NorthService() { + if (m_perfMonitor) + delete m_perfMonitor; + if (northPlugin) + delete northPlugin; if (m_storage) delete m_storage; + if (m_dataLoad) + delete m_dataLoad; + if (m_dataSender) + delete m_dataSender; + if (m_pluginData) + delete m_pluginData; + if (m_assetTracker) + delete m_assetTracker; + if (m_auditLogger) + delete m_auditLogger; + if (m_mgtClient) + delete m_mgtClient; + delete logger; } /** @@ -328,11 +353,14 @@ void NorthService::start(string& coreAddress, unsigned short corePort) m_token); // Token); m_mgtClient = new ManagementClient(coreAddress, corePort); + m_auditLogger = new 
AuditLogger(m_mgtClient); + // Create an empty North category if one doesn't exist DefaultConfigCategory northConfig(string("North"), string("{}")); northConfig.setDescription("North"); m_mgtClient->addCategory(northConfig, true); + // Fetch Configuration m_config = m_mgtClient->getCategory(m_name); if (!loadPlugin()) { @@ -378,7 +406,18 @@ void NorthService::start(string& coreAddress, unsigned short corePort) m_storage->registerManagement(m_mgtClient); - // Fetch Confguration + // Setup the performance monitor + m_perfMonitor = new PerformanceMonitor(m_name, m_storage); + + if (m_configAdvanced.itemExists("perfmon")) + { + string perf = m_configAdvanced.getValue("perfmon"); + if (perf.compare("true") == 0) + m_perfMonitor->setCollecting(true); + else + m_perfMonitor->setCollecting(false); + } + logger->debug("Initialise the asset tracker"); m_assetTracker = new AssetTracker(m_mgtClient, m_name); AssetTracker::getAssetTracker()->populateAssetTrackingCache(m_name, "Egress"); @@ -390,19 +429,23 @@ void NorthService::start(string& coreAddress, unsigned short corePort) } // Deal with persisted data and start the plugin - if (northPlugin->persistData()) - { - logger->debug("Plugin %s requires persisted data", m_pluginName.c_str()); - m_pluginData = new PluginData(m_storage); - string key = m_name + m_pluginName; - string storedData = m_pluginData->loadStoredData(key); - logger->debug("Starting plugin with storedData: %s", storedData.c_str()); - northPlugin->startData(storedData); - } - else + if (!m_dryRun) { - logger->debug("Start %s plugin", m_pluginName.c_str()); - northPlugin->start(); + if (northPlugin->persistData()) + { + logger->debug("Plugin %s requires persisted data", m_pluginName.c_str()); + m_pluginData = new PluginData(m_storage); + string key = m_name + m_pluginName; + string storedData = m_pluginData->loadStoredData(key); + logger->debug("Starting plugin with storedData: %s", storedData.c_str()); + northPlugin->startData(storedData); + + } + else + { + logger->debug("Start %s plugin", m_pluginName.c_str()); + northPlugin->start(); + } } // Create default security category @@ -416,6 +459,7 @@ void NorthService::start(string& coreAddress, unsigned short corePort) } logger->debug("Create threads for stream %d", streamId); m_dataLoad = new DataLoad(m_name, streamId, m_storage); + m_dataLoad->setPerfMonitor(m_perfMonitor); if (m_config.itemExists("source")) { m_dataLoad->setDataSource(m_config.getValue("source")); @@ -432,6 +476,7 @@ void NorthService::start(string& coreAddress, unsigned short corePort) } } m_dataSender = new DataSender(northPlugin, m_dataLoad, this); + m_dataSender->setPerfMonitor(m_perfMonitor); if (!m_dryRun) { @@ -458,13 +503,15 @@ void NorthService::start(string& coreAddress, unsigned short corePort) m_dataLoad->shutdown(); // Forces the data load to return from any blocking fetch call delete m_dataSender; + m_dataSender = NULL; logger->debug("North service data sender has shut down"); delete m_dataLoad; + m_dataLoad = NULL; logger->debug("North service shutting down plugin"); // Shutdown the north plugin - if (northPlugin) + if (northPlugin && !m_dryRun) { if (m_pluginData) { @@ -643,7 +690,7 @@ bool NorthService::loadPlugin() return true; } - } catch (exception e) { + } catch (exception &e) { logger->fatal("Failed to load north plugin: %s\n", e.what()); } return false; @@ -754,6 +801,14 @@ void NorthService::configChange(const string& categoryName, const string& catego m_dataLoad->setBlockSize(newBlock); } } + if (m_configAdvanced.itemExists("perfmon")) + { + 
string perf = m_configAdvanced.getValue("perfmon"); + if (perf.compare("true") == 0) + m_perfMonitor->setCollecting(true); + else + m_perfMonitor->setCollecting(false); + } } // Update the Security category @@ -796,6 +851,7 @@ void NorthService::restartPlugin() } delete northPlugin; + northPlugin = NULL; loadPlugin(); // Deal with persisted data and start the plugin if (northPlugin->persistData()) @@ -856,6 +912,9 @@ void NorthService::addConfigDefaults(DefaultConfigCategory& defaultConfig) std::to_string(DEFAULT_BLOCK_SIZE), std::to_string(DEFAULT_BLOCK_SIZE)); defaultConfig.setItemDisplayName("blockSize", "Data block size"); + defaultConfig.addItem("perfmon", "Track and store performance counters", + "boolean", "false", "false"); + defaultConfig.setItemDisplayName("perfmon", "Performance Counters"); } /** @@ -875,7 +934,8 @@ bool NorthService::write(const string& name, const string& value, const ControlD } // Build payload for dispatcher service string payload = "{ \"destination\" : \"broadcast\","; - payload += "\"write\" : { \""; + payload += controlSource(); + payload += ", \"write\" : { \""; payload += name; payload += "\" : \""; string escaped = value; @@ -920,10 +980,12 @@ bool NorthService::write(const string& name, const string& value, const ControlD payload += "broadcast\""; break; } + payload += ", "; + payload += controlSource(); payload += ", \"write\" : { \""; payload += name; payload += "\" : \""; - string escaped = name; + string escaped = value; StringEscapeQuotes(escaped); payload += escaped; payload += "\" } }"; @@ -951,7 +1013,8 @@ int NorthService::operation(const string& name, int paramCount, char *names[], } // Build payload for dispatcher service string payload = "{ \"destination\" : \"broadcast\","; - payload += "\"operation\" : { \""; + payload += controlSource(); + payload += ", \"operation\" : { \""; payload += name; payload += "\" : { "; for (int i = 0; i < paramCount; i++) @@ -1009,6 +1072,8 @@ int NorthService::operation(const string& name, int paramCount, char *names[], c payload += "broadcast\""; break; } + payload += ", "; + payload += controlSource(); payload += ", \"operation\" : { \""; payload += name; payload += "\" : { "; @@ -1087,6 +1152,7 @@ bool NorthService::sendToService(const string& southService, const string& name, */ bool NorthService::sendToDispatcher(const string& path, const string& payload) { + Logger::getLogger()->debug("Dispatch %s with %s", path.c_str(), payload.c_str()); // Send the control message to the south service try { ServiceRecord service("dispatcher"); @@ -1134,3 +1200,18 @@ bool NorthService::sendToDispatcher(const string& path, const string& payload) } } + +/** + * Return the control source for control operations. This is used + * for pipeline matching. 
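The write and operation payloads sent to the dispatcher now carry the source/source_name pair produced by controlSource(), so control pipelines can match on the originating north service. A sketch of assembling such a payload follows; the service and item names are illustrative and escapeQuotes() is a stand-in for Fledge's StringEscapeQuotes().

#include <string>
#include <cstdio>

// Stand-in for StringEscapeQuotes(): escape embedded double quotes.
static std::string escapeQuotes(std::string s)
{
	for (size_t p = 0; (p = s.find('"', p)) != std::string::npos; p += 2)
		s.insert(p, "\\");
	return s;
}

int main()
{
	std::string serviceName = "North OPCUA";	// illustrative service name
	std::string item = "pump_speed", value = "42";

	std::string payload = "{ \"destination\" : \"broadcast\", ";
	payload += "\"source\" : \"service\", \"source_name\" : \"" + serviceName + "\", ";
	payload += "\"write\" : { \"" + item + "\" : \"" + escapeQuotes(value) + "\" } }";

	printf("%s\n", payload.c_str());
	return 0;
}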
+ * + * @return string The control source + */ +string NorthService::controlSource() +{ + string source = "\"source\" : \"service\", \"source_name\" : \""; + source += m_name; + source += "\""; + + return source; +} diff --git a/C/services/south-plugin-interfaces/python/async_ingest_pymodule/ingest_callback_pymodule.cpp b/C/services/south-plugin-interfaces/python/async_ingest_pymodule/ingest_callback_pymodule.cpp index 92a4a5b601..3c95ddb8b6 100644 --- a/C/services/south-plugin-interfaces/python/async_ingest_pymodule/ingest_callback_pymodule.cpp +++ b/C/services/south-plugin-interfaces/python/async_ingest_pymodule/ingest_callback_pymodule.cpp @@ -80,11 +80,6 @@ void plugin_ingest_fn(PyObject *ingest_callback, PyObject *ingest_obj_ref_data, return; } - PyObject* objectsRepresentation = PyObject_Repr(readingsObj); - const char* s = PyUnicode_AsUTF8(objectsRepresentation); - Logger::getLogger()->debug("%s:%s:L%d : Py2C: filtered readings=%s", __FILE__, __FUNCTION__, __LINE__, s); - Py_CLEAR(objectsRepresentation); - PythonReadingSet *pyReadingSet = NULL; try diff --git a/C/services/south-plugin-interfaces/python/python_plugin_interface.cpp b/C/services/south-plugin-interfaces/python/python_plugin_interface.cpp index b3119c7777..59da71af1e 100755 --- a/C/services/south-plugin-interfaces/python/python_plugin_interface.cpp +++ b/C/services/south-plugin-interfaces/python/python_plugin_interface.cpp @@ -510,46 +510,35 @@ std::vector* plugin_poll_fn(PLUGIN_HANDLE handle) } else { - // Get reading data - PythonReadingSet *pyReadingSet = NULL; + // Get reading data + PythonReadingSet *pyReadingSet = NULL; - // Valid ReadingSet would be in the form of python dict or list - if (PyList_Check(pReturn) || PyDict_Check(pReturn)) - { - try - { - pyReadingSet = new PythonReadingSet(pReturn); - } - catch (std::exception e) - { - Logger::getLogger()->warn("PythonReadingSet c'tor failed, error: %s", e.what()); - pyReadingSet = NULL; - } - } - + // Valid ReadingSet would be in the form of python dict or list + if (PyList_Check(pReturn) || PyDict_Check(pReturn)) + { + try { + pyReadingSet = new PythonReadingSet(pReturn); + } catch (std::exception e) { + Logger::getLogger()->warn("Failed to create a Python ReadingSet from the data returned by the south plugin poll routine, %s", e.what()); + pyReadingSet = NULL; + } + } + // Remove pReturn object Py_CLEAR(pReturn); PyGILState_Release(state); - if (pyReadingSet) - { - std::vector *vec = pyReadingSet->getAllReadingsPtr(); - std::vector *vec2 = new std::vector; - - for (auto & r : *vec) - { - Reading *r2 = new Reading(*r); // Need to copy reading objects here, since "del pyReadingSet" below would remove encapsulated reading objects - vec2->emplace_back(r2); - } - - delete pyReadingSet; - return vec2; - } - else - { - return NULL; - } + if (pyReadingSet) + { + std::vector *vec2 = pyReadingSet->moveAllReadings(); + delete pyReadingSet; + return vec2; + } + else + { + return NULL; + } } } diff --git a/C/services/south/CMakeLists.txt b/C/services/south/CMakeLists.txt index e6a73fd05b..8183ac0d98 100644 --- a/C/services/south/CMakeLists.txt +++ b/C/services/south/CMakeLists.txt @@ -1,6 +1,7 @@ cmake_minimum_required (VERSION 2.8.8) project (South) + set(CMAKE_CXX_FLAGS_DEBUG "-O0 -ggdb -DPy_DEBUG") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Wextra -Wsign-conversion") set(DLLIB -ldl) @@ -28,8 +29,40 @@ if(APPLE) set(OPENSSL_ROOT_DIR "/usr/local/opt/openssl") endif() +# Find python3.x dev/lib package +find_package(PkgConfig REQUIRED) +if(${CMAKE_VERSION} 
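plugin_poll_fn above stops deep-copying every Reading returned by a Python poll and instead takes the whole vector with moveAllReadings(). The toy class below only illustrates the swap-out idea behind such an accessor; it is not the Fledge ReadingSet implementation.

#include <vector>
#include <cstdio>

struct Reading { int value; };

// Toy reading set with a move-out accessor: the internal vector is handed to
// the caller and the set is left empty, so nothing is copied or double-freed.
class ToyReadingSet {
	public:
		void add(Reading *r) { m_readings.push_back(r); }
		std::vector<Reading *> *moveAllReadings()
		{
			std::vector<Reading *> *out = new std::vector<Reading *>;
			out->swap(m_readings);		// O(1) transfer, no per-reading copy
			return out;
		}
		~ToyReadingSet() { for (Reading *r : m_readings) delete r; }
	private:
		std::vector<Reading *> m_readings;
};

int main()
{
	ToyReadingSet set;
	set.add(new Reading{1});
	set.add(new Reading{2});
	std::vector<Reading *> *vec = set.moveAllReadings();	// set no longer owns them
	for (Reading *r : *vec) { printf("%d\n", r->value); delete r; }
	delete vec;
	return 0;
}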
VERSION_LESS "3.12.0") + pkg_check_modules(PYTHON REQUIRED python3) +else() + if("${OS_NAME}" STREQUAL "mendel") + # We will explicitly set include path later for NumPy. + find_package(Python3 REQUIRED COMPONENTS Interpreter Development ) + else() + find_package(Python3 REQUIRED COMPONENTS Interpreter Development NumPy) + endif() +endif() + file(GLOB south_src "*.cpp") +# Add Python 3.x header files +if(${CMAKE_VERSION} VERSION_LESS "3.12.0") + include_directories(${PYTHON_INCLUDE_DIRS}) +else() + if("${OS_NAME}" STREQUAL "mendel") + # The following command gets the location of NumPy. + execute_process( + COMMAND python3 + -c "import numpy; print(numpy.get_include())" + OUTPUT_VARIABLE Python3_NUMPY_INCLUDE_DIRS + OUTPUT_STRIP_TRAILING_WHITESPACE + ) + # Now we can add include directories as usual. + include_directories(${Python3_INCLUDE_DIRS} ${Python3_NUMPY_INCLUDE_DIRS}) + else() + include_directories(${Python3_INCLUDE_DIRS} ${Python3_NUMPY_INCLUDE_DIRS}) + endif() +endif() + link_directories(${PROJECT_BINARY_DIR}/../../lib) add_executable(${EXEC} ${south_src} ${common_src} ${services_src}) diff --git a/C/services/south/include/configuration.h b/C/services/south/include/configuration.h deleted file mode 100644 index 4925d4dfa5..0000000000 --- a/C/services/south/include/configuration.h +++ /dev/null @@ -1,43 +0,0 @@ -#ifndef _CONFIGURATION_H -#define _CONFIGURATION_H -/* - * Fledge storage service. - * - * Copyright (c) 2017 OSisoft, LLC - * - * Released under the Apache 2.0 Licence - * - * Author: Mark Riddoch - */ - -#include -#include -#include - -#define STORAGE_CATEGORY "STORAGE" -#define CONFIGURATION_CACHE_FILE "storage.json" - -/** - * The storage service must handle its own configuration differently - * to other services as it is unable to read the configuration from - * the database. The configuration is required in order to connnect - * to the database. Therefore it keeps a shadow copy in a local file - * and it keeps this local, cached copy up to date by registering - * interest in the category and whenever a chaneg is made writing - * the category to the local cache file. 
- */ -class StorageConfiguration { - public: - StorageConfiguration(); - const char *getValue(const std::string& key); - bool setValue(const std::string& key, const std::string& value); - void updateCategory(const std::string& json); - private: - void getConfigCache(std::string& cache); - rapidjson::Document document; - void readCache(); - void writeCache(); - Logger *logger; -}; - -#endif diff --git a/C/services/south/include/defaults.h b/C/services/south/include/defaults.h index b5472072bf..d6eefed30e 100644 --- a/C/services/south/include/defaults.h +++ b/C/services/south/include/defaults.h @@ -21,10 +21,10 @@ static struct { "Maximum time to spend filling buffer before sending", "integer", "5000" }, { "bufferThreshold", "Maximum buffered Readings", "Number of readings to buffer before sending", "integer", "100" }, - { "readingsPerSec", "Reading Rate", - "Number of readings to generate per interval", "integer", "1" }, { "throttle", "Throttle", "Enable flow control by reducing the poll rate", "boolean", "false" }, + { "readingsPerSec", "Reading Rate", + "Number of readings to generate per interval", "integer", "1" }, { NULL, NULL, NULL, NULL, NULL } }; #endif diff --git a/C/services/south/include/ingest.h b/C/services/south/include/ingest.h index 79f969c928..3b5dfbf157 100644 --- a/C/services/south/include/ingest.h +++ b/C/services/south/include/ingest.h @@ -25,12 +25,23 @@ #include #include #include +#include #define SERVICE_NAME "Fledge South" #define INGEST_SUFFIX "-Ingest" // Suffix for per service ingest statistic -#define STATS_UPDATE_FAIL_THRESHOLD 10 // After this many update fails try creatign new stats +#define STATS_UPDATE_FAIL_THRESHOLD 10 // After this many update fails try creating new stats + +#define DEPRECATED_CACHE_AGE 600 // Maximum allowed aged of the deprecated asset cache + +/* + * Constants related to flow control for async south services. + * + */ +#define AFC_SLEEP_INCREMENT 20 // Number of milliseconds to wait for readings to drain +#define AFC_SLEEP_MAX 200 // Maximum sleep tiem in ms between tests +#define AFC_MAX_WAIT 5000 // Maximum amount of time we wait for the queue to drain /** * The ingest class is used to ingest asset readings. 
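The AFC_* constants introduced above bound how an asynchronous south service waits for its ingest queue to drain under flow control. The actual Ingest::flowControl() body is not part of this hunk, so the loop below is only one plausible way the constants combine, with a plain counter standing in for the real queue length.

#include <chrono>
#include <thread>
#include <cstdio>

#define AFC_SLEEP_INCREMENT	20	// ms to wait between queue checks
#define AFC_SLEEP_MAX		200	// longest single wait in ms
#define AFC_MAX_WAIT		5000	// give up waiting after this many ms

// Illustrative back-off: wait for the queue to fall below the low water mark,
// lengthening each sleep up to AFC_SLEEP_MAX and never blocking past AFC_MAX_WAIT.
static void waitForDrain(unsigned int& queued, unsigned int lowWater)
{
	int waited = 0, sleepFor = AFC_SLEEP_INCREMENT;
	while (queued > lowWater && waited < AFC_MAX_WAIT)
	{
		std::this_thread::sleep_for(std::chrono::milliseconds(sleepFor));
		waited += sleepFor;
		if (sleepFor < AFC_SLEEP_MAX)
			sleepFor += AFC_SLEEP_INCREMENT;
	}
}

int main()
{
	unsigned int queued = 0;	// nothing queued, returns immediately
	waitForDrain(queued, 10);
	printf("queue drained or wait limit reached\n");
	return 0;
}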
@@ -42,8 +53,6 @@ class Ingest : public ServiceHandler { public: Ingest(StorageClient& storage, - long timeout, - unsigned int threshold, const std::string& serviceName, const std::string& pluginName, ManagementClient *mgmtClient); @@ -51,6 +60,7 @@ class Ingest : public ServiceHandler { void ingest(const Reading& reading); void ingest(const std::vector *vec); + void start(long timeout, unsigned int threshold); bool running(); bool isStopping(); bool isRunning() { return !m_shutdown; }; @@ -76,12 +86,19 @@ class Ingest : public ServiceHandler { void unDeprecateAssetTrackingRecord(AssetTrackingTuple* currentTuple, const std::string& assetName, const std::string& event); - void unDeprecateStorageAssetTrackingRecord(StorageAssetTrackingTuple* currentTuple, - const std::string& assetName, const std::string&, const unsigned int&); + void unDeprecateStorageAssetTrackingRecord(StorageAssetTrackingTuple* currentTuple, + const std::string& assetName, + const std::string&, + const unsigned int&); void setStatistics(const std::string& option); std::string getStringFromSet(const std::set &dpSet); - + void setFlowControl(unsigned int lowWater, unsigned int highWater) { m_lowWater = lowWater; m_highWater = highWater; }; + void flowControl(); + void setPerfMon(PerformanceMonitor *mon) + { + m_performance = mon; + }; private: void signalStatsUpdate() { @@ -133,6 +150,12 @@ class Ingest : public ServiceHandler { int m_statsUpdateFails; enum { STATS_BOTH, STATS_ASSET, STATS_SERVICE } m_statisticsOption; + unsigned int m_highWater; + unsigned int m_lowWater; + AssetTrackingTable *m_deprecated; + time_t m_deprecatedAgeOut; + time_t m_deprecatedAgeOutStorage; + PerformanceMonitor *m_performance; }; #endif diff --git a/C/services/south/include/south_plugin.h b/C/services/south/include/south_plugin.h index 2c88261b8a..6027b6f551 100644 --- a/C/services/south/include/south_plugin.h +++ b/C/services/south/include/south_plugin.h @@ -52,6 +52,7 @@ class SouthPlugin : public Plugin { bool operation(const std::string& name, std::vector& ); private: PLUGIN_HANDLE instance; + bool m_started; // Plugin started indicator, for async plugins void (*pluginStartPtr)(PLUGIN_HANDLE); Reading (*pluginPollPtr)(PLUGIN_HANDLE); std::vector* (*pluginPollPtrV2)(PLUGIN_HANDLE); diff --git a/C/services/south/include/south_service.h b/C/services/south/include/south_service.h index 7233043de0..145dd8392d 100644 --- a/C/services/south/include/south_service.h +++ b/C/services/south/include/south_service.h @@ -17,7 +17,8 @@ #include #include #include -#include +#include +#include #define MAX_SLEEP 5 // Maximum number of seconds the service will sleep during a poll cycle @@ -46,12 +47,14 @@ class SouthService : public ServiceAuthHandler { public: SouthService(const std::string& name, const std::string& token = ""); + virtual ~SouthService(); void start(std::string& coreAddress, unsigned short corePort); void stop(); void shutdown(); void restart(); void configChange(const std::string&, const std::string&); + void processConfigChange(const std::string&, const std::string&); void configChildCreate(const std::string&, const std::string&, const std::string&){}; @@ -61,6 +64,8 @@ class SouthService : public ServiceAuthHandler { bool setPoint(const std::string& name, const std::string& value); bool operation(const std::string& name, std::vector& ); void setDryRun() { m_dryRun = true; }; + void handlePendingReconf(); + private: void addConfigDefaults(DefaultConfigCategory& defaults); bool loadPlugin(); @@ -69,7 +74,17 @@ class SouthService : public 
ServiceAuthHandler { std::string parent_name, std::string current_name); void throttlePoll(); + void processNumberList(const ConfigCategory& cateogry, const std::string& item, std::vector& list); + void calculateTimerRate(); + bool syncToNextPoll(); + bool onDemandPoll(); + void checkPendingReconfigure(); private: + std::thread *m_reconfThread; + std::deque> m_pendingNewConfig; + std::mutex m_pendingNewConfigMutex; + std::condition_variable m_cvNewReconf; + SouthPlugin *southPlugin; Logger *logger; AssetTracker *m_assetTracker; @@ -95,7 +110,18 @@ class SouthService : public ServiceAuthHandler { bool m_dryRun; bool m_requestRestart; std::string m_rateUnits; - StorageAssetTracker *m_storageAssetTracker; - + enum { POLL_INTERVAL, POLL_FIXED, POLL_ON_DEMAND } + m_pollType; + std::vector m_hours; + std::vector m_minutes; + std::vector m_seconds; + std::string m_hoursStr; + std::string m_minutesStr; + std::string m_secondsStr; + std::condition_variable m_pollCV; + std::mutex m_pollMutex; + bool m_doPoll; + AuditLogger *m_auditLogger; + PerformanceMonitor *m_perfMonitor; }; #endif diff --git a/C/services/south/ingest.cpp b/C/services/south/ingest.cpp index 956e41fc1b..e1e876c096 100755 --- a/C/services/south/ingest.cpp +++ b/C/services/south/ingest.cpp @@ -12,7 +12,6 @@ #include #include #include -#include #include using namespace std; @@ -53,9 +52,15 @@ static void statsThread(Ingest *ingest) * The key checked/created in the table is "" * * @param assetName Asset name for the plugin that is sending readings + * @return int Return -1 on error, 0 if not required or 1 if the entry exists */ int Ingest::createStatsDbEntry(const string& assetName) { + if (m_statisticsOption == STATS_SERVICE) + { + // No asset stats required + return 0; + } // Prepare fledge.statistics update string statistics_key = assetName; for (auto & c: statistics_key) c = toupper(c); @@ -96,7 +101,7 @@ int Ingest::createStatsDbEntry(const string& assetName) m_logger->error("%s:%d : Unable to create new row in statistics table with key='%s'", __FUNCTION__, __LINE__, statistics_key.c_str()); return -1; } - return 0; + return 1; } /** @@ -105,6 +110,11 @@ int Ingest::createStatsDbEntry(const string& assetName) */ int Ingest::createServiceStatsDbEntry() { + if (m_statisticsOption == STATS_ASSET) + { + // No service stats required + return 0; + } // SELECT * FROM fledge.configuration WHERE key = categoryName const Condition conditionKey(Equals); Where *wKey = new Where("key", conditionKey, m_serviceName + INGEST_SUFFIX); @@ -168,8 +178,10 @@ void Ingest::updateStats() { if (statsDbEntriesCache.find(it->first) == statsDbEntriesCache.end()) { - createStatsDbEntry(it->first); - statsDbEntriesCache.insert(it->first); + if (createStatsDbEntry(it->first) > 0) + { + statsDbEntriesCache.insert(it->first); + } } if (it->second) @@ -259,47 +271,59 @@ void Ingest::updateStats() * storage layer based on time. This thread in created in * the constructor and will terminate when the destructor * is called. 
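createStatsDbEntry() now distinguishes three outcomes (-1 error, 0 entry not required, 1 entry exists) and updateStats() only caches a key on a positive result, so a failed insert is retried on the next flush instead of being skipped forever. A small sketch of that caller contract, where assetStatsEnabled stands in for the m_statisticsOption check:

#include <set>
#include <string>
#include <cstdio>

// Return convention: -1 error, 0 entry not required, 1 entry exists.
static int createStatsDbEntry(const std::string& asset, bool assetStatsEnabled)
{
	(void)asset;
	if (!assetStatsEnabled)
		return 0;			// per-asset statistics disabled
	// ... database insert would go here; assume it succeeded ...
	return 1;
}

int main()
{
	std::set<std::string> cache;
	const char *assets[] = { "pump", "valve" };	// illustrative asset names
	for (const char *a : assets)
	{
		if (cache.find(a) == cache.end() && createStatsDbEntry(a, true) > 0)
			cache.insert(a);	// cache only when the row really exists
	}
	printf("%zu statistics rows cached\n", cache.size());
	return 0;
}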
- * TODO - try to reduce the number of arguments in c'tor * * @param storage The storage client to use - * @param timeout Maximum time before sending a queue of readings in milliseconds - * @param threshold Length of queue before sending readings */ Ingest::Ingest(StorageClient& storage, - long timeout, - unsigned int threshold, const std::string& serviceName, const std::string& pluginName, ManagementClient *mgmtClient) : m_storage(storage), - m_timeout(timeout), - m_queueSizeThreshold(threshold), m_serviceName(serviceName), m_pluginName(pluginName), m_mgtClient(mgmtClient), m_failCnt(0), m_storageFailed(false), m_storesFailed(0), - m_statisticsOption(STATS_BOTH) + m_statisticsOption(STATS_BOTH), + m_highWater(0) { m_shutdown = false; m_running = true; m_queue = new vector(); - m_thread = new thread(ingestThread, this); - m_statsThread = new thread(statsThread, this); m_logger = Logger::getLogger(); m_data = NULL; m_discardedReadings = 0; m_highLatency = false; - // populate asset tracking cache - AssetTracker::getAssetTracker()->populateAssetTrackingCache(m_pluginName, "Ingest"); - StorageAssetTracker::getStorageAssetTracker()->populateStorageAssetTrackingCache(); + // populate asset and storage asset tracking cache + AssetTracker *as = AssetTracker::getAssetTracker(); + as->populateAssetTrackingCache(m_pluginName, "Ingest"); + as->populateStorageAssetTrackingCache(); // Create the stats entry for the service createServiceStatsDbEntry(); m_filterPipeline = NULL; + + m_deprecated = NULL; + + m_deprecatedAgeOut = 0; + m_deprecatedAgeOutStorage = 0; +} + +/** + * Start the ingest threads + * + * @param timeout Maximum time before sending a queue of readings in milliseconds + * @param threshold Length of queue before sending readings + */ +void Ingest::start(long timeout, unsigned int threshold) +{ + m_timeout = timeout; + m_queueSizeThreshold = threshold; + m_thread = new thread(ingestThread, this); + m_statsThread = new thread(statsThread, this); } /** @@ -315,23 +339,59 @@ Ingest::~Ingest() { m_shutdown = true; m_running = false; + + // Cleanup filters + { + lock_guard guard(m_pipelineMutex); + if (m_filterPipeline) + { + m_filterPipeline->setShuttingDown(); + m_filterPipeline->cleanupFilters(m_serviceName); // filter's shutdown API could potentially try to feed some new readings using the async ingest mechnanism + } + } + m_cv.notify_one(); m_thread->join(); processQueue(); m_statsCv.notify_one(); m_statsThread->join(); updateStats(); + // Cleanup and readings left in the various queues + for (auto& reading : *m_queue) + { + delete reading; + } delete m_queue; + for (auto& q : m_resendQueues) + { + for (auto& rq : *q) + { + delete rq; + } + delete q; + } + while (m_fullQueues.size() > 0) + { + vector *q = m_fullQueues.front(); + for (auto& rq : *q) + { + delete rq; + } + delete q; + m_fullQueues.pop(); + } delete m_thread; delete m_statsThread; - //delete m_data; - - // Cleanup filters - no other threads are running so no need for the lock - if (m_filterPipeline) + + // Delete filter pipeline { - m_filterPipeline->cleanupFilters(m_serviceName); - delete m_filterPipeline; + lock_guard guard(m_pipelineMutex); + if (m_filterPipeline) + delete m_filterPipeline; } + + if (m_deprecated) + delete m_deprecated; } /** @@ -356,6 +416,8 @@ bool Ingest::isStopping() /** * Add a reading to the reading queue + * + * @param reading The single reading to ingest */ void Ingest::ingest(const Reading& reading) { @@ -363,7 +425,7 @@ vector *fullQueue = 0; { lock_guard guard(m_qMutex); - 
m_queue->push_back(new Reading(reading)); + m_queue->emplace_back(new Reading(reading)); if (m_queue->size() >= m_queueSizeThreshold || m_running == false) { fullQueue = m_queue; @@ -377,10 +439,13 @@ vector *fullQueue = 0; } if (m_fullQueues.size()) m_cv.notify_all(); + m_performance->collect("queueLength", (long)queueLength()); } /** * Add a set of readings to the reading queue + * + * @param vec A vector of readings to ingest */ void Ingest::ingest(const vector *vec) { @@ -418,14 +483,16 @@ unsigned int nFullQueues = 0; { m_cv.notify_all(); } + m_performance->collect("queueLength", (long)queueLength()); + m_performance->collect("ingestCount", (long)vec->size()); } /** * Work out how long to wait based on age of oldest queued reading - * We do this in a seperaste function so that we can - * lock the qMutex to access the oldest element in the queue + * We do this in a separate function so that we can lock the qMutex + * to access the oldest element in the queue * - * @return the tiem to wait + * @return the time to wait */ long Ingest::calculateWaitTime() { @@ -476,13 +543,6 @@ void Ingest::waitForQueue() */ void Ingest::processQueue() { - -/* typedef struct{ - std::set setDp; - unsigned int count; - }dpCountObj; -*/ - do { /* * If we have some data that has been previously filtered but failed to send, @@ -510,6 +570,7 @@ void Ingest::processQueue() q->erase(q->begin()); logDiscardedStat(); } + m_performance->collect("removedFromQueue", 5); if (q->size() == 0) { delete q; @@ -521,6 +582,7 @@ void Ingest::processQueue() else { + m_performance->collect("storedReadings", (long int)(q->size())); if (m_storageFailed) { m_logger->warn("Storage operational after %d failures", m_storesFailed); @@ -530,10 +592,10 @@ void Ingest::processQueue() m_failCnt = 0; std::map statsEntriesCurrQueue; AssetTracker *tracker = AssetTracker::getAssetTracker(); - StorageAssetTracker *satracker = StorageAssetTracker::getStorageAssetTracker(); - if ( satracker == nullptr) + if (tracker == nullptr) { - Logger::getLogger()->error("%s could not initialize satracker ", __FUNCTION__); + Logger::getLogger()->error("%s could not initialize asset tracker", + __FUNCTION__); return; } @@ -606,24 +668,30 @@ void Ingest::processQueue() } for (auto itr : assetDatapointMap) - { - std::set &s = itr.second; - unsigned int count = s.size(); - StorageAssetTrackingTuple storageTuple(m_serviceName,m_pluginName, itr.first, "store", false, "",count); - StorageAssetTrackingTuple *ptr = &storageTuple; - satracker->updateCache(s, ptr); - bool deprecated = satracker->getDeprecated(ptr); - if (deprecated == true) - { - unDeprecateStorageAssetTrackingRecord(ptr, itr.first, getStringFromSet(s), count); - } - } + { + std::set &s = itr.second; + unsigned int count = s.size(); + StorageAssetTrackingTuple storageTuple(m_serviceName, + m_pluginName, + itr.first, + "store", + false, + "", + count); + + StorageAssetTrackingTuple *ptr = &storageTuple; + + // Update SAsset Tracker database and cache + tracker->updateCache(s, ptr); + } delete q; m_resendQueues.erase(m_resendQueues.begin()); unique_lock lck(m_statsMutex); for (auto &it : statsEntriesCurrQueue) + { statsPendingEntries[it.first] += it.second; + } } } @@ -631,11 +699,14 @@ void Ingest::processQueue() lock_guard fqguard(m_fqMutex); if (m_fullQueues.empty()) { - // Block of code to execute holding the mutex - lock_guard guard(m_qMutex); - std::vector *newQ = new vector; - m_data = m_queue; - m_queue = newQ; + if (!m_shutdown) + { + // Block of code to execute holding the mutex + lock_guard 
guard(m_qMutex); + std::vector *newQ = new vector; + m_data = m_queue; + m_queue = newQ; + } } else { @@ -661,7 +732,7 @@ void Ingest::processQueue() */ { lock_guard guard(m_pipelineMutex); - if (m_filterPipeline) + if (m_filterPipeline && !m_filterPipeline->isShuttingDown()) { FilterPlugin *firstFilter = m_filterPipeline->getFirstFilterPlugin(); if (firstFilter) @@ -693,29 +764,32 @@ void Ingest::processQueue() } } - /* * Check the first reading in the list to see if we are meeting the * latency configuration we have been set */ - vector::iterator itr = m_data->begin(); - if (itr != m_data->cend()) + if (m_data) { - Reading *firstReading = *itr; - struct timeval tmFirst, tmNow, dur; - gettimeofday(&tmNow, NULL); - firstReading->getUserTimestamp(&tmFirst); - timersub(&tmNow, &tmFirst, &dur); - long latency = dur.tv_sec * 1000 + (dur.tv_usec / 1000); - if (latency > m_timeout && m_highLatency == false) - { - m_logger->warn("Current send latency of %ldmS exceeds requested maximum latency of %dmS", latency, m_timeout); - m_highLatency = true; - } - else if (latency <= m_timeout / 1000 && m_highLatency) + vector::iterator itr = m_data->begin(); + if (itr != m_data->cend()) { - m_logger->warn("Send latency now within requested limits"); - m_highLatency = false; + Reading *firstReading = *itr; + struct timeval tmFirst, tmNow, dur; + gettimeofday(&tmNow, NULL); + firstReading->getUserTimestamp(&tmFirst); + timersub(&tmNow, &tmFirst, &dur); + long latency = dur.tv_sec * 1000 + (dur.tv_usec / 1000); + m_performance->collect("readLatency", latency); + if (latency > m_timeout && m_highLatency == false) + { + m_logger->warn("Current send latency of %ldmS exceeds requested maximum latency of %dmS", latency, m_timeout); + m_highLatency = true; + } + else if (latency <= m_timeout / 1000 && m_highLatency) + { + m_logger->warn("Send latency now within requested limits"); + m_highLatency = false; + } } } @@ -730,7 +804,7 @@ void Ingest::processQueue() * 2- some readings removed * 3- New set of readings */ - if (!m_data->empty()) + if (m_data && m_data->size()) { if (m_storage.readingAppend(*m_data) == false) { @@ -738,12 +812,14 @@ void Ingest::processQueue() m_logger->warn("Failed to write readings to storage layer, queue for resend"); m_storageFailed = true; m_storesFailed++; + m_performance->collect("resendQueued", (long int)(m_data->size())); m_resendQueues.push_back(m_data); m_data = NULL; m_failCnt = 1; } else { + m_performance->collect("storedReadings", (long int)(m_data->size())); if (m_storageFailed) { m_logger->warn("Storage operational after %d failures", m_storesFailed); @@ -755,11 +831,11 @@ void Ingest::processQueue() // check if this requires addition of a new asset tracker tuple // Remove the Readings in the vector AssetTracker *tracker = AssetTracker::getAssetTracker(); - StorageAssetTracker *satracker = StorageAssetTracker::getStorageAssetTracker(); string lastAsset; int *lastStat = NULL; std::map > assetDatapointMap; + for (vector::iterator it = m_data->begin(); it != m_data->end(); ++it) { Reading *reading = *it; @@ -821,27 +897,39 @@ void Ingest::processQueue() { (*lastStat)++; } - delete reading; + // delete reading; } + for( auto & rdng : *m_data) + { + delete rdng; + } + m_data->clear(); - for (auto itr : assetDatapointMap) - { - std::set &s = itr.second; + for (auto itr : assetDatapointMap) + { + std::set &s = itr.second; unsigned int count = s.size(); - StorageAssetTrackingTuple storageTuple(m_serviceName,m_pluginName, itr.first, "store", false, "",count); + 
StorageAssetTrackingTuple storageTuple(m_serviceName, + m_pluginName, + itr.first, + "store", + false, + "", + count); + StorageAssetTrackingTuple *ptr = &storageTuple; - satracker->updateCache(s, ptr); - bool deprecated = satracker->getDeprecated(ptr); - if (deprecated == true) - { - unDeprecateStorageAssetTrackingRecord(ptr, itr.first, getStringFromSet(s), count); - } - } + + // Update SAsset Tracker database and cache + tracker->updateCache(s, ptr); + } + { unique_lock lck(m_statsMutex); for (auto &it : statsEntriesCurrQueue) + { statsPendingEntries[it.first] += it.second; + } } } } @@ -920,6 +1008,7 @@ void Ingest::passToOnwardFilter(OUTPUT_HANDLE *outHandle, { // Get next filter in the pipeline FilterPlugin *next = (FilterPlugin *)outHandle; + // Pass readings to next filter next->ingest(readingSet); } @@ -949,11 +1038,32 @@ void Ingest::useFilteredData(OUTPUT_HANDLE *outHandle, READINGSET *readingSet) { Ingest* ingest = (Ingest *)outHandle; + if (ingest->m_data != readingSet->getAllReadingsPtr()) { - ingest->m_data->clear();// Remove any pointers still in the vector - *(ingest->m_data) = readingSet->getAllReadings(); + if (ingest->m_data && ingest->m_data->size()) + { + // Remove the readings in the vector + for(auto & rdng : *(ingest->m_data)) + delete rdng; + ingest->m_data->clear();// Remove the pointers still in the vector + + + // move reading vector to ingest + *(ingest->m_data) = readingSet->getAllReadings(); + } + else + { + // move reading vector to ingest + ingest->m_data = readingSet->moveAllReadings(); + } + } + else + { + Logger::getLogger()->info("%s:%d: INPUT READINGSET MODIFIED BY FILTER: ingest->m_data=%p, readingSet->getAllReadingsPtr()=%p", + __FUNCTION__, __LINE__, ingest->m_data, readingSet->getAllReadingsPtr()); } + readingSet->clear(); delete readingSet; } @@ -1065,6 +1175,26 @@ void Ingest::unDeprecateAssetTrackingRecord(AssetTrackingTuple* currentTuple, const string& assetName, const string& event) { + time_t now = time(0); + if (m_deprecatedAgeOut < now) + { + delete m_deprecated; + m_deprecated = m_mgtClient->getDeprecatedAssetTrackingTuples(); + m_deprecatedAgeOut = now + DEPRECATED_CACHE_AGE; + } + if (m_deprecated && m_deprecated->find(assetName)) + { + // The asset is deprecated possibly + m_deprecated->remove(assetName); + } + else + { + // The asset is not believed to be deprecated so return. If + // it has been deprecated since we last loaded the cache this + // will leave the asset incorrectly deprecated. 
This will be + // resolved next time the cache is reloaded + return; + } // Get up-to-date Asset Tracking record AssetTrackingTuple* updatedTuple = m_mgtClient->getAssetTrackingTuple( @@ -1072,6 +1202,7 @@ void Ingest::unDeprecateAssetTrackingRecord(AssetTrackingTuple* currentTuple, assetName, event); + bool unDeprecateDataPoints = false; if (updatedTuple) { if (updatedTuple->isDeprecated()) @@ -1124,8 +1255,11 @@ void Ingest::unDeprecateAssetTrackingRecord(AssetTrackingTuple* currentTuple, " for un-deprecated asset '%s'", assetName.c_str()); } - m_logger->info("Asset '%s' has been un-deprecated", - assetName.c_str()); + m_logger->info("Asset '%s' has been un-deprecated, event '%s'", + assetName.c_str(), + event.c_str()); + + unDeprecateDataPoints = true; } } } @@ -1138,6 +1272,47 @@ void Ingest::unDeprecateAssetTrackingRecord(AssetTrackingTuple* currentTuple, } delete updatedTuple; + + // Undeprecate all "store" events related to the serviceName and assetName + if (unDeprecateDataPoints) + { + // Prepare UPDATE query + const Condition conditionParams(Equals); + Where * wAsset = new Where("asset", + conditionParams, + assetName); + Where *wService = new Where("service", + conditionParams, + m_serviceName, + wAsset); + Where *wEvent = new Where("event", + conditionParams, + "store", + wService); + + InsertValues unDeprecated; + + // Set NULL value + unDeprecated.push_back(InsertValue("deprecated_ts")); + + // Update storage with NULL value + int rv = m_storage.updateTable("asset_tracker", + unDeprecated, + *wEvent); + + // Check update operation + if (rv < 0) + { + m_logger->error("Failure while un-deprecating asset '%s'", + assetName.c_str()); + } + else + { + m_logger->info("Asset '%s' has been un-deprecated, event '%s'", + assetName.c_str(), + "store"); + } + } } /** @@ -1147,12 +1322,26 @@ void Ingest::unDeprecateAssetTrackingRecord(AssetTrackingTuple* currentTuple, * * @param currentTuple Current StorageAssetTracking record for given assetName * @param assetName AssetName to fetch from AssetTracking - * @param event The event type to fetch + * @param datapoints The datapoints comma separated list + * @param count The number of datapoints per asset */ void Ingest::unDeprecateStorageAssetTrackingRecord(StorageAssetTrackingTuple* currentTuple, - const string& assetName, const string& datapoints, const unsigned int& count) + const string& assetName, + const string& datapoints, + const unsigned int& count) { + time_t now = time(0); + if (m_deprecatedAgeOutStorage < now) + { + m_deprecatedAgeOutStorage = now + DEPRECATED_CACHE_AGE; + } + else + { + // Nothing to do right now + return; + } + // Get up-to-date Asset Tracking record StorageAssetTrackingTuple* updatedTuple = @@ -1270,3 +1459,43 @@ std::string Ingest::getStringFromSet(const std::set &dpSet) s.pop_back(); return s; } + +/** + * Implement flow control backoff for the async ingest mechanism. + * + * The flow control is "soft" in that it will only wait for a maximum + * amount of time before continuing regardless of the queue length. + * + * The mechanism is to have a high water and low water mark. When the queue + * get longer than the high water mark we wait until the queue drains below + * the low water mark before proceeding. + * + * The wait is done with a backoff algorithm that start at AFC_SLEEP_INCREMENT + * and doubles each time we have not dropped below the low water mark. It will + * sleep for a maximum of AFC_SLEEP_MAX before testing again. 
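+ *
+ * Illustrative only (the actual AFC_* values are defined elsewhere): if
+ * AFC_SLEEP_INCREMENT were 50ms and AFC_SLEEP_MAX 500ms the successive waits
+ * would be 50, 100, 200, 400, 500, 500, ... ms until either the queue drops
+ * below the low water mark or the total wait reaches AFC_MAX_WAIT.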
+ */ +void Ingest::flowControl() +{ + if (m_highWater == 0) // No flow control + { + return; + } + if (m_highWater < queueLength()) + { + m_logger->debug("Waiting for ingest queue to drain"); + int total = 0, delay = AFC_SLEEP_INCREMENT; + while (total < AFC_MAX_WAIT && queueLength() > m_lowWater) + { + this_thread::sleep_for(chrono::milliseconds(delay)); + total += delay; + delay *= 2; + if (delay > AFC_SLEEP_MAX) + { + delay = AFC_SLEEP_MAX; + } + } + m_logger->debug("Ingest queue has %s", queueLength() > m_lowWater + ? "failed to drain in sufficient time" : "has drained"); + m_performance->collect("flow controlled", total); + } +} diff --git a/C/services/south/south.cpp b/C/services/south/south.cpp old mode 100755 new mode 100644 index 811b457575..c105e46b7f --- a/C/services/south/south.cpp +++ b/C/services/south/south.cpp @@ -33,12 +33,15 @@ #include #include #include +#include #define SERVICE_TYPE "Southbound" extern int makeDaemon(void); extern void handler(int sig); +static void reconfThreadMain(void *arg); + using namespace std; /** @@ -103,8 +106,12 @@ bool dryrun = false; { service->setDryRun(); } - Logger::getLogger()->setMinLevel(logLevel); + Logger *logger = Logger::getLogger(); + logger->setMinLevel(logLevel); + // Start the service. This will oly return whren the serivce is shutdown service->start(coreAddress, corePort); + delete service; + delete logger; return 0; } @@ -201,46 +208,73 @@ void doIngest(Ingest *ingest, Reading reading) void doIngestV2(Ingest *ingest, ReadingSet *set) { std::vector *vec = set->getAllReadingsPtr(); - std::vector *vec2 = new std::vector; if (!vec) { Logger::getLogger()->info("%s:%d: V2 async ingest method: vec is NULL", __FUNCTION__, __LINE__); return; } - else - { - for (auto & r : *vec) - { - Reading *r2 = new Reading(*r); // Need to copy reading objects here, since "del set" below would remove encapsulated reading objects also - vec2->emplace_back(r2); - } - } + // move reading vector from set to new vector vec2 + std::vector *vec2 = set->moveAllReadings(); + Logger::getLogger()->debug("%s:%d: V2 async ingest method returned: vec->size()=%d", __FUNCTION__, __LINE__, vec->size()); ingest->ingest(vec2); delete vec2; // each reading object inside vector has been allocated on heap and moved to Ingest class's internal queue delete set; -} + ingest->flowControl(); +} /** * Constructor for the south service */ SouthService::SouthService(const string& myName, const string& token) : + southPlugin(NULL), + m_assetTracker(NULL), m_shutdown(false), m_readingsPerSec(1), m_throttle(false), m_throttled(false), m_token(token), m_repeatCnt(1), + m_pluginData(NULL), m_dryRun(false), - m_requestRestart(false) + m_requestRestart(false), + m_auditLogger(NULL), + m_perfMonitor(NULL) { m_name = myName; m_type = SERVICE_TYPE; + m_pollType = POLL_INTERVAL; logger = new Logger(myName); logger->setMinLevel("warning"); + + m_reconfThread = new std::thread(reconfThreadMain, this); +} + +/** + * Destructor for south service + */ +SouthService::~SouthService() +{ + m_cvNewReconf.notify_all(); // Wakeup the reconfigure thread to terminate it + m_reconfThread->join(); + delete m_reconfThread; + if (m_pluginData) + delete m_pluginData; + if (m_perfMonitor) + delete m_perfMonitor; + delete m_assetTracker; + delete m_auditLogger; + delete m_mgtClient; + + // We would like to shutdown the Python environment if it + // was running. 
However this causes a segmentation fault within Python + // so we currently can not do this +#if PYTHON_SHUTDOWN + PythonRuntime::shutdown(); // Shutdown and release Python resources +#endif } /** @@ -283,6 +317,9 @@ void SouthService::start(string& coreAddress, unsigned short corePort) // Allocate and save ManagementClient object m_mgtClient = new ManagementClient(coreAddress, corePort); + // Create the audit logger instance + m_auditLogger = new AuditLogger(m_mgtClient); + // Create an empty South category if one doesn't exist DefaultConfigCategory southConfig(string("South"), string("{}")); southConfig.setDescription("South"); @@ -332,6 +369,8 @@ void SouthService::start(string& coreAddress, unsigned short corePort) StorageClient storage(storageRecord.getAddress(), storageRecord.getPort()); storage.registerManagement(m_mgtClient); + + m_perfMonitor = new PerformanceMonitor(m_name, &storage); unsigned int threshold = 100; long timeout = 5000; std::string pluginName; @@ -384,18 +423,33 @@ void SouthService::start(string& coreAddress, unsigned short corePort) } m_assetTracker = new AssetTracker(m_mgtClient, m_name); - m_storageAssetTracker = new StorageAssetTracker(m_mgtClient, m_name); { // Instantiate the Ingest class - Ingest ingest(storage, timeout, threshold, m_name, pluginName, m_mgtClient); + Ingest ingest(storage, m_name, pluginName, m_mgtClient); + ingest.setPerfMon(m_perfMonitor); m_ingest = &ingest; + if (m_throttle) + { + m_ingest->setFlowControl(m_lowWater, m_highWater); + } if (m_configAdvanced.itemExists("statistics")) { m_ingest->setStatistics(m_configAdvanced.getValue("statistics")); } + if (m_configAdvanced.itemExists("perfmon")) + { + string perf = m_configAdvanced.getValue("perfmon"); + if (perf.compare("true") == 0) + m_perfMonitor->setCollecting(true); + else + m_perfMonitor->setCollecting(false); + } + + m_ingest->start(timeout, threshold); // Start the ingest threads running + try { m_readingsPerSec = 1; if (m_configAdvanced.itemExists("readingsPerSec")) @@ -432,29 +486,7 @@ void SouthService::start(string& coreAddress, unsigned short corePort) // Get and ingest data if (! southPlugin->isAsync()) { - string units = m_configAdvanced.getValue("units"); - unsigned long dividend = 1000000; - if (units.compare("second") == 0) - dividend = 1000000; - else if (units.compare("minute") == 0) - dividend = 60000000; - else if (units.compare("hour") == 0) - dividend = 3600000000; - m_rateUnits = units; - unsigned long usecs = dividend / m_readingsPerSec; - - if (usecs > MAX_SLEEP * 1000000) - { - double x = usecs / (MAX_SLEEP * 1000000); - m_repeatCnt = ceil(x); - usecs /= m_repeatCnt; - } - else - { - m_repeatCnt = 1; - } - m_desiredRate.tv_sec = (int)(usecs / 1000000); - m_desiredRate.tv_usec = (int)(usecs % 1000000); + calculateTimerRate(); m_timerfd = createTimerFd(m_desiredRate); // interval to be passed is in usecs m_currentRate = m_desiredRate; if (m_timerfd < 0) @@ -473,7 +505,7 @@ void SouthService::start(string& coreAddress, unsigned short corePort) logger->info("pollInterfaceV2=%s", pollInterfaceV2?"true":"false"); /* - * Start the plugin. If it fails with an excpetion retry the start with a delay + * Start the plugin. 
If it fails with an exception, retry the start with a delay * That delay starts at 500mS and will backoff to 1 minute * * We will continue to retry the start until the service is shutdown @@ -510,23 +542,43 @@ void SouthService::start(string& coreAddress, unsigned short corePort) while (!m_shutdown) { - uint64_t exp; + uint64_t exp = 0; ssize_t s; - long rep = m_repeatCnt; - while (rep > 0) + if (m_pollType == POLL_FIXED) + { + if (syncToNextPoll()) + exp = 1; // Perform one poll + } + else if (m_pollType == POLL_INTERVAL) { - s = read(m_timerfd, &exp, sizeof(uint64_t)); - if ((unsigned int)s != sizeof(uint64_t)) - logger->error("timerfd read()"); - if (exp > 100 && exp > m_readingsPerSec/2) - logger->error("%d expiry notifications accumulated", exp); - rep--; - if (m_shutdown) + long rep = m_repeatCnt; + while (rep > 0) { - break; + s = read(m_timerfd, &exp, sizeof(uint64_t)); + if ((unsigned int)s != sizeof(uint64_t)) + logger->error("timerfd read()"); + if (exp > 100 && exp > m_readingsPerSec/2) + logger->error("%d expiry notifications accumulated", exp); + rep--; + if (m_shutdown) + { + break; + } + checkPendingReconfigure(); + if (rep > m_repeatCnt) + { + // Reconfigure has resulted in more frequent + // polling + rep = m_repeatCnt; + } } } + else if (m_pollType == POLL_ON_DEMAND) + { + if (onDemandPoll()) + exp = 1; + } if (m_shutdown) { break; @@ -547,30 +599,23 @@ void SouthService::start(string& coreAddress, unsigned short corePort) } else // V2 poll method { + checkPendingReconfigure(); ReadingSet *set = southPlugin->pollV2(); - if (set) - { - std::vector *vec = set->getAllReadingsPtr(); - std::vector *vec2 = new std::vector; - if (!vec) - { - Logger::getLogger()->info("%s:%d: V2 poll method: vec is NULL", __FUNCTION__, __LINE__); - continue; - } - else - { - for (auto & r : *vec) - { - Reading *r2 = new Reading(*r); // Need to copy reading objects here, since "del set" below would remove encapsulated reading objects - vec2->emplace_back(r2); - } - } - - ingest.ingest(vec2); - pollCount += (int) vec2->size(); - delete vec2; // each reading object inside vector has been allocated on heap and moved to Ingest class's internal queue - delete set; - } + if (set) + { + std::vector *vec = set->getAllReadingsPtr(); + if (!vec) + { + Logger::getLogger()->info("%s:%d: V2 poll method: vec is NULL", __FUNCTION__, __LINE__); + continue; + } + // move reading vector from set to vec2 + std::vector *vec2 = set->moveAllReadings(); + ingest.ingest(vec2); + pollCount += (int) vec2->size(); + delete vec2; // each reading object inside vector has been allocated on heap and moved to Ingest class's internal queue + delete set; + } } throttlePoll(); } @@ -631,6 +676,7 @@ void SouthService::start(string& coreAddress, unsigned short corePort) } else { + m_shutdown = true; Logger::getLogger()->info("Dryrun of service, shutting down"); } @@ -650,6 +696,8 @@ void SouthService::start(string& coreAddress, unsigned short corePort) { southPlugin->shutdown(); } + delete southPlugin; + southPlugin = NULL; } } @@ -675,10 +723,7 @@ void SouthService::start(string& coreAddress, unsigned short corePort) */ void SouthService::stop() { - if (m_storageAssetTracker) - { - m_storageAssetTracker->releaseStorageAssetTracker(); - } + logger->info("Stopping south service...\n"); } @@ -795,7 +840,16 @@ void SouthService::shutdown() /* Stop recieving new requests and allow existing * requests to drain. 
*/ - m_shutdown = true; + if (m_pollType == POLL_ON_DEMAND) + { + lock_guard lk(m_pollMutex); + m_shutdown = true; + m_pollCV.notify_all(); + } + else + { + m_shutdown = true; + } logger->info("South service shutdown in progress."); } @@ -814,8 +868,11 @@ void SouthService::restart() /** * Configuration change notification + * + * @param categoryName Category name + * @param category Category value */ -void SouthService::configChange(const string& categoryName, const string& category) +void SouthService::processConfigChange(const string& categoryName, const string& category) { logger->info("Configuration change in category %s: %s", categoryName.c_str(), category.c_str()); @@ -839,6 +896,14 @@ void SouthService::configChange(const string& categoryName, const string& catego { m_ingest->setStatistics(m_configAdvanced.getValue("statistics")); } + if (m_configAdvanced.itemExists("perfmon")) + { + string perf = m_configAdvanced.getValue("perfmon"); + if (perf.compare("true") == 0) + m_perfMonitor->setCollecting(true); + else + m_perfMonitor->setCollecting(false); + } if (! southPlugin->isAsync()) { try { @@ -849,33 +914,61 @@ void SouthService::configChange(const string& categoryName, const string& catego m_readingsPerSec = 1; } string units = m_configAdvanced.getValue("units"); - unsigned long dividend = 1000000; - if (units.compare("second") == 0) - dividend = 1000000; - else if (units.compare("minute") == 0) - dividend = 60000000; - else if (units.compare("hour") == 0) - dividend = 3600000000; - if (newval != m_readingsPerSec || m_rateUnits.compare(units) != 0) + string pollType = m_configAdvanced.getValue("pollType"); + bool wakeup = false; + if (m_pollType == POLL_ON_DEMAND) { + wakeup = true; + } + if (pollType.compare("Fixed Times") == 0) + { + m_pollType = POLL_FIXED; + processNumberList(m_configAdvanced, "pollHours", m_hours); + processNumberList(m_configAdvanced, "pollMinutes", m_minutes); + processNumberList(m_configAdvanced, "pollSeconds", m_seconds); + + if (m_minutes.size() == 0 && m_hours.size() != 0) + m_minutes.push_back(0); + if (m_seconds.size() == 0 && m_minutes.size() != 0) + m_seconds.push_back(0); + + m_desiredRate.tv_sec = 1; + m_desiredRate.tv_usec = 0; + if (wakeup) + { + // Wakup from on demand polling + m_pollCV.notify_all(); + } + } + else if (pollType.compare("Interval") == 0 + && (newval != m_readingsPerSec || m_rateUnits.compare(units) != 0)) + { + m_pollType = POLL_INTERVAL; m_readingsPerSec = newval; m_rateUnits = units; close(m_timerfd); - unsigned long usecs = dividend / m_readingsPerSec; - if (usecs > MAX_SLEEP * 1000000) + calculateTimerRate(); + m_currentRate = m_desiredRate; + m_timerfd = createTimerFd(m_desiredRate); // interval to be passed is in usecs + if (wakeup) { - double x = usecs / (MAX_SLEEP * 1000000); - m_repeatCnt = ceil(x); - usecs /= m_repeatCnt; + // Wakup from on demand polling + m_pollCV.notify_all(); } - else + } + else if (pollType.compare("Interval") == 0 && m_pollType != POLL_INTERVAL) + { + // Change to interval mode without the rate changing + m_pollType = POLL_INTERVAL; + if (wakeup) { - m_repeatCnt = 1; + // Wakup from on demand polling + m_pollCV.notify_all(); } - m_desiredRate.tv_sec = (int)(usecs / 1000000); - m_desiredRate.tv_usec = (int)(usecs % 1000000); - m_currentRate = m_desiredRate; - m_timerfd = createTimerFd(m_desiredRate); // interval to be passed is in usecs + } + else if (pollType.compare("On Demand") == 0) + { + m_pollType = POLL_ON_DEMAND; } } catch (ConfigItemNotFound e) { logger->error("Failed to update poll 
interval following configuration change"); @@ -940,6 +1033,89 @@ void SouthService::configChange(const string& categoryName, const string& catego } } +/** + * Separate thread to run plugin_reconf, to avoid blocking + * service's management interface due to long plugin_poll calls + */ +static void reconfThreadMain(void *arg) +{ + SouthService *ss = (SouthService *)arg; + Logger::getLogger()->info("reconfThreadMain(): Spawned new thread for plugin reconf"); + ss->handlePendingReconf(); + Logger::getLogger()->info("reconfThreadMain(): plugin reconf thread exiting"); +} + +/** + * Handle configuration change notification; called by reconf thread + * Waits for some reconf operation(s) to get queued up, then works thru' them + */ +void SouthService::handlePendingReconf() +{ + while (isRunning()) + { + Logger::getLogger()->debug("SouthService::handlePendingReconf: Going into cv wait"); + mutex mtx; + unique_lock lck(mtx); + m_cvNewReconf.wait(lck); + Logger::getLogger()->debug("SouthService::handlePendingReconf: cv wait has completed; some reconf request(s) has/have been queued up"); + + while (isRunning()) + { + unsigned int numPendingReconfs = 0; + { + lock_guard guard(m_pendingNewConfigMutex); + numPendingReconfs = m_pendingNewConfig.size(); + if (numPendingReconfs) + Logger::getLogger()->debug("SouthService::handlePendingReconf(): will process %d entries in m_pendingNewConfig", numPendingReconfs); + else + { + Logger::getLogger()->debug("SouthService::handlePendingReconf DONE"); + break; + } + } + + for (unsigned int i=0; idebug("SouthService::handlePendingReconf(): Handling Configuration change #%d", i); + std::pair *reconfValue = NULL; + { + lock_guard guard(m_pendingNewConfigMutex); + reconfValue = &m_pendingNewConfig[i]; + } + std::string categoryName = reconfValue->first; + std::string category = reconfValue->second; + processConfigChange(categoryName, category); + + logger->debug("SouthService::handlePendingReconf(): Handling of configuration change #%d done", i); + } + + { + lock_guard guard(m_pendingNewConfigMutex); + for (unsigned int i=0; idebug("SouthService::handlePendingReconf DONE: first %d entry(ies) removed, m_pendingNewConfig new size=%d", numPendingReconfs, m_pendingNewConfig.size()); + } + } + } +} + +/** + * Configuration change notification using a separate thread + * + * @param categoryName Category name + * @param category Category value + */ +void SouthService::configChange(const string& categoryName, const string& category) +{ + { + lock_guard guard(m_pendingNewConfigMutex); + m_pendingNewConfig.emplace_back(std::make_pair(categoryName, category)); + Logger::getLogger()->debug("SouthService::reconfigure(): After adding new entry, m_pendingNewConfig.size()=%d", m_pendingNewConfig.size()); + + m_cvNewReconf.notify_all(); + } +} + /** * Add the generic south service configuration options to the advanced * category @@ -972,6 +1148,40 @@ void SouthService::addConfigDefaults(DefaultConfigCategory& defaultConfig) defaultConfig.addItem("units", "Reading Rate Per", "second", "second", rateUnits); defaultConfig.setItemDisplayName("units", "Reading Rate Per"); + + /* Now add the fixed time polling option */ + vector pollOptions = { "Interval", "Fixed Times", "On Demand" }; + defaultConfig.addItem("pollType", "Either poll at fixed intervals, at fixed times or when trigger by a poll control operation.", + "Interval", "Interval", pollOptions); + defaultConfig.setItemDisplayName("pollType", "Poll Type"); + + /* Add the validity for interval polling items */ + 
defaultConfig.setItemAttribute("readingsPerSec", + ConfigCategory::VALIDITY_ATTR, "pollType == \"Interval\""); + defaultConfig.setItemAttribute("units", + ConfigCategory::VALIDITY_ATTR, "pollType == \"Interval\""); + defaultConfig.setItemAttribute("throttle", + ConfigCategory::VALIDITY_ATTR, "pollType == \"Interval\""); + + /* Add the three time specifiers */ + defaultConfig.addItem("pollHours", + "List of hours on which to poll or leave empty for all hours", + "string", "", ""); + defaultConfig.setItemDisplayName("pollHours", "Hours"); + defaultConfig.setItemAttribute("pollHours", + ConfigCategory::VALIDITY_ATTR, "pollType == \"Fixed Times\""); + defaultConfig.addItem("pollMinutes", + "List of minutes on which to poll or leave empty for all minutes", + "string", "", ""); + defaultConfig.setItemDisplayName("pollMinutes", "Minutes"); + defaultConfig.setItemAttribute("pollMinutes", + ConfigCategory::VALIDITY_ATTR, "pollType == \"Fixed Times\""); + defaultConfig.addItem("pollSeconds", + "Seconds on which to poll expressed as a comma seperated list", + "string", "0,15,30,45", "0,15,30,40"); + defaultConfig.setItemDisplayName("pollSeconds", "Seconds"); + defaultConfig.setItemAttribute("pollSeconds", + ConfigCategory::VALIDITY_ATTR, "pollType == \"Fixed Times\""); } if (southPlugin->hasControl()) @@ -992,6 +1202,9 @@ void SouthService::addConfigDefaults(DefaultConfigCategory& defaultConfig) defaultConfig.addItem("statistics", "Collect statistics either for every asset ingested, for the service in total or both", "per asset & service", "per asset & service", statistics); defaultConfig.setItemDisplayName("statistics", "Statistics Collection"); + defaultConfig.addItem("perfmon", "Track and store performance counters", + "boolean", "false", "false"); + defaultConfig.setItemDisplayName("perfmon", "Performance Counters"); } /** @@ -1084,6 +1297,7 @@ struct timeval now, res; m_lastThrottle = now; m_throttled = true; logger->warn("%s Throttled down poll, rate is now %.1f%% of desired rate", m_name.c_str(), (desired * 100) / rate); + m_perfMonitor->collect("throttled rate", (long)(rate * 1000)); } else if (m_throttled && m_ingest->queueLength() < m_lowWater && res.tv_sec > SOUTH_THROTTLE_UP_INTERVAL) { @@ -1116,6 +1330,7 @@ struct timeval now, res; { logger->warn("%s Throttled up poll, rate is now %.1f%% of desired rate", m_name.c_str(), (desired * 100) / rate); } + m_perfMonitor->collect("throttled rate", (long)(rate * 1000)); close(m_timerfd); m_timerfd = createTimerFd(m_currentRate); // interval to be passed is in usecs m_lastThrottle = now; @@ -1152,7 +1367,21 @@ bool SouthService::setPoint(const string& name, const string& value) */ bool SouthService::operation(const string& operation, vector& params) { - if (southPlugin->hasControl()) + if (operation.compare("poll") == 0) + { + if (m_pollType == POLL_ON_DEMAND) + { + m_doPoll = true; + m_pollCV.notify_all(); + return true; + } + else + { + logger->warn("Received a poll request for a service that is not enabled for on demand polling"); + return false; + } + } + else if (southPlugin->hasControl()) { return southPlugin->operation(operation, params); } @@ -1162,3 +1391,327 @@ bool SouthService::operation(const string& operation, vector return false; } } + +/** + * Process a list of numbers into a vector of integers. + * The list of numbers is obtained from a configuration + * item. 
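+ *
+ * A minimal usage sketch (illustration only; the call mirrors the use made in
+ * calculateTimerRate() below): with the default pollSeconds value of
+ * "0,15,30,45" the list is populated with {0, 15, 30, 45}.
+ *
+ *	processNumberList(m_configAdvanced, "pollSeconds", m_seconds);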
+ *
+ * @param category	The configuration category
+ * @param item		Name of the configuration item
+ * @param list		The vector to populate
+ */
+void SouthService::processNumberList(const ConfigCategory& category,
+		const string& item, vector& list)
+{
+	list.clear();
+	if (!category.itemExists(item))
+	{
+		Logger::getLogger()->warn("Item %s does not exist", item.c_str());
+		return;
+	}
+	string value = category.getValue(item);
+	if (value.length() == 0)
+	{
+		Logger::getLogger()->info("Item %s is empty", item.c_str());
+		return;
+	}
+
+	const char *ptr = value.c_str();
+	char *eptr;
+	while (*ptr)
+	{
+		list.push_back(strtoul(ptr, &eptr, 10));
+		ptr = eptr;
+		if (*ptr == ',')
+			ptr++;
+	}
+}
+
+/**
+ * Calculate the rate at which the timer should trigger and the repeat
+ * requirement needed to match the requested poll rate
+ */
+void SouthService::calculateTimerRate()
+{
+	string pollType = m_configAdvanced.getValue("pollType");
+	if (pollType.compare("Fixed Times") == 0)
+	{
+		if (m_pollType == POLL_ON_DEMAND)
+		{
+			lock_guard lk(m_pollMutex);
+			m_pollType = POLL_FIXED;
+			m_pollCV.notify_all();
+		}
+		m_pollType = POLL_FIXED;
+		processNumberList(m_configAdvanced, "pollHours", m_hours);
+		processNumberList(m_configAdvanced, "pollMinutes", m_minutes);
+		processNumberList(m_configAdvanced, "pollSeconds", m_seconds);
+
+		if (m_minutes.size() == 0 && m_hours.size() != 0)
+			m_minutes.push_back(0);
+		if (m_seconds.size() == 0 && m_minutes.size() != 0)
+			m_seconds.push_back(0);
+
+		m_desiredRate.tv_sec = 1;
+		m_desiredRate.tv_usec = 0;
+	}
+	else if (pollType.compare("On Demand") == 0)
+	{
+		m_pollType = POLL_ON_DEMAND;
+	}
+	else
+	{
+		if (m_pollType == POLL_ON_DEMAND)
+		{
+			lock_guard lk(m_pollMutex);
+			m_pollType = POLL_INTERVAL;
+			m_pollCV.notify_all();
+		}
+		m_pollType = POLL_INTERVAL;
+		string units = m_configAdvanced.getValue("units");
+		unsigned long dividend = 1000000;
+		if (units.compare("second") == 0)
+			dividend = 1000000;
+		else if (units.compare("minute") == 0)
+			dividend = 60000000;
+		else if (units.compare("hour") == 0)
+			dividend = 3600000000;
+		m_rateUnits = units;
+		unsigned long usecs = dividend / m_readingsPerSec;
+
+		if (usecs > MAX_SLEEP * 1000000)
+		{
+			double x = usecs / (MAX_SLEEP * 1000000);
+			m_repeatCnt = ceil(x);
+			usecs /= m_repeatCnt;
+		}
+		else
+		{
+			m_repeatCnt = 1;
+		}
+		m_desiredRate.tv_sec = (int)(usecs / 1000000);
+		m_desiredRate.tv_usec = (int)(usecs % 1000000);
+	}
+}
+
+/**
+ * Find the next fixed time poll time and wait for that time before returning.
+ * This method will also return if m_shutdown is set.
+ *
+ * @return bool True if the return is due to a poll being required.
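+ *
+ * For example (illustration only), with pollHours and pollMinutes left empty
+ * and pollSeconds set to the default "0,15,30,45" this method waits until the
+ * next quarter of the current minute, so the service polls four times per minute.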
+ */
+bool SouthService::syncToNextPoll()
+{
+	time_t tim = time(0);
+	struct tm tm;
+	localtime_r(&tim, &tm);
+	unsigned long waitFor = 1;
+
+	if (m_hours.size() == 0 && m_minutes.size() == 0 && m_seconds.size() == 0)
+	{
+		Logger::getLogger()->error("Poll time misconfigured.");
+	}
+	else if (m_hours.size() == 0 && m_minutes.size() == 0)
+	{
+		// Only looking at seconds
+		unsigned int i;
+		for (i = 0; i < m_seconds.size() && m_seconds[i] <= (unsigned)tm.tm_sec; i++)
+		{
+		}
+		if (i == m_seconds.size())
+		{
+			waitFor = (60 - (unsigned)tm.tm_sec) + m_seconds[0];
+		}
+		else
+		{
+			waitFor = m_seconds[i] - (unsigned)tm.tm_sec;
+		}
+	}
+	else if (m_hours.size() == 0)
+	{
+		unsigned int target_min = (unsigned)tm.tm_min;
+		unsigned int min, sec;
+		for (min = 0; min < m_minutes.size() && m_minutes[min] < target_min; min++)
+		{
+		}
+		if (min == m_minutes.size())	// Reset to start of minute list
+		{
+			min = 0;
+		}
+
+		if (m_minutes[min] != target_min)	// Not this minute
+		{
+			sec = 0;	// Always use first setting of seconds
+		}
+		else
+		{
+			for (sec = 0; sec < m_seconds.size() && m_seconds[sec] <= (unsigned)tm.tm_sec; sec++)
+			{
+			}
+			if (sec == m_seconds.size())
+			{
+				// Too late in this minute use next minute setting
+				sec = 0;
+				min++;
+				if (min >= m_minutes.size())
+				{
+					min = 0;
+				}
+			}
+		}
+		waitFor = 0;
+		if (m_minutes[min] > (unsigned)tm.tm_min)
+		{
+			waitFor = 60 * (m_minutes[min] - (unsigned)tm.tm_min);
+		}
+		else if (m_minutes[min] < (unsigned)tm.tm_min)
+		{
+			waitFor = 60 * ((60 - (unsigned)tm.tm_min) + m_minutes[min]);
+		}
+		if (m_seconds[sec] > (unsigned)tm.tm_sec)
+		{
+			waitFor += (m_seconds[sec] - (unsigned)tm.tm_sec);
+		}
+		else
+		{
+			waitFor += ((60 - (unsigned)tm.tm_sec) + m_seconds[sec]);
+		}
+	}
+	else	// Hours, minutes and seconds
+	{
+		unsigned int hour, min, sec;
+		for (hour = 0; hour < m_hours.size() && m_hours[hour] < (unsigned)tm.tm_hour; hour++)
+		{
+		}
+		if (hour == m_hours.size())	// Reset to start of hour list
+		{
+			min = 0;
+			sec = 0;
+			hour = 0;
+		}
+		else if (m_hours[hour] == (unsigned)tm.tm_hour)	// Check for this hour
+		{
+			for (min = 0; min < m_minutes.size() && m_minutes[min] < (unsigned)tm.tm_min; min++)
+			{
+			}
+			if (min < m_minutes.size())	// may still be a trigger in this hour
+			{
+				for (sec = 0; sec < m_seconds.size() && m_seconds[sec] <= (unsigned)tm.tm_sec; sec++)
+				{
+				}
+				if (sec == m_seconds.size())
+				{
+					// Too late in this minute use next minute setting
+					sec = 0;
+					min++;
+					if (min == m_minutes.size())
+					{
+						min = 0;
+						sec = 0;
+						hour++;
+						if (m_hours.size() == hour)
+							hour = 0;
+					}
+				}
+			}
+			else
+			{
+				hour++;
+				min = 0;
+				sec = 0;
+				if (m_hours.size() == hour)
+					hour = 0;
+			}
+		}
+		else
+		{
+			hour++;
+			min = 0;
+			sec = 0;
+			if (m_hours.size() == hour)
+				hour = 0;
+		}
+		waitFor = 0;
+		if (m_hours[hour] > (unsigned)tm.tm_hour)
+		{
+			waitFor += 60 * 60 * (m_hours[hour] - (unsigned)tm.tm_hour);
+		}
+		else if (m_hours[hour] < (unsigned)tm.tm_hour)
+		{
+			waitFor += 60 * 60 * ((24 - (unsigned)tm.tm_hour) + m_hours[hour]);
+		}
+		if (m_minutes[min] > (unsigned)tm.tm_min)
+		{
+			waitFor += 60 * (m_minutes[min] - (unsigned)tm.tm_min);
+		}
+		else if (m_minutes[min] < (unsigned)tm.tm_min)
+		{
+			waitFor += 60 * ((60 - (unsigned)tm.tm_min) + m_minutes[min]);
+		}
+		if (m_seconds[sec] > (unsigned)tm.tm_sec)
+		{
+			waitFor += (m_seconds[sec] - (unsigned)tm.tm_sec);
+		}
+		else
+		{
+			waitFor += ((60 - (unsigned)tm.tm_sec) + m_seconds[sec]);
+		}
+	}
+
+
+	uint64_t exp;
+	while (waitFor)
+	{
+		if (read(m_timerfd, &exp, sizeof(uint64_t)) == -1)
+			return false;
+		waitFor--;
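+		// Each read() on the timer fd consumes one timer expiry; for fixed
+		// times polling calculateTimerRate() sets the timer period to one
+		// second, so this loop counts waitFor down in (roughly) one second steps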
+		if (m_shutdown)
+			return false;
+		if (m_pollType != POLL_FIXED)	// Configuration has changed the poll type
+		{
+			return false;
+		}
+	}
+	return true;
+}
+
+/**
+ * Wait until either a shutdown request is received or a poll operation is requested
+ *
+ * @return bool True if the return is due to a new poll request
+ */
+bool SouthService::onDemandPoll()
+{
+	unique_lock lk(m_pollMutex);
+	if (! m_shutdown)
+	{
+		m_doPoll = false;
+		m_pollCV.wait(lk);
+	}
+	return m_doPoll;
+}
+
+/**
+ * Check to see if there is a reconfiguration operation pending in another
+ * thread and yield until that reconfiguration has occurred.
+ */
+void SouthService::checkPendingReconfigure()
+{
+	while(1)
+	{
+		unsigned int numPendingReconfs;
+		{
+			lock_guard guard(m_pendingNewConfigMutex);
+			numPendingReconfs = m_pendingNewConfig.size();
+		}
+		// if a reconf is pending, make this poll thread yield the CPU; sleep_for is needed to sleep this thread for a sufficiently long time
+		if (numPendingReconfs)
+		{
+			Logger::getLogger()->debug("SouthService::start(): %d entries in m_pendingNewConfig, poll thread yielding CPU", numPendingReconfs);
+			std::this_thread::sleep_for(std::chrono::milliseconds(200));
+		}
+		else
+			return;
+	}
+}
diff --git a/C/services/south/south_plugin.cpp b/C/services/south/south_plugin.cpp
index 5f931ec83e..7cd806f0cf 100755
--- a/C/services/south/south_plugin.cpp
+++ b/C/services/south/south_plugin.cpp
@@ -31,6 +31,8 @@ std::mutex mtx2;
  */
 SouthPlugin::SouthPlugin(PLUGIN_HANDLE handle, const ConfigCategory& category) : Plugin(handle)
 {
+	m_started = false;	// Set started indicator, overridden by async plugins only
+
 	// Call the init method of the plugin
 	PLUGIN_HANDLE (*pluginInit)(const void *) =
 		(PLUGIN_HANDLE (*)(const void *))
 			manager->resolveSymbol(handle, "plugin_init");
@@ -110,6 +112,13 @@ SouthPlugin::SouthPlugin(PLUGIN_HANDLE handle, const ConfigCategory& category) :
 	}
 }
 
+/**
+ * South plugin destructor
+ */
+SouthPlugin::~SouthPlugin()
+{
+}
+
 /**
  * Call the start method in the plugin
  */
@@ -117,7 +126,9 @@ void SouthPlugin::start()
 {
 	lock_guard guard(mtx2);
 	try {
-		return this->pluginStartPtr(instance);
+		this->pluginStartPtr(instance);
+		m_started = true;	// Set start indicator
+		return;
 	} catch (exception& e) {
 		Logger::getLogger()->fatal("Unhandled exception raised in south plugin start(), %s", e.what());
@@ -137,7 +148,9 @@ void SouthPlugin::startData(const string& data)
 {
 	lock_guard guard(mtx2);
 	try {
-		return this->pluginStartDataPtr(instance, data);
+		this->pluginStartDataPtr(instance, data);
+		m_started = true;	// Set start indicator
+		return;
 	} catch (exception& e) {
 		Logger::getLogger()->fatal("Unhandled exception raised in south plugin start(), %s", e.what());
@@ -177,15 +190,16 @@ ReadingSet* SouthPlugin::pollV2()
 {
 	lock_guard guard(mtx2);
 	try {
-		std::vector *vec = this->pluginPollPtrV2(instance);
-		if(vec)
-		{
-			ReadingSet *set = new ReadingSet(vec);
-			delete vec;
-			return set; // this->pluginPollPtrV2(instance);
-		}
-		else
-			return NULL;
+		std::vector *vec = this->pluginPollPtrV2(instance);
+		if(vec)
+		{
+			ReadingSet *set = new ReadingSet(vec);
+			vec->clear();
+			delete vec;
+			return set; // this->pluginPollPtrV2(instance);
+		}
+		else
+			return NULL;
 	} catch (exception& e) {
 		Logger::getLogger()->fatal("Unhandled exception raised in v2 south plugin poll(), %s", e.what());
@@ -309,7 +323,29 @@ bool SouthPlugin::write(const string& name, const string& value)
 	try {
 		if (pluginWritePtr)
 		{
-			return this->pluginWritePtr(instance, name, value);
+			bool run = true;
+			// Check plugin_start is done for 
async plugin before calling pluginWritePtr + if (isAsync()) { + int tries = 0; + while (!m_started) { + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + Logger::getLogger()->debug("South plugin write call is on hold, try %d", tries); + if (tries > 20) { + break; + } + tries++; + } + run = m_started; + } + + if (run) { + return this->pluginWritePtr(instance, name, value); + } + else + { + Logger::getLogger()->error("South plugin write canceled after waiting for 2 seconds"); + return false; + } } } catch (exception& e) { Logger::getLogger()->fatal("Unhandled exception in plugin write operation: %s", e.what()); @@ -349,7 +385,29 @@ bool SouthPlugin::operation(const string& name, vector& para } params[count] = NULL; try { - status = this->pluginOperationPtr(instance, name, (int)count, params); + bool run = true; + // Check plugin_start is done for async plugin before calling pluginOperationPtr + if (isAsync()) { + int tries = 0; + while (!m_started) { + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + Logger::getLogger()->debug("South plugin operation is on hold, try %d", tries); + if (tries > 20) { + break; + } + tries++; + } + run = m_started; + } + + if (run) { + status = this->pluginOperationPtr(instance, name, (int)count, params); + } + else + { + Logger::getLogger()->error("South plugin operation canceled after waiting for 2 seconds"); + return false; + } } catch (exception& e) { Logger::getLogger()->fatal("Unhandled exception in plugin operation: %s", e.what()); } diff --git a/C/services/storage/configuration.cpp b/C/services/storage/configuration.cpp index 8ddd427cb7..a3253ff684 100644 --- a/C/services/storage/configuration.cpp +++ b/C/services/storage/configuration.cpp @@ -23,15 +23,17 @@ static const char *defaultConfiguration = QUOTE({ "value" : "sqlite", "default" : "sqlite", "description" : "The main storage plugin to load", - "type" : "string", + "type" : "enumeration", + "options" : [ "sqlite", "sqlitelb", "postgres" ], "displayName" : "Storage Plugin", "order" : "1" }, "readingPlugin" : { - "value" : "", - "default" : "", - "description" : "The storage plugin to load for readings data. If blank the main storage plugin is used.", - "type" : "string", + "value" : "Use main plugin", + "default" : "Use main plugin", + "description" : "The storage plugin to load for readings data.", + "type" : "enumeration", + "options" : [ "Use main plugin", "sqlite", "sqlitelb", "sqlitememory", "postgres" ], "displayName" : "Readings Plugin", "order" : "2" }, @@ -83,6 +85,9 @@ using namespace rapidjson; /** * Constructor for storage service configuration class. + * + * TODO Update the options for plugin and readingPlugin with any other storage + * plugins that have been installed */ StorageConfiguration::StorageConfiguration() { @@ -92,6 +97,14 @@ StorageConfiguration::StorageConfiguration() checkCache(); } +/** + * Storage configuration destructor + */ +StorageConfiguration::~StorageConfiguration() +{ + delete document; +} + /** * Return if a value exsits for the cached configuration category */ @@ -137,7 +150,7 @@ bool StorageConfiguration::setValue(const string& key, const string& value) const char *cstr = value.c_str(); item["value"].SetString(cstr, strlen(cstr), document->GetAllocator()); return true; - } catch (exception e) { + } catch (...) 
{ return false; } } @@ -198,7 +211,7 @@ string cachefile; GetParseError_En(document->GetParseError()), document->GetErrorOffset()); } - } catch (exception ex) { + } catch (exception& ex) { logger->error("Configuration cache failed to read %s.", ex.what()); } } @@ -293,10 +306,28 @@ DefaultConfigCategory *StorageConfiguration::getDefaultCategory() * end up reporting the wrong information in the UI when we look at the category, therefore * we special case the plugin name and set the default to whatever the current value is * for just this property. + * + * FOGL-7074 Make the plugin selection an enumeration */ void StorageConfiguration::checkCache() { +bool forceUpdate = false; + if (document->HasMember("plugin")) + { + Value& item = (*document)["plugin"]; + if (item.HasMember("type") && item["type"].IsString()) + { + const char *type = item["type"].GetString(); + if (strcmp(type, "enumeration")) + { + // It's not an enumeration currently + forceUpdate = true; + } + } + } + + if (forceUpdate == false && document->HasMember("plugin")) { Value& item = (*document)["plugin"]; if (item.HasMember("type")) @@ -305,11 +336,18 @@ void StorageConfiguration::checkCache() item["default"].SetString(val, strlen(val)); Value& rp = (*document)["readingPlugin"]; const char *rval = getValue("readingPlugin"); - rp["default"].SetString(rval, strlen(rval)); + if (strlen(rval) == 0) + { + rval = "Use main plugin"; + } + char *ncrval = strdup(rval); + rp["default"].SetString(ncrval, strlen(rval)); + rp["value"].SetString(ncrval, strlen(rval)); logger->info("Storage configuration cache is up to date"); return; } } + logger->info("Storage configuration cache is not up to date"); Document *newdoc = new Document(); newdoc->Parse(defaultConfiguration); @@ -335,6 +373,10 @@ void StorageConfiguration::checkCache() } if (strcmp(name, "readingPlugin") == 0) { + if (strlen(val) == 0) + { + val = "Use main plugin"; + } newval["default"].SetString(strdup(val), strlen(val)); logger->warn("Set default of %s to %s", name, val); } diff --git a/C/services/storage/include/configuration.h b/C/services/storage/include/configuration.h index 1918fc62f2..a0fc8eac0d 100644 --- a/C/services/storage/include/configuration.h +++ b/C/services/storage/include/configuration.h @@ -32,6 +32,7 @@ class StorageConfiguration { public: StorageConfiguration(); + ~StorageConfiguration(); const char *getValue(const std::string& key); bool hasValue(const std::string& key); bool setValue(const std::string& key, const std::string& value); diff --git a/C/services/storage/include/storage_api.h b/C/services/storage/include/storage_api.h index fc2f34a27c..1b2d152ba4 100644 --- a/C/services/storage/include/storage_api.h +++ b/C/services/storage/include/storage_api.h @@ -27,7 +27,9 @@ using HttpServer = SimpleWeb::Server; #define READING_ACCESS "^/storage/reading$" #define READING_QUERY "^/storage/reading/query" #define READING_PURGE "^/storage/reading/purge" -#define READING_INTEREST "^/storage/reading/interest/([A-Za-z\\*][a-zA-Z0-9_%\\.\\-]*)$" +#define READING_INTEREST "^/storage/reading/interest/([A-Za-z0-9\\*][a-zA-Z0-9_%\\.\\-]*)$" +#define TABLE_INTEREST "^/storage/table/interest/([A-Za-z\\*][a-zA-Z0-9_%\\.\\-]*)$" + #define GET_TABLE_SNAPSHOTS "^/storage/table/([A-Za-z][a-zA-Z_0-9_]*)/snapshot$" #define CREATE_TABLE_SNAPSHOT GET_TABLE_SNAPSHOTS #define LOAD_TABLE_SNAPSHOT "^/storage/table/([A-Za-z][a-zA-Z_0-9_]*)/snapshot/([a-zA-Z_0-9_]*)$" @@ -58,6 +60,7 @@ class StorageApi { public: StorageApi(const unsigned short port, const unsigned int threads); + 
~StorageApi(); static StorageApi *getInstance(); void initResources(); void setPlugin(StoragePlugin *); @@ -79,6 +82,8 @@ class StorageApi { void readingPurge(shared_ptr response, shared_ptr request); void readingRegister(shared_ptr response, shared_ptr request); void readingUnregister(shared_ptr response, shared_ptr request); + void tableRegister(shared_ptr response, shared_ptr request); + void tableUnregister(shared_ptr response, shared_ptr request); void createTableSnapshot(shared_ptr response, shared_ptr request); void loadTableSnapshot(shared_ptr response, shared_ptr request); void deleteTableSnapshot(shared_ptr response, shared_ptr request); diff --git a/C/services/storage/include/storage_registry.h b/C/services/storage/include/storage_registry.h index fa3b9ddb6a..bdb109965d 100644 --- a/C/services/storage/include/storage_registry.h +++ b/C/services/storage/include/storage_registry.h @@ -10,6 +10,16 @@ typedef std::vector > REGISTRY; +typedef struct { + std::string url; + std::string key; + std::vector keyValues; + std::string operation; +} TableRegistration; + +typedef std::vector > REGISTRY_TABLE; + + /** * StorageRegistry - a class that manages requests from other microservices * to register interest in new readings being inserted into the storage layer @@ -22,16 +32,40 @@ class StorageRegistry { void registerAsset(const std::string& asset, const std::string& url); void unregisterAsset(const std::string& asset, const std::string& url); void process(const std::string& payload); + void processTableInsert(const std::string& tableName, const std::string& payload); + void processTableUpdate(const std::string& tableName, const std::string& payload); + void processTableDelete(const std::string& tableName, const std::string& payload); + void registerTable(const std::string& table, const std::string& url); + void unregisterTable(const std::string& table, const std::string& url); void run(); private: void processPayload(char *payload); - void sendPayload(const std::string& url, char *payload); + void sendPayload(const std::string& url, const char *payload); void filterPayload(const std::string& url, char *payload, const std::string& asset); + void processInsert(char *tableName, char *payload); + void processUpdate(char *tableName, char *payload); + void processDelete(char *tableName, char *payload); + TableRegistration* + parseTableSubscriptionPayload(const std::string& payload); + void insertTestTableReg(); + void removeTestTableReg(int n); + typedef std::pair Item; + typedef std::tuple TableItem; REGISTRY m_registrations; + REGISTRY_TABLE m_tableRegistrations; + std::queue m_queue; + std::queue + m_tableInsertQueue; + std::queue + m_tableUpdateQueue; + std::queue + m_tableDeleteQueue; std::mutex m_qMutex; + std::mutex m_registrationsMutex; + std::mutex m_tableRegistrationsMutex; std::thread *m_thread; std::condition_variable m_cv; std::mutex m_cvMutex; diff --git a/C/services/storage/include/storage_service.h b/C/services/storage/include/storage_service.h index 56a9cd44b1..6309208716 100644 --- a/C/services/storage/include/storage_service.h +++ b/C/services/storage/include/storage_service.h @@ -28,6 +28,7 @@ class StorageService : public ServiceHandler { public: StorageService(const string& name); + ~StorageService(); void start(std::string& coreAddress, unsigned short corePort); void stop(); void shutdown(); diff --git a/C/services/storage/pluginconfiguration.cpp b/C/services/storage/pluginconfiguration.cpp index 1fac092658..03e5517ee3 100644 --- 
a/C/services/storage/pluginconfiguration.cpp +++ b/C/services/storage/pluginconfiguration.cpp @@ -82,7 +82,7 @@ bool StoragePluginConfiguration::setValue(const string& key, const string& value const char *cstr = value.c_str(); item["value"].SetString(cstr, strlen(cstr), m_document->GetAllocator()); return true; - } catch (exception e) { + } catch (...) { return false; } } @@ -145,7 +145,7 @@ string cachefile; GetParseError_En(m_document->GetParseError()), m_document->GetErrorOffset()); } - } catch (exception ex) { + } catch (exception& ex) { m_logger->error("Configuration cache failed to read %s.", ex.what()); } } diff --git a/C/services/storage/storage.cpp b/C/services/storage/storage.cpp index 4f3439b937..8999e21e41 100644 --- a/C/services/storage/storage.cpp +++ b/C/services/storage/storage.cpp @@ -26,7 +26,8 @@ #include #include -#define NO_EXIT_STACKTRACE 0 // Set to 1 to make storage loop after stacktrace +#define NO_EXIT_STACKTRACE 0 // Set to 1 to make storage loop after stacktrace + // This is useful to be able to attach a debbugger extern int makeDaemon(void); @@ -139,19 +140,19 @@ string logLevel = "warning"; exit(1); } - StorageService *service = new StorageService(myName); + StorageService service(myName); Logger::getLogger()->setMinLevel(logLevel); if (returnPlugin) { - cout << service->getPluginName() << " " << service->getPluginManagedStatus() << endl; + cout << service.getPluginName() << " " << service.getPluginManagedStatus() << endl; } else if (returnReadingsPlugin) { - cout << service->getReadingPluginName() << " " << service->getPluginManagedStatus() << endl; + cout << service.getReadingPluginName() << " " << service.getPluginManagedStatus() << endl; } else { - service->start(coreAddress, corePort); + service.start(coreAddress, corePort); } return 0; } @@ -238,6 +239,16 @@ unsigned short servicePort; api = new StorageApi(servicePort, threads); } +/** + * Storage Service destructor + */ +StorageService::~StorageService() +{ + delete api; + delete config; + delete logger; +} + /** * Start the storage service */ @@ -273,11 +284,24 @@ void StorageService::start(string& coreAddress, unsigned short corePort) ManagementClient *client = new ManagementClient(coreAddress, corePort); client->registerService(record); + // FOGL-7074 upgrade step + try { + ConfigCategory cat = client->getCategory("Storage"); + string rp = cat.getValue("readingPlugin"); + if (rp.empty()) + { + client->setCategoryItemValue("Storage", "readingPlugin", + "Use main plugin"); + } + } catch (...) { + // ignore + } + // Add the default configuration under the Advanced category unsigned int retryCount = 0; DefaultConfigCategory *conf = config->getDefaultCategory(); conf->setDescription(CATEGORY_DESCRIPTION); - while (client->addCategory(*conf, true) == false && ++retryCount < 10) + while (client->addCategory(*conf, false) == false && ++retryCount < 10) { sleep(2 * retryCount); } @@ -309,7 +333,7 @@ void StorageService::start(string& coreAddress, unsigned short corePort) } catch (...) 
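Several of the corrected catch clauses above switch from catching exception by value to catching by reference. Catching by value slices a derived exception down to std::exception and copies it, so what() can lose the derived message; catching by reference preserves it. A minimal illustration, independent of the Fledge code:

```cpp
#include <iostream>
#include <stdexcept>

int main()
{
    try {
        throw std::runtime_error("configuration cache is unreadable");
    } catch (std::exception& ex) {           // catch by reference: no slicing
        std::cout << ex.what() << std::endl; // prints the runtime_error message
    }

    try {
        throw std::runtime_error("configuration cache is unreadable");
    } catch (std::exception ex) {            // catch by value: object is sliced and copied
        std::cout << ex.what() << std::endl; // may only report the base-class text
    }
    return 0;
}
```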
{ } - // Regsiter for configuration changes to our category + // Register for configuration changes to our category ConfigHandler *configHandler = ConfigHandler::getInstance(client); configHandler->registerCategory(this, STORAGE_CATEGORY); @@ -326,7 +350,7 @@ void StorageService::start(string& coreAddress, unsigned short corePort) children1.push_back(conf->getName()); client->addChildCategories(STORAGE_CATEGORY, children1); - // Regsiter for configuration changes to our category + // Register for configuration changes to our storage plugin category ConfigHandler *configHandler = ConfigHandler::getInstance(client); configHandler->registerCategory(this, conf->getName()); @@ -350,13 +374,19 @@ void StorageService::start(string& coreAddress, unsigned short corePort) children1.push_back(conf->getName()); client->addChildCategories(STORAGE_CATEGORY, children1); - // Regsiter for configuration changes to our category + // Regsiter for configuration changes to our reading category category ConfigHandler *configHandler = ConfigHandler::getInstance(client); configHandler->registerCategory(this, conf->getName()); } } } + // Now we are running force the plugin names back to the configuration manager to + // make sure they match what we are running. This can be out of sync if the storage + // configuration cache has been manually reset or altered while Fledge was down + client->setCategoryItemValue(STORAGE_CATEGORY, "plugin", config->getValue("plugin")); + client->setCategoryItemValue(STORAGE_CATEGORY, "readingPlugin", config->getValue("readingPlugin")); + // Wait for all the API threads to complete api->wait(); @@ -393,7 +423,7 @@ void StorageService::stop() /** * Load the configured storage plugin or plugins * - * @return bool True if the plugins have been l;oaded and support the correct operations + * @return bool True if the plugins have been loaded and support the correct operations */ bool StorageService::loadPlugin() { @@ -442,6 +472,14 @@ bool StorageService::loadPlugin() // Single plugin does everything return true; } + if (strcmp(readingPluginName, plugin) == 0 + || strcmp(readingPluginName, "Use main plugin") == 0) + { + // Storage plugin and reading plugin are the same, or we have been + // explicitly told to use the storage plugin for reading so no need + // to add a reading plugin + return true; + } if (plugin == NULL) { logger->error("Unable to fetch reading plugin name from configuration.\n"); diff --git a/C/services/storage/storage_api.cpp b/C/services/storage/storage_api.cpp index fb9254a612..146df5c82c 100644 --- a/C/services/storage/storage_api.cpp +++ b/C/services/storage/storage_api.cpp @@ -252,6 +252,24 @@ void readingUnregisterWrapper(shared_ptr response, shared_ api->readingUnregister(response, request); } +/** + * Wrapper function for the table interest register API call. + */ +void tableRegisterWrapper(shared_ptr response, shared_ptr request) +{ + StorageApi *api = StorageApi::getInstance(); + api->tableRegister(response, request); +} + +/** + * Wrapper function for the table interest unregister API call. + */ +void tableUnregisterWrapper(shared_ptr response, shared_ptr request) +{ + StorageApi *api = StorageApi::getInstance(); + api->tableUnregister(response, request); +} + /** * Wrapper function for the create snapshot API call. 
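With FOGL-7074 the readingPlugin item is no longer left as an empty string; the sentinel value "Use main plugin" now expresses "store readings with the main storage plugin". The test in loadPlugin() above can be summarised as a small predicate, sketched here as a standalone function; the real code also has to cope with the configuration values not being fetchable at all.

```cpp
#include <cstring>

// Sketch only: mirrors the decision in StorageService::loadPlugin() about
// whether a separate readings plugin must be loaded.
static bool needsSeparateReadingPlugin(const char *plugin, const char *readingPlugin)
{
    if (readingPlugin == nullptr || *readingPlugin == '\0')
        return false;                                   // nothing configured: main plugin handles readings
    if (strcmp(readingPlugin, "Use main plugin") == 0)
        return false;                                   // explicit sentinel: main plugin handles readings
    if (strcmp(readingPlugin, plugin) == 0)
        return false;                                   // same plugin named twice: load it once
    return true;                                        // a genuinely different readings plugin is configured
}
```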
*/ @@ -367,7 +385,7 @@ void storageTableQueryWrapper(shared_ptr response, /** * Construct the singleton Storage API */ -StorageApi::StorageApi(const unsigned short port, const unsigned int threads) : readingPlugin(0), streamHandler(0) +StorageApi::StorageApi(const unsigned short port, const unsigned int threads) : m_thread(NULL), readingPlugin(0), streamHandler(0) { m_port = port; @@ -378,6 +396,24 @@ StorageApi::StorageApi(const unsigned short port, const unsigned int threads) : StorageApi::m_instance = this; } +/** + * Destructor for the storage API class. There is only ever one StorageApi class + * in existance and it lives for the entire duration of the storage service, so this + * is really for completerness rather than any pracitical use. + */ +StorageApi::~StorageApi() +{ + if (m_server) + { + delete m_server; + } + m_instance = NULL; + if (m_thread) + { + delete m_thread; + } +} + /** * Return the singleton instance of the StorageAPI class */ @@ -421,6 +457,9 @@ void StorageApi::initResources() m_server->resource[READING_INTEREST]["POST"] = readingRegisterWrapper; m_server->resource[READING_INTEREST]["DELETE"] = readingUnregisterWrapper; + m_server->resource[TABLE_INTEREST]["POST"] = tableRegisterWrapper; + m_server->resource[TABLE_INTEREST]["DELETE"] = tableUnregisterWrapper; + m_server->resource[CREATE_TABLE_SNAPSHOT]["POST"] = createTableSnapshotWrapper; m_server->resource[LOAD_TABLE_SNAPSHOT]["PUT"] = loadTableSnapshotWrapper; m_server->resource[DELETE_TABLE_SNAPSHOT]["DELETE"] = deleteTableSnapshotWrapper; @@ -536,6 +575,7 @@ string responsePayload; int rval = plugin->commonInsert(tableName, payload); if (rval != -1) { + registry.processTableInsert(tableName, payload); responsePayload = "{ \"response\" : \"inserted\", \"rows_affected\" : "; responsePayload += to_string(rval); responsePayload += " }"; @@ -612,6 +652,7 @@ string responsePayload; int rval = plugin->commonUpdate(tableName, payload); if (rval != -1) { + registry.processTableUpdate(tableName, payload); responsePayload = "{ \"response\" : \"updated\", \"rows_affected\" : "; responsePayload += to_string(rval); responsePayload += " }"; @@ -735,6 +776,7 @@ string responsePayload; int rval = plugin->commonDelete(tableName, payload); if (rval != -1) { + registry.processTableDelete(tableName, payload); responsePayload = "{ \"response\" : \"deleted\", \"rows_affected\" : "; responsePayload += to_string(rval); responsePayload += " }"; @@ -1118,6 +1160,82 @@ Document doc; } } +/** + * Register interest in readings for an asset + */ +void StorageApi::tableRegister(shared_ptr response, + shared_ptr request) +{ +string table; +string payload; +Document doc; + + payload = request->content.string(); + // URL decode table name + table = urlDecode(request->path_match[TABLE_NAME_COMPONENT]); + + doc.Parse(payload.c_str()); + if (doc.HasParseError()) + { + string resp = "{ \"error\" : \"Badly formed payload\" }"; + respond(response, + SimpleWeb::StatusCode::client_error_bad_request, + resp); + } + else + { + if (doc.HasMember("url")) + { + registry.registerTable(table, payload); + string resp = " { \"" + table + "\" : \"registered\" }"; + respond(response, resp); + } + else + { + string resp = "{ \"error\" : \"Missing url element in payload\" }"; + respond(response, SimpleWeb::StatusCode::client_error_bad_request, resp); + } + } +} + +/** + * Unregister interest in readings for an asset + */ +void StorageApi::tableUnregister(shared_ptr response, + shared_ptr request) +{ +string table; +string payload; +Document doc; + + payload = 
request->content.string(); + // URL decode table name + table = urlDecode(request->path_match[TABLE_NAME_COMPONENT]); + + doc.Parse(payload.c_str()); + if (doc.HasParseError()) + { + string resp = "{ \"error\" : \"Badly formed payload\" }"; + respond(response, + SimpleWeb::StatusCode::client_error_bad_request, + resp); + } + else + { + if (doc.HasMember("url")) + { + registry.unregisterTable(table, payload); + string resp = " { \"" + table + "\" : \"unregistered\" }"; + respond(response, resp); + } + else + { + string resp = "{ \"error\" : \"Missing url element in payload\" }"; + respond(response, SimpleWeb::StatusCode::client_error_bad_request, resp); + } + } +} + /** * Create a stream for high speed storage ingestion * @@ -1158,14 +1276,11 @@ string responsePayload; /** * Append the readings that have arrived via a stream to the storage plugin * - * @param readings A Null terminiunated array of points to ReadingStream structures + * @param readings A Null terminated array of points to ReadingStream structures * @param commit A flag to commit the readings block */ bool StorageApi::readingStream(ReadingStream **readings, bool commit) { - int c; - for (c = 0; readings[c]; c++); - Logger::getLogger()->debug("ReadingStream called with %d", c); if ((readingPlugin ? readingPlugin : plugin)->hasStreamSupport()) { return (readingPlugin ? readingPlugin : plugin)->readingStream(readings, commit); @@ -1449,6 +1564,7 @@ string responsePayload; int rval = plugin->commonInsert(tableName, payload, const_cast(schemaName.c_str())); if (rval != -1) { + registry.processTableInsert(tableName, payload); responsePayload = "{ \"response\" : \"inserted\", \"rows_affected\" : "; responsePayload += to_string(rval); responsePayload += " }"; @@ -1527,6 +1643,7 @@ string responsePayload; int rval = plugin->commonUpdate(tableName, payload, const_cast(schemaName.c_str())); if (rval != -1) { + registry.processTableUpdate(tableName, payload); responsePayload = "{ \"response\" : \"updated\", \"rows_affected\" : "; responsePayload += to_string(rval); responsePayload += " }"; @@ -1565,6 +1682,7 @@ string responsePayload; int rval = plugin->commonDelete(tableName, payload, const_cast(schemaName.c_str())); if (rval != -1) { + registry.processTableDelete(tableName, payload); responsePayload = "{ \"response\" : \"deleted\", \"rows_affected\" : "; responsePayload += to_string(rval); responsePayload += " }"; diff --git a/C/services/storage/storage_registry.cpp b/C/services/storage/storage_registry.cpp index 0e1e384721..f97798c2e4 100644 --- a/C/services/storage/storage_registry.cpp +++ b/C/services/storage/storage_registry.cpp @@ -8,6 +8,8 @@ * Author: Mark Riddoch */ #include +#include "rapidjson/stringbuffer.h" +#include #include "storage_registry.h" #include "client_http.hpp" #include "server_http.hpp" @@ -46,13 +48,14 @@ static void worker(StorageRegistry *registry) * code, or * for all assets, that URL will then be called when new * data arrives for the particular asset. * - * The servce registry maintians a worker thread that is responsible + * The service registry maintians a worker thread that is responsible * for sending these notifications such that the main flow of data into * the storage layer is minimally impacted by the registration and * delivery of these messages to interested microservices. 
*/ -StorageRegistry::StorageRegistry() +StorageRegistry::StorageRegistry() : m_thread(NULL) { + m_running = true; m_thread = new thread(worker, this); } @@ -62,7 +65,22 @@ StorageRegistry::StorageRegistry() StorageRegistry::~StorageRegistry() { m_running = false; - m_thread->join(); + m_cv.notify_all(); + if (m_thread) + { + if (m_thread->joinable()) + m_thread->join(); + delete m_thread; + m_thread = NULL; + } + while (!m_queue.empty()) + m_queue.pop(); + while (!m_tableInsertQueue.empty()) + m_tableInsertQueue.pop(); + while (!m_tableUpdateQueue.empty()) + m_tableUpdateQueue.pop(); + while (!m_tableDeleteQueue.empty()) + m_tableDeleteQueue.pop(); } /** @@ -94,6 +112,105 @@ StorageRegistry::process(const string& payload) } } +/** + * Process a table insert payload and determine + * if any microservice has registered an interest + * in this table. Called from StorageApi::commonInsert() + * + * @param payload The table insert payload + */ +void +StorageRegistry::processTableInsert(const string& tableName, const string& payload) +{ + Logger::getLogger()->debug("StorageRegistry::processTableInsert(): tableName=%s, payload=%s", tableName.c_str(), payload.c_str()); + + if (m_tableRegistrations.size() > 0) + { + /* + * We have some registrations so queue a copy of the payload + * to be examined in the thread the send table notifications + * to interested parties. + */ + char *table = strdup(tableName.c_str()); + char *data = strdup(payload.c_str()); + + if (data != NULL && table != NULL) + { + time_t now = time(0); + TableItem item = make_tuple(now, table, data); + lock_guard guard(m_qMutex); + m_tableInsertQueue.push(item); + m_cv.notify_all(); + } + } +} + +/** + * Process a table update payload and determine + * if any microservice has registered an interest + * in this table. Called from StorageApi::commonUpdate() + * + * @param payload The table update payload + */ +void +StorageRegistry::processTableUpdate(const string& tableName, const string& payload) +{ + Logger::getLogger()->info("Checking for registered interest in table %s with update %s", tableName.c_str(), payload.c_str()); + + if (m_tableRegistrations.size() > 0) + { + /* + * We have some registrations so queue a copy of the payload + * to be examined in the thread the send table notifications + * to interested parties. + */ + char *table = strdup(tableName.c_str()); + char *data = strdup(payload.c_str()); + + if (data != NULL && table != NULL) + { + time_t now = time(0); + TableItem item = make_tuple(now, table, data); + lock_guard guard(m_qMutex); + m_tableUpdateQueue.push(item); + m_cv.notify_all(); + } + } +} + +/** + * Process a table delete payload and determine + * if any microservice has registered an interest + * in this table. Called from StorageApi::commonDelete() + * + * @param payload The table delete payload + */ +void +StorageRegistry::processTableDelete(const string& tableName, const string& payload) +{ + Logger::getLogger()->info("Checking for registered interest in table %s with delete %s", tableName.c_str(), payload.c_str()); + + if (m_tableRegistrations.size() > 0) + { + /* + * We have some registrations so queue a copy of the payload + * to be examined in the thread the send table notifications + * to interested parties. 
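Each of the three processTable*() methods hands a (timestamp, table, payload) tuple to the worker thread. The strings are strdup()ed so the copies outlive the HTTP request that produced them, and the worker is then responsible for free()ing them after delivery. A condensed sketch of that hand-off, with the Fledge types replaced by plain standard-library ones:

```cpp
#include <condition_variable>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <mutex>
#include <queue>
#include <string>
#include <tuple>

// (time queued, table name, payload); the char pointers are heap copies owned by the queue
using TableItem = std::tuple<time_t, char *, char *>;

std::queue<TableItem>   insertQueue;
std::mutex              qMutex;
std::condition_variable cv;

// Producer side, called on the storage API thread
void queueInsert(const std::string& table, const std::string& payload)
{
    char *t = strdup(table.c_str());
    char *p = strdup(payload.c_str());
    if (t && p)
    {
        std::lock_guard<std::mutex> guard(qMutex);
        insertQueue.push(std::make_tuple(time(0), t, p));
        cv.notify_all();
    }
}

// Consumer side, on the worker thread: unpack, deliver, then release the copies
void drainOne()
{
    std::lock_guard<std::mutex> guard(qMutex);
    if (insertQueue.empty())
        return;
    TableItem item = insertQueue.front();
    insertQueue.pop();
    // ... notify interested parties here ...
    free(std::get<1>(item));   // table name
    free(std::get<2>(item));   // payload
}
```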
+ */ + char *table = strdup(tableName.c_str()); + char *data = strdup(payload.c_str()); + + if (data != NULL && table != NULL) + { + time_t now = time(0); + TableItem item = make_tuple(now, table, data); + lock_guard guard(m_qMutex); + m_tableDeleteQueue.push(item); + m_cv.notify_all(); + } + } +} + /** * Handle a registration request from a client of the storage layer * @@ -103,6 +220,7 @@ StorageRegistry::process(const string& payload) void StorageRegistry::registerAsset(const string& asset, const string& url) { + lock_guard guard(m_registrationsMutex); m_registrations.push_back(pair(new string(asset), new string(url))); } @@ -115,6 +233,7 @@ StorageRegistry::registerAsset(const string& asset, const string& url) void StorageRegistry::unregisterAsset(const string& asset, const string& url) { + lock_guard guard(m_registrationsMutex); for (auto it = m_registrations.begin(); it != m_registrations.end(); ) { if (asset.compare(*(it->first)) == 0 && url.compare(*(it->second)) == 0) @@ -130,14 +249,148 @@ StorageRegistry::unregisterAsset(const string& asset, const string& url) } } +/** + * Parse a table subscription (un)register JSON payload + * + * @param payload JSON payload describing the interest + */ +TableRegistration* StorageRegistry::parseTableSubscriptionPayload(const string& payload) +{ + Document doc; + + doc.Parse(payload.c_str()); + if (doc.HasParseError()) + { + Logger::getLogger()->error("StorageRegistry::parseTableSubscriptionPayload(): Parse error in subscription request payload"); + return NULL; + } + if (!doc.HasMember("url")) + { + Logger::getLogger()->error("StorageRegistry::parseTableSubscriptionPayload(): subscription request doesn't have url field"); + return NULL; + } + if (!doc.HasMember("key")) + { + Logger::getLogger()->error("StorageRegistry::parseTableSubscriptionPayload(): subscription request doesn't have url field"); + return NULL; + } + if (!doc.HasMember("operation")) + { + Logger::getLogger()->error("StorageRegistry::parseTableSubscriptionPayload(): subscription request doesn't have url field"); + return NULL; + } + + TableRegistration *reg = new TableRegistration; + + reg->url = doc["url"].GetString(); + reg->key = doc["key"].GetString(); + reg->operation = doc["operation"].GetString(); + + if (reg->key.size()) + { + if (!doc.HasMember("values") || !doc["values"].IsArray()) + { + Logger::getLogger()->error("Subscription request" \ + " doesn't have a proper values field, payload=%s", payload.c_str()); + delete reg; + return NULL; + } + for (auto & v : doc["values"].GetArray()) + reg->keyValues.emplace_back(v.GetString()); + } + + return reg; +} + +/** + * Handle a registration request for a table from a client of the storage layer + * + * @param table The table of interest + * @param payload JSON payload describing the interest + */ +void +StorageRegistry::registerTable(const string& table, const string& payload) +{ + TableRegistration *reg = parseTableSubscriptionPayload(payload); + + if (!reg) + { + Logger::getLogger()->error("Unable to register invalid Registration entry for table %s, payload %s", + table.c_str(), payload.c_str()); + return; + } + + lock_guard guard(m_tableRegistrationsMutex); + Logger::getLogger()->info("Adding registration entry for table %s", table.c_str()); + m_tableRegistrations.push_back(pair(new string(table), reg)); +} + +/** + * Handle a request to remove a registration of interest in a table + * + * @param table The table of interest + * @param payload JSON payload describing the interest + */ +void 
+StorageRegistry::unregisterTable(const string& table, const string& payload) +{ + TableRegistration *reg = parseTableSubscriptionPayload(payload); + + if (!reg) + { + Logger::getLogger()->info("Invalid Registration entry for table %s, payload %s", + table.c_str(), payload.c_str()); + return; + } + + lock_guard guard(m_tableRegistrationsMutex); + + Logger::getLogger()->info("%d entries registered interest in table operations", m_tableRegistrations.size()); + bool found = false; + for (auto it = m_tableRegistrations.begin(); found == false && it != m_tableRegistrations.end(); ) + { + TableRegistration *reg_it = it->second; + if (table.compare(*(it->first)) == 0 && + reg->url.compare(reg_it->url)==0 && + reg->key.compare(reg_it->key)==0 && + reg->operation.compare(reg_it->operation)==0) + { + // Either no key is to be matched or a key is to be matched against a possible set of values + if (reg->key.size()==0 || (reg->key.size()>0 && reg->keyValues == reg_it->keyValues)) + { + delete it->first; + delete it->second; + it = m_tableRegistrations.erase(it); + Logger::getLogger()->info("Removed registration for table %s and url %s", table, reg->key.c_str()); + found = true; + } + else + { + ++it; + } + } + else + { + ++it; + } + } + if (!found) + { + Logger::getLogger()->warn( + "Failed to remove subscription for table '%s' using key '%s' with operation '%s' and url '%s'", + table.c_str(), reg->key.c_str(), reg->operation.c_str(), reg->url.c_str()); + } + delete reg; +} + + /** * The worker function that processes the queue of payloads * that may need to be sent to subscribers. */ void StorageRegistry::run() -{ - m_running = true; +{ while (m_running) { char *data = NULL; @@ -146,7 +399,7 @@ StorageRegistry::run() #endif { unique_lock mlock(m_cvMutex); - while (m_queue.size() == 0) + while (m_queue.size() == 0 && m_tableInsertQueue.size() == 0 && m_tableUpdateQueue.size() == 0 && m_tableDeleteQueue.size() == 0) { m_cv.wait_for(mlock, std::chrono::seconds(REGISTRY_SLEEP_TIME)); if (!m_running) @@ -154,23 +407,102 @@ StorageRegistry::run() return; } } - Item item = m_queue.front(); - m_queue.pop(); - data = item.second; + + while (!m_queue.empty()) + { + Item item = m_queue.front(); + m_queue.pop(); + data = item.second; #if CHECK_QTIMES - qTime = item.first; + qTime = item.first; #endif - } - if (data) - { + if (data) + { +#if CHECK_QTIMES + if (time(0) - qTime > QTIME_THRESHOLD) + { + Logger::getLogger()->error("Readings data has been queued for %d seconds to be sent to registered party", (time(0) - qTime)); + } +#endif + processPayload(data); + free(data); + } + } + + while (!m_tableInsertQueue.empty()) + { + char *tableName = NULL; + + TableItem item = m_tableInsertQueue.front(); + m_tableInsertQueue.pop(); + tableName = get<1>(item); + data = get<2>(item); +#if CHECK_QTIMES + qTime = item.first; +#endif + if (tableName && data) + { #if CHECK_QTIMES - if (time(0) - qTime > QTIME_THRESHOLD) + if (time(0) - qTime > QTIME_THRESHOLD) + { + Logger::getLogger()->error("Table insert data has been queued for %d seconds to be sent to registered party", (time(0) - qTime)); + } +#endif + processInsert(tableName, data); + free(tableName); + free(data); + } + } + + while (!m_tableUpdateQueue.empty()) { - Logger::getLogger()->error("Data has been queued for %d seconds to be sent to registered party", (time(0) - qTime)); + char *tableName = NULL; + + TableItem item = m_tableUpdateQueue.front(); + m_tableUpdateQueue.pop(); + tableName = get<1>(item); + data = get<2>(item); +#if CHECK_QTIMES + qTime = 
item.first; +#endif + if (tableName && data) + { +#if CHECK_QTIMES + if (time(0) - qTime > QTIME_THRESHOLD) + { + Logger::getLogger()->error("Table update data has been queued for %d seconds to be sent to registered party", (time(0) - qTime)); + } +#endif + processUpdate(tableName, data); + free(tableName); + free(data); + } } + + while (!m_tableDeleteQueue.empty()) + { + char *tableName = NULL; + + TableItem item = m_tableDeleteQueue.front(); + m_tableDeleteQueue.pop(); + tableName = get<1>(item); + data = get<2>(item); +#if CHECK_QTIMES + qTime = item.first; +#endif + if (tableName && data) + { +#if CHECK_QTIMES + if (time(0) - qTime > QTIME_THRESHOLD) + { + Logger::getLogger()->error("Table delete data has been queued for %d seconds to be sent to registered party", (time(0) - qTime)); + } #endif - processPayload(data); - free(data); + processDelete(tableName, data); + free(tableName); + free(data); + } + } } } } @@ -186,6 +518,8 @@ StorageRegistry::processPayload(char *payload) { bool allDone = true; + lock_guard guard(m_registrationsMutex); + // First of all deal with those that registered for all assets for (REGISTRY::const_iterator it = m_registrations.cbegin(); it != m_registrations.cend(); it++) { @@ -224,13 +558,12 @@ bool allDone = true; * @param payload The payload to send */ void -StorageRegistry::sendPayload(const string& url, char *payload) +StorageRegistry::sendPayload(const string& url, const char *payload) { size_t found = url.find_first_of("://"); size_t found1 = url.find_first_of("/", found + 3); string hostport = url.substr(found+3, found1 - found - 3); string resource = url.substr(found1); - HttpClient client(hostport); try { client.request("POST", resource, payload); @@ -317,3 +650,338 @@ ostringstream convert; Logger::getLogger()->error("filterPayload: exception %s sending reading data to interested party %s", e.what(), url.c_str()); } } + +/** + * Process an incoming payload and distribute as required to registered + * services + * + * @param payload The payload to potentially distribute + */ +void +StorageRegistry::processInsert(char *tableName, char *payload) +{ + Logger::getLogger()->debug("StorageRegistry::processInsert(): Handling for table:%s, payload=%s", tableName, payload); + Logger::getLogger()->debug("StorageRegistry::processInsert(): m_tableRegistrations.size()=%d", m_tableRegistrations.size()); + + Document payloadDoc; + + payloadDoc.Parse(payload); + if (payloadDoc.HasParseError()) + { + Logger::getLogger()->error("Internal error unable to parse payload for insert into table %s, payload is %s", tableName, payload); + return; + } + + lock_guard guard(m_tableRegistrationsMutex); + for (auto & reg : m_tableRegistrations) + { + if (reg.first->compare(tableName) != 0) + continue; + + TableRegistration *tblreg = reg.second; + + // If key is empty string, no need to match key/value pair in payload + // Also operation must be "insert" for initial implementation + if (tblreg->operation.compare("insert") != 0) + { + continue; + } + + if (tblreg->key.size() == 0) + { + sendPayload(tblreg->url, payload); + } + else + { + if (payloadDoc.HasMember("inserts") && payloadDoc["inserts"].IsArray()) + { + // We have multiple inserts in the payload, parse each one and send + // only the insert for which the key has been registered + Value &inserts = payloadDoc["inserts"]; + for (Value::ConstValueIterator iter = inserts.Begin(); + iter != inserts.End(); ++iter) + { + if (iter->HasMember(tblreg->key.c_str())) + { + string payloadKeyValue = 
(*iter)[tblreg->key.c_str()].GetString(); + if (std::find(tblreg->keyValues.begin(), tblreg->keyValues.end(), payloadKeyValue) != tblreg->keyValues.end()) + { + StringBuffer buffer; + Writer writer(buffer); + iter->Accept(writer); + + const char *output = buffer.GetString(); + sendPayload(tblreg->url, output); + } + } + } + } + else + { + if (payloadDoc.HasMember(tblreg->key.c_str()) && payloadDoc[tblreg->key.c_str()].IsString()) + { + string payloadKeyValue = payloadDoc[tblreg->key.c_str()].GetString(); + if (std::find(tblreg->keyValues.begin(), tblreg->keyValues.end(), payloadKeyValue) != tblreg->keyValues.end()) + { + sendPayload(tblreg->url, payload); + } + } + } + } + } +} + +/** + * Process an incoming payload and distribute as required to registered + * services + * + * @param payload The payload to potentially distribute + */ +void +StorageRegistry::processUpdate(char *tableName, char *payload) +{ + Document doc; + + doc.Parse(payload); + if (doc.HasParseError()) + { + Logger::getLogger()->error("Unable to parse table update payload for table %s, request is %s", tableName, payload); + return; + } + + lock_guard guard(m_tableRegistrationsMutex); + for (auto & reg : m_tableRegistrations) + { + if (reg.first->compare(tableName) != 0) + continue; + + TableRegistration *tblreg = reg.second; + + // If key is empty string, no need to match key/value pair in payload + if (tblreg->operation.compare("update") != 0) + { + continue; + } + + if (tblreg->key.empty()) + { + // No key to match, send all updates to table + sendPayload(tblreg->url, payload); + } + else + { + if (doc.HasMember("updates") && doc["updates"].IsArray()) + { + // Multiple updates in a single call + Value &updates = doc["updates"]; + for (Value::ConstValueIterator iter = updates.Begin(); + iter != updates.End(); ++iter) + { + const Value& where = (*iter)["where"]; + if (where.HasMember("column") && where["column"].IsString() && + where.HasMember("value") && where["value"].IsString()) + { + string updateKey = where["column"].GetString(); + string keyValue = where["value"].GetString(); + if (updateKey.compare(tblreg->key) == 0 && + std::find(tblreg->keyValues.begin(), tblreg->keyValues.end(), keyValue) + != tblreg->keyValues.end()) + { + if (iter->HasMember("values")) + { + const Value& values = (*iter)["values"]; + StringBuffer buffer; + Writer writer(buffer); + values.Accept(writer); + + const char *output = buffer.GetString(); + sendPayload(tblreg->url, output); + } + else if (iter->HasMember("expressions")) + { + const Value& expressions = (*iter)["expressions"]; + for (Value::ConstValueIterator expr = expressions.Begin(); + expr != expressions.End(); ++expr) + { + StringBuffer buffer; + Writer writer(buffer); + expr->Accept(writer); + + const char *output = buffer.GetString(); + sendPayload(tblreg->url, output); + } + } + } + } + } + } + else if (doc.HasMember("where") && doc["where"].IsObject()) + { + const Value& where = doc["where"]; + if (where.HasMember("column") && where["column"].IsString() && + where.HasMember("value") && where["value"].IsString()) + { + string updateKey = where["column"].GetString(); + string keyValue = where["value"].GetString(); + if (updateKey.compare(tblreg->key) == 0 && + std::find(tblreg->keyValues.begin(), tblreg->keyValues.end(), keyValue) + != tblreg->keyValues.end()) + { + if (doc.HasMember("values")) + { + const Value& values = doc["values"]; + StringBuffer buffer; + Writer writer(buffer); + values.Accept(writer); + + const char *output = buffer.GetString(); + 
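For a registration such as the test subscription on the log table (key "code" with values CONAD, PURGE, ...), only the matching entries of a multi-row insert are forwarded, each serialised on its own. The standalone sketch below walks a hypothetical insert payload through the same key/value filter; the column names other than "code" are invented for illustration and the real work is done inside processInsert(), not by this program.

```cpp
// Standalone sketch of the key/value filtering applied by processInsert().
#include <algorithm>
#include <iostream>
#include <string>
#include <vector>
#include "rapidjson/document.h"
#include "rapidjson/stringbuffer.h"
#include "rapidjson/writer.h"

using namespace rapidjson;

int main()
{
    std::vector<std::string> keyValues = { "CONAD", "PURGE", "CONCH", "FSTOP", "SRVRG" };
    std::string key = "code";

    // Hypothetical multi-row insert payload for the "log" table
    const char *payload = R"***({ "inserts" : [
        { "code" : "PURGE", "level" : "information" },
        { "code" : "START", "level" : "information" } ] })***";

    Document doc;
    doc.Parse(payload);
    if (doc.HasParseError() || !doc.HasMember("inserts") || !doc["inserts"].IsArray())
        return 1;

    for (auto& row : doc["inserts"].GetArray())
    {
        if (!row.HasMember(key.c_str()))
            continue;
        std::string value = row[key.c_str()].GetString();
        if (std::find(keyValues.begin(), keyValues.end(), value) != keyValues.end())
        {
            StringBuffer buffer;
            Writer<StringBuffer> writer(buffer);
            row.Accept(writer);
            std::cout << "forward to subscriber: " << buffer.GetString() << std::endl;
        }
    }
    return 0;   // prints only the "PURGE" row
}
```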
sendPayload(tblreg->url, output); + } + else if (doc.HasMember("expressions")) + { + const Value& expressions = doc["expressions"]; + for (Value::ConstValueIterator expr = expressions.Begin(); + expr != expressions.End(); ++expr) + { + StringBuffer buffer; + Writer writer(buffer); + expr->Accept(writer); + + const char *output = buffer.GetString(); + sendPayload(tblreg->url, output); + } + } + } + } + } + } + } +} + +/** + * Process an incoming payload and distribute as required to registered + * services + * + * @param payload The payload to potentially distribute + */ +void +StorageRegistry::processDelete(char *tableName, char *payload) +{ + Document doc; + + doc.Parse(payload); + if (doc.HasParseError()) + { + Logger::getLogger()->error("Unable to parse table delete payload for table %s, request is %s", tableName, payload); + return; + } + + lock_guard guard(m_tableRegistrationsMutex); + for (auto & reg : m_tableRegistrations) + { + if (reg.first->compare(tableName) != 0) + continue; + + TableRegistration *tblreg = reg.second; + + // If key is empty string, no need to match key/value pair in payload + if (tblreg->operation.compare("delete") != 0) + { + continue; + } + if (tblreg->key.empty()) + { + // No key to match, send all updates to table + sendPayload(tblreg->url, payload); + } + else + { + if (doc.HasMember("where") && doc["where"].IsObject()) + { + const Value& where = doc["where"]; + if (where.HasMember("column") && where["column"].IsString() && + where.HasMember("value") && where["value"].IsString()) + { + string updateKey = where["column"].GetString(); + string keyValue = where["value"].GetString(); + if (updateKey.compare(tblreg->key) == 0 && + std::find(tblreg->keyValues.begin(), tblreg->keyValues.end(), keyValue) + != tblreg->keyValues.end()) + { + StringBuffer buffer; + Writer writer(buffer); + where.Accept(writer); + + const char *output = buffer.GetString(); + sendPayload(tblreg->url, output); + } + } + } + } + } +} + +/** + * Test function to add some dummy/test table subscriptions + */ +void StorageRegistry::insertTestTableReg() +{ + string table1("log"); + string payload1 = R"***( {"url": "http://localhost:8081/dummyTableNotifyUrl", "key": "code", "values":["CONAD", "PURGE", "CONCH", "FSTOP", "SRVRG"], "operation": "insert"} )***"; + + string table2("asset_tracker"); + string payload2 = R"***( {"url": "http://localhost:8081/dummyTableNotifyUrl2", "key": "", "operation": "insert"} )***"; + + string table3("asset_tracker"); + string payload3 = R"***( {"url": "http://localhost:8081/dummyTableNotifyUrl3", "key": "event", "values":["Ingest", "Filter"], "operation": "insert"} )***"; + + Logger::getLogger()->error("StorageRegistry::insertTestTableReg(): table=%s, payload=%s", table1.c_str(), payload1.c_str()); + registerTable(table1, payload1); + + Logger::getLogger()->error("StorageRegistry::insertTestTableReg(): table=%s, payload=%s", table2.c_str(), payload2.c_str()); + registerTable(table2, payload2); + + Logger::getLogger()->error("StorageRegistry::insertTestTableReg(): table=%s, payload=%s", table3.c_str(), payload3.c_str()); + registerTable(table3, payload3); +} + +/** + * Test function to remove a dummy/test table subscription + * + * @param n The subscription number to remove + */ +void StorageRegistry::removeTestTableReg(int n) +{ + string table1("log"); + string payload1 = R"***( {"url": "http://localhost:8081/dummyTableNotifyUrl", "key": "code", "values":["CONAD", "PURGE", "CONCH", "FSTOP", "SRVRG"], "operation": "insert"} )***"; + + string 
table2("asset_tracker"); + string payload2 = R"***( {"url": "http://localhost:8081/dummyTableNotifyUrl2", "key": "", "operation": "insert"} )***"; + + string table3("asset_tracker"); + string payload3 = R"***( {"url": "http://localhost:8081/dummyTableNotifyUrl3", "key": "event", "values":["Ingest", "Filter"], "operation": "insert"} )***"; + + switch(n) + { + case 1: + unregisterTable(table1, payload1); + Logger::getLogger()->error("StorageRegistry::removeTestTableReg(): table=%s, payload=%s", table1.c_str(), payload1.c_str()); + break; + + case 2: + unregisterTable(table2, payload2); + Logger::getLogger()->error("StorageRegistry::removeTestTableReg(): table=%s, payload=%s", table2.c_str(), payload2.c_str()); + break; + + case 3: + unregisterTable(table3, payload3); + Logger::getLogger()->error("StorageRegistry::removeTestTableReg(): table=%s, payload=%s", table3.c_str(), payload3.c_str()); + break; + + default: + Logger::getLogger()->error("StorageRegistry::removeTestTableReg(): unhandled value n=%d", n); + break; + } +} + + diff --git a/C/services/storage/stream_handler.cpp b/C/services/storage/stream_handler.cpp index 49cac60f64..66e4e25b0e 100644 --- a/C/services/storage/stream_handler.cpp +++ b/C/services/storage/stream_handler.cpp @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include @@ -74,11 +73,27 @@ void StreamHandler::handler() } else { - int nfds = epoll_wait(m_pollfd, events, MAX_EVENTS, 1); - for (int i = 0; i < nfds; i++) + /* + * Call epoll_wait with a zero timeout to see if any data is available. + * If not then call with a tiemout. This prevents Linux from scheduling + * us out if there is data on the socket. + */ + int nfds = epoll_wait(m_pollfd, events, MAX_EVENTS, 100); + if (nfds == 0) { - Stream *stream = (Stream *)events[i].data.ptr; - stream->handleEvent(m_pollfd, m_api, events[i].events); + nfds = epoll_wait(m_pollfd, events, MAX_EVENTS, 100); + } + if (nfds == -1) + { + Logger::getLogger()->error("Stream epoll error: %s", strerror(errno)); + } + else + { + for (int i = 0; i < nfds; i++) + { + Stream *stream = (Stream *)events[i].data.ptr; + stream->handleEvent(m_pollfd, m_api, events[i].events); + } } } } @@ -125,6 +140,9 @@ StreamHandler::Stream::~Stream() * will connect to this port and then send the token to verify they are the * service that requested the stream to be connected. * + * The client calls a REST API endpoint in the storage layer to request a streaming + * connection which results in this method beign called. 
+ * * @param epollfd The epoll descriptor * @param token The single use token the client will send in the connect request */ @@ -132,12 +150,14 @@ uint32_t StreamHandler::Stream::create(int epollfd, uint32_t *token) { struct sockaddr_in address; + // Create the memory pool from whuch readings will be allocated if ((m_blockPool = new MemoryPool(BLOCK_POOL_SIZES)) == NULL) { Logger::getLogger()->error("Failed to create memory block pool"); return 0; } + // Open the socket used to listen for the incoming stream connection if ((m_socket = socket(AF_INET, SOCK_STREAM, 0)) < 0) { Logger::getLogger()->error("Failed to create socket: %s", strerror(errno)); @@ -166,13 +186,15 @@ struct sockaddr_in address; } m_status = Listen; + // Create the random token that is used to verify the connection comes from the + // source that requested the streaming connection srand(m_port + (unsigned int)time(0)); m_token = (uint32_t)random() & 0xffffffff; *token = m_token; // Add to epoll set m_event.data.ptr = this; - m_event.events = EPOLLIN | EPOLLRDHUP; + m_event.events = EPOLLIN | EPOLLRDHUP | EPOLLHUP | EPOLLPRI | EPOLLERR; if (epoll_ctl(epollfd, EPOLL_CTL_ADD, m_socket, &m_event) < 0) { Logger::getLogger()->error("Failed to add listening port %d to epoll fileset, %s", m_port, strerror(errno)); @@ -198,6 +220,10 @@ void StreamHandler::Stream::setNonBlocking(int fd) * Handle an epoll event. The precise handling will depend * on the state of the stream. * + * One of the things done here is to handle the streaming protocol, + * reading the block header the individual reading headers and the + * readings themselves. + * * TODO Improve memory handling, use seperate threads for inserts, send acknowledgements * * @param epollfd The epoll file descriptor @@ -211,12 +237,38 @@ ssize_t n; // TODO mark this stream for destruction epoll_ctl(epollfd, EPOLL_CTL_DEL, m_socket, &m_event); close(m_socket); - Logger::getLogger()->warn("Closing stream..."); + Logger::getLogger()->error("Closing stream..."); + m_status = Closed; + } + if (events & EPOLLHUP) + { + // TODO mark this stream for destruction + epoll_ctl(epollfd, EPOLL_CTL_DEL, m_socket, &m_event); + close(m_socket); + Logger::getLogger()->error("Hangup on socket Closing stream..."); + m_status = Closed; + } + if (events & EPOLLPRI) + { + // TODO mark this stream for destruction + epoll_ctl(epollfd, EPOLL_CTL_DEL, m_socket, &m_event); + close(m_socket); + Logger::getLogger()->error("Eceptional condition on socket Closing stream..."); + m_status = Closed; + } + if (events & EPOLLERR) + { + // TODO mark this stream for destruction + epoll_ctl(epollfd, EPOLL_CTL_DEL, m_socket, &m_event); + close(m_socket); + m_status = Closed; + Logger::getLogger()->error("Error condition on socket Closing stream..."); } if (events & EPOLLIN) { if (m_status == Listen) { + // Accept the connection for the streaming data int conn_sock; struct sockaddr addr; socklen_t addrlen = sizeof(addr); @@ -226,15 +278,19 @@ ssize_t n; Logger::getLogger()->info("Accept failed for streaming socket: %s", strerror(errno)); return; } + + // Remove and close the listening socket now we have a connection epoll_ctl(epollfd, EPOLL_CTL_DEL, m_socket, &m_event); close(m_socket); Logger::getLogger()->info("Stream connection established"); m_socket = conn_sock; m_status = AwaitingToken; - setNonBlocking(m_socket); - m_event.events = EPOLLIN | EPOLLRDHUP; + m_event.events = EPOLLIN | EPOLLRDHUP | EPOLLHUP | EPOLLERR | EPOLLPRI | EPOLLET; m_event.data.ptr = this; - epoll_ctl(epollfd, EPOLL_CTL_ADD, m_socket, 
&m_event); + if (epoll_ctl(epollfd, EPOLL_CTL_ADD, m_socket, &m_event) == -1) + { + Logger::getLogger()->fatal("Failed to add data socket to epoll set: %s", strerror(errno)); + } } else if (m_status == AwaitingToken) { @@ -244,7 +300,10 @@ ssize_t n; return; } if ((n = read(m_socket, &hdr, sizeof(hdr))) != (int)sizeof(hdr)) - Logger::getLogger()->warn("Token exchange: Short read of %d bytes: %s", n, strerror(errno)); + { + Logger::getLogger()->warn("Token exchange failed: Short read of %d bytes: %s", n, strerror(errno)); + return; + } if (hdr.magic == RDS_CONNECTION_MAGIC && hdr.token == m_token) { m_status = Connected; @@ -261,6 +320,18 @@ ssize_t n; } else if (m_status == Connected) { + /* + * We are connected so loop on the available data reading block headers, + * reading headers and the readings themselves. + * + * We use the available method to see if there is enough data before we + * read in order to avoid blocking in a red call. This also allows to + * not have to set the socket to non-blocking mode. meaning that our + * epoll interaction does not need to be edge triggered. + * + * Once we exhaust the data that is aviaabnle we return and allow the + * epoll to inform us when more data becomes available. + */ while (1) { Logger::getLogger()->debug("Connected in protocol state %d, readingNo %d", m_protocolState, m_readingNo); @@ -274,8 +345,7 @@ ssize_t n; } if ((n = read(m_socket, &blkHdr, sizeof(blkHdr))) != (int)sizeof(blkHdr)) { - if (errno == EAGAIN) - return; + // This should never happen as avialable said we had enough data Logger::getLogger()->warn("Block Header: Short read of %d bytes: %s", n, strerror(errno)); return; } @@ -289,26 +359,38 @@ ssize_t n; } if (blkHdr.blockNumber != m_blockNo) { + // Somehow we lost a block } m_blockNo++; m_blockSize = blkHdr.count; m_protocolState = RdHdr; m_readingNo = 0; - Logger::getLogger()->debug("New block %d of %d readings", blkHdr.blockNumber, blkHdr.count); + Logger::getLogger()->info("New block %d of %d readings", blkHdr.blockNumber, blkHdr.count); } else if (m_protocolState == RdHdr) { + // We are expecting a reading header RDSReadingHeader rdhdr; if (available(m_socket) < sizeof(rdhdr)) { - Logger::getLogger()->debug("Not enough bytes for reading header"); + Logger::getLogger()->warn("Not enough bytes %d for reading header %d in block %d (socket %d)", available(m_socket), m_readingNo, m_blockNo - 1, m_socket); + static bool reported = false; + if (!reported) + { + char buf[40]; + int i; + i = recv(m_socket, buf, sizeof(buf), MSG_PEEK); + for (int j = 0; j < i; j++) + Logger::getLogger()->warn("Byte at %d is %x", j, buf[j]); + reported = true; + } return; } - if (read(m_socket, &rdhdr, sizeof(rdhdr)) < (int)sizeof(rdhdr)) + int n; + if ((n = read(m_socket, &rdhdr, sizeof(rdhdr))) < (int)sizeof(rdhdr)) { - if (errno == EAGAIN) - return; - Logger::getLogger()->warn("Not enough bytes for reading header"); + // Should never happen + Logger::getLogger()->warn("Not enough bytes read %d for reading header", n); return; } if (rdhdr.magic != RDS_READING_MAGIC) @@ -341,28 +423,45 @@ ssize_t n; } else if (m_protocolState == RdBody) { + // We are expecting a reading body if (available(m_socket) < m_readingSize) { - Logger::getLogger()->debug("Not enough bytes for reading %d", m_readingSize); + Logger::getLogger()->warn("Not enough bytes %d for reading %d in block %d", m_readingSize, m_readingNo, m_blockNo - 1); return; } - if (m_sameAsset) + struct iovec iov[3]; + + iov[0].iov_base = &m_currentReading->userTs; + iov[0].iov_len = sizeof(struct 
timeval); + + if (!m_sameAsset) { - if ((n = read(m_socket, &m_currentReading->userTs, sizeof(struct timeval))) != (int)sizeof(struct timeval)) - Logger::getLogger()->warn("Short read of %d bytes for timestamp: %s", n, strerror(errno)); - size_t plen = m_readingSize - sizeof(struct timeval); - uint32_t assetLen = m_currentReading->assetCodeLength; - if ((n = read(m_socket, &m_currentReading->assetCode[assetLen], plen)) < (int)plen) - Logger::getLogger()->warn("Short read of %d bytes for payload: %s", n, strerror(errno)); - memcpy(&m_currentReading->assetCode[0], m_lastAsset.c_str(), assetLen); + iov[1].iov_base = &m_currentReading->assetCode; + iov[1].iov_len = m_currentReading->assetCodeLength; + iov[2].iov_base = &m_currentReading->assetCode[m_currentReading->assetCodeLength]; + iov[2].iov_len = m_currentReading->payloadLength; + int n = readv(m_socket, iov, 3); + if (n != m_currentReading->assetCodeLength + + m_currentReading->payloadLength + sizeof(struct timeval)) + { + Logger::getLogger()->error("Short red for reading"); + } + + m_lastAsset = m_currentReading->assetCode; } else { - if ((n = read(m_socket, &m_currentReading->userTs, m_readingSize)) != (int)m_readingSize) - Logger::getLogger()->warn("Short read of %d bytes for reading: %s", n, strerror(errno)); - m_lastAsset = m_currentReading->assetCode; + iov[1].iov_base = &m_currentReading->assetCode[m_currentReading->assetCodeLength]; + iov[1].iov_len = m_currentReading->payloadLength; + int n = readv(m_socket, iov, 2); + if (n != m_currentReading->payloadLength + sizeof(struct timeval)) + { + Logger::getLogger()->error("Short red for reading"); + } + memcpy(&m_currentReading->assetCode[0], m_lastAsset.c_str(), m_currentReading->assetCodeLength); } m_readingNo++; + m_protocolState = RdHdr; if ((m_readingNo % RDS_BLOCK) == 0) { queueInsert(api, RDS_BLOCK, false); @@ -376,14 +475,12 @@ ssize_t n; queueInsert(api, m_readingNo % RDS_BLOCK, true); for (uint32_t i = 0; i < m_readingNo % RDS_BLOCK; i++) m_blockPool->release(m_readings[i]); - } - if (m_readingNo >= m_blockSize) - { m_protocolState = BlkHdr; + Logger::getLogger()->warn("Waiting for the next block header"); } - else + else if (m_readingNo > m_blockSize) { - m_protocolState = RdHdr; + Logger::getLogger()->error("Too many readings in block"); } } } diff --git a/C/tasks/north/sending_process/sending.cpp b/C/tasks/north/sending_process/sending.cpp index a50548c390..e17effed82 100755 --- a/C/tasks/north/sending_process/sending.cpp +++ b/C/tasks/north/sending_process/sending.cpp @@ -283,6 +283,11 @@ SendingProcess::SendingProcess(int argc, char** argv) : FledgeProcess(argc, argv // Init plugin with merged configuration from Fledge API this->m_plugin->init(config); + + if(m_dryRun) + { + return; + } if (this->m_plugin->m_plugin_data) { diff --git a/C/tasks/statistics_history/CMakeLists.txt b/C/tasks/statistics_history/CMakeLists.txt index b6924b3e71..893f5c91b1 100644 --- a/C/tasks/statistics_history/CMakeLists.txt +++ b/C/tasks/statistics_history/CMakeLists.txt @@ -4,7 +4,7 @@ project (statistics_history) set(CMAKE_CXX_FLAGS_DEBUG "-O0 -ggdb") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Wextra -Wsign-conversion") set(UUIDLIB -luuid) -set(COMMON_LIB common-lib) +set(COMMON_LIB common-lib services-common-lib) include_directories(. 
include ../../thirdparty/Simple-Web-Server ../../thirdparty/rapidjson/include ../../common/include) diff --git a/C/tasks/statistics_history/include/stats_history.h b/C/tasks/statistics_history/include/stats_history.h index 2409016a80..0973c5b584 100644 --- a/C/tasks/statistics_history/include/stats_history.h +++ b/C/tasks/statistics_history/include/stats_history.h @@ -12,6 +12,9 @@ */ #include +#include +#include +#include /** @@ -28,7 +31,11 @@ class StatsHistory : public FledgeProcess void run() const; private: - void processKey(const std::string& key) const; + void processKey(const std::string& key, std::vector &historyValues, + std::vector > &updateValues, std::string dateTimeStr, int val , int prev) const; + std::string getTime(void) const; + + }; #endif diff --git a/C/tasks/statistics_history/stats_history.cpp b/C/tasks/statistics_history/stats_history.cpp index cbc114114a..2ce91238bb 100644 --- a/C/tasks/statistics_history/stats_history.cpp +++ b/C/tasks/statistics_history/stats_history.cpp @@ -10,7 +10,12 @@ #include #include +#include +#include +#define DATETIME_MAX_LEN 52 +#define MICROSECONDS_FORMAT_LEN 10 +#define DATETIME_FORMAT_DEFAULT "%Y-%m-%d %H:%M:%S" using namespace std; @@ -45,6 +50,7 @@ StatsHistory::~StatsHistory() */ void StatsHistory::run() const { + // We handle these signals, add more if needed std::signal(SIGINT, signalHandler); std::signal(SIGSTOP, signalHandler); @@ -56,61 +62,124 @@ void StatsHistory::run() const // Get the set of distinct statistics keys Query query(new Returns("key")); query.distinct(); - ResultSet *keySet = getStorageClient()->queryTable("statistics", query); + query.returns(new Returns("value")); + query.returns(new Returns("previous_value")); + ResultSet *keySet = getStorageClient()->queryTable("statistics", query); ResultSet::RowIterator rowIter = keySet->firstRow(); + std::vector historyValues; + vector> updateValues; + + std::string dateTimeStr = getTime(); - do { + while (keySet->hasNextRow(rowIter) || keySet->isLastRow(rowIter) ) + { string key = (*rowIter)->getColumn("key")->getString(); + int val = (*rowIter)->getColumn("value")->getInteger(); + int prev = (*rowIter)->getColumn("previous_value")->getInteger(); + try { - processKey(key); + processKey(key, historyValues, updateValues, dateTimeStr, val, prev); } catch (exception e) { getLogger()->error("Failed to process statisitics key %s, %s", key, e.what()); } - rowIter = keySet->nextRow(rowIter); - } while (keySet->hasNextRow(rowIter)); + if (!keySet->isLastRow(rowIter)) + rowIter = keySet->nextRow(rowIter); + else + break; + } + + int n_rows; + if ((n_rows = getStorageClient()->insertTable("statistics_history", historyValues)) < 1) + { + getLogger()->error("Failed to insert rows to statistics history table "); + } + + if (getStorageClient()->updateTable("statistics", updateValues) < 1) + { + getLogger()->error("Failed to update rows to statistics table"); + } + + for (auto it = updateValues.begin(); it != updateValues.end() ; ++it) + { + InsertValue *updateValue = it->first; + if (updateValue) + { + delete updateValue; + updateValue=nullptr; + } + Where *wKey = it->second; + if(wKey) + { + delete wKey; + wKey = nullptr; + } + } delete keySet; } /** - * Process a single statistics key + * Process statistics keys * - * @param key The statistics key to process + * @param key The statistics key to process + * @param historyValues Values to be inserted in statistics_history + * @param updateValues Values to be updated in statistics + * @param dateTimeStr Local time with microseconds 
precision + * @param val int + * @param prev int + * @return void */ -void StatsHistory::processKey(const string& key) const +void StatsHistory::processKey(const std::string& key, std::vector &historyValues, std::vector > &updateValues, std::string dateTimeStr, int val, int prev) const { -Query query(new Where("key", Equals, key)); + InsertValues iValue; - // Fetch the current and previous valaues for the key - query.returns(new Returns("value")); - query.returns(new Returns("previous_value")); - ResultSet *values = getStorageClient()->queryTable("statistics", query); - if (values->rowCount() != 1) - { - getLogger()->error("Internal error, failed to get statisitics for key %s", key.c_str()); - return; - } - int val = ((*values)[0])->getColumn("value")->getInteger(); - int prev = ((*values)[0])->getColumn("previous_value")->getInteger(); - delete values; - - // Insert the row into the configuration history - InsertValues historyValues; - historyValues.push_back(InsertValue("key", key.c_str())); - historyValues.push_back(InsertValue("value", val - prev)); - historyValues.push_back(InsertValue("history_ts", "now()")); - int n_rows; - if ((n_rows = getStorageClient()->insertTable("statistics_history", historyValues)) != 1) - { - getLogger()->error("Failed to insert single row to statisitics history table for key %s", key.c_str()); - } + // Insert the row into the statistics history + // create an object of InsertValues and push in historyValues vector + // for batch insertion + iValue.push_back(InsertValue("key", key.c_str())); + iValue.push_back(InsertValue("value", val - prev)); + iValue.push_back(InsertValue("history_ts", dateTimeStr)); + + historyValues.push_back(iValue); // Update the previous value in the statistics row - InsertValues updateValues; - updateValues.push_back(InsertValue("previous_value", val)); - if (getStorageClient()->updateTable("statistics", updateValues, Where("key", Equals, key)) != 1) - { - getLogger()->error("Failed to update single row to statisitics table for key %s", key.c_str()); - } + // create an object of InsertValue and push in updateValues vector + // for batch updation + InsertValue *updateValue = new InsertValue("previous_value", val); + Where *wKey = new Where("key", Equals, key); + updateValues.emplace_back(updateValue, wKey); } + +/** + * getTime() function returns the localTime with microseconds precision + * + * @param void + * @return std::string localTime + */ + +std::string StatsHistory::getTime(void) const +{ + struct timeval tv ; + struct tm* timeinfo; + gettimeofday(&tv, NULL); + timeinfo = gmtime(&tv.tv_sec); + char date_time[DATETIME_MAX_LEN]; + // Create datetime with seconds + strftime(date_time, + sizeof(date_time), + DATETIME_FORMAT_DEFAULT, + timeinfo); + + std::string dateTimeLocal = date_time; + char micro_s[MICROSECONDS_FORMAT_LEN]; + // Add microseconds + snprintf(micro_s, + sizeof(micro_s), + ".%06lu", + tv.tv_usec); + + dateTimeLocal.append(micro_s); + return dateTimeLocal; +} + diff --git a/CMakeLists.txt b/CMakeLists.txt index 875e677949..8ab13abe33 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -35,6 +35,7 @@ add_subdirectory(C/services/filter-plugin-interfaces/python/filter_ingest_pymodu add_subdirectory(C/services/north-plugin-interfaces/python) add_subdirectory(C/tasks/north) add_subdirectory(C/tasks/purge_system) +add_subdirectory(C/tasks/statistics_history) add_subdirectory(C/plugins/utils) add_subdirectory(C/plugins/north/OMF) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 238488f1b4..3e0db2c2cd 100644 --- 
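getTime() composes the history timestamp in two steps: strftime() for the seconds-resolution part and snprintf() for the fractional part. Note that it formats the output of gmtime(), so the string is effectively UTC even though the surrounding comments speak of local time. A tiny standalone check of the resulting format; the date shown in the comment is an arbitrary example.

```cpp
#include <cstdio>
#include <ctime>
#include <string>
#include <sys/time.h>

int main()
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    struct tm *timeinfo = gmtime(&tv.tv_sec);            // UTC, as in StatsHistory::getTime()

    char date_time[52];
    strftime(date_time, sizeof(date_time), "%Y-%m-%d %H:%M:%S", timeinfo);

    char micro_s[10];
    snprintf(micro_s, sizeof(micro_s), ".%06lu", tv.tv_usec);

    std::string ts = std::string(date_time) + micro_s;
    printf("%s\n", ts.c_str());                           // e.g. 2024-01-01 12:00:00.123456
    return 0;
}
```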
a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -8,9 +8,9 @@ The following is a set of guidelines for contributing to Fledge IoT project and its plugins, which are hosted in the [fledge-iot Organization](https://github.com/fledge-iot) on GitHub. -To give us feedback or make suggestions use the [Fledge Slack Channel](https://lfedge.slack.com/archives/CLJ7CNCAX). +To give us feedback or make suggestions use the fledge or fledge-help Slack Channel on [LFEdge](https://lfedge.slack.com/archives/CLJ7CNCAX). -If you find a security vulnerability within Fledge or any of it's plugins then we request that you inform us via email rather than by opening an issue in GitHub. This allows us to act on it without giving information that others might exploit. Any security vulnerability will be discussed at the project TCS and user will be informed of the need to upgrade via the Fledge slack channel. The email address to which vulnerabilities should be reported is security@dianomic.com. +If you find a security vulnerability within Fledge or any of its plugins then we request that **you inform us via email rather than by opening an issue in GitHub**. This allows us to act on it without giving information that others might exploit. Any security vulnerability will be discussed at the project TSC and user will be informed of the need to upgrade via the Fledge Slack channel. The email address to which vulnerabilities should be reported is security@dianomic.com. ## Pull requests @@ -18,7 +18,7 @@ If you find a security vulnerability within Fledge or any of it's plugins then w refactoring code etc.), otherwise you risk spending a lot of time working on something that might already be underway or is unlikely to be merged into the project. -Join the Fledge slack channel on [LFEdge](https://lfedge.slack.com/archives/CLJ7CNCAX). This +Join the fledge or fledge-help Slack channel on [LFEdge](https://lfedge.slack.com/archives/CLJ7CNCAX). This will allow you to talk to the wider fledge community and discuss your proposed changes and get help from the maintainers when needed. @@ -35,26 +35,26 @@ Adhering to the following process is the best way to get your work included in t ```bash # Clone your fork of the repo into the current directory - git clone https://github.com//fledge-iot.git + git clone https://github.com//fledge.git # Navigate to the newly cloned directory - cd fledge-iot + cd fledge # Assign the original repo to a remote called "upstream" git remote add upstream https://github.com/fledge-iot/fledge.git ``` -2. If you cloned a while ago, get the latest changes from upstream: +2. If you cloned a while ago, get the latest changes of develop branch from upstream: ```bash - git checkout main - git pull --rebase upstream main + git checkout develop + git pull --rebase upstream develop ``` 3. Create a new topic branch from `develop`, if you are working a particular issue from the Project Jira then the convention for branch names is to use the Jira name, otherwise choose a descriptive branch name that contains your GitHub username in order to help us track the changes. ```bash - git checkout -b [branch-name] + git checkout -b [topic-branch-name] upstream/develop ``` 4. Commit your changes in logical chunks. When you are ready to commit, make sure to write a Good @@ -70,24 +70,27 @@ Adhering to the following process is the best way to get your work included in t 5. 
Locally merge (or rebase) the upstream development branch into your topic branch: ```bash - git pull --rebase upstream main + git pull --rebase upstream develop ``` 6. Push your topic branch up to your fork: ```bash - git push origin [branch-name] + git push -u origin [topic-branch-name] ``` 7. [Open a Pull Request](https://help.github.com/articles/using-pull-requests/) with a clear title - and detailed description. + and detailed description. Also make sure you always raise the pull request against the develop branch of the upstream repository only. + Request at least one reviewer to expedite the review, and also verify the GitHub status checks, which let you know whether your commits meet the conditions set for the repository you're contributing to. + GitHub status checks are based on external processes, such as continuous integration builds, which run for each push you make to a repository. You can see the pending, passing, or failing state of status checks next to individual commits in your pull request. + ### Plugins The above addresses the main Fledge repository, however plugins each have a repository of their own which contains the code for the plugin and the documentation for the plugin. If you wish to work on an existing plugin -then the process is similar to that above, just replace the fledge.git +then the process is similar to that above, just replace the "fledge.git" repository with the fledge-{plugin-type}-{plugin-name}.git repository, for example ```bash @@ -100,6 +103,7 @@ repository with the fledge-{plugin-type}-{plugin-name}.git repository, for examp # Assign the original repo to a remote called "upstream" git remote add upstream https://github.com/fledge-iot/fledge-south-sinusoid.git ``` +Then repeat the remaining steps described [here](#pull-requests); a condensed example of the full workflow is shown below. If you wish to create a new plugin then contact the maintainers and we will create a blank base repository for you to add your code into. 
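As a quick reference, the following sketch condenses the plugin workflow described above into a single sequence. It uses the fledge-south-sinusoid repository from the example above; `<your-username>` and the branch name `my-topic-branch` are placeholders that you should replace with your own GitHub username and a descriptive branch name (or the Jira issue name).

```bash
# Clone your fork of the plugin repository and add the upstream remote
git clone https://github.com/<your-username>/fledge-south-sinusoid.git
cd fledge-south-sinusoid
git remote add upstream https://github.com/fledge-iot/fledge-south-sinusoid.git

# Create a topic branch from the upstream develop branch
git fetch upstream
git checkout -b my-topic-branch upstream/develop

# ... commit your changes in logical chunks ...

# Keep the branch up to date, then push it to your fork
git pull --rebase upstream develop
git push -u origin my-topic-branch

# Finally, open a pull request against the develop branch of the upstream repository
```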
diff --git a/GOVERNANCE.MD b/GOVERNANCE.MD new file mode 100644 index 0000000000..061a02bc76 --- /dev/null +++ b/GOVERNANCE.MD @@ -0,0 +1,7 @@ +# Governance + +Project governance as well as policies, procedures and instructions for contributing to FLEDGE can be found on our Wiki site at the following locations: + + +- [Fledge Governance Wiki Page](https://wiki.lfedge.org/display/FLEDGE/Governance) +- [Contributor's Guide](CONTRIBUTING.md) diff --git a/Makefile b/Makefile index c1c80d666b..3ce471339c 100644 --- a/Makefile +++ b/Makefile @@ -61,6 +61,7 @@ CMAKE_SOUTH_BINARY := $(CMAKE_SERVICES_DIR)/south/fledge.services.sou CMAKE_NORTH_SERVICE_BINARY := $(CMAKE_SERVICES_DIR)/north/fledge.services.north CMAKE_NORTH_BINARY := $(CMAKE_TASKS_DIR)/north/sending_process/sending_process CMAKE_PURGE_SYSTEM_BINARY := $(CMAKE_TASKS_DIR)/purge_system/purge_system +CMAKE_STATISTICS_BINARY := $(CMAKE_TASKS_DIR)/statistics_history/statistics_history CMAKE_PLUGINS_DIR := $(CURRENT_DIR)/$(CMAKE_BUILD_DIR)/C/plugins DEV_SERVICES_DIR := $(CURRENT_DIR)/services DEV_TASKS_DIR := $(CURRENT_DIR)/tasks @@ -70,6 +71,7 @@ SYMLINK_SOUTH_BINARY := $(DEV_SERVICES_DIR)/fledge.services.south SYMLINK_NORTH_SERVICE_BINARY := $(DEV_SERVICES_DIR)/fledge.services.north SYMLINK_NORTH_BINARY := $(DEV_TASKS_DIR)/sending_process SYMLINK_PURGE_SYSTEM_BINARY := $(DEV_TASKS_DIR)/purge_system +SYMLINK_STATISTICS_BINARY := $(DEV_TASKS_DIR)/statistics_history ASYNC_INGEST_PYMODULE := $(CURRENT_DIR)/python/async_ingest.so* FILTER_INGEST_PYMODULE := $(CURRENT_DIR)/python/filter_ingest.so* @@ -166,7 +168,7 @@ PACKAGE_NAME=Fledge # generally prepare the development tree to allow for core to be run default : apply_version \ generate_selfcertificate \ - c_build $(SYMLINK_STORAGE_BINARY) $(SYMLINK_SOUTH_BINARY) $(SYMLINK_NORTH_SERVICE_BINARY) $(SYMLINK_NORTH_BINARY) $(SYMLINK_PURGE_SYSTEM_BINARY) $(SYMLINK_PLUGINS_DIR) \ + c_build $(SYMLINK_STORAGE_BINARY) $(SYMLINK_SOUTH_BINARY) $(SYMLINK_NORTH_SERVICE_BINARY) $(SYMLINK_NORTH_BINARY) $(SYMLINK_PURGE_SYSTEM_BINARY) $(SYMLINK_STATISTICS_BINARY) $(SYMLINK_PLUGINS_DIR) \ python_build python_requirements_user apply_version : @@ -289,6 +291,11 @@ $(SYMLINK_NORTH_BINARY) : $(DEV_TASKS_DIR) $(SYMLINK_PURGE_SYSTEM_BINARY) : $(DEV_TASKS_DIR) $(LN) $(CMAKE_PURGE_SYSTEM_BINARY) $(SYMLINK_PURGE_SYSTEM_BINARY) +# create symlink to statistics_history binary +$(SYMLINK_STATISTICS_BINARY) : $(DEV_TASKS_DIR) + $(LN) $(CMAKE_STATISTICS_BINARY) $(SYMLINK_STATISTICS_BINARY) + + # create tasks dir $(DEV_TASKS_DIR) : $(MKDIR_PATH) $(DEV_TASKS_DIR) diff --git a/SECURITY.MD b/SECURITY.MD new file mode 100644 index 0000000000..d322363696 --- /dev/null +++ b/SECURITY.MD @@ -0,0 +1,33 @@ + + +## Security + +Fledge takes the security of our software products and services seriously. This includes all source code repositories managed through our GitHub organizations, including [Fledge](https://github.com/Fledge-iot). + + +If you believe you have found a security vulnerability in any Fledge repository, please report it to us as described below. + +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues; instead, email security@dianomic.com.** + +You should receive a response soon. If for some reason you do not, please follow up via email to ensure we received your original message. + +Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: + + * Type of issue (e.g. 
buffer overflow, SQL injection, cross-site scripting, etc.) + * Full paths of source file(s) related to the manifestation of the issue + * The location of the affected source code (tag/branch/commit or direct URL) + * Any special configuration required to reproduce the issue + * Step-by-step instructions to reproduce the issue + * Proof-of-concept or exploit code (if possible) + * Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. + + +## Preferred Languages + +We prefer all communications to be in English. + + \ No newline at end of file diff --git a/VERSION b/VERSION index 496130b3aa..ed70b8404e 100644 --- a/VERSION +++ b/VERSION @@ -1,2 +1,2 @@ -fledge_version=2.1.0 -fledge_schema=59 +fledge_version=2.2.0 +fledge_schema=66 diff --git a/docs/91_version_history.rst b/docs/91_version_history.rst index 14c0d9d4f4..7c409f4108 100644 --- a/docs/91_version_history.rst +++ b/docs/91_version_history.rst @@ -25,6 +25,191 @@ Version History Fledge v2 ========== +v2.2.0 +------- + +Release Date: 2023-10-17 + +- **Fledge Core** + + - New Features: + + - New audit logs have been added to reflect the creation, update and deletion of access control lists. + - New public API Entry Points have been added to allow for the creation and manipulation of control pipelines. + - A new user role has been added for those users able to update the control features of the platform. + - A new tuning parameter has been added to the PostgreSQL storage plugin to allow the maximum number of readings inserted into the database in a single insert to be limited. This is useful when high data rates or large bursts of readings are received as it limits the memory consumption of the plugin and reduces the lock contention on the database. + - The asset tracker component has been optimized in order to improve the ingress and egress performance of Fledge. + - The mechanism used by the south and north services to interact with the audit log has been optimized. This improves the ingress and egress performance of the product at the cost of a small delay before the audit log is updated. + - A number of optimizations have been made to improve the performance of Python filters within a pipeline. + - A number of optimizations to the SQLite in-memory storage plugin and the SQLiteLB storage plugin have been added that increase the rate at which readings can be stored with these plugins. + - The support bundle creation process has been updated to include any performance counters available in the system. + - The ability to monitor performance counters has been added to Fledge. The South and North services now offer performance counters that can be captured by the system. These are designed to provide information useful for tuning the respective services. + - The process used to extract log information from the system logs has been updated to improve performance and reduce the system overhead required to extract log data. + - A number of changes have been made to improve the performance of sending data north from the system. + - The performance of the statistics history task has been improved. It now makes fewer calls to the storage subsystem, improving the overall system performance. + - The performance of the asset tracker system has been improved, resulting in an improvement in the ingress performance of the system. + - Changes have been made to the purge process in the SQLiteLB and SQLite in-memory plugins in order to improve performance. 
+ - The audit log entries have been updated to include more information when schedules are updated. + - Audit logs have been added to the user API of the public REST interface. + - The plugin developers guide has been updated to include the mechanism for adding audit trail entries from C++ plugins. + - Plugins that run within the south and north services and north tasks now have access to the audit logging system. + - The public API has been updated to include the ability to make control requests. + - The public API of the system has been updated to allow selection of readings from the storage buffer for given time intervals. + - The public API that is used to retrieve reading data from the storage layer has been updated to allow data for multiple assets to be retrieved in a single call. + - The SQLite in-memory storage plugin now has an option that allows the data to be persisted when shutting the system down and reloaded on startup. + - The SQLite storage plugins have been updated to improve the error reporting around database contention issues. + - A change has been made to the configuration of the storage plugin such that rather than having to type correct names for storage plugins the user may now select the plugins to use from a drop down list. Note however that the system must still be restarted for the new storage plugin to take effect. + - The storage service has been updated to allow other services to subscribe the notifications of inserts into the generic tables. + - A change has been made to prevent the schedules used to start services from being renamed as this could cause the services to fail. + - The default interval for running the purge process has been reduced, the purge process will now run every 10 minutes. This change only affects new installations, the purge process will run as before on systems that are upgraded. + - The ingestion of data from asynchronous south services paid no attention to the advanced configuration option "Throttle". This meant that very fast asynchronous south plugins could build extremely large queues of data within the south service, using system resources and taking a long time to shutdown. This has now been rectified, with asynchronous south services now subject to flow control if the "Throttle" option is set for the service. Unconstrained input is still available if the "Throttle" option is not checked. + - The south plugin now supports three different modes of polling. Polling at fixed intervals from the time started, polling at fixed times or polling on demand via the control mechanisms. + - Support has been added to allow filters to ingest passed data onwards during a shutdown of the filter. This allows any buffered data to be flushed to the next filter in the pipeline. + - A numeric list data type has been added to the reading ingestion code of the system. + - A Python package, used by the system, found to have a security vulnerability. This has been updated. + - The format of Python traceback has been improved to use multiple lines within the log. This makes the trace easier to understand and prevents the truncation that can occur. + - The setting of log levels from a service is now also reflected in any Python code loaded by the service. + - The reporting of issues related to failure to load plugins has been improved. + - When upgrading the version of a plugin any new configuration items are added to the relevant configuration categories. However the operation was not correctly reported as a configuration change in the audit log. 
This behavior has now been corrected. + - An issue which could occasionally result in the bearer token used for authentication between the various services expiring before the completion of the renewal process has been resolved. This could result in the failure of services to communicate with each other. + - The configuration category C++ API has been enhanced in the retrieval and setting of all the attributes of a configuration item. + - The support bundle has been updated to include a list of the Python packages installed on the machine. + - The documentation regarding handling and updating certificates used for authentication has been updated. + - Added documentation for the performance counters in the tuning guide. + + + - Bug Fix: + + - An issue with the SQLite in-memory and the SQLiteLB storage plugins that could result in incorrect data being stored has been resolved. + - An erroneous message was being produced when starting the system using the SQLite in-memory storage plugin. This has now been resolved. + - Support has been improved for switching between different storage plugins that allows for correct schema creation when using different sqlite plugin variants for configuration and readings storage. + - An issue that could cause health metrics to not be correctly returned when using the Postgres storage engine has been resolved. + - An issue in one of the storage plugins that caused spurious warnings to appear in the logs during a backup has been resolved. + - A memory leak in one of the storage plugins has been fixed. This caused the storage service to consume large amounts of memory over time which could result in the operating system killing the service. + - An update has been done to the default SQLite storage plugin to enable it to handle a large number of distinct asset codes in the readings. Previously the plugin was limited in the number of assets it could support. When the number of asset codes gets large the performance of the plugin will be reduced slightly, however it will continue to ingest data. + - An issue with memory usage in Python plugins used in south services has been resolved. + - A number of issues regarding the usage of memory have been resolved, including some small memory leaks. The overall memory footprint of north services should also be reduced in some circumstances. + - An issue that causes log messages to not be recorded has been resolved. + - An issue that could cause the statistics to be displayed with a timestamp in the wrong timezone has been resolved. + - A bug in the statistics rate API that would result in incorrect data being returned has been fixed. + - An empty statistics entry would erroneously be added for an asset or a service if the advanced parameter to control the statistics was modified from the default before the service was started. This has now been resolved. + - A problem with statistics counter overflow that could cause a crash in the statistics collector has been resolved. + - An issue that caused the retrieval of system logs for services with white space in the name of the service has been resolved. + - The control dispatcher now has access to the audit logging system. + - An issue that required the north service to be restarted if the source of data to send was changed in a running service has been resolved. Changing the data source no longer requires a restart of the north service. + - An issue that could sometimes cause a running north service to fail if the configuration for that service is updated has been resolved. 
+ - A problem that prevents an updated service from restarting after an upgrade if HTTPS is used for the interface between services has been resolved. + - An issue that limited the update of additional services to just the notification service has been resolved. The update mechanism can now update any service that is added to the base system installation. + - The Python south plugin mechanism has been updated to fix an issue with ingestion of nested data point values. + - When switching a south plugin from a slow poll rate to a faster one the new poll rate does not take effect until the end of the current poll cycle. This could be a very long time. This has now been changed so that the south service will take the new poll rate as soon as possible rather than wait for the end of the current poll cycle. + - A bug that prevented notification rules from being executed for readings with asset codes starting with numeric values has been resolved. + - The data sent to notification rules that register for audit information has been updated to include the complete audit record. This allows for notification rules to be written that trigger on the particular auditable operations within the system. + - The notification service would sometimes shutdown without removing all of the subscriptions it holds with the storage service. This could cause issues for the storage service. Subscriptions are now correctly removed. + - The command line interface to view the status of the system has been updated to correctly show the statistics history collection task when it is running. + - The issue of incorrect timestamps in reading graphs due to inconsistent timezones in API calls has been resolved. All API calls now return timestamps in UTC unless explicitly specified in the response. + - An issue with the code update mechanism that could cause multiple updates to occur has been resolved. Only a single update should be executed and then the flag allowing for updates to be applied should be removed. This prevents the update mechanism triggering on each restart of the system. + - A problem that prevented the fledge-south-modbus plugin from being updated in the same way as other plugins has been resolved. + - An issue with trying to create a new user that shares the same user name with a previous user that was removed from the system failing has been resolved. + - A problem with converting very long integers from JSON has been resolved. This would have manifested itself as a crash when handling datapoints that contain 64 bit integers above a certain value. + - An update has been made to prevent the creation of service with empty service name. + + +- **GUI** + + - New Features: + + - New controls have been added in the menu pane of the GUI to allow nested commands to be collapsed or expanded, resulting in a smaller menu display. + - A new user interface option has been added to the control menu to create control pipelines. + - The user interface has been updated such that if the backend system is not available then the user interface components are made non-interactive & blur. + - The interface for updating the filters has been improved when multiple filters are being updated at once. + - New controls have been added to the asset browser to pause the automatic refresh of the data and to allow shuffling back and forth along the timeline. + - The ability to move backwards and forwards in the timeline of the asset browser graph has been added. 
+ - The facility that pauses the automatic update of the asset browser graph has been added. + - The ability to graph multiple readings on a single graph has been added to the asset browser graph. + - A facility to allow a user to define the default time duration shown in the asset browser graph has been added to the user interface settings page. + - The date format has been made more flexible in the asset and readings graph. + - The display of image attributes for image type data points has been added to the latest reading display. + - The ability to select an area on the graph shown in the asset browser and zoom into the time period defined by that area has been added. + - The reading graph time granularity has been improved in the asset browser. + + + - Bug Fix: + + - The user interface for configuring plugins has been improved to make it more obvious when mandatory items are missing. + - An issue that allowed view users to update configuration when logged in using certificate based authentication has been resolved. + - An issue which prevented the file upload/value update for script type configuration item, unless the name also was script has been resolved. + - An issue with editing large scripts or JSON items in the configuration has been resolved. + - An issue that caused services with quotes in the name to disappear from the user interface has been resolved. + - The latest reading display issue that resulted in non image data not being shown when one or more image data points are in the reading has been resolved. + - A text wrapping issue in the system log viewer has been resolved. + - An occasional error that appeared on the Control Script and ACL pages has been resolved. + + +- **Services & Plugins** + + - New Features: + + - An update has been done to the OMF north plugin to correctly handle the set of reserved characters in PI tag names when using the new linked data method for inserting data in the PI Server. + - The OMF north plugin has been updated to make an additional test for the server hostname when it is configured. This will give clearer feedback in the error log if a bad hostname is entered or the hostname can not be resolved. This will also confirm that IP addresses entered are in the correct format. + - Some enhancements have been made to the OMF north plugin to improve the performance when there are large numbers of distinct assets to send to the PI Server. + - There have been improvements to the OMF north plugin to prevent an issue that could cause the plugin to stop sending data if the type of an individual datapoint changed repeatedly between integer and floating point values. The logging of the plugin has also been improved, with clearer messages and less repetition of error conditions that persist for long periods. + - Support for multiple data centers for OSIsoft Cloud Services (OCS) has been added in the OMF north plugin. OCS is hosted in the US-West and EU-West regions. + - When processing data updates from the PI Server at high rates, the PI Server Update Manager queue might overflow. This is caused by the PI Server not retrieving data updates until all registrations were complete. To address this, the PI Server South plugin has been updated to interleave registration and retrieval of data updates so that data retrieval begins immediately. 
+ - Macro substitution has been added to the OMFHint filter allowing the contents of datapoints and metadata to be incorporated into the values of the OMF Hint; for example, the Asset Framework location can now include data read from the data source. + - The fledge-filter-asset has been updated to allow it to split assets into multiple assets, with the different data points in the original asset being assigned to one or more of the new assets created. + - The fledge-filter-asset has been enhanced to allow it to flatten a complex asset structure. This allows nested data to be moved to the root level of the asset. + - The fledge-filter-asset has been enhanced to allow it to remove data points from readings. + - Windowed averages in the notification service preserve the type of the input data when creating the averages. This does not work well for integer values and has been changed such that integer values are promoted to floating point when using windowed averages for notification rule input. + - The notification mechanism has been updated to accept raw statistics and statistics rates as an input for notification rules. This allows alerts to be raised for pipeline flows and other internal tasks that generate statistics. + - Notifications can now register for audit log entries to be sent to notification rules. This allows notifications to be made based on internal state changes of the system. + - The fledge-north-opcuaclient has been updated to support multiple values in a single write. + - The fledge-north-opcuaclient plugin has been updated to support OPC UA security mode and security policies. + - The fledge-north-httpc plugin now supports sending audit log data as well as readings and statistics. + - The fledge-north-kafka plugin has been updated to allow for username and password authentication to be supplied when connecting to the Kafka server. + - Compression functionality has been added to the fledge-north-kafka plugin. + - The average and watchdog rules have been updated to allow selection of data sources other than the readings to be sent to the rules. + - The fledge-notify-email notification delivery plugin has been updated to hide the password from view and also allow custom alert messages to be created. + - Some devices were not compatible with the optimized block reading of registers performed by the fledge-south-modbus plugin. The plugin has been updated to provide controls that can determine how it reads data from the modbus device. This allows single register reads, single object reads and the current optimized block reads. + - The fledge-south-s2opcua plugin now supports an optional datapoint in its Readings that shows the full path of the OPC UA Variable in the server's namespace. It has also been updated to support large numbers of Monitored Items. + - The option to configure and use a username and password for authentication to the MQTT broker has been added to the fledge-south-mqtt plugin. + - The North service could crash if it retrieved invalid JSON while processing a reconfiguration request. This was addressed by adding an exception handler to prevent the crash. + - The audit logger has been made available to plugins running within the notification service. + - The notification service documentation has been updated to include examples of notifications based on statistics and audit logs. 
+ - Documentation of the AF Location OMFHint in the OMF North plugin page has been updated to include an outline of differences in behaviors between Complex Types and the new Linked Types configuration. + - The documentation of the OMF North plugin has been updated to conform with the latest look and feel of the configuration user interface. It also contains notes regarding the use of complex types versus the OMF 1.2 linked types. + - The documentation for the asset filter has been improved to include more examples and explanations for the various uses of the plugin and to include all the different operations that can be performed with the filter. + - The documentation for the control notification plugin has been updated to include examples for all destinations of control requests. + + + - Bug Fix: + + - The OMF North plugin that is used to send data to the AVEVA PI Server has been updated to improve the performance of the plugin. + - The OMF North plugin sent basic data type definitions to AVEVA Data Hub (ADH) that could not be processed, resulting in a loss of all time series data. This has been fixed. + - Recent changes in the OMF North plugin caused the data streaming to the Edge Data Store (EDS) to fail. This has been fixed. The fix has been tested with EDS 2020 (Version 1.0.0.609). + - The fledge-north-opcuaclient plugin has been updated to support higher data transfer rates. + - An issue with the fledge-south-s2opcua plugin that allowed a negative value to be entered for the minimum reporting interval has been resolved. The plugin has also been updated to use the new tab format for configuration item grouping. + - An issue with NULL string data being returned from OPC UA servers has been resolved. NULL strings will not be represented in the readings; no data point will be created for the NULL string. + - The fledge-south-s2opcua plugin would become unresponsive if the OPC UA server was unavailable or if the server URL was incorrect. The only way to stop the plugin in this state was to shut down Fledge. This has been fixed. + - An issue with the fledge-notify-setpoint plugin that allowed control operations to occur before a south plugin was fully ready has been resolved. + - An issue with reconfiguring a fledge-north-kafka plugin has been resolved; it now behaves correctly in all cases. + - An issue with sending data to Kafka that included image data points has been resolved. There is no support in Kafka for images and they will be removed while allowing the remainder of the data to be sent to Kafka. + - An issue with the fledge-south-modbustcp & S7 plugins which caused the polling to fail has been resolved. + - A problem with the fledge-south-j1708 & fledge-south-j1939 plugins that caused them to fail if they were added in a disabled state and later enabled has been resolved. + - A problem that caused the fledge-north-azure-iot plugin to fail to send data has been corrected. + - A product version check was made incorrectly if the OMF endpoint type was not PI Web API. This has been fixed. + - When a notification was sent, an audit log entry was created even if the delivery failed. The entry should only be created on successful delivery; this has been fixed. + - Problems with the fledge-notify-asset delivery plugin that would sometimes stop the notification service, and that prevented it from creating entries in the asset tracker, have been resolved. + - An issue that could cause notifications to not trigger correctly when used with conditional forwarding has been resolved. 
+ - An issue with using multiple Python based plugins in a north conditional forwarding pipeline has been resolved. + - Changing the name of an asset in a notification rule plugins could sometimes cause an error to be incorrectly logged. This has now been resolved. + - An issue related to using averaging with the statistics history input to the notification rules has been fixed. + - If a query for AF Attributes includes a search string token that does not exist, PI Web API returns an HTTP 400 error. PI Server South now retrieves error messages if this occurs and logs them. + - Various filters summarize data over time, these have been standardized to use the times of the summary calculation. + - The fledge-filter-threshold interface has been tidied up, removing duplicate information. + - A problem with installation of the fledge-south-person-detection plugin on Ubuntu 20 has been resolved. + - The control map configuration item of the fledge-south-modbus plugin was incorrectly described, this has now been resolved. + + v2.1.0 ------- @@ -74,6 +259,7 @@ Release Date: 2022-12-26 - The S2OPCUA south plugin has been updated to allow the timestamp for readings to be taken from the OPC UA server itself rather than the time that it was received by Fledge. + - Bug Fix: - An issue with building of the DNP3 plugin on the Raspberry Pi platform has been resolved. @@ -151,7 +337,7 @@ Release Date: 2022-09-09 - When the data stream from a south plugin included an OMF Hint of AFLocation, performance of the OMF North plugin would degrade. In addition, process memory would grow over time. These issues have been fixed. - The version of the PostgreSQL database used by the Postgres storage plugin has been updated to PostgreSQL 13. - An enhancement has been added to the North service to allow the user to specify the block size to use when sending data to the plugin. This helps tune the north services and is described in the tuning guide within the documentation. - - The notification server would previously output warning messages when it was starting, these were not an indication of a problem and should have been information messages. This has now been resolved. + - The notification service would previously output warning messages when it was starting. These were not an indication of a problem and should have been information messages. This has now been resolved. - The backup mechanism has been improved to include some external items in the backup and provide a more secure backup. - The purge option that controls if unsent assets can be purged or not has been enhanced to provide options for sent to any destination or sent to all destinations as well as sent to no destinations. - It is now possible to add control features to Python south plugins. @@ -442,7 +628,7 @@ Release Date: 2021-05-27 - The Python 35 filter stated it used the Python version 3.5 always, in reality it uses whatever Python 3 version is installed on your system. The documentation has been updated to reflect this. - Fixed a bug that treated arrays of bytes as if they were strings in the OPC/UA south plugin. - The HTTP North C plugin would not correctly shutdown, this effected reconfiguration when run as an always on service. This issue has now been resolved. - - An issue with the SQLite In Memory storage plugin that caused database locks under high load conditions has been resolved. + - An issue with the SQLite in-memory storage plugin that caused database locks under high load conditions has been resolved. 
v1.9.0 @@ -601,7 +787,7 @@ Release Date: 2020-05-08 - New Features: - - Documentation has been added for the use of the SQLite In Memory storage plugin. + - Documentation has been added for the use of the SQLite in-memory storage plugin. - The support bundle functionality has been improved to include more detail in order to aid tracking down issues in installations. - Improvements have been made to the documentation of the OMF plugin in line with the enhancements to the code. This includes the documentation of OCS and EDS support as well as PI Web API. - An issue with forwarding data between two Fledge instances in different time zones has been resolved. diff --git a/docs/OMF.rst b/docs/OMF.rst index 657f9c256f..fdbda34529 100644 --- a/docs/OMF.rst +++ b/docs/OMF.rst @@ -7,150 +7,48 @@ .. |omf_plugin_eds_config| image:: images/omf-plugin-eds.jpg .. |omf_plugin_ocs_config| image:: images/omf-plugin-ocs.jpg .. |omf_plugin_adh_config| image:: images/omf-plugin-adh.jpg +.. |OMF_AF| image:: images/OMF_AF.jpg +.. |OMF_Auth| image:: images/OMF_Auth.jpg +.. |OMF_Cloud| image:: images/OMF_Cloud.jpg +.. |OMF_Connection| image:: images/OMF_Connection.jpg +.. |OMF_Default| image:: images/OMF_Default.jpg +.. |OMF_Format| image:: images/OMF_Format.jpg +.. |OMF_Endpoints| image:: images/OMF_Endpoints.jpg +.. |ADH_Regions| image:: images/ADH_Regions.jpg .. Links .. |OMFHint filter plugin| raw:: html OMFHint filter plugin + +OMF End Points +-------------- + +The OMF Plugin within Fledge supports a number of different OMF Endpoints for sending data out of Fledge. + PI Web API OMF Endpoint ~~~~~~~~~~~~~~~~~~~~~~~ To use the PI Web API OMF endpoint first ensure the OMF option was included in your PI Server when it was installed. Now go to the Fledge user interface, create a new North instance and select the “OMF” plugin on the first screen. -The second screen will request the following information: - -+----------------------------+ -| |omf_plugin_pi_web_config| | -+----------------------------+ - -Select PI Web API from the Endpoint options. - -- Basic Information - - **Endpoint:** This is the type of OMF endpoint. In this case, choose PI Web API. - - **Send full structure:** Used to control if Asset Framework structure messages are sent to the PI Server. If this is turned off then the data will not be placed in the Asset Framework. - - **Naming scheme:** Defines the naming scheme to be used when creating the PI points in the PI Data Archive. See :ref:`Naming_Scheme`. - - **Server hostname:** The hostname or address of the PI Web API server. This is normally the same address as the PI Server. - - **Server port:** The port the PI Web API OMF endpoint is listening on. Leave as 0 if you are using the default port. - - **Data Source:** Defines which data is sent to the PI Server. Choices are: readings or statistics (that is, Fledge's internal statistics). - - **Static Data:** Data to include in every reading sent to PI. For example, you can use this to specify the location of the devices being monitored by the Fledge server. -- Asset Framework - - **Default Asset Framework Location:** The location in the Asset Framework hierarchy into which the data will be inserted. - All data will be inserted at this point in the Asset Framework hierarchy unless a later rule overrides this. - Note this field does not include the name of the target Asset Framework Database; - the target database is defined on the PI Web API server by the PI Web API Admin Utility. 
- - **Asset Framework Hierarchies Rules:** A set of rules that allow specific readings to be placed elsewhere in the Asset Framework. These rules can be based on the name of the asset itself or some metadata associated with the asset. See `Asset Framework Hierarchy Rules`_. -- PI Web API authentication - - **PI Web API Authentication Method:** The authentication method to be used: anonymous, basic or kerberos. - Anonymous equates to no authentication, basic authentication requires a user name and password, and Kerberos allows integration with your Single Sign-On environment. - - **PI Web API User Id:** For Basic authentication, the user name to authenticate with the PI Web API. - - **PI Web API Password:** For Basic authentication, the password of the user we are using to authenticate. - - **PI Web API Kerberos keytab file:** The Kerberos keytab file used to authenticate. -- Connection management (These should only be changed with guidance from support) - - **Sleep Time Retry:** Number of seconds to wait before retrying the HTTP connection (Fledge doubles this time after each failed attempt). - - **Maximum Retry:** Maximum number of times to retry connecting to the PI Server. - - **HTTP Timeout:** Number of seconds to wait before Fledge will time out an HTTP connection attempt. -- Other (Rarely changed) - - **Integer Format:** Used to match Fledge data types to the data type configured in PI. This defaults to int64 but may be set to any OMF data type compatible with integer data, e.g. int32. - - **Number Format:** Used to match Fledge data types to the data type configured in PI. The default is float64 but may be set to any OMF datatype that supports floating point values. - - **Compression:** Compress the readings data before sending them to the PI Web API OMF endpoint. - This setting is not related to data compression in the PI Data Archive. - - **Complex Types:** Used to force the plugin to send OMF data types as complex types rather than the newer linked types. Linked types are the default way to send data and allows assets to have different sets of data points in different readings. See :ref:`Linked_Types`. +In the second screen select the PI Web API as the OMF endpoint. + +AVEVA Data Hub +~~~~~~~~~~~~~~ + +The cloud service from AVEVA that allows you to store your data in the AVEVA cloud. + +.. _Edge_Data_Store: Edge Data Store OMF Endpoint ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To use the OSIsoft Edge Data Store first install Edge Data Store on the same machine as your Fledge instance. It is a limitation of Edge Data Store that it must reside on the same host as any system that connects to it with OMF. -Now go to the Fledge user interface, create a new North instance and select the “OMF” plugin on the first screen. -The second screen will request the following information: - -+-------------------------+ -| |omf_plugin_eds_config| | -+-------------------------+ - -Select Edge Data Store from the Endpoint options. - -- Basic Information - - **Endpoint:** This is the type of OMF endpoint. In this case, choose Edge Data Store. - - **Naming scheme:** Defines the naming scheme to be used when creating the PI points within the PI Server. See :ref:`Naming_Scheme`. - - **Server hostname:** Normally the hostname or address of the OMF endpoint. For Edge Data Store, this must be *localhost*. - - **Server port:** The port the Edge Data Store is listening on. Leave as 0 if you are using the default port. - - **Data Source:** Defines which data is sent to the Edge Data Store. 
Choices are: readings or statistics (that is, Fledge's internal statistics). - - **Static Data:** Data to include in every reading sent to PI. For example, you can use this to specify the location of the devices being monitored by the Fledge server. -- Connection management (These should only be changed with guidance from support) - - **Sleep Time Retry:** Number of seconds to wait before retrying the HTTP connection (Fledge doubles this time after each failed attempt). - - **Maximum Retry:** Maximum number of times to retry connecting to the PI server. - - **HTTP Timeout:** Number of seconds to wait before Fledge will time out an HTTP connection attempt. -- Other (Rarely changed) - - **Integer Format:** Used to match Fledge data types to the data type configured in PI. This defaults to int64 but may be set to any OMF data type compatible with integer data, e.g. int32. - - **Number Format:** Used to match Fledge data types to the data type configured in PI. The default is float64 but may be set to any OMF datatype that supports floating point values. - - **Compression:** Compress the readings data before sending them to the Edge Data Store. - -AVEVA Data Hub OMF Endpoint -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Go to the Fledge user interface, create a new North instance and select the “OMF” plugin on the first screen. -The second screen will request the following information: - -+-------------------------+ -| |omf_plugin_adh_config| | -+-------------------------+ - -Select AVEVA Data Hub from the Endpoint options. - -- Basic Information - - **Endpoint:** This is the type of OMF endpoint. In this case, choose AVEVA Data Hub. - - **Naming scheme:** Defines the naming scheme to be used when creating the PI points within the PI Server. See :ref:`Naming_Scheme`. - - **Data Source:** Defines which data is sent to AVEVA Data Hub. Choices are: readings or statistics (that is, Fledge's internal statistics). - - **Static Data:** Data to include in every reading sent to AVEVA Data Hub. For example, you can use this to specify the location of the devices being monitored by the Fledge server. -- Authentication - - **Namespace:** Your namespace within the AVEVA Data Hub. - - **Tenant ID:** Your AVEVA Data Hub Tenant ID for your account. - - **Client ID:** Your AVEVA Data Hub Client ID for your account. - - **Client Secret:** Your AVEVA Data Hub Client Secret. -- Connection management (These should only be changed with guidance from support) - - **Sleep Time Retry:** Number of seconds to wait before retrying the HTTP connection (Fledge doubles this time after each failed attempt). - - **Maximum Retry:** Maximum number of times to retry connecting to the AVEVA Data Hub. - - **HTTP Timeout:** Number of seconds to wait before Fledge will time out an HTTP connection attempt. -- Other (Rarely changed) - - **Integer Format:** Used to match Fledge data types to the data type configured in AVEVA Data Hub. This defaults to int64 but may be set to any OMF data type compatible with integer data, e.g. int32. - - **Number Format:** Used to match Fledge data types to the data type configured in AVEVA Data Hub. The default is float64 but may be set to any OMF datatype that supports floating point values. - - **Compression:** Compress the readings data before sending them to AVEVA Data Hub. - - -OSIsoft Cloud Services OMF Endpoint -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Go to the Fledge user interface, create a new North instance and select the “OMF” plugin on the first screen. 
-The second screen will request the following information: - -+-------------------------+ -| |omf_plugin_ocs_config| | -+-------------------------+ - -Select OSIsoft Cloud Services from the Endpoint options. - -- Basic Information - - **Endpoint:** This is the type of OMF endpoint. In this case, choose OSIsoft Cloud Services. - - **Naming scheme:** Defines the naming scheme to be used when creating the PI points within the PI Server. See :ref:`Naming_Scheme`. - - **Data Source:** Defines which data is sent to OSIsoft Cloud Services. Choices are: readings or statistics (that is, Fledge's internal statistics). - - **Static Data:** Data to include in every reading sent to OSIsoft Cloud Services. For example, you can use this to specify the location of the devices being monitored by the Fledge server. -- Authentication - - **Namespace:** Your namespace within OSIsoft Cloud Services. - - **Tenant ID:** Your OSIsoft Cloud Services Tenant ID for your account. - - **Client ID:** Your OSIsoft Cloud Services Client ID for your account. - - **Client Secret:** Your OSIsoft Cloud Services Client Secret. -- Connection management (These should only be changed with guidance from support) - - **Sleep Time Retry:** Number of seconds to wait before retrying the HTTP connection (Fledge doubles this time after each failed attempt). - - **Maximum Retry:** Maximum number of times to retry connecting to the PI server. - - **HTTP Timeout:** Number of seconds to wait before Fledge will time out an HTTP connection attempt. -- Other (Rarely changed) - - **Integer Format:** Used to match Fledge data types to the data type configured in PI. This defaults to int64 but may be set to any OMF data type compatible with integer data, e.g. int32. - - **Number Format:** Used to match Fledge data types to the data type configured in PI. The default is float64 but may be set to any OMF datatype that supports floating point values. - - **Compression:** Compress the readings data before sending them to OSIsoft Cloud Services. +.. _Connector_Relay: PI Connector Relay ~~~~~~~~~~~~~~~~~~ @@ -179,28 +77,174 @@ Connect the new application to the PI Connector Relay by selecting the new Fledg Finally, select the new Fledge application. Click "More" at the bottom of the Configuration panel. Make note of the Producer Token and Relay Ingress URL. -Now go to the Fledge user interface, create a new North instance and select the “OMF” plugin on the first screen. -The second screen will request the following information: - -+-------------------------------------+ -| |omf_plugin_connector_relay_config| | -+-------------------------------------+ - -- Basic Information - - **Endpoint:** This is the type of OMF endpoint. In this case, choose Connector Relay. - - **Server hostname:** The hostname or address of the PI Connector Relay. - - **Server port:** The port the PI Connector Relay is listening on. Leave as 0 if you are using the default port. - - **Producer Token:** The Producer Token provided by the PI Relay Data Connection Manager. - - **Data Source:** Defines which data is sent to the PI Connector Relay. Choices are: readings or statistics (that is, Fledge's internal statistics). - - **Static Data:** Data to include in every reading sent to PI. For example, you can use this to specify the location of the devices being monitored by the Fledge server. 
-- Connection management (These should only be changed with guidance from support) - - **Sleep Time Retry:** Number of seconds to wait before retrying the HTTP connection (Fledge doubles this time after each failed attempt). - - **Maximum Retry:** Maximum number of times to retry connecting to the PI server. - - **HTTP Timeout:** Number of seconds to wait before Fledge will time out an HTTP connection attempt. -- Other (Rarely changed) - - **Integer Format:** Used to match Fledge data types to the data type configured in PI. This defaults to int64 but may be set to any OMF data type compatible with integer data, e.g. int32. - - **Number Format:** Used to match Fledge data types to the data type configured in PI. The default is float64 but may be set to any OMF datatype that supports floating point values. - - **Compression:** Compress the readings data before sending it to the PI System. +Now go to the Fledge user interface, create a new North instance and select the “OMF” plugin on the first screen. Continue with the configuration, choosing the connector relay as the end point to be connected. + +OSISoft Cloud Services +~~~~~~~~~~~~~~~~~~~~~~ + +The original cloud services from OSISoft, this has now been superseded by AVEVA Data Hub, and should only be used to support existing workloads. All new installations should use AVEVA Data Hub. + +Configuration +------------- + +The configuration of the plugin is split into a number of tabs in order to reduce the size of each set of values to enter. Each tab contains a set of related items. + + - **Default Configuration**: This tab contains the base set of configuration items that are most commonly changed. + + - **Asset Framework**: The configuration that impacts the location with the asset framework in which the data will be placed. + + - **Authentication**: The configuration required to authenticate with the OMF end point. + + - **Cloud**: Configuration specific to using the cloud end points for OCS and ADH. + + - **Connection**: This tab contains the configuration items that can be used to tune the connection to the OMF end point. + + - **Formats & Types**: The configuration relating to how types are used and formatted with the OMF data. + + - **Advanced Configuration**: Configuration of the service or task that is supporting the OMF plugin. + + - **Security Configuration**: The configuration options that impact the security of the service that is running OMF. + + - **Developer**: This tab is only visible if the developer features of Fledge have been enabled and will give access to the features aimed at a plugin or pipeline developer. + +Default Configuration +~~~~~~~~~~~~~~~~~~~~~ + +The *Default Configuration* tab contains the most commonly modified items + ++---------------+ +| |OMF_Default| | ++---------------+ + + - **Endpoint**: The type of OMF end point we are connecting with. The options available are + + +-----------------+ + | |OMF_Endpoints| | + +-----------------+ + + - *PI Web API* - A connection to a PI Server that supports the OMF option of the PI Web API. This is the preferred mechanism for sending data to a PI Server. + + - *AVEVA Data Hub* - The AVEVA cloud service. + + - *Connector Relay* - The previous way to send data to a PI Server before PI Web API supported OMF. This should only be used for older PI Servers that do not have the support available within PI Web API. + + - *OSISoft Cloud Services* - The original OSISoft cloud service, this is currently being replaced with the AVEVA Data Hub. 
+ + - *Edge Data Store* - The OSISoft Edge Data Store + + - **Send full structure**: Used to control if Asset Framework structure messages are sent to the PI Server. If this is turned off then the data will not be placed in the Asset Framework. + + - **Naming scheme**: Defines the naming scheme to be used when creating the PI points in the PI Data Archive. See :ref:`Naming_Scheme`. + + - **Server hostname**: The hostname or address of the OMF end point. This is only valid if the end point is a PI Server either with PI Web API or the Connector Relay. This is normally the same address as the PI Server. + + - **Server port**: The port the PI Web API OMF endpoint is listening on. Leave as 0 if you are using the default port. + + - **Data Source**: Defines which data is sent to the OMF end point. The options available are + + - *readings* - The data that has been ingested into Fledge via the South services. + + - *statistics* - Fledge's internal statistics. + + - **Static Data**: Data to include in every reading sent to OMF. For example, you can use this to specify the location of the devices being monitored by the Fledge server. + + +Asset Framework +~~~~~~~~~~~~~~~ + +The OMF plugins has the ability to interact with the PI Asset Framework and put data into the desired locations within the asset framework. It allows a default location to be specified and also a set of rules to be defined that will override that default location. + ++----------+ +| |OMF_AF| | ++----------+ + + - **Default Asset Framework Location**: The location in the Asset Framework hierarchy into which the data will be inserted. + All data will be inserted at this point in the Asset Framework hierarchy unless a later rule overrides this. + Note this field does not include the name of the target Asset Framework Database; + the target database is defined on the PI Web API server by the PI Web API Admin Utility. + + - **Asset Framework Hierarchies Rules**: A set of rules that allow specific readings to be placed elsewhere in the Asset Framework. These rules can be based on the name of the asset itself or some metadata associated with the asset. See `Asset Framework Hierarchy Rules`_. + +Authentication +~~~~~~~~~~~~~~ + +The *Authentication* tab allows the configuration of authentication between the OMF plugin and the OMF endpoint. + ++------------+ +| |OMF_Auth| | ++------------+ + + - **Producer Token**: The Producer Token provided by the PI Relay Data Connection Manager. This is only required when using the older Connector Relay end point for sending data to a PI Server. + + - **PI Web API Authentication Method**: The authentication method to be used: + + - *anonymous* - Anonymous equates to no authentication. + + - *basic* - basic authentication requires a user name and password + + - *kerberos* - Kerberos allows integration with your Single Sign-On environment. + + - **PI Web API User Id**: For Basic authentication, the user name to authenticate with the PI Web API. + + - **PI Web API Password**: For Basic authentication, the password of the user we are using to authenticate. + + - **PI Web API Kerberos keytab file**: The Kerberos keytab file used to authenticate. + +Cloud +~~~~~ + +The *Cloud* tab contains configuration items that are required if the chosen OMF end point is either AVEVA Data Hub or OSISoft Cloud Services. + ++-------------+ +| |OMF_Cloud| | ++-------------+ + + - **Cloud Service Region**: - The region in which your AVEVA Data Hub or OSISoft Cloud Services service is located. 
+ + +---------------+ + | |ADH_Regions| | + +---------------+ + + - **Namespace**: Your namespace within the AVEVA Data Hub or OSISoft Cloud Service. + + - **Tenant ID**: Your AVEVA Data Hub or OSISoft Cloud Services Tenant ID for your account. + + - **Client ID**: Your AVEVA Data Hub or OSISoft Cloud Services Client ID for your account. + + - **Client Secret**: Your AVEVA Data Hub or OSISoft Cloud Services Client Secret. + +Connection +~~~~~~~~~~ + +The *Connection* tab allows a set of tuning parameters to be set for the connection from the OMF plugin to the OMF End point. + ++------------------+ +| |OMF_Connection| | ++------------------+ + + + - **Sleep Time Retry**: Number of seconds to wait before retrying the connection (Fledge doubles this time after each failed attempt). + + - **Maximum Retry**: Maximum number of times to retry connecting to the OMF Endpoint. + + - **HTTP Timeout**: Number of seconds to wait before Fledge will time out an HTTP connection attempt. + + - **Compression**: Compress the readings data before sending them to the OMF endpoint. + +Formats & Types +~~~~~~~~~~~~~~~ + +The *Formats & Types* tab provides a means to specify the detail types that will be used and the way complex assets are mapped to OMF types to also be configured. + ++--------------+ +| |OMF_Format| | ++--------------+ + + - **Integer Format**: Used to match Fledge data types to the data type configured in PI. This defaults to int64 but may be set to any OMF data type compatible with integer data, e.g. int32. + + - **Number Format**: Used to match Fledge data types to the data type configured in PI. The default is float64 but may be set to any OMF datatype that supports floating point values. + + - **Complex Types**: Versions of the OMF plugin prior to 2.1 support complex types in which each asset would have a corresponding OMF type created for it. With the introduction of OMF Version 1.2 support in version 2.1.0 of the plugin support has been added for linked types. These are more versatile and allow for asset structures to change dynamically. The linked types are now the default, however setting this option can force the older complex types to be used. See :ref:`Linked_Types`. Versions of the PI Server from 2020 or before will always use the complex types. The plugin will normally automatically detect this, however if the detection does not correctly enforce this setting then this option should be enabled by the user. .. _Naming_Scheme: @@ -534,8 +578,9 @@ Asset Framework Location Hint ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ An Asset Framework location hint can be added to a reading to control -the placement of the asset within the Asset Framework. An Asset Framework -hint would be as follows: +the placement of the asset within the Asset Framework. +This hint overrides the path in the *Default Asset Framework Location* for the reading. +An Asset Framework hint would be as follows: .. code-block:: console @@ -543,21 +588,28 @@ hint would be as follows: Note the following when defining an *AFLocation* hint: -- An asset in a Fledge Reading is used to create a `Container in the OSIsoft Asset Framework `_. - A *Container* is an AF Element with one or more AF Attributes that are mapped to PI Points using the OSIsoft PI Point Data Reference. - The name of the AF Element comes from the Fledge Reading asset name. - The names of the AF Attributes come from the Fledge Reading datapoint names. -- If you edit the AF Location hint, the Container will be moved to the new location in the AF hierarchy. 
-- If you disable the OMF Hint filter, the Container will not move. -- If you wish to move a Container, you can do this with the PI System Explorer. - Right-click on the AF Element that represents the Container. +- An asset name in a Fledge Reading is used to create an AF Element in the OSIsoft Asset Framework. + Time series data streams become AF Attributes of that AF Element. + This means these AF Attributes are mapped to PI Points using the OSIsoft PI Point Data Reference. +- Deleting the original Reading AF Element is not recommended; + if you delete a Reading AF Element, the OMF North plugin will not recreate it. +- If you wish to move a Reading AF Element, you can do this with the PI System Explorer. + Right-click on the AF Element that represents the Reading AF Element. Choose Copy. - Select the AF Element that will serve as the new parent of the Container. - Right-click and choose *Paste*. - You can then return to the original Container and delete it. + Select the AF Element that will serve as the new parent of the Reading AF Element. + Right-click and choose *Paste* or *Paste Reference*. *Note that PI System Explorer does not have the traditional Cut function for AF Elements*. -- If you move a Container, OMF North will not recreate it. - If you then edit the AF Location hint, the Container will appear in the new location. +- For Linked Types + - If you define an AF Location hint after the Reading AF Element has been created in the default location, + a reference will be created in the location defined by the hint. + - If an AF Location hint was in place when the Reading AF Element was created and you then disable the hint, + a reference will be created in the *Default Asset Framework Location*. + - If you edit the AF Location hint, the Reading AF Element will not move. + A reference to the Reading AF Element will be created in the new location. +- For Complex Types + - If you disable the OMF Hint filter, the Reading AF Element will not move. + - If you edit the AF Location hint, the Reading AF Element will move to the new location in the AF hierarchy. + - No references are created.
Unit Of Measure Hint ~~~~~~~~~~~~~~~~~~~~ diff --git a/docs/building_fledge/06_testing.rst b/docs/building_fledge/06_testing.rst index 9c40cd7e71..066ad1e7c3 100644 --- a/docs/building_fledge/06_testing.rst +++ b/docs/building_fledge/06_testing.rst @@ -263,34 +263,14 @@ The template file looks like this: $ cat /usr/local/fledge/data/extras/fogbench/fogbench_sensor_coap.template.json [ - { "name" : "fogbench_luxometer", - "sensor_values" : [ { "name": "lux", "type": "number", "min": 0, "max": 130000, "precision":3 } ] }, - { "name" : "fogbench_pressure", - "sensor_values" : [ { "name": "pressure", "type": "number", "min": 800.0, "max": 1100.0, "precision":1 } ] }, - { "name" : "fogbench_humidity", - "sensor_values" : [ { "name": "humidity", "type": "number", "min": 0.0, "max": 100.0 }, - { "name": "temperature", "type": "number", "min": 0.0, "max": 50.0 } ] }, - { "name" : "fogbench_temperature", - "sensor_values" : [ { "name": "object", "type": "number", "min": 0.0, "max": 50.0 }, - { "name": "ambient", "type": "number", "min": 0.0, "max": 50.0 } ] }, - { "name" : "fogbench_accelerometer", - "sensor_values" : [ { "name": "x", "type": "number", "min": -2.0, "max": 2.0 }, - { "name": "y", "type": "number", "min": -2.0, "max": 2.0 }, - { "name": "z", "type": "number", "min": -2.0, "max": 2.0 } ] }, - { "name" : "fogbench_gyroscope", - "sensor_values" : [ { "name": "x", "type": "number", "min": -255.0, "max": 255.0 }, - { "name": "y", "type": "number", "min": -255.0, "max": 255.0 }, - { "name": "z", "type": "number", "min": -255.0, "max": 255.0 } ] }, - { "name" : "fogbench_magnetometer", - "sensor_values" : [ { "name": "x", "type": "number", "min": -255.0, "max": 255.0 }, - { "name": "y", "type": "number", "min": -255.0, "max": 255.0 }, - { "name": "z", "type": "number", "min": -255.0, "max": 255.0 } ] }, - { "name" : "fogbench_mouse", - "sensor_values" : [ { "name": "button", "type": "enum", "list": [ "up", "down" ] } ] }, - { "name" : "fogbench_switch", - "sensor_values" : [ { "name": "button", "type": "enum", "list": [ "up", "down" ] } ] }, - { "name" : "fogbench_wall clock", - "sensor_values" : [ { "name": "tick", "type": "enum", "list": [ "tock" ] } ] } + { "name" : "asset_1", + "sensor_values" : [ { "name": "dp_1", "type": "number", "min": -2.0, "max": 2.0 }, + { "name": "dp_1", "type": "number", "min": -2.0, "max": 2.0 }, + { "name": "dp_1", "type": "number", "min": -2.0, "max": 2.0 } ] }, + { "name" : "asset_2", + "sensor_values" : [ { "name": "lux", "type": "number", "min": 0, "max": 130000, "precision":3 } ] }, + { "name" : "asset_3", + "sensor_values" : [ { "name": "pressure", "type": "number", "min": 800.0, "max": 1100.0, "precision":1 } ] } ] $ @@ -304,66 +284,67 @@ Now you should have all the information necessary to test the CoAP South microse - ``$FLEDGE_ROOT/scripts/extras/fogbench`` ``-t $FLEDGE_ROOT/data/extras/fogbench/fogbench_sensor_coap.template.json``, if you are in a development environment, with the *FLEDGE_ROOT* environment variable set with the path to your project repository folder - ``$FLEDGE_ROOT/bin/fogbench -t $FLEDGE_DATA/extras/fogbench/fogbench_sensor_coap.template.json``, if you are in a deployed environment, with *FLEDGE_ROOT* and *FLEDGE_DATA* set correctly. - - If you have installed Fledge in the default location (i.e. */usr/local/fledge*), type ``cd /usr/local/fledge;bin/fogbench -t data/extras/fogbench/fogbench_sensor_coap.template.json``. 
-- ``fledge.fogbench`` ``-t /snap/fledge/current/usr/local/fledge/data/extras/fogbench/fogbench_sensor_coap.template.json``, if you have installed a snap version of Fledge. + - If you have installed Fledge in the default location (i.e. */usr/local/fledge*), type ``/usr/local/fledge/bin/fogbench -t /usr/local/fledge/data/extras/fogbench/fogbench_sensor_coap.template.json``. In development environment the output of your command should be: .. code-block:: console - $ $FLEDGE_ROOT/scripts/extras/fogbench -t data/extras/fogbench/fogbench_sensor_coap.template.json - >>> Make sure south CoAP plugin service is running & listening on specified host and port - Total Statistics: + $ $FLEDGE_ROOT/scripts/extras/fogbench -t $FLEDGE_ROOT/data/extras/fogbench/fogbench_sensor_coap.template.json + >>> Make sure south CoAP plugin service is running + & listening on specified host and port + + Total Statistics: - Start Time: 2017-12-17 07:17:50.615433 - Ene Time: 2017-12-17 07:17:50.650620 + Start Time: 2023-04-14 11:15:50.679366 + End Time: 2023-04-14 11:15:50.711856 - Total Messages Transferred: 10 - Total Bytes Transferred: 2880 + Total Messages Transferred: 3 + Total Bytes Transferred: 720 - Total Iterations: 1 - Total Messages per Iteration: 10.0 - Total Bytes per Iteration: 2880.0 + Total Iterations: 1 + Total Messages per Iteration: 3.0 + Total Bytes per Iteration: 720.0 - Min messages/second: 284.19586779208225 - Max messages/second: 284.19586779208225 - Avg messages/second: 284.19586779208225 + Min messages/second: 92.33610341643583 + Max messages/second: 92.33610341643583 + Avg messages/second: 92.33610341643583 - Min Bytes/second: 81848.4099241197 - Max Bytes/second: 81848.4099241197 - Avg Bytes/second: 81848.4099241197 + Min Bytes/second: 22160.6648199446 + Max Bytes/second: 22160.6648199446 + Avg Bytes/second: 22160.6648199446 $ -Congratulations! You have just inserted data into Fledge from the CoAP South microservice. More specifically, the output informs you that the data inserted has been composed by 10 different messages for a total of 2,880 Bytes, for an average of 284 messages per second and 81,848 Bytes per second. +Congratulations! You have just inserted data into Fledge from the CoAP South microservice. More specifically, the output informs you that the data inserted is composed of 3 different messages for a total of 720 Bytes, for an average of 92 messages per second and 22,160 Bytes per second. If you want to stress Fledge a bit, you may insert the same data sample several times, by using the *-I* or *--iterations* argument: ..
code-block:: console $ $FLEDGE_ROOT/scripts/extras/fogbench -t data/extras/fogbench/fogbench_sensor_coap.template.json -I 100 - >>> Make sure south CoAP plugin service is running & listening on specified host and port - Total Statistics: + >>> Make sure south CoAP plugin service is running & listening on specified host and port + Total Statistics: - Start Time: 2017-12-17 07:33:40.568130 - End Time: 2017-12-17 07:33:43.205626 + Start Time: 2023-04-14 11:18:03.586924 + End Time: 2023-04-14 11:18:04.582291 - Total Messages Transferred: 1000 - Total Bytes Transferred: 288000 + Total Messages Transferred: 300 + Total Bytes Transferred: 72000 - Total Iterations: 100 - Total Messages per Iteration: 10.0 - Total Bytes per Iteration: 2880.0 + Total Iterations: 100 + Total Messages per Iteration: 3.0 + Total Bytes per Iteration: 720.0 - Min messages/second: 98.3032852957946 - Max messages/second: 625.860558267618 - Avg messages/second: 455.15247432732866 + Min messages/second: 90.53597295992274 + Max messages/second: 454.33893684688775 + Avg messages/second: 323.7178365566367 - Min Bytes/second: 28311.346165188843 - Max Bytes/second: 180247.840781074 - Avg Bytes/second: 131083.9126062706 + Min Bytes/second: 21728.63351038146 + Max Bytes/second: 109041.34484325306 + Avg Bytes/second: 77692.28077359282 $ -Here we have inserted the same set of data 100 times, therefore the total number of Bytes inserted is 288,000. The performance and insertion rates varies with each iteration and *fogbench* presents the minimum, maximum and average values. +Here we have inserted the same set of data 100 times, therefore the total number of Bytes inserted is 72,000. The performance and insertion rates varies with each iteration and *fogbench* presents the minimum, maximum and average values. Checking What's Inside Fledge @@ -374,13 +355,13 @@ We can check if Fledge has now stored what we have inserted from the South micro .. code-block:: console $ curl -s http://localhost:8081/fledge/asset ; echo - [{"assetCode": "fogbench_switch", "count": 11}, {"assetCode": "fogbench_temperature", "count": 11}, {"assetCode": "fogbench_humidity", "count": 11}, {"assetCode": "fogbench_luxometer", "count": 11}, {"assetCode": "fogbench_accelerometer", "count": 11}, {"assetCode": "wall clock", "count": 11}, {"assetCode": "fogbench_magnetometer", "count": 11}, {"assetCode": "mouse", "count": 11}, {"assetCode": "fogbench_pressure", "count": 11}, {"assetCode": "fogbench_gyroscope", "count": 11}] + [{"count": 11, "assetCode": "asset_1"}, {"count": 11, "assetCode": "asset_2"}, {"count": 11, "assetCode": "asset_3"}] $ The output of the asset entry point provides a list of assets buffered in Fledge and the count of elements stored. The output is a JSON array with two elements: -- **assetCode** : the name of the sensor or device that provides the data -- **count** : the number of occurrences of the asset in the buffer +- **count** : the number of occurrences of the asset in the buffer. +- **assetCode** : the name of the sensor or device that provides the data. Feeding East/West Applications @@ -390,53 +371,53 @@ Let's suppose that we are interested in the data collected for one of the assets .. 
code-block:: console - $ curl -s http://localhost:8081/fledge/asset/fogbench_temperature ; echo - [{"timestamp": "2017-12-18 10:38:29.652", "reading": {"ambient": 13, "object": 41}}, {"timestamp": "2017-12-18 10:38:29.652", "reading": {"ambient": 13, "object": 41}}, {"timestamp": "2017-12-18 10:38:29.652", "reading": {"ambient": 13, "object": 41}}, {"timestamp": "2017-12-18 10:38:29.652", "reading": {"ambient": 13, "object": 41}}, {"timestamp": "2017-12-18 10:38:29.652", "reading": {"ambient": 13, "object": 41}}, {"timestamp": "2017-12-18 10:38:29.652", "reading": {"ambient": 13, "object": 41}}, {"timestamp": "2017-12-18 10:38:29.652", "reading": {"ambient": 13, "object": 41}}, {"timestamp": "2017-12-18 10:38:29.652", "reading": {"ambient": 13, "object": 41}}, {"timestamp": "2017-12-18 10:38:29.652", "reading": {"ambient": 13, "object": 41}}, {"timestamp": "2017-12-18 10:38:29.652", "reading": {"ambient": 13, "object": 41}}, {"timestamp": "2017-12-18 10:38:12.580", "reading": {"ambient": 33, "object": 7}}] + $ curl -s http://localhost:8081/fledge/asset/asset_2 ; echo + [{"reading": {"lux": 75723.923}, "timestamp": "2023-04-14 11:25:05.672528"}, {"reading": {"lux": 50475.99}, "timestamp": "2023-04-14 11:24:49.767983"}, {"reading": {"lux": 50475.99}, "timestamp": "2023-04-14 11:24:49.767983"}, {"reading": {"lux": 50475.99}, "timestamp": "2023-04-14 11:24:49.767983"}, {"reading": {"lux": 50475.99}, "timestamp": "2023-04-14 11:24:49.767983"}, {"reading": {"lux": 50475.99}, "timestamp": "2023-04-14 11:24:49.767983"}, {"reading": {"lux": 50475.99}, "timestamp": "2023-04-14 11:24:49.767983"}, {"reading": {"lux": 50475.99}, "timestamp": "2023-04-14 11:24:49.767983"}, {"reading": {"lux": 50475.99}, "timestamp": "2023-04-14 11:24:49.767983"}, {"reading": {"lux": 50475.99}, "timestamp": "2023-04-14 11:24:49.767983"}, {"reading": {"lux": 50475.99}, "timestamp": "2023-04-14 11:24:49.767983"}] $ Let's see the JSON output on a more readable format: .. 
code-block:: json - [ { "timestamp": "2017-12-18 10:38:29.652", "reading": {"ambient": 13, "object": 41} }, - { "timestamp": "2017-12-18 10:38:29.652", "reading": {"ambient": 13, "object": 41} }, - { "timestamp": "2017-12-18 10:38:29.652", "reading": {"ambient": 13, "object": 41} }, - { "timestamp": "2017-12-18 10:38:29.652", "reading": {"ambient": 13, "object": 41} }, - { "timestamp": "2017-12-18 10:38:29.652", "reading": {"ambient": 13, "object": 41} }, - { "timestamp": "2017-12-18 10:38:29.652", "reading": {"ambient": 13, "object": 41} }, - { "timestamp": "2017-12-18 10:38:29.652", "reading": {"ambient": 13, "object": 41} }, - { "timestamp": "2017-12-18 10:38:29.652", "reading": {"ambient": 13, "object": 41} }, - { "timestamp": "2017-12-18 10:38:29.652", "reading": {"ambient": 13, "object": 41} }, - { "timestamp": "2017-12-18 10:38:29.652", "reading": {"ambient": 13, "object": 41} }, - { "timestamp": "2017-12-18 10:38:12.580", "reading": {"ambient": 33, "object": 7} } ] + [ + {"reading": {"lux": 75723.923}, "timestamp": "2023-04-14 11:25:05.672528"}, + {"reading": {"lux": 50475.99}, "timestamp": "2023-04-14 11:24:49.767983"}, + {"reading": {"lux": 50475.99}, "timestamp": "2023-04-14 11:24:49.767983"}, + {"reading": {"lux": 50475.99}, "timestamp": "2023-04-14 11:24:49.767983"}, + {"reading": {"lux": 50475.99}, "timestamp": "2023-04-14 11:24:49.767983"}, + {"reading": {"lux": 50475.99}, "timestamp": "2023-04-14 11:24:49.767983"}, + {"reading": {"lux": 50475.99}, "timestamp": "2023-04-14 11:24:49.767983"}, + {"reading": {"lux": 50475.99}, "timestamp": "2023-04-14 11:24:49.767983"}, + {"reading": {"lux": 50475.99}, "timestamp": "2023-04-14 11:24:49.767983"}, + {"reading": {"lux": 50475.99}, "timestamp": "2023-04-14 11:24:49.767983"}, + {"reading": {"lux": 50475.99}, "timestamp": "2023-04-14 11:24:49.767983"} + ] The JSON structure depends on the sensor and the plugin used to capture the data. In this case, the values shown are: +- **reading** : a JSON structure that is the set of data points provided by the sensor. In this case there is only one datapoint, named *lux*: +- **lux** : the lux meter value. - **timestamp** : the timestamp generated by the sensors. In this case, since we have inserted 10 times the same value and one time a new value using *fogbench*, the result is 10 timestamps with the same value and one timestamp with a different value. -- **reading** : a JSON structure that is the set of data points provided by the sensor. In this case: -- **ambient** : the ambient temperature in Celsius -- **object** : the object temperature in Celsius. Again, the values are repeated 10 times, due to the iteration executed by *fogbench*, plus an isolated element, so there are 11 readings in total. Also, it is very unlikely that in a real sensor the ambient and the object temperature differ so much, but here we are using a random number generator. - -You can dig even more in the data and extract only a subset of the reading. Fog example, you can select the ambient temperature and limit to the last 5 readings: +You can dig even more in the data and extract only a subset of the reading. For example, you can select the lux and limit to the last 5 readings: ..
code-block:: console - $ curl -s http://localhost:8081/fledge/asset/fogbench_temperature/ambient?limit=5 ; echo - [ { "ambient": 13, "timestamp": "2017-12-18 10:38:29.652" }, - { "ambient": 13, "timestamp": "2017-12-18 10:38:29.652" } - { "ambient": 13, "timestamp": "2017-12-18 10:38:29.652" }, - { "ambient": 13, "timestamp": "2017-12-18 10:38:29.652" }, - { "ambient": 13, "timestamp": "2017-12-18 10:38:29.652" } ] + $ curl -s http://localhost:8081/fledge/asset/asset_2/lux?limit=5 ; echo + [ + {"timestamp": "2023-04-14 11:25:05.672528", "lux": 75723.923}, + {"timestamp": "2023-04-14 11:24:49.767983", "lux": 50475.99}, + {"timestamp": "2023-04-14 11:24:49.767983", "lux": 50475.99}, + {"timestamp": "2023-04-14 11:24:49.767983", "lux": 50475.99}, + {"timestamp": "2023-04-14 11:24:49.767983", "lux": 50475.99} + ] $ - We have beautified the JSON output for you, so it is more readable. .. note:: When you select a specific element in the reading, the timestamp and the element are presented in the opposite order compared to the previous example. This is a known issue that will be fixed in the next version. - Sending Greetings to the Northern Hemisphere ============================================ diff --git a/docs/conf.py b/docs/conf.py index 635789cc32..18529328ca 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -177,4 +177,4 @@ # Pass Plugin DOCBRANCH argument in Makefile ; by default develop # NOTE: During release time we need to replace DOCBRANCH with actual released version -subprocess.run(["make generated DOCBRANCH='2.1.0RC'"], shell=True, check=True) +subprocess.run(["make generated DOCBRANCH='2.2.0RC'"], shell=True, check=True) diff --git a/docs/control.rst b/docs/control.rst index 9d9df81755..3d6dc2404f 100644 --- a/docs/control.rst +++ b/docs/control.rst @@ -14,13 +14,30 @@ .. |north_map4| image:: images/north_map4.jpg .. |opcua_server| image:: images/opcua_server.jpg .. |dispatcher_config| image:: images/dispatcher-config.jpg +.. |pipeline_list| image:: images/control/pipeline_list.jpg +.. |pipeline_add| image:: images/control/pipeline_add.jpg +.. |pipeline_menu| image:: images/control/pipeline_menu.jpg +.. |pipeline_model| image:: images/control/pipeline_model.jpg +.. |pipeline_source| image:: images/control/pipeline_source.jpg +.. |pipeline_filter_add| image:: images/control/pipeline_filter_add.jpg +.. |pipeline_filter_config| image:: images/control/pipeline_filter_config.jpg +.. |pipeline_context_menu| image:: images/control/pipeline_context_menu.jpg +.. |pipeline_destination| image:: images/control/pipeline_destination.jpg +.. Links +.. |ExpressionFilter| raw:: html + + expression filter + +.. |DeltaFilter| raw:: html + + delta filter ************************ Fledge Control Features ************************ -Fledge supports facilities that allows control of devices via the south service and plugins. This control in known as *set point control* as it is not intended for real time critical control of devices but rather to modify the behavior of a device based on one of many different information flows. The latency involved in these control operations is highly dependent on the control path itself and also the scheduling limitations of the underlying operating system. Hence the caveat that the control functions are not real time or guaranteed to be actioned within a specified time window. +Fledge supports facilities that allows control of devices via the south service and plugins. 
This control is known as *set point control* as it is not intended for real time critical control of devices but rather to modify the behavior of a device based on one of many different information flows. The latency involved in these control operations is highly dependent on the control path itself and also the scheduling limitations of the underlying operating system. Hence the caveat that the control functions are not real time or guaranteed to be actioned within a specified time window. This does not mean however that they can not be used for non-critical closed loop control, but we would not advise the use of this functionality in safety critical situations. Control Functions ================= @@ -56,6 +73,8 @@ Currently only the notification method is fully implemented within Fledge. The use of a notification in the Fledge instance itself provides the fastest response for an edge notification. All the processing for this is done on the edge by Fledge itself. +As with the data ingress and egress features of Fledge it is also possible to build filter pipelines in the control paths in order to alter the behavior and process the data in the control path. Pipelines in the control path are defined between the different end points of control operations, in such a way that the same pipeline can be utilised by multiple control paths. See :ref:`ControlPipelines`. + Edge Based Control ------------------ @@ -380,3 +399,172 @@ Advanced Configuration - **Minimum Log Level**: Allows the minimum level at which logs will get written to the system log to be defined. - **Maximum number of dispatcher threads**: Dispatcher threads are used to execute automation scripts. Each script utilizes a single thread for the duration of the execution of the script. Therefore this setting determines how many scripts can be executed in parallel. + +.. _ControlPipelines: + +Control Pipelines +================= + +A control pipeline is very similar to pipelines in Fledge's data path, i.e. the ingress pipelines of a south service or the egress pipelines in the north data path. A control pipeline comprises an ordered set of filters through which the data in the control path is pushed. Each individual filter in the pipeline can add, remove or modify the data as it flows through the filter; in this case, however, the data are the set point writes and operations. + +The flow of control requests is organised in such a way that the same filters that are used for data ingress in a south service or data egress in a north service can be used for control pipelines. This is done by mapping control data to asset names, datapoint names and values in the control path pipeline. + +Mapping Rules +------------- + +For a set point write the name of the asset will always be set to *reading*; the asset that is created will have a set of datapoints, one for each set point write operation that is to be executed. The name of the datapoint is the name of the set point to be written and the value of the datapoint is the value to set. + +For example, if a set point write wishes to set the *Pump Speed* set point to *80* and the *Pump Running* set point to *True* then the reading that would be created and passed to the filter would have the asset_code of *reading* and two data points, one called *Pump Speed* with a value of *80* and another called *Pump Running* with a value of *True*. + +This reading can then be manipulated by a filter in the same way as in any other pipeline.
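+
+As an informal illustration of this mapping (a sketch only, not the actual internal representation used by the control dispatcher), the set point write above could be thought of as producing the following structure before any filters run:
+
+.. code-block:: python
+
+    # Illustrative sketch only: a set point write is presented to the control
+    # filter pipeline as a reading whose asset name is always "reading" and
+    # whose datapoints are the individual set points being written.  Values
+    # arrive as strings and are converted to numeric types where possible
+    # (see the Data Types section below).
+    set_point_write = {"Pump Speed": "80", "Pump Running": "True"}
+
+    mapped_reading = {
+        "asset_code": "reading",
+        "datapoints": {name: value for name, value in set_point_write.items()},
+    }
+    # mapped_reading == {"asset_code": "reading",
+    #                    "datapoints": {"Pump Speed": "80", "Pump Running": "True"}}
+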
For example the |ExpressionFilter| filter could be used to scale the pump speed. If the requirement was to multiply the pump speed by 10, then the expression defined would be *Pump Speed * 10*. + +In the case of an operation the mapping is very similar, except that the asset_code in the reading becomes the operation name and the data points are the parameters of the operation. + +For example, if an operation *Start Fan* required a parameter of *Fan Speed* then a reading with an asset_code of *Start Fan* with a single datapoint called *Fan Speed* would be created and passed through the filter pipeline. + +Data Types +~~~~~~~~~~ + +The values of all set points and the parameters of all operations are passed within the control services and between services as string representations; however, they are converted to appropriate types when passed through the filter pipeline. If a value can be represented as an integer it will be, and likewise for floating point values. + +.. note:: + + Currently complex types such as Image, Data Buffer and Array data can not be represented in the control pipelines. + +Pipeline Connections +-------------------- + +The control pipelines are not defined against a particular end point as they are with the data pipelines; they are defined separately, and part of that definition includes the input and output end points to which the control pipeline may be attached. The input and output of a control pipeline may be defined as being able to connect to one of a set of endpoints. + +.. list-table:: + :widths: 20 20 70 + :header-rows: 1 + + * - Type + - Endpoints + - Description + * - Any + - Both + - The pipeline can connect to any source or destination. This is only used in situations where an exact match for an endpoint can not be satisfied. + * - API + - Source + - The source of the request is an API call to the public API of the Fledge instance. + * - Asset + - Destination + - The data will be sent to the service that is responsible for ingesting the named asset. + * - Broadcast + - Destination + - The requests will be sent to all south services that support control. + * - Notification + - Source + - The request originated from the named notification. + * - Schedule + - Source + - The request originated from a schedule. + * - Script + - Source + - The request is either originating from a script or being sent to a script. + * - Service + - Source + - The request is either coming from a named service or going to a named service. + +Control pipelines are always executed in the control dispatcher service. When a request comes into the service it will look for a pipeline to pass that request through. This process will look at the source of the request and the destination of the request. If a pipeline exists that has source and destination endpoints that are an exact match for the source and destination of the control request, then the control request will be processed through that pipeline. + +If no exact match is found then the source of the request will be checked against the defined pipelines for a match with the specified source and a destination of *any*. If there is a pipeline that matches these criteria it will be used. If not then a check is made for a pipeline with a source of *any* and a destination that matches the destination of this request. + +If all the above tests fail then a final test is made for a pipeline with a source of *any* and a destination of *any*. If no match occurs then the request is processed without passing through any filters.
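+
+The resolution order described above can be summarised with the following sketch; this is illustrative pseudocode only and not the actual control dispatcher implementation:
+
+.. code-block:: python
+
+    # Illustrative sketch of the pipeline lookup order described above.
+    # "pipelines" is assumed to be a dictionary keyed by (source, destination);
+    # this is not the real control dispatcher code.
+    def find_pipeline(pipelines, source, destination):
+        for key in ((source, destination),   # exact match on source and destination
+                    (source, "any"),         # matching source, any destination
+                    ("any", destination),    # any source, matching destination
+                    ("any", "any")):         # final catch-all
+            if key in pipelines:
+                return pipelines[key]
+        return None  # no pipeline matched: the request is processed unfiltered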
+ +If a request is processed by a script in the control dispatcher then this request may pass through multiple filters, one from the source to the script and then one for each script step that performs a set point write or operation. Each of these may be a different pipeline. + +Pipeline Execution Models +~~~~~~~~~~~~~~~~~~~~~~~~~ + +When a pipeline is defined it may be set to use a *Shared* execution model or an *Exclusive* execution model. This is only important if any of the filters in the pipeline persist state that impacts future processing. + +In a *Shared* execution model one pipeline instance will be created and any requests that resolve to the pipeline will share the same instance of the pipeline. This saves creating multiple objects within the control dispatcher and is the preferred model to use. + +However if the filters in the pipeline store previous data and use it to influence future decisions, such as the |DeltaFilter|, this behavior is undesirable as requests from different sources or destined for different destinations may interfere with each other. In this case the *Exclusive* execution model should be used. + +In an *Exclusive* execution model a new instance of the pipeline will be created for each distinct source and destination of the control request that utilises the pipeline. This ensures that the different instances of the pipeline can not interfere with each other. + +Control Pipeline Management +--------------------------- + +The Fledge Graphical User Interface provides a mechanism to manage the control pipelines; this is found in the Control sub menu under the pipelines item. + ++-----------------+ +| |pipeline_menu| | ++-----------------+ + +The user is presented with a list of the pipelines that have been created to date and an option in the top right corner to add a new pipeline. + ++-----------------+ +| |pipeline_list| | ++-----------------+ + +The list displays the name of the pipeline, the source and destination of the pipeline, the filters in the pipeline, the execution model and the enabled/disabled state of the pipeline. + +The user has a number of actions that may be taken from this screen. + + - Enable or disable the pipeline by clicking on the checkbox to the left of the pipeline name. + + - Click on the name of the pipeline to view and edit it. + + - Click on the three vertical dots to view the context menu. + + +-------------------------+ + | |pipeline_context_menu| | + +-------------------------+ + + Currently the only operation that is supported is delete. + + - Click on the Add option in the top right corner to define a new pipeline. + +Adding A Control Pipeline +~~~~~~~~~~~~~~~~~~~~~~~~~ + +Clicking on the add option will display the screen to add a new control pipeline. + ++----------------+ +| |pipeline_add| | ++----------------+ + + - **Name**: The name of the control pipeline. This should be a unique name that is used to identify the control pipeline. + + - **Execution**: The execution model to use to run this pipeline. In most cases the *Shared* execution model is sufficient. + + - **Source**: The control source that this pipeline may be used with. + + +-------------------+ + | |pipeline_source| | + +-------------------+ + + - **Filters**: The filters in the pipeline. Click on *Add new filter* to add a new filter to the pipeline. + + + Clicking on the *Add filter* link will display a dialog in which the filter plugin can be chosen and named.
+ + +-----------------------+ + | |pipeline_filter_add| | + +-----------------------+ + + Clicking on *Next* from this dialog will display the configuration for the chosen filter; in this case we have chosen the |ExpressionFilter|. + + +--------------------------+ + | |pipeline_filter_config| | + +--------------------------+ + + The filter should then be configured in the same way as it would for data path pipelines. + + On clicking *Done* the dialog will disappear and the original screen will be shown with the newly added filter displayed in the list of filters. More filters can be added by clicking on the *Add new filter* link. If multiple filters are in the pipeline they can be re-ordered by dragging them around to change the order. + + - **Destination**: The control destination that this pipeline may be used with. + + +------------------------+ + | |pipeline_destination| | + +------------------------+ + + - **Enabled**: Enable the execution of the pipeline. + +Finally click on the *Save* button to save the new control pipeline. + diff --git a/docs/fledge-rule-DataAvailability/images/data-availability.png b/docs/fledge-rule-DataAvailability/images/data-availability.png new file mode 100644 index 0000000000..beb62aa7b5 Binary files /dev/null and b/docs/fledge-rule-DataAvailability/images/data-availability.png differ diff --git a/docs/fledge-rule-DataAvailability/index.rst b/docs/fledge-rule-DataAvailability/index.rst new file mode 100644 index 0000000000..6a169a527f --- /dev/null +++ b/docs/fledge-rule-DataAvailability/index.rst @@ -0,0 +1,15 @@ +.. Images +.. |data-availability| image:: images/data-availability.png + +DataAvailability Rule +===================== + +This is a built-in rule that triggers every time it receives data that matches one of the asset codes or audit codes given in the configuration. + ++---------------------+ +| |data-availability| | ++---------------------+ + + - **Audit Code**: Audit log code to monitor. Leave blank if not required, or set to * for all codes. To monitor several audit codes, a comma separated list can be entered, e.g. SRVRG, SRVUN + + - **Asset Code**: Asset code to monitor. Leave blank if not required. diff --git a/docs/fledge-rule-Threshold/images/threshold.jpg b/docs/fledge-rule-Threshold/images/threshold.jpg index c73e952731..37f85b4993 100644 Binary files a/docs/fledge-rule-Threshold/images/threshold.jpg and b/docs/fledge-rule-Threshold/images/threshold.jpg differ diff --git a/docs/fledge-rule-Threshold/images/threshold_source.jpg b/docs/fledge-rule-Threshold/images/threshold_source.jpg new file mode 100644 index 0000000000..700bfe9a89 Binary files /dev/null and b/docs/fledge-rule-Threshold/images/threshold_source.jpg differ diff --git a/docs/fledge-rule-Threshold/index.rst b/docs/fledge-rule-Threshold/index.rst index af77e50ab0..659c80bcf6 100644 --- a/docs/fledge-rule-Threshold/index.rst +++ b/docs/fledge-rule-Threshold/index.rst @@ -1,5 +1,6 @@ .. Images .. |threshold| image:: images/threshold.jpg +.. |source| image:: images/threshold_source.jpg Threshold Rule ============== @@ -12,9 +13,15 @@ The configuration of the rule allows the threshold value to be set, the operatio | |threshold| | +-------------+ - - **Asset name**: The name of the asset that is tested by the rule. + - **Data Source**: The source of the data used for the rule evaluation. This may be one of Readings, Statistics or Statistics History. See details below. - - **Datapoint Name**: The name of the datapoint in the asset used for the test.
+ +----------+ + | |source| | + +----------+ + + - **Name**: The name of the asset or statistic that is tested by the rule. + + - **Value**: The name of the datapoint in the asset used for the test. This is only required if the *Data Source* above is set to *Readings*. - **Condition**: The condition that is being tested, this may be one of >, >=, <= or <. @@ -25,3 +32,27 @@ The configuration of the rule allows the threshold value to be set, the operatio - **Window evaluation**: Only valid if evaluation data is set to Window. This determines if the value used in the rule evaluation is the average, minimum or maximum over the duration of the window. - **Time window**: Only valid if evaluation data is set to Window. This determines the time span of the window. + +Data Source +----------- + +The rule may be used to test the values of the data that is ingested by +south services within Fledge or the statistics that Fledge itself creates. + +When the rule examines a reading in the Fledge data stream it must be +given the name of the asset to observe and the name of the data point +within that asset. The data points within the asset should contain +numeric data. + +When observing a statistic there are two choices that can be made: +to monitor the raw statistic value, which is a simple count, or to +examine the statistic history. The value received by the threshold rule +for a statistic is the increment that is added to the statistic and not +the absolute value of the statistic. + +The statistic history is the value seen plotted in +the dashboard graphs and shows the change in the statistic value over +a defined period. By default the period is 15 seconds, however this is +configurable. In the case of statistics all that is required is the name +of the statistic to monitor; there is no associated data point name as +each statistic is a single value.
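+
+As an informal illustration of the evaluation options described above (a sketch only, not the actual rule plugin implementation), the decision made by the rule can be thought of as follows:
+
+.. code-block:: python
+
+    # Illustrative sketch of the Threshold rule evaluation; not the plugin code.
+    # "values" holds the numeric values observed during the evaluation window,
+    # "condition" is one of the configured conditions: >, >=, <= or <.
+    import operator
+
+    CONDITIONS = {">": operator.gt, ">=": operator.ge,
+                  "<=": operator.le, "<": operator.lt}
+
+    def triggered(values, condition, trigger_value, window_evaluation="average"):
+        if window_evaluation == "average":
+            observed = sum(values) / len(values)
+        elif window_evaluation == "minimum":
+            observed = min(values)
+        else:  # "maximum"
+            observed = max(values)
+        return CONDITIONS[condition](observed, trigger_value)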
diff --git a/docs/images/ADH_Regions.jpg b/docs/images/ADH_Regions.jpg new file mode 100644 index 0000000000..73c58866fd Binary files /dev/null and b/docs/images/ADH_Regions.jpg differ diff --git a/docs/images/OMF_AF.jpg b/docs/images/OMF_AF.jpg new file mode 100644 index 0000000000..93070a5c27 Binary files /dev/null and b/docs/images/OMF_AF.jpg differ diff --git a/docs/images/OMF_Auth.jpg b/docs/images/OMF_Auth.jpg new file mode 100644 index 0000000000..d19c736a8e Binary files /dev/null and b/docs/images/OMF_Auth.jpg differ diff --git a/docs/images/OMF_Cloud.jpg b/docs/images/OMF_Cloud.jpg new file mode 100644 index 0000000000..022591a95b Binary files /dev/null and b/docs/images/OMF_Cloud.jpg differ diff --git a/docs/images/OMF_Connection.jpg b/docs/images/OMF_Connection.jpg new file mode 100644 index 0000000000..976b86cde6 Binary files /dev/null and b/docs/images/OMF_Connection.jpg differ diff --git a/docs/images/OMF_Default.jpg b/docs/images/OMF_Default.jpg new file mode 100644 index 0000000000..3d7b17ec09 Binary files /dev/null and b/docs/images/OMF_Default.jpg differ diff --git a/docs/images/OMF_Endpoints.jpg b/docs/images/OMF_Endpoints.jpg new file mode 100644 index 0000000000..9dcbd70fac Binary files /dev/null and b/docs/images/OMF_Endpoints.jpg differ diff --git a/docs/images/OMF_Format.jpg b/docs/images/OMF_Format.jpg new file mode 100644 index 0000000000..06eecf456b Binary files /dev/null and b/docs/images/OMF_Format.jpg differ diff --git a/docs/images/OMF_Formats.jpg b/docs/images/OMF_Formats.jpg new file mode 100644 index 0000000000..ea6bcba7a6 Binary files /dev/null and b/docs/images/OMF_Formats.jpg differ diff --git a/docs/images/add_user.jpg b/docs/images/add_user.jpg index d68af322f8..8e53d4de75 100644 Binary files a/docs/images/add_user.jpg and b/docs/images/add_user.jpg differ diff --git a/docs/images/change_role.jpg b/docs/images/change_role.jpg index 68919a29ee..0f9583ac9a 100644 Binary files a/docs/images/change_role.jpg and b/docs/images/change_role.jpg differ diff --git a/docs/images/control/pipeline_add.jpg b/docs/images/control/pipeline_add.jpg new file mode 100644 index 0000000000..3f7ee3f9b4 Binary files /dev/null and b/docs/images/control/pipeline_add.jpg differ diff --git a/docs/images/control/pipeline_context_menu.jpg b/docs/images/control/pipeline_context_menu.jpg new file mode 100644 index 0000000000..87e2f0690a Binary files /dev/null and b/docs/images/control/pipeline_context_menu.jpg differ diff --git a/docs/images/control/pipeline_destination.jpg b/docs/images/control/pipeline_destination.jpg new file mode 100644 index 0000000000..2ab93471b5 Binary files /dev/null and b/docs/images/control/pipeline_destination.jpg differ diff --git a/docs/images/control/pipeline_filter_add.jpg b/docs/images/control/pipeline_filter_add.jpg new file mode 100644 index 0000000000..92cd069dd8 Binary files /dev/null and b/docs/images/control/pipeline_filter_add.jpg differ diff --git a/docs/images/control/pipeline_filter_config.jpg b/docs/images/control/pipeline_filter_config.jpg new file mode 100644 index 0000000000..3beec47472 Binary files /dev/null and b/docs/images/control/pipeline_filter_config.jpg differ diff --git a/docs/images/control/pipeline_list.jpg b/docs/images/control/pipeline_list.jpg new file mode 100644 index 0000000000..0f87d8d651 Binary files /dev/null and b/docs/images/control/pipeline_list.jpg differ diff --git a/docs/images/control/pipeline_menu.jpg b/docs/images/control/pipeline_menu.jpg new file mode 100644 index 0000000000..05e7ff304c Binary files 
/dev/null and b/docs/images/control/pipeline_menu.jpg differ diff --git a/docs/images/control/pipeline_model.jpg b/docs/images/control/pipeline_model.jpg new file mode 100644 index 0000000000..fd9c8be651 Binary files /dev/null and b/docs/images/control/pipeline_model.jpg differ diff --git a/docs/images/control/pipeline_source.jpg b/docs/images/control/pipeline_source.jpg new file mode 100644 index 0000000000..3649d62b58 Binary files /dev/null and b/docs/images/control/pipeline_source.jpg differ diff --git a/docs/images/gui_settings.jpg b/docs/images/gui_settings.jpg new file mode 100644 index 0000000000..c7790af64e Binary files /dev/null and b/docs/images/gui_settings.jpg differ diff --git a/docs/images/multi_graph1.jpg b/docs/images/multi_graph1.jpg new file mode 100644 index 0000000000..ee4572bf33 Binary files /dev/null and b/docs/images/multi_graph1.jpg differ diff --git a/docs/images/multi_graph2.jpg b/docs/images/multi_graph2.jpg new file mode 100644 index 0000000000..092b61afc3 Binary files /dev/null and b/docs/images/multi_graph2.jpg differ diff --git a/docs/images/multi_graph3.jpg b/docs/images/multi_graph3.jpg new file mode 100644 index 0000000000..f6c63a122e Binary files /dev/null and b/docs/images/multi_graph3.jpg differ diff --git a/docs/images/poll_type.png b/docs/images/poll_type.png new file mode 100644 index 0000000000..e7eab1f43b Binary files /dev/null and b/docs/images/poll_type.png differ diff --git a/docs/images/postgres_config.png b/docs/images/postgres_config.png index e2528f14f1..0f2b6a05ef 100644 Binary files a/docs/images/postgres_config.png and b/docs/images/postgres_config.png differ diff --git a/docs/images/south_advanced.jpg b/docs/images/south_advanced.jpg index 3a0df711b8..9bf92c88d7 100644 Binary files a/docs/images/south_advanced.jpg and b/docs/images/south_advanced.jpg differ diff --git a/docs/images/sqlitememory_config.png b/docs/images/sqlitememory_config.png index f1da7f4e80..59c527d1c2 100644 Binary files a/docs/images/sqlitememory_config.png and b/docs/images/sqlitememory_config.png differ diff --git a/docs/images/storage_config.png b/docs/images/storage_config.png index 2724c1ef39..f764829e2e 100644 Binary files a/docs/images/storage_config.png and b/docs/images/storage_config.png differ diff --git a/docs/images/update_user.jpg b/docs/images/update_user.jpg new file mode 100644 index 0000000000..b5047bb0a4 Binary files /dev/null and b/docs/images/update_user.jpg differ diff --git a/docs/images/user_management.jpg b/docs/images/user_management.jpg index 9d05e795fd..4dfbe8be09 100644 Binary files a/docs/images/user_management.jpg and b/docs/images/user_management.jpg differ diff --git a/docs/introduction.rst b/docs/introduction.rst index eba8aba2b5..96ac8eb165 100644 --- a/docs/introduction.rst +++ b/docs/introduction.rst @@ -9,7 +9,6 @@ .. |DataPipelines| raw:: html - Developing Data Pipelines Introduction to Fledge diff --git a/docs/plugin_developers_guide/02_writing_plugins.rst b/docs/plugin_developers_guide/02_writing_plugins.rst index 69e8f8852a..2a309f8c1f 100644 --- a/docs/plugin_developers_guide/02_writing_plugins.rst +++ b/docs/plugin_developers_guide/02_writing_plugins.rst @@ -12,6 +12,12 @@ C++ Support Classes +.. |audit_trail| raw:: html + + Audit Trail + + + .. Links in new tabs .. ============================================= @@ -353,6 +359,8 @@ We have used the properties *type* and *default* to define properties of the con - Only used for enumeration type elements. 
This is a JSON array of string that contains the options in the enumeration. * - order - Used in the user interface to give an indication of how high up in the dialogue to place this item. + * - group + - Used to group related items together. The main use of this is within the GUI which will turn each group into a tab in the creation and edit screens. * - readonly - A boolean property that can be used to include items that can not be altered by the API. * - rule @@ -425,3 +433,22 @@ The code that connects to the device should then look at the *discovered* config The example here was written in C++, there is nothing that is specific to C++ however and the same approach can be taken in Python. One thing to note however, the *plugin_info* call is used in the display of available plugins, discovery code that is very slow will impact the performance of plugin selection. + +Writing Audit Trail +~~~~~~~~~~~~~~~~~~~ + +Plugins are able to write records to the audit trail. These records must use one of the predefined audit codes that are supported by the system. See |audit_trail| for details of the supported audit codes within the system. + +In C++ you use the *AuditLogger* class to write these audit trail entries; this is a singleton object that is accessed via the getLogger method. + +.. code-block:: C + + AuditLogger *audit = AuditLogger::getLogger(); + audit->audit("NHDWN", "INFORMATION"); + +There is also a convenience function that can be used if you do not want to define a local pointer to the AuditLogger + +.. code-block:: C + + AuditLogger::auditLog("NHAVL", "INFORMATION"); + diff --git a/docs/quick_start/installing.rst b/docs/quick_start/installing.rst index 30eec10b89..ceaabc6128 100644 --- a/docs/quick_start/installing.rst +++ b/docs/quick_start/installing.rst @@ -3,6 +3,10 @@ For Debian Platform +.. |RPM PostgreSQL| raw:: html + + For Red Hat Platform + .. |Configure Storage Plugin| raw:: html Configure Storage Plugin from GUI @@ -140,6 +144,7 @@ To start Fledge with PostgreSQL, first you need to install the PostgreSQL packag |Debian PostgreSQL| +|RPM PostgreSQL| Also you need to change the value of Storage plugin. See |Configure Storage Plugin| or with below curl command @@ -156,3 +161,80 @@ Also you need to change the value of Storage plugin. See |Configure Storage Plug } Now, it's time to restart Fledge. Thereafter you will see Fledge is running with PostgreSQL. + + +Using Docker Containerizer to install Fledge +############################################# + +Fledge Docker containers are provided in a private repository. This repository has no authentication or encryption associated with it. + +The following steps describe how to install Fledge using these containers: + +- Edit the daemon.json file, whose default location is /etc/docker/daemon.json on Linux. If the daemon.json file does not exist, create it. Assuming there are no other settings in the file, it should have the following contents: + +.. code-block:: console + + { "insecure-registries":["54.204.128.201:5000"] } + +- Restart Docker for the changes to take effect + +.. code-block:: console + + sudo systemctl restart docker.service + +- Check using the command + +.. code-block:: console + + docker info + +You should see the following output: + +.. code-block:: console + + Insecure Registries: + 54.204.128.201:5000 + 127.0.0.0/8 + +You may also refer to the Docker documentation `here `_. + +Ubuntu 20.04 +~~~~~~~~~~~~ + +- To pull the Docker image from the registry + +..
code-block:: console + + docker pull 54.204.128.201:5000/fledge:latest-ubuntu2004 + +- To run the Docker container + +.. code-block:: console + + docker run -d --name fledge -p 8081:8081 -p 1995:1995 -p 8082:80 54.204.128.201:5000/fledge:latest-ubuntu2004 + +Here, The GUI is forwarded to port 8082 on the host machine, it can be any port and omitted if port 80 is free. + +- It is possible to check if Fledge and the Fledge GUI are running by using the following commands on the host machine + +*Fledge* + +.. code-block:: console + + curl -sX GET http://localhost:8081/fledge/ping + +*Fledge GUI* + +.. code-block:: console + + http://localhost:8082 + +- To attach to the running container + +.. code-block:: console + + docker exec -it fledge bash + +.. note:: + For Ubuntu 18.04 setup, you just need to replace ubuntu2004 with ubuntu1804. + Images are currently only available for Ubuntu version 18.04 and 20.04. diff --git a/docs/quick_start/uninstalling.rst b/docs/quick_start/uninstalling.rst index df203627d1..ded329be03 100644 --- a/docs/quick_start/uninstalling.rst +++ b/docs/quick_start/uninstalling.rst @@ -10,6 +10,13 @@ Use the ``apt`` or the ``apt-get`` command to uninstall Fledge: sudo apt -y purge fledge +Red Hat Platform +################ + +.. code-block:: console + + sudo yum -y remove fledge + .. note:: You may notice the warning in the last row of the package removal output: diff --git a/docs/quick_start/viewing.rst b/docs/quick_start/viewing.rst index 79c3d81afb..48131b9f67 100644 --- a/docs/quick_start/viewing.rst +++ b/docs/quick_start/viewing.rst @@ -5,6 +5,10 @@ .. |view_summary| image:: ../images/view_summary.jpg .. |view_times| image:: ../images/view_times.jpg .. |view_spreadsheet| image:: ../images/view_spreadsheet.jpg +.. |gui_settings| image:: ../images/gui_settings.jpg +.. |multi_graph1| image:: ../images/multi_graph1.jpg +.. |multi_graph2| image:: ../images/multi_graph2.jpg +.. |multi_graph3| image:: ../images/multi_graph3.jpg Viewing Data ############ @@ -34,12 +38,39 @@ It is possible to change the time period to which the graph refers by use of the | |view_times| | +--------------+ +It is also possible to change the default duration of a graph when it is first displayed. This is done via the *Settings* menu item. + ++----------------+ +| |gui_settings| | ++----------------+ + +This can be useful when very high frequency data is ingested into the system as it will prevent the initial graph that is displayed from pulling large amounts of data from the system and slowing down the response of the system and the GUI. + Where an asset contains multiple data points each of these is displayed in a different colour. Graphs for particular data points can be toggled on and off by clicking on the key at the top of the graph. Those data points not should will be indicated by striking through the name of the data point. +-------------+ | |view_hide| | +-------------+ +It is also possible to overlay the graphs for other assets onto the asset you are viewing. + ++----------------+ +| |multi_graph1| | ++----------------+ + +Using the pull down menu above the graph you may select another asset to add to the graph. + ++----------------+ +| |multi_graph2| | ++----------------+ + +All the data points from that asset will then be added to the graph. 
Multiple assets may be chosen from this pull down in order to build up more complex sets of graphs, individual data points for any of the assets may be hidden as above, or an entire asset may be removed from the graph by clicking on the **x** next to the asset name. + ++----------------+ +| |multi_graph3| | ++----------------+ + + A summary tab is also available, this will show the minimum, maximum and average values for each of the data points. Click on *Summary* to show the summary tab. +----------------+ diff --git a/docs/requirements.txt b/docs/requirements.txt index f5cae46112..281fe1774e 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,2 +1,5 @@ Sphinx==3.5.4 -docutils<0.18 \ No newline at end of file +docutils<0.18 +Jinja2<3.1 +urllib3==1.26.15 +sphinx-rtd-theme==1.3.0 diff --git a/docs/rest_api_guide/03_RESTstatistics.rst b/docs/rest_api_guide/03_RESTstatistics.rst index c8117a92bb..6a62bf9e30 100644 --- a/docs/rest_api_guide/03_RESTstatistics.rst +++ b/docs/rest_api_guide/03_RESTstatistics.rst @@ -145,33 +145,33 @@ GET statistics/rate **Request Parameters** - - **statistics** - a comma separated list of statistics values to return + - **statistics** - a comma separated list of statistics keys. - - **periods** - a comma separated list of time periods in minutes. The corresponding rate that will be returned for a given value X is the counts per minute over the previous X minutes. + - **periods** - a comma separated list of time periods in minutes. + +The corresponding rate that will be returned for a given value X is the counts per minute over the previous X minutes. **Example** .. code-block:: console $ curl -sX GET http://localhost:8081/fledge/statistics/rate?statistics=READINGS,Readings%20Sent\&periods=1,5,15,30,60 - { + + { "rates": { "READINGS": { - "1": 12.938816958618938, - "5": 12.938816958618938, - "15": 12.938816958618938, - "30": 12.938816958618938, - "60": 12.938816958618938 + "1": 2561.0, + "5": 512.2, + "15": 170.73333333333332, + "30": 85.36666666666666, + "60": 42.68333333333333 }, "Readings Sent": { - "1": 0, - "5": 0, - "15": 0, - "30": 0, - "60": 0 + "1": 2225.0, + "5": 445.0, + "15": 148.33333333333334, + "30": 74.16666666666667, + "60": 37.083333333333336 } } } - $ - - diff --git a/docs/rest_api_guide/04_RESTuser.rst b/docs/rest_api_guide/04_RESTuser.rst index 924e5f5aeb..db5501da8d 100644 --- a/docs/rest_api_guide/04_RESTuser.rst +++ b/docs/rest_api_guide/04_RESTuser.rst @@ -55,33 +55,27 @@ An array of JSON objects, one per asset. - Type - Description - Example - * - assetCode - - string - - The code of the asset - - fogbench/accelerometer * - count - number - The number of recorded readings for the asset code - - 180 + - 11 + * - assetCode + - string + - The code of the asset + - asset_1 **Example** .. 
code-block:: console $ curl -sX GET http://localhost:8081/fledge/asset - [ { "count": 18, "assetCode": "fogbench/accelerometer" }, - { "count": 18, "assetCode": "fogbench/gyroscope" }, - { "count": 18, "assetCode": "fogbench/humidity" }, - { "count": 18, "assetCode": "fogbench/luxometer" }, - { "count": 18, "assetCode": "fogbench/magnetometer" }, - { "count": 18, "assetCode": "fogbench/mouse" }, - { "count": 18, "assetCode": "fogbench/pressure" }, - { "count": 18, "assetCode": "fogbench/switch" }, - { "count": 18, "assetCode": "fogbench/temperature" }, - { "count": 18, "assetCode": "fogbench/wall clock" } ] + [ + {"count": 11, "assetCode": "asset_1"}, + {"count": 11, "assetCode": "asset_2"}, + {"count": 11, "assetCode": "asset_3"} + ] $ - GET asset readings ~~~~~~~~~~~~~~~~~~ @@ -119,46 +113,35 @@ An array of JSON objects with the readings data for a series of readings sorted - Type - Description - Example - * - timestamp - - timestamp - - The time at which the reading was received - - 2018-04-16 14:33:18.215 * - reading - JSON object - The JSON reading object received from the sensor - - {"reading": {"x":0, "y":0, "z":1} + - {"pressure": 885.7} + * - timestamp + - timestamp + - The time at which the reading was received + - 2023-04-14 12:04:34.603963 **Example** .. code-block:: console - $ curl -sX GET http://localhost:8081/fledge/asset/fogbench%2Faccelerometer - [ { "reading": { "x": 0, "y": -2, "z": 0 }, "timestamp": "2018-04-19 14:20:59.692" }, - { "reading": { "x": 0, "y": 0, "z": -1 }, "timestamp": "2018-04-19 14:20:54.643" }, - { "reading": { "x": -1, "y": 2, "z": 1 }, "timestamp": "2018-04-19 14:20:49.899" }, - { "reading": { "x": -1, "y": -1, "z": 1 }, "timestamp": "2018-04-19 14:20:47.026" }, - { "reading": { "x": -1, "y": -2, "z": -2 }, "timestamp": "2018-04-19 14:20:42.746" }, - { "reading": { "x": 0, "y": 2, "z": 0 }, "timestamp": "2018-04-19 14:20:37.418" }, - { "reading": { "x": -2, "y": -1, "z": 2 }, "timestamp": "2018-04-19 14:20:32.650" }, - { "reading": { "x": 0, "y": 0, "z": 1 }, "timestamp": "2018-04-19 14:06:05.870" }, - { "reading": { "x": 1, "y": 1, "z": 1 }, "timestamp": "2018-04-19 14:06:05.870" }, - { "reading": { "x": 0, "y": 0, "z": -1 }, "timestamp": "2018-04-19 14:06:05.869" }, - { "reading": { "x": 2, "y": -1, "z": 0 }, "timestamp": "2018-04-19 14:06:05.868" }, - { "reading": { "x": -1, "y": -2, "z": 2 }, "timestamp": "2018-04-19 14:06:05.867" }, - { "reading": { "x": 2, "y": 1, "z": 1 }, "timestamp": "2018-04-19 14:06:05.867" }, - { "reading": { "x": 1, "y": -2, "z": 1 }, "timestamp": "2018-04-19 14:06:05.866" }, - { "reading": { "x": 2, "y": -1, "z": 1 }, "timestamp": "2018-04-19 14:06:05.865" }, - { "reading": { "x": 0, "y": -1, "z": 2 }, "timestamp": "2018-04-19 14:06:05.865" }, - { "reading": { "x": 0, "y": -2, "z": 1 }, "timestamp": "2018-04-19 14:06:05.864" }, - { "reading": { "x": -1, "y": -2, "z": 0 }, "timestamp": "2018-04-19 13:45:15.881" } ] + $ curl -sX GET http://localhost:8081/fledge/asset/asset_3 + [ + {"reading": {"pressure": 885.7}, "timestamp": "2023-04-14 12:04:34.603963"}, + {"reading": {"pressure": 846.3}, "timestamp": "2023-04-14 12:02:39.150127"}, + {"reading": {"pressure": 913.0}, "timestamp": "2023-04-14 12:02:26.616218"}, + {"reading": {"pressure": 994.7}, "timestamp": "2023-04-14 12:02:11.171338"}, + {"reading": {"pressure": 960.2}, "timestamp": "2023-04-14 12:01:56.979426"} + ] $ - $ curl -sX GET http://localhost:8081/fledge/asset/fogbench%2Faccelerometer?limit=5 - [ { "reading": { "x": 0, "y": -2, "z": 0 }, "timestamp": 
"2018-04-19 14:20:59.692" }, - { "reading": { "x": 0, "y": 0, "z": -1 }, "timestamp": "2018-04-19 14:20:54.643" }, - { "reading": { "x": -1, "y": 2, "z": 1 }, "timestamp": "2018-04-19 14:20:49.899" }, - { "reading": { "x": -1, "y": -1, "z": 1 }, "timestamp": "2018-04-19 14:20:47.026" }, - { "reading": { "x": -1, "y": -2, "z": -2 }, "timestamp": "2018-04-19 14:20:42.746" } ] + $ curl -sX GET http://localhost:8081/fledge/asset/asset_3?limit=3 + [ + {"reading": {"pressure": 885.7}, "timestamp": "2023-04-14 12:04:34.603963"}, + {"reading": {"pressure": 846.3}, "timestamp": "2023-04-14 12:02:39.150127"}, + {"reading": {"pressure": 913.0}, "timestamp": "2023-04-14 12:02:26.616218"} + ] $ Using *seconds* and *previous* to obtain historical data. @@ -218,43 +201,32 @@ An array of JSON objects with a series of readings sorted in reverse chronologic * - timestamp - timestamp - The time at which the reading was received - - 2018-04-16 14:33:18.215 - * - {reading} + - 2023-04-14 12:04:34.603937 + * - reading - JSON object - The value of the specified reading - - {"temperature": 20} + - {"lux": 47705.68} **Example** .. code-block:: console - $ curl -sX GET http://localhost:8081/fledge/asset/fogbench%2Fhumidity/temperature - [ { "temperature": 20, "timestamp": "2018-04-19 14:20:59.692" }, - { "temperature": 33, "timestamp": "2018-04-19 14:20:54.643" }, - { "temperature": 35, "timestamp": "2018-04-19 14:20:49.899" }, - { "temperature": 0, "timestamp": "2018-04-19 14:20:47.026" }, - { "temperature": 37, "timestamp": "2018-04-19 14:20:42.746" }, - { "temperature": 47, "timestamp": "2018-04-19 14:20:37.418" }, - { "temperature": 26, "timestamp": "2018-04-19 14:20:32.650" }, - { "temperature": 12, "timestamp": "2018-04-19 14:06:05.870" }, - { "temperature": 38, "timestamp": "2018-04-19 14:06:05.869" }, - { "temperature": 7, "timestamp": "2018-04-19 14:06:05.869" }, - { "temperature": 21, "timestamp": "2018-04-19 14:06:05.868" }, - { "temperature": 5, "timestamp": "2018-04-19 14:06:05.867" }, - { "temperature": 40, "timestamp": "2018-04-19 14:06:05.867" }, - { "temperature": 39, "timestamp": "2018-04-19 14:06:05.866" }, - { "temperature": 29, "timestamp": "2018-04-19 14:06:05.865" }, - { "temperature": 41, "timestamp": "2018-04-19 14:06:05.865" }, - { "temperature": 46, "timestamp": "2018-04-19 14:06:05.864" }, - { "temperature": 10, "timestamp": "2018-04-19 13:45:15.881" } ] + $ curl -sX GET http://localhost:8081/fledge/asset/asset_2/lux + [ + {"timestamp": "2023-04-14 12:04:34.603937", "lux": 47705.68}, + {"timestamp": "2023-04-14 12:02:39.150106", "lux": 97967.9}, + {"timestamp": "2023-04-14 12:02:26.616200", "lux": 28788.154}, + {"timestamp": "2023-04-14 12:02:11.171319", "lux": 57992.674}, + {"timestamp": "2023-04-14 12:01:56.979407", "lux": 10373.945} + ] $ - $ curl -sX GET http://localhost:8081/fledge/asset/fogbench%2Faccelerometer?limit=5 - [ { "temperature": 20, "timestamp": "2018-04-19 14:20:59.692" }, - { "temperature": 33, "timestamp": "2018-04-19 14:20:54.643" }, - { "temperature": 35, "timestamp": "2018-04-19 14:20:49.899" }, - { "temperature": 0, "timestamp": "2018-04-19 14:20:47.026" }, - { "temperature": 37, "timestamp": "2018-04-19 14:20:42.746" } ] + $ curl -sX GET http://localhost:8081/fledge/asset/asset_2/lux?limit=3 + [ + {"timestamp": "2023-04-14 11:25:05.672528", "lux": 75723.923}, + {"timestamp": "2023-04-14 11:24:49.767983", "lux": 50475.99}, + {"timestamp": "2023-04-14 11:23:15.672528", "lux": 75723.923} + ] $ @@ -282,28 +254,27 @@ A JSON object of a reading by asset code. 
- Type - Description - Example - * - {reading}.max - - number - - The maximum value of the set of sensor values selected in the query string - - 47 - * - {reading}.min + * - .lux.min - number - The minimum value of the set of sensor values selected in the query string - - 0 - * - {reading}.average + - 10373.945 + * - .lux.max + - number + - The maximum value of the set of sensor values selected in the query string + - 97967.9 + * - .lux.average - number - The average value of the set of sensor values selected in the query string - - 27 + - 48565.6706 **Example** .. code-block:: console - $ curl -sX GET http://localhost:8081/fledge/asset/fogbench%2Fhumidity/temperature/summary - { "temperature": { "max": 47, "min": 0, "average": 27 } } + $ curl -sX GET http://localhost:8081/fledge/asset/asset_2/lux/summary + {"lux": {"min": 10373.945, "max": 97967.9, "average": 48565.6706}} $ - GET all asset reading timespan ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -322,18 +293,18 @@ An array of JSON objects with newest and oldest timestamps of the readings held - Type - Description - Example - * - asset_code - - string - - The asset code for which the timestamps refer - - sinusoid * - oldest - string - The oldest timestamp held in the buffer for this asset - - "2022-11-08 17:07:02.623258" + - 2022-11-08 17:07:02.623258 * - newest - string - The newest timestamp held in the buffer for this asset - - "2022-11-09 14:52:50.069432" + - 2022-11-09 14:52:50.069432 + * - asset_code + - string + - The asset code for which the timestamps refer + - sinusoid **Example** @@ -374,11 +345,11 @@ A JSON object with the newest and oldest timestamps for the asset held in the st * - oldest - string - The oldest timestamp held in the buffer for this asset - - "2022-11-08 17:07:02.623258" + - 2022-11-08 17:07:02.623258 * - newest - string - The newest timestamp held in the buffer for this asset - - "2022-11-09 14:52:50.069432" + - 2022-11-09 14:52:50.069432 **Example** @@ -428,58 +399,52 @@ An array of JSON objects with a series of readings sorted in reverse chronologic - Type - Description - Example - * - timestamp - - timestamp - - The time the reading represents - - 2018-04-16 14:33:18 - * - max - - number - - The maximum value of the set of sensor values selected in the query string - - 47 * - min - number - The minimum value of the set of sensor values selected in the query string - - 0 + - 47705.68 + * - max + - number + - The maximum value of the set of sensor values selected in the query string + - 47705.68 * - average - number - The average value of the set of sensor values selected in the query string - - 27 + - 47705.68 + * - timestamp + - timestamp + - The time the reading represents + - 2023-04-14 12:04:34 **Example** .. 
code-block:: console - $ curl -sX GET http://localhost:8081/fledge/asset/fogbench%2Fhumidity/temperature/series - [ { "timestamp": "2018-04-19 14:20:59", "max": 20, "min": 20, "average": 20 }, - { "timestamp": "2018-04-19 14:20:54", "max": 33, "min": 33, "average": 33 }, - { "timestamp": "2018-04-19 14:20:49", "max": 35, "min": 35, "average": 35 }, - { "timestamp": "2018-04-19 14:20:47", "max": 0, "min": 0, "average": 0 }, - { "timestamp": "2018-04-19 14:20:42", "max": 37, "min": 37, "average": 37 }, - { "timestamp": "2018-04-19 14:20:37", "max": 47, "min": 47, "average": 47 }, - { "timestamp": "2018-04-19 14:20:32", "max": 26, "min": 26, "average": 26 }, - { "timestamp": "2018-04-19 14:06:05", "max": 46, "min": 5, "average": 27.8 }, - { "timestamp": "2018-04-19 13:45:15", "max": 10, "min": 10, "average": 10 } ] + $ curl -sX GET http://localhost:8081/fledge/asset/asset_2/lux/series + [ + {"min": 47705.68, "max": 47705.68, "average": 47705.68, "timestamp": "2023-04-14 12:04:34"}, + {"min": 97967.9, "max": 97967.9, "average": 97967.9, "timestamp": "2023-04-14 12:02:39"}, + {"min": 28788.154, "max": 28788.154, "average": 28788.154, "timestamp": "2023-04-14 12:02:26"}, + {"min": 57992.674, "max": 57992.674, "average": 57992.674, "timestamp": "2023-04-14 12:02:11"}, + {"min": 10373.945, "max": 10373.945, "average": 10373.945, "timestamp": "2023-04-14 12:01:56"} + ] $ - $ curl -sX GET http://localhost:8081/fledge/asset/fogbench%2Fhumidity/temperature/series?limit=5 - [ { "timestamp": "2018-04-19 14:20:59", "max": 20, "min": 20, "average": 20 }, - { "timestamp": "2018-04-19 14:20:54", "max": 33, "min": 33, "average": 33 }, - { "timestamp": "2018-04-19 14:20:49", "max": 35, "min": 35, "average": 35 }, - { "timestamp": "2018-04-19 14:20:47", "max": 0, "min": 0, "average": 0 }, - { "timestamp": "2018-04-19 14:20:42", "max": 37, "min": 37, "average": 37 } ] + $ curl -sX GET http://localhost:8081/fledge/asset/asset_2/lux/series?limit=3 + [ + {"min": 47705.68, "max": 47705.68, "average": 47705.68, "timestamp": "2023-04-14 12:04:34"}, + {"min": 97967.9, "max": 97967.9, "average": 97967.9, "timestamp": "2023-04-14 12:02:39"}, + {"min": 28788.154, "max": 28788.154, "average": 28788.154, "timestamp": "2023-04-14 12:02:26"} + ] Using *seconds* and *previous* to obtain historical data. .. code-block:: console - $ curl http://localhost:8081/fledge/asset/fogbench%2Fhumidity/temperature/series?seconds=5\&previous=60|jq + $ curl -sX GET http://localhost:8081/fledge/asset/asset_2/lux/series?seconds=5\&previous=60 [ - { "timestamp": "2022-11-09 09:37:51.930688", "max": 20, "min": 20, "average": 20 }, - { "timestamp": "2022-11-09 09:37:50.930887", "max": 33, "min": 33, "average": 33 }, - { "timestamp": "2022-11-09 09:37:49.933698", "max": 0, "min": 0, "average": 0 }, - { "timestamp": "2022-11-09 09:37:48.930644", "max": 5, "min": 1, "average": 4 }, - { "timestamp": "2022-11-09 09:37:47.930950", "max": 0, "min": 37, "average": 37 } + {"min": 47705.68, "max": 47705.68, "average": 47705.68, "timestamp": "2023-04-14 12:04:34"} ] - $ + $ The above call returned 5 seconds of data from the current time minus 65 seconds to the current time minus 5 seconds. 
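The *seconds* and *previous* parameters can also be combined in scripted access to the API. The following is a minimal sketch in Python using the third-party *requests* package; the host, port, asset code and data point mirror the examples above and are assumptions to be adjusted for your own instance.

.. code-block:: python

    import requests

    # Assumed values: adjust host, port, asset code and data point for your instance.
    BASE_URL = "http://localhost:8081/fledge"

    def get_series(asset, datapoint, **params):
        """Return the series summary for a data point of an asset."""
        url = "{}/asset/{}/{}/series".format(BASE_URL, asset, datapoint)
        response = requests.get(url, params=params, timeout=10)
        response.raise_for_status()
        return response.json()

    # 5 seconds of summarised data, offset 60 seconds into the past,
    # equivalent to the curl call shown above.
    for row in get_series("asset_2", "lux", seconds=5, previous=60):
        print(row["timestamp"], row["min"], row["max"], row["average"])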
diff --git a/docs/scripts/fledge_plugin_list b/docs/scripts/fledge_plugin_list index b90cabc1ff..d06e0a3d85 100755 --- a/docs/scripts/fledge_plugin_list +++ b/docs/scripts/fledge_plugin_list @@ -35,8 +35,10 @@ all_repos = [r["name"] for r in repos if r["archived"] is False];\ fRepos = list(set(all_repos) - set(exclude_topic_packages.split()));\ print("\n".join(fRepos)); ' -REPOSITORIES=$(echo ${fledgeRepos} | python3 -c "$fetchFledgeReposPyScript" | sort -f) -echo "REPOS LIST: "${REPOSITORIES} +REPOS=$(echo ${fledgeRepos} | python3 -c "$fetchFledgeReposPyScript" | sort -f) +INBUILT_PLUGINS="fledge-north-OMF fledge-rule-Threshold fledge-rule-DataAvailability" +REPOSITORIES=$(echo ${REPOS} ${INBUILT_PLUGINS} | xargs -n1 | sort -f | xargs) +echo "REPOSITORIES LIST: "${REPOSITORIES} function table { list="$1" @@ -50,24 +52,46 @@ function table { echo " - Description" >> "$output" for repo in ${list} do - product=$(echo "$repo}" | sed -e 's/-.*//') type=$(echo "${repo}" | sed -e 's/fledge-//' -e 's/-.*//') name=$(echo "${repo}" | sed -e 's/fledge-//' -e "s/${type}-//") if [[ ${type} = "$tableType" ]]; then - is_branch_exists=$(git ls-remote -h https://${USERNAME}:${GITHUB_ACCESS_TOKEN}@github.com/fledge-iot/${repo}.git ${DOCBRANCH} | grep -c "refs/heads/${DOCBRANCH}") - if [[ ${is_branch_exists} -gt 0 ]]; then - description=$(echo "$fledgeRepos" | python3 -c 'import json,sys;repos=json.load(sys.stdin);fRepo = [r for r in repos if r["name"] == "'"${repo}"'" ];print(fRepo[0]["description"])') - if [[ "${description}" = "None" ]]; then description="A ${name} ${type} plugin"; fi - echo " * - ${name}" >> "$output" - echo " - ${description}" >> "$output" + if grep -q "$repo" <<< "$INBUILT_PLUGINS"; then + if [[ $repo == "fledge-north-OMF" ]]; then + echo " * - \`omf <"plugins/fledge-north-OMF/index.html">\`__" >> "$output" + echo " - Send data to OSIsoft PI Server, Edge Data Store or OSIsoft Cloud Services" >> "$output" + elif [[ $repo == "fledge-rule-DataAvailability" ]]; then + echo " * - \`data-availability <"plugins/fledge-rule-DataAvailability/index.html">\`__" >> "$output" + echo " - Triggers every time when it receives data that matches an asset code or audit code those given in the configuration" >> "$output" + elif [[ $repo == "fledge-rule-Threshold" ]]; then + echo " * - \`threshold <"plugins/fledge-rule-Threshold/index.html">\`__" >> "$output" + echo " - Detect the value of a data point within an asset going above or below a set threshold" >> "$output" + fi + else + rm -rf ${repo} + git clone https://${USERNAME}:${GITHUB_ACCESS_TOKEN}@github.com/fledge-iot/${repo}.git --branch ${DOCBRANCH} >/dev/null 2>&1 + is_branch_exists=$? 
+ if [[ ${is_branch_exists} -eq 0 ]]; then + description=$(echo "$fledgeRepos" | python3 -c 'import json,sys;repos=json.load(sys.stdin);fRepo = [r for r in repos if r["name"] == "'"${repo}"'" ];print(fRepo[0]["description"])') + if [[ "${description}" = "None" ]]; then description="A ${name} ${type} plugin"; fi + # cloned directory replaced with installed directory name which is defined in Package file for each repo + installed_plugin_dir_name=$(cat ${repo}/Package | grep plugin_install_dirname= | sed -e "s/plugin_install_dirname=//g") + if [[ $installed_plugin_dir_name == "\${plugin_name}" ]]; then + installed_plugin_dir_name=$(cat ${repo}/Package | grep plugin_name= | sed -e "s/plugin_name=//g") + fi + old_plugin_name=$(echo ${repo} | cut -d '-' -f3-) + new_plugin_name=$(echo ${repo/$old_plugin_name/$installed_plugin_dir_name}) + # Only link when doc exists in plugins directory + if [[ -d ${repo}/docs && -f ${repo}/docs/index.rst ]]; then + echo " * - \`$name <"plugins/$new_plugin_name/index.html">\`__" >> "$output" + else + echo " * - ${name}" >> "$output" + fi + echo " - ${description}" >> "$output" + fi + rm -rf ${repo} fi fi done - if [[ ${tableType} = "north" ]]; then - echo " * - OMF" >> "$output" - echo " - Send data to OSIsoft PI Server, Edge Data Store or OSIsoft Cloud Services" >> "$output" - fi - echo "" >> "$output" } cat >> $output << EOF1 diff --git a/docs/scripts/plugin_and_service_documentation b/docs/scripts/plugin_and_service_documentation index 89e11cdc2a..523122b066 100644 --- a/docs/scripts/plugin_and_service_documentation +++ b/docs/scripts/plugin_and_service_documentation @@ -25,7 +25,6 @@ Fledge North Plugins .. toctree:: - fledge-north-OMF/index EOFNORTH cat > plugins/filter.rst << EOFFILTER ********************* @@ -42,7 +41,6 @@ Fledge Notification Rule Plugins .. toctree:: - fledge-rule-Threshold/index EOFRULE cat > plugins/notify.rst << EOFNOTIFY ************************************ @@ -87,8 +85,10 @@ all_repos = [r["name"] for r in repos if r["archived"] is False];\ fRepos = list(set(all_repos) - set(exclude_topic_packages.split()));\ print("\n".join(fRepos)); ' -REPOSITORIES=$(echo ${fledgeRepos} | python3 -c "$fetchFledgeReposPyScript" | sort -f) -echo "REPOS LIST: "${REPOSITORIES} +REPOS=$(echo ${fledgeRepos} | python3 -c "$fetchFledgeReposPyScript" | sort -f) +INBUILT_PLUGINS="fledge-north-OMF fledge-rule-Threshold fledge-rule-DataAvailability" +REPOSITORIES=$(echo ${REPOS} ${INBUILT_PLUGINS} | xargs -n1 | sort -f | xargs) +echo "REPOSITORIES LIST: "${REPOSITORIES} function plugin_and_service_doc { repo_name=$1 @@ -131,29 +131,38 @@ function plugin_and_service_doc { rm -rf /tmp/doc.$$ } - for repo in ${REPOSITORIES} do type=$(echo $repo | sed -e 's/fledge-//' -e 's/-.*//') - if [ "$type" = "south" -o "$type" = "north" -o $type = "filter" -o $type = "rule" -o $type = "notify" ]; then - dest=plugins/${type}.rst + dest=plugins/${type}.rst + if grep -q "$repo" <<< "$INBUILT_PLUGINS"; then + if [[ $repo == "fledge-north-OMF" ]]; then + name="fledge-north-OMF" + echo " ${name}/index" >> $dest + mkdir plugins/${name} + ln -s ../../images plugins/${name}/images + echo '.. 
include:: ../../fledge-north-OMF.rst' > plugins/${name}/index.rst + # Append OMF.rst to the end of the file rather than including it so that we may edit the links to prevent duplicates + cat OMF.rst >> plugins/${name}/index.rst + sed -i -e 's/Naming_Scheme/Naming_Scheme_plugin/' -e 's/Linked_Types/Linked_Types_Plugin/' -e 's/Edge_Data_Store/Edge_Data_Store_OMF_Endpoint/' -e 's/_Connector_Relay/PI_Connector_Relay/' plugins/${name}/index.rst + elif [[ $repo == "fledge-rule-DataAvailability" ]]; then + name="fledge-rule-DataAvailability" + echo " ${name}/index" >> $dest + mkdir plugins/${name} + ln -s $(pwd)/${name}/images plugins/${name}/images + echo '.. include:: ../../fledge-rule-DataAvailability/index.rst' > plugins/${name}/index.rst + elif [[ $repo == "fledge-rule-Threshold" ]]; then + name="fledge-rule-Threshold" + echo " ${name}/index" >> $dest + mkdir plugins/${name} + ln -s $(pwd)/${name}/images plugins/${name}/images + echo '.. include:: ../../fledge-rule-Threshold/index.rst' > plugins/${name}/index.rst + fi + elif [ "$type" = "south" -o "$type" = "north" -o $type = "filter" -o $type = "rule" -o $type = "notify" ]; then plugin_and_service_doc $repo $dest "plugins" fi done -# Deal with builtin plugin documentation -mkdir plugins/fledge-north-OMF -ln -s ../../images plugins/fledge-north-OMF/images -echo '.. include:: ../../fledge-north-OMF.rst' > plugins/fledge-north-OMF/index.rst -# Append OMF.rst to the end of the file rather than including it so that we may -# edit the links to prevent duplicates -cat OMF.rst >> plugins/fledge-north-OMF/index.rst -sed -i -e 's/Naming_Scheme/Naming_Scheme_plugin/' -e 's/Linked_Types/Linked_Types_Plugin/' plugins/fledge-north-OMF/index.rst -# Create the Threshold rule documentation -mkdir plugins/fledge-rule-Threshold -ln -s $(pwd)/fledge-rule-Threshold/images plugins/fledge-rule-Threshold/images -echo '.. include:: ../../fledge-rule-Threshold/index.rst' > plugins/fledge-rule-Threshold/index.rst - cat > services/index.rst << EOFSERVICES ******************* Additional Services diff --git a/docs/securing_fledge.rst b/docs/securing_fledge.rst index d2ea317695..1e1f3274c0 100644 --- a/docs/securing_fledge.rst +++ b/docs/securing_fledge.rst @@ -11,6 +11,7 @@ .. |password_rotation| image:: images/password_rotation.jpg .. |user_management| image:: images/user_management.jpg .. |add_user| image:: images/add_user.jpg +.. |update_user| image:: images/update_user.jpg .. |delete_user| image:: images/delete_user.jpg .. |change_role| image:: images/change_role.jpg .. |reset_password| image:: images/reset_password.jpg @@ -18,6 +19,18 @@ .. |update_certificate| image:: images/update_certificate.jpg +.. Links +.. |REST API| raw:: html + + REST API + +.. |Require User Login| raw:: html + + Require User Login + +.. |User Management| raw:: html + + User Management ***************** Securing Fledge @@ -48,7 +61,7 @@ After enabling HTTPS and selecting save you must restart Fledge in order for the *Note*: if using the default self-signed certificate you might need to authorise the browser to connect to IP:PORT. Just open a new browser tab and type the URL https://YOUR_FLEDGE_IP:1995 - +; Then follow browser instruction in order to allow the connection and close the tab. In the Fledge GUI you should see the green icon (Fledge is running). 
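Scripted clients connecting to the REST API over HTTPS also need to be told how to handle the self-signed certificate. The sketch below is illustrative only; it assumes the HTTPS API is listening on port 1995, as in the URL above, and that the default certificate location under the data directory has not been changed.

.. code-block:: python

    import requests

    # Assumed endpoint and certificate path; adjust for your deployment.
    FLEDGE_URL = "https://localhost:1995/fledge"
    CA_CERT = "/usr/local/fledge/data/etc/certs/ca.cert"

    # Preferred: trust the Fledge CA certificate explicitly.
    response = requests.get(FLEDGE_URL + "/ping", verify=CA_CERT, timeout=10)
    print(response.json())

    # For quick testing only, verification can be disabled instead:
    # requests.get(FLEDGE_URL + "/ping", verify=False, timeout=10)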
@@ -130,7 +143,12 @@ Whenever a user logs into Fledge the age of their password is checked against th User Management =============== -Once mandatory authentication has been enabled and the currently logged in user has the role *admin*, a new option appears in the GUI, *User Management*. +The user management option becomes active once Fledge has been configured to require authentication of users. This is enabled via the *Admin API* page of the *Configuration* menu item. A new menu item *User Management* will appear in the left hand menu. + +.. note:: + + After setting the Authentication option to mandatory in the configuration page the Fledge instance should be restarted. + +-------------------+ | |user_management| | @@ -142,11 +160,19 @@ The user management pages allows - Deleting users. - Resetting user passwords. - Changing the role of a user. + - Changing the details of a user. + +Fledge currently supports four roles for users: -Fledge currently supports two roles for users, + - **Administrator**: a user with admin role is able to fully configure Fledge, view the data read by the Fledge instance and also manage Fledge users. - - **admin**: a user with admin role is able to fully configure Fledge and also manage Fledge users - - **user**: a user with this role is able to configure Fledge but can not manage users + - **Editor**: a user with this role is able to configure Fledge and view the data read by Fledge. The user can not manage other users or add new users. + + - **Viewer**: a user that can only view the configuration of the Fledge instance and the data that has been read by Fledge. The user has no ability to modify the Fledge instance in any way. + + - **Data Viewer**: a user that can only view the data in Fledge and not the configuration of Fledge itself. The user has no ability to modify the Fledge instance in any way. + +Restrictions apply to both the API calls that can be made when authenticated as a particular user and the access the user will have to the graphical user interface. Users will observe that menu items may be removed completely or that options on certain pages will be unavailable. Adding Users ------------ @@ -159,6 +185,15 @@ To add a new user from the *User Management* page select the *Add User* icon in You can select a role for the new user, a user name and an initial password for the user. Only users with the role *admin* can add new users. +Update User Details +------------------- + +The edit user option allows the name, authentication method and description of a user to be updated. This option is only available to users with the *admin* role. + ++---------------+ +| |update_user| | ++---------------+ + Changing User Roles ------------------- @@ -220,3 +255,47 @@ To add a new certificate select the *Import* icon in the top right of the certif +----------------------+ A dialog will appear that allows a key file and/or a certificate file to be selected and uploaded to the *Certificate Store*. An option allows to allow overwrite of an existing certificate. By default certificates may not be overwritten. + + +Generate new auth certificates for user login +----------------------------------------------- + +The default CA certificate is available inside $FLEDGE_DATA/etc/certs and is named ca.cert. Default admin and non-admin certificates, admin.cert and user.cert, are also available in the same location and may be used for Login with Certificate in Fledge.
See |Require User Login| + +Below are the steps to create a custom certificate for authentication, signed by the existing Fledge CA. + +a) Create a new certificate for the username, say **test** + +.. code-block:: console + + $ cd $FLEDGE_ROOT + $ ./scripts/auth_certificates user test 365 + + Here the script arguments are: $1=user $2=FLEDGE_USERNAME $3=SSL_DAYS_EXPIRATION + +You can now find the **test** certificate inside $FLEDGE_DATA/etc/certs/ + +b) Now create a user with the name **test** (case sensitive). Only an admin can create users. Below are the cURL commands + +.. code-block:: console + + $ AUTH_TOKEN=$(curl -d '{"username": "admin", "password": "fledge"}' -sX POST ://:/fledge/login | jq '.token' | tr -d '""') + $ curl -H "authorization: $AUTH_TOKEN" -skX POST ://:/fledge/admin/user -d '{"username":"test","real_name":"Test","access_method":"cert","description":"Non-admin based role","role_id":2}' + +.. note:: + + role_id:2 (non-admin user); if the new user requires admin privileges then pass role_id:1 + +You may also refer to the |REST API| documentation for the cURL commands. If you are not comfortable with cURL commands then use the GUI steps described in |User Management|, making sure to login as an admin user. + +.. note:: + + Steps a (cert creation) and b (create user) can be executed in any order. + +c) Now you can login as the newly created user **test**, with the following cURL command + +.. code-block:: console + + $ curl -T $FLEDGE_DATA/etc/certs/test.cert -skX POST ://:/fledge/login + +Or use the GUI; see |Require User Login| diff --git a/docs/storage.rst b/docs/storage.rst index a12fc66310..121ab60d1c 100644 --- a/docs/storage.rst +++ b/docs/storage.rst @@ -150,6 +150,52 @@ A more generic command is: sudo -u postgres createuser -d $(whoami) +Red Hat Install +--------------- + +On Red Hat or other yum based distributions, to install PostgreSQL: + +Add the PostgreSQL YUM repository to your system + +.. code-block:: console + + sudo yum install -y https://download.postgresql.org/pub/repos/yum/reporpms/EL-9-x86_64/pgdg-redhat-repo-latest.noarch.rpm + +Check whether PostgreSQL 13 is available using the command shown below + +.. code-block:: console + + sudo yum search -y postgresql13 + +Once you have confirmed that the PostgreSQL 13 repositories are available on your system, you can proceed to install PostgreSQL 13 + +.. code-block:: console + + sudo yum install -y postgresql13 postgresql13-server + +Before using the PostgreSQL server, you need to first initialize the database service using the command + +.. code-block:: console + + sudo /usr/pgsql-13/bin/postgresql-13-setup initdb + +You can then proceed to start the database server as follows + +.. code-block:: console + + sudo systemctl enable --now postgresql-13 + +Confirm that the service started above is running by checking its status using the command + +.. code-block:: console + + sudo systemctl status postgresql-13 + +Next, you must create a PostgreSQL user that matches your Linux user. + +.. code-block:: console + + sudo -u postgres createuser -d $(whoami) SQLite Plugin Configuration =========================== diff --git a/docs/troubleshooting_pi-server_integration.rst b/docs/troubleshooting_pi-server_integration.rst index c9bfdc6f83..41a82e81ad 100644 --- a/docs/troubleshooting_pi-server_integration.rst +++ b/docs/troubleshooting_pi-server_integration.rst @@ -13,11 +13,13 @@ .. |OMF_Persisted| image:: images/OMF_Persisted.png .. |PersistedPlugins| image:: images/PersistedPlugins.png .. |PersistedActions| image:: images/PersistActions.png +.. 
|OMF_Formats| image:: images/OMF_Formats.jpg ***************************************** Troubleshooting the PI Server integration ***************************************** + This section describes how to troubleshoot issues with the PI Server integration using Fledge version >= 1.9.1 and PI Web API 2019 SP1 1.13.0.6518 @@ -27,10 +29,23 @@ using Fledge version >= 1.9.1 and PI Web API 2019 SP1 1.13.0.6518 - `Error messages and causes`_ - `Possible solutions to common problems`_ +Fledge 2.1.0 and later +======================= + +In version 2.1 of Fledge a major change was introduced to the OMF plugin in the form of support for OMF version 1.2. This provides for a different method of adding data to the OMF end points that greatly improves the flexibility and removes the need to create complex types in OMF to map onto the Fledge reading structure. +When upgrading from a version prior to 2.1 where data had previously been sent to OMF, the plugin will continue to use the older, pre-OMF 1.2 method to add data. This ensures that data will continue to be written to the same tags within the PI Server or other OMF end points. New data, not previously sent to OMF will be written using the newer OMF 1.2 mechanism. + +It is possible to force the OMF plugin to always send data in the pre-OMF 1.2 format, using complex OMF data types, by turning on the option *Complex Types* in the *Formats & Types* tab of the plugin configuration. + ++---------------+ +| |OMF_Formats| | ++---------------+ + Log files ========= Fledge logs messages at error and warning levels by default, it is possible to increase the verbosity of messages logged to include information and debug messages also. This is done by altering the minimum log level setting for the north service or task. To change the minimal log level within the graphical user interface select the north service or task, click on the advanced settings link and then select a new minimal log level from the option list presented. + The name of the north instance should be used to extract just the logs about the PI Server integration, as in this example: screenshot from the Fledge GUI @@ -147,8 +162,8 @@ Managing Plugin Persisted Data This is not a feature that users would ordinarily need to be concerned with, however it is possible to enable *Developer Features* in the Fledge User Interface that will provide a mechanism to manage this data. -Enable Develop Features -~~~~~~~~~~~~~~~~~~~~~~~ +Enable Developer Features +~~~~~~~~~~~~~~~~~~~~~~~~~ Navigate to the *Settings* page of the GUI and toggle on the *Developer Features* check box on the bottom left of the page. diff --git a/docs/tuning_fledge.rst b/docs/tuning_fledge.rst index 4c29d1d5db..0fc0483072 100644 --- a/docs/tuning_fledge.rst +++ b/docs/tuning_fledge.rst @@ -9,6 +9,7 @@ .. |sqlitelb_config| image:: images/sqlitelb_config.png .. |postgres_config| image:: images/postgres_config.png .. |sqlitememory_config| image:: images/sqlitememory_config.png +.. |poll_type| image:: images/poll_type.png *************** Tuning Fledge @@ -42,12 +43,30 @@ The south services within Fledge each have a set of advanced configuration optio - *Maximum buffered Readings* - This is the maximum number of readings the south service will buffer before attempting to send those readings onward to the storage service. This and the setting above work together to define the buffering strategy of the south service. - - *Reading Rate* - The rate at which polling occurs for this south service. 
This parameter only has effect if your south plugin is polled, asynchronous south services do not use this parameter. The units are defined by the setting of the *Reading Rate Per* item. - - *Throttle* - If enabled this allows the reading rate to be throttled by the south service. The service will attempt to poll at the rate defined by *Reading Rate*, however if this is not possible, because the readings are being forwarded out of the south service at a lower rate, the reading rate will be reduced to prevent the buffering in the south service from becoming overrun. + - *Reading Rate* - The rate at which polling occurs for this south service. This parameter only has effect if your south plugin is polled; asynchronous south services do not use this parameter. The units are defined by the setting of the *Reading Rate Per* item. + - *Reading Rate Per* - This defines the units to be used in the *Reading Rate* value. It allows the selection of per *second*, *minute* or *hour*. + - *Poll Type* - This defines the mechanism used to control the poll requests that will be sent to the plugin. Three options are currently available: interval polling, fixed time polling and polling on demand. + + +-------------+ + | |poll_type| | + +-------------+ + + - *Interval* polling will issue a poll request at a fixed rate, that rate being determined by the *Reading Rate* and *Reading Rate Per* settings described above. The first poll request will be issued after startup of the plugin and will not be synchronized to any time or other events within the system. + + - *Fixed Times* polling will issue poll requests at fixed times that are defined by a set of hours, minutes and seconds. These times are defined in the local time zone of the machine that is running the Fledge instance. + + - *On Demand* polling will not perform any regular polling, instead it will wait for a control operation to be sent to the service. That operation is named *poll* and takes no arguments. This allows a poll to be triggered by the control mechanisms from notifications, schedules, north services or API requests. + + - *Hours* - This defines the hours when a poll request will be made. The hours are expressed using the 24 hour clock, with poll requests being made only when the current hour matches one of the hours in the comma separated list of hours. If the *Hours* field is left blank then polls will be issued during every hour of the day. + + - *Minutes* - This defines the minutes in the day when poll requests are made. Poll requests are only made when the current minute matches one of the minutes in the comma separated list of minutes. If the *Minutes* field is left blank then poll requests will be made in any minute within the hour. + + - *Seconds* - This defines the seconds when poll requests will be made. Seconds is a comma separated list of seconds; poll requests are made when the current second matches one of the seconds in the list. If *Fixed Times* polling is selected then the *Seconds* field must not be empty. + - *Minimum Log Level* - This configuration option can be used to set the logs that will be seen for this service. It defines the level of logging that is sent to the syslog and may be set to *error*, *warning*, *info* or *debug*. Logs of the level selected and higher will be sent to the syslog. You may access the contents of these logs by selecting the log icon in the bottom left of this screen.
- *Statistics Collection* - This configuration option can be used to control how detailed the statistics collected by the south service are. There are three options that may be selected @@ -56,11 +75,92 @@ The south services within Fledge each have a set of advanced configuration optio | |stats_options| | +-----------------+ - The default, *per asset & per service* setting will collect one statistic per asset ingested and an overall statistic for the entire service. The *per service* option just collects the overall service ingest statistics and the *per asset* option just collects the statistics for each asset and not for the entire service. + The *per asset & per service* setting will collect one statistic per asset ingested and an overall statistic for the entire service. The *per service* option just collects the overall service ingest statistics and the *per asset* option just collects the statistics for each asset and not for the entire service. The default is to collect statistics on a per asset & service basis; this is not the best setting if large numbers of distinct assets are ingested by a single south service. Use of the per asset or the per asset and service options should be limited to south services that collect a relatively small number of distinct assets. Collecting a large number of statistics, for 1000 or more distinct assets, will have a significant performance overhead and may overwhelm less well provisioned Fledge instances. When a large number of assets are ingested by a single south service this value should be set to *per service*. + + .. note:: + + The *Statistics Collection* setting will not remove any existing statistics, these will remain and continue to be represented in the statistics history. This only impacts new values that are collected. It is recommended that this be set before a service is started for the first time if the desire is to have no statistics values recorded for either assets or the service. + + .. note:: + + If the *per service* option is used then the UI page that displays the south services will not show the asset names and counts for each of the assets that are ingested by that service. + + - *Performance Counters* - This option allows for the collection of performance counters that can be used to help tune the south service. + +Performance Counters +-------------------- + +A number of performance counters can be collected in the south service to help characterise the performance of the service. This is intended to provide input into the tuning of the service and the collection of these counters should not be left on during production use of the service. + +Performance counters are collected in the service and a report is written once per minute to the storage layer for later retrieval. The values written are + + - The minimum value of the counter observed within the current minute + + - The maximum value of the counter observed within the current minute + + - The average value of the counter observed within the current minute + + - The number of samples of the counter collected within the current minute + +In the current release the performance counters can only be retrieved by direct access to the configuration and statistics database; they are stored in the *monitors* table. Future releases will include tools for the retrieval and analysis of these performance counters. + +When collection is enabled the following counters will be collected for the south service. + +.. 
list-table:: + :widths: 15 30 55 + :header-rows: 1 + + * - Counter + - Description + - Causes & Remedial Actions + * - queueLength + - The total number of readings that have been queued within the south service for sending to the storage service. + - Large queues in the south service will mean that the service will have a larger than normal footprint but may not be an issue in itself. However if the queue size grows continuously then there will eventually be a memory allocation failure in the south service. Turning on throttling of the ingest rate will reduce the data that is added to the queue and may be enough to resolve the problem; however data will be collected at a reduced rate. A faster storage plugin, perhaps using an in-memory storage engine, may be another solution. If your instance has many south services it may be worth considering splitting the south services between multiple instances. + * - ingestCount + - The number of readings ingested in each plugin interaction. + - The counter reflects the number of readings that are returned for each call to the south plugin poll entry point or by the south plugin ingest asynchronous call. Typically this number should be moderately low; if very large numbers are returned in a single call it will result in very large queues building up within the south service and the performance of the system will be degraded with large bursts of data that may overwhelm other layers, interspersed with periods of inactivity. Ideally the peaks should be eliminated and the rate kept 'flat' in order to make the best use of the system. Consider altering the configuration of the south plugin such that it returns less data but more frequently. + * - readLatency + - The longest time a reading has spent in the queue between being returned by the south plugin and sent to the storage layer. + - This counter describes how long, in milliseconds, the oldest reading waited in the internal south service queue before being sent to the storage layer. This should be less than or equal to the defined maximum latency; it may be a little over to allow for queue management times, but should not be significantly higher. If it is significantly higher for long periods of time it would indicate that the storage service is unable to handle the load that is being placed upon it. Tuning the storage layer, by changing to a higher performance plugin or one that is better suited to your workload, may resolve the problem. Alternatively consider reducing the load by splitting the south services across multiple Fledge instances. + * - flow controlled + - The number of times the reading rate has been reduced due to excessive queues building up in the south service. + - This is closely related to the queueLength counter and has much the same set of actions that should be taken if the service is frequently flow controlled. Reducing the ingest rate, or adding filtering in the pipeline to reduce the amount of data passed onward to the storage service may alleviate the problem. In general if processing can be done that reduces high bandwidth data into lower bandwidth data that can still characterise the high bandwidth content, then this should be done as close as possible to the source of the data to reduce the overall load on the system. + * - throttled rate + - The rate that data is being ingested at as a result of flow control throttling.
+ - This counter is mostly informational, indicating what ingest rate the system can sustain with the current configuration. It is useful as it gives a good idea of how far the current configuration of the system is from your desired performance. + * - storedReadings + - The readings successfully sent to the storage layer. + - This counter gives an indication of the bandwidth available from the service to the storage engine. This should be at least as high as the ingest rate if data is not to accumulate in buffers within the storage. Altering the maximum latency and maximum buffered readings advanced settings in the south service can impact this throughput. + * - resendQueued + - The number of readings queued for resend. Note that readings may be queued for resend multiple times if the resend also fails. + - This is a good indication of overload conditions within the storage engine. Consistently high values of this counter point to the need to improve the performance of the storage layer. + * - removedReadings + - A count of the readings that have been removed after too many attempts to save them in the storage layer. + - This should normally be zero or close to zero. Any significant values here are a pointer to a critical error with either the south plugin data that is being created or the operation of the storage layer. + + +Fixed Time Polling +------------------ + +Fixed time polling can be used in a number of ways to control when poll requests occur; amongst the possible scenarios are: + + - Poll at fixed times within a minute or hour. + + - Poll only for certain periods of the day. + +To poll at fixed, regular times simply set the times when a poll is required. For example, to poll every 15 seconds at 0, 15, 30 and 45 seconds past the minute, simply set the *Seconds* field to have the value 0, 15, 30, 45 and leave the minutes and hours blank. + +If you wished to poll at the hour and every 15 minutes thereafter set the *Minutes* field to 0, 15, 30 and 45 and set the *Seconds* field to 0. Setting *Seconds* to another single value, for example 30, would simply move the poll time to be 0 minutes and 30 seconds, 15 minutes and 30 seconds etc. If multiple values of seconds are given then multiple polls would occur. For example, if *Minutes* is set to 0, 15, 30, 45 and *Seconds* is set to 0, 30, a poll would occur at 0 minutes and 0 seconds, 0 minutes and 30 seconds, 15 minutes and 0 seconds, 15 minutes and 30 seconds, and so on. + +The *Hours* field, if not left empty, would work in the same way as the minutes above. + +Another use of the feature is to only poll at certain times of the day. As an example, if we wished to poll every 15 minutes between the hours of 8am and 5pm then we can set the *Hours* field to be 8,9,10,11,12,13,14,15,16 and the *Minutes* field to be 0, 15, 30, 45. The *Seconds* field can be left as 0. .. note:: - The *Statistics Collection* setting will not remove any existing statistics, these will remain and remain to be represented in the statistics history. This only impacts new values that are collected. It is recommended that this be set before a service is started for the first time if the desire it to have no statistics values recorded for either assets or the service. + The last poll of the day would be at 16:45 in the above configuration. + +Although the intervals between poll times shown in the above examples have all been equal, there is no requirement for this to be the case.
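The *Fixed Times* behaviour described above can be checked with a small script. This is an illustrative sketch only, not the scheduler used by the south service; it simply enumerates the poll times implied by the comma separated *Hours*, *Minutes* and *Seconds* values, with blank hours or minutes meaning every hour or minute.

.. code-block:: python

    def poll_times(hours="", minutes="", seconds="0"):
        """Enumerate (hour, minute, second) poll times for the Fixed Times fields."""
        def parse(field, upper):
            # Blank field means every hour/minute of the day.
            return [int(v) for v in field.split(",")] if field.strip() else list(range(upper))
        if not seconds.strip():
            raise ValueError("The Seconds field must not be empty")
        return [(h, m, s)
                for h in parse(hours, 24)
                for m in parse(minutes, 60)
                for s in [int(v) for v in seconds.split(",")]]

    # Poll every 15 minutes between 8am and 5pm, as in the example above.
    times = poll_times("8,9,10,11,12,13,14,15,16", "0,15,30,45", "0")
    print(len(times), "polls per day, last at %02d:%02d:%02d" % times[-1])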
Tuning Buffer Usage ------------------- @@ -84,6 +184,56 @@ In a similar way to the south services, north services and tasks also have advan - *Data block size* - This defines the number of readings that will be sent to the north plugin for each call to the *plugin_send* entry point. This allows the performance of the north data pipeline to be adjusted, with larger blocks sizes increasing the performance, by reducing overhead, but at the cost of requiring more memory in the north service or task to buffer the data as it flows through the pipeline. Setting this value too high may cause issues for certain of the north plugins that have limitations on the number of messages they can handle within a single block. + - *Performance Counters* - This option allows for collection of performance counters that can be used to help tune the north service. + +Performance Counters +-------------------- + +A number of performance counters can be collected in the north service to help characterise the performance of the service. This is intended to provide input into the tuning of the service and the collection of these counters should not be left on during production use of the service. + +Performance counters are collected in the service and a report is written once per minute to the storage layer for later retrieval. The values written are + + - The minimum value of the counter observed within the current minute + + - The maximum value of the counter observed within the current minute + + - The average value of the counter observed within the current minute + + - The number of samples of the counter collected within the current minute + +In the current release the performance counters can only be retrieved by direct access to the configuration and statistics database; they are stored in the *monitors* table. Future releases will include tools for the retrieval and analysis of these performance counters. + +When collection is enabled the following counters will be collected for the north service. + +.. list-table:: + :widths: 15 30 55 + :header-rows: 1 + + * - Counter + - Description + - Causes & Remedial Actions + * - No of waits for data + - This counter reports how many times the north service requested data from storage and no data was available. + - If this value is consistently low or zero it indicates the other services are providing data faster than the north service is able to send that data. Improving the throughput of the north service would be advisable to prevent the accumulation of unsent data in the storage service. + * - Block utilisation % + - Data is read by the north service in blocks; the size of these blocks is defined in the advanced configuration of the north service. This counter reflects what percentage of the requested blocks are actually populated with data on each call to the storage service. + - A constantly high utilisation is an indication that more data is available than can be sent; increasing the block size may improve this situation and allow for a higher throughput. + * - Reading sets buffered + - This is a counter of the number of blocks that are waiting to be sent in the north service. + - If this figure is more than a couple of blocks it is an indication that the north plugin is failing to send complete blocks of data and that partial blocks are failing. Reducing the block size may improve the situation and reduce the amount of storage required in the north service.
+ * - Total readings buffered + - This is a count of the total number of readings buffered within the north service. + - This should be equivalent to 2 or 3 block sizes worth of readings. If it is high then it is an indication that the north plugin is not able to sustain a high enough data rate to match the ingest rates of the system. + * - Readings sent + - This gives an indication, for each block, of how many readings are sent in the block. + - This should typically match the blocks read; if not, it is an indication of failures to send data by the north plugin. + * - Percentage readings sent + - Closely related to the above, this is the percentage of each block read that was actually sent. + - In a well tuned system this figure should be close to 100%; if it is not then it may be that the north plugin is failing to send data, possibly because of an issue in an upstream system. Alternatively the block size may be too high for the upstream system to handle and reducing the block size will bring this value closer to 100%. + * - Readings added to buffer + - An absolute count of the number of readings read into each block. + - If this value is significantly less than the block size it is an indication that the block size can be lowered. If it is always close to the block size then consider increasing the block size. + Health Monitoring ================= @@ -150,7 +300,9 @@ postgres In most cases the default *sqlite* storage plugin is perfectly acceptable, however if very high data rates, or huge volumes of data (i.e. large images at a reasonably high rate) are ingested this plugin can start to exhibit issues. This usually exhibits itself by large queues building in the south service or in extreme cases by transaction failure messages in the log for the storage service. If this happens then the recommended course of action is to either switch to a plugin that stores data in memory rather than on external storage, *sqlitememory*, or investigate the media where the data is stored. Low performance storage will adversely impact the *sqlite* plugin. -The *sqlite* plugin may also prove less than optimal if you are ingested many hundreds of different assets in the same Fledge instance. The *sqlite* plugin has been optimized to allow concurrent south services to write to the storage in parallel. This is done by the use of multiple databases to improve the concurrency, however there is a limit, imposed by the number of open databases that can be supported. If this limit is exceeded it is recommend to switch to the *sqlitelb* plugin. There are configuration options regarding how these databases are used that can change the point at which it becomes necessary to switch to the other plugin. +The *sqlite* plugin may also prove less than optimal if you are ingesting many hundreds of different assets in the same Fledge instance. The *sqlite* plugin has been optimized to allow concurrent south services to write to the storage in parallel. This is done by the use of multiple databases to improve the concurrency, however there is a limit, imposed by the number of open databases that can be supported. If this limit is exceeded it is recommended to switch to the *sqlitelb* plugin. There are configuration options regarding how these databases are used that can change the point at which it becomes necessary to switch to the other plugin.
+ +If you wish to use the same plugin to both store the configuration data and the reading data then you may either choose the same plugin for both or select the option *Use main plugin* for the *Reading Plugin* value. Using the latter is perhaps a slightly safer option as changes to the *Storage Plugin* will then automatically cause the readings to use that same plugin. Configuring Storage Plugins ########################### @@ -177,6 +329,8 @@ The storage plugins to use can be selected in the *Advanced* section of the *Con - **Management Port**: Normally the storage service will dynamically create a management port that will be used by the storage service. Setting this to a value other than 0 will cause a fixed port to be used. This can be useful when developing a new storage plugin. +- **Log Level**: This controls the level at which the storage plugin will output logs. + Changing will be saved once the *save* button is pressed. Fledge uses a mechanism whereby this data is not only saved in the configuration database, but also cached to a file called *storage.json* in the *etc* directory of the data directory. This is required such that Fledge can find the configuration database during the boot process. If the configuration becomes corrupt for some reason simply removing this file and restarting Fledge will cause the default configuration to be restored. The location of the Fledge data directory will depend upon how you installed Fledge and the environment variables used to run Fledge. - Installation from a package will usually put the data directory in */usr/local/fledge/data*. However this can be overridden by setting the *$FLEDGE_DATA* environment variable to point at a different location. @@ -248,6 +402,14 @@ The storage plugin configuration can be found in the *Advanced* section of the * This pool size is only the initial size, the storage service will grow the pool if required, however setting a realistic initial pool size will improve the ramp up performance of Fledge. + - **Max. Insert Rows**: The maximum number of readings that will be inserted in a single call to Postgres. This is a tuning parameter that has two effects on the system + + - It limits the size, and hence memory requirements, for a single insert statement + + - It prevents very long running insert transactions from blocking access to the readings table + + This parameter is useful on systems with very high data ingest rates or when the ingest contains sporadic large bursts of readings, to limit resource usage and database lock contention. A sketch illustrating this batching behaviour is given below. + .. note:: Although the pool size denotes the number of parallel operations that can take place, database locking considerations may reduce the number of actual operations in progress at any point in time. @@ -269,4 +431,6 @@ The storage plugin configuration can be found in the *Advanced* section of the * Although the pool size denotes the number of parallel operations that can take place, database locking considerations may reduce the number of actual operations in progress at any point in time. + - **Persist Data**: Controls the persisting of the in-memory database on shutdown. If enabled the in-memory database will be persisted on shutdown of Fledge and reloaded when Fledge is next started. Selecting this option will slow down the shutdown and startup processing for Fledge. + - **Persist File**: This defines the name of the file to which the in-memory database will be persisted. 
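As referenced above, the following sketch illustrates the batching idea behind the *Max. Insert Rows* parameter: buffered readings are written in chunks no larger than the configured maximum, so a single insert statement, and the transaction around it, stays bounded. This is a conceptual illustration in Python, not the PostgreSQL plugin's actual implementation, and the value of 5000 used here is an arbitrary assumption.

.. code-block:: python

    def insert_in_batches(readings, insert_fn, max_insert_rows=5000):
        """Write readings using insert_fn in chunks of at most max_insert_rows."""
        for start in range(0, len(readings), max_insert_rows):
            # Each call is one bounded insert, so no single statement or
            # transaction grows with the size of the backlog.
            insert_fn(readings[start:start + max_insert_rows])

    # Example with a placeholder insert function.
    pending = [{"asset_code": "asset_1", "reading": {"pressure": 900.0 + i}}
               for i in range(12000)]
    insert_in_batches(pending, lambda batch: print("inserted", len(batch), "rows"))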
diff --git a/extras/scripts/fledge.service b/extras/scripts/fledge.service index f4c25fe78b..4f79f14d73 100755 --- a/extras/scripts/fledge.service +++ b/extras/scripts/fledge.service @@ -67,11 +67,19 @@ get_pid() { } fledge_start() { - sudo -u ${FLEDGE_USER} "${FLEDGE_ROOT}/bin/fledge" start > /dev/null + if [ "$IS_RHEL" = "" ]; then + sudo -u ${FLEDGE_USER} "${FLEDGE_ROOT}/bin/fledge" start > /dev/null + else + "${FLEDGE_ROOT}/bin/fledge" start > /dev/null + fi } fledge_stop() { - sudo -u ${FLEDGE_USER} "${FLEDGE_ROOT}/bin/fledge" stop > /dev/null + if [ "$IS_RHEL" = "" ]; then + sudo -u ${FLEDGE_USER} "${FLEDGE_ROOT}/bin/fledge" stop > /dev/null + else + "${FLEDGE_ROOT}/bin/fledge" stop > /dev/null + fi } case "$1" in diff --git a/python/fledge/common/acl_manager.py b/python/fledge/common/acl_manager.py index 207e161361..bbf2c26ed0 100644 --- a/python/fledge/common/acl_manager.py +++ b/python/fledge/common/acl_manager.py @@ -4,11 +4,9 @@ # See: http://fledge-iot.readthedocs.io/ # FLEDGE_END -import logging - +from fledge.common.logger import FLCoreLogger from fledge.common.storage_client.payload_builder import PayloadBuilder from fledge.common.storage_client.exceptions import StorageServerError -from fledge.common import logger __author__ = "Deepanshu Yadav" @@ -16,7 +14,7 @@ __license__ = "Apache 2.0" __version__ = "${VERSION}" -_logger = logger.setup(__name__, level=logging.INFO) +_logger = FLCoreLogger().get_logger(__name__) class ACLManagerSingleton(object): @@ -97,7 +95,7 @@ async def _notify_service_about_acl_change(self, entity_name, acl, reason): self._pending_notifications.pop(entity_name) except Exception as ex: - _logger.error("Could not notify {} due to {}".format(entity_name, str(ex))) + _logger.error(ex, "Could not notify {}.".format(entity_name)) async def handle_update_for_acl_usage(self, entity_name, acl_name, entity_type, message="updateACL"): diff --git a/python/fledge/common/audit_logger.py b/python/fledge/common/audit_logger.py index 6da16a99f9..937c497db5 100644 --- a/python/fledge/common/audit_logger.py +++ b/python/fledge/common/audit_logger.py @@ -4,19 +4,17 @@ # See: http://fledge-iot.readthedocs.io/ # FLEDGE_END +from fledge.common.logger import FLCoreLogger from fledge.common.storage_client.payload_builder import PayloadBuilder from fledge.common.storage_client.storage_client import StorageClientAsync from fledge.common.storage_client.exceptions import StorageServerError -from fledge.common import logger - __author__ = "Mark Riddoch" __copyright__ = "Copyright (c) 2018 OSIsoft, LLC" __license__ = "Apache 2.0" __version__ = "${VERSION}" - -_logger = logger.setup(__name__) +_logger = FLCoreLogger().get_logger(__name__) class AuditLoggerSingleton(object): @@ -62,7 +60,7 @@ async def _log(self, level, code, log): await self._storage.insert_into_tbl("log", payload) except (StorageServerError, Exception) as ex: - _logger.exception("Failed to log audit trail entry '%s': %s", code, str(ex)) + _logger.error(ex, "Failed to log audit trail entry '{}'.".format(code)) raise ex async def success(self, code, log): diff --git a/python/fledge/common/configuration_manager.py b/python/fledge/common/configuration_manager.py index c05bba4465..ecbc8cb4c0 100644 --- a/python/fledge/common/configuration_manager.py +++ b/python/fledge/common/configuration_manager.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # FLEDGE_BEGIN # See: http://fledge-iot.readthedocs.io/ # FLEDGE_END @@ -13,6 +12,7 @@ import ipaddress import datetime import os +import logging from math import * import 
collections import ast @@ -21,7 +21,7 @@ from fledge.common.storage_client.storage_client import StorageClientAsync from fledge.common.storage_client.exceptions import StorageServerError from fledge.common.storage_client.utils import Utils -from fledge.common import logger +from fledge.common.logger import FLCoreLogger from fledge.common.common import _FLEDGE_ROOT, _FLEDGE_DATA from fledge.common.audit_logger import AuditLogger from fledge.common.acl_manager import ACLManager @@ -31,9 +31,7 @@ __license__ = "Apache 2.0" __version__ = "${VERSION}" -import logging - -_logger = logger.setup(__name__) +_logger = FLCoreLogger().get_logger(__name__) # MAKE UPPER_CASE _valid_type_strings = sorted(['boolean', 'integer', 'float', 'string', 'IPv4', 'IPv6', 'X509 certificate', 'password', @@ -83,7 +81,7 @@ def update(self, category_name, category_description, category_val, display_name display_name = category_name if display_name is None else display_name self.cache[category_name] = {'date_accessed': datetime.datetime.now(), 'description': category_description, 'value': category_val, 'displayName': display_name} - _logger.info("Updated Configuration Cache %s", self.cache) + _logger.debug("Updated Configuration Cache %s", self.cache) def remove_oldest(self): """Remove the entry that has the oldest accessed date""" @@ -91,9 +89,11 @@ def remove_oldest(self): for category_name in self.cache: if oldest_entry is None: oldest_entry = category_name - elif self.cache[category_name]['date_accessed'] < self.cache[oldest_entry]['date_accessed']: + elif self.cache[category_name].get('date_accessed') and self.cache[oldest_entry].get('date_accessed') \ + and self.cache[category_name]['date_accessed'] < self.cache[oldest_entry]['date_accessed']: oldest_entry = category_name - self.cache.pop(oldest_entry) + if oldest_entry: + self.cache.pop(oldest_entry) def remove(self, key): """Remove the entry with given key name""" @@ -189,11 +189,15 @@ async def _run_callbacks(self, category_name): 'Callback module %s run method must be a coroutine function', callback) raise AttributeError('Callback module {} run method must be a coroutine function'.format(callback)) await cb.run(category_name) + else: + if category_name == "LOGGING": + from fledge.services.core import server + logging_level = self._cacheManager.cache[category_name]['value']['logLevel']['value'] + server.Server._log_level = logging_level + FLCoreLogger().set_level(logging_level) async def _run_callbacks_child(self, parent_category_name, child_category, operation): - callbacks = self._registered_interests_child.get(parent_category_name) - if callbacks is not None: for callback in callbacks: try: @@ -669,7 +673,7 @@ async def update_configuration_item_bulk(self, category_name, config_item_list): await audit.information('CONCH', audit_details) except Exception as ex: - _logger.exception('Unable to bulk update config items %s', str(ex)) + _logger.exception(ex, 'Unable to bulk update config items') raise try: @@ -747,8 +751,7 @@ async def get_all_category_names(self, root=None, children=False): children) if root is not None else await self._read_all_category_names() return info except: - _logger.exception( - 'Unable to read all category names') + _logger.exception('Unable to read all category names') raise async def get_category_all_items(self, category_name): @@ -778,8 +781,7 @@ async def get_category_all_items(self, category_name): category["display_name"]) return category_value except: - _logger.exception( - 'Unable to get all category names based on 
category_name %s', category_name) + _logger.exception('Unable to get all category items of {} category.'.format(category_name)) raise async def get_category_item(self, category_name, item_name): @@ -1134,7 +1136,16 @@ async def create_category(self, category_name, category_value, category_descript else: await self._update_category(category_name, category_val_prepared, category_description, display_name) - + diff = set(category_val_prepared) - set(category_val_storage) + if diff: + audit = AuditLogger(self._storage) + audit_details = { + 'category': category_name, + 'item': "configurationChange", + 'oldValue': category_val_storage, + 'newValue': category_val_prepared + } + await audit.information('CONCH', audit_details) is_acl, config_item, found_cat_name, found_value = await \ self.search_for_ACL_recursive_from_cat_name(category_name) _logger.debug("check if there is {} create category function for category {} ".format(is_acl, @@ -1416,13 +1427,13 @@ async def _delete_recursively(self, cat): payload = PayloadBuilder().WHERE(["child", "=", cat]).payload() result = await self._storage.delete_from_tbl("category_children", payload) if result['response'] == 'deleted': - _logger.info('Deleted parent in category_children: %s', cat) + _logger.info('Deleted parent in category_children: {}'.format(cat)) # Remove category. payload = PayloadBuilder().WHERE(["key", "=", cat]).payload() result = await self._storage.delete_from_tbl("configuration", payload) if result['response'] == 'deleted': - _logger.info('Deleted parent category from configuration: %s', cat) + _logger.info('Deleted parent category from configuration: {}'.format(cat)) audit = AuditLogger(self._storage) audit_details = {'categoryDeleted': cat} # FIXME: FOGL-2140 @@ -1462,7 +1473,7 @@ def delete_category_related_things(self, category_name): _logger.info("Removing file %s for category %s", f, category_name) os.remove(f) except Exception as ex: - _logger.error('Failed to delete file(s) for category %s. 
Exception %s', category_name, str(ex)) + _logger.error(ex, 'Failed to delete file(s) for category {}.'.format(category_name)) # raise ex def register_interest_child(self, category_name, callback): diff --git a/python/fledge/common/jqfilter.py b/python/fledge/common/jqfilter.py index 0f61d6080f..c14ecd0988 100644 --- a/python/fledge/common/jqfilter.py +++ b/python/fledge/common/jqfilter.py @@ -9,7 +9,7 @@ import pyjq -from fledge.common import logger +from fledge.common.logger import FLCoreLogger __author__ = "Vaibhav Singhal" __copyright__ = "Copyright (c) 2017 OSI Soft, LLC" @@ -25,7 +25,7 @@ class JQFilter: def __init__(self): """Initialise the JQFilter""" - self._logger = logger.setup("JQFilter") + self._logger = FLCoreLogger().get_logger("JQFilter") def transform(self, reading_block, filter_string): """ @@ -45,8 +45,8 @@ def transform(self, reading_block, filter_string): try: return pyjq.all(filter_string, reading_block) except TypeError as ex: - self._logger.error("Invalid JSON passed, exception %s", str(ex)) + self._logger.error(ex, "Invalid JSON passed during jq transform.") raise except ValueError as ex: - self._logger.error("Failed to transform, please check the transformation rule, exception %s", str(ex)) + self._logger.error(ex, "Failed to transform, please check the transformation rule.") raise diff --git a/python/fledge/common/logger.py b/python/fledge/common/logger.py index 242c519cf3..8629058c50 100644 --- a/python/fledge/common/logger.py +++ b/python/fledge/common/logger.py @@ -7,12 +7,14 @@ """ Fledge Logger """ import os import subprocess -import sys import logging +import traceback from logging.handlers import SysLogHandler +from functools import wraps -__author__ = "Praveen Garg" -__copyright__ = "Copyright (c) 2017 OSIsoft, LLC" + +__author__ = "Praveen Garg, Ashish Jabble" +__copyright__ = "Copyright (c) 2017-2023 OSIsoft, LLC" __license__ = "Apache 2.0" __version__ = "${VERSION}" @@ -24,11 +26,12 @@ """ CONSOLE = 1 """Send log entries to STDERR""" +FLEDGE_LOGS_DESTINATION = 'FLEDGE_LOGS_DESTINATION' +"""Log destination environment variable""" +default_destination = SYSLOG +"""Default destination of logger""" -FLEDGE_LOGS_DESTINATION='FLEDGE_LOGS_DESTINATION' # env variable -default_destination = SYSLOG # default for fledge - def set_default_destination(destination: int): """ set_default_destination - allow a global default to be set, once, for all fledge modules also, set env variable FLEDGE_LOGS_DESTINATION for communication with related, spawned @@ -42,7 +45,16 @@ def set_default_destination(destination: int): os.environ[FLEDGE_LOGS_DESTINATION] in [str(CONSOLE), str(SYSLOG)]: # inherit (valid) default from the environment set_default_destination(int(os.environ[FLEDGE_LOGS_DESTINATION])) - + + +def get_process_name() -> str: + # Example: ps -eaf | grep 5175 | grep -v grep | awk -F '--name=' '{print $2}' + pid = os.getpid() + cmd = "ps -eaf | grep {} | grep -v grep | awk -F '--name=' '{{print $2}}'| tr -d '\n'".format(pid) + read_process_name = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.readlines() + binary_to_string = [b.decode() for b in read_process_name] + pname = 'Fledge ' + binary_to_string[0] if binary_to_string else 'Fledge' + return pname def setup(logger_name: str = None, @@ -83,16 +95,6 @@ def setup(logger_name: str = None, .. 
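# A short usage sketch of the JQFilter.transform() call changed above, assuming pyjq is
# installed; the reading block and the jq program ".[] | {assetCode: .asset_code}" are
# illustrative values only.
from fledge.common.jqfilter import JQFilter

jq_filter = JQFilter()
readings = [{"asset_code": "temperature", "reading": {"degrees": 21.3}}]
result = jq_filter.transform(readings, ".[] | {assetCode: .asset_code}")
# result -> [{'assetCode': 'temperature'}]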
_logging.getLogger: https://docs.python.org/3/library/logging.html#logging.getLogger """ - - def _get_process_name(): - # Example: ps -eaf | grep 5175 | grep -v grep | awk -F '--name=' '{print $2}' - pid = os.getpid() - cmd = "ps -eaf | grep {} | grep -v grep | awk -F '--name=' '{{print $2}}'| tr -d '\n'".format(pid) - read_process_name = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.readlines() - binary_to_string = [b.decode() for b in read_process_name] - pname = 'Fledge ' + binary_to_string[0] if binary_to_string else 'Fledge' - return pname - logger = logging.getLogger(logger_name) # if no destination is set, use the fledge default @@ -106,16 +108,148 @@ def _get_process_name(): else: raise ValueError("Invalid destination {}".format(destination)) - process_name = _get_process_name() # TODO: Consider using %r with message when using syslog .. \n looks better than # - fmt = '{}[%(process)d] %(levelname)s: %(module)s: %(name)s: %(message)s'.format(process_name) + fmt = '{}[%(process)d] %(levelname)s: %(module)s: %(name)s: %(message)s'.format(get_process_name()) formatter = logging.Formatter(fmt=fmt) - handler.setFormatter(formatter) if level is not None: logger.setLevel(level) - logger.addHandler(handler) - logger.propagate = propagate + + # Call error override + error_override(logger) return logger + + +def error_override(_logger: logging.Logger) -> None: + """Override error logger to print multi-line traceback and error string with newline + Args: + _logger: Logger Object + + Returns: + None + """ + # save the old logging.error function + __logging_error = _logger.error + + @wraps(_logger.error) + def error(msg, *args, **kwargs): + if isinstance(msg, Exception): + """For example: + a) _logger.error(ex) + b) _logger.error(ex, "Failed to add data.") + """ + trace_msg = traceback.format_exception(msg.__class__, msg, msg.__traceback__) + if args: + trace_msg[:0] = ["{}\n".format(args[0])] + [__logging_error(line.strip('\n')) for line in trace_msg] + else: + if isinstance(msg, str): + """For example: + a) _logger.error(str(ex)) + b) _logger.error("Failed to log audit trail entry") + c) _logger.error('Failed to log audit trail entry for code: %s', "CONCH") + d) _logger.error('Failed to log audit trail entry for code: {log_code}'.format(log_code="CONAD")) + e) _logger.error('Failed to log audit trail entry for code: {0}'.format("CONAD")) + f) _logger.error("Failed to log audit trail entry for code '{}' \n{}".format("CONCH", "Next line")) + """ + if args: + msg = msg % args + [__logging_error(m) for m in msg.splitlines()] + else: + # Default logging error + __logging_error(msg) + # overwrite the default logging.error + _logger.error = error + + +class FLCoreLogger: + """ + Singleton FLCoreLogger class. This class is only instantiated ONCE. It is to keep a consistent + criteria for the logger throughout the application if need to be called upon. + It serves as the criteria for initiating logger for modules. It creates child loggers. + It's important to note these are child loggers as any changes made to the root logger + can be done. 
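# A minimal usage sketch of the FLCoreLogger singleton and the error() override added in
# this file, assuming an illustrative module name; a syslog and a console handler are
# attached once per logger, and an Exception passed as the first argument is expanded
# into a per-line traceback by the override.
from fledge.common.logger import FLCoreLogger

_logger = FLCoreLogger().get_logger("fledge.example.module")
try:
    1 / 0
except ZeroDivisionError as ex:
    _logger.error(ex, "Failed to compute the example value.")    # exception-first form
_logger.error("Failed to process category: %s", "rest_api")      # printf-style form still works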
+ """ + _instance = None + + def __new__(cls): + if cls._instance is None: + cls._instance = super().__new__(cls) + process_name = get_process_name() + fmt = '{}[%(process)d] %(levelname)s: %(module)s: %(name)s: %(message)s'.format(process_name) + cls.formatter = logging.Formatter(fmt=fmt) + return cls._instance + + def get_syslog_handler(self): + """Defines a syslog handler + + Returns: + logging handler object : the syslog handler + """ + syslog_handler = SysLogHandler(address='/dev/log') + syslog_handler.setFormatter(self.formatter) + syslog_handler.name = "syslogHandler" + return syslog_handler + + def get_console_handler(self): + """Defines a console handler to come out on the console + + Returns: + logging handler object : the console handler + """ + console_handler = logging.StreamHandler() + console_handler.setFormatter(self.formatter) + console_handler.name = "consoleHandler" + return console_handler + + def add_handlers(self, logger, handler_list: list): + """Adds handlers to the logger, checks first if handlers exist to avoid + duplication + + Args: + logger: Logger to check handlers + handler_list: list of handlers to add + """ + existing_handler_names = [] + for existing_handler in logger.handlers: + existing_handler_names.append(existing_handler.name) + + for new_handler in handler_list: + if new_handler.name not in existing_handler_names: + logger.addHandler(new_handler) + + def get_logger(self, logger_name: str): + """Generates logger for use in the modules. + Args: + logger_name: name of the logger + + Returns: + logger: returns logger for module + """ + _logger = logging.getLogger(logger_name) + console_handler = self.get_console_handler() + syslog_handler = self.get_syslog_handler() + self.add_handlers(_logger, [syslog_handler, console_handler]) + _logger.propagate = False + # Call error override + error_override(_logger) + return _logger + + def set_level(self, level_name: str): + """Sets the root logger level. That means all child loggers will inherit this feature from it. 
+ Args: + level_name: logging level + """ + if level_name == 'debug': + log_level = logging.DEBUG + elif level_name == 'info': + log_level = logging.INFO + elif level_name == 'error': + log_level = logging.ERROR + elif level_name == 'critical': + log_level = logging.CRITICAL + else: + log_level = logging.WARNING + logging.root.setLevel(log_level) diff --git a/python/fledge/common/microservice_management_client/microservice_management_client.py b/python/fledge/common/microservice_management_client/microservice_management_client.py index 70196433fb..247205fe5a 100644 --- a/python/fledge/common/microservice_management_client/microservice_management_client.py +++ b/python/fledge/common/microservice_management_client/microservice_management_client.py @@ -55,7 +55,8 @@ def register_service(self, service_registration_payload): try: response["id"] except (KeyError, Exception) as ex: - _logger.exception("Could not register the microservice, From request %s, Reason: %s", json.dumps(service_registration_payload), str(ex)) + _logger.exception(ex, "Could not register the microservice, From request {}".format( + json.dumps(service_registration_payload))) raise return response @@ -86,8 +87,7 @@ def unregister_service(self, microservice_id): try: response["id"] except (KeyError, Exception) as ex: - _logger.exception("Could not unregister the micro-service having uuid %s, Reason: %s", - microservice_id, str(ex)) + _logger.exception(ex, "Could not unregister the microservice having UUID {}".format(microservice_id)) raise return response @@ -117,8 +117,7 @@ def register_interest(self, category, microservice_id): try: response["id"] except (KeyError, Exception) as ex: - _logger.exception("Could not register interest, for request payload %s, Reason: %s", - payload, str(ex)) + _logger.exception(ex, "Could not register interest, for request payload {}".format(payload)) raise return response @@ -145,7 +144,7 @@ def unregister_interest(self, registered_interest_id): try: response["id"] except (KeyError, Exception) as ex: - _logger.exception("Could not unregister interest for %s, Reason: %s", registered_interest_id, str(ex)) + _logger.exception(ex, "Could not unregister interest for {}".format(registered_interest_id)) raise return response @@ -179,7 +178,7 @@ def get_services(self, service_name=None, service_type=None): try: response["services"] except (KeyError, Exception) as ex: - _logger.exception("Could not find the micro-service for requested url %s, Reason: %s", url, str(ex)) + _logger.exception(ex, "Could not find the microservice for requested url {}".format(url)) raise return response diff --git a/python/fledge/common/plugin_discovery.py b/python/fledge/common/plugin_discovery.py index e332fd78f6..02bfe73fea 100644 --- a/python/fledge/common/plugin_discovery.py +++ b/python/fledge/common/plugin_discovery.py @@ -7,10 +7,10 @@ """Common Plugin Discovery Class""" import os -from fledge.common import logger +from fledge.common.logger import FLCoreLogger +from fledge.plugins.common import utils as common_utils from fledge.services.core.api import utils from fledge.services.core.api.plugins import common -from fledge.plugins.common import utils as common_utils __author__ = "Amarendra K Sinha, Ashish Jabble" @@ -18,8 +18,7 @@ __license__ = "Apache 2.0" __version__ = "${VERSION}" - -_logger = logger.setup(__name__) +_logger = FLCoreLogger().get_logger(__name__) class PluginDiscovery(object): @@ -149,8 +148,11 @@ def fetch_c_plugins_installed(cls, plugin_type, is_config, installed_dir_name): 'description': 
jdoc['config']['plugin']['description'], 'version': jdoc['version'], 'installedDirectory': '{}/{}'.format(installed_dir_name, name), - 'packageName': pkg_name - } + 'packageName': get_package_name( + "fledge-{}-".format(plugin_type), + "{}/plugins/{}/{}/.Package".format(utils._FLEDGE_ROOT, installed_dir_name, name), + pkg_name) + } if is_config: plugin_config.update({'config': jdoc['config']}) configs.append(plugin_config) @@ -185,14 +187,14 @@ def get_plugin_config(cls, plugin_dir, plugin_type, installed_dir_name, is_confi # Only OMF is an inbuilt plugin if name.lower() != 'omf': pkg_name = 'fledge-{}-{}'.format(plugin_type, name.lower().replace("_", "-")) - plugin_config = { 'name': plugin_info['config']['plugin']['default'], 'type': plugin_type, 'description': plugin_info['config']['plugin']['description'], 'version': plugin_info['version'], 'installedDirectory': '{}/{}'.format(installed_dir_name, name), - 'packageName': pkg_name + 'packageName': get_package_name("fledge-{}-".format(plugin_type), + "{}/.Package".format(plugin_dir), pkg_name) } else: _logger.warning("Plugin {} is discarded due to invalid type".format(plugin_dir)) @@ -202,9 +204,28 @@ def get_plugin_config(cls, plugin_dir, plugin_type, installed_dir_name, is_confi except DeprecationWarning: _logger.warning('"{}" plugin is deprecated'.format(plugin_dir.split('/')[-1])) except FileNotFoundError as ex: - _logger.error('Plugin "{}" import problem from path "{}". {}'.format(plugin_dir, plugin_module_path, str(ex))) + _logger.error(ex, 'Import problem from path "{}" for {} plugin.'.format(plugin_module_path, plugin_dir)) except Exception as ex: - _logger.exception('Plugin "{}" raised exception "{}" while fetching config'.format(plugin_dir, str(ex))) + _logger.exception(ex, 'Failed to fetch config for {} plugin.'.format(plugin_dir)) return plugin_config + +def get_package_name(prefix: str, filepath: str, internal_name: str) -> str: + """ Get Package name on the basis of .Package file + Args: + prefix: package prefix which is used for file content matching + filepath: Check .Package file in given path + internal_name: If .Package file is missing then use old internal way + """ + try: + # open file in read mode + with open(filepath, 'r') as read_obj: + line = read_obj.read().strip('\n') + except Exception: + # If .Package file not found then return internal package name + # which is most likely a case of non-package environment setup + return internal_name + else: + # if Package file content is empty then return internal package name Else Package file content + return internal_name if prefix not in line else line diff --git a/python/fledge/common/process.py b/python/fledge/common/process.py index c6dbdb6ffb..99efe29fff 100644 --- a/python/fledge/common/process.py +++ b/python/fledge/common/process.py @@ -100,7 +100,7 @@ def __init__(self): raise ArgumentParserError("Invalid value {} for optional arg {}".format(kv[1], kv[0])) except ArgumentParserError as ex: - _logger.error("Arg parser error: %s", str(ex)) + _logger.error(ex, "Arg parser error.") raise self._core_microservice_management_client = MicroserviceManagementClient(self._core_management_host, diff --git a/python/fledge/common/statistics.py b/python/fledge/common/statistics.py index 53f1a05ad0..7481c0c463 100644 --- a/python/fledge/common/statistics.py +++ b/python/fledge/common/statistics.py @@ -5,7 +5,7 @@ # FLEDGE_END import json -from fledge.common import logger +from fledge.common.logger import FLCoreLogger from fledge.common.storage_client.payload_builder import 
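# A small sketch of the .Package lookup helper defined above, with an illustrative plugin
# path and names; when the marker file is missing (a typical source-tree setup) the
# internally derived package name is returned unchanged.
from fledge.common.plugin_discovery import get_package_name

pkg = get_package_name("fledge-south-",
                       "/usr/local/fledge/plugins/south/sinusoid/.Package",   # illustrative path
                       "fledge-south-sinusoid")
# pkg -> the .Package file content when it contains the "fledge-south-" prefix,
#        otherwise the fallback "fledge-south-sinusoid"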
PayloadBuilder from fledge.common.storage_client.storage_client import StorageClientAsync @@ -15,8 +15,7 @@ __license__ = "Apache 2.0" __version__ = "${VERSION}" - -_logger = logger.setup(__name__) +_logger = FLCoreLogger().get_logger(__name__) async def create_statistics(storage=None): @@ -71,7 +70,7 @@ async def update_bulk(self, stat_list): payload['updates'].append(json.loads(payload_item)) await self._storage.update_tbl("statistics", json.dumps(payload, sort_keys=False)) except Exception as ex: - _logger.exception('Unable to bulk update statistics %s', str(ex)) + _logger.exception(ex, 'Unable to bulk update statistics') raise async def update(self, key, value_increment): @@ -97,9 +96,9 @@ async def update(self, key, value_increment): .payload() await self._storage.update_tbl("statistics", payload) except Exception as ex: - _logger.exception( - 'Unable to update statistics value based on statistics_key %s and value_increment %d, error %s' - , key, value_increment, str(ex)) + msg = 'Unable to update statistics value based on statistics_key {} and value_increment {}'.format( + key, value_increment) + _logger.exception(ex, msg) raise async def add_update(self, sensor_stat_dict): @@ -126,9 +125,9 @@ async def add_update(self, sensor_stat_dict): _logger.exception('Statistics key %s has not been registered', key) raise except Exception as ex: - _logger.exception( - 'Unable to update statistics value based on statistics_key %s and value_increment %s, error %s' - , key, value_increment, str(ex)) + msg = 'Unable to update statistics value based on statistics_key {} and value_increment {}'.format( + key, value_increment) + _logger.exception(ex, msg) raise async def register(self, key, description): @@ -144,7 +143,7 @@ async def register(self, key, description): """ The error may be because the key has been created in another process, reload keys """ await self._load_keys() if key not in self._registered_keys: - _logger.exception('Unable to create new statistic %s, error %s', key, str(ex)) + _logger.exception(ex, 'Unable to create new statistic {} key.'.format(key)) raise async def _load_keys(self): @@ -155,4 +154,4 @@ async def _load_keys(self): for row in results['rows']: self._registered_keys.append(row['key']) except Exception as ex: - _logger.exception('Failed to retrieve statistics keys, %s', str(ex)) + _logger.exception(ex, 'Failed to retrieve statistics keys') diff --git a/python/fledge/common/storage_client/payload_builder.py b/python/fledge/common/storage_client/payload_builder.py index 6fb1f025df..b1b4ba4455 100644 --- a/python/fledge/common/storage_client/payload_builder.py +++ b/python/fledge/common/storage_client/payload_builder.py @@ -127,6 +127,15 @@ def add_clause_to_select(cls, clause, qp_list, col, clause_value): with_clause = OrderedDict() with_clause['column'] = item with_clause[clause] = clause_value + """ + NOTE: + For Sqlite based engines, Temporarily workaround in payload builder to add "utc" timezone always + when query with user_ts column and having alias timestamp. 
+ Though for PostgreSQL we already have set a session level time zone to 'UTC' during connection + https://github.com/fledge-iot/fledge/pull/900/files + """ + if col == "user_ts" and clause_value == "timestamp": + with_clause["timezone"] = "utc" qp_list[i] = with_clause if isinstance(item, dict): if 'json' in qp_list[i] and qp_list[i]['json']['column'] == col: diff --git a/python/fledge/common/storage_client/storage_client.py b/python/fledge/common/storage_client/storage_client.py index 04f4b26a9e..dfff9234cf 100644 --- a/python/fledge/common/storage_client/storage_client.py +++ b/python/fledge/common/storage_client/storage_client.py @@ -587,10 +587,16 @@ async def purge(self, age=None, sent_id=0, size=None, flag=None, asset=None): async with aiohttp.ClientSession() as session: async with session.put(url, data=None) as resp: status_code = resp.status - jdoc = await resp.json() - if status_code not in range(200, 209): - _LOGGER.error("PUT url %s, Error code: %d, reason: %s, details: %s", put_url, resp.status, - resp.reason, jdoc) - raise StorageServerError(code=resp.status, reason=resp.reason, error=jdoc) - + try: + jdoc = await resp.json() + if status_code not in range(200, 209): + _LOGGER.error("PUT url %s, Error code: %d, reason: %s, details: %s", put_url, resp.status, + resp.reason, jdoc) + raise StorageServerError(code=resp.status, reason=resp.reason, error=jdoc) + except ValueError as err: + jdoc = None + _LOGGER.error(err, "Failed to parse JSON data returned of purge from the storage reading plugin.") + except Exception as ex: + jdoc = None + _LOGGER.error(ex, "Purge readings is failed.") return jdoc diff --git a/python/fledge/common/utils.py b/python/fledge/common/utils.py index e8d77f317a..09bf56a1a8 100644 --- a/python/fledge/common/utils.py +++ b/python/fledge/common/utils.py @@ -85,3 +85,50 @@ def decorator(Class): def eprint(*args, **kwargs): """ eprintf -- convenience print function that prints to stderr """ print(*args, *kwargs, file=sys.stderr) + + +def read_os_release(): + """ General information to identifying the operating system """ + import ast + import re + os_details = {} + with open('/etc/os-release', encoding="utf-8") as f: + for line_number, line in enumerate(f, start=1): + line = line.rstrip() + if not line or line.startswith('#'): + continue + m = re.match(r'([A-Z][A-Z_0-9]+)=(.*)', line) + if m: + name, val = m.groups() + if val and val[0] in '"\'': + val = ast.literal_eval(val) + os_details.update({name: val}) + return os_details + + +def is_redhat_based(): + """ + To check if the Operating system is of Red Hat family or Not + Examples: + a) For an operating system with "ID=centos", an assignment of "ID_LIKE="rhel fedora"" is appropriate + b) For an operating system with "ID=ubuntu/raspbian", an assignment of "ID_LIKE=debian" is appropriate. 
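# A brief sketch of the read_os_release()/is_redhat_based() helpers described above,
# assuming a Debian-family /etc/os-release is present; the values shown are examples,
# not guaranteed output.
from fledge.common import utils

os_details = utils.read_os_release()      # e.g. {'NAME': 'Ubuntu', 'ID': 'ubuntu', 'ID_LIKE': 'debian', ...}
print(os_details.get('ID_LIKE'))          # 'debian' on Ubuntu/Raspbian, 'rhel fedora' on CentOS Stream
print(utils.is_redhat_based())            # False for the Debian family, True for the Red Hat family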
+ """ + os_release = read_os_release() + id_like = os_release.get('ID_LIKE') + if id_like is not None and any(x in id_like.lower() for x in ['centos', 'rhel', 'redhat', 'fedora']): + return True + return False + + +def get_open_ssl_version(version_string=True): + """ Open SSL version info + + Args: + version_string + + Returns: + When version_string is True - The version string of the OpenSSL library loaded by the interpreter + When version_string is False - A tuple of five integers representing version information about the OpenSSL library + """ + import ssl + return ssl.OPENSSL_VERSION if version_string else ssl.OPENSSL_VERSION_INFO diff --git a/python/fledge/common/web/middleware.py b/python/fledge/common/web/middleware.py index 24e5570c80..295c31878e 100644 --- a/python/fledge/common/web/middleware.py +++ b/python/fledge/common/web/middleware.py @@ -13,14 +13,14 @@ import jwt from fledge.services.core.user_model import User -from fledge.common import logger +from fledge.common.logger import FLCoreLogger __author__ = "Praveen Garg" __copyright__ = "Copyright (c) 2017 OSIsoft, LLC" __license__ = "Apache 2.0" __version__ = "${VERSION}" -_logger = logger.setup(__name__) +_logger = FLCoreLogger().get_logger(__name__) async def error_middleware(app, handler): @@ -42,7 +42,7 @@ async def middleware_handler(request): async def optional_auth_middleware(app, handler): async def middleware(request): - _logger.info("Received %s request for %s", request.method, request.path) + _logger.debug("Received %s request for %s", request.method, request.path) request.is_auth_optional = True request.user = None return await handler(request) @@ -54,7 +54,7 @@ async def middleware(request): # if `rest_api` config has `authentication` set to mandatory then: # request must carry auth header, # actual value will be checked too and if bad then 401: unauthorized will be returned - _logger.info("Received %s request for %s", request.method, request.path) + _logger.debug("Received %s request for %s", request.method, request.path) request.is_auth_optional = False request.user = None @@ -160,11 +160,13 @@ def handle_api_exception(ex, _class=None, if_trace=0): async def validate_requests(request): """ - a) With "view" based user role id=3 only + a) With "normal" based user role id=2 only + - restrict operations of Control scripts and pipelines except GET + b) With "view" based user role id=3 only - read access operations (GET calls) - change profile (PUT call) - logout (PUT call) - b) With "data-view" based user role id=4 only + c) With "data-view" based user role id=4 only - ping (GET call) - browser asset read operation (GET call) - service (GET call) @@ -173,12 +175,24 @@ async def validate_requests(request): - user roles (GET call) - change profile (PUT call) - logout (PUT call) + d) With "control" based user role id=5 only + - same as normal user can do + - All CRUD's privileges for control scripts + - All CRUD's privileges for control pipelines """ user_id = request.user['id'] - if int(request.user["role_id"]) == 3 and request.method != 'GET': + # Normal/Editor user + if int(request.user["role_id"]) == 2 and request.method != 'GET': + # Special case: Allowed control entrypoint update request and handling of rejection in its handler + if str(request.rel_url).startswith('/fledge/control') and not str(request.rel_url).startswith( + '/fledge/control/request'): + raise web.HTTPForbidden + # Viewer user + elif int(request.user["role_id"]) == 3 and request.method != 'GET': supported_endpoints = ['/fledge/user', 
'/fledge/user/{}/password'.format(user_id), '/logout'] if not str(request.rel_url).endswith(tuple(supported_endpoints)): raise web.HTTPForbidden + # Data Viewer user elif int(request.user["role_id"]) == 4: if request.method == 'GET': supported_endpoints = ['/fledge/asset', '/fledge/ping', '/fledge/statistics', diff --git a/python/fledge/common/web/ssl_wrapper.py b/python/fledge/common/web/ssl_wrapper.py index 7dd13b5c3f..b9179921a4 100644 --- a/python/fledge/common/web/ssl_wrapper.py +++ b/python/fledge/common/web/ssl_wrapper.py @@ -9,7 +9,7 @@ import time import datetime import subprocess -from fledge.common import logger +from fledge.common import logger, utils __author__ = "Amarendra Kumar Sinha" __copyright__ = "Copyright (c) 2019 Dianomic Systems" @@ -80,21 +80,24 @@ def verify_against_revoked(cls): @classmethod def verify_against_ca(cls): echo_process = subprocess.Popen(['echo', cls.user_cert], stderr=subprocess.PIPE, stdout=subprocess.PIPE) - a = subprocess.Popen(["openssl", "verify", "-CAfile", cls.ca_cert, "-x509_strict"], stdin=echo_process.stdout, stderr=subprocess.PIPE, stdout=subprocess.PIPE) + args = "openssl verify -CAfile {}".format(cls.ca_cert) + # TODO: FOGL-7302 to handle -x509_strict check when OpenSSL version >=3.x + # Removing the -x509_strict flag as an interim solution; as of now only CentOS Stream9 has OpenSSL version 3.0 + if utils.get_open_ssl_version(version_string=False)[0] < 3: + args += " -x509_strict" + a = subprocess.Popen(args.split(), stdin=echo_process.stdout, stderr=subprocess.PIPE, stdout=subprocess.PIPE) outs, errs = a.communicate() if outs is None and errs is None: - raise OSError( - 'Verification error in executing command "{}"'.format("openssl verify -CAfile {} -x509_strict".format(cls.ca_cert))) + raise OSError('Verification error in executing command "{}"'.format(args)) if a.returncode != 0: - raise OSError( - 'Verification error in executing command "{}". Error: {}, returncode: {}'.format("openssl verify -CAfile {} -x509_strict".format(cls.ca_cert), errs.decode('utf-8').replace('\n', ''), a.returncode)) + raise OSError('Verification error in executing command "{}". 
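# A brief sketch of the two return forms of get_open_ssl_version() used by the gate above;
# the concrete values are examples and depend on the interpreter's OpenSSL build.
from fledge.common import utils

utils.get_open_ssl_version()                                  # e.g. 'OpenSSL 1.1.1f  31 Mar 2020'
major = utils.get_open_ssl_version(version_string=False)[0]  # e.g. 1 or 3
use_strict = major < 3          # -x509_strict is only appended for OpenSSL releases below 3.x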
Error: {}, returncode: {}'.format( + args, errs.decode('utf-8').replace('\n', ''), a.returncode)) d = [b for b in outs.decode('utf-8').split('\n') if b != ''] if "OK" not in d[0]: raise SSLVerifier.VerificationError( str(), 'failed verification', errs) return d - """ Common x509 options: -serial - print serial number value diff --git a/python/fledge/plugins/storage/common/backup.py b/python/fledge/plugins/storage/common/backup.py index 4c22f2492b..94519c3cc2 100644 --- a/python/fledge/plugins/storage/common/backup.py +++ b/python/fledge/plugins/storage/common/backup.py @@ -211,7 +211,7 @@ async def create_backup(self): try: await server.Server.scheduler.queue_task(uuid.UUID(Backup._SCHEDULE_BACKUP_ON_DEMAND)) _message = self._MESSAGES_LIST["i000003"] - Backup._logger.info("{0}".format(_message)) + Backup._logger.debug("{0}".format(_message)) status = "running" except Exception as _ex: _message = self._MESSAGES_LIST["e000004"].format(_ex) diff --git a/python/fledge/plugins/storage/common/lib.py b/python/fledge/plugins/storage/common/lib.py index a914f06212..e078e2dcc0 100644 --- a/python/fledge/plugins/storage/common/lib.py +++ b/python/fledge/plugins/storage/common/lib.py @@ -9,6 +9,7 @@ import os import asyncio import json +import datetime from enum import IntEnum from fledge.common import logger @@ -403,17 +404,10 @@ def sl_backup_status_create(self, _file_name, _type, _status): Returns: Raises: """ - _logger.debug("{func} - file name |{file}| ".format(func="sl_backup_status_create", file=_file_name)) - - payload = payload_builder.PayloadBuilder() \ - .INSERT(file_name=_file_name, - ts="now()", - type=_type, - status=_status, - exit_code=0) \ - .payload() - + payload = payload_builder.PayloadBuilder().INSERT( + file_name=_file_name, ts=datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"), type=_type, + status=_status, exit_code=0).payload() asyncio.get_event_loop().run_until_complete(self._storage.insert_into_tbl(self.STORAGE_TABLE_BACKUPS, payload)) def sl_backup_status_update(self, _id, _status, _exit_code): @@ -426,17 +420,11 @@ def sl_backup_status_update(self, _id, _status, _exit_code): Returns: Raises: """ - _logger.debug("{func} - id |{file}| ".format(func="sl_backup_status_update", file=_id)) - - payload = payload_builder.PayloadBuilder() \ - .SET(status=_status, - ts="now()", - exit_code=_exit_code) \ - .WHERE(['id', '=', _id]) \ - .payload() - - asyncio.get_event_loop().run_until_complete( self._storage.update_tbl(self.STORAGE_TABLE_BACKUPS, payload)) + payload = payload_builder.PayloadBuilder().SET( + status=_status, ts=datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"), exit_code=_exit_code + ).WHERE(['id', '=', _id]).payload() + asyncio.get_event_loop().run_until_complete(self._storage.update_tbl(self.STORAGE_TABLE_BACKUPS, payload)) def sl_get_backup_details_from_file_name(self, _file_name): """ Retrieves backup information from file name diff --git a/python/fledge/services/common/microservice.py b/python/fledge/services/common/microservice.py index f4df44afdc..65c9cc6488 100644 --- a/python/fledge/services/common/microservice.py +++ b/python/fledge/services/common/microservice.py @@ -85,7 +85,7 @@ def __init__(self): res = self.register_service_with_core(self._get_service_registration_payload()) self._microservice_id = res["id"] except Exception as ex: - _logger.exception('Unable to intialize FledgeMicroservice due to exception %s', str(ex)) + _logger.exception(ex, 'Unable to initialize FledgeMicroservice') raise def _make_microservice_management_app(self): diff 
--git a/python/fledge/services/common/utils.py b/python/fledge/services/common/utils.py index 327334870f..b31cf68fab 100644 --- a/python/fledge/services/common/utils.py +++ b/python/fledge/services/common/utils.py @@ -8,6 +8,7 @@ import aiohttp import asyncio +import logging from fledge.common import logger __author__ = "Amarendra Kumar Sinha" @@ -16,7 +17,7 @@ __version__ = "${VERSION}" -_logger = logger.setup(__name__, level=20) +_logger = logger.setup(__name__, level=logging.INFO) _MAX_ATTEMPTS = 15 """Number of max attempts for finding a heartbeat of service""" @@ -41,7 +42,7 @@ async def ping_service(service, loop=None): attempt_count += 1 await asyncio.sleep(1.5, loop=loop) if attempt_count <= _MAX_ATTEMPTS: - _logger.info('Ping received for Service %s id %s at url %s', service._name, service._id, url_ping) + _logger.debug('Ping received for Service %s id %s at url %s', service._name, service._id, url_ping) return True _logger.error('Ping not received for Service %s id %s at url %s attempt_count %s', service._name, service._id, url_ping, attempt_count) @@ -62,7 +63,7 @@ async def shutdown_service(service, loop=None): if not status_code == 200: raise Exception(message=text) except Exception as ex: - _logger.exception('Error in Service shutdown %s, %s', service._name, str(ex)) + _logger.exception(ex, 'Failed to shutdown {} service.'.format(service._name)) return False else: _logger.info('Service %s, id %s at url %s successfully shutdown', service._name, service._id, url_shutdown) diff --git a/python/fledge/services/core/api/asset_tracker.py b/python/fledge/services/core/api/asset_tracker.py index 5f1828c2f0..af807a3a1f 100644 --- a/python/fledge/services/core/api/asset_tracker.py +++ b/python/fledge/services/core/api/asset_tracker.py @@ -4,20 +4,17 @@ # See: http://fledge-iot.readthedocs.io/ # FLEDGE_END import json -import logging from aiohttp import web import urllib.parse from fledge.common import utils as common_utils +from fledge.common.audit_logger import AuditLogger +from fledge.common.logger import FLCoreLogger from fledge.common.storage_client.exceptions import StorageServerError from fledge.common.storage_client.payload_builder import PayloadBuilder from fledge.services.core import connect -from fledge.common.audit_logger import AuditLogger -from fledge.common import logger - - __author__ = "Ashish Jabble" __copyright__ = "Copyright (c) 2018 OSIsoft, LLC" __license__ = "Apache 2.0" @@ -30,7 +27,7 @@ ----------------------------------------------------------------------------------------- """ -_logger = logger.setup(__name__, level=logging.INFO) +_logger = FLCoreLogger().get_logger(__name__) async def get_asset_tracker_events(request: web.Request) -> web.Response: @@ -46,6 +43,7 @@ async def get_asset_tracker_events(request: web.Request) -> web.Response: curl -sX GET http://localhost:8081/fledge/track?asset=XXX curl -sX GET http://localhost:8081/fledge/track?event=XXX curl -sX GET http://localhost:8081/fledge/track?service=XXX + curl -sX GET http://localhost:8081/fledge/track?deprecated=true curl -sX GET http://localhost:8081/fledge/track?event=XXX&asset=XXX&service=XXX """ payload = PayloadBuilder().SELECT("asset", "event", "service", "fledge", "plugin", "ts", "deprecated_ts", "data") \ @@ -61,6 +59,10 @@ async def get_asset_tracker_events(request: web.Request) -> web.Response: if 'service' in request.query and request.query['service'] != '': service = urllib.parse.unquote(request.query['service']) payload.AND_WHERE(['service', '=', service]) + if 'deprecated' in 
request.query and request.query['deprecated'] != '': + deprecated = request.query['deprecated'].strip().lower() + if deprecated == "true": + payload.AND_WHERE(['deprecated_ts', "notnull"]) storage_client = connect.get_storage_async() payload = PayloadBuilder(payload.chain_payload()) @@ -72,6 +74,7 @@ async def get_asset_tracker_events(request: web.Request) -> web.Response: raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) except Exception as ex: msg = str(ex) + _logger.error(ex, "Failed to get asset tracker events.") raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response({'track': response}) @@ -146,6 +149,8 @@ async def deprecate_asset_track_entry(request: web.Request) -> web.Response: raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) except Exception as ex: msg = str(ex) + _logger.error(ex, "Deprecate {} asset entry failed for {} service with {} event.".format( + asset_name, svc_name, event_name)) raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: msg = "For {} event, {} asset record entry has been deprecated.".format(event_name, asset_name) @@ -239,6 +244,7 @@ async def get_datapoint_usage(request: web.Request) -> web.Response: raise web.HTTPBadRequest(reason=err_response, body=json.dumps({"message": err_response})) except Exception as ex: msg = str(ex) + _logger.error(ex, "Failed to get asset tracker store datapoints.") raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response(response) diff --git a/python/fledge/services/core/api/audit.py b/python/fledge/services/core/api/audit.py index 4647d9ed51..fbf6864547 100644 --- a/python/fledge/services/core/api/audit.py +++ b/python/fledge/services/core/api/audit.py @@ -10,11 +10,11 @@ from aiohttp import web import json +from fledge.common.audit_logger import AuditLogger +from fledge.common.logger import FLCoreLogger from fledge.common.storage_client.payload_builder import PayloadBuilder from fledge.common.storage_client.exceptions import StorageServerError from fledge.services.core import connect -from fledge.common.audit_logger import AuditLogger -from fledge.common import logger __author__ = "Amarendra K. Sinha, Ashish Jabble, Massimiliano Pinto" __copyright__ = "Copyright (c) 2017-2018 OSIsoft, LLC" @@ -32,7 +32,7 @@ ------------------------------------------------------------------------------- """ -_logger = logger.setup(__name__) +_logger = FLCoreLogger().get_logger(__name__) class Severity(IntEnum): @@ -119,17 +119,19 @@ async def create_audit_entry(request): except AttributeError as e: # Return error for wrong severity method err_msg = "severity type {} is not supported".format(severity) - _logger.error("Error in create_audit_entry(): %s | %s", err_msg, str(e)) raise web.HTTPNotFound(reason=err_msg, body=json.dumps({"message": err_msg})) except StorageServerError as ex: if int(ex.code) in range(400, 500): err_msg = 'Audit entry cannot be logged. {}'.format(ex.error['message']) - raise web.HTTPBadRequest(body=json.dumps({"message": err_msg})) + raise web.HTTPBadRequest(reason=err_msg, body=json.dumps({"message": err_msg})) else: err_msg = 'Failed to log audit entry. 
{}'.format(ex.error['message']) - raise web.HTTPInternalServerError(body=json.dumps({"message": err_msg})) + _logger.warning(err_msg) + raise web.HTTPInternalServerError(reason=err_msg, body=json.dumps({"message": err_msg})) except Exception as ex: - raise web.HTTPInternalServerError(reason=str(ex), body=json.dumps({"message": str(ex)})) + msg = str(ex) + _logger.error(ex, "Failed to log audit entry.") + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response(message) @@ -267,7 +269,9 @@ async def get_audit_entries(request): r["timestamp"] = row["timestamp"] res.append(r) except Exception as ex: - raise web.HTTPInternalServerError(reason=str(ex)) + msg = str(ex) + _logger.error(ex, "Failed to get Audit log entry.") + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response({'audit': res, 'totalCount': total_count}) diff --git a/python/fledge/services/core/api/auth.py b/python/fledge/services/core/api/auth.py index 0ddf7907c2..8a47c5a6fb 100644 --- a/python/fledge/services/core/api/auth.py +++ b/python/fledge/services/core/api/auth.py @@ -10,20 +10,20 @@ import json from collections import OrderedDict import jwt -import logging - from aiohttp import web -from fledge.services.core.user_model import User + +from fledge.common.audit_logger import AuditLogger +from fledge.common.logger import FLCoreLogger from fledge.common.web.middleware import has_permission -from fledge.common import logger from fledge.common.web.ssl_wrapper import SSLVerifier +from fledge.services.core.user_model import User __author__ = "Praveen Garg, Ashish Jabble, Amarendra K Sinha" __copyright__ = "Copyright (c) 2017 OSIsoft, LLC" __license__ = "Apache 2.0" __version__ = "${VERSION}" -_logger = logger.setup(__name__, level=logging.INFO) +_logger = FLCoreLogger().get_logger(__name__) _help = """ ------------------------------------------------------------------------------------ @@ -54,7 +54,7 @@ USERNAME_REGEX_PATTERN = '^[a-zA-Z0-9_.-]+$' PASSWORD_REGEX_PATTERN = '((?=.*\d)(?=.*[A-Z])(?=.*\W).{6,}$)' PASSWORD_ERROR_MSG = 'Password must contain at least one digit, one lowercase, one uppercase & one special character ' \ - 'and length of minimum 6 characters' + 'and length of minimum 6 characters.' FORBIDDEN_MSG = 'Resource you were trying to reach is absolutely forbidden for some reason' @@ -79,8 +79,7 @@ def __remove_ott_for_user(user_id): """Helper function that removes given user_id from OTT_MAP if the user exists in the map.""" try: _user_id = int(user_id) - except ValueError as ex: - _logger.info("User id given is not an integer.") + except ValueError: return for k, v in OTT.OTT_MAP.items(): if v[0] == _user_id: @@ -161,7 +160,6 @@ async def login(request): password = _data.get('password') if not username or not password: - _logger.warning("Username and password are required to login") raise web.HTTPBadRequest(reason="Username or password is missing") username = str(username).lower() @@ -173,13 +171,11 @@ async def login(request): try: uid, token, is_admin = await User.Objects.login(username, password, host) except (User.DoesNotExist, User.PasswordDoesNotMatch, ValueError) as ex: - _logger.warning(str(ex)) raise web.HTTPNotFound(reason=str(ex)) except User.PasswordExpired as ex: # delete all user token for this user await User.Objects.delete_user_tokens(str(ex)) - - msg = 'Your password has been expired. Please set your password again' + msg = 'Your password has been expired. 
Please set your password again.' _logger.warning(msg) raise web.HTTPUnauthorized(reason=msg) @@ -220,12 +216,13 @@ async def get_ott(request): if int(result_role['rows'][0]['role_id']) == 1: is_admin = True except Exception as ex: - raise web.HTTPBadRequest(reason="The request failed due to {}".format(ex)) + msg = str(ex) + _logger.error(ex, "OTT token failed.") + raise web.HTTPBadRequest(reason="The request failed due to {}".format(msg)) else: now_time = datetime.datetime.now() p = {'uid': user_id, 'exp': now_time} - ott_token = jwt.encode(p, JWT_SECRET, JWT_ALGORITHM).decode("utf-8") - + ott_token = jwt.encode(p, JWT_SECRET, JWT_ALGORITHM) already_existed_token = False key_to_remove = None for k, v in OTT.OTT_MAP.items(): @@ -256,11 +253,10 @@ async def logout_me(request): result = await User.Objects.delete_token(request.token) if not result['rows_affected']: - _logger.warning("Logout requested with bad user token") raise web.HTTPNotFound() __remove_ott_for_token(request.token) - _logger.info("User has been logged out successfully") + _logger.info("User has been logged out successfully.") return web.json_response({"logout": True}) @@ -279,15 +275,11 @@ async def logout(request): if int(request.user["role_id"]) == ADMIN_ROLE_ID or int(request.user["id"]) == int(user_id): result = await User.Objects.delete_user_tokens(user_id) - if not result['rows_affected']: - _logger.warning("Logout requested with bad user") raise web.HTTPNotFound() - # Remove OTT token for this user if there. __remove_ott_for_user(user_id) - - _logger.info("User with id:<{}> has been logged out successfully".format(int(user_id))) + _logger.info("User with ID:<{}> has been logged out successfully.".format(int(user_id))) else: # requester is not an admin but trying to take action for another user raise web.HTTPUnauthorized(reason="admin privileges are required to logout other user") @@ -323,8 +315,7 @@ async def get_user(request): if user_id <= 0: raise ValueError except ValueError: - _logger.warning("Get user requested with bad user id") - raise web.HTTPBadRequest(reason="Bad user id") + raise web.HTTPBadRequest(reason="Bad user ID") if 'username' in request.query and request.query['username'] != '': user_name = request.query['username'].lower() @@ -342,20 +333,20 @@ async def get_user(request): result = u except User.DoesNotExist as ex: msg = str(ex) - _logger.warning(msg) raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) else: users = await User.Objects.all() res = [] for row in users: - u = OrderedDict() - u["userId"] = row["id"] - u["userName"] = row["uname"] - u["roleId"] = row["role_id"] - u["accessMethod"] = row["access_method"] - u["realName"] = row["real_name"] - u["description"] = row["description"] - res.append(u) + if row['enabled'] == 't': + u = OrderedDict() + u["userId"] = row["id"] + u["userName"] = row["uname"] + u["roleId"] = row["role_id"] + u["accessMethod"] = row["access_method"] + u["realName"] = row["real_name"] + u["description"] = row["description"] + res.append(u) result = {'users': res} return web.json_response(result) @@ -388,55 +379,44 @@ async def create_user(request): description = data.get('description', '') if not username: - msg = "Username is required to create user" - _logger.error(msg) + msg = "Username is required to create user." 
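# A small sketch of the one-time-token encoding shown above, assuming PyJWT 2.x where
# jwt.encode() already returns a str (hence no .decode("utf-8")); the secret and algorithm
# below are placeholders, not the module's real values.
import datetime
import jwt

payload = {'uid': 2, 'exp': datetime.datetime.now()}
ott_token = jwt.encode(payload, "a-placeholder-secret", "HS256")   # str on PyJWT >= 2.0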
raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) if not isinstance(username, str) or not isinstance(access_method, str) or not isinstance(real_name, str) \ or not isinstance(description, str): - msg = "Values should be passed in string" - _logger.error(msg) + msg = "Values should be passed in string." raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) username = username.lower().strip().replace(" ", "") if len(username) < MIN_USERNAME_LENGTH: - msg = "Username should be of minimum 4 characters" - _logger.error(msg) + msg = "Username should be of minimum 4 characters." raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) if not re.match(USERNAME_REGEX_PATTERN, username): - msg = "Dot, hyphen, underscore special characters are allowed for username" - _logger.error(msg) + msg = "Dot, hyphen, underscore special characters are allowed for username." raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) if access_method.lower() not in ['any', 'cert', 'pwd']: - msg = "Invalid access method. Must be 'any' or 'cert' or 'pwd'" - _logger.error(msg) + msg = "Invalid access method. Must be 'any' or 'cert' or 'pwd'." raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) if access_method == 'pwd' and not password: - msg = "Password should not be an empty" - _logger.error(msg) + msg = "Password should not be an empty." raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) if access_method != 'cert': if password is not None: if not re.match(PASSWORD_REGEX_PATTERN, str(password)): - _logger.error(PASSWORD_ERROR_MSG) raise web.HTTPBadRequest(reason=PASSWORD_ERROR_MSG, body=json.dumps({"message": PASSWORD_ERROR_MSG})) if not (await is_valid_role(role_id)): - msg = "Invalid role id" - _logger.error(msg) + msg = "Invalid role ID." raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) - try: - await User.Objects.get(username=username) - except User.DoesNotExist: - pass - else: - msg = "Username already exists" + users = await User.Objects.all() + unames = [u['uname'] for u in users] + if username in unames: + msg = "Username already exists." _logger.warning(msg) raise web.HTTPConflict(reason=msg, body=json.dumps({"message": msg})) - u = dict() try: result = await User.Objects.create(username, password, role_id, access_method, real_name, description) @@ -452,13 +432,12 @@ async def create_user(request): u["description"] = user.pop('description') except ValueError as err: msg = str(err) - _logger.error(msg) raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) except Exception as exc: msg = str(exc) - _logger.exception(str(exc)) + _logger.error(exc, "Failed to create user.") raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) - msg = "{} user has been created successfully".format(username) + msg = "{} user has been created successfully.".format(username) _logger.info(msg) return web.json_response({'message': msg, 'user': u}) @@ -478,35 +457,36 @@ async def update_me(request): real_name = data.get('real_name', '') if 'real_name' in data: if len(real_name.strip()) == 0: - msg = "Real Name should not be empty" + msg = "Real Name should not be empty." 
raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) else: from fledge.services.core import connect from fledge.common.storage_client.payload_builder import PayloadBuilder - payload = PayloadBuilder().SELECT("user_id").WHERE(['token', '=', request.token]).payload() - storage_client = connect.get_storage_async() - result = await storage_client.query_tbl_with_payload("user_logins", payload) - if len(result['rows']) == 0: - raise User.DoesNotExist - payload = PayloadBuilder().SET(real_name=real_name.strip()).WHERE(['id', '=', - result['rows'][0]['user_id']]).payload() - message = "Something went wrong" try: + payload = PayloadBuilder().SELECT("user_id").WHERE(['token', '=', request.token]).payload() + storage_client = connect.get_storage_async() + result = await storage_client.query_tbl_with_payload("user_logins", payload) + if len(result['rows']) == 0: + raise User.DoesNotExist + user_id = result['rows'][0]['user_id'] + payload = PayloadBuilder().SET(real_name=real_name.strip()).WHERE(['id', '=', user_id]).payload() + message = "Something went wrong." result = await storage_client.update_tbl("users", payload) if result['response'] == 'updated': # TODO: FOGL-1226 At the moment only real name can update message = "Real name has been updated successfully!" except User.DoesNotExist: - msg = "User does not exist" + msg = "User does not exist." raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) except ValueError as err: msg = str(err) raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) except Exception as exc: msg = str(exc) + _logger.error(exc, "Failed to update the user <{}> profile.".format(int(user_id))) raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: - msg = "Nothing to update" + msg = "Nothing to update." raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) return web.json_response({"message": message}) @@ -526,9 +506,9 @@ async def update_user(request): user_id = request.match_info.get('user_id') if int(user_id) == 1: - msg = "Restricted for Super Admin user" + msg = "Restricted for Super Admin user." _logger.warning(msg) - raise web.HTTPNotAcceptable(reason=msg, body=json.dumps({"message": msg})) + raise web.HTTPForbidden(reason=msg, body=json.dumps({"message": msg})) data = await request.json() access_method = data.get('access_method', '') @@ -537,24 +517,24 @@ async def update_user(request): user_data = {} if 'real_name' in data: if len(real_name.strip()) == 0: - msg = "Real Name should not be empty" + msg = "Real Name should not be empty." raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) else: user_data.update({"real_name": real_name.strip()}) if 'access_method' in data: if len(access_method.strip()) == 0: - msg = "Access method should not be empty" + msg = "Access method should not be empty." raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) else: valid_access_method = ('any', 'pwd', 'cert') if access_method not in valid_access_method: - msg = "Accepted access method values are {}".format(valid_access_method) + msg = "Accepted access method values are {}.".format(valid_access_method) raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) user_data.update({"access_method": access_method.strip()}) if 'description' in data: user_data.update({"description": description.strip()}) if not user_data: - msg = "Nothing to update" + msg = "Nothing to update." 
raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) try: user = await User.Objects.update(user_id, user_data) @@ -564,15 +544,15 @@ async def update_user(request): if 'access_method' in data: # Remove OTT token for this user only if access method is updated. __remove_ott_for_user(user_id) - except ValueError as err: msg = str(err) raise web.HTTPBadRequest(reason=str(err), body=json.dumps({"message": msg})) except User.DoesNotExist: - msg = "User with id:<{}> does not exist".format(int(user_id)) + msg = "User with ID:<{}> does not exist".format(int(user_id)) raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) except Exception as exc: msg = str(exc) + _logger.error(exc, "Failed to update the user ID:<{}>.".format(user_id)) raise web.HTTPInternalServerError(reason=str(exc), body=json.dumps({"message": msg})) return web.json_response({'user_info': user_info}) @@ -591,59 +571,50 @@ async def update_password(request): try: int(user_id) except ValueError: - msg = "User id should be in integer" + msg = "User ID should be in integer." raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) data = await request.json() current_password = data.get('current_password') new_password = data.get('new_password') if not current_password or not new_password: - msg = "Current or new password is missing" - _logger.warning(msg) + msg = "Current or new password is missing." raise web.HTTPBadRequest(reason=msg) if new_password and not isinstance(new_password, str): - _logger.warning(PASSWORD_ERROR_MSG) raise web.HTTPBadRequest(reason=PASSWORD_ERROR_MSG) if new_password and not re.match(PASSWORD_REGEX_PATTERN, new_password): - _logger.warning(PASSWORD_ERROR_MSG) raise web.HTTPBadRequest(reason=PASSWORD_ERROR_MSG) if current_password == new_password: - msg = "New password should not be same as current password" - _logger.warning(msg) + msg = "New password should not be the same as current password." raise web.HTTPBadRequest(reason=msg) user_id = await User.Objects.is_user_exists(user_id, current_password) if not user_id: - msg = 'Invalid current password' - _logger.warning(msg) + msg = 'Invalid current password.' raise web.HTTPNotFound(reason=msg) try: await User.Objects.update(int(user_id), {'password': new_password}) - # Remove OTT token for this user if there. __remove_ott_for_user(user_id) - except ValueError as ex: - _logger.warning(str(ex)) raise web.HTTPBadRequest(reason=str(ex)) except User.DoesNotExist: - msg = "User with id:<{}> does not exist".format(int(user_id)) - _logger.warning(msg) + msg = "User with ID:<{}> does not exist.".format(int(user_id)) raise web.HTTPNotFound(reason=msg) except User.PasswordAlreadyUsed: - msg = "The new password should be different from previous 3 used" - _logger.warning(msg) + msg = "The new password should be different from previous 3 used." 
raise web.HTTPBadRequest(reason=msg) except Exception as exc: - _logger.exception(str(exc)) - raise web.HTTPInternalServerError(reason=str(exc)) - - _logger.info("Password has been updated successfully for user id:<{}>".format(int(user_id))) + msg = str(exc) + _logger.error(exc, "Failed to update the user ID:<{}>.".format(user_id)) + raise web.HTTPInternalServerError(reason=msg) - return web.json_response({'message': 'Password has been updated successfully for user id:<{}>'.format(int(user_id))}) + msg = "Password has been updated successfully for user ID:<{}>.".format(int(user_id)) + _logger.info(msg) + return web.json_response({'message': msg}) @has_permission("admin") @@ -659,9 +630,9 @@ async def enable_user(request): user_id = request.match_info.get('user_id') if int(user_id) == 1: - msg = "Restricted for Super Admin user" + msg = "Restricted for Super Admin user." _logger.warning(msg) - raise web.HTTPNotAcceptable(reason=msg, body=json.dumps({"message": msg})) + raise web.HTTPForbidden(reason=msg, body=json.dumps({"message": msg})) data = await request.json() enabled = data.get('enabled') @@ -674,8 +645,8 @@ async def enable_user(request): payload = PayloadBuilder().SELECT("id", "uname", "role_id", "enabled").WHERE( ['id', '=', user_id]).payload() storage_client = connect.get_storage_async() - result = await storage_client.query_tbl_with_payload('users', payload) - if len(result['rows']) == 0: + old_result = await storage_client.query_tbl_with_payload('users', payload) + if len(old_result['rows']) == 0: raise User.DoesNotExist payload = PayloadBuilder().SET(enabled=user_data['enabled']).WHERE(['id', '=', user_id]).payload() result = await storage_client.update_tbl("users", payload) @@ -685,25 +656,32 @@ async def enable_user(request): _text = 'enabled' if user_data['enabled'] == 't' else 'disabled' payload = PayloadBuilder().SELECT("id", "uname", "role_id", "enabled").WHERE( ['id', '=', user_id]).payload() - result = await storage_client.query_tbl_with_payload('users', payload) - if len(result['rows']) == 0: + new_result = await storage_client.query_tbl_with_payload('users', payload) + if len(new_result['rows']) == 0: raise User.DoesNotExist + # USRCH audit trail entry + audit = AuditLogger(storage_client) + await audit.information( + 'USRCH', {'user_id': int(user_id), 'old_value': {'enabled': old_result['rows'][0]['enabled']}, + 'new_value': {'enabled': new_result['rows'][0]['enabled']}, + "message": "'{}' user has been {}.".format(new_result['rows'][0]['uname'], _text)}) else: - raise ValueError('Something went wrong during update. Check Syslogs') + raise ValueError('Something went wrong during update. 
Check Syslogs.') else: - raise ValueError('Accepted values are True/False only') + raise ValueError('Accepted values are True/False only.') else: - raise ValueError('Nothing to enable user update') + raise ValueError('Nothing to enable user update.') except ValueError as err: msg = str(err) raise web.HTTPBadRequest(reason=str(err), body=json.dumps({"message": msg})) except User.DoesNotExist: - msg = "User with id:<{}> does not exist".format(int(user_id)) + msg = "User with ID:<{}> does not exist.".format(int(user_id)) raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) except Exception as exc: msg = str(exc) + _logger.error(exc, "Failed to enable/disable user ID:<{}>.".format(user_id)) raise web.HTTPInternalServerError(reason=str(exc), body=json.dumps({"message": msg})) - return web.json_response({'message': 'User with id:<{}> has been {} successfully'.format(int(user_id), _text)}) + return web.json_response({'message': 'User with ID:<{}> has been {} successfully.'.format(int(user_id), _text)}) @has_permission("admin") @@ -720,29 +698,25 @@ async def reset(request): user_id = request.match_info.get('user_id') if int(user_id) == 1: - msg = "Restricted for Super Admin user" + msg = "Restricted for Super Admin user." _logger.warning(msg) - raise web.HTTPNotAcceptable(reason=msg) + raise web.HTTPForbidden(reason=msg, body=json.dumps({"message": msg})) data = await request.json() password = data.get('password') role_id = data.get('role_id') if not role_id and not password: - msg = "Nothing to update the user" - _logger.warning(msg) + msg = "Nothing to update the user." raise web.HTTPBadRequest(reason=msg) if role_id and not (await is_valid_role(role_id)): - msg = "Invalid or bad role id" - _logger.warning(msg) + msg = "Invalid or bad role id." return web.HTTPBadRequest(reason=msg) if password and not isinstance(password, str): - _logger.warning(PASSWORD_ERROR_MSG) raise web.HTTPBadRequest(reason=PASSWORD_ERROR_MSG) if password and not re.match(PASSWORD_REGEX_PATTERN, password): - _logger.warning(PASSWORD_ERROR_MSG) raise web.HTTPBadRequest(reason=PASSWORD_ERROR_MSG) user_data = {} @@ -750,31 +724,30 @@ async def reset(request): user_data.update({'role_id': data['role_id']}) if 'password' in data: user_data.update({'password': data['password']}) - + if not user_data: + msg = "Nothing to update." + raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) try: await User.Objects.update(user_id, user_data) - # Remove OTT token for this user if there. __remove_ott_for_user(user_id) - except ValueError as ex: - _logger.warning(str(ex)) raise web.HTTPBadRequest(reason=str(ex)) except User.DoesNotExist: - msg = "User with id:<{}> does not exist".format(int(user_id)) - _logger.warning(msg) + msg = "User with ID:<{}> does not exist.".format(int(user_id)) raise web.HTTPNotFound(reason=msg) except User.PasswordAlreadyUsed: - msg = "The new password should be different from previous 3 used" + msg = "The new password should be different from previous 3 used." 
_logger.warning(msg) raise web.HTTPBadRequest(reason=msg) except Exception as exc: - _logger.exception(str(exc)) - raise web.HTTPInternalServerError(reason=str(exc)) - - _logger.info("User with id:<{}> has been updated successfully".format(int(user_id))) + msg = str(exc) + _logger.error(exc, "Failed to reset the user ID:<{}>.".format(user_id)) + raise web.HTTPInternalServerError(reason=msg) - return web.json_response({'message': 'User with id:<{}> has been updated successfully'.format(user_id)}) + msg = "User with ID:<{}> has been updated successfully.".format(int(user_id)) + _logger.info(msg) + return web.json_response({'message': msg}) @has_permission("admin") @@ -792,17 +765,16 @@ async def delete_user(request): try: user_id = int(request.match_info.get('user_id')) except ValueError as ex: - _logger.warning(str(ex)) raise web.HTTPBadRequest(reason=str(ex)) if user_id == 1: - msg = "Super admin user can not be deleted" + msg = "Super admin user can not be deleted." _logger.warning(msg) - raise web.HTTPNotAcceptable(reason=msg) + raise web.HTTPForbidden(reason=msg, body=json.dumps({"message": msg})) # Requester should not be able to delete her/himself if user_id == request.user["id"]: - msg = "You can not delete your own account" + msg = "You can not delete your own account." _logger.warning(msg) raise web.HTTPBadRequest(reason=msg) @@ -810,24 +782,21 @@ async def delete_user(request): result = await User.Objects.delete(user_id) if not result['rows_affected']: raise User.DoesNotExist - # Remove OTT token for this user if there. __remove_ott_for_user(user_id) - except ValueError as ex: - _logger.warning(str(ex)) raise web.HTTPBadRequest(reason=str(ex)) except User.DoesNotExist: - msg = "User with id:<{}> does not exist".format(int(user_id)) - _logger.warning(msg) + msg = "User with ID:<{}> does not exist.".format(int(user_id)) raise web.HTTPNotFound(reason=msg) except Exception as exc: - _logger.exception(str(exc)) - raise web.HTTPInternalServerError(reason=str(exc)) + msg = str(exc) + _logger.error(exc, "Failed to delete the user ID:<{}>.".format(user_id)) + raise web.HTTPInternalServerError(reason=msg) - _logger.info("User with id:<{}> has been deleted successfully.".format(int(user_id))) + _logger.info("User with ID:<{}> has been deleted successfully.".format(int(user_id))) - return web.json_response({'message': "User has been deleted successfully"}) + return web.json_response({'message': "User has been deleted successfully."}) async def is_valid_role(role_id): diff --git a/python/fledge/services/core/api/backup_restore.py b/python/fledge/services/core/api/backup_restore.py index c712d09db2..7d9e9a9c87 100644 --- a/python/fledge/services/core/api/backup_restore.py +++ b/python/fledge/services/core/api/backup_restore.py @@ -5,7 +5,6 @@ # FLEDGE_END """Backup and Restore Rest API support""" -import logging import os import sys import tarfile @@ -15,9 +14,10 @@ from enum import IntEnum from collections import OrderedDict -from fledge.common import logger -from fledge.common.audit_logger import AuditLogger from fledge.common.common import _FLEDGE_ROOT, _FLEDGE_DATA +from fledge.common.audit_logger import AuditLogger +from fledge.common.logger import FLCoreLogger + from fledge.common.storage_client import payload_builder from fledge.plugins.storage.common import exceptions from fledge.services.core import connect @@ -47,7 +47,7 @@ ----------------------------------------------------------------------------------- """ -_logger = logger.setup(__name__, level=logging.INFO) +_logger = 
FLCoreLogger().get_logger(__name__) class Status(IntEnum): @@ -114,10 +114,10 @@ async def get_backups(request): r["date"] = row["ts"] r["status"] = _get_status(int(row["status"])) res.append(r) - except Exception as ex: - raise web.HTTPInternalServerError(reason=str(ex)) - + msg = str(ex) + _logger.error(ex, "Failed to get the list of Backup records.") + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) return web.json_response({"backups": res}) @@ -130,8 +130,9 @@ async def create_backup(request): backup = Backup(connect.get_storage_async()) status = await backup.create_backup() except Exception as ex: - raise web.HTTPInternalServerError(reason=str(ex)) - + msg = str(ex) + _logger.error(ex, "Failed to create Backup.") + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) return web.json_response({"status": status}) @@ -156,8 +157,9 @@ async def get_backup_details(request): except exceptions.DoesNotExist: raise web.HTTPNotFound(reason='Backup id {} does not exist'.format(backup_id)) except Exception as ex: - raise web.HTTPInternalServerError(reason=(str(ex))) - + msg = str(ex) + _logger.error(ex, "Failed to fetch backup details for ID: <{}>.".format(backup_id)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) return web.json_response(resp) @@ -204,6 +206,7 @@ async def get_backup_download(request): raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) except Exception as ex: msg = str(ex) + _logger.error(ex, "Failed to download Backup file for ID: <{}>.".format(backup_id)) raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.FileResponse(path=gz_path) @@ -225,7 +228,9 @@ async def delete_backup(request): except exceptions.DoesNotExist: raise web.HTTPNotFound(reason='Backup id {} does not exist'.format(backup_id)) except Exception as ex: - raise web.HTTPInternalServerError(reason=str(ex)) + msg = str(ex) + _logger.error(ex, "Failed to delete Backup ID: <{}>.".format(backup_id)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) async def restore_backup(request): @@ -246,7 +251,9 @@ async def restore_backup(request): except exceptions.DoesNotExist: raise web.HTTPNotFound(reason='Backup with {} does not exist'.format(backup_id)) except Exception as ex: - raise web.HTTPInternalServerError(reason=str(ex)) + msg = str(ex) + _logger.error(ex, "Failed to restore Backup ID: <{}>.".format(backup_id)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) async def get_backup_status(request): @@ -350,6 +357,7 @@ async def upload_backup(request: web.Request) -> web.Response: raise web.HTTPNotImplemented(reason=msg, body=json.dumps({"message": msg})) except Exception as exc: msg = str(exc) + _logger.error(exc, "Failed to upload Backup.") raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: msg = "{} backup uploaded successfully.".format(file_name) diff --git a/python/fledge/services/core/api/browser.py b/python/fledge/services/core/api/browser.py index 56f49ffa0d..a909f3d9bd 100644 --- a/python/fledge/services/core/api/browser.py +++ b/python/fledge/services/core/api/browser.py @@ -43,11 +43,11 @@ from aiohttp import web +from fledge.common.logger import FLCoreLogger from fledge.common.storage_client.payload_builder import PayloadBuilder from fledge.services.core import connect -from fledge.common import logger -_logger = logger.setup(__name__) +_logger = 
FLCoreLogger().get_logger(__name__) __author__ = "Mark Riddoch, Ashish Jabble, Massimiliano Pinto" __copyright__ = "Copyright (c) 2017 OSIsoft, LLC" @@ -154,6 +154,7 @@ async def asset_counts(request): raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) except Exception as exc: msg = str(exc) + _logger.error(exc, "Failed to get all assets.") raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response(asset_json) @@ -177,16 +178,51 @@ async def asset(request): curl -sX GET "http://localhost:8081/fledge/asset/fogbench_humidity?limit=1&skip=1&order=desc curl -sX GET http://localhost:8081/fledge/asset/fogbench_humidity?seconds=60 curl -sX GET http://localhost:8081/fledge/asset/fogbench_humidity?seconds=60&previous=600 + curl -sX GET "http://localhost:8081/fledge/asset/fogbench_humidity?additional=sinusoid,random&seconds=600" + curl -sX GET "http://localhost:8081/fledge/asset/sinusoid?mostrecent=true&seconds=600" + curl -sX GET "http://localhost:8081/fledge/asset/sinusoid?mostrecent=true&seconds=60&additional=randomwalk" """ asset_code = request.match_info.get('asset_code', '') - _select = PayloadBuilder().SELECT(("reading", "user_ts")).ALIAS("return", ("user_ts", "timestamp")).chain_payload() - - _where = PayloadBuilder(_select).WHERE(["asset_code", "=", asset_code]).chain_payload() + # A comma separated list of additional assets to generate the readings to display multiple graphs in GUI + if 'additional' in request.query: + additional_assets = "{},{}".format(asset_code, request.query['additional']) + additional_asset_codes = additional_assets.split(',') + _select = PayloadBuilder().SELECT(("asset_code", "reading", "user_ts")).ALIAS( + "return", ("user_ts", "timestamp")).chain_payload() + _where = PayloadBuilder(_select).WHERE(["asset_code", "in", additional_asset_codes]).chain_payload() + else: + _select = PayloadBuilder().SELECT(("reading", "user_ts")).ALIAS( + "return", ("user_ts", "timestamp")).chain_payload() + _where = PayloadBuilder(_select).WHERE(["asset_code", "=", asset_code]).chain_payload() if 'previous' in request.query and ( 'seconds' in request.query or 'minutes' in request.query or 'hours' in request.query): _and_where = where_window(request, _where) - elif 'seconds' in request.query or 'minutes' in request.query or 'hours' in request.query: + elif 'mostrecent' not in request.query and ( + 'seconds' in request.query or 'minutes' in request.query or 'hours' in request.query): _and_where = where_clause(request, _where) + elif 'mostrecent' in request.query and 'seconds' in request.query: + if str(request.query['mostrecent']).lower() == 'true': + # To get latest reading for an asset's + asset_codes = additional_asset_codes if 'additional' in request.query else [asset_code] + _readings = connect.get_readings_async() + date_times = [] + dt_format = '%Y-%m-%d %H:%M:%S.%f' + for ac in asset_codes: + payload = PayloadBuilder().SELECT("user_ts").ALIAS("return", ("user_ts", "timestamp")).WHERE( + ["asset_code", "=", ac]).LIMIT(1).ORDER_BY(["user_ts", "desc"]).payload() + results = await _readings.query(payload) + response = results['rows'] + if response and 'timestamp' in response[0]: + date_times.append(datetime.datetime.strptime(response[0]['timestamp'], dt_format)) + most_recent_ts = max(date_times) + _logger.debug("DTS: {} most_recent_ts: {}".format(date_times, most_recent_ts)) + window = int(request.query['seconds']) + to_ts = most_recent_ts - datetime.timedelta(seconds=window) + most_recent_str = 
most_recent_ts.strftime(dt_format) + to_str = to_ts.strftime(dt_format) + _logger.debug("user_ts <={} TO user_ts>{}".format(most_recent_str, to_str)) + _and_where = PayloadBuilder(_where).AND_WHERE(['user_ts', '<=', most_recent_str]).AND_WHERE( + ['user_ts', '>', to_str]).chain_payload() elif 'previous' in request.query: msg = "the parameter previous can only be given if one of seconds, minutes or hours is also given" raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) @@ -201,7 +237,6 @@ async def asset(request): if _order not in ('asc', 'desc'): msg = "order must be asc or desc" raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) - payload = PayloadBuilder(_and_where).ORDER_BY(["user_ts", _order]).payload() try: _readings = connect.get_readings_async() @@ -213,12 +248,24 @@ async def asset(request): for item_name2, item_val2 in item_val.items(): if isinstance(item_val2, str) and item_val2.startswith(tuple(DATAPOINT_TYPES)): data[item_name][item_name2] = IMAGE_PLACEHOLDER if is_image_excluded(request) else item_val2 - response = rows + # Group the readings value by asset_code in case of additional multiple assets + if 'additional' in request.query: + response_by_asset_code = {} + for aacl in additional_asset_codes: + response_by_asset_code[aacl] = [] + for r in rows: + if r['asset_code'] in additional_asset_codes: + response_by_asset_code[r['asset_code']].extend([r]) + r.pop('asset_code') + response = response_by_asset_code + else: + response = rows except KeyError: msg = results['message'] raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) except Exception as exc: msg = str(exc) + _logger.error(exc, "Failed to get {} asset.".format(asset_code)) raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response(response) @@ -245,6 +292,7 @@ async def asset_latest(request: web.Request) -> web.Response: raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) except Exception as exc: msg = str(exc) + _logger.error(exc, "Failed to get latest {} asset.".format(asset_code)) raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response(response) @@ -313,6 +361,7 @@ async def asset_reading(request): raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) except Exception as exc: msg = str(exc) + _logger.error(exc, "Failed to get {} asset for {} reading.".format(asset_code, reading)) raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response(response) @@ -388,6 +437,7 @@ async def asset_all_readings_summary(request): raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) except Exception as exc: msg = str(exc) + _logger.error(exc, "Failed to get {} asset readings summary.".format(asset_code)) raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response(response) @@ -453,6 +503,7 @@ async def asset_summary(request): raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) except Exception as exc: msg = str(exc) + _logger.error(exc, "Failed to get {} asset {} reading summary.".format(asset_code, reading)) raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response({reading: response}) @@ -554,6 +605,7 @@ async def asset_averages(request): raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) except Exception as 
exc: msg = str(exc) + _logger.error(exc, "Failed to get average of {} readings for {} asset.".format(reading, asset_code)) raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response(response) @@ -583,6 +635,29 @@ def where_clause(request, where): def where_window(request, where): + """ newer/older payload conditions only worked with datetime (now - seconds) + Also there is no support of BETWEEN operator. + For mostrecent functionality with back/forward buttons a.k.a previous payload + There is workaround implemented at python side to get it without any amendments at C Payload side + Now, client has to pass datetime UTC string and having format %Y-%m-%d %H:%M:%S.%f in "previous" payload + For example: /fledge/asset/randomwalk?mostrecent=TRUE&seconds=10&previous=2023-08-01 06:32:36.515 + Payload: + {"return": ["reading", {"column": "user_ts", "alias": "timestamp", "timezone": "utc"}], + "where": {"column": "asset_code", "condition": "=", "value": "randomwalk", + "and": {"column": "user_ts", "condition": "<=", "value": "2023-08-01 06:32:36.515", + "and": {"column": "user_ts", "condition": ">=", "value": "2023-08-01 06:32:26.515"}}}, + "sort": {"column": "user_ts", "direction": "desc"}} + """ + if 'mostrecent' in request.query and 'seconds' in request.query: + val = int(request.query['seconds']) + previous_str = request.query['previous'] + dt_format = '%Y-%m-%d %H:%M:%S.%f' + dt_obj = datetime.datetime.strptime(previous_str, dt_format) + dt_obj_diff = dt_obj - datetime.timedelta(seconds=val) + dt_str = dt_obj_diff.strftime(dt_format) + payload = PayloadBuilder(where).AND_WHERE(['user_ts', '<=', previous_str]).chain_payload() + return PayloadBuilder(payload).AND_WHERE(['user_ts', '>=', dt_str]).chain_payload() + val = 0 previous = 0 try: @@ -686,8 +761,10 @@ async def asset_datapoints_with_bucket_size(request: web.Request) -> web.Respons raise web.HTTPNotFound(reason=e) except (TypeError, ValueError) as e: raise web.HTTPBadRequest(reason=e) - except Exception as e: - raise web.HTTPInternalServerError(reason=str(e)) + except Exception as ex: + msg = str(ex) + _logger.error(ex, "Failed to get {} asset datapoints with {} bucket size.".format(asset_code, bucket_size)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response(response) @@ -763,8 +840,11 @@ async def asset_readings_with_bucket_size(request: web.Request) -> web.Response: raise web.HTTPNotFound(reason=e) except (TypeError, ValueError) as e: raise web.HTTPBadRequest(reason=e) - except Exception as e: - raise web.HTTPInternalServerError(reason=str(e)) + except Exception as ex: + msg = str(ex) + _logger.error(ex, "Failed to get {} readings of {} asset with {} bucket size.".format( + reading, asset_code, bucket_size)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response(response) @@ -831,6 +911,7 @@ async def asset_structure(request): raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) except Exception as ex: msg = str(ex) + _logger.error(ex, "Failed to get assets structure.") raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response(asset_json) @@ -849,11 +930,11 @@ async def asset_purge_all(request): curl -sX DELETE http://localhost:8081/fledge/asset """ try: - from fledge.common.audit_logger import AuditLogger + _logger.warning("Manual purge of all assets has been requested.") # Call storage 
service - _logger.warning("Manual purge of all assets has been requested") _readings = connect.get_readings_async() # Get AuditLogger + from fledge.common.audit_logger import AuditLogger _audit = AuditLogger(_readings) start_time = time.strftime('%Y-%m-%d %H:%M:%S.%s', time.localtime(time.time())) @@ -872,6 +953,7 @@ async def asset_purge_all(request): raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) except Exception as exc: msg = str(exc) + _logger.error(exc, "Failed to purge all assets.") raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response(results) @@ -886,13 +968,13 @@ async def asset_purge(request): curl -sX DELETE http://localhost:8081/fledge/asset/fogbench_humidity """ asset_code = request.match_info.get('asset_code', '') - _logger.warning("Manual purge of '%s' asset has been requested", asset_code) + _logger.warning("Manual purge of '{}' asset has been requested.".format(asset_code)) try: - from fledge.common.audit_logger import AuditLogger # Call storage service _readings = connect.get_readings_async() # Get AuditLogger + from fledge.common.audit_logger import AuditLogger _audit = AuditLogger(_readings) start_time = time.strftime('%Y-%m-%d %H:%M:%S.%s', time.localtime(time.time())) @@ -912,6 +994,7 @@ async def asset_purge(request): raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) except Exception as exc: msg = str(exc) + _logger.error(exc, "Failed to purge {} asset.".format(asset_code)) raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response(results) @@ -940,6 +1023,7 @@ async def asset_timespan(request): raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) except Exception as exc: msg = str(exc) + _logger.error(exc, "Failed to get timespan of buffered readings for assets.") raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response(response) @@ -970,6 +1054,7 @@ async def asset_reading_timespan(request): raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) except Exception as exc: msg = str(exc) + _logger.error(exc, "Failed to get timespan of buffered readings for {} asset.".format(asset_code)) raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response(response) diff --git a/python/fledge/services/core/api/certificate_store.py b/python/fledge/services/core/api/certificate_store.py index 4fe92df1ca..5c1d840c64 100644 --- a/python/fledge/services/core/api/certificate_store.py +++ b/python/fledge/services/core/api/certificate_store.py @@ -9,11 +9,11 @@ from aiohttp import web -from fledge.common import logger +from fledge.common.common import _FLEDGE_ROOT, _FLEDGE_DATA +from fledge.common.configuration_manager import ConfigurationManager +from fledge.common.logger import FLCoreLogger from fledge.common.web.middleware import has_permission from fledge.services.core import connect -from fledge.common.configuration_manager import ConfigurationManager -from fledge.common.common import _FLEDGE_ROOT, _FLEDGE_DATA __author__ = "Ashish Jabble" __copyright__ = "Copyright (c) 2017 OSIsoft, LLC" @@ -27,8 +27,8 @@ | DELETE | /fledge/certificate/{name} | ------------------------------------------------------------------------------- """ -FORBIDDEN_MSG = 'Resource you were trying to reach is absolutely forbidden for some reason' -_logger = logger.setup(__name__) +FORBIDDEN_MSG = 'Resource you 
were trying to reach is absolutely forbidden for some reason.' +_logger = FLCoreLogger().get_logger(__name__) async def get_certs(request): diff --git a/python/fledge/services/core/api/common.py b/python/fledge/services/core/api/common.py index 0cd6333d11..44bc68ba12 100644 --- a/python/fledge/services/core/api/common.py +++ b/python/fledge/services/core/api/common.py @@ -157,9 +157,11 @@ async def shutdown(request): return web.json_response({'message': 'Fledge shutdown has been scheduled. ' 'Wait for few seconds for process cleanup.'}) except TimeoutError as err: - raise web.HTTPInternalServerError(reason=str(err)) + raise web.HTTPRequestTimeout(reason=str(err)) except Exception as ex: - raise web.HTTPInternalServerError(reason=str(ex)) + msg = str(ex) + _logger.error(ex, "Error while stopping Fledge server.") + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) def do_shutdown(request): @@ -168,7 +170,7 @@ def do_shutdown(request): loop = request.loop asyncio.ensure_future(server.Server.shutdown(request), loop=loop) except RuntimeError as e: - _logger.exception("Error while stopping Fledge server: {}".format(str(e))) + _logger.error(e, "Error while stopping Fledge server.") raise @@ -182,9 +184,10 @@ async def restart(request): _logger.info("Executing controlled shutdown and start") asyncio.ensure_future(server.Server.restart(request), loop=request.loop) return web.json_response({'message': 'Fledge restart has been scheduled.'}) - except TimeoutError as e: - _logger.exception("Error while stopping Fledge server: %s", e) - raise web.HTTPInternalServerError(reason=e) + except TimeoutError as err: + msg = str(err) + raise web.HTTPRequestTimeout(reason=msg, body=json.dumps({"message": msg})) except Exception as ex: - _logger.exception("Error while stopping Fledge server: %s", ex) - raise web.HTTPInternalServerError(reason=ex) + msg = str(ex) + _logger.error(ex, "Error while stopping Fledge server.") + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) diff --git a/python/fledge/services/core/api/configuration.py b/python/fledge/services/core/api/configuration.py index 3835bf2622..6bcc41ff53 100644 --- a/python/fledge/services/core/api/configuration.py +++ b/python/fledge/services/core/api/configuration.py @@ -12,10 +12,10 @@ from typing import Dict from aiohttp import web -from fledge.common import logger from fledge.common.audit_logger import AuditLogger from fledge.common.common import _FLEDGE_ROOT, _FLEDGE_DATA from fledge.common.configuration_manager import ConfigurationManager, _optional_items +from fledge.common.logger import FLCoreLogger from fledge.common.storage_client.payload_builder import PayloadBuilder from fledge.services.core import connect @@ -39,7 +39,7 @@ """ script_dir = _FLEDGE_DATA + '/scripts/' if _FLEDGE_DATA else _FLEDGE_ROOT + "/data/scripts/" -_logger = logger.setup(__name__) +_logger = FLCoreLogger().get_logger(__name__) ################################# # Configuration Manager @@ -165,7 +165,9 @@ async def create_category(request): except LookupError as ex: raise web.HTTPNotFound(reason=str(ex)) except Exception as ex: - raise web.HTTPInternalServerError(reason=str(ex)) + msg = str(ex) + _logger.error(ex, "Failed to create category.") + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) return web.json_response(result) @@ -189,7 +191,9 @@ async def delete_category(request): except (ValueError, TypeError) as ex: raise web.HTTPBadRequest(reason=ex) except Exception as ex: - 
raise web.HTTPInternalServerError(reason=ex) + msg = str(ex) + _logger.error(ex, "Failed to delete {} category.".format(category_name)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response({'result': 'Category {} deleted successfully.'.format(category_name)}) @@ -327,8 +331,6 @@ async def update_configuration_item_bulk(request): WHEN: if non-admin user is trying to update THEN: 403 Forbidden case """ - - if hasattr(request, "user"): config_items = [k for k, v in data.items() if k == 'authentication'] if request.user and (category_name == 'rest_api' and config_items): @@ -356,7 +358,9 @@ async def update_configuration_item_bulk(request): except (ValueError, TypeError) as ex: raise web.HTTPBadRequest(reason=ex) except Exception as ex: - raise web.HTTPInternalServerError(reason=ex) + msg = str(ex) + _logger.error(ex, "Failed to bulk update {} category.".format(category_name)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: cat = await cf_mgr.get_category_all_items(category_name) try: @@ -432,9 +436,12 @@ async def add_configuration_item(request): except NameError as ex: raise web.HTTPNotFound(reason=str(ex)) except Exception as ex: - raise web.HTTPInternalServerError(reason=str(ex)) + msg = str(ex) + _logger.error(ex, "Failed to create {} config item for {} category.".format(new_config_item, category_name)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) - return web.json_response({"message": "{} config item has been saved for {} category".format(new_config_item, category_name)}) + return web.json_response({"message": "{} config item has been saved for {} category".format(new_config_item, + category_name)}) async def delete_configuration_item_value(request): @@ -509,7 +516,10 @@ async def get_child_category(request): children = await cf_mgr.get_category_child(category_name) except ValueError as ex: raise web.HTTPNotFound(reason=str(ex)) - + except Exception as ex: + msg = str(ex) + _logger.error(ex, "Failed to get the child {} category.".format(category_name)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) return web.json_response({"categories": children}) @@ -540,7 +550,10 @@ async def create_child_category(request): raise web.HTTPBadRequest(reason=str(ex)) except ValueError as ex: raise web.HTTPNotFound(reason=str(ex)) - + except Exception as ex: + msg = str(ex) + _logger.error(ex, "Failed to create the child relationship for {} category.".format(category_name)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) return web.json_response(r) @@ -568,7 +581,10 @@ async def delete_child_category(request): raise web.HTTPBadRequest(reason=str(ex)) except ValueError as ex: raise web.HTTPNotFound(reason=str(ex)) - + except Exception as ex: + msg = str(ex) + _logger.error(ex, "Failed to delete the {} child of {} category.".format(child_category, category_name)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) return web.json_response({"children": result}) @@ -594,7 +610,10 @@ async def delete_parent_category(request): raise web.HTTPBadRequest(reason=str(ex)) except ValueError as ex: raise web.HTTPNotFound(reason=str(ex)) - + except Exception as ex: + msg = str(ex) + _logger.error(ex, "Failed to delete the parent-child relationship of {} category.".format(category_name)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) return 
web.json_response({"message": "Parent-child relationship for the parent-{} is deleted".format(category_name)}) @@ -658,7 +677,10 @@ async def upload_script(request): except Exception as ex: os.remove(script_file_path) - raise web.HTTPBadRequest(reason=ex) + msg = str(ex) + _logger.error(ex, "Failed to upload script for {} config item of {} category.".format(config_item, + category_name)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: result = await cf_mgr.get_category_item(category_name, config_item) return web.json_response(result) diff --git a/python/fledge/services/core/api/control_service/acl_management.py b/python/fledge/services/core/api/control_service/acl_management.py index 87cdc9a62f..8f3361098b 100644 --- a/python/fledge/services/core/api/control_service/acl_management.py +++ b/python/fledge/services/core/api/control_service/acl_management.py @@ -5,18 +5,17 @@ # FLEDGE_END import json -import logging from aiohttp import web - -from fledge.common import logger +from fledge.common.acl_manager import ACLManager +from fledge.common.audit_logger import AuditLogger from fledge.common.configuration_manager import ConfigurationManager +from fledge.common.logger import FLCoreLogger from fledge.common.storage_client.exceptions import StorageServerError from fledge.common.storage_client.payload_builder import PayloadBuilder from fledge.common.web.middleware import has_permission from fledge.services.core import connect from fledge.services.core.api.control_service.exceptions import * -from fledge.common.acl_manager import ACLManager __author__ = "Ashish Jabble, Massimiliano Pinto" @@ -33,7 +32,7 @@ -------------------------------------------------------------- """ -_logger = logger.setup(__name__, level=logging.INFO) +_logger = FLCoreLogger().get_logger(__name__) async def get_all_acls(request: web.Request) -> web.Response: @@ -75,6 +74,7 @@ async def get_acl(request: web.Request) -> web.Response: raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) except Exception as ex: msg = str(ex) + _logger.error(ex, "Failed to get {} ACL.".format(name)) raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response(acl_info) @@ -125,6 +125,9 @@ async def add_acl(request: web.Request) -> web.Response: if 'response' in insert_control_acl_result: if insert_control_acl_result['response'] == "inserted": result = {"name": name, "service": json.loads(services), "url": json.loads(urls)} + # ACLAD audit trail entry + audit = AuditLogger(storage) + await audit.information('ACLAD', result) else: raise StorageServerError(insert_control_acl_result) else: @@ -141,6 +144,7 @@ async def add_acl(request: web.Request) -> web.Response: raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) except Exception as ex: msg = str(ex) + _logger.error(ex, "Failed to create ACL.") raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response(result) @@ -170,13 +174,12 @@ async def update_acl(request: web.Request) -> web.Response: if url is not None and not isinstance(url, list): raise TypeError('url must be a list.') storage = connect.get_storage_async() - payload = PayloadBuilder().SELECT("name").WHERE(['name', '=', name]).payload() + payload = PayloadBuilder().SELECT("name", "service", "url").WHERE(['name', '=', name]).payload() result = await storage.query_tbl_with_payload('control_acl', payload) message = "" if 'rows' in result: if result['rows']: 
update_query = PayloadBuilder() - set_values = {} if service is not None: set_values["service"] = json.dumps(service) @@ -184,11 +187,14 @@ async def update_acl(request: web.Request) -> web.Response: set_values["url"] = json.dumps(url) update_query.SET(**set_values).WHERE(['name', '=', name]) - update_result = await storage.update_tbl("control_acl", update_query.payload()) if 'response' in update_result: if update_result['response'] == "updated": message = "ACL {} updated successfully.".format(name) + # ACLCH audit trail entry + audit = AuditLogger(storage) + values = {'name': name, 'service': service, 'url': url} + await audit.information('ACLCH', {'acl': values, 'old_acl': result['rows'][0]}) else: raise StorageServerError(update_result) else: @@ -206,6 +212,7 @@ async def update_acl(request: web.Request) -> web.Response: raise web.HTTPBadRequest(reason=message, body=json.dumps({"message": message})) except Exception as ex: message = str(ex) + _logger.error(ex, "Failed to update {} ACL.".format(name)) raise web.HTTPInternalServerError(reason=message, body=json.dumps({"message": message})) else: # Fetch service name associated with acl @@ -242,13 +249,16 @@ async def delete_acl(request: web.Request) -> web.Response: if services or scripts: message = "{} is associated with an entity. So cannot delete." \ " Make sure to remove all the usages of this ACL.".format(name) - _logger.info(message) - return web.json_response({"message": message}) + _logger.warning(message) + return web.HTTPConflict(reason=message, body=json.dumps({"message": message})) delete_result = await storage.delete_from_tbl("control_acl", payload) if 'response' in delete_result: if delete_result['response'] == "deleted": message = "{} ACL deleted successfully.".format(name) + # ACLDL audit trail entry + audit = AuditLogger(storage) + await audit.information('ACLDL', {"message": message, "name": name}) else: raise StorageServerError(delete_result) else: @@ -263,6 +273,7 @@ async def delete_acl(request: web.Request) -> web.Response: raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) except Exception as ex: msg = str(ex) + _logger.error(ex, "Failed to delete {} ACL.".format(name)) raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response({"message": message}) @@ -275,8 +286,8 @@ async def attach_acl_to_service(request: web.Request) -> web.Response: :Example: curl -H "authorization: $AUTH_TOKEN" -sX PUT http://localhost:8081/fledge/service/Sine/ACL -d '{"acl_name": "testACL"}' """ + svc_name = request.match_info.get('service_name', None) try: - svc_name = request.match_info.get('service_name', None) storage = connect.get_storage_async() payload = PayloadBuilder().SELECT(["id", "enabled"]).WHERE(['schedule_name', '=', svc_name]).payload() # check service name existence @@ -350,6 +361,7 @@ async def attach_acl_to_service(request: web.Request) -> web.Response: raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) except Exception as ex: msg = str(ex) + _logger.error(ex, "Attach ACL to {} service failed.".format(svc_name)) raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: # Call service security endpoint with attachACL = acl_name @@ -367,8 +379,8 @@ async def detach_acl_from_service(request: web.Request) -> web.Response: :Example: curl -H "authorization: $AUTH_TOKEN" -sX DELETE http://localhost:8081/fledge/service/Sine/ACL """ + svc_name = request.match_info.get('service_name', None) try: - svc_name = 
request.match_info.get('service_name', None) storage = connect.get_storage_async() payload = PayloadBuilder().SELECT(["id", "enabled"]).WHERE(['schedule_name', '=', svc_name]).payload() # check service name existence @@ -425,6 +437,7 @@ async def detach_acl_from_service(request: web.Request) -> web.Response: raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) except Exception as ex: msg = str(ex) + _logger.error(ex, "Detach ACL from {} service failed.".format(svc_name)) raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response({"message": message}) diff --git a/python/fledge/services/core/api/control_service/entrypoint.py b/python/fledge/services/core/api/control_service/entrypoint.py new file mode 100644 index 0000000000..1c5942a11a --- /dev/null +++ b/python/fledge/services/core/api/control_service/entrypoint.py @@ -0,0 +1,591 @@ +# -*- coding: utf-8 -*- + +# FLEDGE_BEGIN +# See: http://fledge-iot.readthedocs.io/ +# FLEDGE_END + +import json + +from enum import IntEnum +import aiohttp +from aiohttp import web + +from fledge.common.audit_logger import AuditLogger +from fledge.common.logger import FLCoreLogger +from fledge.common.service_record import ServiceRecord +from fledge.common.storage_client.payload_builder import PayloadBuilder +from fledge.services.core import connect, server +from fledge.services.core.service_registry.service_registry import ServiceRegistry +from fledge.services.core.service_registry import exceptions as service_registry_exceptions +from fledge.services.core.user_model import User + + +__author__ = "Ashish Jabble" +__copyright__ = "Copyright (c) 2023 Dianomic Systems Inc." +__license__ = "Apache 2.0" +__version__ = "${VERSION}" + +_logger = FLCoreLogger().get_logger(__name__) + +_help = """ + Two types of users: Control Administrator and Control Requestor + Control Administrator: + - has access rights to create a control entrypoint. + - must be a user with role of admin or control + Control Requestor: + - can make requests to defined control entrypoint but cannot create new entrypoints. + - any user role can make request to control entrypoint but the username must match one in list + of users given when entrypoint was created. 
+ ----------------------------------------------------------------------------------------------------------------- + | GET POST | /fledge/control/manage | + | GET PUT DELETE | /fledge/control/manage/{name} | + | PUT | /fledge/control/request/{name} | + ------------------------------------------------------------------------------------------------------------------ +""" + + +def setup(app): + app.router.add_route('POST', '/fledge/control/manage', create) + app.router.add_route('GET', '/fledge/control/manage', get_all) + app.router.add_route('GET', '/fledge/control/manage/{name}', get_by_name) + app.router.add_route('PUT', '/fledge/control/manage/{name}', update) + app.router.add_route('DELETE', '/fledge/control/manage/{name}', delete) + app.router.add_route('PUT', '/fledge/control/request/{name}', update_request) + + +class EntryPointType(IntEnum): + WRITE = 0 + OPERATION = 1 + + +class Destination(IntEnum): + BROADCAST = 0 + SERVICE = 1 + ASSET = 2 + SCRIPT = 3 + + +async def _get_type(identifier): + if isinstance(identifier, str): + type_converted = [ept.value for ept in EntryPointType if ept.name.lower() == identifier] + else: + type_converted = [ept.name.lower() for ept in EntryPointType if ept.value == identifier] + return type_converted[0] + + +async def _get_destination(identifier): + if isinstance(identifier, str): + dest_converted = [d.value for d in Destination if d.name.lower() == identifier] + else: + dest_converted = [d.name.lower() for d in Destination if d.value == identifier] + return dest_converted[0] + + +async def _check_parameters(payload, skip_required=False): + if not skip_required: + required_keys = {"name", "description", "type", "destination"} + if not all(k in payload.keys() for k in required_keys): + raise KeyError("{} required keys are missing in request payload.".format(required_keys)) + final = {} + name = payload.get('name', None) + if name is not None: + if not isinstance(name, str): + raise ValueError('Control entrypoint name should be in string.') + name = name.strip() + if len(name) == 0: + raise ValueError('Control entrypoint name cannot be empty.') + final['name'] = name + description = payload.get('description', None) + if description is not None: + if not isinstance(description, str): + raise ValueError('Control entrypoint description should be in string.') + description = description.strip() + if len(description) == 0: + raise ValueError('Control entrypoint description cannot be empty.') + final['description'] = description + _type = payload.get('type', None) + if _type is not None: + if not isinstance(_type, str): + raise ValueError('Control entrypoint type should be in string.') + _type = _type.strip() + if len(_type) == 0: + raise ValueError('Control entrypoint type cannot be empty.') + ept_names = [ept.name.lower() for ept in EntryPointType] + if _type not in ept_names: + raise ValueError('Possible types are: {}.'.format(ept_names)) + if _type == EntryPointType.OPERATION.name.lower(): + operation_name = payload.get('operation_name', None) + if operation_name is not None: + if not isinstance(operation_name, str): + raise ValueError('Control entrypoint operation name should be in string.') + operation_name = operation_name.strip() + if len(operation_name) == 0: + raise ValueError('Control entrypoint operation name cannot be empty.') + else: + raise KeyError('operation_name KV pair is missing.') + final['operation_name'] = operation_name + final['type'] = await _get_type(_type) + + destination = payload.get('destination', None) + if destination is 
not None: + if not isinstance(destination, str): + raise ValueError('Control entrypoint destination should be in string.') + destination = destination.strip() + if len(destination) == 0: + raise ValueError('Control entrypoint destination cannot be empty.') + dest_names = [d.name.lower() for d in Destination] + if destination not in dest_names: + raise ValueError('Possible destination values are: {}.'.format(dest_names)) + + destination_idx = await _get_destination(destination) + final['destination'] = destination_idx + + # only if non-zero + final['destination_arg'] = '' + if destination_idx: + destination_arg = payload.get(destination, None) + if destination_arg is not None: + if not isinstance(destination_arg, str): + raise ValueError('Control entrypoint destination argument should be in string.') + destination_arg = destination_arg.strip() + if len(destination_arg) == 0: + raise ValueError('Control entrypoint destination argument cannot be empty.') + final[destination] = destination_arg + final['destination_arg'] = destination + else: + raise KeyError('{} destination argument is missing.'.format(destination)) + anonymous = payload.get('anonymous', None) + if anonymous is not None: + if not isinstance(anonymous, bool): + raise ValueError('anonymous should be a bool.') + anonymous = 't' if anonymous else 'f' + final['anonymous'] = anonymous + constants = payload.get('constants', None) + if constants is not None: + if not isinstance(constants, dict): + raise ValueError('constants should be a dictionary.') + if not constants and _type == EntryPointType.WRITE.name.lower(): + raise ValueError('constants should not be empty.') + final['constants'] = constants + else: + if _type == EntryPointType.WRITE.name.lower(): + raise ValueError("For type write constants must have passed in payload and cannot have empty value.") + + variables = payload.get('variables', None) + if variables is not None: + if not isinstance(variables, dict): + raise ValueError('variables should be a dictionary.') + if not variables and _type == EntryPointType.WRITE.name.lower(): + raise ValueError('variables should not be empty.') + final['variables'] = variables + else: + if _type == EntryPointType.WRITE.name.lower(): + raise ValueError("For type write variables must have passed in payload and cannot have empty value.") + + allow = payload.get('allow', None) + if allow is not None: + if not isinstance(allow, list): + raise ValueError('allow should be an array of list of users.') + if allow: + users = await User.Objects.all() + usernames = [u['uname'] for u in users] + invalid_users = list(set(payload['allow']) - set(usernames)) + if invalid_users: + raise ValueError('Invalid user {} found.'.format(invalid_users)) + final['allow'] = allow + return final + + +async def create(request: web.Request) -> web.Response: + """Create a control entrypoint + :Example: + curl -sX POST http://localhost:8081/fledge/control/manage -d '{"name": "SetLatheSpeed", "description": "Set the speed of the lathe", "type": "write", "destination": "asset", "asset": "lathe", "constants": {"units": "spin"}, "variables": {"rpm": "100"}, "allow":["user"], "anonymous": false}' + """ + try: + data = await request.json() + payload = await _check_parameters(data) + name = payload['name'] + storage = connect.get_storage_async() + result = await storage.query_tbl("control_api") + entrypoints = [r['name'] for r in result['rows']] + if name in entrypoints: + raise ValueError('{} control entrypoint is already in use.'.format(name)) + # add common data keys in 
control_api table + control_api_column_name = {"name": name, + "description": payload['description'], + "type": payload['type'], + "operation_name": payload['operation_name'] if payload['type'] == 1 else "", + "destination": payload['destination'], + "destination_arg": payload[ + payload['destination_arg']] if payload['destination'] else "", + "anonymous": payload['anonymous'] + } + api_insert_payload = PayloadBuilder().INSERT(**control_api_column_name).payload() + insert_api_result = await storage.insert_into_tbl("control_api", api_insert_payload) + if insert_api_result['rows_affected'] == 1: + # add if any params data keys in control_api_parameters table + if 'constants' in payload: + for k, v in payload['constants'].items(): + control_api_params_column_name = {"name": name, "parameter": k, "value": v, "constant": 't'} + api_params_insert_payload = PayloadBuilder().INSERT(**control_api_params_column_name).payload() + await storage.insert_into_tbl("control_api_parameters", api_params_insert_payload) + if 'variables' in payload: + for k, v in payload['variables'].items(): + control_api_params_column_name = {"name": name, "parameter": k, "value": v, "constant": 'f'} + api_params_insert_payload = PayloadBuilder().INSERT(**control_api_params_column_name).payload() + await storage.insert_into_tbl("control_api_parameters", api_params_insert_payload) + # add if any users in control_api_acl table + if 'allow' in payload: + for u in payload['allow']: + control_acl_column_name = {"name": name, "user": u} + acl_insert_payload = PayloadBuilder().INSERT(**control_acl_column_name).payload() + await storage.insert_into_tbl("control_api_acl", acl_insert_payload) + except (KeyError, ValueError) as err: + msg = str(err) + raise web.HTTPBadRequest(body=json.dumps({"message": msg}), reason=msg) + except Exception as ex: + msg = str(ex) + _logger.error(ex, "Failed to create control entrypoint.") + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) + else: + # CTEAD audit trail entry + audit = AuditLogger(storage) + if 'constants' not in data: + data['constants'] = {} + if 'variables' not in data: + data['variables'] = {} + await audit.information('CTEAD', data) + return web.json_response({"message": "{} control entrypoint has been created successfully.".format(name)}) + + +async def get_all(request: web.Request) -> web.Response: + """Get a list of all control entrypoints + :Example: + curl -sX GET http://localhost:8081/fledge/control/manage + """ + storage = connect.get_storage_async() + result = await storage.query_tbl("control_api") + entrypoint = [] + for row in result["rows"]: + permitted = await _get_permitted(request, storage, row) + entrypoint.append({"name": row['name'], "description": row['description'], "permitted": permitted}) + return web.json_response({"controls": entrypoint}) + + +async def get_by_name(request: web.Request) -> web.Response: + """Get a control entrypoint by name + :Example: + curl -sX GET http://localhost:8081/fledge/control/manage/SetLatheSpeed + """ + ep_name = request.match_info.get('name', None) + try: + response = await _get_entrypoint(ep_name) + response['permitted'] = await _get_permitted(request, None, response) + except ValueError as err: + msg = str(err) + raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) + except KeyError as err: + msg = str(err.args[0]) + raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) + except Exception as ex: + msg = str(ex) + _logger.error(ex, "Failed to fetch details of {} 
entrypoint.".format(ep_name)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) + else: + return web.json_response(response) + + +async def delete(request: web.Request) -> web.Response: + """Delete a control entrypoint + :Example: + curl -sX DELETE http://localhost:8081/fledge/control/manage/SetLatheSpeed + """ + name = request.match_info.get('name', None) + try: + storage = connect.get_storage_async() + payload = PayloadBuilder().WHERE(["name", '=', name]).payload() + result = await storage.query_tbl_with_payload("control_api", payload) + if not result['rows']: + raise KeyError('{} control entrypoint not found.'.format(name)) + await storage.delete_from_tbl("control_api_acl", payload) + await storage.delete_from_tbl("control_api_parameters", payload) + await storage.delete_from_tbl("control_api", payload) + except ValueError as err: + msg = str(err) + raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) + except KeyError as err: + msg = str(err.args[0]) + raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) + except Exception as ex: + msg = str(ex) + _logger.error(ex, "Failed to delete of {} entrypoint.".format(name)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) + else: + message = "{} control entrypoint has been deleted successfully.".format(name) + # CTEDL audit trail entry + audit = AuditLogger(storage) + await audit.information('CTEDL', {"message": message, "name": name}) + return web.json_response({"message": message}) + + +async def update(request: web.Request) -> web.Response: + """Update a control entrypoint + :Example: + curl -sX PUT "http://localhost:8081/fledge/control/manage/SetLatheSpeed" -d '{"constants": {"x": "486"}, "variables": {"rpm": "1200"}, "description": "Perform lathesim", "anonymous": false, "destination": "script", "script": "S4", "allow": ["user"]}' + curl -sX PUT http://localhost:8081/fledge/control/manage/SetLatheSpeed -d '{"description": "Updated", "anonymous": false, "allow": []}' + curl -sX PUT http://localhost:8081/fledge/control/manage/SetLatheSpeed -d '{"allow": ["user"]}' + curl -sX PUT http://localhost:8081/fledge/control/manage/SetLatheSpeed -d '{"variables":{"rpm":"800", "distance": "138"}, "constants": {"x": "640", "y": "480"}}' + """ + name = request.match_info.get('name', None) + try: + storage = connect.get_storage_async() + payload = PayloadBuilder().WHERE(["name", '=', name]).payload() + entry_point_result = await storage.query_tbl_with_payload("control_api", payload) + if not entry_point_result['rows']: + msg = '{} control entrypoint not found.'.format(name) + raise KeyError(msg) + try: + data = await request.json() + columns = await _check_parameters(data, skip_required=True) + except Exception as ex: + msg = str(ex) + raise ValueError(msg) + old_entrypoint = await _get_entrypoint(name) + # TODO: FOGL-8037 rename + if 'name' in columns: + del columns['name'] + possible_keys = {"name", "description", "type", "operation_name", "destination", "destination_arg", + "anonymous", "constants", "variables", "allow"} + if 'type' in columns: + columns['operation_name'] = columns['operation_name'] if columns['type'] == 1 else "" + if 'destination_arg' in columns: + dest = await _get_destination(columns['destination']) + columns['destination_arg'] = columns[dest] if columns['destination'] else "" + entries_to_remove = set(columns) - set(possible_keys) + for k in entries_to_remove: + del columns[k] + control_api_columns = {} + if columns: + for k, v 
in columns.items(): + if k == "constants": + await _update_params( + name, old_entrypoint['constants'], columns['constants'], 't', storage) + elif k == "variables": + await _update_params( + name, old_entrypoint['variables'], columns['variables'], 'f', storage) + elif k == "allow": + allowed_users = [u for u in v] + db_allow_users = old_entrypoint["allow"] + insert_case = set(allowed_users) - set(db_allow_users) + for _user in insert_case: + acl_cols = {"name": name, "user": _user} + acl_insert_payload = PayloadBuilder().INSERT(**acl_cols).payload() + await storage.insert_into_tbl("control_api_acl", acl_insert_payload) + delete_case = set(db_allow_users) - set(allowed_users) + for _user in delete_case: + acl_delete_payload = PayloadBuilder().WHERE(["name", '=', name] + ).AND_WHERE(["user", '=', _user]).payload() + await storage.delete_from_tbl("control_api_acl", acl_delete_payload) + else: + control_api_columns[k] = v + if control_api_columns: + payload = PayloadBuilder().SET(**control_api_columns).WHERE(['name', '=', name]).payload() + await storage.update_tbl("control_api", payload) + else: + msg = "Nothing to update. No valid key value pair found in payload." + raise ValueError(msg) + except ValueError as err: + msg = str(err) + raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) + except KeyError as err: + msg = str(err.args[0]) + raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) + except Exception as ex: + msg = str(ex) + _logger.error(ex, "Failed to update the details of {} entrypoint.".format(name)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) + else: + # CTECH audit trail entry + result = await _get_entrypoint(name) + audit = AuditLogger(storage) + await audit.information('CTECH', {'entrypoint': result, 'old_entrypoint': old_entrypoint}) + return web.json_response({"message": "{} control entrypoint has been updated successfully.".format(name)}) + + +async def update_request(request: web.Request) -> web.Response: + """API control entry points can be called with PUT operation to URL form + :Example: + curl -sX PUT http://localhost:8081/fledge/control/request/SetLatheSpeed -d '{"distance": "13"}' + """ + name = request.match_info.get('name', None) + try: + # check the dispatcher service state + try: + service = ServiceRegistry.get(s_type="Dispatcher") + if service[0]._status != ServiceRecord.Status.Running: + raise ValueError('The Dispatcher service is not in Running state.') + except service_registry_exceptions.DoesNotExist: + raise ValueError('Dispatcher service is either not installed or not added.') + + ep_info = await _get_entrypoint(name) + username = "Anonymous" + if request.user is not None: + # Admin and Control role users can always call entrypoints. + # For others, it must be matched from the list of allowed users + if request.user["role_id"] not in (1, 5): + allowed_user = [r for r in ep_info['allow']] + # TODO: FOGL-8037 - If allowed user list is empty then should we allow to proceed with update request? + # How about viewer and data viewer role access to this route? 
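For readers following the dispatch logic a little further below, here is a small standalone sketch of how a write-type entrypoint's constants and variables are merged with the caller's values before the payload is sent to the dispatcher. The SetLatheSpeed record and the request body are hypothetical (taken from the curl examples in the docstrings above); the snippet only mirrors the payload construction in update_request() rather than calling it.

# Hypothetical entrypoint record and caller-supplied values, mirroring update_request().
ep_info = {
    "name": "SetLatheSpeed",
    "type": "write",
    "destination": "asset",
    "asset": "lathe",
    "constants": {"units": "spin"},
    "variables": {"rpm": "100"},
}
data = {"rpm": "1200"}   # body of PUT /fledge/control/request/SetLatheSpeed
username = "user"

dispatch_payload = {"destination": ep_info["destination"], "source": "API", "source_name": username}
# Broadcast destinations carry no name; everything else names the target service/asset/script.
if str(ep_info["destination"]).lower() != "broadcast":
    dispatch_payload["name"] = ep_info[ep_info["destination"]]
# Caller values override variables/constants of the same name; untouched ones keep their defaults.
constants = {key: data.get(key, ep_info["constants"][key]) for key in ep_info["constants"]}
variables = {key: data.get(key, ep_info["variables"][key]) for key in ep_info["variables"]}
dispatch_payload["write"] = {**constants, **variables}
print(dispatch_payload)
# {'destination': 'asset', 'source': 'API', 'source_name': 'user',
#  'name': 'lathe', 'write': {'units': 'spin', 'rpm': '1200'}}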
+ # as of now simply reject with Forbidden 403 + if request.user["uname"] not in allowed_user: + raise ValueError("Operation is not allowed for the {} user.".format(request.user['uname'])) + username = request.user["uname"] + + data = await request.json() + dispatch_payload = {"destination": ep_info['destination'], "source": "API", "source_name": username} + # If destination is broadcast then name KV pair is excluded from dispatch payload + if str(ep_info['destination']).lower() != 'broadcast': + dispatch_payload["name"] = ep_info[ep_info['destination']] + constant_dict = {key: data.get(key, ep_info["constants"][key]) for key in ep_info["constants"]} + variables_dict = {key: data.get(key, ep_info["variables"][key]) for key in ep_info["variables"]} + params = {**constant_dict, **variables_dict} + if not params: + raise ValueError("Nothing to update as given entrypoint do not have the parameters.") + if ep_info['type'] == 'write': + url = "dispatch/write" + dispatch_payload["write"] = params + else: + url = "dispatch/operation" + dispatch_payload["operation"] = {ep_info["operation_name"]: params if params else {}} + _logger.debug("DISPATCH PAYLOAD: {}".format(dispatch_payload)) + svc, bearer_token = await _get_service_record_info_along_with_bearer_token() + await _call_dispatcher_service_api(svc._protocol, svc._address, svc._port, url, bearer_token, dispatch_payload) + except KeyError as err: + msg = str(err.args[0]) + raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) + except ValueError as err: + msg = str(err) + raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) + except Exception as ex: + msg = str(ex) + _logger.error(ex, "Failed to update the control request details of {} entrypoint.".format(name)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) + else: + return web.json_response({"message": "{} control entrypoint URL called.".format(name)}) + + +async def _get_entrypoint(name): + storage = connect.get_storage_async() + payload = PayloadBuilder().WHERE(["name", '=', name]).payload() + result = await storage.query_tbl_with_payload("control_api", payload) + if not result['rows']: + raise KeyError('{} control entrypoint not found.'.format(name)) + response = result['rows'][0] + response['type'] = await _get_type(response['type']) + response['destination'] = await _get_destination(response['destination']) + if response['destination'] != "broadcast": + response[response['destination']] = response['destination_arg'] + del response['destination_arg'] + response['anonymous'] = True if response['anonymous'] == 't' else False + param_result = await storage.query_tbl_with_payload("control_api_parameters", payload) + constants = {} + variables = {} + if param_result['rows']: + for r in param_result['rows']: + if r['constant'] == 't': + constants[r['parameter']] = r['value'] + else: + variables[r['parameter']] = r['value'] + response['constants'] = constants + response['variables'] = variables + else: + response['constants'] = constants + response['variables'] = variables + response['allow'] = [] + acl_result = await storage.query_tbl_with_payload("control_api_acl", payload) + if acl_result['rows']: + users = [] + for r in acl_result['rows']: + users.append(r['user']) + response['allow'] = users + return response + + +async def _get_service_record_info_along_with_bearer_token(): + try: + service = ServiceRegistry.get(s_type="Dispatcher") + svc_name = service[0]._name + token = ServiceRegistry.getBearerToken(svc_name) + except 
service_registry_exceptions.DoesNotExist: + msg = "No service available with type Dispatcher." + raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) + else: + return service[0], token + + +async def _call_dispatcher_service_api(protocol: str, address: str, port: int, uri: str, token: str, payload: dict): + # Custom Request header + headers = {} + if token is not None: + headers['Authorization'] = "Bearer {}".format(token) + url = "{}://{}:{}/{}".format(protocol, address, port, uri) + try: + async with aiohttp.ClientSession() as session: + async with session.post(url, data=json.dumps(payload), headers=headers) as resp: + message = await resp.text() + response = (resp.status, message) + if resp.status not in range(200, 209): + _logger.error("POST Request Error: Http status code: {}, reason: {}, response: {}".format( + resp.status, resp.reason, message)) + except Exception as ex: + raise Exception(str(ex)) + else: + # Return Tuple - (http statuscode, message) + return response + + +async def _update_params(ep_name: str, old_param: dict, new_param: dict, is_constant: str, _storage: connect): + insert_case = set(new_param) - set(old_param) + update_case = set(new_param) & set(old_param) + delete_case = set(old_param) - set(new_param) + + for uc in update_case: + update_payload = PayloadBuilder().WHERE(["name", '=', ep_name]).AND_WHERE( + ["constant", '=', is_constant]).AND_WHERE(["parameter", '=', uc]).SET(value=new_param[uc]).payload() + await _storage.update_tbl("control_api_parameters", update_payload) + + for dc in delete_case: + delete_payload = PayloadBuilder().WHERE(["name", '=', ep_name]).AND_WHERE( + ["constant", '=', is_constant]).AND_WHERE(["parameter", '=', dc]).payload() + await _storage.delete_from_tbl("control_api_parameters", delete_payload) + + for ic in insert_case: + column_name = {"name": ep_name, "parameter": ic, "value": new_param[ic], "constant": is_constant} + api_params_insert_payload = PayloadBuilder().INSERT(**column_name).payload() + await _storage.insert_into_tbl("control_api_parameters", api_params_insert_payload) + + +async def _get_permitted(request: web.Request, _storage: connect, ep: dict): + """permitted: means user is able to make the API call + This is on the basis of anonymous flag if true then permitted true + If anonymous flag is false then list of allowed users to determine if the specific user can make the call + Note: In case of authentication optional permitted always true + """ + if _storage is None: + _storage = connect.get_storage_async() + + if request.is_auth_optional is True: + return True + if ep['anonymous'] == 't' or ep['anonymous'] is True: + return True + + permitted = False + if request.user["role_id"] not in (1, 5): # Admin, Control + payload = PayloadBuilder().WHERE(["name", '=', ep['name']]).payload() + acl_result = await _storage.query_tbl_with_payload("control_api_acl", payload) + if acl_result['rows']: + users = [r['user'] for r in acl_result['rows']] + permitted = False if request.user["uname"] not in users else True + else: + permitted = True + return permitted diff --git a/python/fledge/services/core/api/control_service/pipeline.py b/python/fledge/services/core/api/control_service/pipeline.py new file mode 100644 index 0000000000..ffd932aa26 --- /dev/null +++ b/python/fledge/services/core/api/control_service/pipeline.py @@ -0,0 +1,663 @@ +# -*- coding: utf-8 -*- + +# FLEDGE_BEGIN +# See: http://fledge-iot.readthedocs.io/ +# FLEDGE_END + +import copy +import json +from aiohttp import web + +from 
fledge.common.audit_logger import AuditLogger +from fledge.common.configuration_manager import ConfigurationManager +from fledge.common.logger import FLCoreLogger +from fledge.common.storage_client.payload_builder import PayloadBuilder +from fledge.common.storage_client.exceptions import StorageServerError +from fledge.services.core import connect, server + +__author__ = "Ashish Jabble" +__copyright__ = "Copyright (c) 2023 Dianomic Systems Inc." +__license__ = "Apache 2.0" +__version__ = "${VERSION}" + +_logger = FLCoreLogger().get_logger(__name__) + +_help = """ + ----------------------------------------------------------------------------------- + | GET POST | /fledge/control/pipeline | + | GET PUT DELETE | /fledge/control/pipeline/{id} | + | GET | /fledge/control/lookup | + ----------------------------------------------------------------------------------- +""" + + +def setup(app): + app.router.add_route('GET', '/fledge/control/lookup', get_lookup) + app.router.add_route('POST', '/fledge/control/pipeline', create) + app.router.add_route('GET', '/fledge/control/pipeline', get_all) + app.router.add_route('GET', '/fledge/control/pipeline/{id}', get_by_id) + app.router.add_route('PUT', '/fledge/control/pipeline/{id}', update) + app.router.add_route('DELETE', '/fledge/control/pipeline/{id}', delete) + + +async def get_lookup(request: web.Request) -> web.Response: + """List of supported control source and destinations + + :Example: + curl -sX GET http://localhost:8081/fledge/control/lookup + curl -sX GET http://localhost:8081/fledge/control/lookup?type=source + curl -sX GET http://localhost:8081/fledge/control/lookup?type=destination + """ + try: + _type = request.query.get('type') + if _type is None or not _type: + lookup = await _get_all_lookups() + response = {'controlLookup': lookup} + else: + table_name = None + if _type == "source": + table_name = "control_source" + elif _type == "destination": + table_name = "control_destination" + if table_name: + lookup = await _get_all_lookups(table_name) + response = lookup + else: + lookup = await _get_all_lookups() + response = {'controlLookup': lookup} + except Exception as ex: + msg = str(ex) + _logger.error(ex, "Failed to get all control lookups.") + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) + else: + return web.json_response(response) + + +async def create(request: web.Request) -> web.Response: + """Create a control pipeline. 
It's name must be unique and there must be no other pipelines with the same + source or destination + + :Example: + curl -sX POST http://localhost:8081/fledge/control/pipeline -d '{"name": "wildcard", "enabled": true, "execution": "shared", "source": {"type": 1}, "destination": {"type": 1}}' + curl -sX POST http://localhost:8081/fledge/control/pipeline -d '{"name": "pump", "enabled": true, "execution": "shared", "source": {"type": 2, "name": "pump"}}' + curl -sX POST http://localhost:8081/fledge/control/pipeline -d '{"name": "broadcast", "enabled": true, "execution": "exclusive", "destination": {"type": 5}}' + curl -sX POST http://localhost:8081/fledge/control/pipeline -d '{"name": "opcua_pump", "enabled": true, "execution": "shared", "source": {"type": 2, "name": "opcua"}, "destination": {"type": 3, "name": "pump1"}}' + curl -sX POST http://localhost:8081/fledge/control/pipeline -d '{"name": "opcua_pump1", "enabled": true, "execution": "exclusive", "source": {"type": 2, "name": "southOpcua"}, "destination": {"type": 2, "name": "northOpcua"}, "filters": ["Filter1"]}' + curl -sX POST http://localhost:8081/fledge/control/pipeline -d '{"name": "Test", "enabled": false, "filters": ["Filter1", "Filter2"]}' + """ + try: + data = await request.json() + # Create entry in control_pipelines table + column_names = await _check_parameters(data, request) + source_type = column_names.get("stype") + if source_type is None: + column_names['stype'] = 0 + column_names['sname'] = '' + des_type = column_names.get("dtype") + if des_type is None: + column_names['dtype'] = 0 + column_names['dname'] = '' + payload = PayloadBuilder().INSERT(**column_names).payload() + storage = connect.get_storage_async() + insert_result = await storage.insert_into_tbl("control_pipelines", payload) + pipeline_name = column_names['name'] + pipeline_filter = data.get('filters', None) + if insert_result['response'] == "inserted" and insert_result['rows_affected'] == 1: + source = {'type': column_names["stype"], 'name': column_names["sname"]} + destination = {'type': column_names["dtype"], 'name': column_names["dname"]} + final_result = await _pipeline_in_use(pipeline_name, source, destination, info=True) + final_result['source'] = {"type": await _get_lookup_value('source', final_result["stype"]), + "name": final_result['sname']} + final_result['destination'] = {"type": await _get_lookup_value('destination', final_result["dtype"]), + "name": final_result['dname']} + final_result.pop('stype', None) + final_result.pop('sname', None) + final_result.pop('dtype', None) + final_result.pop('dname', None) + final_result['enabled'] = False if final_result['enabled'] == 'f' else True + final_result['filters'] = [] + if pipeline_filter: + go_ahead = await _check_filters(storage, pipeline_filter) + if go_ahead: + filters = await _update_filters(storage, final_result['id'], pipeline_name, pipeline_filter) + final_result['filters'] = filters + else: + raise StorageServerError + except StorageServerError as serr: + msg = serr.error + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": "Storage error: {}".format(msg)})) + except KeyError as err: + msg = str(err) + raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) + except ValueError as err: + msg = str(err) + raise web.HTTPBadRequest(body=json.dumps({"message": msg}), reason=msg) + except Exception as ex: + msg = str(ex) + _logger.error(ex, "Failed to create pipeline: {}.".format(data.get('name'))) + raise web.HTTPInternalServerError(reason=msg, 
body=json.dumps({"message": msg})) + else: + # CTPAD audit trail entry + audit = AuditLogger(storage) + await audit.information('CTPAD', final_result) + return web.json_response(final_result) + + +async def get_all(request: web.Request) -> web.Response: + """List of all control pipelines within the system + + :Example: + curl -sX GET http://localhost:8081/fledge/control/pipeline + """ + try: + storage = connect.get_storage_async() + result = await storage.query_tbl("control_pipelines") + control_pipelines = [] + source_lookup = await _get_all_lookups("control_source") + des_lookup = await _get_all_lookups("control_destination") + for r in result["rows"]: + source_name = [s['name'] for s in source_lookup if r['stype'] == s['cpsid']] + des_name = [s['name'] for s in des_lookup if r['dtype'] == s['cpdid']] + temp = { + 'id': r['cpid'], + 'name': r['name'], + 'source': { + 'type': ''.join(source_name), 'name': r['sname']} if r['stype'] else {'type': '', 'name': ''}, + 'destination': { + 'type': ''.join(des_name), 'name': r['dname']} if r['dtype'] else {'type': '', 'name': ''}, + 'enabled': False if r['enabled'] == 'f' else True, + 'execution': r['execution'] + } + result = await _get_table_column_by_value("control_filters", "cpid", r['cpid']) + temp.update({'filters': [r['fname'] for r in result["rows"]]}) + control_pipelines.append(temp) + except Exception as ex: + msg = str(ex) + _logger.error(ex, "Failed to get all pipelines.") + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) + else: + return web.json_response({'pipelines': control_pipelines}) + + +async def get_by_id(request: web.Request) -> web.Response: + """Fetch the pipeline within the system + + :Example: + curl -sX GET http://localhost:8081/fledge/control/pipeline/2 + """ + cpid = request.match_info.get('id', None) + try: + pipeline = await _get_pipeline(cpid) + except ValueError as err: + msg = str(err) + raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) + except KeyError as err: + msg = str(err) + raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) + except Exception as ex: + msg = str(ex) + _logger.error(ex, "Failed to fetch details of pipeline having ID: <{}>.".format(cpid)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) + else: + return web.json_response(pipeline) + + +async def update(request: web.Request) -> web.Response: + """Update an existing pipeline within the system + + :Example: + curl -sX PUT http://localhost:8081/fledge/control/pipeline/1 -d '{"filters": ["F3", "F2"]}' + curl -sX PUT http://localhost:8081/fledge/control/pipeline/13 -d '{"name": "Changed"}' + curl -sX PUT http://localhost:8081/fledge/control/pipeline/9 -d '{"enabled": false, "execution": "exclusive", "filters": [], "source": {"type": 1, "name": "Universal"}, "destination": {"type": 4, "name": "TestScript"}}' + """ + cpid = request.match_info.get('id', None) + try: + pipeline = await _get_pipeline(cpid) + data = await request.json() + data['old_pipeline_name'] = pipeline['name'] + columns = await _check_parameters(data, request) + storage = connect.get_storage_async() + if columns: + payload = PayloadBuilder().SET(**columns).WHERE(['cpid', '=', cpid]).payload() + await storage.update_tbl("control_pipelines", payload) + filters = data.get('filters', None) + if filters is not None: + # Case: When filters payload is empty then remove all filters + if not filters: + await _remove_filters(storage, pipeline['filters'], cpid, pipeline['name']) + else: 
+ go_ahead = await _check_filters(storage, filters) if filters else True + if go_ahead: + if filters: + result_filters = await _get_table_column_by_value("control_filters", "cpid", cpid) + db_filters = None + if result_filters['rows']: + db_filters = [r['fname'].replace("ctrl_{}_".format(pipeline['name']), '' + ) for r in result_filters['rows']] + await _update_filters(storage, cpid, pipeline['name'], filters, db_filters) + else: + raise ValueError('Filters do not exist as per the given list {}'.format(filters)) + except ValueError as err: + msg = str(err) + raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) + except KeyError as err: + msg = str(err) + raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) + except Exception as ex: + msg = str(ex) + _logger.error(ex, "Failed to update pipeline having ID: <{}>.".format(cpid)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) + else: + # CTPCH audit trail entry + audit = AuditLogger(storage) + updated_pipeline = await _get_pipeline(cpid) + await audit.information('CTPCH', {"pipeline": updated_pipeline, "old_pipeline": pipeline}) + return web.json_response( + {"message": "Control Pipeline with ID:<{}> has been updated successfully.".format(cpid)}) + + +async def delete(request: web.Request) -> web.Response: + """Delete an existing pipeline within the system. + Also remove the filters along with configuration that are part of pipeline + + :Example: + curl -sX DELETE http://localhost:8081/fledge/control/pipeline/1 + """ + cpid = request.match_info.get('id', None) + try: + storage = connect.get_storage_async() + pipeline = await _get_pipeline(cpid) + # Remove filters if exists and also delete the entry from control_filter table + await _remove_filters(storage, pipeline['filters'], cpid, pipeline['name']) + # Delete entry from control_pipelines + payload = PayloadBuilder().WHERE(['cpid', '=', pipeline['id']]).payload() + await storage.delete_from_tbl("control_pipelines", payload) + except KeyError as err: + msg = str(err) + raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) + except ValueError as err: + msg = str(err) + raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) + except Exception as ex: + msg = str(ex) + _logger.error(ex, "Failed to delete pipeline having ID: <{}>.".format(cpid)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) + else: + message = {"message": "Control Pipeline with ID:<{}> has been deleted successfully.".format(cpid)} + audit_details = message + audit_details["name"] = pipeline['name'] + # CTPDL audit trail entry + audit = AuditLogger(storage) + await audit.information('CTPDL', audit_details) + return web.json_response(message) + + +async def _get_all_lookups(tbl_name=None): + storage = connect.get_storage_async() + if tbl_name: + res = await storage.query_tbl(tbl_name) + lookup = res["rows"] + return lookup + result = await storage.query_tbl("control_source") + source_lookup = result["rows"] + result = await storage.query_tbl("control_destination") + des_lookup = result["rows"] + return {"source": source_lookup, "destination": des_lookup} + + +async def _get_table_column_by_value(table, column_name, column_value, limit=None): + storage = connect.get_storage_async() + if table == "control_filters": + payload = PayloadBuilder().WHERE([column_name, '=', column_value]).ORDER_BY(["forder", "asc"]).payload() + else: + payload = PayloadBuilder().WHERE([column_name, '=', 
column_value]).payload() + if limit is not None: + payload = PayloadBuilder().WHERE([column_name, '=', column_value]).LIMIT(limit).payload() + result = await storage.query_tbl_with_payload(table, payload) + return result + + +async def _get_pipeline(cpid, filters=True): + result = await _get_table_column_by_value("control_pipelines", "cpid", cpid) + rows = result["rows"] + if not rows: + raise KeyError("Pipeline having ID: {} not found.".format(cpid)) + r = rows[0] + pipeline = { + 'id': r['cpid'], + 'name': r['name'], + 'source': {'type': await _get_lookup_value("source", r['stype']), 'name': r['sname'] + } if r['stype'] else {'type': '', 'name': ''}, + 'destination': {'type': await _get_lookup_value("destination", r['dtype']), 'name': r['dname'] + } if r['dtype'] else {'type': '', 'name': ''}, + 'enabled': False if r['enabled'] == 'f' else True, + 'execution': r['execution'] + } + if filters: + # update filters in pipeline + result = await _get_table_column_by_value("control_filters", "cpid", pipeline['id']) + pipeline['filters'] = [r['fname'] for r in result["rows"]] + return pipeline + + +async def _pipeline_in_use(name, source, destination, info=False): + result = await _get_table_column_by_value("control_pipelines", "name", name) + rows = result["rows"] + row = None + new_data = {'source': source if source else {'type': 0, 'name': ''}, + 'destination': destination if destination else {'type': 0, 'name': ''} + } + is_matched = False + for r in rows: + db_data = {'source': {'type': r['stype'], 'name': r['sname']}, + 'destination': {'type': r['dtype'], 'name': r['dname']}} + if json.dumps(db_data, sort_keys=True) == json.dumps(new_data, sort_keys=True): + is_matched = True + r["id"] = r['cpid'] + r.pop('cpid', None) + row = r + break + return row if info else is_matched + + +async def _get_lookup_value(_type, value): + if _type == "source": + tbl_name = "control_source" + key_name = 'cpsid' + else: + tbl_name = "control_destination" + key_name = 'cpdid' + lookup = await _get_all_lookups(tbl_name) + name = [lu['name'] for lu in lookup if value == lu[key_name]] + return ''.join(name) + + +async def _check_parameters(payload, request): + column_names = {} + # name + name = payload.get('name', None) + if name is not None: + if not isinstance(name, str): + raise ValueError('Pipeline name should be in string.') + name = name.strip() + if len(name) == 0: + raise ValueError('Pipeline name cannot be empty.') + cpid = request.match_info.get('id', None) + old_name = payload.get('old_pipeline_name', None) + await _check_unique_pipeline(name, old_name, cpid) + column_names['name'] = name + # enabled + enabled = payload.get('enabled', None) + if enabled is not None: + if not isinstance(enabled, bool): + raise ValueError('Enabled should be a bool.') + column_names['enabled'] = 't' if enabled else 'f' + # execution + execution = payload.get('execution', None) + if execution is not None: + if not isinstance(execution, str): + raise ValueError('Execution should be in string.') + execution = execution.strip() + if len(execution) == 0: + raise ValueError('Execution value cannot be empty.') + if execution.lower() not in ["shared", "exclusive"]: + raise ValueError('Execution model value either shared or exclusive.') + column_names['execution'] = execution + # source + source = payload.get('source', None) + if source is not None: + if not isinstance(source, dict): + raise ValueError('Source should be passed with type and name.') + if len(source): + source_type = source.get("type") + source_name = 
source.get("name") + if source_type is not None: + if not isinstance(source_type, int): + raise ValueError("Source type should be an integer value.") + stype = await _get_lookup_value("source", source_type) + if not stype: + raise ValueError("Invalid source type found.") + else: + raise ValueError('Source type is missing.') + # Note: when source type is Any or API; no name is applied + if source_type not in (1, 3): + if source_name is not None: + if not isinstance(source_name, str): + raise ValueError("Source name should be a string value.") + source_name = source_name.strip() + if len(source_name) == 0: + raise ValueError('Source name cannot be empty.') + await _validate_lookup_name("source", source_type, source_name) + column_names["stype"] = source_type + column_names["sname"] = source_name + else: + raise ValueError('Source name is missing.') + else: + source_name = '' + if source_type == 3: + source_name = request.user["uname"] if hasattr(request, "user") and request.user else "anonymous" + source = {'type': source_type, 'name': source_name} + column_names["stype"] = source_type + column_names["sname"] = source_name + else: + column_names["stype"] = 0 + column_names["sname"] = "" + # destination + destination = payload.get('destination', None) + if destination is not None: + if not isinstance(destination, dict): + raise ValueError('Destination should be passed with type and name.') + if len(destination): + des_type = destination.get("type") + des_name = destination.get("name") + if des_type is not None: + if not isinstance(des_type, int): + raise ValueError("Destination type should be an integer value.") + dtype = await _get_lookup_value("destination", des_type) + if not dtype: + raise ValueError("Invalid destination type found.") + else: + raise ValueError('Destination type is missing.') + # Note: when destination type is Any or Broadcast; no name is applied + if des_type not in (1, 5): + if des_name is not None: + if not isinstance(des_name, str): + raise ValueError("Destination name should be a string value.") + des_name = des_name.strip() + if len(des_name) == 0: + raise ValueError('Destination name cannot be empty.') + await _validate_lookup_name("destination", des_type, des_name) + column_names["dtype"] = des_type + column_names["dname"] = des_name + else: + raise ValueError('Destination name is missing.') + else: + des_name = '' + destination = {'type': des_type, 'name': des_name} + column_names["dtype"] = des_type + column_names["dname"] = des_name + else: + column_names["dtype"] = 0 + column_names["dname"] = "" + if source is not None and destination is not None: + error_msg = "Pipeline is not allowed with same type of source and destination." 
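+        # The checks below reject a pipeline whose source and destination resolve to the same kind of
+        # endpoint: two south services, two north services, or a control script routed to a control script.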
+        # Service
+        if source_type == 2 and des_type == 2:
+            schedules = await server.Server.scheduler.get_schedules()
+            south_schedules = [sch.name for sch in schedules if sch.schedule_type == 1 and sch.process_name == "south_c"]
+            north_schedules = [sch.name for sch in schedules if
+                               sch.schedule_type == 1 and sch.process_name == "north_C"]
+            if (source_name in south_schedules and des_name in south_schedules) or (
+                    source_name in north_schedules and des_name in north_schedules):
+                raise ValueError(error_msg)
+        # Script
+        if source_type == 6 and des_type == 4:
+            raise ValueError(error_msg)
+    # filters
+    filters = payload.get('filters', None)
+    if filters is not None:
+        if not isinstance(filters, list):
+            raise ValueError('Pipeline filters should be passed as a list.')
+    return column_names
+
+
+async def _validate_lookup_name(lookup_name, _type, value):
+    storage = connect.get_storage_async()
+    config_mgr = ConfigurationManager(storage)
+
+    async def get_schedules():
+        schedules = await server.Server.scheduler.get_schedules()
+        if _type == 5:
+            # Verify against all types of schedules
+            if not any(sch.name == value for sch in schedules):
+                raise ValueError("'{}' is not a valid schedule name.".format(value))
+        else:
+            # Verify against STARTUP type schedules belonging to South or North services
+            if not any(sch.name == value for sch in schedules
+                       if sch.schedule_type == 1 and sch.process_name in ('south_c', 'north_C')):
+                raise ValueError("'{}' is not a valid service.".format(value))
+
+    async def get_control_scripts():
+        script_payload = PayloadBuilder().SELECT("name").payload()
+        scripts = await storage.query_tbl_with_payload('control_script', script_payload)
+        if not any(s['name'] == value for s in scripts['rows']):
+            raise ValueError("'{}' is not a valid script name.".format(value))
+
+    async def get_assets():
+        asset_payload = PayloadBuilder().DISTINCT(["asset"]).payload()
+        assets = await storage.query_tbl_with_payload('asset_tracker', asset_payload)
+        if not any(ac['asset'] == value for ac in assets['rows']):
+            raise ValueError("'{}' is not a valid asset name.".format(value))
+
+    async def get_notifications():
+        all_notifications = await config_mgr._read_all_child_category_names("Notifications")
+        if not any(notify['child'] == value for notify in all_notifications):
+            raise ValueError("'{}' is not a valid notification instance name.".format(value))
+
+    if (lookup_name == "source" and _type == 2) or (lookup_name == 'destination' and _type == 2):
+        # Verify schedule name against STARTUP type South or North schedules
+        await get_schedules()
+    elif (lookup_name == "source" and _type == 6) or (lookup_name == 'destination' and _type == 4):
+        # Verify control script name
+        await get_control_scripts()
+    elif lookup_name == "source" and _type == 4:
+        # Verify notification instance name
+        await get_notifications()
+    elif lookup_name == "source" and _type == 5:
+        # Verify schedule name against all types of schedules
+        await get_schedules()
+    elif lookup_name == "destination" and _type == 3:
+        # Verify asset name
+        await get_assets()
+    else:
+        # No validation required for the remaining source and destination types (Any, API, Broadcast)
+        pass
+
+
+async def _remove_filters(storage, filters, cp_id, cp_name=None):
+    cf_mgr = ConfigurationManager(storage)
+    if filters:
+        for f in filters:
+            # Delete entry from control_filter table
+            payload = PayloadBuilder().WHERE(['cpid', '=', cp_id]).AND_WHERE(['fname', '=', f]).payload()
+            await storage.delete_from_tbl("control_filters", payload)
+
+            # Delete filter from filters table
+            filter_name = f.replace("ctrl_{}_".format(cp_name), '')
+            payload = PayloadBuilder().WHERE(['name', '=', filter_name]).payload()
+            await storage.delete_from_tbl("filters", payload)
+
+            # Delete the filters category
+            await cf_mgr.delete_category_and_children_recursively(f)
+            await cf_mgr.delete_category_and_children_recursively(filter_name)
+
+
+async def _check_filters(storage, cp_filters):
+    is_exist = False
+    filters_result = await storage.query_tbl("filters")
+    if filters_result['rows']:
+        filters_instances_list = [f['name'] for f in filters_result['rows']]
+        check_if = all(f in filters_instances_list for f in cp_filters)
+        if check_if:
+            is_exist = True
+        else:
+            _logger.warning("Filters do not exist as per the given payload: {}".format(cp_filters))
+    else:
+        _logger.warning("No filter instances exist in the system.")
+    return is_exist
+
+
+async def _update_filters(storage, cp_id, cp_name, cp_filters, db_filters=None):
+    if db_filters is None:
+        db_filters = []
+    cf_mgr = ConfigurationManager(storage)
+    new_filters = []
+    children = []
+
+    insert_filters = set(cp_filters) - set(db_filters)
+    update_filters = set(cp_filters) & set(db_filters)
+    delete_filters = set(db_filters) - set(cp_filters)
+
+    if insert_filters:
+        for fid, fname in enumerate(insert_filters, start=1):
+            # get plugin config of filter
+            category_value = await cf_mgr.get_category_all_items(category_name=fname)
+            cat_value = copy.deepcopy(category_value)
+            if cat_value is None:
+                raise ValueError(
+                    "{} category does not exist for {} control pipeline filter.".format(
+                        fname, cp_name))
+            # Copy each item's value into default and remove the value KV pair when creating the new category
+            for k, v in cat_value.items():
+                v['default'] = v['value']
+                v.pop('value', None)
+            # Create category
+            cat_name = "ctrl_{}_{}".format(cp_name, fname)
+            await cf_mgr.create_category(category_name=cat_name,
+                                         category_description="Filter of {} control pipeline.".format(
+                                             cp_name),
+                                         category_value=cat_value,
+                                         keep_original_items=True)
+            new_category = await cf_mgr.get_category_all_items(cat_name)
+            if new_category is None:
+                raise KeyError("No such {} category found.".format(cat_name))
+            # Create entry in control_filters table
+            column_names = {"cpid": cp_id, "forder": fid, "fname": cat_name}
+            payload = PayloadBuilder().INSERT(**column_names).payload()
+            await storage.insert_into_tbl("control_filters", payload)
+            new_filters.append(cat_name)
+            children.append(cat_name)
+            children.extend([fname])
+        try:
+            # Create parent-child relation with Dispatcher service
+            await cf_mgr.create_child_category("dispatcher", children)
+        except:
+            pass
+    if update_filters:
+        # Only the filter order needs updating
+        for fid, fname in enumerate(cp_filters, start=1):
+            payload = PayloadBuilder().SET(forder=fid).WHERE(["fname", "=", "ctrl_{}_{}".format(cp_name, fname)]).AND_WHERE(["cpid", "=", cp_id]).payload()
+            await storage.update_tbl("control_filters", payload)
+    if delete_filters:
+        del_filters = ["ctrl_{}_{}".format(cp_name, f) for f in list(delete_filters)]
+        await _remove_filters(storage, del_filters, cp_id, cp_name)
+    return new_filters
+
+
+async def _check_unique_pipeline(name, old_name=None, cpid=None):
+    """Disallow pipeline name cases:
+    a) If the given pipeline name already exists in the DB.
+    b) If the given pipeline already has filters attached.
+    """
+    if cpid is not None:
+        if name != old_name:
+            pipeline_filter_result = await _get_table_column_by_value("control_filters", "cpid", cpid, limit=1)
+            if pipeline_filter_result['rows']:
+                raise ValueError('Filters are attached.
Pipeline name cannot be changed.') + pipeline_result = await _get_table_column_by_value("control_pipelines", "name", name, limit=1) + if pipeline_result['rows']: + raise ValueError('{} pipeline already exists, name cannot be changed.'.format(name)) + else: + pipeline_result = await _get_table_column_by_value("control_pipelines", "name", name, limit=1) + if pipeline_result['rows']: + raise ValueError('{} pipeline already exists with the same name.'.format(name)) diff --git a/python/fledge/services/core/api/control_service/script_management.py b/python/fledge/services/core/api/control_service/script_management.py index 2b4c46b445..dded07370e 100644 --- a/python/fledge/services/core/api/control_service/script_management.py +++ b/python/fledge/services/core/api/control_service/script_management.py @@ -5,22 +5,20 @@ # FLEDGE_END import json -import logging import datetime import uuid from aiohttp import web -from fledge.common import logger +from fledge.common.acl_manager import ACLManager +from fledge.common.audit_logger import AuditLogger from fledge.common.configuration_manager import ConfigurationManager +from fledge.common.logger import FLCoreLogger from fledge.common.storage_client.exceptions import StorageServerError from fledge.common.storage_client.payload_builder import PayloadBuilder -from fledge.common.web.middleware import has_permission -from fledge.services.core import connect -from fledge.services.core import server +from fledge.services.core import connect, server from fledge.services.core.scheduler.entities import Schedule, ManualSchedule from fledge.services.core.api.control_service.exceptions import * -from fledge.common.acl_manager import ACLManager __author__ = "Ashish Jabble" @@ -36,7 +34,7 @@ ----------------------------------------------------------------------- """ -_logger = logger.setup(__name__, level=logging.INFO) +_logger = FLCoreLogger().get_logger(__name__) def setup(app): @@ -51,7 +49,6 @@ def setup(app): app.router.add_route('DELETE', '/fledge/control/script/{script_name}', delete) -@has_permission("admin") async def add_schedule_and_configuration(request: web.Request) -> web.Response: """ Create a schedule and configuration category for the task :Example: @@ -143,9 +140,10 @@ async def add_schedule_and_configuration(request: web.Request) -> web.Response: raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) except Exception as ex: msg = str(ex) + _logger.error(ex, "Failed to add schedule task for control script {}.".format(name)) raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: - msg = "Schedule and configuration is created for an automation script with name {}".format(name) + msg = "Schedule and configuration is created for control script {}".format(name) return web.json_response({"message": msg}) @@ -192,6 +190,7 @@ async def get_all(request: web.Request) -> web.Response: scripts.append(row) except Exception as ex: msg = str(ex) + _logger.error(ex, "Get Control script failed.") raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response({"scripts": scripts}) @@ -248,12 +247,12 @@ async def get_by_name(request: web.Request) -> web.Response: raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) except Exception as ex: msg = str(ex) + _logger.error(ex, "Get Control script by name failed.") raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response(rows) -@has_permission("admin") async 
def add(request: web.Request) -> web.Response: """ Add a script @@ -314,6 +313,9 @@ async def add(request: web.Request) -> web.Response: if acl is not None: # Append ACL into response if acl exists in payload result["acl"] = acl + # CTSAD audit trail entry + audit = AuditLogger(storage) + await audit.information('CTSAD', result) else: raise StorageServerError(insert_control_script_result) else: @@ -333,12 +335,12 @@ async def add(request: web.Request) -> web.Response: raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) except Exception as ex: msg = str(ex) + _logger.error(ex, "Control script create failed.") raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response(result) -@has_permission("admin") async def update(request: web.Request) -> web.Response: """ Update a script Only the steps & ACL parameters can be updated @@ -361,11 +363,13 @@ async def update(request: web.Request) -> web.Response: raise ValueError('ACL must be a string.') acl = acl.strip() set_values = {} + values = {'name': name} if steps is not None: + values['steps'] = steps set_values["steps"] = _validate_steps_and_convert_to_str(steps) storage = connect.get_storage_async() # Check existence of script record - payload = PayloadBuilder().SELECT("name").WHERE(['name', '=', name]).payload() + payload = PayloadBuilder().SELECT("name", "steps", "acl").WHERE(['name', '=', name]).payload() result = await storage.query_tbl_with_payload('control_script', payload) message = "" if 'rows' in result: @@ -381,6 +385,7 @@ async def update(request: web.Request) -> web.Response: else: raise StorageServerError(acl_result) set_values["acl"] = acl + values["acl"] = acl # Update script record update_query = PayloadBuilder() update_query.SET(**set_values).WHERE(['name', '=', name]) @@ -407,6 +412,9 @@ async def update(request: web.Request) -> web.Response: if 'response' in update_result: if update_result['response'] == "updated": message = "Control script {} updated successfully.".format(name) + # CTSCH audit trail entry + audit = AuditLogger(storage) + await audit.information('CTSCH', {'script': values, 'old_script': result['rows'][0]}) else: raise StorageServerError(update_result) else: @@ -424,12 +432,12 @@ async def update(request: web.Request) -> web.Response: raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) except Exception as ex: msg = str(ex) + _logger.error(ex, "Control script update failed.") raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response({"message": message}) -@has_permission("admin") async def delete(request: web.Request) -> web.Response: """ Delete a script @@ -469,6 +477,9 @@ async def delete(request: web.Request) -> web.Response: if 'response' in delete_result: if delete_result['response'] == "deleted": message = "{} script deleted successfully.".format(name) + # CTSDL audit trail entry + audit = AuditLogger(storage) + await audit.information('CTSDL', {'message': message, "name": name}) else: raise StorageServerError(delete_result) else: @@ -483,6 +494,7 @@ async def delete(request: web.Request) -> web.Response: raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) except Exception as ex: msg = str(ex) + _logger.error(ex, "Control script delete failed.") raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response({"message": message}) diff --git a/python/fledge/services/core/api/filters.py 
b/python/fledge/services/core/api/filters.py index 1506a838b9..44ef07c221 100644 --- a/python/fledge/services/core/api/filters.py +++ b/python/fledge/services/core/api/filters.py @@ -10,14 +10,16 @@ from aiohttp import web from typing import List, Dict, Tuple +from fledge.common import utils +from fledge.common.common import _FLEDGE_ROOT from fledge.common.configuration_manager import ConfigurationManager -from fledge.services.core import connect -from fledge.services.core.api import utils as apiutils -from fledge.common import logger, utils +from fledge.common.logger import FLCoreLogger from fledge.common.storage_client.payload_builder import PayloadBuilder from fledge.common.storage_client.exceptions import StorageServerError from fledge.common.storage_client.storage_client import StorageClientAsync -from fledge.common.common import _FLEDGE_ROOT + +from fledge.services.core import connect +from fledge.services.core.api import utils as apiutils from fledge.services.core.api.plugins import common __author__ = "Massimiliano Pinto, Amarendra K Sinha" @@ -32,8 +34,7 @@ | GET DELETE | /fledge/filter/{filter_name} | --------------------------------------------------------------------------- """ - -_LOGGER = logger.setup("filter") +_LOGGER = FLCoreLogger().get_logger(__name__) async def create_filter(request: web.Request) -> web.Response: @@ -128,22 +129,24 @@ async def create_filter(request: web.Request) -> web.Response: # Fetch the new created filter: get category items category_info = await cf_mgr.get_category_all_items(category_name=filter_name) if category_info is None: - raise ValueError("No such '{}' filter found".format(filter_name)) + raise ValueError("No such '{}' filter found.".format(filter_name)) else: return web.json_response({'filter': filter_name, 'description': filter_desc, 'value': category_info}) - except ValueError as ex: - _LOGGER.exception("Add filter, caught exception: " + str(ex)) - raise web.HTTPNotFound(reason=str(ex)) - except TypeError as ex: - _LOGGER.exception("Add filter, caught exception: " + str(ex)) - raise web.HTTPBadRequest(reason=str(ex)) + except ValueError as err: + msg = str(err) + raise web.HTTPNotFound(reason=msg) + except TypeError as err: + msg = str(err) + raise web.HTTPBadRequest(reason=msg) except StorageServerError as ex: + msg = ex.error await _delete_configuration_category(storage, filter_name) # Revert configuration entry - _LOGGER.exception("Failed to create filter. 
%s", ex.error) - raise web.HTTPInternalServerError(reason='Failed to create filter.') + _LOGGER.exception("Failed to create filter with: {}".format(msg)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) except Exception as ex: - _LOGGER.exception("Add filter, caught exception: %s", str(ex)) - raise web.HTTPInternalServerError(reason=str(ex)) + msg = str(ex) + _LOGGER.error(ex, "Add filter failed.") + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) async def add_filters_pipeline(request: web.Request) -> web.Response: @@ -267,18 +270,20 @@ async def add_filters_pipeline(request: web.Request) -> web.Response: await cf_mgr.create_child_category(user_name, filter_list) return web.json_response( {'result': "Filter pipeline {} updated successfully".format(json.loads(result['value']))}) - except ValueError as ex: - _LOGGER.exception("Add filters pipeline, caught exception: %s", str(ex)) - raise web.HTTPNotFound(reason=str(ex)) - except TypeError as ex: - _LOGGER.exception("Add filters pipeline, caught exception: %s", str(ex)) - raise web.HTTPBadRequest(reason=ex) - except StorageServerError as ex: - _LOGGER.exception("Add filters pipeline, caught exception: %s", str(ex.error)) - raise web.HTTPInternalServerError(reason=str(ex.error)) + except ValueError as err: + msg = str(err) + raise web.HTTPNotFound(reason=msg) + except TypeError as err: + msg = str(err) + raise web.HTTPBadRequest(reason=msg) + except StorageServerError as e: + msg = e.error + _LOGGER.exception("Add filters pipeline, caught storage error: {}".format(msg)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) except Exception as ex: - _LOGGER.exception("Add filters pipeline, caught exception: %s", str(ex)) - raise web.HTTPInternalServerError(reason=str(ex)) + msg = str(ex) + _LOGGER.error(ex, "Add filters pipeline failed.") + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) async def get_filter(request: web.Request) -> web.Response: @@ -315,14 +320,17 @@ async def get_filter(request: web.Request) -> web.Response: users.append(row["user"]) filter_detail.update({"users": users}) except StorageServerError as ex: - _LOGGER.exception("Get filter: %s, caught exception: %s", filter_name, str(ex.error)) - raise web.HTTPInternalServerError(reason=str(ex.error)) - except ValueError as ex: - raise web.HTTPNotFound(reason=ex) - except TypeError as ex: - raise web.HTTPBadRequest(reason=ex) + msg = ex.error + _LOGGER.exception("Failed to get filter name: {}. 
Storage error occurred: {}".format(filter_name, msg)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) + except ValueError as err: + raise web.HTTPNotFound(reason=str(err)) + except TypeError as err: + raise web.HTTPBadRequest(reason=str(err)) except Exception as ex: - raise web.HTTPInternalServerError(reason=ex) + msg = str(ex) + _LOGGER.error(ex, "Get {} filter failed.".format(filter_name)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response({'filter': filter_detail}) @@ -338,10 +346,13 @@ async def get_filters(request: web.Request) -> web.Response: result = await storage.query_tbl("filters") filters = result["rows"] except StorageServerError as ex: - _LOGGER.exception("Get filters, caught exception: %s", str(ex.error)) - raise web.HTTPInternalServerError(reason=str(ex.error)) + msg = ex.error + _LOGGER.exception("Get all filters, caught storage exception: {}".format(msg)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) except Exception as ex: - raise web.HTTPInternalServerError(reason=ex) + msg = str(ex) + _LOGGER.error(ex, "Get all filters failed.") + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response({'filters': filters}) @@ -364,16 +375,18 @@ async def get_filter_pipeline(request: web.Request) -> web.Response: filter_value_from_storage = json.loads(category_info['filter']['value']) except KeyError: - msg = "No filter pipeline exists for {}".format(user_name) - _LOGGER.info(msg) + msg = "No filter pipeline exists for {}.".format(user_name) raise web.HTTPNotFound(reason=msg) except StorageServerError as ex: - _LOGGER.exception("Get pipeline: %s, caught exception: %s", user_name, str(ex.error)) - raise web.HTTPInternalServerError(reason=str(ex.error)) - except ValueError as ex: - raise web.HTTPNotFound(reason=ex) + msg = ex.error + _LOGGER.exception("Failed to delete filter pipeline {}. 
Storage error occurred: {}".format(user_name, msg)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) + except ValueError as err: + raise web.HTTPNotFound(reason=str(err)) except Exception as ex: - raise web.HTTPInternalServerError(reason=ex) + msg = str(ex) + _LOGGER.error(ex, "Get filter pipeline failed.") + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response({'result': filter_value_from_storage}) @@ -427,16 +440,19 @@ async def delete_filter(request: web.Request) -> web.Response: ['plugin', '=', filter_name]).payload() await storage.update_tbl("asset_tracker", update_payload) except StorageServerError as ex: - _LOGGER.exception("Delete filter: %s, caught exception: %s", filter_name, str(ex.error)) - raise web.HTTPInternalServerError(reason=str(ex.error)) - except ValueError as ex: - raise web.HTTPNotFound(reason=ex) - except TypeError as ex: - raise web.HTTPBadRequest(reason=ex) + msg = ex.error + _LOGGER.exception("Delete {} filter, caught storage exception: {}".format(filter_name, msg)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) + except ValueError as err: + raise web.HTTPNotFound(reason=str(err)) + except TypeError as err: + raise web.HTTPBadRequest(reason=str(err)) except Exception as ex: - raise web.HTTPInternalServerError(reason=ex) + msg = str(ex) + _LOGGER.error(ex, "Delete {} filter failed.".format(filter_name)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: - return web.json_response({'result': "Filter {} deleted successfully".format(filter_name)}) + return web.json_response({'result': "Filter {} deleted successfully.".format(filter_name)}) async def delete_filter_pipeline(request: web.Request) -> web.Response: @@ -454,8 +470,8 @@ async def delete_filter_pipeline(request: web.Request) -> web.Response: status_code = resp.status jdoc = await resp.text() if status_code not in range(200, 209): - _LOGGER.error("Error code: %d, reason: %s, details: %s, url: %s", resp.status, resp.reason, jdoc, - put_url) + _LOGGER.error("Delete {} filter pipeline; Error code: {}, reason: {}, details: {}, url: {}" + "".format(user_name, resp.status, resp.reason, jdoc, put_url)) raise StorageServerError(code=resp.status, reason=resp.reason, error=jdoc) except Exception: raise diff --git a/python/fledge/services/core/api/health.py b/python/fledge/services/core/api/health.py index 08f0267217..cc02e87206 100644 --- a/python/fledge/services/core/api/health.py +++ b/python/fledge/services/core/api/health.py @@ -4,13 +4,12 @@ # See: http://fledge-iot.readthedocs.io/ # FLEDGE_END -import logging import asyncio import json from aiohttp import web -from fledge.common import logger from fledge.common.common import _FLEDGE_DATA, _FLEDGE_ROOT +from fledge.common.logger import FLCoreLogger __author__ = "Deepanshu Yadav" @@ -24,7 +23,7 @@ | GET | /fledge/health/logging | ---------------------------------------------------------- """ -_LOGGER = logger.setup(__name__, level=logging.INFO) +_LOGGER = FLCoreLogger().get_logger(__name__) async def get_disk_usage(given_dir): @@ -39,7 +38,7 @@ async def get_disk_usage(given_dir): stdout, stderr = await disk_check_process.communicate() if disk_check_process.returncode != 0: stderr = stderr.decode("utf-8") - msg = "Failed to get disk stats! {}".format(str(stderr)) + msg = "Failed to get disk stats of {} directory. 
{}".format(given_dir, str(stderr)) _LOGGER.error(msg) raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) @@ -109,9 +108,9 @@ async def get_logging_health(request: web.Request) -> web.Response: response["levels"] = log_levels except Exception as ex: - msg = "Could not fetch service information.{}".format(str(ex)) - _LOGGER.error(msg) - raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) + msg = "Could not fetch service information." + _LOGGER.error(ex, msg) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": "{} {}".format(msg, str(ex))})) try: response['disk'] = {} @@ -122,9 +121,9 @@ async def get_logging_health(request: web.Request) -> web.Response: response['disk']['available'] = available except Exception as ex: - msg = "Failed to get disk stats for /var/log !{}".format(str(ex)) - _LOGGER.error(msg) - raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) + msg = "Failed to get disk stats for /var/log." + _LOGGER.error(ex, msg) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": "{} {}".format(msg, str(ex))})) else: return web.json_response(response) @@ -189,7 +188,7 @@ async def get_storage_health(request: web.Request) -> web.Response: except Exception as ex: msg = str(ex) - _LOGGER.error("Could not ping Storage due to {}".format(msg)) + _LOGGER.error(ex, "Could not ping the Storage service.") raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) try: @@ -208,8 +207,8 @@ async def get_storage_health(request: web.Request) -> web.Response: response['disk']['available'] = available response['disk']['status'] = status except Exception as ex: - msg = "Failed to get disk stats! {}".format(str(ex)) - _LOGGER.error(msg) - raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) + msg = "Failed to get disk stats for Storage service." 
+ _LOGGER.error(ex, msg) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": "{} {}".format(msg, str(ex))})) else: return web.json_response(response) diff --git a/python/fledge/services/core/api/north.py b/python/fledge/services/core/api/north.py index e0b7eb7621..9cdab722e4 100644 --- a/python/fledge/services/core/api/north.py +++ b/python/fledge/services/core/api/north.py @@ -8,16 +8,15 @@ from functools import lru_cache from aiohttp import web -from fledge.services.core import server from fledge.common.configuration_manager import ConfigurationManager -from fledge.common.storage_client.payload_builder import PayloadBuilder +from fledge.common.logger import FLCoreLogger from fledge.common.plugin_discovery import PluginDiscovery -from fledge.services.core import connect -from fledge.services.core.scheduler.entities import Task from fledge.common.service_record import ServiceRecord +from fledge.common.storage_client.payload_builder import PayloadBuilder +from fledge.services.core import connect, server +from fledge.services.core.scheduler.entities import Task from fledge.services.core.service_registry.service_registry import ServiceRegistry from fledge.services.core.service_registry.exceptions import DoesNotExist -from fledge.common import logger __author__ = "Praveen Garg" __copyright__ = "Copyright (c) 2018 OSIsoft, LLC" @@ -29,7 +28,7 @@ | GET | /fledge/north | ------------------------------------------------------------------------------- """ -_logger = logger.setup(__name__) +_logger = FLCoreLogger().get_logger(__name__) async def _get_sent_stats(storage_client, north_schedules): @@ -186,6 +185,7 @@ async def get_north_schedules(request): return web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) except Exception as ex: msg = str(ex) + _logger.error(ex, "Failed to get the north schedules.") return web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response(north_schedules) diff --git a/python/fledge/services/core/api/notification.py b/python/fledge/services/core/api/notification.py index 04c3da7bb7..396c1a0e56 100644 --- a/python/fledge/services/core/api/notification.py +++ b/python/fledge/services/core/api/notification.py @@ -10,14 +10,14 @@ from aiohttp import web from fledge.common import utils -from fledge.common import logger +from fledge.common.audit_logger import AuditLogger +from fledge.common.configuration_manager import ConfigurationManager +from fledge.common.logger import FLCoreLogger from fledge.common.service_record import ServiceRecord from fledge.common.storage_client.exceptions import StorageServerError -from fledge.common.configuration_manager import ConfigurationManager from fledge.services.core import connect from fledge.services.core.service_registry.service_registry import ServiceRegistry from fledge.services.core.service_registry import exceptions as service_registry_exceptions -from fledge.common.audit_logger import AuditLogger __author__ = "Amarendra K Sinha" __copyright__ = "Copyright (c) 2018 Dianomic Systems" @@ -32,8 +32,8 @@ | GET DELETE | /fledge/notification/{notification_name}/delivery/{channel_name} | ----------------------------------------------------------------------------------------------------- """ +_logger = FLCoreLogger().get_logger(__name__) -_logger = logger.setup() NOTIFICATION_TYPE = ["one shot", "retriggered", "toggled"] @@ -56,7 +56,9 @@ async def get_plugin(request): url = 'http://{}:{}/notification/delivery'.format(_address, _port) 
delivery_plugins = json.loads(await _hit_get_url(url)) except Exception as ex: - raise web.HTTPInternalServerError(reason=ex) + msg = str(ex) + _logger.error(ex, "Failed to get notification plugin list.") + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response({'rules': rule_plugins, 'delivery': delivery_plugins}) @@ -111,7 +113,9 @@ async def get_notification(request): except ValueError as ex: raise web.HTTPBadRequest(reason=str(ex)) except Exception as ex: - raise web.HTTPInternalServerError(reason=ex) + msg = str(ex) + _logger.error(ex, "Failed to get {} notification instance.".format(notif)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response({'notification': notification}) @@ -147,7 +151,9 @@ async def get_notifications(request): notifications.append(notification) except Exception as ex: - raise web.HTTPInternalServerError(reason=ex) + msg = str(ex) + _logger.error(ex, "Failed to get notification instances.") + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response({'notifications': notifications}) @@ -281,8 +287,10 @@ async def post_notification(request): await audit.information('NTFAD', {"name": name}) except ValueError as ex: raise web.HTTPBadRequest(reason=str(ex)) - except Exception as e: - raise web.HTTPInternalServerError(reason=str(e)) + except Exception as ex: + msg = str(ex) + _logger.error(ex, "Failed to create notification instance.") + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response({'result': "Notification {} created successfully".format(name)}) @@ -436,7 +444,9 @@ async def put_notification(request): except NotFoundError as e: raise web.HTTPNotFound(reason=str(e)) except Exception as ex: - raise web.HTTPInternalServerError(reason=str(ex)) + msg = str(ex) + _logger.error(ex, "Failed to update {} notification instance.".format(notif)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: # TODO: Start notification after update return web.json_response({'result': "Notification {} updated successfully".format(notif)}) @@ -475,7 +485,9 @@ async def delete_notification(request): except ValueError as ex: raise web.HTTPBadRequest(reason=str(ex)) except Exception as ex: - raise web.HTTPInternalServerError(reason=str(ex)) + msg = str(ex) + _logger.error(ex, "Failed to delete {} notification instance.".format(notif)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response({'result': 'Notification {} deleted successfully.'.format(notif)}) @@ -488,8 +500,8 @@ async def _hit_get_url(get_url, token=None): status_code = resp.status jdoc = await resp.text() if status_code not in range(200, 209): - _logger.error("Error code: %d, reason: %s, details: %s, url: %s", resp.status, resp.reason, jdoc, - get_url) + _logger.error("Error code: {}, reason: {}, details: {}, url: {}".format( + resp.status, resp.reason, jdoc, get_url)) raise StorageServerError(code=resp.status, reason=resp.reason, error=jdoc) except Exception: raise @@ -504,8 +516,8 @@ async def _hit_post_url(post_url, data=None): status_code = resp.status jdoc = await resp.text() if status_code not in range(200, 209): - _logger.error("Error code: %d, reason: %s, details: %s, url: %s", resp.status, resp.reason, jdoc, - post_url) + _logger.error("Error code: {}, reason: {}, details: {}, url: {}".format( + 
resp.status, resp.reason, jdoc, post_url)) raise StorageServerError(code=resp.status, reason=resp.reason, error=jdoc) except Exception: raise @@ -527,8 +539,9 @@ async def _update_configurations(config_mgr, name, notification_config, rule_con category_name = "delivery{}".format(name) await config_mgr.update_configuration_item_bulk(category_name, delivery_config) except Exception as ex: - _logger.exception("Failed to update notification configuration. %s", str(ex)) - raise web.HTTPInternalServerError(reason='Failed to update notification configuration. {}'.format(ex)) + msg = "Failed to update {} notification configuration.".format(name) + _logger.error(ex, msg) + return web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) async def _hit_delete_url(delete_url, data=None): @@ -538,11 +551,8 @@ async def _hit_delete_url(delete_url, data=None): status_code = resp.status jdoc = await resp.text() if status_code not in range(200, 209): - _logger.error("Error code: %d, reason: %s, details: %s, url: %s", - resp.status, - resp.reason, - jdoc, - delete_url) + _logger.error("Error code: {}, reason: {}, details: {}, url: {}".format( + resp.status, resp.reason, jdoc, delete_url)) raise StorageServerError(code=resp.status, reason=resp.reason, error=jdoc) @@ -629,6 +639,7 @@ async def get_delivery_channels(request: web.Request) -> web.Response: raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) except Exception as ex: msg = str(ex) + _logger.error(ex, "Failed to get delivery channels of {} notification.".format(notification_instance_name)) raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response({"channels": channels}) @@ -689,6 +700,7 @@ async def post_delivery_channel(request: web.Request) -> web.Response: raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) except Exception as ex: msg = str(ex) + _logger.error(ex, "Failed to create delivery channel of {} notification".format(notification_instance_name)) raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response({"category": channel_name, "description": channel_description, @@ -721,6 +733,8 @@ async def get_delivery_channel_configuration(request: web.Request) -> web.Respon raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) except Exception as ex: msg = str(ex) + _logger.error(ex, "Failed to get delivery channel configuration of {} notification.".format( + notification_instance_name)) raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response({"config": channel_config}) @@ -775,10 +789,12 @@ async def delete_delivery_channel(request: web.Request) -> web.Response: raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) except Exception as ex: msg = str(ex) + _logger.error(ex, "Failed to delete delivery channel of {} notification.".format(notification_instance_name)) raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response({"channels": channels}) + async def _get_all_delivery_channels(cfg_mgr: ConfigurationManager, notify_instance: str) -> dict: """ Remove all delivery channels in the form of array of dicts: diff --git a/python/fledge/services/core/api/package_log.py b/python/fledge/services/core/api/package_log.py index 78f17c8db4..04309fe94c 100644 --- a/python/fledge/services/core/api/package_log.py +++ b/python/fledge/services/core/api/package_log.py 
@@ -5,7 +5,6 @@ # FLEDGE_END import os -import logging import json from datetime import datetime @@ -13,7 +12,7 @@ from aiohttp import web from fledge.common.common import _FLEDGE_ROOT, _FLEDGE_DATA -from fledge.common import logger +from fledge.common.logger import FLCoreLogger from fledge.common.storage_client.payload_builder import PayloadBuilder from fledge.services.core import connect @@ -31,7 +30,7 @@ """ valid_extension = '.log' valid_actions = ('list', 'install', 'purge', 'update') -_LOGGER = logger.setup(__name__, level=logging.INFO) +_LOGGER = FLCoreLogger().get_logger(__name__) async def get_logs(request: web.Request) -> web.Response: @@ -49,19 +48,24 @@ async def get_logs(request: web.Request) -> web.Response: result = [] for f in found_files: - # Empty log name for update cmd - name = "" - t1 = f.split(".log") - t2 = t1[0].split("-fledge") - t3 = t2[0].split("-") - t4 = t1[0].split("-list") - if len(t2) >= 2: - name = "fledge{}".format(t2[1]) - if len(t4) >= 2: - name = "list" - dt = "{}-{}-{}-{}".format(t3[0], t3[1], t3[2], t3[3]) - ts = datetime.strptime(dt, "%y%m%d-%H-%M-%S").strftime('%Y-%m-%d %H:%M:%S') - result.append({"timestamp": ts, "name": name, "filename": f}) + if f.endswith(valid_extension): + t1 = f.split(".log") + t2 = t1[0].split("-fledge") + t3 = t2[0].split("-") + t4 = t1[0].split("-list") + if len(t2) >= 2: + name = "fledge{}".format(t2[1]) + elif len(t4) >= 2: + name = "list" + else: + name = t1[0] + if len(t3) >= 4: + dt = "{}-{}-{}-{}".format(t3[0], t3[1], t3[2], t3[3]) + ts = datetime.strptime(dt, "%y%m%d-%H-%M-%S").strftime('%Y-%m-%d %H:%M:%S') + else: + dt = datetime.utcnow() + ts = dt.strftime("%Y-%m-%d %H:%M:%S") + result.append({"timestamp": ts, "name": name, "filename": f}) return web.json_response({"logs": result}) @@ -136,11 +140,15 @@ async def get_package_status(request: web.Request) -> web.Response: tmp['logFileURI'] = r['log_file_uri'] del tmp['log_file_uri'] result.append(tmp) - except ValueError as err_msg: - raise web.HTTPBadRequest(reason=err_msg, body=json.dumps({"message": str(err_msg)})) - except KeyError as err_msg: - raise web.HTTPNotFound(reason=err_msg, body=json.dumps({"message": str(err_msg)})) + except ValueError as err: + msg = str(err) + raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) + except KeyError as err: + msg = str(err) + raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) except Exception as exc: - raise web.HTTPInternalServerError(reason=str(exc)) + msg = str(exc) + _LOGGER.error(exc, "Failed to get package log status for {} action.".format(action)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response({"packageStatus": result}) diff --git a/python/fledge/services/core/api/plugins/common.py b/python/fledge/services/core/api/plugins/common.py index ff3e3c8c16..f05ef06013 100644 --- a/python/fledge/services/core/api/plugins/common.py +++ b/python/fledge/services/core/api/plugins/common.py @@ -7,9 +7,7 @@ """Common Definitions""" import sys import types -import logging import os -import platform import json import glob import importlib.util @@ -17,8 +15,9 @@ from datetime import datetime from functools import lru_cache -from fledge.common import logger +from fledge.common import utils as common_utils from fledge.common.common import _FLEDGE_ROOT, _FLEDGE_DATA, _FLEDGE_PLUGIN_PATH +from fledge.common.logger import FLCoreLogger from fledge.services.core.api import utils from fledge.services.core.api.plugins.exceptions 
import * @@ -28,7 +27,7 @@ __version__ = "${VERSION}" -_logger = logger.setup(__name__, level=logging.INFO) +_logger = FLCoreLogger().get_logger(__name__) _NO_OF_FILES_TO_RETAIN = 10 @@ -136,9 +135,11 @@ def load_and_fetch_c_hybrid_plugin_info(plugin_name: str, is_config: bool, plugi if is_config: plugin_info.update({'config': temp}) else: - _logger.warning("{} hybrid plugin is not installed which is required for {}".format(connection_name, plugin_name)) + _logger.warning("{} hybrid plugin is not installed which is required for {}".format( + connection_name, plugin_name)) else: - _logger.warning("{} hybrid plugin is not installed which is required for {}".format(connection_name, plugin_name)) + _logger.warning("{} hybrid plugin is not installed which is required for {}".format( + connection_name, plugin_name)) else: raise Exception('Required {} keys are missing for json file'.format(json_file_keys)) return plugin_info @@ -171,9 +172,8 @@ async def fetch_available_packages(package_type: str = "") -> tuple: stdout_file_path = create_log_file(action="list") tmp_log_output_fp = stdout_file_path.split('logs/')[:1][0] + "logs/output.txt" - _platform = platform.platform() pkg_type = "" if package_type is None else package_type - pkg_mgt = 'yum' if 'centos' in _platform or 'redhat' in _platform else 'apt' + pkg_mgt = 'yum' if common_utils.is_redhat_based() else 'apt' category = await server.Server._configuration_manager.get_category_all_items("Installation") max_update_cat_item = category['maxUpdate'] pkg_cache_mgr = server.Server._package_cache_manager @@ -184,10 +184,9 @@ async def fetch_available_packages(package_type: str = "") -> tuple: # If max update per day is set to 1, then an update can not occurs until 24 hours after the last accessed update. # If set to 2 then this drops to 12 hours between updates, 3 would result in 8 hours between calls and so on. 
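    # Editor's illustration of the gate below; it is not part of the change itself. With the
    # "Installation" category's maxUpdate item set to 3, the threshold evaluates to
    #   (24 / 3) * 60 * 60 == 28800 seconds
    # so a fresh "sudo apt -y update" (or "sudo yum check-update") may run at most once
    # every 8 hours; otherwise the update is skipped and a warning is logged.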
if duration_in_sec > (24 / int(max_update_cat_item['value'])) * 60 * 60 or not last_accessed_time: - _logger.info("Attempting update on {}".format(now)) + _logger.info("Attempting {} update on {}...".format(pkg_mgt, now)) cmd = "sudo {} -y update > {} 2>&1".format(pkg_mgt, stdout_file_path) - if 'centos' in _platform or 'redhat' in _platform: - pkg_mgt = 'yum' + if pkg_mgt == 'yum': cmd = "sudo {} check-update > {} 2>&1".format(pkg_mgt, stdout_file_path) # Execute command os.system(cmd) @@ -195,7 +194,7 @@ async def fetch_available_packages(package_type: str = "") -> tuple: # fetch available package caching always clear on every update request _get_available_packages.cache_clear() else: - _logger.warning("Maximum update exceeds the limit for the day") + _logger.warning("Maximum {} update exceeds the limit for the day.".format(pkg_mgt)) ttl_cat_item_val = int(category['listAvailablePackagesCacheTTL']['value']) if ttl_cat_item_val > 0: last_accessed_time = pkg_cache_mgr['list']['last_accessed_time'] @@ -230,7 +229,7 @@ def create_log_file(action: str = "", plugin_name: str = "") -> str: logs_dir = '/logs/' _PATH = _FLEDGE_DATA + logs_dir if _FLEDGE_DATA else _FLEDGE_ROOT + '/data{}'.format(logs_dir) # YYMMDD-HH-MM-SS-{plugin_name}.log - file_spec = datetime.now().strftime('%y%m%d-%H-%M-%S') + file_spec = datetime.utcnow().strftime('%y%m%d-%H-%M-%S') if not action: log_file_name = "{}-{}.log".format(file_spec, plugin_name) if plugin_name else "{}.log".format(file_spec) else: diff --git a/python/fledge/services/core/api/plugins/data.py b/python/fledge/services/core/api/plugins/data.py index ba0c632367..4376b3cdf7 100644 --- a/python/fledge/services/core/api/plugins/data.py +++ b/python/fledge/services/core/api/plugins/data.py @@ -4,12 +4,11 @@ # See: http://fledge-iot.readthedocs.io/ # FLEDGE_END -import logging import json import urllib.parse from aiohttp import web -from fledge.common import logger +from fledge.common.logger import FLCoreLogger from fledge.common.plugin_discovery import PluginDiscovery from fledge.common.storage_client.payload_builder import PayloadBuilder from fledge.plugins.common import utils as common_utils @@ -29,7 +28,7 @@ --------------------------------------------------------------------------------------- """ FORBIDDEN_MSG = "Resource you were trying to reach is absolutely forbidden!" 
-_logger = logger.setup(__name__, level=logging.INFO) +_logger = FLCoreLogger().get_logger(__name__) async def get_persist_plugins(request: web.Request) -> web.Response: @@ -73,6 +72,7 @@ async def get_persist_plugins(request: web.Request) -> web.Response: raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) except Exception as ex: msg = str(ex) + _logger.error(ex, "Failed to get persist plugins.") raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response({'persistent': plugins}) @@ -107,6 +107,7 @@ async def get(request: web.Request) -> web.Response: raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) except Exception as ex: msg = str(ex) + _logger.error(ex, "Failed to get {} plugin data for {} service.".format(plugin, service)) raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response({'data': data}) @@ -153,6 +154,7 @@ async def add(request: web.Request) -> web.Response: raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) except Exception as ex: msg = str(ex) + _logger.error(ex, "Failed to create plugin data.") raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response({'result': "{} key added successfully.".format(key)}) @@ -192,6 +194,7 @@ async def delete(request: web.Request) -> web.Response: raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) except Exception as ex: msg = str(ex) + _logger.error(ex, "Failed to delete {} plugin data for {} service.".format(plugin, service)) raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response({'result': "{} deleted successfully.".format(key)}) diff --git a/python/fledge/services/core/api/plugins/discovery.py b/python/fledge/services/core/api/plugins/discovery.py index c997dfd990..8ef665c9e3 100644 --- a/python/fledge/services/core/api/plugins/discovery.py +++ b/python/fledge/services/core/api/plugins/discovery.py @@ -4,13 +4,13 @@ # See: http://fledge-iot.readthedocs.io/ # FLEDGE_END -import logging import json from aiohttp import web + +from fledge.common.logger import FLCoreLogger from fledge.common.plugin_discovery import PluginDiscovery from fledge.services.core.api.plugins import common -from fledge.common import logger from fledge.services.core.api.plugins.exceptions import * __author__ = "Amarendra K Sinha, Ashish Jabble" @@ -25,7 +25,8 @@ | GET | /fledge/plugins/available | ------------------------------------------------------------------------------- """ -_logger = logger.setup(__name__, level=logging.INFO) + +_logger = FLCoreLogger().get_logger(__name__) async def get_plugins_installed(request): @@ -80,9 +81,11 @@ async def get_plugins_available(request: web.Request) -> web.Response: except ValueError as e: raise web.HTTPBadRequest(reason=e) except PackageError as e: - msg = "Fetch available plugins package request failed" + msg = "Fetch available plugins package request failed." 
raise web.HTTPBadRequest(body=json.dumps({"message": msg, "link": str(e)}), reason=msg) except Exception as ex: - raise web.HTTPInternalServerError(reason=ex) + msg = str(ex) + _logger.error(ex, "Failed to get plugins available list.") + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) return web.json_response({"plugins": plugins, "link": log_path}) diff --git a/python/fledge/services/core/api/plugins/install.py b/python/fledge/services/core/api/plugins/install.py index 29cfa88303..c898ddaba1 100644 --- a/python/fledge/services/core/api/plugins/install.py +++ b/python/fledge/services/core/api/plugins/install.py @@ -5,9 +5,7 @@ # FLEDGE_END import os -import platform import subprocess -import logging import asyncio import tarfile import hashlib @@ -21,17 +19,18 @@ from typing import Dict from datetime import datetime +from fledge.common import utils +from fledge.common.audit_logger import AuditLogger from fledge.common.common import _FLEDGE_ROOT, _FLEDGE_DATA -from fledge.services.core.api.plugins import common -from fledge.common import logger -from fledge.services.core.api.plugins.exceptions import * -from fledge.services.core import connect from fledge.common.configuration_manager import ConfigurationManager -from fledge.common.audit_logger import AuditLogger -from fledge.services.core import server +from fledge.common.logger import FLCoreLogger +from fledge.common.plugin_discovery import PluginDiscovery from fledge.common.storage_client.payload_builder import PayloadBuilder from fledge.common.storage_client.exceptions import StorageServerError -from fledge.common.plugin_discovery import PluginDiscovery +from fledge.services.core import connect, server +from fledge.services.core.api.plugins import common +from fledge.services.core.api.plugins.exceptions import * + __author__ = "Ashish Jabble" __copyright__ = "Copyright (c) 2019 Dianomic Systems Inc." 
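Several hunks in this patch swap the old platform.platform() substring checks for utils.is_redhat_based(), but the helper itself is not shown in the diff. The sketch below is only an editor's assumption of what fledge/common/utils.py provides, mirroring the check it replaces; the shipped implementation may differ.

import platform

def is_redhat_based() -> bool:
    # Hypothetical sketch: True on CentOS / Red Hat style systems, where yum is used instead of apt.
    _platform = platform.platform()
    return 'centos' in _platform or 'redhat' in _platform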
@@ -46,7 +45,7 @@ _TIME_OUT = 120 _CHUNK_SIZE = 1024 _PATH = _FLEDGE_DATA + '/plugins/' if _FLEDGE_DATA else _FLEDGE_ROOT + '/data/plugins/' -_LOGGER = logger.setup(__name__, level=logging.INFO) +_LOGGER = FLCoreLogger().get_logger(__name__) async def add_plugin(request: web.Request) -> web.Response: @@ -118,8 +117,7 @@ async def add_plugin(request: web.Request) -> web.Response: if name not in plugins: raise KeyError('{} plugin is not available for the configured repository'.format(name)) - _platform = platform.platform() - pkg_mgt = 'yum' if 'centos' in _platform or 'redhat' in _platform else 'apt' + pkg_mgt = 'yum' if utils.is_redhat_based() else 'apt' # Insert record into Packages table insert_payload = PayloadBuilder().INSERT(id=str(uuid.uuid4()), name=name, action=action, status=-1, log_file_uri="").payload() @@ -191,7 +189,9 @@ async def add_plugin(request: web.Request) -> web.Response: except (TypeError, ValueError) as ex: raise web.HTTPBadRequest(reason=str(ex)) except Exception as ex: - raise web.HTTPInternalServerError(reason=str(ex)) + msg = str(ex) + _LOGGER.error(ex, "Failed to install plugin.") + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({'message': msg})) else: return web.json_response(result_payload) @@ -229,15 +229,15 @@ def install_package(file_name: str, pkg_mgt: str) -> tuple: pkg_file_path = "/data/plugins/{}".format(file_name) stdout_file_path = "/data/plugins/output.txt" cmd = "sudo {} -y install {} > {} 2>&1".format(pkg_mgt, _FLEDGE_ROOT + pkg_file_path, _FLEDGE_ROOT + stdout_file_path) - _LOGGER.debug("CMD....{}".format(cmd)) + _LOGGER.debug("Install Package with command: {}".format(cmd)) ret_code = os.system(cmd) - _LOGGER.debug("Return Code....{}".format(ret_code)) + _LOGGER.debug("Package install return code: {}".format(ret_code)) msg = "" with open("{}".format(_FLEDGE_ROOT + stdout_file_path), 'r') as fh: for line in fh: line = line.rstrip("\n") msg += line - _LOGGER.debug("Message.....{}".format(msg)) + _LOGGER.debug("Package install message: {}".format(msg)) # Remove stdout file cmd = "{}/extras/C/cmdutil rm {}".format(_FLEDGE_ROOT, stdout_file_path) subprocess.run([cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) @@ -258,8 +258,9 @@ def copy_file_install_requirement(dir_files: list, plugin_type: str, file_name: if so_1_file: if not so_file: - _LOGGER.error("Symlink file is missing") - raise FileNotFoundError("Symlink file is missing") + err_msg = "Symlink file is missing." + _LOGGER.debug(err_msg) + raise FileNotFoundError(err_msg) _dir = [] for s in dir_files: _dir.append(s.split("/")[-1]) @@ -326,7 +327,7 @@ def install_package_from_repo(name: str, pkg_mgt: str, version: str, uid: uuid, # If max upgrade per day is set to 1, then an upgrade can not occurs until 24 hours after the last accessed upgrade. # If set to 2 then this drops to 12 hours between upgrades, 3 would result in 8 hours between calls and so on. 
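    # Editor's illustration, not part of the change: the same (24 / maximum-per-day) gate
    # governs upgrades. A configured maximum of 2 upgrades per day gives
    #   (24 / 2) * 60 * 60 == 43200 seconds
    # between runs of "sudo apt -y upgrade" (or "sudo yum -y update" on Red Hat based systems).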
if duration_in_sec > (24 / int(max_upgrade_cat_item['value'])) * 60 * 60 or not last_accessed_time: - _LOGGER.info("Attempting upgrade on {}".format(now)) + _LOGGER.info("Attempting {} upgrade on {}...".format(pkg_mgt, now)) cmd = "sudo {} -y upgrade".format(pkg_mgt) if pkg_mgt == 'apt' else "sudo {} -y update".format(pkg_mgt) ret_code = os.system(cmd + " > {} 2>&1".format(stdout_file_path)) if ret_code != 0: @@ -337,7 +338,7 @@ def install_package_from_repo(name: str, pkg_mgt: str, version: str, uid: uuid, else: pkg_cache_mgr['upgrade']['last_accessed_time'] = now else: - _LOGGER.warning("Maximum upgrade exceeds the limit for the day") + _LOGGER.warning("Maximum {} upgrade exceeds the limit for the day.".format(pkg_mgt)) msg = "updated" cmd = "sudo {} -y install {}".format(pkg_mgt, name) if version: @@ -353,7 +354,7 @@ def install_package_from_repo(name: str, pkg_mgt: str, version: str, uid: uuid, audit_detail = {'packageName': name} log_code = 'PKGUP' if msg == 'updated' else 'PKGIN' loop.run_until_complete(audit.information(log_code, audit_detail)) - _LOGGER.info('{} plugin {} successfully'.format(name, msg)) + _LOGGER.info('{} plugin {} successfully.'.format(name, msg)) async def check_upgrade_on_install() -> Dict: diff --git a/python/fledge/services/core/api/plugins/remove.py b/python/fledge/services/core/api/plugins/remove.py index 36dae1e74f..16e87281cf 100644 --- a/python/fledge/services/core/api/plugins/remove.py +++ b/python/fledge/services/core/api/plugins/remove.py @@ -5,44 +5,183 @@ # FLEDGE_END import aiohttp -import platform import os -import logging import json import asyncio import uuid import multiprocessing from aiohttp import web -from fledge.common import logger +from fledge.common import utils +from fledge.common.audit_logger import AuditLogger +from fledge.common.common import _FLEDGE_ROOT +from fledge.common.configuration_manager import ConfigurationManager +from fledge.common.logger import FLCoreLogger from fledge.common.plugin_discovery import PluginDiscovery +from fledge.common.storage_client.exceptions import StorageServerError +from fledge.common.storage_client.payload_builder import PayloadBuilder +from fledge.services.core import connect from fledge.services.core.api.plugins import common from fledge.services.core.api.plugins.exceptions import * -from fledge.services.core import connect -from fledge.common.storage_client.payload_builder import PayloadBuilder -from fledge.common.configuration_manager import ConfigurationManager -from fledge.common.common import _FLEDGE_ROOT -from fledge.common.audit_logger import AuditLogger -from fledge.common.storage_client.exceptions import StorageServerError -__author__ = "Rajesh Kumar" -__copyright__ = "Copyright (c) 2020, Dianomic Systems Inc." + +__author__ = "Rajesh Kumar, Ashish Jabble" +__copyright__ = "Copyright (c) 2020-2023, Dianomic Systems Inc." 
__license__ = "Apache 2.0" __version__ = "${VERSION}" _help = """ - ------------------------------------------------------------------------------- - | DELETE | /fledge/plugins/{plugin-type}/{plugin-name} | - ------------------------------------------------------------------------------- + -------------------------------------------------------------------- + | DELETE | /fledge/plugins/{package_name} | + -------------------------------------------------------------------- """ -_logger = logger.setup(__name__, level=logging.INFO) - +_logger = FLCoreLogger().get_logger(__name__) valid_plugin_types = ['north', 'south', 'filter', 'notify', 'rule'] PYTHON_PLUGIN_PATH = _FLEDGE_ROOT+'/python/fledge/plugins/' C_PLUGINS_PATH = _FLEDGE_ROOT+'/plugins/' +# only work with core 2.1.0 onwards version +async def remove_package(request: web.Request) -> web.Response: + """Remove installed Package + + package_name: package name of plugin + + Example: + curl -sX DELETE http://localhost:8081/fledge/plugins/fledge-south-modbus + curl -sX DELETE http://localhost:8081/fledge/plugins/fledge-north-http-north + curl -sX DELETE http://localhost:8081/fledge/plugins/fledge-filter-scale + curl -sX DELETE http://localhost:8081/fledge/plugins/fledge-notify-alexa + curl -sX DELETE http://localhost:8081/fledge/plugins/fledge-rule-watchdog + """ + try: + package_name = request.match_info.get('package_name', "fledge-") + package_name = package_name.replace(" ", "") + final_response = {} + if not package_name.startswith("fledge-"): + raise ValueError("Package name should start with 'fledge-' prefix.") + plugin_type = package_name.split("-", 2)[1] + if not plugin_type: + raise ValueError('Invalid Package name. Check and verify the package name in plugins installed.') + if plugin_type not in valid_plugin_types: + raise ValueError("Invalid plugin type. Please provide valid type: {}".format(valid_plugin_types)) + installed_plugins = PluginDiscovery.get_plugins_installed(plugin_type, False) + plugin_info = [(_plugin["name"], _plugin["version"]) for _plugin in installed_plugins + if _plugin["packageName"] == package_name] + if not plugin_info: + raise KeyError("{} package not found. Either package is not installed or missing in plugins installed." + "".format(package_name)) + plugin_name = plugin_info[0][0] + plugin_version = plugin_info[0][1] + if plugin_type in ['notify', 'rule']: + notification_instances_plugin_used_in = await _check_plugin_usage_in_notification_instances(plugin_name) + if notification_instances_plugin_used_in: + err_msg = "{} cannot be removed. This is being used by {} instances.".format( + plugin_name, notification_instances_plugin_used_in) + _logger.warning(err_msg) + raise RuntimeError(err_msg) + else: + get_tracked_plugins = await _check_plugin_usage(plugin_type, plugin_name) + if get_tracked_plugins: + e = "{} cannot be removed. This is being used by {} instances.". \ + format(plugin_name, get_tracked_plugins[0]['service_list']) + _logger.warning(e) + raise RuntimeError(e) + else: + _logger.info("No entry found for {name} plugin in asset tracker; " + "or {name} plugin may have been added in disabled state & never used." 
+ "".format(name=plugin_name)) + # Check Pre-conditions from Packages table + # if status is -1 (Already in progress) then return as rejected request + action = 'purge' + storage = connect.get_storage_async() + select_payload = PayloadBuilder().SELECT("status").WHERE(['action', '=', action]).AND_WHERE( + ['name', '=', package_name]).payload() + result = await storage.query_tbl_with_payload('packages', select_payload) + response = result['rows'] + if response: + exit_code = response[0]['status'] + if exit_code == -1: + msg = "{} package purge already in progress.".format(package_name) + return web.HTTPTooManyRequests(reason=msg, body=json.dumps({"message": msg})) + # Remove old entry from table for other cases + delete_payload = PayloadBuilder().WHERE(['action', '=', action]).AND_WHERE( + ['name', '=', package_name]).payload() + await storage.delete_from_tbl("packages", delete_payload) + + # Insert record into Packages table + insert_payload = PayloadBuilder().INSERT(id=str(uuid.uuid4()), name=package_name, action=action, + status=-1, log_file_uri="").payload() + result = await storage.insert_into_tbl("packages", insert_payload) + response = result['response'] + if response: + select_payload = PayloadBuilder().SELECT("id").WHERE(['action', '=', action]).AND_WHERE( + ['name', '=', package_name]).payload() + result = await storage.query_tbl_with_payload('packages', select_payload) + response = result['rows'] + if response: + pn = "{}-{}".format(action, plugin_name) + uid = response[0]['id'] + p = multiprocessing.Process(name=pn, + target=_uninstall, + args=(package_name, plugin_version, uid, storage) + ) + p.daemon = True + p.start() + msg = "{} plugin remove started.".format(plugin_name) + status_link = "fledge/package/{}/status?id={}".format(action, uid) + final_response = {"message": msg, "id": uid, "statusLink": status_link} + else: + raise StorageServerError + except (ValueError, RuntimeError) as err: + msg = str(err) + raise web.HTTPBadRequest(reason=msg, body=json.dumps({'message': msg})) + except KeyError as err: + msg = str(err) + raise web.HTTPNotFound(reason=msg, body=json.dumps({'message': msg})) + except StorageServerError as e: + msg = e.error + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": "Storage error: {}".format(msg)})) + except Exception as ex: + msg = str(ex) + _logger.error(ex, "Failed to delete {} package.".format(package_name)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({'message': msg})) + else: + return web.json_response(final_response) + + +def _uninstall(pkg_name: str, version: str, uid: uuid, storage: connect) -> tuple: + from fledge.services.core.server import Server + _logger.info("{} package removal started...".format(pkg_name)) + stdout_file_path = '' + try: + stdout_file_path = common.create_log_file(action='remove', plugin_name=pkg_name) + link = "log/" + stdout_file_path.split("/")[-1] + if utils.is_redhat_based(): + cmd = "sudo yum -y remove {} > {} 2>&1".format(pkg_name, stdout_file_path) + else: + cmd = "sudo apt -y purge {} > {} 2>&1".format(pkg_name, stdout_file_path) + code = os.system(cmd) + # Update record in Packages table + payload = PayloadBuilder().SET(status=code, log_file_uri=link).WHERE(['id', '=', uid]).payload() + loop = asyncio.new_event_loop() + loop.run_until_complete(storage.update_tbl("packages", payload)) + if code == 0: + # Clear internal cache + loop.run_until_complete(_put_refresh_cache("http", Server._host, Server.core_management_port)) + # Audit logger + audit = 
AuditLogger(storage) + audit_detail = {'package_name': pkg_name, 'version': version} + loop.run_until_complete(audit.information('PKGRM', audit_detail)) + _logger.info('{} removed successfully.'.format(pkg_name)) + except Exception: + # Non-Zero integer - Case of fail + code = 1 + return code, stdout_file_path + + +# only work with lesser or equal to version of core 2.1.0 version async def remove_plugin(request: web.Request) -> web.Response: """ Remove installed plugin from fledge @@ -62,37 +201,43 @@ async def remove_plugin(request: web.Request) -> web.Response: plugin_type = str(plugin_type).lower() if plugin_type not in valid_plugin_types: raise ValueError("Invalid plugin type. Please provide valid type: {}".format(valid_plugin_types)) - if plugin_type == 'notify': - installed_dir_name = 'notificationDelivery' - elif plugin_type == 'rule': - installed_dir_name = 'notificationRule' - else: - installed_dir_name = plugin_type + # only OMF is an inbuilt plugin + if name.lower() == 'omf': + raise ValueError("Cannot delete an inbuilt {} plugin.".format(name.upper())) result_payload = {} - installed_plugin = PluginDiscovery.get_plugins_installed(installed_dir_name, False) - if name not in [plugin['name'] for plugin in installed_plugin]: - raise KeyError("Invalid plugin name {} or plugin is not installed".format(name)) + installed_plugins = PluginDiscovery.get_plugins_installed(plugin_type, False) + plugin_info = [(_plugin["name"], _plugin["packageName"], _plugin["version"]) for _plugin in installed_plugins] + package_name = "fledge-{}-{}".format(plugin_type, name.lower().replace("_", "-")) + plugin_found = False + plugin_version = None + for p in plugin_info: + if p[0] == name: + package_name = p[1] + plugin_version = p[2] + plugin_found = True + break + if not plugin_found: + raise KeyError("Invalid plugin name {} or plugin is not installed.".format(name)) if plugin_type in ['notify', 'rule']: notification_instances_plugin_used_in = await _check_plugin_usage_in_notification_instances(name) if notification_instances_plugin_used_in: - err_msg = "{} cannot be removed. This is being used by {} instances".format( + err_msg = "{} cannot be removed. This is being used by {} instances.".format( name, notification_instances_plugin_used_in) - _logger.error(err_msg) + _logger.warning(err_msg) raise RuntimeError(err_msg) else: get_tracked_plugins = await _check_plugin_usage(plugin_type, name) if get_tracked_plugins: - e = "{} cannot be removed. This is being used by {} instances".\ + e = "{} cannot be removed. 
This is being used by {} instances.".\ format(name, get_tracked_plugins[0]['service_list']) - _logger.error(e) + _logger.warning(e) raise RuntimeError(e) else: _logger.info("No entry found for {name} plugin in asset tracker; or " - "{name} plugin may have been added in disabled state & never used".format(name=name)) + "{name} plugin may have been added in disabled state & never used.".format(name=name)) # Check Pre-conditions from Packages table # if status is -1 (Already in progress) then return as rejected request action = 'purge' - package_name = "fledge-{}-{}".format(plugin_type, name.lower().replace("_", "-")) storage = connect.get_storage_async() select_payload = PayloadBuilder().SELECT("status").WHERE(['action', '=', action]).AND_WHERE( ['name', '=', package_name]).payload() @@ -101,7 +246,7 @@ async def remove_plugin(request: web.Request) -> web.Response: if response: exit_code = response[0]['status'] if exit_code == -1: - msg = "{} package purge already in progress".format(package_name) + msg = "{} package purge already in progress.".format(package_name) return web.HTTPTooManyRequests(reason=msg, body=json.dumps({"message": msg})) # Remove old entry from table for other cases delete_payload = PayloadBuilder().WHERE(['action', '=', action]).AND_WHERE( @@ -121,10 +266,13 @@ async def remove_plugin(request: web.Request) -> web.Response: if response: pn = "{}-{}".format(action, name) uid = response[0]['id'] - p = multiprocessing.Process(name=pn, target=purge_plugin, args=(plugin_type, name, uid, storage)) + p = multiprocessing.Process(name=pn, + target=purge_plugin, + args=(plugin_type, name, package_name, plugin_version, uid, storage) + ) p.daemon = True p.start() - msg = "{} plugin purge started.".format(name) + msg = "{} plugin remove started.".format(name) status_link = "fledge/package/{}/status?id={}".format(action, uid) result_payload = {"message": msg, "id": uid, "statusLink": status_link} else: @@ -133,11 +281,13 @@ async def remove_plugin(request: web.Request) -> web.Response: raise web.HTTPBadRequest(reason=str(err), body=json.dumps({'message': str(err)})) except KeyError as err: raise web.HTTPNotFound(reason=str(err), body=json.dumps({'message': str(err)})) - except StorageServerError as err: - msg = str(err) + except StorageServerError as e: + msg = e.error raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": "Storage error: {}".format(msg)})) except Exception as ex: - raise web.HTTPInternalServerError(reason=str(ex), body=json.dumps({'message': str(ex)})) + msg = str(ex) + _logger.error(ex, "Failed to remove {} plugin.".format(name)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({'message': msg})) else: return web.json_response(result_payload) @@ -211,62 +361,54 @@ async def _check_plugin_usage_in_notification_instances(plugin_name: str) -> lis async def _put_refresh_cache(protocol: str, host: int, port: int) -> None: + # Scheme is always http:// on core_management_port management_api_url = '{}://{}:{}/fledge/cache'.format(protocol, host, port) headers = {'content-type': 'application/json'} - verify_ssl = False if protocol == 'HTTP' else True + verify_ssl = False connector = aiohttp.TCPConnector(verify_ssl=verify_ssl) async with aiohttp.ClientSession(connector=connector) as session: async with session.put(management_api_url, data=json.dumps({}), headers=headers) as resp: result = await resp.text() status_code = resp.status if status_code in range(400, 500): - _logger.error("Bad request error code: %d, reason: %s when refresh cache", 
status_code, resp.reason) + _logger.error("Bad request error code: {}, reason: {} when refresh cache".format(status_code, resp.reason)) if status_code in range(500, 600): - _logger.error("Server error code: %d, reason: %s when refresh cache", status_code, resp.reason) + _logger.error("Server error code: {}, reason: {} when refresh cache".format(status_code, resp.reason)) response = json.loads(result) - _logger.debug("PUT Refresh Cache response: %s", response) + _logger.debug("PUT Refresh Cache response: {}".format(response)) -def purge_plugin(plugin_type: str, name: str, uid: uuid, storage: connect) -> tuple: +def purge_plugin(plugin_type: str, plugin_name: str, pkg_name: str, version: str, uid: uuid, storage: connect) -> tuple: from fledge.services.core.server import Server - - # FIXME: non-package removal - _logger.info("{} plugin purge started...".format(name)) + _logger.info("{} plugin remove started...".format(pkg_name)) is_package = True stdout_file_path = '' - original_name = name - # Special case handling - installed directory name Vs package name - # For example: Plugins like http_south Vs http-south - name = name.replace('_', '-').lower() - plugin_name = 'fledge-{}-{}'.format(plugin_type, name) - - get_platform = platform.platform() try: - if 'centos' in get_platform or 'redhat' in get_platform: + if utils.is_redhat_based(): rpm_list = os.popen('rpm -qa | grep fledge*').read() _logger.debug("rpm list : {}".format(rpm_list)) if len(rpm_list): - f = rpm_list.find(plugin_name) + f = rpm_list.find(pkg_name) if f == -1: raise KeyError else: raise KeyError - stdout_file_path = common.create_log_file(action='remove', plugin_name=plugin_name) + stdout_file_path = common.create_log_file(action='remove', plugin_name=pkg_name) link = "log/" + stdout_file_path.split("/")[-1] - cmd = "sudo yum -y remove {} > {} 2>&1".format(plugin_name, stdout_file_path) + cmd = "sudo yum -y remove {} > {} 2>&1".format(pkg_name, stdout_file_path) else: dpkg_list = os.popen('dpkg --list "fledge*" 2>/dev/null') ls_output = dpkg_list.read() _logger.debug("dpkg list output: {}".format(ls_output)) if len(ls_output): - f = ls_output.find(plugin_name) + f = ls_output.find(pkg_name) if f == -1: raise KeyError else: raise KeyError - stdout_file_path = common.create_log_file(action='remove', plugin_name=plugin_name) + stdout_file_path = common.create_log_file(action='remove', plugin_name=pkg_name) link = "log/" + stdout_file_path.split("/")[-1] - cmd = "sudo apt -y purge {} > {} 2>&1".format(plugin_name, stdout_file_path) + cmd = "sudo apt -y purge {} > {} 2>&1".format(pkg_name, stdout_file_path) code = os.system(cmd) # Update record in Packages table @@ -276,13 +418,12 @@ def purge_plugin(plugin_type: str, name: str, uid: uuid, storage: connect) -> tu if code == 0: # Clear internal cache - loop.run_until_complete(_put_refresh_cache(Server.is_rest_server_http_enabled, - Server._host, Server.core_management_port)) + loop.run_until_complete(_put_refresh_cache("http", Server._host, Server.core_management_port)) # Audit info audit = AuditLogger(storage) - audit_detail = {'package_name': "fledge-{}-{}".format(plugin_type, name)} + audit_detail = {'package_name': pkg_name, 'version': version} loop.run_until_complete(audit.information('PKGRM', audit_detail)) - _logger.info('{} plugin purged successfully'.format(name)) + _logger.info('{} plugin removed successfully.'.format(pkg_name)) except KeyError: # This case is for non-package installation - python plugin path will be tried first and then C _logger.info("Trying removal of 
manually installed plugin...") @@ -290,16 +431,17 @@ def purge_plugin(plugin_type: str, name: str, uid: uuid, storage: connect) -> tu if plugin_type in ['notify', 'rule']: plugin_type = 'notificationDelivery' if plugin_type == 'notify' else 'notificationRule' try: - path = PYTHON_PLUGIN_PATH+'{}/{}'.format(plugin_type, original_name) + path = PYTHON_PLUGIN_PATH+'{}/{}'.format(plugin_type, plugin_name) if not os.path.isdir(path): - path = C_PLUGINS_PATH + '{}/{}'.format(plugin_type, original_name) + path = C_PLUGINS_PATH + '{}/{}'.format(plugin_type, plugin_name) rm_cmd = 'rm -rv {}'.format(path) if os.path.exists("{}/bin".format(_FLEDGE_ROOT)) and os.path.exists("{}/bin/fledge".format(_FLEDGE_ROOT)): rm_cmd = 'sudo rm -rv {}'.format(path) code = os.system(rm_cmd) if code != 0: - raise OSError("While deleting, invalid plugin path found for {}".format(original_name)) + raise OSError("While deleting, invalid plugin path found for {}".format(plugin_name)) except Exception as ex: code = 1 - _logger.error("Error in removing plugin: {}".format(str(ex))) + _logger.error(ex, "Error in removing plugin.") + _logger.info('{} plugin removed successfully.'.format(plugin_name)) return code, stdout_file_path, is_package diff --git a/python/fledge/services/core/api/plugins/update.py b/python/fledge/services/core/api/plugins/update.py index 58e3a04dd7..490c322466 100644 --- a/python/fledge/services/core/api/plugins/update.py +++ b/python/fledge/services/core/api/plugins/update.py @@ -7,37 +7,177 @@ import aiohttp import asyncio import os -import logging import uuid -import platform import multiprocessing import json from aiohttp import web -from fledge.common import logger -from fledge.services.core import connect -from fledge.common.storage_client.payload_builder import PayloadBuilder -from fledge.services.core import server -from fledge.common.plugin_discovery import PluginDiscovery -from fledge.services.core.api.plugins import common -from fledge.common.configuration_manager import ConfigurationManager +from fledge.common import utils from fledge.common.audit_logger import AuditLogger +from fledge.common.configuration_manager import ConfigurationManager +from fledge.common.logger import FLCoreLogger +from fledge.common.plugin_discovery import PluginDiscovery from fledge.common.storage_client.exceptions import StorageServerError +from fledge.common.storage_client.payload_builder import PayloadBuilder +from fledge.services.core import connect, server +from fledge.services.core.api.plugins import common __author__ = "Ashish Jabble" -__copyright__ = "Copyright (c) 2019, Dianomic Systems Inc." +__copyright__ = "Copyright (c) 2019-2023, Dianomic Systems Inc." 
__license__ = "Apache 2.0" __version__ = "${VERSION}" _help = """ - ------------------------------------------------------------------------------- - | PUT | /fledge/plugin/{type}/{name}/update | - ------------------------------------------------------------------------------- + ------------------------------------------------------------------------ + | PUT | /fledge/plugins/{package_name} | + ------------------------------------------------------------------------ """ -_logger = logger.setup(__name__, level=logging.INFO) +_logger = FLCoreLogger().get_logger(__name__) + + +# only work with core 2.1.0 onwards version +async def update_package(request: web.Request) -> web.Response: + """ Update Package + + package_name: package name of plugin + + Example: + curl -sX PUT http://localhost:8081/fledge/plugins/fledge-south-modbus + curl -sX PUT http://localhost:8081/fledge/plugins/fledge-north-http-north + curl -sX PUT http://localhost:8081/fledge/plugins/fledge-filter-scale + curl -sX PUT http://localhost:8081/fledge/plugins/fledge-notify-alexa + curl -sX PUT http://localhost:8081/fledge/plugins/fledge-rule-watchdog + """ + + try: + valid_plugin_types = ['north', 'south', 'filter', 'notify', 'rule'] + package_name = request.match_info.get('package_name', "fledge-") + package_name = package_name.replace(" ", "") + final_response = {} + if not package_name.startswith("fledge-"): + raise ValueError("Package name should start with 'fledge-' prefix.") + plugin_type = package_name.split("-", 2)[1] + if not plugin_type: + raise ValueError('Invalid Package name. Check and verify the package name in plugins installed.') + if plugin_type not in valid_plugin_types: + raise ValueError("Invalid plugin type. Please provide valid type: {}".format(valid_plugin_types)) + installed_plugins = PluginDiscovery.get_plugins_installed(plugin_type, False) + plugin_info = [_plugin["name"] for _plugin in installed_plugins if _plugin["packageName"] == package_name] + if not plugin_info: + raise KeyError("{} package not found. Either package is not installed or missing in plugins installed." 
+ "".format(package_name)) + plugin_name = plugin_info[0] + # Check Pre-conditions from Packages table + # if status is -1 (Already in progress) then return as rejected request + action = 'update' + storage_client = connect.get_storage_async() + select_payload = PayloadBuilder().SELECT("status").WHERE(['action', '=', action]).AND_WHERE( + ['name', '=', package_name]).payload() + result = await storage_client.query_tbl_with_payload('packages', select_payload) + response = result['rows'] + if response: + exit_code = response[0]['status'] + if exit_code == -1: + msg = "{} package {} already in progress.".format(package_name, action) + return web.HTTPTooManyRequests(reason=msg, body=json.dumps({"message": msg})) + # Remove old entry from table for other cases + delete_payload = PayloadBuilder().WHERE(['action', '=', action]).AND_WHERE( + ['name', '=', package_name]).payload() + await storage_client.delete_from_tbl("packages", delete_payload) + + schedules = [] + notifications = [] + if plugin_type in ['notify', 'rule']: + # Check Notification service is enabled or not + payload = PayloadBuilder().SELECT("id", "enabled", "schedule_name").WHERE(['process_name', '=', + 'notification_c']).payload() + result = await storage_client.query_tbl_with_payload('schedules', payload) + sch_info = result['rows'] + if sch_info and sch_info[0]['enabled'] == 't': + # Find notification instances which are used by requested plugin name + # If its config item 'enable' is true then update to false + config_mgr = ConfigurationManager(storage_client) + all_notifications = await config_mgr._read_all_child_category_names("Notifications") + for notification in all_notifications: + notification_config = await config_mgr._read_category_val(notification['child']) + notification_name = notification_config['name']['value'] + channel = notification_config['channel']['value'] + rule = notification_config['rule']['value'] + is_enabled = True if notification_config['enable']['value'] == 'true' else False + if (channel == plugin_name and is_enabled) or (rule == plugin_name and is_enabled): + _logger.warning( + "Disabling {} notification instance, as {} {} plugin is being updated...".format( + notification_name, plugin_name, plugin_type)) + await config_mgr.set_category_item_value_entry(notification_name, "enable", "false") + notifications.append(notification_name) + else: + # FIXME: if any south/north service or task doesnot have tracked by Fledge; + # then we need to handle the case to disable the service or task if enabled + # Tracked plugins from asset tracker + tracked_plugins = await _get_plugin_and_sch_name_from_asset_tracker(plugin_type) + filters_used_by = [] + if plugin_type == 'filter': + # In case of filter, for asset_tracker table we are inserting filter category_name in plugin column + # instead of filter plugin name by Design + # Hence below query is required to get actual plugin name from filters table + storage_client = connect.get_storage_async() + payload = PayloadBuilder().SELECT("name").WHERE(['plugin', '=', plugin_name]).payload() + result = await storage_client.query_tbl_with_payload('filters', payload) + filters_used_by = [r['name'] for r in result['rows']] + for p in tracked_plugins: + if (plugin_name == p['plugin'] and not plugin_type == 'filter') or ( + p['plugin'] in filters_used_by and plugin_type == 'filter'): + sch_info = await _get_sch_id_and_enabled_by_name(p['service']) + if sch_info[0]['enabled'] == 't': + status, reason = await 
server.Server.scheduler.disable_schedule(uuid.UUID(sch_info[0]['id'])) + if status: + _logger.warning("Disabling {} {} instance, as {} plugin is being updated...".format( + p['service'], plugin_type, plugin_name)) + schedules.append(sch_info[0]['id']) + # Insert record into Packages table + insert_payload = PayloadBuilder().INSERT(id=str(uuid.uuid4()), name=package_name, action=action, status=-1, + log_file_uri="").payload() + result = await storage_client.insert_into_tbl("packages", insert_payload) + response = result['response'] + if response: + select_payload = PayloadBuilder().SELECT("id").WHERE(['action', '=', action]).AND_WHERE( + ['name', '=', package_name]).payload() + result = await storage_client.query_tbl_with_payload('packages', select_payload) + response = result['rows'] + if response: + pn = "{}-{}".format(action, package_name) + uid = response[0]['id'] + p = multiprocessing.Process(name=pn, + target=do_update, + args=("http", server.Server._host, + server.Server.core_management_port, storage_client, plugin_type, + plugin_name, package_name, uid, schedules, notifications)) + p.daemon = True + p.start() + msg = "{} {} started.".format(package_name, action) + status_link = "fledge/package/{}/status?id={}".format(action, uid) + final_response = {"message": msg, "id": uid, "statusLink": status_link} + else: + raise StorageServerError + except KeyError as err: + msg = str(err) + raise web.HTTPNotFound(reason=msg, body=json.dumps({'message': msg})) + except ValueError as err: + msg = str(err) + raise web.HTTPBadRequest(reason=msg, body=json.dumps({'message': msg})) + except StorageServerError as e: + msg = e.error + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": "Storage error: {}".format(msg)})) + except Exception as ex: + msg = str(ex) + _logger.error(ex, "Failed to update {} package.".format(package_name)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({'message': msg})) + else: + return web.json_response(final_response) +# only work with lesser or equal to version of core 2.1.0 version async def update_plugin(request: web.Request) -> web.Response: """ update plugin @@ -54,18 +194,26 @@ async def update_plugin(request: web.Request) -> web.Response: _type = _type.lower() if _type not in ['north', 'south', 'filter', 'notify', 'rule']: raise ValueError("Invalid plugin type. Must be one of 'south' , north', 'filter', 'notify' or 'rule'") - if _type == 'notify': - installed_dir_name = 'notificationDelivery' - elif _type == 'rule': - installed_dir_name = 'notificationRule' - else: - installed_dir_name = _type + # only OMF is an inbuilt plugin + if name.lower() == 'omf': + raise ValueError("Cannot update an inbuilt {} plugin.".format(name.upper())) + # Check requested plugin name is installed or not + installed_plugins = PluginDiscovery.get_plugins_installed(_type, False) + plugin_info = [(_plugin["name"], _plugin["packageName"]) for _plugin in installed_plugins] + package_name = "fledge-{}-{}".format(_type, name.lower().replace('_', '-')) + plugin_found = False + for p in plugin_info: + if p[0] == name: + package_name = p[1] + plugin_found = True + break + if not plugin_found: + raise KeyError("{} plugin is not yet installed. 
So update is not possible.".format(name)) # Check Pre-conditions from Packages table # if status is -1 (Already in progress) then return as rejected request result_payload = {} action = 'update' - package_name = "fledge-{}-{}".format(_type, name.lower().replace('_', '-')) storage_client = connect.get_storage_async() select_payload = PayloadBuilder().SELECT("status").WHERE(['action', '=', action]).AND_WHERE( ['name', '=', package_name]).payload() @@ -74,21 +222,15 @@ async def update_plugin(request: web.Request) -> web.Response: if response: exit_code = response[0]['status'] if exit_code == -1: - msg = "{} package {} already in progress".format(package_name, action) + msg = "{} package {} already in progress.".format(package_name, action) return web.HTTPTooManyRequests(reason=msg, body=json.dumps({"message": msg})) # Remove old entry from table for other cases delete_payload = PayloadBuilder().WHERE(['action', '=', action]).AND_WHERE( ['name', '=', package_name]).payload() await storage_client.delete_from_tbl("packages", delete_payload) - # Check requested plugin name is installed or not - installed_plugins = PluginDiscovery.get_plugins_installed(installed_dir_name, False) - installed_plugin_name = [p_name["name"] for p_name in installed_plugins] - if name not in installed_plugin_name: - raise KeyError("{} plugin is not yet installed. So update is not possible.".format(name)) - - sch_list = [] - notification_list = [] + schedules = [] + notifications = [] if _type in ['notify', 'rule']: # Check Notification service is enabled or not payload = PayloadBuilder().SELECT("id", "enabled", "schedule_name").WHERE(['process_name', '=', @@ -110,8 +252,10 @@ async def update_plugin(request: web.Request) -> web.Response: _logger.warning("Disabling {} notification instance, as {} {} plugin is being updated...".format( notification_name, name, _type)) await config_mgr.set_category_item_value_entry(notification_name, "enable", "false") - notification_list.append(notification_name) + notifications.append(notification_name) else: + # FIXME: if any south/north service or task doesnot have tracked by Fledge; + # then we need to handle the case to disable the service or task if enabled # Tracked plugins from asset tracker tracked_plugins = await _get_plugin_and_sch_name_from_asset_tracker(_type) filters_used_by = [] @@ -132,7 +276,7 @@ async def update_plugin(request: web.Request) -> web.Response: if status: _logger.warning("Disabling {} {} instance, as {} plugin is being updated...".format( p['service'], _type, name)) - sch_list.append(sch_info[0]['id']) + schedules.append(sch_info[0]['id']) # Insert record into Packages table insert_payload = PayloadBuilder().INSERT(id=str(uuid.uuid4()), name=package_name, action=action, status=-1, log_file_uri="").payload() @@ -146,11 +290,12 @@ async def update_plugin(request: web.Request) -> web.Response: if response: pn = "{}-{}".format(action, name) uid = response[0]['id'] - p = multiprocessing.Process(name=pn, target=do_update, args=(server.Server.is_rest_server_http_enabled, - server.Server._host, - server.Server.core_management_port, - storage_client, _type, name, uid, sch_list, - notification_list)) + p = multiprocessing.Process(name=pn, + target=do_update, + args=("http", + server.Server._host, server.Server.core_management_port, + storage_client, _type, name, package_name, uid, + schedules, notifications)) p.daemon = True p.start() msg = "{} {} started.".format(package_name, action) @@ -162,11 +307,13 @@ async def update_plugin(request: web.Request) -> 
web.Response: raise web.HTTPNotFound(reason=str(ex)) except ValueError as ex: raise web.HTTPBadRequest(reason=str(ex)) - except StorageServerError as err: - msg = str(err) + except StorageServerError as e: + msg = e.error raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": "Storage error: {}".format(msg)})) except Exception as ex: - raise web.HTTPInternalServerError(reason=str(ex)) + msg = str(ex) + _logger.error(ex, "Failed to update {} plugin.".format(name)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({'message': msg})) else: return web.json_response(result_payload) @@ -174,10 +321,13 @@ async def update_plugin(request: web.Request) -> web.Response: async def _get_plugin_and_sch_name_from_asset_tracker(_type: str) -> list: if _type == "south": event_name = "Ingest" - elif _type == 'filter': + elif _type == "filter": event_name = "Filter" - else: + elif _type == "north": event_name = "Egress" + else: + # Return empty if _type is different + return [] storage_client = connect.get_storage_async() payload = PayloadBuilder().SELECT("plugin", "service").WHERE(['event', '=', event_name]).payload() result = await storage_client.query_tbl_with_payload('asset_tracker', payload) @@ -192,42 +342,34 @@ async def _get_sch_id_and_enabled_by_name(name: str) -> list: async def _put_schedule(protocol: str, host: str, port: int, sch_id: uuid, is_enabled: bool) -> None: + # Scheme is always http:// on core_management_port management_api_url = '{}://{}:{}/fledge/schedule/{}/enable'.format(protocol, host, port, sch_id) headers = {'content-type': 'application/json'} - verify_ssl = False if protocol == 'HTTP' else True + verify_ssl = False connector = aiohttp.TCPConnector(verify_ssl=verify_ssl) async with aiohttp.ClientSession(connector=connector) as session: async with session.put(management_api_url, data=json.dumps({"value": is_enabled}), headers=headers) as resp: result = await resp.text() status_code = resp.status if status_code in range(400, 500): - _logger.error("Bad request error code: %d, reason: %s when PUT schedule", status_code, resp.reason) + _logger.error("Bad request error code: {}, reason: {} when PUT schedule".format(status_code, resp.reason)) if status_code in range(500, 600): - _logger.error("Server error code: %d, reason: %s when PUT schedule", status_code, resp.reason) + _logger.error("Server error code: {}, reason: {} when PUT schedule".format(status_code, resp.reason)) response = json.loads(result) - _logger.debug("PUT Schedule response: %s", response) - + _logger.debug("PUT Schedule response: {}".format(response)) -def _update_repo_sources_and_plugin(_type: str, name: str) -> tuple: - # Below check is needed for python plugins - # For Example: installed_plugin_dir=wind_turbine; package_name=wind-turbine - name = name.replace("_", "-") - # For endpoint curl -X GET http://localhost:8081/fledge/plugins/available we used - # sudo apt list command internal so package name always returns in lowercase, - # irrespective of package name defined in the configured repo. 
- name = "fledge-{}-{}".format(_type, name.lower()) - _platform = platform.platform() - stdout_file_path = common.create_log_file(action="update", plugin_name=name) +def _update_repo_sources_and_plugin(pkg_name: str) -> tuple: + stdout_file_path = common.create_log_file(action="update", plugin_name=pkg_name) pkg_mgt = 'apt' cmd = "sudo {} -y update > {} 2>&1".format(pkg_mgt, stdout_file_path) - if 'centos' in _platform or 'redhat' in _platform: + if utils.is_redhat_based(): pkg_mgt = 'yum' cmd = "sudo {} check-update > {} 2>&1".format(pkg_mgt, stdout_file_path) ret_code = os.system(cmd) # sudo apt/yum -y install only happens when update is without any error if ret_code == 0: - cmd = "sudo {} -y install {} >> {} 2>&1".format(pkg_mgt, name, stdout_file_path) + cmd = "sudo {} -y install {} >> {} 2>&1".format(pkg_mgt, pkg_name, stdout_file_path) ret_code = os.system(cmd) # relative log file link @@ -235,11 +377,11 @@ def _update_repo_sources_and_plugin(_type: str, name: str) -> tuple: return ret_code, link -def do_update(http_enabled: bool, host: str, port: int, storage: connect, _type: str, name: str, uid: str, - schedules: list, notifications: list) -> None: - _logger.info("{} plugin update started...".format(name)) - protocol = "HTTP" if http_enabled else "HTTPS" - code, link = _update_repo_sources_and_plugin(_type, name) +def do_update(protocol: str, host: str, port: int, storage: connect, _type: str, plugin_name: str, + pkg_name: str, uid: str, schedules: list, notifications: list) -> None: + _logger.info("{} package update started...".format(pkg_name)) + + code, link = _update_repo_sources_and_plugin(pkg_name) # Update record in Packages table payload = PayloadBuilder().SET(status=code, log_file_uri=link).WHERE(['id', '=', uid]).payload() @@ -249,9 +391,13 @@ def do_update(http_enabled: bool, host: str, port: int, storage: connect, _type: if code == 0: # Audit info audit = AuditLogger(storage) - audit_detail = {'packageName': "fledge-{}-{}".format(_type, name.replace("_", "-"))} + installed_plugins = PluginDiscovery.get_plugins_installed(_type, False) + version = [p["version"] for p in installed_plugins if p['name'] == plugin_name] + audit_detail = {'packageName': pkg_name} + if version: + audit_detail['version'] = version[0] loop.run_until_complete(audit.information('PKGUP', audit_detail)) - _logger.info('{} plugin updated successfully'.format(name)) + _logger.info('{} package updated successfully.'.format(pkg_name)) # Restart the services which were disabled before plugin update for sch in schedules: diff --git a/python/fledge/services/core/api/python_packages.py b/python/fledge/services/core/api/python_packages.py index 54578452f1..c3b16ce80c 100644 --- a/python/fledge/services/core/api/python_packages.py +++ b/python/fledge/services/core/api/python_packages.py @@ -4,15 +4,14 @@ # See: http://fledge-iot.readthedocs.io/ # FLEDGE_END -import logging -import pkg_resources -import json import asyncio - +import json +from typing import List +import pkg_resources from aiohttp import web -from fledge.common import logger -from fledge.services.core import connect + from fledge.common.audit_logger import AuditLogger +from fledge.common.logger import FLCoreLogger from fledge.services.core import connect __author__ = "Himanshu Vimal" @@ -26,7 +25,14 @@ | POST | /fledge/python/package | ---------------------------------------------------------- """ -_LOGGER = logger.setup(__name__, level=logging.INFO) +_LOGGER = FLCoreLogger().get_logger(__name__) + + +def get_packages_installed() -> List: + 
package_ws = pkg_resources.WorkingSet() + installed_pkgs = [{'package': dist.project_name, 'version': dist.version} for dist in package_ws] + return installed_pkgs + async def get_packages(request: web.Request) -> web.Response: """ @@ -39,15 +45,13 @@ async def get_packages(request: web.Request) -> web.Response: :Example: curl -X GET http://localhost:8081/fledge/python/packages """ - package_ws = pkg_resources.WorkingSet() - installed_pkgs = [{'package':dist.project_name,'version': dist.version} for dist in package_ws] - return web.json_response({'packages': installed_pkgs}) + return web.json_response({'packages': get_packages_installed()}) async def install_package(request: web.Request) -> web.Response: """ Args: - Request: '{ "package" : "numpy", + request: '{ "package" : "numpy", "version" : "1.2" #optional }' @@ -78,29 +82,30 @@ def get_installed_package_info(input_package): installed_package, installed_version = get_installed_package_info(input_package_name) if installed_package: - #Package already exists - _LOGGER.info("Package: {} Version: {} already installed.".format(installed_package, installed_version)) + # Package already exists + _LOGGER.warning("Package: {} Version: {} already installed.".format(installed_package, installed_version)) return web.HTTPConflict(reason="Package already installed.", - body=json.dumps({"message":"Package {} version {} already installed." + body=json.dumps({"message": "Package {} version {} already installed." .format(installed_package, installed_version)})) - #Package not found, install package via pip - pip_process = await asyncio.create_subprocess_shell('python3 -m pip install '+ install_args, + # Package not found, install package via pip + pip_process = await asyncio.create_subprocess_shell('python3 -m pip install ' + install_args, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE) stdout, stderr = await pip_process.communicate() if pip_process.returncode == 0: - _LOGGER.info("Package: {} successfully installed", format(input_package_name)) + _LOGGER.info("Package: {} successfully installed.", format(input_package_name)) try: - #Audit log entry: PIPIN + # Audit log entry: PIPIN storage_client = connect.get_storage_async() pip_audit_log = AuditLogger(storage_client) - audit_message = {"package":input_package_name, "status": "Success"} + audit_message = {"package": input_package_name, "status": "Success"} if input_package_version: audit_message["version"] = input_package_version await pip_audit_log.information('PIPIN', audit_message) except: - _LOGGER.exception("Failed to log the audit entry for PIPIN, for package {} install", format(input_package_name)) + _LOGGER.error("Failed to log the audit entry for PIPIN, for package {} install", format( + input_package_name)) response = "Package {} version {} installed successfully.".format(input_package_name, input_package_version) if not input_package_version: diff --git a/python/fledge/services/core/api/repos/configure.py b/python/fledge/services/core/api/repos/configure.py index 74dadba46e..1fd166010f 100644 --- a/python/fledge/services/core/api/repos/configure.py +++ b/python/fledge/services/core/api/repos/configure.py @@ -6,13 +6,13 @@ import os import platform -import logging import json from aiohttp import web +from fledge.common import utils from fledge.common.common import _FLEDGE_ROOT -from fledge.common import logger +from fledge.common.logger import FLCoreLogger __author__ = "Ashish Jabble" @@ -25,7 +25,7 @@ | POST | /fledge/repository | 
------------------------------------------------------------------------------- """ -_LOGGER = logger.setup(__name__, level=logging.INFO) +_LOGGER = FLCoreLogger().get_logger(__name__) async def add_package_repo(request: web.Request) -> web.Response: @@ -49,53 +49,63 @@ async def add_package_repo(request: web.Request) -> web.Response: raise ValueError('url param is required') _platform = platform.platform() - pkg_mgt = 'yum' if 'centos' in _platform or 'redhat' in _platform else 'apt' v_list = ['nightly', 'latest'] if not (version in v_list or version.startswith('fixes/')): if str(version).count('.') != 2: - raise ValueError('Invalid version; it should be latest, nightly or a valid semantic version X.Y.Z i.e. major.minor.patch') + raise ValueError('Invalid version; it should be latest, ' + 'nightly or a valid semantic version X.Y.Z i.e. major.minor.patch') - if 'x86_64-with-Ubuntu-18.04' in _platform: - os_name = "ubuntu1804" - architecture = "x86_64" - extra_commands = "" - elif 'x86_64-with-glib' in _platform: - os_name = "ubuntu2004" - architecture = "x86_64" - extra_commands = "" - elif 'armv7l-with-debian' in _platform: - os_name = "buster" - architecture = "armv7l" - extra_commands = "" - elif 'armv7l-with-glibc' in _platform: - os_name = "bullseye" - architecture = "armv7l" - extra_commands = "" - elif 'aarch64-with-Ubuntu-18.04' in _platform: - os_name = "ubuntu1804" - architecture = "aarch64" - extra_commands = "" - elif 'x86_64-with-redhat' in _platform: - os_name = "rhel7" - architecture = "x86_64" - extra_commands = "sudo yum-config-manager --enable 'Red Hat Enterprise Linux Server 7 RHSCL (RPMs)'" - elif 'aarch64-with-Mendel' in _platform: - os_name = "mendel" - architecture = "aarch64" - extra_commands = "" - elif 'x86_64-with-centos' in _platform: - os_name = "centos7" - architecture = "x86_64" - extra_commands = "sudo yum install -y centos-release-scl-rh epel-release" + if utils.is_redhat_based(): + pkg_mgt = 'yum' + if 'x86_64-with-redhat' in _platform: + os_name = "rhel7" + architecture = "x86_64" + extra_commands = "sudo yum-config-manager --enable 'Red Hat Enterprise Linux Server 7 RHSCL (RPMs)'" + elif 'x86_64-with-centos' in _platform: + os_name = "centos7" + architecture = "x86_64" + extra_commands = "sudo yum install -y centos-release-scl-rh epel-release" + elif 'x86_64-with-glibc' in _platform: + os_name = "centos-stream-9" + architecture = "x86_64" + extra_commands = "" + else: + raise ValueError("{} is not supported".format(_platform)) else: - raise ValueError("{} is not supported".format(_platform)) + pkg_mgt = 'apt' + if 'x86_64-with-Ubuntu-18.04' in _platform: + os_name = "ubuntu1804" + architecture = "x86_64" + extra_commands = "" + elif 'x86_64-with-glib' in _platform: + os_name = "ubuntu2004" + architecture = "x86_64" + extra_commands = "" + elif 'armv7l-with-debian' in _platform: + os_name = "buster" + architecture = "armv7l" + extra_commands = "" + elif 'armv7l-with-glibc' in _platform: + os_name = "bullseye" + architecture = "armv7l" + extra_commands = "" + elif 'aarch64-with-Ubuntu-18.04' in _platform: + os_name = "ubuntu1804" + architecture = "aarch64" + extra_commands = "" + elif 'aarch64-with-Mendel' in _platform: + os_name = "mendel" + architecture = "aarch64" + extra_commands = "" + else: + raise ValueError("{} is not supported".format(_platform)) stdout_file_path = _FLEDGE_ROOT + "/data/configure_repo_output.txt" if pkg_mgt == 'yum': cmd = "sudo rpm --import {}/RPM-GPG-KEY-fledge > {} 2>&1".format(url, stdout_file_path) else: cmd = "wget -q -O - 
{}/KEY.gpg | sudo apt-key add - > {} 2>&1".format(url, stdout_file_path) - _LOGGER.debug("CMD-1....{}".format(cmd)) + _LOGGER.debug("Add the key that is used to verify the package with command: {}".format(cmd)) ret_code = os.system(cmd) if ret_code != 0: raise RuntimeError("See logs in {}".format(stdout_file_path)) @@ -105,7 +115,7 @@ async def add_package_repo(request: web.Request) -> web.Response: else: cmd = "echo \"deb {}/ /\" | sudo tee /etc/apt/sources.list.d/fledge.list >> {} 2>&1".format( full_url, stdout_file_path) - _LOGGER.debug("CMD-2....{}".format(cmd)) + _LOGGER.debug("Edit the sources list with command: {}".format(cmd)) ret_code = os.system(cmd) if ret_code != 0: raise RuntimeError("See logs in {}".format(stdout_file_path)) @@ -113,7 +123,7 @@ async def add_package_repo(request: web.Request) -> web.Response: cmd = "{} >> {} 2>&1".format(extra_commands, stdout_file_path) else: cmd = "sudo {} -y update >> {} 2>&1".format(pkg_mgt, stdout_file_path) - _LOGGER.debug("CMD-3....{}".format(cmd)) + _LOGGER.debug("Fetch the list of packages with command: {}".format(cmd)) ret_code = os.system(cmd) if ret_code != 0: raise RuntimeError("See logs in {}".format(stdout_file_path)) @@ -126,8 +136,9 @@ async def add_package_repo(request: web.Request) -> web.Response: raise web.HTTPBadRequest(body=json.dumps({"message": "Failed to configure package repository", "output_log": msg}), reason=msg) except Exception as ex: - raise web.HTTPInternalServerError(reason=str(ex)) + msg = "Failed to configure archive package repository setup." + _LOGGER.error(ex, msg) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": "{} {}".format(msg, str(ex))})) else: return web.json_response({"message": "Package repository configured successfully.", "output_log": stdout_file_path}) - diff --git a/python/fledge/services/core/api/scheduler.py b/python/fledge/services/core/api/scheduler.py index 14144bdf36..d3d985630a 100644 --- a/python/fledge/services/core/api/scheduler.py +++ b/python/fledge/services/core/api/scheduler.py @@ -648,6 +648,12 @@ async def update_schedule(request): if not sch: raise ScheduleNotFoundError(schedule_id) + # Restrict name and type properties for STARTUP type schedules + if 'name' in data and Schedule.Type(int(sch.schedule_type)).name == "STARTUP": + raise ValueError("{} is a STARTUP schedule type and cannot be renamed.".format(sch.name)) + if 'type' in data and Schedule.Type(int(sch.schedule_type)).name == "STARTUP": + raise ValueError("{} is a STARTUP schedule type and cannot be changed its type.".format(sch.name)) + curr_value = dict() curr_value['schedule_id'] = sch.schedule_id curr_value['schedule_process_name'] = sch.process_name diff --git a/python/fledge/services/core/api/service.py b/python/fledge/services/core/api/service.py index 9a58eac121..e15490d48d 100644 --- a/python/fledge/services/core/api/service.py +++ b/python/fledge/services/core/api/service.py @@ -9,14 +9,13 @@ import os import datetime import uuid -import platform import json import multiprocessing from aiohttp import web from typing import Dict, List from fledge.common import utils -from fledge.common import logger +from fledge.common.logger import FLCoreLogger from fledge.common.service_record import ServiceRecord from fledge.common.storage_client.payload_builder import PayloadBuilder from fledge.common.storage_client.exceptions import StorageServerError @@ -50,8 +49,7 @@ | POST | /fledge/service/{service_name}/otp | 
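The new guard added to update_schedule rejects renaming or re-typing STARTUP schedules before any other validation runs. A self-contained sketch of that check, with the STARTUP type code 1 matching its use elsewhere in this diff:

    STARTUP_TYPE = 1   # Schedule.Type.STARTUP, as used elsewhere in this diff

    def validate_schedule_update(schedule_type: int, schedule_name: str, data: dict):
        """Reject payloads that try to rename or re-type a STARTUP schedule."""
        if int(schedule_type) != STARTUP_TYPE:
            return
        if 'name' in data:
            raise ValueError("{} is a STARTUP schedule type and cannot be renamed.".format(schedule_name))
        if 'type' in data:
            raise ValueError("{} is a STARTUP schedule type and its type cannot be changed.".format(schedule_name))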
------------------------------------------------------------------------------ """ - -_logger = logger.setup() +_logger = FLCoreLogger().get_logger(__name__) ################################# # Service @@ -263,9 +261,7 @@ async def add_service(request): delimiter = '.' if str(version).count(delimiter) != 2: raise ValueError('Service semantic version is incorrect; it should be like X.Y.Z') - - _platform = platform.platform() - pkg_mgt = 'yum' if 'centos' in _platform or 'redhat' in _platform else 'apt' + pkg_mgt = 'yum' if utils.is_redhat_based() else 'apt' # Check Pre-conditions from Packages table # if status is -1 (Already in progress) then return as rejected request storage = connect.get_storage_async() @@ -356,21 +352,21 @@ async def add_service(request): plugin_info = common.load_and_fetch_python_plugin_info(plugin_module_path, plugin, service_type) plugin_config = plugin_info['config'] if not plugin_config: - _logger.exception("Plugin %s import problem from path %s", plugin, plugin_module_path) - raise web.HTTPNotFound(reason='Plugin "{}" import problem from path "{}".'.format( - plugin, plugin_module_path)) + msg = "Plugin '{}' import problem from path '{}''.".format(plugin, plugin_module_path) + _logger.exception(msg) + raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) except FileNotFoundError as ex: # Checking for C-type plugins plugin_config = load_c_plugin(plugin, service_type) + plugin_module_path = "{}/plugins/{}/{}".format(_FLEDGE_ROOT, service_type, plugin) if not plugin_config: - _logger.exception("Plugin %s import problem from path %s. %s", plugin, plugin_module_path, str(ex)) - raise web.HTTPNotFound(reason='Plugin "{}" import problem from path "{}".'.format( - plugin, plugin_module_path)) + msg = "Plugin '{}' not found in path '{}'.".format(plugin, plugin_module_path) + _logger.exception(ex, msg) + raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) except TypeError as ex: - _logger.exception(str(ex)) raise web.HTTPBadRequest(reason=str(ex)) except Exception as ex: - _logger.exception("Failed to fetch plugin configuration. %s", str(ex)) + _logger.error(ex, "Failed to fetch plugin info config item.") raise web.HTTPInternalServerError(reason='Failed to fetch plugin configuration') elif service_type == 'notification': if not os.path.exists(_FLEDGE_ROOT + "/services/fledge.services.{}".format(service_type)): @@ -424,7 +420,7 @@ async def add_service(request): _logger.exception("Failed to create scheduled process. %s", ex.error) raise web.HTTPInternalServerError(reason='Failed to create service.') except Exception as ex: - _logger.exception("Failed to create scheduled process. %s", str(ex)) + _logger.error(ex, "Failed to create scheduled process.") raise web.HTTPInternalServerError(reason='Failed to create service.') # check that notification service is not already registered, right now notification service LIMIT to 1 @@ -474,11 +470,11 @@ async def add_service(request): raise ValueError('Config must be a JSON object') for k, v in config.items(): await config_mgr.set_category_item_value_entry(name, k, v['value']) - except Exception as ex: await config_mgr.delete_category_and_children_recursively(name) - _logger.exception("Failed to create plugin configuration. %s", str(ex)) - raise web.HTTPInternalServerError(reason='Failed to create plugin configuration. {}'.format(ex)) + msg = "Failed to create plugin configuration while adding service." 
+ _logger.error(ex, msg) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) # If all successful then lastly add a schedule to run the new service at startup try: @@ -499,7 +495,7 @@ async def add_service(request): raise web.HTTPInternalServerError(reason='Failed to create service.') except Exception as ex: await config_mgr.delete_category_and_children_recursively(name) - _logger.exception("Failed to create service. %s", str(ex)) + _logger.error(ex, "Failed to create service.") raise web.HTTPInternalServerError(reason='Failed to create service.') except ValueError as err: msg = str(err) @@ -595,11 +591,13 @@ async def update_service(request: web.Request) -> web.Response: name = request.match_info.get('name', None) try: _type = _type.lower() - # TODO: 5141 - once done we need to fix for dispatcher type as well - if _type != 'notification': - raise ValueError("Invalid service type. Must be 'notification'") + if _type not in ('notification', 'dispatcher', 'bucket_storage', 'management'): + raise ValueError("Invalid service type.") + + # NOTE: `bucketstorage` repository name with `BucketStorage` type in service registry has package name *-`bucket`. + # URL: /fledge/service/bucket_storage/bucket/update - # Check requested service name is installed or not + # Check requested service is installed or not installed_services = get_service_installed() if name not in installed_services: raise KeyError("{} service is not installed yet. Hence update is not possible.".format(name)) @@ -623,9 +621,11 @@ async def update_service(request: web.Request) -> web.Response: ['name', '=', package_name]).payload() await storage_client.delete_from_tbl("packages", delete_payload) - # process_name always ends with "_c" suffix - payload = PayloadBuilder().SELECT("id", "enabled", "schedule_name").WHERE(['process_name', '=', '{}_c'.format( - _type)]).payload() + _where_clause = ['process_name', '=', '{}_c'.format(_type)] + if _type == 'management': + _where_clause = ['process_name', '=', '{}'.format(_type)] + + payload = PayloadBuilder().SELECT("id", "enabled", "schedule_name").WHERE(_where_clause).payload() result = await storage_client.query_tbl_with_payload('schedules', payload) sch_info = result['rows'] sch_list = [] @@ -643,10 +643,12 @@ async def update_service(request: web.Request) -> web.Response: result = await storage_client.insert_into_tbl("packages", insert_payload) if result['response'] == "inserted" and result['rows_affected'] == 1: pn = "{}-{}".format(action, name) - p = multiprocessing.Process(name=pn, target=do_update, args=(server.Server.is_rest_server_http_enabled, - server.Server._host, - server.Server.core_management_port, - storage_client, package_name, uid, sch_list)) + # Scheme is always http:// on core_management_port + p = multiprocessing.Process(name=pn, + target=do_update, + args=("http", server.Server._host, + server.Server.core_management_port, storage_client, package_name, + uid, sch_list)) p.daemon = True p.start() msg = "{} {} started".format(package_name, action) @@ -667,7 +669,7 @@ async def update_service(request: web.Request) -> web.Response: async def _put_schedule(protocol: str, host: str, port: int, sch_id: uuid, is_enabled: bool) -> None: management_api_url = '{}://{}:{}/fledge/schedule/{}/enable'.format(protocol, host, port, sch_id) headers = {'content-type': 'application/json'} - verify_ssl = False if protocol == 'HTTP' else True + verify_ssl = False connector = aiohttp.TCPConnector(verify_ssl=verify_ssl) async with 
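The schedule lookup in update_service above encodes a naming convention: the schedule's process_name carries a _c suffix for every updatable service type except management. A small sketch that keeps the convention in one place (the helper name is illustrative, not part of the change):

    def schedule_process_name(service_type: str) -> str:
        """Map a service type to the process_name stored in the schedules table.

        Per the update handler above, every type uses the "<type>_c" form except
        the management service, which is registered under its bare name.
        """
        return service_type if service_type == 'management' else '{}_c'.format(service_type)

    assert schedule_process_name('notification') == 'notification_c'
    assert schedule_process_name('management') == 'management'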
aiohttp.ClientSession(connector=connector) as session: async with session.put(management_api_url, data=json.dumps({"value": is_enabled}), headers=headers) as resp: @@ -681,16 +683,12 @@ async def _put_schedule(protocol: str, host: str, port: int, sch_id: uuid, is_en _logger.debug("PUT Schedule response: %s", response) -def do_update(http_enabled: bool, host: str, port: int, storage: connect, pkg_name: str, uid: str, - schedules: list) -> None: +def do_update(protocol: str, host: str, port: int, storage: connect, pkg_name: str, uid: str, schedules: list) -> None: _logger.info("{} service update started...".format(pkg_name)) - _platform = platform.platform() stdout_file_path = common.create_log_file("update", pkg_name) - pkg_mgt = 'apt' + pkg_mgt = 'yum' if utils.is_redhat_based() else 'apt' cmd = "sudo {} -y update > {} 2>&1".format(pkg_mgt, stdout_file_path) - protocol = "HTTP" if http_enabled else "HTTPS" - if 'centos' in _platform or 'redhat' in _platform: - pkg_mgt = 'yum' + if pkg_mgt == 'yum': cmd = "sudo {} check-update > {} 2>&1".format(pkg_mgt, stdout_file_path) ret_code = os.system(cmd) # sudo apt/yum -y install only happens when update is without any error diff --git a/python/fledge/services/core/api/statistics.py b/python/fledge/services/core/api/statistics.py index 142cf90026..b0da9ff5ff 100644 --- a/python/fledge/services/core/api/statistics.py +++ b/python/fledge/services/core/api/statistics.py @@ -9,6 +9,7 @@ from fledge.common.storage_client.payload_builder import PayloadBuilder from fledge.services.core import connect from fledge.services.core.scheduler.scheduler import Scheduler +from fledge.common.logger import FLCoreLogger __author__ = "Amarendra K. Sinha, Ashish Jabble" __copyright__ = "Copyright (c) 2017 OSIsoft, LLC" @@ -23,6 +24,8 @@ ------------------------------------------------------------------------------ """ +_logger = FLCoreLogger().get_logger(__name__) + ################################# # Statistics @@ -152,20 +155,19 @@ async def get_statistics_history(request): async def get_statistics_rate(request: web.Request) -> web.Response: - """ + """To retrieve the statistics rates and will be calculated by formula: + (sum(value) / ((60 * period) / stats_collector_interval)) + For example: + If stats_collector_interval set to 15 seconds then + a) For a 1 minute period should take 4 statistics history values, sum those and then divide by period + b) For a 5 minute period should take 20 statistics history values, sum those and then divide by period Args: request: Returns: - A JSON document with the rates for each of the statistics + A JSON document with the rates for each of the statistics :Example: - curl -X GET http://localhost:8081/fledge/statistics/rate?periods=1,5,15&statistics=SINUSOID,FASTSINUSOID,READINGS - - Implementation: - Calculation via: (sum(value) / count(value)) * 60 / () - Queries for above example: - select key, 4 * (sum(value) / count(value)) from statistics_history where history_ts >= datetime('now', '-1 Minute') and key in ("SINUSOID", "FASTSINUSOID", "READINGS" ) group by key; - select key, 4 * (sum(value) / count(value)) from statistics_history where history_ts >= datetime('now', '-5 Minute') and key in ("SINUSOID", "FASTSINUSOID", "READINGS" ) group by key; - select key, 4 * (sum(value) / count(value)) from statistics_history where history_ts >= datetime('now', '-15 Minute') and key in ("SINUSOID", "FASTSINUSOID", "READINGS" ) group by key; + curl -sX GET "http://localhost:8081/fledge/statistics/rate?periods=5&statistics=READINGS" + curl -sX 
GET "http://localhost:8081/fledge/statistics/rate?periods=1,5,15&statistics=SINUSOID,FASTSINUSOID" """ params = request.query if 'periods' not in params: @@ -202,25 +204,22 @@ async def get_statistics_rate(request: web.Request) -> web.Response: seconds=interval_dt.second).total_seconds() else: raise web.HTTPNotFound(reason="No stats collector schedule found") - ts = datetime.datetime.now().timestamp() resp = [] for x, y in [(x, y) for x in period_split_list for y in stat_split_list]: - time_diff = ts - int(x) - # TODO: FOGL-4102 - # For example: - # time_diff = 1590066814.037321 - # ERROR: PostgreSQL storage plugin raising error: ERROR: invalid input syntax for type timestamp with time zone: "1590066814.037321" - # "where": {"column": "history_ts", "condition": ">=", "value": "1590066814.037321"} - Payload works with sqlite engine BUT not with postgres - # To overcome above problem on postgres - I have used "dt = 2020-05-21 13:13:34" - but I see some deviations in results for both engines when we use datetime format - _payload = PayloadBuilder().SELECT("key").AGGREGATE(["sum", "value"]).AGGREGATE(["count", "value"]).WHERE( - ['history_ts', '>=', str(time_diff)]).AND_WHERE(['key', '=', y]).chain_payload() - stats_rate_payload = PayloadBuilder(_payload).GROUP_BY("key").payload() + # Get value column as per given key along with history_ts column order by + _payload = PayloadBuilder().SELECT("value").WHERE(['key', '=', y]).ORDER_BY(["history_ts", "desc"] + ).chain_payload() + # LIMIT set to ((60 * period) / stats_collector_interval)) + calculated_formula = int((60 * int(x) / int(interval_in_secs))) + stats_rate_payload = PayloadBuilder(_payload).LIMIT(calculated_formula).payload() result = await storage_client.query_tbl_with_payload("statistics_history", stats_rate_payload) temp_dict = {y: {x: 0}} if result['rows']: - calculated_formula_str = (int(result['rows'][0]['sum_value']) / int(result['rows'][0]['count_value']) - ) * (60 / int(interval_in_secs)) - temp_dict = {y: {x: calculated_formula_str}} + row_sum = 0 + values = [r['value'] for r in result['rows']] + for v in values: + row_sum += v + temp_dict = {y: {x: row_sum / int(x)}} resp.append(temp_dict) rate_dict = {} for d in resp: diff --git a/python/fledge/services/core/api/support.py b/python/fledge/services/core/api/support.py index d95b279e37..0a4cadb067 100644 --- a/python/fledge/services/core/api/support.py +++ b/python/fledge/services/core/api/support.py @@ -5,35 +5,29 @@ # FLEDGE_END import os -import platform import subprocess import json -import logging import datetime import urllib.parse from pathlib import Path from aiohttp import web -from fledge.common import logger +from fledge.common import utils from fledge.common.common import _FLEDGE_ROOT, _FLEDGE_DATA +from fledge.common.logger import FLCoreLogger from fledge.services.core.support import SupportBuilder -from fledge.common.common import _FLEDGE_ROOT __author__ = "Ashish Jabble" __copyright__ = "Copyright (c) 2017 OSIsoft, LLC" __license__ = "Apache 2.0" __version__ = "${VERSION}" -_logger = logger.setup(__name__, level=logging.INFO) - -_SYSLOG_FILE = '/var/log/syslog' -if any(x in platform.platform() for x in ['centos', 'redhat']): - _SYSLOG_FILE = '/var/log/messages' +_logger = FLCoreLogger().get_logger(__name__) +_SYSLOG_FILE = '/var/log/messages' if utils.is_redhat_based() else '/var/log/syslog' _SCRIPTS_DIR = "{}/scripts".format(_FLEDGE_ROOT) - __DEFAULT_LIMIT = 20 __DEFAULT_OFFSET = 0 __DEFAULT_LOG_SOURCE = 'Fledge' @@ -115,7 +109,9 @@ async def 
create_support_bundle(request): try: bundle_name = await SupportBuilder(support_dir).build() except Exception as ex: - raise web.HTTPInternalServerError(reason='Support bundle could not be created. {}'.format(str(ex))) + msg = 'Failed to create support bundle.' + _logger.error(ex, msg) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) return web.json_response({"bundle created": bundle_name}) @@ -134,9 +130,11 @@ async def get_syslog_entries(request): curl -X GET "http://localhost:8081/fledge/syslog?limit=5&source=storage" curl -X GET "http://localhost:8081/fledge/syslog?limit=5&offset=5&source=storage" curl -sX GET "http://localhost:8081/fledge/syslog?nontotals=true" + curl -sX GET "http://localhost:8081/fledge/syslog?nontotals=true&keyword=Storage%20error" curl -sX GET "http://localhost:8081/fledge/syslog?nontotals=true&source=|" curl -sX GET "http://localhost:8081/fledge/syslog?nontotals=true&limit=5" curl -sX GET "http://localhost:8081/fledge/syslog?nontotals=true&limit=100&offset=50" + curl -sX GET "http://localhost:8081/fledge/syslog?nontotals=true&limit=100&offset=50&keyword=fledge.services" curl -sX GET "http://localhost:8081/fledge/syslog?nontotals=true&source=|&limit=10&offset=50" curl -sX GET "http://localhost:8081/fledge/syslog?nontotals=true&source=|" """ @@ -185,7 +183,10 @@ async def get_syslog_entries(request): template = __GET_SYSLOG_CMD_WITH_ERROR_TEMPLATE lines = __GET_SYSLOG_ERROR_MATCHED_LINES levels = "(ERROR|FATAL)" - + # keyword + keyword = '' + if 'keyword' in request.query and request.query['keyword'] != '': + keyword = request.query['keyword'] response = {} # nontotals non_totals = request.query['nontotals'].lower() if 'nontotals' in request.query and request.query[ @@ -200,11 +201,14 @@ async def get_syslog_entries(request): response['count'] = total_lines cmd = template.format(valid_source[source], _SYSLOG_FILE, total_lines - offset, limit) else: - scriptPath = os.path.join(_SCRIPTS_DIR, "common", "get_logs.sh") + script_path = os.path.join(_SCRIPTS_DIR, "common", "get_logs.sh") # cmd = non_total_template.format(valid_source[source], _SYSLOG_FILE, offset, limit) pattern = '({})\[.*\].*{}:'.format(valid_source[source], levels) - cmd = '{} -offset {} -limit {} -pattern \'{}\' -logfile {} -source {} -level {}'.format(scriptPath, offset, limit, pattern, _SYSLOG_FILE, source, level) - _logger.debug('********* non_totals=true: new shell command: {}'.format(cmd)) + cmd = '{} -offset {} -limit {} -pattern \'{}\' -logfile {} -source \'{}\' -level {}'.format( + script_path, offset, limit, pattern, _SYSLOG_FILE, source, level) + if len(keyword): + cmd += ' -keyword \'{}\''.format(keyword) + _logger.debug('********* non_totals={}: new shell command: {}'.format(non_totals, cmd)) t1 = datetime.datetime.now() rv = subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE).stdout.readlines() @@ -217,6 +221,7 @@ async def get_syslog_entries(request): raise web.HTTPBadRequest(body=json.dumps({"message": msg}), reason=msg) except (OSError, Exception) as ex: msg = str(ex) + _logger.error(ex, "Failed to get syslog entries.") raise web.HTTPInternalServerError(body=json.dumps({"message": msg}), reason=msg) return web.json_response(response) diff --git a/python/fledge/services/core/api/task.py b/python/fledge/services/core/api/task.py index 18860e549e..6495198a19 100644 --- a/python/fledge/services/core/api/task.py +++ b/python/fledge/services/core/api/task.py @@ -10,8 +10,8 @@ from aiohttp import web from fledge.common import utils -from fledge.common 
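The keyword support added to get_syslog_entries is passed straight through to the get_logs.sh helper as an extra -keyword argument. A sketch of the command assembly on its own; the flag names follow the invocation in the diff, while the paths and sample pattern below are illustrative:

    def build_syslog_cmd(script_path: str, logfile: str, source: str, pattern: str,
                         offset: int, limit: int, level: str, keyword: str = '') -> str:
        """Assemble the get_logs.sh invocation used for non-totals syslog queries."""
        cmd = "{} -offset {} -limit {} -pattern '{}' -logfile {} -source '{}' -level {}".format(
            script_path, offset, limit, pattern, logfile, source, level)
        if keyword:
            cmd += " -keyword '{}'".format(keyword)
        return cmd

    print(build_syslog_cmd("scripts/common/get_logs.sh", "/var/log/syslog", "Fledge Storage",
                           r"(Fledge Storage)\[.*\].*(ERROR|FATAL):",
                           offset=0, limit=5, level="error", keyword="Storage error"))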
import logger from fledge.common.configuration_manager import ConfigurationManager +from fledge.common.logger import FLCoreLogger from fledge.common.storage_client.payload_builder import PayloadBuilder from fledge.common.storage_client.exceptions import StorageServerError @@ -34,7 +34,7 @@ ------------------------------------------------------------------------------- """ -_logger = logger.setup() +_logger = FLCoreLogger().get_logger(__name__) async def add_task(request): @@ -163,30 +163,28 @@ async def add_task(request): # Checking for C-type plugins plugin_info = apiutils.get_plugin_info(plugin, dir=task_type) if not plugin_info: - msg = "Plugin {} does not appear to be a valid plugin".format(plugin) - _logger.error(msg) + msg = "Plugin {} does not appear to be a valid plugin.".format(plugin) return web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) valid_c_plugin_info_keys = ['name', 'version', 'type', 'interface', 'flag', 'config'] for k in valid_c_plugin_info_keys: if k not in list(plugin_info.keys()): - msg = "Plugin info does not appear to be a valid for {} plugin. '{}' item not found".format( + msg = "Plugin info does not appear to be a valid for {} plugin. '{}' item not found.".format( plugin, k) - _logger.error(msg) return web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) if plugin_info['type'] != task_type: - msg = "Plugin of {} type is not supported".format(plugin_info['type']) - _logger.error(msg) + msg = "Plugin of {} type is not supported.".format(plugin_info['type']) return web.HTTPBadRequest(reason=msg) plugin_config = plugin_info['config'] if not plugin_config: - _logger.exception("Plugin %s import problem from path %s. %s", plugin, plugin_module_path, str(ex)) - raise web.HTTPNotFound(reason='Plugin "{}" import problem from path "{}"'.format(plugin, - plugin_module_path)) + plugin_module_path = "{}/plugins/{}/{}".format(_FLEDGE_ROOT, task_type, plugin) + raise web.HTTPNotFound(reason='Plugin "{}" import problem from path "{}"'.format( + plugin, plugin_module_path)) except TypeError as ex: raise web.HTTPBadRequest(reason=str(ex)) except Exception as ex: - _logger.exception("Failed to fetch plugin configuration. %s", str(ex)) - raise web.HTTPInternalServerError(reason='Failed to fetch plugin configuration.') + msg = "Failed to fetch plugin configuration." + _logger.error(ex, msg) + raise web.HTTPInternalServerError(reason=msg) storage = connect.get_storage_async() config_mgr = ConfigurationManager(storage) @@ -202,7 +200,7 @@ async def add_task(request): if result['count'] >= 1: msg = 'Unable to reuse name {0}, already used by a previous task.'.format(name) - _logger.exception(msg) + _logger.warning(msg) raise web.HTTPBadRequest(reason=msg) # Check whether category name already exists @@ -225,10 +223,10 @@ async def add_task(request): try: res = await storage.insert_into_tbl("scheduled_processes", payload) except StorageServerError as ex: - _logger.exception("Failed to create scheduled process. %s", ex.error) + _logger.exception("Failed to create scheduled process due to {}".format(ex.error)) raise web.HTTPInternalServerError(reason='Failed to create north instance.') except Exception as ex: - _logger.exception("Failed to create scheduled process. 
%s", ex) + _logger.error(ex, "Failed to create scheduled process.") raise web.HTTPInternalServerError(reason='Failed to create north instance.') # If successful then create a configuration entry from plugin configuration @@ -251,7 +249,7 @@ async def add_task(request): await config_mgr.set_category_item_value_entry(name, k, v['value']) except Exception as ex: await config_mgr.delete_category_and_children_recursively(name) - _logger.exception("Failed to create plugin configuration. %s", str(ex)) + _logger.error(ex, "Failed to create plugin configuration.") raise web.HTTPInternalServerError(reason='Failed to create plugin configuration. {}'.format(ex)) # If all successful then lastly add a schedule to run the new task at startup @@ -276,11 +274,11 @@ async def add_task(request): schedule = await server.Server.scheduler.get_schedule_by_name(name) except StorageServerError as ex: await config_mgr.delete_category_and_children_recursively(name) - _logger.exception("Failed to create schedule. %s", ex.error) + _logger.exception("Failed to create north instance due to {}".format(ex.error)) raise web.HTTPInternalServerError(reason='Failed to create north instance.') except Exception as ex: await config_mgr.delete_category_and_children_recursively(name) - _logger.exception("Failed to create schedule. %s", str(ex)) + _logger.error(ex, "Failed to create north instance.") raise web.HTTPInternalServerError(reason='Failed to create north instance.') except ValueError as e: @@ -295,8 +293,8 @@ async def delete_task(request): :Example: curl -X DELETE http://localhost:8081/fledge/scheduled/task/ """ + north_instance = request.match_info.get('task_name', None) try: - north_instance = request.match_info.get('task_name', None) storage = connect.get_storage_async() result = await get_schedule(storage, north_instance) @@ -324,9 +322,10 @@ async def delete_task(request): await delete_plugin_data(storage, north_instance) # update deprecated timestamp in asset_tracker await update_deprecated_ts_in_asset_tracker(storage, north_instance) - except Exception as ex: - raise web.HTTPInternalServerError(reason=ex) + msg = str(ex) + _logger.error(ex, "Failed to delete {} north task.".format(north_instance)) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response({'result': 'North instance {} deleted successfully.'.format(north_instance)}) diff --git a/python/fledge/services/core/api/update.py b/python/fledge/services/core/api/update.py index 0fde626928..e49e7cb75e 100644 --- a/python/fledge/services/core/api/update.py +++ b/python/fledge/services/core/api/update.py @@ -13,14 +13,13 @@ import os import asyncio import re -import platform +from fledge.common import utils +from fledge.common.logger import FLCoreLogger from fledge.services.core import server -from fledge.common import logger from fledge.services.core.scheduler.entities import ManualSchedule -_LOG_LEVEL = 20 -_logger = logger.setup(__name__, level=_LOG_LEVEL) +_logger = FLCoreLogger().get_logger(__name__) __author__ = "Massimiliano Pinto" __copyright__ = "Copyright (c) 2018 OSIsoft, LLC" @@ -77,7 +76,6 @@ async def update_package(request): manual_schedule = ManualSchedule() if not manual_schedule: - _logger.error(error_message) raise ValueError(error_message) # Set schedule fields manual_schedule.name = _FLEDGE_MANUAL_UPDATE_SCHEDULE @@ -107,6 +105,7 @@ async def update_package(request): raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) except Exception as ex: msg = str(ex) + 
_logger.error(ex, "Failed to update Fledge package.") raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response({"status": "Running", "message": status_message}) @@ -126,10 +125,9 @@ async def get_updates(request: web.Request) -> web.Response: Example curl -sX GET http://localhost:8081/fledge/update |jq """ - _platform = platform.platform() update_cmd = "sudo apt update" upgradable_pkgs_check_cmd = "apt list --upgradable | grep \^fledge" - if "centos" in _platform or "redhat" in _platform: + if utils.is_redhat_based(): update_cmd = "sudo yum check-update" upgradable_pkgs_check_cmd = "yum list updates | grep \^fledge" @@ -152,7 +150,7 @@ async def get_updates(request: web.Request) -> web.Response: return web.json_response({'updates': upgradable_packages}) try: process_output = stdout.decode("utf-8") - _logger.info(process_output) + _logger.debug(process_output) # split on new-line word_list = re.split(r"\n+", process_output) @@ -177,8 +175,8 @@ async def get_updates(request: web.Request) -> web.Response: # Make a set to avoid duplicates. upgradable_packages = list(set(packages)) except Exception as ex: - msg = "Failed to fetch upgradable packages list for the configured repository! {}".format(str(ex)) - _logger.error(msg) - raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) + msg = "Failed to fetch upgradable packages list for the configured repository!" + _logger.error(ex, msg) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": "{} {}".format(msg, str(ex))})) else: return web.json_response({'updates': upgradable_packages}) diff --git a/python/fledge/services/core/api/utils.py b/python/fledge/services/core/api/utils.py index 8d57dc650e..292754b656 100644 --- a/python/fledge/services/core/api/utils.py +++ b/python/fledge/services/core/api/utils.py @@ -4,67 +4,64 @@ # See: http://fledge-iot.readthedocs.io/ # FLEDGE_END - import subprocess import os import json - -from fledge.common import logger from fledge.common.common import _FLEDGE_ROOT, _FLEDGE_PLUGIN_PATH +from fledge.common.logger import FLCoreLogger -_logger = logger.setup(__name__) +_logger = FLCoreLogger().get_logger(__name__) _lib_path = _FLEDGE_ROOT + "/" + "plugins" +C_PLUGIN_UTIL_PATH = _FLEDGE_ROOT + "/extras/C/get_plugin_info" if os.path.isdir(_FLEDGE_ROOT + "/extras/C") \ + else _FLEDGE_ROOT + "/cmake_build/C/plugins/utils/get_plugin_info" + def get_plugin_info(name, dir): try: - arg1 = _find_c_util('get_plugin_info') arg2 = _find_c_lib(name, dir) if arg2 is None: raise ValueError('The plugin {} does not exist'.format(name)) - cmd_with_args = [arg1, arg2, "plugin_info"] + cmd_with_args = [C_PLUGIN_UTIL_PATH, arg2, "plugin_info"] p = subprocess.Popen(cmd_with_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() res = out.decode("utf-8") jdoc = json.loads(res) - except OSError as err: - _logger.error("%s C plugin get info failed due to %s", name, str(err)) + except json.decoder.JSONDecodeError as err: + _logger.error("Failed to parse JSON data returned from the plugin information of {}, {} line {} column {}".format(name, err.msg, err.lineno, err.colno)) + return {} + except (OSError, ValueError) as err: + _logger.error(err, "{} C plugin get info failed.".format(name)) return {} except subprocess.CalledProcessError as err: if err.output is not None: - _logger.error("%s C plugin get info failed '%s' due to %s", name, err.output, str(err)) + _logger.error(err, "{} C plugin get info failed 
'{}'.".format(name, err.output)) else: - _logger.error("%s C plugin get info failed due to %s", name, str(err)) - return {} - except ValueError as err: - _logger.error(str(err)) + _logger.error(err, "{} C plugin get info failed.".format(name)) return {} except Exception as ex: - _logger.exception("%s C plugin get info failed due to %s", name, str(ex)) + _logger.error(ex, "{} C plugin get info failed.".format(name)) return {} else: return jdoc -def _find_c_lib(name, dir): - _path = [_lib_path + "/" + dir] +def _find_c_lib(name, installed_dir): + _path = [_lib_path + "/" + installed_dir] _path = _find_plugins_from_env(_path) + lib_path = None + for fp in _path: for path, subdirs, files in os.walk(fp): for fname in files: # C-binary file if fname.endswith("lib{}.so".format(name)): - return os.path.join(path, fname) - return None - - -def _find_c_util(name): - for path, subdirs, files in os.walk(_FLEDGE_ROOT): - for fname in files: - # C-utility file - if fname == name: - return os.path.join(path, fname) - return None + lib_path = os.path.join(path, fname) + break + else: + continue + break + return lib_path def find_c_plugin_libs(direction): @@ -72,32 +69,30 @@ def find_c_plugin_libs(direction): _path = [_lib_path] _path = _find_plugins_from_env(_path) for fp in _path: - for root, dirs, files in os.walk(fp + "/" + direction): - for name in dirs: - p = os.path.join(root, name) - for path, subdirs, f in os.walk(p): - for fname in f: - # C-binary file - if fname.endswith('.so'): - # Replace lib and .so from fname - libraries.append((fname.replace("lib", "").replace(".so", ""), 'binary')) - # For Hybrid plugins - if direction == 'south' and fname.endswith('.json'): - libraries.append((fname.replace(".json", ""), 'json')) + if os.path.isdir(fp + "/" + direction): + for name in os.listdir(fp + "/" + direction): + p = fp + "/" + direction + "/" + name + for fname in os.listdir(p): + if fname.endswith('.so'): + # Replace lib and .so from fname + libraries.append((fname.replace("lib", "").replace(".so", ""), 'binary')) + # For Hybrid plugins + if direction == 'south' and fname.endswith('.json'): + libraries.append((fname.replace(".json", ""), 'json')) return libraries def _find_plugins_from_env(_plugin_path: list) -> list: if _FLEDGE_PLUGIN_PATH: my_list = _FLEDGE_PLUGIN_PATH.split(";") - for l in my_list: - dir_found = os.path.isdir(l) + for ml in my_list: + dir_found = os.path.isdir(ml) if dir_found: - subdirs = [dirs for x, dirs, files in os.walk(l)] + subdirs = [dirs for x, dirs, files in os.walk(ml)] if subdirs[0]: - _plugin_path.append(l) + _plugin_path.append(ml) else: - _logger.warning("{} subdir type not found".format(l)) + _logger.warning("{} subdir type not found.".format(ml)) else: - _logger.warning("{} dir path not found".format(l)) + _logger.warning("{} dir path not found.".format(ml)) return _plugin_path diff --git a/python/fledge/services/core/asset_tracker/asset_tracker.py b/python/fledge/services/core/asset_tracker/asset_tracker.py index d8098b49da..e0be33965c 100644 --- a/python/fledge/services/core/asset_tracker/asset_tracker.py +++ b/python/fledge/services/core/asset_tracker/asset_tracker.py @@ -4,20 +4,18 @@ # See: http://fledge-iot.readthedocs.io/ # FLEDGE_END -from fledge.common import logger +from fledge.common.configuration_manager import ConfigurationManager +from fledge.common.logger import FLCoreLogger from fledge.common.storage_client.payload_builder import PayloadBuilder from fledge.common.storage_client.storage_client import StorageClientAsync from 
fledge.common.storage_client.exceptions import StorageServerError -from fledge.common.configuration_manager import ConfigurationManager - __author__ = "Ashish Jabble" __copyright__ = "Copyright (c) 2018 OSIsoft, LLC" __license__ = "Apache 2.0" __version__ = "${VERSION}" - -_logger = logger.setup(__name__) +_logger = FLCoreLogger().get_logger(__name__) class AssetTracker(object): @@ -48,7 +46,7 @@ async def load_asset_records(self): for row in results['rows']: self._registered_asset_records.append(row) except Exception as ex: - _logger.exception('Failed to retrieve asset records, %s', str(ex)) + _logger.exception(ex, 'Failed to retrieve asset records') async def add_asset_record(self, *, asset, event, service, plugin, jsondata = {}): """ diff --git a/python/fledge/services/core/connect.py b/python/fledge/services/core/connect.py index 7305a61dfc..ed0915873e 100644 --- a/python/fledge/services/core/connect.py +++ b/python/fledge/services/core/connect.py @@ -4,19 +4,16 @@ # See: http://fledge-iot.readthedocs.io/ # FLEDGE_END - -from fledge.services.core.service_registry.service_registry import ServiceRegistry +from fledge.common.logger import FLCoreLogger from fledge.common.storage_client.storage_client import StorageClientAsync, ReadingsStorageClientAsync -from fledge.common import logger +from fledge.services.core.service_registry.service_registry import ServiceRegistry __author__ = "Ashish Jabble" __copyright__ = "Copyright (c) 2017 OSIsoft, LLC" __license__ = "Apache 2.0" __version__ = "${VERSION}" - -# _logger = logger.setup(__name__, level=20) -_logger = logger.setup(__name__) +_logger = FLCoreLogger().get_logger(__name__) # TODO: Needs refactoring or better way to allow global discovery in core process @@ -25,24 +22,21 @@ def get_storage_async(): try: services = ServiceRegistry.get(name="Fledge Storage") storage_svc = services[0] - _storage = StorageClientAsync(core_management_host=None, core_management_port=None, - svc=storage_svc) - # _logger.info(type(_storage)) + _storage = StorageClientAsync(core_management_host=None, core_management_port=None, svc=storage_svc) except Exception as ex: - _logger.exception(str(ex)) + _logger.error(ex) raise return _storage + # TODO: Needs refactoring or better way to allow global discovery in core process def get_readings_async(): """ Storage Object """ try: services = ServiceRegistry.get(name="Fledge Storage") storage_svc = services[0] - _readings = ReadingsStorageClientAsync(core_mgt_host=None, core_mgt_port=None, - svc=storage_svc) - # _logger.info(type(_storage)) + _readings = ReadingsStorageClientAsync(core_mgt_host=None, core_mgt_port=None, svc=storage_svc) except Exception as ex: - _logger.exception(str(ex)) + _logger.error(ex) raise return _readings diff --git a/python/fledge/services/core/interest_registry/change_callback.py b/python/fledge/services/core/interest_registry/change_callback.py index b5f54ed95f..eeb7e4c10c 100644 --- a/python/fledge/services/core/interest_registry/change_callback.py +++ b/python/fledge/services/core/interest_registry/change_callback.py @@ -64,7 +64,7 @@ async def run(category_name): if status_code in range(500, 600): _LOGGER.error("Server error code: %d, reason: %s", status_code, resp.reason) except Exception as ex: - _LOGGER.exception("Unable to notify microservice with uuid %s due to exception: %s", i._microservice_uuid, str(ex)) + _LOGGER.exception(ex, "Unable to notify microservice with uuid {}".format(i._microservice_uuid)) continue @@ -96,7 +96,8 @@ async def run_child_create(parent_category_name, 
child_category_list): try: service_record = ServiceRegistry.get(idx=i._microservice_uuid)[0] except service_registry_exceptions.DoesNotExist: - _LOGGER.exception("Unable to notify microservice with uuid %s as it is not found in the service registry", i._microservice_uuid) + _LOGGER.exception("Unable to notify microservice with uuid {} as it is not " + "found in the service registry".format(i._microservice_uuid)) continue url = "{}://{}:{}/fledge/child_create".format(service_record._protocol, service_record._address, service_record._management_port) @@ -110,9 +111,10 @@ async def run_child_create(parent_category_name, child_category_list): if status_code in range(500, 600): _LOGGER.error("Server error code: %d, reason: %s", status_code, resp.reason) except Exception as ex: - _LOGGER.exception("Unable to notify microservice with uuid %s due to exception: %s", i._microservice_uuid, str(ex)) + _LOGGER.exception(ex, "Unable to notify microservice with uuid {}".format(i._microservice_uuid)) continue + async def run_child_delete(parent_category_name, child_category): """ Call the child_delete Management API @@ -154,9 +156,10 @@ async def run_child_delete(parent_category_name, child_category): if status_code in range(500, 600): _LOGGER.error("Server error code: %d, reason: %s", status_code, resp.reason) except Exception as ex: - _LOGGER.exception("Unable to notify microservice with uuid %s due to exception: %s", i._microservice_uuid, str(ex)) + _LOGGER.exception(ex, "Unable to notify microservice with uuid {}".format(i._microservice_uuid)) continue + async def run_child(parent_category_name, child_category_list, operation): """ Callback run by configuration category to notify changes to interested microservices diff --git a/python/fledge/services/core/proxy.py b/python/fledge/services/core/proxy.py index 3285bd0e4e..8d4d40b478 100644 --- a/python/fledge/services/core/proxy.py +++ b/python/fledge/services/core/proxy.py @@ -5,12 +5,11 @@ # FLEDGE_END import json -import logging import urllib.parse import aiohttp from aiohttp import web -from fledge.common import logger +from fledge.common.logger import FLCoreLogger from fledge.services.core import server from fledge.services.core.service_registry.service_registry import ServiceRegistry from fledge.services.core.service_registry import exceptions as service_registry_exceptions @@ -20,7 +19,7 @@ __license__ = "Apache 2.0" __version__ = "${VERSION}" -_logger = logger.setup(__name__, level=logging.INFO) +_logger = FLCoreLogger().get_logger(__name__) def setup(app): diff --git a/python/fledge/services/core/routes.py b/python/fledge/services/core/routes.py index a135e3a525..b356008e63 100644 --- a/python/fledge/services/core/routes.py +++ b/python/fledge/services/core/routes.py @@ -5,38 +5,20 @@ # FLEDGE_END from fledge.services.core import proxy - -from fledge.services.core.api import auth +from fledge.services.core.api import asset_tracker, auth, backup_restore, browser, certificate_store, filters, health, notification, north, package_log, python_packages, south, support, service, task, update from fledge.services.core.api import audit as api_audit -from fledge.services.core.api import browser from fledge.services.core.api import common as api_common from fledge.services.core.api import configuration as api_configuration from fledge.services.core.api import scheduler as api_scheduler from fledge.services.core.api import statistics as api_statistics -from fledge.services.core.api import backup_restore -from fledge.services.core.api import update -from 
fledge.services.core.api import service -from fledge.services.core.api import certificate_store -from fledge.services.core.api import support -from fledge.services.core.api import task -from fledge.services.core.api import asset_tracker -from fledge.services.core.api import south -from fledge.services.core.api import north -from fledge.services.core.api import filters -from fledge.services.core.api import notification +from fledge.services.core.api.control_service import script_management, acl_management, pipeline, entrypoint from fledge.services.core.api.plugins import data as plugin_data from fledge.services.core.api.plugins import install as plugins_install, discovery as plugins_discovery from fledge.services.core.api.plugins import update as plugins_update from fledge.services.core.api.plugins import remove as plugins_remove +from fledge.services.core.api.repos import configure as configure_repo from fledge.services.core.api.snapshot import plugins as snapshot_plugins from fledge.services.core.api.snapshot import table as snapshot_table -from fledge.services.core.api import package_log -from fledge.services.core.api.repos import configure as configure_repo -from fledge.services.core.api.control_service import script_management -from fledge.services.core.api.control_service import acl_management -from fledge.services.core.api import python_packages -from fledge.services.core.api import health - __author__ = "Ashish Jabble, Praveen Garg, Massimiliano Pinto, Amarendra K Sinha" @@ -193,9 +175,15 @@ def setup(app): app.router.add_route('GET', '/fledge/plugins/installed', plugins_discovery.get_plugins_installed) app.router.add_route('GET', '/fledge/plugins/available', plugins_discovery.get_plugins_available) app.router.add_route('POST', '/fledge/plugins', plugins_install.add_plugin) - app.router.add_route('PUT', '/fledge/plugins/{type}/{name}/update', plugins_update.update_plugin) - app.router.add_route('DELETE', '/fledge/plugins/{type}/{name}', plugins_remove.remove_plugin) - + if api_common.get_version() <= "2.1.0": + """Note: This is only for to maintain the backward compatibility. 
(having core version<=2.1.0) + Plugin Update & Delete routes on the basis of type & installed name""" + app.router.add_route('PUT', '/fledge/plugins/{type}/{name}/update', plugins_update.update_plugin) + app.router.add_route('DELETE', '/fledge/plugins/{type}/{name}', plugins_remove.remove_plugin) + else: + # routes available 2.1.0 onwards + app.router.add_route('PUT', '/fledge/plugins/{package_name}', plugins_update.update_package) + app.router.add_route('DELETE', '/fledge/plugins/{package_name}', plugins_remove.remove_package) # plugin data app.router.add_route('GET', '/fledge/service/{service_name}/persist', plugin_data.get_persist_plugins) app.router.add_route('GET', '/fledge/service/{service_name}/plugin/{plugin_name}/data', plugin_data.get) @@ -259,6 +247,13 @@ def setup(app): app.router.add_route('PUT', '/fledge/service/{service_name}/ACL', acl_management.attach_acl_to_service) app.router.add_route('DELETE', '/fledge/service/{service_name}/ACL', acl_management.detach_acl_from_service) + # Control Pipelines + pipeline.setup(app) + + # Control Entrypoint + entrypoint.setup(app) + + # Python packages app.router.add_route('GET', '/fledge/python/packages', python_packages.get_packages) app.router.add_route('POST', '/fledge/python/package', python_packages.install_package) diff --git a/python/fledge/services/core/scheduler/scheduler.py b/python/fledge/services/core/scheduler/scheduler.py index 91ee7ba0ec..61a5d659ac 100644 --- a/python/fledge/services/core/scheduler/scheduler.py +++ b/python/fledge/services/core/scheduler/scheduler.py @@ -18,13 +18,13 @@ import signal from typing import List -from fledge.common import logger from fledge.common import utils as common_utils from fledge.common.audit_logger import AuditLogger +from fledge.common.configuration_manager import ConfigurationManager +from fledge.common.logger import FLCoreLogger from fledge.common.storage_client.exceptions import * from fledge.common.storage_client.payload_builder import PayloadBuilder from fledge.common.storage_client.storage_client import StorageClientAsync -from fledge.common.configuration_manager import ConfigurationManager from fledge.services.core.scheduler.entities import * from fledge.services.core.scheduler.exceptions import * from fledge.services.core.service_registry.service_registry import ServiceRegistry @@ -135,8 +135,7 @@ def __init__(self, core_management_host=None, core_management_port=None, is_safe # Initialize class attributes if not cls._logger: - cls._logger = logger.setup(__name__, level=logging.INFO) - # cls._logger = logger.setup(__name__, level=logging.DEBUG) + cls._logger = FLCoreLogger().get_logger(__name__) if not cls._core_management_port: cls._core_management_port = core_management_port if not cls._core_management_host: @@ -848,7 +847,7 @@ async def stop(self): try: await self._purge_tasks_task except Exception as ex: - self._logger.exception('An exception was raised by Scheduler._purge_tasks %s', str(ex)) + self._logger.exception(ex, 'An exception was raised by Scheduler._purge_tasks.') self._resume_check_schedules() @@ -856,7 +855,7 @@ async def stop(self): try: await self._scheduler_loop_task except Exception as ex: - self._logger.exception('An exception was raised by Scheduler._scheduler_loop %s', str(ex)) + self._logger.exception(ex, 'An exception was raised by Scheduler._scheduler_loop') self._scheduler_loop_task = None # Can not iterate over _task_processes - it can change mid-iteration @@ -1091,8 +1090,7 @@ async def save_schedule(self, schedule: Schedule, 
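Plugin update and delete routes are now registered conditionally on the core version: cores at or below 2.1.0 keep the type/name addressed endpoints, newer cores expose the package-name addressed ones. The same pattern in isolation against a bare aiohttp application (the handlers are stand-ins, not the Fledge ones):

    from aiohttp import web

    async def legacy_update(request):      # stand-in for plugins_update.update_plugin
        return web.json_response({"route": "legacy"})

    async def package_update(request):     # stand-in for plugins_update.update_package
        return web.json_response({"route": "package"})

    def setup_plugin_routes(app: web.Application, core_version: str):
        """Register whichever plugin update route matches the running core version."""
        if core_version <= "2.1.0":
            # backward compatible: addressed by plugin type and installed name
            app.router.add_route('PUT', '/fledge/plugins/{type}/{name}/update', legacy_update)
        else:
            # newer cores: addressed by the package name itself
            app.router.add_route('PUT', '/fledge/plugins/{package_name}', package_update)

    app = web.Application()
    setup_plugin_routes(app, "2.2.0")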
is_enabled_modified=None, dryr except Exception: self._logger.exception('Update failed: %s', update_payload) raise - audit = AuditLogger(self._storage_async) - await audit.information('SCHCH', {'schedule': schedule.toDict()}) + await self.audit_trail_entry(prev_schedule_row, schedule) else: insert_payload = PayloadBuilder() \ .INSERT(id=str(schedule.schedule_id), @@ -1238,6 +1236,7 @@ async def disable_schedule(self, schedule_id: uuid.UUID, bypass_check=None, reco return True, "Schedule {} already disabled".format(str(schedule_id)) # Disable Schedule - update the schedule in memory + prev_schedule_row = self._schedules[schedule_id] self._schedules[schedule_id] = self._schedules[schedule_id]._replace(enabled=False) # Update database @@ -1319,9 +1318,8 @@ async def disable_schedule(self, schedule_id: uuid.UUID, bypass_check=None, reco str(schedule_id), schedule.process_name) if record_audit_trail: - audit = AuditLogger(self._storage_async) - sch = await self.get_schedule(schedule_id) - await audit.information('SCHCH', {'schedule': sch.toDict()}) + new_schedule_row = await self.get_schedule(schedule_id) + await self.audit_trail_entry(prev_schedule_row, new_schedule_row) return True, "Schedule successfully disabled" async def enable_schedule(self, schedule_id: uuid.UUID, bypass_check=None, record_audit_trail=True): @@ -1345,6 +1343,7 @@ async def enable_schedule(self, schedule_id: uuid.UUID, bypass_check=None, recor return True, "Schedule is already enabled" # Enable Schedule + prev_schedule_row = self._schedules[schedule_id] self._schedules[schedule_id] = self._schedules[schedule_id]._replace(enabled=True) # Update database @@ -1371,9 +1370,8 @@ async def enable_schedule(self, schedule_id: uuid.UUID, bypass_check=None, recor str(schedule_id), schedule.process_name) if record_audit_trail: - audit = AuditLogger(self._storage_async) - sch = await self.get_schedule(schedule_id) - await audit.information('SCHCH', { 'schedule': sch.toDict() }) + new_schedule_row = await self.get_schedule(schedule_id) + await self.audit_trail_entry(prev_schedule_row, new_schedule_row) return True, "Schedule successfully enabled" async def queue_task(self, schedule_id: uuid.UUID, start_now=True) -> None: @@ -1633,3 +1631,19 @@ def extract_day_time_from_interval(self, str_interval): interval_time = datetime.datetime.strptime(interval_time, "%H:%M:%S") return int(interval_days), interval_time + + async def audit_trail_entry(self, old_row, new_row): + audit = AuditLogger(self._storage_async) + old_schedule = {"name": old_row.name, + 'type': old_row.type, + "processName": old_row.process_name, + "repeat": old_row.repeat.total_seconds() if old_row.repeat else 0, + "enabled": True if old_row.enabled else False, + "exclusive": True if old_row.exclusive else False + } + # Timed schedule KV pairs + if old_row.type == 2: + old_schedule["time"] = "{}:{}:{}".format(old_row.time.hour, old_row.time.minute, old_row.time.second + ) if old_row.time else '00:00:00' + old_schedule["day"] = old_row.day if old_row.day else 0 + await audit.information('SCHCH', {'schedule': new_row.toDict(), 'old_schedule': old_schedule}) diff --git a/python/fledge/services/core/server.py b/python/fledge/services/core/server.py index 618b298c53..9faa4d19b3 100755 --- a/python/fledge/services/core/server.py +++ b/python/fledge/services/core/server.py @@ -9,6 +9,7 @@ import asyncio import os +import logging import subprocess import sys import ssl @@ -24,11 +25,10 @@ from fledge.common import logger from fledge.common.audit_logger import AuditLogger from 
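The new audit_trail_entry helper records both the updated schedule and a snapshot of the previous row in the SCHCH entry. A sketch of the old-row serialisation on its own, using a plain dict in place of the scheduler's schedule row (type code 2 = TIMED, as in the diff):

    import datetime

    def old_schedule_snapshot(row: dict) -> dict:
        """Serialise a previous schedule row into the 'old_schedule' audit payload."""
        snapshot = {
            "name": row["name"],
            "type": row["type"],
            "processName": row["process_name"],
            "repeat": row["repeat"].total_seconds() if row.get("repeat") else 0,
            "enabled": bool(row.get("enabled")),
            "exclusive": bool(row.get("exclusive")),
        }
        if row["type"] == 2:   # TIMED schedules also carry a time of day and a day of week
            t = row.get("time")
            snapshot["time"] = "{}:{}:{}".format(t.hour, t.minute, t.second) if t else "00:00:00"
            snapshot["day"] = row.get("day") or 0
        return snapshot

    print(old_schedule_snapshot({"name": "purge", "type": 3, "process_name": "purge_c",
                                 "repeat": datetime.timedelta(hours=1), "enabled": True,
                                 "exclusive": True}))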
fledge.common.configuration_manager import ConfigurationManager - -from fledge.common.web import middleware from fledge.common.storage_client.exceptions import * from fledge.common.storage_client.storage_client import StorageClientAsync from fledge.common.storage_client.storage_client import ReadingsStorageClientAsync +from fledge.common.web import middleware from fledge.services.core import routes as admin_routes from fledge.services.core.api import configuration as conf_api @@ -56,7 +56,7 @@ __license__ = "Apache 2.0" __version__ = "${VERSION}" -_logger = logger.setup(__name__, level=20) +_logger = logger.setup(__name__, level=logging.INFO) # FLEDGE_ROOT env variable _FLEDGE_DATA = os.getenv("FLEDGE_DATA", default=None) @@ -64,7 +64,7 @@ _SCRIPTS_DIR = os.path.expanduser(_FLEDGE_ROOT + '/scripts') # PID dir and filename -_FLEDGE_PID_DIR= "/var/run" +_FLEDGE_PID_DIR = "/var/run" _FLEDGE_PID_FILE = "fledge.core.pid" @@ -149,7 +149,8 @@ class Server: 'type': 'string', 'default': 'Fledge', 'displayName': 'Name', - 'order': '1' + 'order': '1', + 'mandatory': "true" }, 'description': { 'description': 'Description of this Fledge service', @@ -279,6 +280,20 @@ class Server: }, } + _LOGGING_DEFAULT_CONFIG = { + 'logLevel': { + 'description': 'Minimum logging level reported for Core server', + 'type': 'enumeration', + 'displayName': 'Minimum Log Level', + 'options': ['debug', 'info', 'warning', 'error', 'critical'], + 'default': 'warning', + 'order': '1' + } + } + + _log_level = _LOGGING_DEFAULT_CONFIG['logLevel']['default'] + """ Common logging level for Core """ + _start_time = time.time() """ Start time of core process """ @@ -465,7 +480,7 @@ async def rest_api_config(cls): port_from_config, type(port_from_config)) raise except Exception as ex: - _logger.exception(str(ex)) + _logger.exception(ex) raise @classmethod @@ -491,7 +506,7 @@ async def service_config(cls): except KeyError: cls._service_description = 'Fledge REST Services' except Exception as ex: - _logger.exception(str(ex)) + _logger.exception(ex) raise @classmethod @@ -512,7 +527,26 @@ async def installation_config(cls): cls._package_cache_manager = {"update": {"last_accessed_time": ""}, "upgrade": {"last_accessed_time": ""}, "list": {"last_accessed_time": ""}} except Exception as ex: - _logger.exception(str(ex)) + _logger.exception(ex) + raise + + @classmethod + async def core_logger_setup(cls): + """ Get the logging level configuration """ + try: + config = cls._LOGGING_DEFAULT_CONFIG + category = 'LOGGING' + description = "Logging Level of Core Server" + if cls._configuration_manager is None: + cls._configuration_manager = ConfigurationManager(cls._storage_client_async) + await cls._configuration_manager.create_category(category, config, description, True, + display_name='Logging') + config = await cls._configuration_manager.get_category_all_items(category) + cls._log_level = config['logLevel']['value'] + from fledge.common.logger import FLCoreLogger + FLCoreLogger().set_level(cls._log_level) + except Exception as ex: + _logger.exception(ex) raise @staticmethod @@ -536,6 +570,8 @@ def _make_app(auth_required=True, auth_method='any'): mwares.append(middleware.auth_middleware) app = web.Application(middlewares=mwares, client_max_size=AIOHTTP_CLIENT_MAX_SIZE) + # aiohttp web server logging level always set to warning + web.access_logger.setLevel(logging.WARNING) admin_routes.setup(app) return app @@ -546,6 +582,8 @@ def _make_core_app(cls): :rtype: web.Application """ app = web.Application(middlewares=[middleware.error_middleware], 
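core_logger_setup above creates the LOGGING category, reads its logLevel value and pushes it into FLCoreLogger. The underlying mapping from the enumeration options to stdlib logging levels can be sketched generically; this uses plain logging and is not the FLCoreLogger implementation:

    import logging

    _LEVELS = {'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING,
               'error': logging.ERROR, 'critical': logging.CRITICAL}

    def apply_core_log_level(category: dict, logger_name: str = "fledge.core"):
        """Apply the configured minimum level, falling back to the category default of warning."""
        level_name = category.get('logLevel', {}).get('value', 'warning')
        logging.getLogger(logger_name).setLevel(_LEVELS.get(level_name, logging.WARNING))

    apply_core_log_level({'logLevel': {'value': 'info'}})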
client_max_size=AIOHTTP_CLIENT_MAX_SIZE) + # aiohttp web server logging level always set to warning + web.access_logger.setLevel(logging.WARNING) management_routes.setup(app, cls, True) return app @@ -571,19 +609,19 @@ async def _start_scheduler(cls): @staticmethod def __start_storage(host, m_port): - _logger.info("Start storage, from directory %s", _SCRIPTS_DIR) + _logger.info("Start storage, from directory {}".format(_SCRIPTS_DIR)) try: cmd_with_args = ['./services/storage', '--address={}'.format(host), '--port={}'.format(m_port)] subprocess.call(cmd_with_args, cwd=_SCRIPTS_DIR) except Exception as ex: - _logger.exception(str(ex)) + _logger.exception(ex) @classmethod async def _start_storage(cls, loop): if loop is None: loop = asyncio.get_event_loop() - # callback with args + # callback with args loop.call_soon(cls.__start_storage, cls._host, cls.core_management_port) @classmethod @@ -626,7 +664,7 @@ def pid_filename(): def _pidfile_exists(cls): """ Check whether the PID file exists """ try: - fh = open(cls._pidfile,'r') + fh = open(cls._pidfile, 'r') fh.close() return True except (FileNotFoundError, IOError, TypeError): @@ -675,11 +713,11 @@ def _write_pid(cls, api_address, api_port): raise # Build the JSON object to write into PID file - info_data = {'processID' : pid,\ - 'adminAPI' : {\ + info_data = {'processID': pid,\ + 'adminAPI': {\ "protocol": "HTTP" if cls.is_rest_server_http_enabled else "HTTPS",\ "addresses": [api_address],\ - "port": api_port }\ + "port": api_port}\ } # Write data into PID file @@ -762,7 +800,7 @@ async def _config_parents(cls): # Create the parent category for all advanced configuration categories try: await cls._configuration_manager.create_category("Advanced", {}, 'Advanced', True) - await cls._configuration_manager.create_child_category("Advanced", ["SMNTR", "SCHEDULER"]) + await cls._configuration_manager.create_child_category("Advanced", ["SMNTR", "SCHEDULER", "LOGGING"]) except KeyError: _logger.error('Failed to create Advanced parent configuration category for service') raise @@ -807,6 +845,9 @@ def _start_core(cls, loop=None): cls._configuration_manager = ConfigurationManager(cls._storage_client_async) cls._interest_registry = InterestRegistry(cls._configuration_manager) + # Logging category + loop.run_until_complete(cls.core_logger_setup()) + # start scheduler # see scheduler.py start def FIXME # scheduler on start will wait for storage service registration @@ -898,8 +939,8 @@ def _start_core(cls, loop=None): # dryrun execution of all the tasks that are installed but have schedule type other than STARTUP schedule_list = loop.run_until_complete(cls.scheduler.get_schedules()) for sch in schedule_list: - # STARTUP type exclusion - if int(sch.schedule_type) != 1: + # STARTUP type schedules and special FledgeUpdater schedule process name exclusion to avoid dryrun + if int(sch.schedule_type) != 1 and sch.process_name != "FledgeUpdater": schedule_row = cls.scheduler._ScheduleRow( id=sch.schedule_id, name=sch.name, @@ -1038,7 +1079,7 @@ async def stop_microservices(cls): except service_registry_exceptions.DoesNotExist: pass except Exception as ex: - _logger.exception(str(ex)) + _logger.exception(ex) @classmethod async def _request_microservice_shutdown(cls, svc): @@ -1071,7 +1112,7 @@ def get_process_id(name): """Return process ids found by (partial) name or regex.""" child = subprocess.Popen(['pgrep', '-f', 'name={}'.format(name)], stdout=subprocess.PIPE, shell=False) response = child.communicate()[0] - return [int(pid) for pid in response.split()] + 
return [int(_pid) for _pid in response.split()] try: shutdown_threshold = 0 @@ -1103,7 +1144,7 @@ def get_process_id(name): except service_registry_exceptions.DoesNotExist: pass except Exception as ex: - _logger.exception(str(ex)) + _logger.exception(ex) @classmethod async def _stop_scheduler(cls): @@ -1155,7 +1196,7 @@ async def register(cls, request): if token is None and ServiceRegistry.getStartupToken(service_name) is not None: raise web.HTTPBadRequest(body=json.dumps({"message": 'Required registration token is missing.'})) - + # If token, then check single use token verification; if bad then return 4XX if token is not None: if not isinstance(token, str): @@ -1164,7 +1205,7 @@ async def register(cls, request): # Check startup token exists if ServiceRegistry.checkStartupToken(service_name, token) == False: - msg = 'Token for the service was not found' + msg = 'Token for the service was not found' raise web.HTTPBadRequest(reason=msg) try: @@ -1198,15 +1239,13 @@ async def register(cls, request): # Add public token claims claims = { 'aud': service_type, - 'sub' : service_name, - 'iss' : SERVICE_JWT_AUDIENCE, + 'sub': service_name, + 'iss': SERVICE_JWT_AUDIENCE, 'exp': exp } # Create JWT token - bearer_token = jwt.encode(claims, - SERVICE_JWT_SECRET, - SERVICE_JWT_ALGORITHM).decode("utf-8") if token is not None else "" + bearer_token = jwt.encode(claims, SERVICE_JWT_SECRET, SERVICE_JWT_ALGORITHM) if token is not None else "" # Add the bearer token for that service being registered ServiceRegistry.addBearerToken(service_name, bearer_token) @@ -1215,7 +1254,7 @@ async def register(cls, request): _response = { 'id': registered_service_id, 'message': "Service registered successfully", - 'bearer_token' : bearer_token + 'bearer_token': bearer_token } _logger.debug("For service: {} SERVER RESPONSE: {}".format(service_name, _response)) @@ -1249,7 +1288,7 @@ async def unregister(cls, request): cls._audit = AuditLogger(cls._storage_client_async) await cls._audit.information('SRVUN', {'name': services[0]._name}) except Exception as ex: - _logger.exception(str(ex)) + _logger.exception(ex) _resp = {'id': str(service_id), 'message': 'Service unregistered'} @@ -1280,7 +1319,7 @@ async def restart_service(cls, request): cls._audit = AuditLogger(cls._storage_client_async) await cls._audit.information('SRVRS', {'name': services[0]._name}) except Exception as ex: - _logger.exception(str(ex)) + _logger.exception(ex) _resp = {'id': str(service_id), 'message': 'Service restart requested'} @@ -1291,7 +1330,6 @@ async def restart_service(cls, request): msg = str(ex) raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) - @classmethod async def get_service(cls, request): """ Returns a list of all services or as per name &|| type filter @@ -1346,13 +1384,13 @@ async def get_service(cls, request): async def get_auth_token(cls, request: web.Request) -> web.Response: """ get auth token :Example: - curl -sX GET -H "{'Authorization': 'Bearer ..'}" http://localhost:/fledge/service/authtoken + curl -sX GET -H "{'Authorization': 'Bearer ..'}" http://localhost:/fledge/service/authtoken """ async def cert_login(ca_cert): certs_dir = _FLEDGE_DATA + '/etc/certs' if _FLEDGE_DATA else _FLEDGE_ROOT + "/data/etc/certs" ca_cert_file = "{}/{}.cert".format(certs_dir, ca_cert) SSLVerifier.set_ca_cert(ca_cert_file) - # FIXME: allow to supply content and any cert name as placed with configured CA sign + # FIXME: allow to supply content and any cert name as placed with configured CA sign with 
open('{}/{}'.format(certs_dir, "admin.cert"), 'r') as content_file: cert_content = content_file.read() SSLVerifier.set_user_cert(cert_content) @@ -1365,13 +1403,13 @@ async def cert_login(ca_cert): cfg_mgr = ConfigurationManager(cls._storage_client_async) category_info = await cfg_mgr.get_category_all_items('rest_api') is_auth_optional = True if category_info['authentication']['value'].lower() == 'optional' else False - + if is_auth_optional: raise api_exception.AuthenticationIsOptional - + auth_method = category_info['authMethod']['value'] - ca_cert_name = category_info['authCertificateName']['value'] - + ca_cert_name = category_info['authCertificateName']['value'] + try: auth_header = request.headers.get('Authorization', None) except: @@ -1465,7 +1503,7 @@ async def restart(cls, request): await asyncio.sleep(2.0, loop=loop) _logger.info("Stopping the Fledge Core event loop. Good Bye!") loop.stop() - + if 'safe-mode' in sys.argv: sys.argv.remove('safe-mode') sys.argv.append('') @@ -1642,7 +1680,7 @@ async def add_track(cls, request): if not isinstance(data, dict): raise ValueError('Data payload must be a dictionary') - jsondata=data.get("data") + jsondata = data.get("data") try: if jsondata is None: @@ -1657,7 +1695,7 @@ async def add_track(cls, request): service=data.get("service"), event=data.get("event"), jsondata=data.get("data")) - + except (TypeError, StorageServerError) as ex: raise web.HTTPBadRequest(reason=str(ex)) except ValueError as ex: @@ -1673,7 +1711,6 @@ async def enable_disable_schedule(cls, request: web.Request) -> web.Response: try: schedule_id = request.match_info.get('schedule_id', None) is_enabled = data.get('value', False) - _logger.exception("{} is_enabled: {}".format(cls.core_management_port, is_enabled)) if is_enabled: status, reason = await cls.scheduler.enable_schedule(uuid.UUID(schedule_id)) else: @@ -1764,9 +1801,9 @@ async def add_audit(cls, request): raise ValueError('Data payload must be a dictionary') try: - code=data.get("source") - level=data.get("severity") - message=data.get("details") + code = data.get("source") + level = data.get("severity") + message = data.get("details") # Add audit entry code and message for the given level await getattr(cls._audit, str(level).lower())(code, message) @@ -1778,7 +1815,7 @@ async def add_audit(cls, request): message = {'timestamp': str(timestamp), 'source': code, 'severity': level, - 'details': message + 'details': message } except (TypeError, StorageServerError) as ex: @@ -1815,13 +1852,11 @@ def validate_token(cls, token): """ Validate service bearer token """ try: - ret = jwt.decode(token, - SERVICE_JWT_SECRET, - algorithms=[SERVICE_JWT_ALGORITHM], - options = {"verify_signature": True, "verify_aud": False, "verify_exp": True}) + ret = jwt.decode(token, SERVICE_JWT_SECRET, algorithms=[SERVICE_JWT_ALGORITHM], + options={"verify_signature": True, "verify_aud": False, "verify_exp": True}) return ret except Exception as e: - return { 'error' : str(e) } + return {'error': str(e)} @classmethod async def refresh_token(cls, request): @@ -1841,14 +1876,12 @@ async def refresh_token(cls, request): try: claims = cls.get_token_common(request) # Expiration set to now + delta - claims['exp'] = int(time.time()) + SERVICE_JWT_EXP_DELTA_SECONDS - bearer_token = jwt.encode(claims, - SERVICE_JWT_SECRET, - SERVICE_JWT_ALGORITHM).decode("utf-8") + claims['exp'] = int(time.time()) + SERVICE_JWT_EXP_DELTA_SECONDS + bearer_token = jwt.encode(claims, SERVICE_JWT_SECRET, SERVICE_JWT_ALGORITHM) # Replace bearer_token for the service 
ServiceRegistry.addBearerToken(claims['sub'], bearer_token) - ret = {'bearer_token' : bearer_token} + ret = {'bearer_token': bearer_token} return web.json_response(ret) @@ -1909,7 +1942,7 @@ def get_token_common(cls, request): if not "Bearer " in auth_header: msg = "Invalid Authorization token" - # FIXME: raise UNAUTHORISED here and among other places + # FIXME: raise UNAUTHORISED here and among other places # and JSON body to have message key raise web.HTTPBadRequest(reason=msg, body=json.dumps({"error": msg})) diff --git a/python/fledge/services/core/snapshot.py b/python/fledge/services/core/snapshot.py index a72d9fdc9b..a9ebc4cab2 100644 --- a/python/fledge/services/core/snapshot.py +++ b/python/fledge/services/core/snapshot.py @@ -15,8 +15,8 @@ import time from collections import OrderedDict -from fledge.common import logger from fledge.common.common import _FLEDGE_ROOT +from fledge.common.logger import FLCoreLogger __author__ = "Amarendra K Sinha" @@ -24,9 +24,9 @@ __license__ = "Apache 2.0" __version__ = "${VERSION}" -_LOGGER = logger.setup(__name__) _NO_OF_FILES_TO_RETAIN = 3 SNAPSHOT_PREFIX = "snapshot-plugin" +_LOGGER = FLCoreLogger().get_logger(__name__) class SnapshotPluginBuilder: @@ -44,7 +44,7 @@ def __init__(self, snapshot_plugin_dir): self._out_file_path = snapshot_plugin_dir self._interim_file_path = snapshot_plugin_dir except (OSError, Exception) as ex: - _LOGGER.error("Error in initializing SnapshotPluginBuilder class: %s ", str(ex)) + _LOGGER.error(ex, "Error in initializing SnapshotPluginBuilder class.") raise RuntimeError(str(ex)) async def build(self): @@ -76,7 +76,7 @@ def reset(tarinfo): except Exception as ex: if os.path.isfile(tar_file_name): os.remove(tar_file_name) - _LOGGER.error("Error in creating Snapshot .tar.gz file: %s ", str(ex)) + _LOGGER.error(ex, "Error in creating Snapshot .tar.gz file.") raise RuntimeError(str(ex)) self.check_and_delete_temp_files(self._interim_file_path) @@ -98,7 +98,7 @@ def check_and_delete_plugins_tar_files(self, snapshot_plugin_dir): _LOGGER.warning("Removing plugin snapshot file %s.", _path) os.remove(_path) except OSError as ex: - _LOGGER.error("ERROR while deleting plugin file", str(ex)) + _LOGGER.error(ex, "ERROR while deleting plugin file.") def check_and_delete_temp_files(self, snapshot_plugin_dir): # Delete all non *.tar.gz files diff --git a/python/fledge/services/core/support.py b/python/fledge/services/core/support.py index 7b84d2ca3f..f026f80750 100644 --- a/python/fledge/services/core/support.py +++ b/python/fledge/services/core/support.py @@ -6,10 +6,7 @@ """ Provides utility functions to build a Fledge Support bundle. 
""" - -import logging import datetime -import platform import os from os.path import basename import glob @@ -19,13 +16,16 @@ import tarfile import fnmatch import subprocess -from fledge.services.core.connect import * -from fledge.common import logger + +from fledge.common import utils from fledge.common.common import _FLEDGE_ROOT, _FLEDGE_DATA from fledge.common.configuration_manager import ConfigurationManager +from fledge.common.logger import FLCoreLogger from fledge.common.plugin_discovery import PluginDiscovery from fledge.common.storage_client import payload_builder +from fledge.services.core.api.python_packages import get_packages_installed from fledge.services.core.api.service import get_service_records, get_service_installed +from fledge.services.core.connect import * __author__ = "Amarendra K Sinha" @@ -33,14 +33,12 @@ __license__ = "Apache 2.0" __version__ = "${VERSION}" -_LOGGER = logger.setup(__name__, level=logging.INFO) +_LOGGER = FLCoreLogger().get_logger(__name__) + _NO_OF_FILES_TO_RETAIN = 3 -_SYSLOG_FILE = '/var/log/syslog' +_SYSLOG_FILE = '/var/log/messages' if utils.is_redhat_based() else '/var/log/syslog' _PATH = _FLEDGE_DATA if _FLEDGE_DATA else _FLEDGE_ROOT + '/data' -if ('centos' in platform.platform()) or ('redhat' in platform.platform()): - _SYSLOG_FILE = '/var/log/messages' - class SupportBuilder: @@ -59,12 +57,12 @@ def __init__(self, support_dir): self._interim_file_path = support_dir self._storage = get_storage_async() # from fledge.services.core.connect except (OSError, Exception) as ex: - _LOGGER.error("Error in initializing SupportBuilder class: %s ", str(ex)) + _LOGGER.error(ex, "Error in initializing SupportBuilder class.") raise RuntimeError(str(ex)) async def build(self): try: - today = datetime.datetime.now() + today = datetime.datetime.utcnow() file_spec = today.strftime('%y%m%d-%H-%M-%S') tar_file_name = self._out_file_path+"/"+"support-{}.tar.gz".format(file_spec) pyz = tarfile.open(tar_file_name, "w:gz") @@ -72,6 +70,7 @@ async def build(self): await self.add_fledge_version_and_schema(pyz) self.add_syslog_fledge(pyz, file_spec) self.add_syslog_storage(pyz, file_spec) + self.add_syslog_utility(pyz) cf_mgr = ConfigurationManager(self._storage) try: south_cat = await cf_mgr.get_category_child("South") @@ -88,10 +87,11 @@ async def build(self): self.add_syslog_service(pyz, file_spec, task) except: pass - await self.add_table_configuration(pyz, file_spec) - await self.add_table_audit_log(pyz, file_spec) - await self.add_table_schedules(pyz, file_spec) - await self.add_table_scheduled_processes(pyz, file_spec) + db_tables = {"configuration": "category", "log": "audit", "schedules": "schedule", + "scheduled_processes": "schedule-process", "monitors": "service-monitoring", + "statistics": "statistics"} + for tbl_name, file_name in sorted(db_tables.items()): + await self.add_db_content(pyz, file_spec, tbl_name, file_name) await self.add_table_statistics_history(pyz, file_spec) await self.add_table_plugin_data(pyz, file_spec) await self.add_table_streams(pyz, file_spec) @@ -101,10 +101,11 @@ async def build(self): self.add_script_dir_content(pyz) self.add_package_log_dir_content(pyz) self.add_software_list(pyz, file_spec) + self.add_python_packages_list(pyz, file_spec) finally: pyz.close() except Exception as ex: - _LOGGER.error("Error in creating Support .tar.gz file: %s ", str(ex)) + _LOGGER.error(ex, "Error in creating Support .tar.gz file.") raise RuntimeError(str(ex)) self.check_and_delete_temp_files(self._interim_file_path) @@ -143,7 +144,7 @@ def 
add_syslog_fledge(self, pyz, file_spec): subprocess.call("grep -a '{}' {} > {}".format("Fledge", _SYSLOG_FILE, temp_file), shell=True) except OSError as ex: raise RuntimeError("Error in creating {}. Error-{}".format(temp_file, str(ex))) - pyz.add(temp_file, arcname=basename(temp_file)) + pyz.add(temp_file, arcname='logs/sys/{}'.format(basename(temp_file))) def add_syslog_storage(self, pyz, file_spec): # The contents of the syslog file that relate to the database layer (postgres) @@ -152,7 +153,7 @@ def add_syslog_storage(self, pyz, file_spec): subprocess.call("grep -a '{}' {} > {}".format("Fledge Storage", _SYSLOG_FILE, temp_file), shell=True) except OSError as ex: raise RuntimeError("Error in creating {}. Error-{}".format(temp_file, str(ex))) - pyz.add(temp_file, arcname=basename(temp_file)) + pyz.add(temp_file, arcname='logs/sys/{}'.format(basename(temp_file))) def add_syslog_service(self, pyz, file_spec, service): # The fledge entries from the syslog file for a service or task @@ -161,31 +162,20 @@ def add_syslog_service(self, pyz, file_spec, service): temp_file = self._interim_file_path + "/" + "syslog-{}-{}".format(tmp_svc, file_spec) try: subprocess.call("grep -a -E '(Fledge {})\[' {} > {}".format(service, _SYSLOG_FILE, temp_file), shell=True) - pyz.add(temp_file, arcname=basename(temp_file)) + pyz.add(temp_file, arcname='logs/sys/{}'.format(basename(temp_file))) except Exception as ex: raise RuntimeError("Error in creating {}. Error-{}".format(temp_file, str(ex))) - async def add_table_configuration(self, pyz, file_spec): - # The contents of the configuration table from the storage layer - temp_file = self._interim_file_path + "/" + "configuration-{}".format(file_spec) - data = await self._storage.query_tbl("configuration") - self.write_to_tar(pyz, temp_file, data) - - async def add_table_audit_log(self, pyz, file_spec): - # The contents of the audit log from the storage layer - temp_file = self._interim_file_path + "/" + "audit-{}".format(file_spec) - data = await self._storage.query_tbl("log") - self.write_to_tar(pyz, temp_file, data) + def add_syslog_utility(self, pyz): + # syslog utility files + for filename in os.listdir("/tmp"): + if filename.startswith("fl_syslog"): + temp_file = "/tmp/{}".format(filename) + pyz.add(temp_file, arcname='logs/sys/{}'.format(filename)) - async def add_table_schedules(self, pyz, file_spec): - # The contents of the schedules table from the storage layer - temp_file = self._interim_file_path + "/" + "schedules-{}".format(file_spec) - data = await self._storage.query_tbl("schedules") - self.write_to_tar(pyz, temp_file, data) - - async def add_table_scheduled_processes(self, pyz, file_spec): - temp_file = self._interim_file_path + "/" + "scheduled_processes-{}".format(file_spec) - data = await self._storage.query_tbl("scheduled_processes") + async def add_db_content(self, pyz, file_spec, tbl_name, file_name): + temp_file = "{}/{}-{}".format(self._interim_file_path, file_name, file_spec) + data = await self._storage.query_tbl(tbl_name) self.write_to_tar(pyz, temp_file, data) async def add_table_statistics_history(self, pyz, file_spec): @@ -286,7 +276,7 @@ def add_package_log_dir_content(self, pyz): script_package_logs_path = _PATH + '/logs' if os.path.exists(script_package_logs_path): # recursively 'true' by default and __pycache__ dir excluded - pyz.add(script_package_logs_path, arcname='package_logs', filter=self.exclude_pycache) + pyz.add(script_package_logs_path, arcname='logs/package', filter=self.exclude_pycache) def add_software_list(self, pyz, 
file_spec) -> None: data = { @@ -296,5 +286,10 @@ def add_software_list(self, pyz, file_spec) -> None: temp_file = self._interim_file_path + "/" + "software-{}".format(file_spec) self.write_to_tar(pyz, temp_file, data) + def add_python_packages_list(self, pyz, file_spec) -> None: + data = {'packages': get_packages_installed()} + temp_file = self._interim_file_path + "/" + "python-packages-{}".format(file_spec) + self.write_to_tar(pyz, temp_file, data) + def exclude_pycache(self, tar_info): return None if '__pycache__' in tar_info.name else tar_info diff --git a/python/fledge/services/core/user_model.py b/python/fledge/services/core/user_model.py index 289526766c..8575264639 100644 --- a/python/fledge/services/core/user_model.py +++ b/python/fledge/services/core/user_model.py @@ -4,21 +4,21 @@ # See: http://fledge-iot.readthedocs.io/ # FLEDGE_END -""" Fledge user entity class with CRUD operations to Storage layer - -""" +"""Fledge user entity class with CRUD operations to Storage layer""" +import json import uuid import hashlib from datetime import datetime, timedelta import jwt -from fledge.services.core import connect +from fledge.common.audit_logger import AuditLogger +from fledge.common.common import _FLEDGE_ROOT, _FLEDGE_DATA +from fledge.common.configuration_manager import ConfigurationManager +from fledge.common.logger import FLCoreLogger from fledge.common.storage_client.payload_builder import PayloadBuilder from fledge.common.storage_client.exceptions import StorageServerError -from fledge.common.configuration_manager import ConfigurationManager -from fledge.common import logger from fledge.common.web.ssl_wrapper import SSLVerifier -from fledge.common.common import _FLEDGE_ROOT, _FLEDGE_DATA +from fledge.services.core import connect __author__ = "Praveen Garg, Ashish Jabble, Amarendra K Sinha" __copyright__ = "Copyright (c) 2017 OSIsoft, LLC" @@ -32,7 +32,7 @@ ERROR_MSG = 'Something went wrong' USED_PASSWORD_HISTORY_COUNT = 3 -_logger = logger.setup(__name__) +_logger = FLCoreLogger().get_logger(__name__) class User: @@ -110,6 +110,12 @@ async def create(cls, username, password, role_id, access_method='any', real_nam description=description).payload() try: result = await storage_client.insert_into_tbl("users", payload) + # USRAD audit trail entry + audit = AuditLogger(storage_client) + audit_details = json.loads(payload) + audit_details.pop('pwd', None) + audit_details['message'] = "'{}' username created for '{}' user.".format(username, real_name) + await audit.information('USRAD', audit_details) except StorageServerError as ex: if ex.error["retryable"]: pass # retry INSERT @@ -135,8 +141,13 @@ async def delete(cls, user_id): # first delete the active login references await cls.delete_user_tokens(user_id) - payload = PayloadBuilder().SET(enabled="f").WHERE(['id', '=', user_id]).AND_WHERE(['enabled', '=', 't']).payload() + payload = PayloadBuilder().SET(enabled="f").WHERE(['id', '=', user_id]).AND_WHERE( + ['enabled', '=', 't']).payload() result = await storage_client.update_tbl("users", payload) + # USRDL audit trail entry + audit = AuditLogger(storage_client) + await audit.information( + 'USRDL', {"user_id": user_id, "message": "User ID: <{}> has been disabled.".format(user_id)}) except StorageServerError as ex: if ex.error["retryable"]: pass # retry INSERT @@ -155,29 +166,35 @@ async def update(cls, user_id, user_data): """ if not user_data: return False - kwargs = dict() + old_data = await cls.get(uid=user_id) + new_kwargs = {} + old_kwargs = {} if 'access_method' in user_data: 
- kwargs.update({"access_method": user_data['access_method']}) + old_kwargs["access_method"] = old_data['access_method'] + new_kwargs.update({"access_method": user_data['access_method']}) if 'real_name' in user_data: - kwargs.update({"real_name": user_data['real_name']}) + old_kwargs["real_name"] = old_data['real_name'] + new_kwargs.update({"real_name": user_data['real_name']}) if 'description' in user_data: - kwargs.update({"description": user_data['description']}) + old_kwargs["description"] = old_data['description'] + new_kwargs.update({"description": user_data['description']}) if 'role_id' in user_data: - kwargs.update({"role_id": user_data['role_id']}) + old_kwargs["role_id"] = old_data['role_id'] + new_kwargs.update({"role_id": user_data['role_id']}) storage_client = connect.get_storage_async() - hashed_pwd = None pwd_history_list = [] if 'password' in user_data: if len(user_data['password']): hashed_pwd = cls.hash_password(user_data['password']) current_datetime = datetime.now() - kwargs.update({"pwd": hashed_pwd, "pwd_last_changed": str(current_datetime)}) + old_kwargs["pwd"] = "****" + new_kwargs.update({"pwd": hashed_pwd, "pwd_last_changed": str(current_datetime)}) # get password history list pwd_history_list = await cls._get_password_history(storage_client, user_id, user_data) try: - payload = PayloadBuilder().SET(**kwargs).WHERE(['id', '=', user_id]).AND_WHERE( + payload = PayloadBuilder().SET(**new_kwargs).WHERE(['id', '=', user_id]).AND_WHERE( ['enabled', '=', 't']).payload() result = await storage_client.update_tbl("users", payload) if result['rows_affected']: @@ -191,6 +208,14 @@ async def update(cls, user_id, user_data): await cls._insert_pwd_history_with_oldest_pwd_deletion_if_count_exceeds( storage_client, user_id, hashed_pwd, pwd_history_list) + # USRCH audit trail entry + audit = AuditLogger(storage_client) + if 'pwd' in new_kwargs: + new_kwargs['pwd'] = "Password has been updated." 
+ new_kwargs.pop('pwd_last_changed', None) + await audit.information( + 'USRCH', {'user_id': user_id, 'old_value': old_kwargs, 'new_value': new_kwargs, + "message": "'{}' user has been changed.".format(old_data['uname'])}) return True except StorageServerError as ex: if ex.error["retryable"]: @@ -216,9 +241,7 @@ async def is_user_exists(cls, uid, password): @classmethod async def all(cls): storage_client = connect.get_storage_async() - payload = PayloadBuilder().SELECT("id", "uname", "role_id", "access_method", "real_name", - "description").WHERE(['enabled', '=', 't']).payload() - result = await storage_client.query_tbl_with_payload('users', payload) + result = await storage_client.query_tbl('users') return result['rows'] @classmethod @@ -356,7 +379,7 @@ async def _get_new_token(cls, storage_client, found_user, host): exp = datetime.now() + timedelta(seconds=JWT_EXP_DELTA_SECONDS) uid = found_user['id'] p = {'uid': uid, 'exp': exp} - jwt_token = jwt.encode(p, JWT_SECRET, JWT_ALGORITHM).decode("utf-8") + jwt_token = jwt.encode(p, JWT_SECRET, JWT_ALGORITHM) payload = PayloadBuilder().INSERT(user_id=p['uid'], token=jwt_token, token_expiration=str(exp), ip=host).payload() diff --git a/python/fledge/services/south/ingest.py b/python/fledge/services/south/ingest.py index 36d6ddc56d..bae967c3b6 100644 --- a/python/fledge/services/south/ingest.py +++ b/python/fledge/services/south/ingest.py @@ -382,7 +382,7 @@ async def _insert_readings(cls): except Exception as ex: attempt += 1 - _LOGGER.exception('Insert failed on attempt #%s, list index: %s | %s', attempt, list_index, str(ex)) + _LOGGER.exception(ex, 'Insert failed on attempt #{}, list index: {}'.format(attempt, list_index)) if cls._stop or attempt >= _MAX_ATTEMPTS: # Stopping. Discard the entire list upon failure. @@ -432,7 +432,7 @@ async def _write_statistics(cls): cls._discarded_readings_stats += discarded_readings for key in sensor_readings: cls._sensor_stats[key] += sensor_readings[key] - _LOGGER.exception('An error occurred while writing sensor statistics, Error: %s', str(ex)) + _LOGGER.exception(ex, 'An error occurred while writing sensor statistics') @classmethod def is_available(cls) -> bool: diff --git a/python/fledge/tasks/automation_script/__main__.py b/python/fledge/tasks/automation_script/__main__.py index 3d6b35c35b..a789f3ddd8 100755 --- a/python/fledge/tasks/automation_script/__main__.py +++ b/python/fledge/tasks/automation_script/__main__.py @@ -8,12 +8,11 @@ """Automation script starter""" import sys -import logging import json import http.client import argparse -from fledge.common import logger +from fledge.common.logger import FLCoreLogger __author__ = "Ashish Jabble" __copyright__ = "Copyright (c) 2022 Dianomic Systems Inc." 
@@ -22,7 +21,7 @@ if __name__ == '__main__': - _logger = logger.setup("Automation Script", level=logging.INFO) + _logger = FLCoreLogger().get_logger("Control Script") parser = argparse.ArgumentParser() parser.add_argument("--name", required=True) parser.add_argument("--address", required=True) diff --git a/python/fledge/tasks/north/sending_process.py b/python/fledge/tasks/north/sending_process.py index e49fbb3ac7..90c02adb4b 100644 --- a/python/fledge/tasks/north/sending_process.py +++ b/python/fledge/tasks/north/sending_process.py @@ -33,8 +33,8 @@ from fledge.common import statistics from fledge.common.jqfilter import JQFilter from fledge.common.audit_logger import AuditLogger +from fledge.common.logger import FLCoreLogger from fledge.common.process import FledgeProcess -from fledge.common import logger from fledge.common.common import _FLEDGE_ROOT from fledge.services.core.api.plugins import common @@ -93,7 +93,7 @@ } """ Messages used for Information, Warning and Error notice """ -_LOGGER = logger.setup(__name__) +_LOGGER = FLCoreLogger().get_logger(__name__) _event_loop = "" _log_performance = False """ Enable/Disable performance logging, enabled using a command line parameter""" diff --git a/python/fledge/tasks/purge/__main__.py b/python/fledge/tasks/purge/__main__.py index 6168c060cd..6c343aa266 100755 --- a/python/fledge/tasks/purge/__main__.py +++ b/python/fledge/tasks/purge/__main__.py @@ -8,8 +8,9 @@ """Purge process starter""" import asyncio +from fledge.common.logger import FLCoreLogger from fledge.tasks.purge.purge import Purge -from fledge.common import logger + __author__ = "Terris Linenbach, Vaibhav Singhal" __copyright__ = "Copyright (c) 2017 OSIsoft, LLC" @@ -17,7 +18,7 @@ __version__ = "${VERSION}" if __name__ == '__main__': - _logger = logger.setup("Purge") + _logger = FLCoreLogger().get_logger("Purge") loop = asyncio.get_event_loop() purge_process = Purge() loop.run_until_complete(purge_process.run()) diff --git a/python/fledge/tasks/purge/purge.py b/python/fledge/tasks/purge/purge.py index 91ef7ca5f1..3183407786 100644 --- a/python/fledge/tasks/purge/purge.py +++ b/python/fledge/tasks/purge/purge.py @@ -31,13 +31,14 @@ import time from datetime import datetime, timedelta +from fledge.common import statistics from fledge.common.audit_logger import AuditLogger from fledge.common.configuration_manager import ConfigurationManager -from fledge.common import statistics +from fledge.common.logger import FLCoreLogger +from fledge.common.process import FledgeProcess from fledge.common.storage_client.payload_builder import PayloadBuilder -from fledge.common import logger from fledge.common.storage_client.exceptions import * -from fledge.common.process import FledgeProcess + __author__ = "Ori Shadmon, Vaibhav Singhal, Mark Riddoch, Amarendra K Sinha" __copyright__ = "Copyright (c) 2017 OSI Soft, LLC" @@ -94,7 +95,7 @@ class Purge(FledgeProcess): def __init__(self): super().__init__() - self._logger = logger.setup("Data Purge") + self._logger = FLCoreLogger().get_logger("Data Purge") self._audit = AuditLogger(self._storage_async) async def write_statistics(self, total_purged, unsent_purged): @@ -130,6 +131,8 @@ async def purge_data(self, config): total_rows_removed = 0 unsent_rows_removed = 0 unsent_retained = 0 + duration = 0 + method = None start_time = time.strftime('%Y-%m-%d %H:%M:%S.%s', time.localtime(time.time())) if config['retainUnsent']['value'].lower() == "purge unsent": @@ -204,6 +207,8 @@ async def purge_data(self, config): total_rows_removed = result['removed'] 
unsent_rows_removed = result['unsentPurged'] unsent_retained = result['unsentRetained'] + duration += result['duration'] + method = result['method'] except ValueError: self._logger.error("purge_data - Configuration item age {} should be integer!".format( config['age']['value'])) @@ -218,7 +223,13 @@ async def purge_data(self, config): if result is not None: total_rows_removed += result['removed'] unsent_rows_removed += result['unsentPurged'] - unsent_retained += result['unsentRetained'] + unsent_retained = result['unsentRetained'] + duration += result['duration'] + if method is None: + method = result['method'] + else: + method += " and " + method += result['method'] except ValueError: self._logger.error("purge_data - Configuration item size {} should be integer!".format( config['size']['value'])) @@ -234,7 +245,9 @@ async def purge_data(self, config): "end_time": end_time, "rowsRemoved": total_rows_removed, "unsentRowsRemoved": unsent_rows_removed, - "rowsRetained": unsent_retained + "rowsRetained": unsent_retained, + "duration": duration, + "method": method }) else: self._logger.info("No rows purged") @@ -272,4 +285,4 @@ async def run(self): await self.purge_stats_history(config) await self.purge_audit_trail_log(config) except Exception as ex: - self._logger.exception(str(ex)) + self._logger.exception(ex) diff --git a/python/fledge/tasks/statistics/__main__.py b/python/fledge/tasks/statistics/__main__.py index 9e92502393..cf5df676a7 100755 --- a/python/fledge/tasks/statistics/__main__.py +++ b/python/fledge/tasks/statistics/__main__.py @@ -8,8 +8,9 @@ """Statistics history process starter""" import asyncio +from fledge.common.logger import FLCoreLogger from fledge.tasks.statistics.statistics_history import StatisticsHistory -from fledge.common import logger + __author__ = "Terris Linenbach, Vaibhav Singhal" __copyright__ = "Copyright (c) 2017 OSIsoft, LLC" @@ -17,7 +18,7 @@ __version__ = "${VERSION}" if __name__ == '__main__': - _logger = logger.setup("StatisticsHistory") + _logger = FLCoreLogger().get_logger("StatisticsHistory") statistics_history_process = StatisticsHistory() loop = asyncio.get_event_loop() loop.run_until_complete(statistics_history_process.run()) diff --git a/python/fledge/tasks/statistics/statistics_history.py b/python/fledge/tasks/statistics/statistics_history.py index db19f64407..ea34c00755 100644 --- a/python/fledge/tasks/statistics/statistics_history.py +++ b/python/fledge/tasks/statistics/statistics_history.py @@ -12,10 +12,10 @@ """ import json -from fledge.common.storage_client.payload_builder import PayloadBuilder -from fledge.common import logger -from fledge.common.process import FledgeProcess from fledge.common import utils as common_utils +from fledge.common.logger import FLCoreLogger +from fledge.common.process import FledgeProcess +from fledge.common.storage_client.payload_builder import PayloadBuilder __author__ = "Ori Shadmon, Ashish Jabble" __copyright__ = "Copyright (c) 2017 OSI Soft, LLC" @@ -29,7 +29,7 @@ class StatisticsHistory(FledgeProcess): def __init__(self): super().__init__() - self._logger = logger.setup("StatisticsHistory") + self._logger = FLCoreLogger().get_logger("StatisticsHistory") async def _bulk_update_previous_value(self, payload): """ UPDATE previous_value of column to have the same value as snapshot diff --git a/python/requirements.txt b/python/requirements.txt index 47cd3e821b..75e8a090ce 100644 --- a/python/requirements.txt +++ b/python/requirements.txt @@ -4,8 +4,8 @@ aiohttp_cors==0.7.0 cchardet==2.1.4;python_version<"3.9" 
cchardet==2.1.7;python_version>="3.9" yarl==1.7.2 +pyjwt==2.4.0 -pyjwt==1.6.4 # only required for Public Proxy multipart payload requests-toolbelt==0.9.1 diff --git a/requirements.sh b/requirements.sh index 743889acfa..cb5bf4a3a0 100755 --- a/requirements.sh +++ b/requirements.sh @@ -206,7 +206,9 @@ if [[ $YUM_PLATFORM = true ]]; then set -e make - # TODO: Use make install to install sqlite3 as a command + make install + else + make install fi cd $fledge_location set -e @@ -253,6 +255,7 @@ elif apt --version 2>/dev/null; then sqlite3_build_prepare make + make install apt install -y sqlite3 # make install after sqlite3_build_prepare should be enough to install sqlite3 as a command apt install -y pkg-config diff --git a/scripts/common/get_logs.sh b/scripts/common/get_logs.sh index 47d6e75652..ea67f64cae 100755 --- a/scripts/common/get_logs.sh +++ b/scripts/common/get_logs.sh @@ -3,7 +3,6 @@ __author__="Amandeep Singh Arora" __version__="1.0" - # open a log file at FD 2 for debugging purposes > /tmp/fl_syslog.log exec 2<&- @@ -15,6 +14,7 @@ NUM_LOGFILE_LINES_TO_CHECK_INITIALLY=2000 offset=100 limit=5 pattern="" +keyword="" level="debug" logfile="/var/log/syslog" sourceApp="fledge" @@ -22,6 +22,7 @@ sourceApp="fledge" while [ "$#" -gt 0 ]; do case "$1" in -pattern) pattern="$2"; shift 2;; + -keyword) keyword="$2"; shift 2;; -offset) offset="$2"; shift 2;; -limit) limit="$2"; shift 2;; -level) level="$2"; shift 2;; @@ -30,7 +31,17 @@ while [ "$#" -gt 0 ]; do esac done + sum=$(($offset + $limit)) +keyword_len=${#keyword} +if [[ $keyword_len -gt 0 ]]; then + factor_keyword="$sourceApp:$level:$keyword:" + search_pattern="grep -a -E '${pattern}' | grep -F '$keyword'" +else + factor_keyword="$sourceApp:$level:" + search_pattern="grep -a -E '${pattern}'" +fi + echo "" >&2 echo "****************************************************************************************" >&2 echo "************************************* START ********************************************" >&2 @@ -50,47 +61,52 @@ if [[ $script_runs -gt ${RECALC_AFTER_N_SCRIPT_RUNS} ]]; then echo -n "$script_runs" > /tmp/fl_syslog_script_runs fi echo -n "$script_runs" > /tmp/fl_syslog_script_runs -echo "offset=$offset, limit=$limit, sum=$sum, pattern=$pattern, sourceApp=$sourceApp, level=$level, script_runs=$script_runs" >&2 +echo "offset=$offset, limit=$limit, sum=$sum, pattern=$search_pattern, sourceApp=$sourceApp, level=$level, script_runs=$script_runs" >&2 # calculate how many log lines are to be checked to get 'n' result lines for a given service and log level # if for getting 100 lines of interest, 6400 last syslog lines need to be checked, then factor would be 64 factor=2 +initial_factor=$((${NUM_LOGFILE_LINES_TO_CHECK_INITIALLY} / $sum)) if [[ $script_runs -eq 0 ]]; then - factor=$((${NUM_LOGFILE_LINES_TO_CHECK_INITIALLY} / $sum)) - [[ $factor -lt 2 ]] && factor=2 + factor=$initial_factor + [[ $factor -lt 2 ]] && factor=2 else - if [ -f /tmp/fl_syslog_factor ]; then - echo "Reading factor value from /tmp/fl_syslog_factor" >&2 - factor=$(grep "$sourceApp:$level:" /tmp/fl_syslog_factor | cut -d: -f3) - echo "Read factor value of '$factor' from /tmp/fl_syslog_factor" >&2 - [ -z $factor ] && factor=2 && echo "Using factor value of $factor" >&2 - else - [ -z $factor ] && factor=2 && echo "Using factor value of $factor; file '/tmp/fl_syslog_factor' is missing" >&2 - echo "Starting with factor=$factor" >&2 - fi + if [ -f /tmp/fl_syslog_factor ]; then + echo "Reading factor value from /tmp/fl_syslog_factor" >&2 + if [[ $keyword_len -gt 0 ]]; 
then + cmd="grep '$factor_keyword' /tmp/fl_syslog_factor | rev | cut -d: -f1 | rev" + else + cmd="grep '$factor_keyword[0-9][0-9]*$' /tmp/fl_syslog_factor | rev | cut -d: -f1 | rev" + fi + echo "Read factor cmd='$cmd'" >&2 + factor=$(eval $cmd) + echo "Read factor value of '$factor' from /tmp/fl_syslog_factor" >&2 + [ -z $factor ] && factor=$initial_factor && echo "Using factor value of $factor" >&2 + else + [ -z $factor ] && factor=$initial_factor && echo "Using factor value of $factor; file '/tmp/fl_syslog_factor' is missing" >&2 + echo "Starting with factor=$factor" >&2 + fi + fi tmpfile=$(mktemp) loop_iters=0 +logfile_line_count=$(wc -l < $logfile) # check the last 'n' lines of syslog for log lines of interest, else keep doubling 'n' till syslog file size while [ 1 ]; do t1=$(date +%s%N) - filesz=$(stat -c%s $logfile) - filesz_dbl=$(($filesz * 2)) - - lines=$(($factor * $sum)) - - echo "loop_iters=$loop_iters: factor=$factor, lines=$lines, tmpfile=$tmpfile" >&2 - - cmd="tail -n $lines $logfile | grep -a -E '${pattern}' > $tmpfile" - echo "cmd=$cmd, filesz=$filesz" >&2 + lines_to_check=$(($factor * $sum)) + echo >&2 + echo "loop_iters=$loop_iters: factor=$factor, lines=$lines_to_check, tmpfile=$tmpfile" >&2 + cmd="tail -n $lines_to_check $logfile | ${search_pattern} > $tmpfile" + echo "cmd=$cmd, logfile line count=$logfile_line_count" >&2 eval "$cmd" t2=$(date +%s%N) t_diff=$(((t2 - t1)/1000000)) count=$(wc -l < $tmpfile) - echo "Got $count matching log lines in last $lines lines of syslog file; processing time=${t_diff}ms" >&2 + echo "Got $count matching log lines in last $lines_to_check lines of syslog file; processing time=${t_diff}ms" >&2 if [[ $count -ge $sum ]]; then echo "Got sufficient number of matching log lines, current factor value of $factor is good" >&2 @@ -98,19 +114,13 @@ do rm $tmpfile touch /tmp/fl_syslog_factor - grep -v "$sourceApp:$level:" /tmp/fl_syslog_factor > /tmp/fl_syslog_factor.out; mv /tmp/fl_syslog_factor.out /tmp/fl_syslog_factor - echo "$sourceApp:$level:$factor" >> /tmp/fl_syslog_factor + grep -v "$factor_keyword" /tmp/fl_syslog_factor > /tmp/fl_syslog_factor.out; mv /tmp/fl_syslog_factor.out /tmp/fl_syslog_factor + echo "$factor_keyword$factor" >> /tmp/fl_syslog_factor break - else - new_factor=$factor - [[ $count -ne 0 ]] && ( new_factor=$(($lines / $count)) && new_factor=$(($new_factor + 1)) ) - echo "factor=$factor, new_factor=$new_factor" >&2 - [[ $new_factor -eq $factor ]] && [[ $lines -lt $filesz_dbl ]] && factor=$(($factor * 2)) || factor=$new_factor - echo "Didn't get sufficient number of matching log lines, trying factor=$factor" >&2 fi - if [[ $lines -gt $filesz_dbl ]]; then - echo "Cannot increase factor value any further; filesz=$filesz, lines=$lines" >&2 + if [[ $lines_to_check -gt $logfile_line_count ]]; then + echo "Cannot increase factor value any further; logfile line count=$logfile_line_count, lines=$lines_to_check" >&2 cat $tmpfile | tail -n $sum | head -n $limit echo "Log results START:" >&2 @@ -118,13 +128,16 @@ do echo "Log results END:" >&2 rm $tmpfile touch /tmp/fl_syslog_factor - grep -v "$sourceApp:$level:" /tmp/fl_syslog_factor > /tmp/fl_syslog_factor.out; mv /tmp/fl_syslog_factor.out /tmp/fl_syslog_factor - echo "$sourceApp:$level:$factor" >> /tmp/fl_syslog_factor + grep -v "$factor_keyword" /tmp/fl_syslog_factor > /tmp/fl_syslog_factor.out; mv /tmp/fl_syslog_factor.out /tmp/fl_syslog_factor + echo "$factor_keyword$factor" >> /tmp/fl_syslog_factor break fi + factor=$(($factor * 2)) + echo "Didn't get sufficient number of 
matching log lines, trying factor=$factor" >&2 + loop_iters=$(($loop_iters + 1)) done echo "******************************** END ***************************************************" >&2 -echo "" >&2 +echo "" >&2 \ No newline at end of file diff --git a/scripts/extras/update_task.apt b/scripts/extras/update_task.apt index 0c3f06fc1a..ffd9e9a1f2 100755 --- a/scripts/extras/update_task.apt +++ b/scripts/extras/update_task.apt @@ -39,13 +39,13 @@ trap "" 1 2 3 6 15 # Check availability of FLEDGE_ROOT directory if [ ! -d "${FLEDGE_ROOT}" ]; then - write_log "" "$0" "err" "home directory missing or incorrectly set environment" "logonly" + write_log "" "$0" "err" "home directory missing or incorrectly set environment." "logonly" exit 1 fi # Check availability of FLEDGE_DATA directory if [ ! -d "${FLEDGE_DATA}" ]; then - write_log "" "$0" "err" "Data directory is missing or incorrectly set environment" "logonly" + write_log "" "$0" "err" "Data directory is missing or incorrectly set environment." "logonly" exit 1 fi @@ -60,7 +60,7 @@ fledge_stop() { STOP_FLEDGE_CMD_STATUS=`$STOP_FLEDGE_CMD` sleep 15 if [ "${STOP_FLEDGE_CMD_STATUS}" = "" ]; then - write_log "" "$0" "err" "cannot run \"${STOP_FLEDGE_CMD}\" command" "logonly" + write_log "" "$0" "err" "cannot run \"${STOP_FLEDGE_CMD}\" command." "logonly" exit 1 fi } @@ -69,7 +69,6 @@ fledge_stop() { run_update() { # Download and update the package information from all of the configured sources UPDATE_CMD="sudo apt -y update" - write_log "" "$0" "info" "Executing ${UPDATE_CMD} command..." "logonly" UPDATE_CMD_OUT=`$UPDATE_CMD` UPDATE_CMD_STATUS="$?" if [ "$UPDATE_CMD_STATUS" != "0" ]; then @@ -79,23 +78,18 @@ run_update() { } run_upgrade() { - # Check GUI package is installed or not - if ! dpkg -l | grep fledge-gui; then - UPGRADE_CMD="sudo apt-get -y install fledge" - PKG_NAME="fledge" - else - PKG_NAME="fledge, fledge-gui" - UPGRADE_CMD="sudo apt-get -y install fledge fledge-gui" - fi - - # Upgrade Package - write_log "" "$0" "info" "Executing ${UPGRADE_CMD} command..." "logonly" - UPGRADE_CMD_OUT=`$UPGRADE_CMD` + # Upgrade Packages + PACKAGES_LIST=$(cat ${FLEDGE_DATA}/.upgradable) + UPGRADE_CMD="sudo apt -y upgrade $PACKAGES_LIST" + UPGRADE_CMD_OUT=$($UPGRADE_CMD) UPGRADE_CMD_STATUS="$?" if [ "$UPGRADE_CMD_STATUS" != "0" ]; then + $(rm -rf ${FLEDGE_DATA}/.upgradable) write_log "" "$0" "err" "Failed on $UPGRADE_CMD. Exit: $UPGRADE_CMD_STATUS. Out: $UPGRADE_CMD_OUT" "all" "pretty" exit 1 fi + msg="'$PACKAGES_LIST' packages upgraded successfully!" + write_log "" "$0" "info" "$msg" "all" "pretty" UPGRADE_DONE="Y" } @@ -108,9 +102,6 @@ fledge_start() { write_log "" "$0" "err" "Failed on $START_FLEDGE_CMD. Exit: $START_FLEDGE_CMD_STATUS. Out: $START_FLEDGE_CMD_OUT" "all" "pretty" exit 1 fi - - msg="'${PKG_NAME}' package updated successfully!" 
- write_log "" "$0" "info" "$msg" "all" "pretty" } # Find the local timestamp @@ -123,15 +114,10 @@ print (varDateTime) END } -# Find the REST API URL -get_rest_api_url () { - PID_FILE=${FLEDGE_DATA}/var/run/fledge.core.pid - REST_API_URL=`cat ${PID_FILE} | python3 ${FLEDGE_ROOT}/scripts/common/json_parse.py get_rest_api_url_from_pid` -} - # CREATE Audit trail entry for update package audit_trail_entry () { - SQL_DATA="log(code, level, log) VALUES('PKGUP', 4, '{\"packageName\": \"${PKG_NAME}\"}');" + AUDIT_PACKAGES_LIST=$(echo $PACKAGES_LIST | sed -e 's/ /, /g') + SQL_DATA="log(code, level, log) VALUES('PKGUP', 4, '{\"packageName\": \"${AUDIT_PACKAGES_LIST}\"}');" # Find storage engine value STORAGE=`${FLEDGE_ROOT}/services/fledge.services.storage --plugin | awk '{print $1}'` if [ "${STORAGE}" = "postgres" ]; then @@ -147,11 +133,12 @@ audit_trail_entry () { ADD_AUDIT_LOG_STATUS="$?" if [ "$ADD_AUDIT_LOG_STATUS" != "0" ]; then + $(rm -rf ${FLEDGE_DATA}/.upgradable) write_log "" "$0" "err" "Failed on execution of ${INSERT_SQL}. Exit: ${ADD_AUDIT_LOG_STATUS}." "all" "pretty" exit 1 else - msg="Audit trail entry created for '${PKG_NAME}' package update!" - write_log "" "$0" "info" "$msg" "all" "pretty" + $(rm -rf ${FLEDGE_DATA}/.upgradable) + msg="Audit trail entry created for '${AUDIT_PACKAGES_LIST}' packages upgrade!" fi } @@ -181,32 +168,40 @@ update_task() { write_log "" "$0" "err" "Failed on execution of ${UPDATE_SQL_QUERY} in engine '${STORAGE}'. Exit: $UPDATE_TASK_STATUS." "all" "pretty" exit 1 else - msg="'$SCHEDULE_NAME' task state updated successfully" - write_log "" "$0" "debug" "$msg" "all" "pretty" + msg="'$SCHEDULE_NAME' task state updated successfully." + write_log "" "$0" "info" "$msg" "all" "pretty" fi } # Upgrade check upgrade_check() { - # System update request - run_update - - UPGRADE_CHECK="sudo apt list --upgradable" - write_log "" "$0" "info" "Executing ${UPGRADE_CHECK} command..." "logonly" - UPGRADE_CMD_OUT=`$UPGRADE_CHECK 2> /dev/null | grep 'fledge/'` + # Find the upgradable list of fledge packages + UPGRADABLE_LIST="sudo apt list --upgradable | grep ^fledge" + # write_log "" "$0" "debug" "Executing $UPGRADABLE_LIST ..." "logonly" + UPGRADE_CMD_OUT=$(eval $UPGRADABLE_LIST) UPGRADE_CMD_STATUS="$?" - write_log "" "$0" "debug" "Upgrade check result [$UPGRADE_CMD_OUT], retcode $UPGRADE_CMD_STATUS" "all" "pretty" + # write_log "" "$0" "debug" "Upgrade check result [$UPGRADE_CMD_OUT], retcode $UPGRADE_CMD_STATUS" "all" "pretty" if [ "$UPGRADE_CMD_STATUS" != "0" ]; then - write_log "" "$0" "info" "No new Fledge package to upgrade" "all" "pretty" - echo 0 + write_log "" "$0" "info" "No new Fledge packages to upgrade." "all" "pretty" + echo 0 else - echo 1 + while IFS= read -r line + do + if [[ "$line" == fledge* ]]; then + pkg=$(echo $line | cut -d "/" -f 1) + PACKAGES_LIST+=" $pkg" + fi + done < <(printf '%s\n' "$UPGRADE_CMD_OUT") + echo $PACKAGES_LIST > ${FLEDGE_DATA}/.upgradable + echo 1 fi } # Main -DO_UPGRADE=`upgrade_check` +DO_UPDATE=$(run_update) + +DO_UPGRADE=$(upgrade_check) if [ "$DO_UPGRADE" = "1" ]; then # Stop Fledge diff --git a/scripts/fledge b/scripts/fledge index 74af95db0f..8eb9fed76f 100755 --- a/scripts/fledge +++ b/scripts/fledge @@ -534,7 +534,10 @@ fledge_status() { ps -ef | grep -v 'cpulimit*' | grep -o 'python3 -m fledge.tasks.*' | grep -o 'fledge.tasks.*' | grep -v 'fledge.tasks\.\*' || true # Show Tasks in C code - ps -ef | grep './tasks.' 
| grep -v python3 | grep -v grep | grep -v awk | awk '{printf "tasks/sending_process "; for(i=9;i<=NF;++i) printf $i FS; printf "\n"}' || true + for task_name in `ls ${FLEDGE_ROOT}/tasks` + do + ps -ef | grep "./tasks/$task_name" | grep -v python3 | grep -v grep | grep -v awk | awk '{printf "tasks/'$task_name' " ; for(i=9;i<=NF;++i) printf $i FS; printf "\n"}' || true + done fi ;; *) diff --git a/scripts/plugins/storage/postgres/downgrade/59.sql b/scripts/plugins/storage/postgres/downgrade/59.sql new file mode 100644 index 0000000000..94df72c65a --- /dev/null +++ b/scripts/plugins/storage/postgres/downgrade/59.sql @@ -0,0 +1,12 @@ +-- Drop control pipeline sequences & tables +DROP TABLE IF EXISTS fledge.control_source; +DROP SEQUENCE IF EXISTS fledge.control_source_id_seq; + +DROP TABLE IF EXISTS fledge.control_destination; +DROP SEQUENCE IF EXISTS fledge.control_destination_id_seq; + +DROP TABLE IF EXISTS fledge.control_filters; +DROP SEQUENCE IF EXISTS fledge.control_filters_id_seq; + +DROP TABLE IF EXISTS fledge.control_pipelines; +DROP SEQUENCE IF EXISTS fledge.control_pipelines_id_seq; diff --git a/scripts/plugins/storage/postgres/downgrade/60.sql b/scripts/plugins/storage/postgres/downgrade/60.sql new file mode 100644 index 0000000000..eb3c975713 --- /dev/null +++ b/scripts/plugins/storage/postgres/downgrade/60.sql @@ -0,0 +1 @@ +DELETE FROM fledge.log_codes where code IN ('USRAD', 'USRDL', 'USRCH', 'USRRS' ); diff --git a/scripts/plugins/storage/postgres/downgrade/61.sql b/scripts/plugins/storage/postgres/downgrade/61.sql new file mode 100644 index 0000000000..66236db6c4 --- /dev/null +++ b/scripts/plugins/storage/postgres/downgrade/61.sql @@ -0,0 +1,2 @@ +DROP TABLE IF EXISTS fledge.monitors; +DROP INDEX IF EXISTS fledge.monitors_ix1; diff --git a/scripts/plugins/storage/postgres/downgrade/62.sql b/scripts/plugins/storage/postgres/downgrade/62.sql new file mode 100644 index 0000000000..8e92e980f1 --- /dev/null +++ b/scripts/plugins/storage/postgres/downgrade/62.sql @@ -0,0 +1,4 @@ +-- Delete role +DELETE FROM fledge.roles WHERE name='control'; +-- Reset auto increment +ALTER SEQUENCE fledge.roles_id_seq RESTART WITH 5 \ No newline at end of file diff --git a/scripts/plugins/storage/postgres/downgrade/63.sql b/scripts/plugins/storage/postgres/downgrade/63.sql new file mode 100644 index 0000000000..5b68761180 --- /dev/null +++ b/scripts/plugins/storage/postgres/downgrade/63.sql @@ -0,0 +1,4 @@ +-- Drop control flow tables +DROP TABLE IF EXISTS fledge.control_api_acl; +DROP TABLE IF EXISTS fledge.control_api_parameters; +DROP TABLE IF EXISTS fledge.control_api; \ No newline at end of file diff --git a/scripts/plugins/storage/postgres/downgrade/64.sql b/scripts/plugins/storage/postgres/downgrade/64.sql new file mode 100644 index 0000000000..42fa14f3d6 --- /dev/null +++ b/scripts/plugins/storage/postgres/downgrade/64.sql @@ -0,0 +1 @@ +DELETE FROM fledge.log_codes where code IN ('ACLAD', 'ACLCH', 'ACLDL', 'CTSAD', 'CTSCH', 'CTSDL', 'CTPAD', 'CTPCH', 'CTPDL'); diff --git a/scripts/plugins/storage/postgres/downgrade/65.sql b/scripts/plugins/storage/postgres/downgrade/65.sql new file mode 100644 index 0000000000..60e1544ad2 --- /dev/null +++ b/scripts/plugins/storage/postgres/downgrade/65.sql @@ -0,0 +1 @@ +DELETE FROM fledge.log_codes where code IN ('CTEAD', 'CTECH', 'CTEDL'); diff --git a/scripts/plugins/storage/postgres/init.sql b/scripts/plugins/storage/postgres/init.sql index 3098557080..c8e1a32c68 100644 --- a/scripts/plugins/storage/postgres/init.sql +++ 
b/scripts/plugins/storage/postgres/init.sql @@ -168,6 +168,35 @@ CREATE SEQUENCE fledge.asset_tracker_id_seq MAXVALUE 9223372036854775807 CACHE 1; +CREATE SEQUENCE fledge.control_source_id_seq + INCREMENT 1 + START 1 + MINVALUE 1 + MAXVALUE 9223372036854775807 + CACHE 1; + +CREATE SEQUENCE fledge.control_destination_id_seq + INCREMENT 1 + START 1 + MINVALUE 1 + MAXVALUE 9223372036854775807 + CACHE 1; + +CREATE SEQUENCE fledge.control_pipelines_id_seq + INCREMENT 1 + START 1 + MINVALUE 1 + MAXVALUE 9223372036854775807 + CACHE 1; + +CREATE SEQUENCE fledge.control_filters_id_seq + INCREMENT 1 + START 1 + MINVALUE 1 + MAXVALUE 9223372036854775807 + CACHE 1; + + ----- TABLES & SEQUENCES -- Log Codes Table @@ -837,6 +866,88 @@ CREATE TABLE fledge.acl_usage ( entity_name character varying(255) NOT NULL, -- associated entity name CONSTRAINT usage_acl_pkey PRIMARY KEY (name, entity_type, entity_name) ); +-- Create control_source table +CREATE TABLE fledge.control_source ( + cpsid integer NOT NULL DEFAULT nextval('fledge.control_source_id_seq'::regclass), -- auto source id + name character varying(40) NOT NULL , -- source name + description character varying(120) NOT NULL , -- source description + CONSTRAINT control_source_pkey PRIMARY KEY (cpsid) + ); + +-- Create control_destination table +CREATE TABLE fledge.control_destination ( + cpdid integer NOT NULL DEFAULT nextval('fledge.control_destination_id_seq'::regclass), -- auto destination id + name character varying(40) NOT NULL , -- destination name + description character varying(120) NOT NULL , -- destination description + CONSTRAINT control_destination_pkey PRIMARY KEY (cpdid) + ); + +-- Create control_pipelines table +CREATE TABLE fledge.control_pipelines ( + cpid integer NOT NULL DEFAULT nextval('fledge.control_pipelines_id_seq'::regclass), -- control pipeline id + name character varying(255) NOT NULL , -- control pipeline name + stype integer , -- source type id from control_source table + sname character varying(80) , -- source name from control_source table + dtype integer , -- destination type id from control_destination table + dname character varying(80) , -- destination name from control_destination table + enabled boolean NOT NULL DEFAULT FALSE , -- false = A given pipeline is disabled by default + execution character varying(20) NOT NULL DEFAULT 'shared' , -- pipeline will be executed as with shared execution model by default + CONSTRAINT control_pipelines_pkey PRIMARY KEY (cpid) + ); + +-- Create control_filters table +CREATE TABLE fledge.control_filters ( + fid integer NOT NULL DEFAULT nextval('fledge.control_filters_id_seq'::regclass), -- auto filter id + cpid integer NOT NULL , -- control pipeline id + forder integer NOT NULL , -- filter order + fname character varying(255) NOT NULL , -- Name of the filter instance + CONSTRAINT control_filters_pkey PRIMARY KEY (fid) , + CONSTRAINT control_filters_fk1 FOREIGN KEY (cpid) REFERENCES fledge.control_pipelines (cpid) MATCH SIMPLE ON UPDATE NO ACTION ON DELETE NO ACTION + ); + +-- Create control_api table +CREATE TABLE fledge.control_api ( + name character varying(255) NOT NULL , -- control API name + description character varying(255) NOT NULL , -- description of control API + type integer NOT NULL , -- 0 for write and 1 for operation + operation_name character varying(255) , -- name of the operation and only valid if type is operation + destination integer NOT NULL , -- destination of request; 0-broadcast, 1-service, 2-asset, 3-script + destination_arg character varying(255) , -- name 
of the destination and only used if destination is non-zero + anonymous boolean NOT NULL DEFAULT 'f' , -- anonymous callers to make request to control API; by default false + CONSTRAINT control_api_pname PRIMARY KEY (name) + ); + +-- Create control_api_parameters table +CREATE TABLE fledge.control_api_parameters ( + name character varying(255) NOT NULL , -- foreign key to fledge.control_api + parameter character varying(255) NOT NULL , -- name of parameter + value character varying(255) , -- value of parameter if constant otherwise default + constant boolean NOT NULL , -- parameter is either a constant or variable + CONSTRAINT control_api_parameters_fk1 FOREIGN KEY (name) REFERENCES fledge.control_api (name) MATCH SIMPLE ON UPDATE NO ACTION ON DELETE NO ACTION + ); + +-- Create control_api_acl table +CREATE TABLE fledge.control_api_acl ( + name character varying(255) NOT NULL , -- foreign key to fledge.control_api + "user" character varying(255) NOT NULL , -- foreign key to fledge.users + CONSTRAINT control_api_acl_fk1 FOREIGN KEY (name) REFERENCES fledge.control_api (name) MATCH SIMPLE ON UPDATE NO ACTION ON DELETE NO ACTION, + CONSTRAINT control_api_acl_fk2 FOREIGN KEY ("user") REFERENCES fledge.users (uname) MATCH SIMPLE ON UPDATE NO ACTION ON DELETE NO ACTION + ); + +CREATE TABLE fledge.monitors ( + service character varying(255) NOT NULL, + monitor character varying(80) NOT NULL, + minimum bigint, + maximum bigint, + average bigint, + samples bigint, + ts timestamp(6) with time zone NOT NULL DEFAULT now() + ); + +CREATE INDEX monitors_ix1 + ON fledge.monitors(service, monitor); + + -- Grants to fledge schema GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA fledge TO PUBLIC; @@ -850,7 +961,8 @@ INSERT INTO fledge.roles ( name, description ) VALUES ('admin', 'All CRUD privileges'), ('user', 'All CRUD operations and self profile management'), ('view', 'Only to view the configuration'), - ('data-view', 'Only read the data in buffer'); + ('data-view', 'Only read the data in buffer'), + ('control', 'Same as editor can do and also have access for control scripts and pipelines'); -- Users @@ -905,7 +1017,16 @@ INSERT INTO fledge.log_codes ( code, description ) ( 'ASTDP', 'Asset deprecated' ), ( 'ASTUN', 'Asset un-deprecated' ), ( 'PIPIN', 'Pip installation' ), - ( 'AUMRK', 'Audit Log Marker' ); + ( 'AUMRK', 'Audit Log Marker' ), + ( 'USRAD', 'User Added' ), + ( 'USRDL', 'User Deleted' ), + ( 'USRCH', 'User Changed' ), + ( 'USRRS', 'User Restored' ), + ( 'ACLAD', 'ACL Added' ),( 'ACLCH', 'ACL Changed' ),( 'ACLDL', 'ACL Deleted' ), + ( 'CTSAD', 'Control Script Added' ),( 'CTSCH', 'Control Script Changed' ),('CTSDL', 'Control Script Deleted' ), + ( 'CTPAD', 'Control Pipeline Added' ),( 'CTPCH', 'Control Pipeline Changed' ),('CTPDL', 'Control Pipeline Deleted' ), + ( 'CTEAD', 'Control Entrypoint Added' ),( 'CTECH', 'Control Entrypoint Changed' ),('CTEDL', 'Control Entrypoint Deleted' ) + ; -- -- Configuration parameters @@ -973,7 +1094,7 @@ INSERT INTO fledge.schedules ( id, schedule_name, process_name, schedule_type, 'purge', -- process_name 3, -- schedule_type (interval) NULL, -- schedule_time - '01:00:00', -- schedule_interval (evey hour) + '00:10:00', -- schedule_interval (evey hour) true, -- exclusive true -- enabled ); @@ -1070,3 +1191,22 @@ CREATE TABLE fledge.service_schema ( service character varying(255) NOT NULL, version integer NOT NULL, definition JSON); + +-- Insert predefined entries for Control Source +DELETE FROM fledge.control_source; +INSERT INTO 
fledge.control_source ( name, description ) + VALUES ('Any', 'Any source.'), + ('Service', 'A named service in source of the control pipeline.'), + ('API', 'The control pipeline source is the REST API.'), + ('Notification', 'The control pipeline originated from a notification.'), + ('Schedule', 'The control request was triggered by a schedule.'), + ('Script', 'The control request has come from the named script.'); + +-- Insert predefined entries for Control Destination +DELETE FROM fledge.control_destination; +INSERT INTO fledge.control_destination ( name, description ) + VALUES ('Any', 'Any destination.'), + ('Service', 'A name of service that is being controlled.'), + ('Asset', 'A name of asset that is being controlled.'), + ('Script', 'A name of script that will be executed.'), + ('Broadcast', 'No name is applied and pipeline will be considered for any control writes or operations to broadcast destinations.'); diff --git a/scripts/plugins/storage/postgres/upgrade/60.sql b/scripts/plugins/storage/postgres/upgrade/60.sql new file mode 100644 index 0000000000..a3e7be4608 --- /dev/null +++ b/scripts/plugins/storage/postgres/upgrade/60.sql @@ -0,0 +1,89 @@ +-- Create SEQUENCE for control_source +CREATE SEQUENCE fledge.control_source_id_seq + INCREMENT 1 + START 1 + MINVALUE 1 + MAXVALUE 9223372036854775807 + CACHE 1; + +-- Create control_source table +CREATE TABLE fledge.control_source ( + cpsid integer NOT NULL DEFAULT nextval('fledge.control_source_id_seq'::regclass), -- auto source id + name character varying(40) NOT NULL , -- source name + description character varying(120) NOT NULL , -- source description + CONSTRAINT control_source_pkey PRIMARY KEY (cpsid) + ); + +-- Create SEQUENCE for control_destination +CREATE SEQUENCE fledge.control_destination_id_seq + INCREMENT 1 + START 1 + MINVALUE 1 + MAXVALUE 9223372036854775807 + CACHE 1; + +-- Create control_destination table +CREATE TABLE fledge.control_destination ( + cpdid integer NOT NULL DEFAULT nextval('fledge.control_destination_id_seq'::regclass), -- auto destination id + name character varying(40) NOT NULL , -- destination name + description character varying(120) NOT NULL , -- destination description + CONSTRAINT control_destination_pkey PRIMARY KEY (cpdid) + ); + +-- Create SEQUENCE for control_pipelines +CREATE SEQUENCE fledge.control_pipelines_id_seq + INCREMENT 1 + START 1 + MINVALUE 1 + MAXVALUE 9223372036854775807 + CACHE 1; + +-- Create control_pipelines table +CREATE TABLE fledge.control_pipelines ( + cpid integer NOT NULL DEFAULT nextval('fledge.control_pipelines_id_seq'::regclass), -- control pipeline id + name character varying(255) NOT NULL , -- control pipeline name + stype integer , -- source type id from control_source table + sname character varying(80) , -- source name from control_source table + dtype integer , -- destination type id from control_destination table + dname character varying(80) , -- destination name from control_destination table + enabled boolean NOT NULL DEFAULT FALSE , -- false = A given pipeline is disabled by default + execution character varying(20) NOT NULL DEFAULT 'shared' , -- pipeline will be executed as with shared execution model by default + CONSTRAINT control_pipelines_pkey PRIMARY KEY (cpid) + ); + +-- Create SEQUENCE for control_filters +CREATE SEQUENCE fledge.control_filters_id_seq + INCREMENT 1 + START 1 + MINVALUE 1 + MAXVALUE 9223372036854775807 + CACHE 1; + +-- Create control_filters table +CREATE TABLE fledge.control_filters ( + fid integer NOT NULL DEFAULT 
nextval('fledge.control_filters_id_seq'::regclass), -- auto filter id + cpid integer NOT NULL , -- control pipeline id + forder integer NOT NULL , -- filter order + fname character varying(255) NOT NULL , -- Name of the filter instance + CONSTRAINT control_filters_pkey PRIMARY KEY (fid) , + CONSTRAINT control_filters_fk1 FOREIGN KEY (cpid) REFERENCES fledge.control_pipelines (cpid) MATCH SIMPLE ON UPDATE NO ACTION ON DELETE NO ACTION + ); + +-- Insert predefined entries for Control Source +DELETE FROM fledge.control_source; +INSERT INTO fledge.control_source ( name, description ) + VALUES ('Any', 'Any source.'), + ('Service', 'A named service in source of the control pipeline.'), + ('API', 'The control pipeline source is the REST API.'), + ('Notification', 'The control pipeline originated from a notification.'), + ('Schedule', 'The control request was triggered by a schedule.'), + ('Script', 'The control request has come from the named script.'); + +-- Insert predefined entries for Control Destination +DELETE FROM fledge.control_destination; +INSERT INTO fledge.control_destination ( name, description ) + VALUES ('Any', 'Any destination.'), + ('Service', 'A name of service that is being controlled.'), + ('Asset', 'A name of asset that is being controlled.'), + ('Script', 'A name of script that will be executed.'), + ('Broadcast', 'No name is applied and pipeline will be considered for any control writes or operations to broadcast destinations.'); diff --git a/scripts/plugins/storage/postgres/upgrade/61.sql b/scripts/plugins/storage/postgres/upgrade/61.sql new file mode 100644 index 0000000000..f69e72ae07 --- /dev/null +++ b/scripts/plugins/storage/postgres/upgrade/61.sql @@ -0,0 +1,6 @@ +INSERT INTO fledge.log_codes ( code, description ) + VALUES + ( 'USRAD', 'User Added' ), + ( 'USRDL', 'User Deleted' ), + ( 'USRCH', 'User Changed' ), + ( 'USRRS', 'User Restored' ); diff --git a/scripts/plugins/storage/postgres/upgrade/62.sql b/scripts/plugins/storage/postgres/upgrade/62.sql new file mode 100644 index 0000000000..754518062b --- /dev/null +++ b/scripts/plugins/storage/postgres/upgrade/62.sql @@ -0,0 +1,13 @@ + +CREATE TABLE fledge.monitors ( + service character varying(255) NOT NULL, + monitor character varying(80) NOT NULL, + minimum bigint, + maximum bigint, + average bigint, + samples bigint, + ts timestamp(6) with time zone NOT NULL DEFAULT now() +); + +CREATE INDEX monitors_ix1 + ON fledge.monitors(service, monitor); diff --git a/scripts/plugins/storage/postgres/upgrade/63.sql b/scripts/plugins/storage/postgres/upgrade/63.sql new file mode 100644 index 0000000000..8295ca306a --- /dev/null +++ b/scripts/plugins/storage/postgres/upgrade/63.sql @@ -0,0 +1,3 @@ +-- Roles +INSERT INTO fledge.roles ( name, description ) + VALUES ('control', 'Same as editor can do and also have access for control scripts and pipelines'); diff --git a/scripts/plugins/storage/postgres/upgrade/64.sql b/scripts/plugins/storage/postgres/upgrade/64.sql new file mode 100644 index 0000000000..0fc56e790f --- /dev/null +++ b/scripts/plugins/storage/postgres/upgrade/64.sql @@ -0,0 +1,28 @@ +-- Create control_api table +CREATE TABLE fledge.control_api ( + name character varying(255) NOT NULL , -- control API name + description character varying(255) NOT NULL , -- description of control API + type integer NOT NULL , -- 0 for write and 1 for operation + operation_name character varying(255) , -- name of the operation and only valid if type is operation + destination integer NOT NULL , -- destination of request; 
0-broadcast, 1-service, 2-asset, 3-script + destination_arg character varying(255) , -- name of the destination and only used if destination is non-zero + anonymous boolean NOT NULL DEFAULT 'f' , -- anonymous callers to make request to control API; by default false + CONSTRAINT control_api_pname PRIMARY KEY (name) + ); + +-- Create control_api_parameters table +CREATE TABLE fledge.control_api_parameters ( + name character varying(255) NOT NULL , -- foreign key to fledge.control_api + parameter character varying(255) NOT NULL , -- name of parameter + value character varying(255) , -- value of parameter if constant otherwise default + constant boolean NOT NULL , -- parameter is either a constant or variable + CONSTRAINT control_api_parameters_fk1 FOREIGN KEY (name) REFERENCES fledge.control_api (name) MATCH SIMPLE ON UPDATE NO ACTION ON DELETE NO ACTION + ); + +-- Create control_api_acl table +CREATE TABLE fledge.control_api_acl ( + name character varying(255) NOT NULL , -- foreign key to fledge.control_api + "user" character varying(255) NOT NULL , -- foreign key to fledge.users + CONSTRAINT control_api_acl_fk1 FOREIGN KEY (name) REFERENCES fledge.control_api (name) MATCH SIMPLE ON UPDATE NO ACTION ON DELETE NO ACTION, + CONSTRAINT control_api_acl_fk2 FOREIGN KEY ("user") REFERENCES fledge.users (uname) MATCH SIMPLE ON UPDATE NO ACTION ON DELETE NO ACTION + ); diff --git a/scripts/plugins/storage/postgres/upgrade/65.sql b/scripts/plugins/storage/postgres/upgrade/65.sql new file mode 100644 index 0000000000..9a5e1f0435 --- /dev/null +++ b/scripts/plugins/storage/postgres/upgrade/65.sql @@ -0,0 +1,4 @@ +INSERT INTO fledge.log_codes ( code, description ) + VALUES ( 'ACLAD', 'ACL Added' ),( 'ACLCH', 'ACL Changed' ),( 'ACLDL', 'ACL Deleted' ), + ( 'CTSAD', 'Control Script Added' ),( 'CTSCH', 'Control Script Changed' ),('CTSDL', 'Control Script Deleted' ), + ( 'CTPAD', 'Control Pipeline Added' ),( 'CTPCH', 'Control Pipeline Changed' ),('CTPDL', 'Control Pipeline Deleted' ); diff --git a/scripts/plugins/storage/postgres/upgrade/66.sql b/scripts/plugins/storage/postgres/upgrade/66.sql new file mode 100644 index 0000000000..d2b521c76f --- /dev/null +++ b/scripts/plugins/storage/postgres/upgrade/66.sql @@ -0,0 +1,4 @@ +INSERT INTO fledge.log_codes ( code, description ) + VALUES ( 'CTEAD', 'Control Entrypoint Added' ), + ( 'CTECH', 'Control Entrypoint Changed' ), + ('CTEDL', 'Control Entrypoint Deleted' ); \ No newline at end of file diff --git a/scripts/plugins/storage/sqlite/downgrade/59.sql b/scripts/plugins/storage/sqlite/downgrade/59.sql new file mode 100644 index 0000000000..c1c032ab8a --- /dev/null +++ b/scripts/plugins/storage/sqlite/downgrade/59.sql @@ -0,0 +1,5 @@ +-- Drop control pipeline tables +DROP TABLE IF EXISTS fledge.control_source; +DROP TABLE IF EXISTS fledge.control_destination; +DROP TABLE IF EXISTS fledge.control_pipelines; +DROP TABLE IF EXISTS fledge.control_filters; diff --git a/scripts/plugins/storage/sqlite/downgrade/60.sql b/scripts/plugins/storage/sqlite/downgrade/60.sql new file mode 100644 index 0000000000..eb3c975713 --- /dev/null +++ b/scripts/plugins/storage/sqlite/downgrade/60.sql @@ -0,0 +1 @@ +DELETE FROM fledge.log_codes where code IN ('USRAD', 'USRDL', 'USRCH', 'USRRS' ); diff --git a/scripts/plugins/storage/sqlite/downgrade/61.sql b/scripts/plugins/storage/sqlite/downgrade/61.sql new file mode 100644 index 0000000000..66236db6c4 --- /dev/null +++ b/scripts/plugins/storage/sqlite/downgrade/61.sql @@ -0,0 +1,2 @@ +DROP TABLE IF EXISTS fledge.monitors; +DROP INDEX 
IF EXISTS fledge.monitors_ix1; diff --git a/scripts/plugins/storage/sqlite/downgrade/62.sql b/scripts/plugins/storage/sqlite/downgrade/62.sql new file mode 100644 index 0000000000..112b7c25ae --- /dev/null +++ b/scripts/plugins/storage/sqlite/downgrade/62.sql @@ -0,0 +1,5 @@ +-- Delete roles +DELETE FROM fledge.roles WHERE name IN ('view','control'); +-- Reset auto increment +-- You cannot use ALTER TABLE for that. The autoincrement counter is stored in a separate table named "sqlite_sequence". You can modify the value there +UPDATE sqlite_sequence SET seq=1 WHERE name="roles"; diff --git a/scripts/plugins/storage/sqlite/downgrade/63.sql b/scripts/plugins/storage/sqlite/downgrade/63.sql new file mode 100644 index 0000000000..5b68761180 --- /dev/null +++ b/scripts/plugins/storage/sqlite/downgrade/63.sql @@ -0,0 +1,4 @@ +-- Drop control flow tables +DROP TABLE IF EXISTS fledge.control_api_acl; +DROP TABLE IF EXISTS fledge.control_api_parameters; +DROP TABLE IF EXISTS fledge.control_api; \ No newline at end of file diff --git a/scripts/plugins/storage/sqlite/downgrade/64.sql b/scripts/plugins/storage/sqlite/downgrade/64.sql new file mode 100644 index 0000000000..42fa14f3d6 --- /dev/null +++ b/scripts/plugins/storage/sqlite/downgrade/64.sql @@ -0,0 +1 @@ +DELETE FROM fledge.log_codes where code IN ('ACLAD', 'ACLCH', 'ACLDL', 'CTSAD', 'CTSCH', 'CTSDL', 'CTPAD', 'CTPCH', 'CTPDL'); diff --git a/scripts/plugins/storage/sqlite/downgrade/65.sql b/scripts/plugins/storage/sqlite/downgrade/65.sql new file mode 100644 index 0000000000..60e1544ad2 --- /dev/null +++ b/scripts/plugins/storage/sqlite/downgrade/65.sql @@ -0,0 +1 @@ +DELETE FROM fledge.log_codes where code IN ('CTEAD', 'CTECH', 'CTEDL'); diff --git a/scripts/plugins/storage/sqlite/init.sql b/scripts/plugins/storage/sqlite/init.sql index 105912711f..ea55f9a235 100644 --- a/scripts/plugins/storage/sqlite/init.sql +++ b/scripts/plugins/storage/sqlite/init.sql @@ -630,6 +630,85 @@ CREATE TABLE fledge.acl_usage ( entity_name character varying(255) NOT NULL, -- associated entity name CONSTRAINT usage_acl_pkey PRIMARY KEY (name, entity_type, entity_name) ); +-- Create control_source table +CREATE TABLE fledge.control_source ( + cpsid integer PRIMARY KEY AUTOINCREMENT, -- auto source id + name character varying(40) NOT NULL, -- source name + description character varying(120) NOT NULL -- source description + ); + +-- Create control_destination table +CREATE TABLE fledge.control_destination ( + cpdid integer PRIMARY KEY AUTOINCREMENT, -- auto destination id + name character varying(40) NOT NULL, -- destination name + description character varying(120) NOT NULL -- destination description + ); + +-- Create control_pipelines table +CREATE TABLE fledge.control_pipelines ( + cpid integer PRIMARY KEY AUTOINCREMENT, -- control pipeline id + name character varying(255) NOT NULL , -- control pipeline name + stype integer , -- source type id from control_source table + sname character varying(80) , -- source name from control_source table + dtype integer , -- destination type id from control_destination table + dname character varying(80) , -- destination name from control_destination table + enabled boolean NOT NULL DEFAULT 'f' , -- false = A given pipeline is disabled by default + execution character varying(20) NOT NULL DEFAULT 'shared' -- pipeline will be executed as with shared execution model by default + ); + +-- Create control_filters table +CREATE TABLE fledge.control_filters ( + fid integer PRIMARY KEY AUTOINCREMENT, -- auto filter id + cpid integer 
NOT NULL , -- control pipeline id + forder integer NOT NULL , -- filter order + fname character varying(255) NOT NULL , -- Name of the filter instance + CONSTRAINT control_filters_fk1 FOREIGN KEY (cpid) + REFERENCES control_pipelines (cpid) MATCH SIMPLE ON UPDATE NO ACTION ON DELETE NO ACTION + ); + +-- Create control_api table +CREATE TABLE fledge.control_api ( + name character varying(255) NOT NULL , -- control API name + description character varying(255) NOT NULL , -- description of control API + type integer NOT NULL , -- 0 for write and 1 for operation + operation_name character varying(255) , -- name of the operation and only valid if type is operation + destination integer NOT NULL , -- destination of request; 0-broadcast, 1-service, 2-asset, 3-script + destination_arg character varying(255) , -- name of the destination and only used if destination is non-zero + anonymous boolean NOT NULL DEFAULT 'f' , -- anonymous callers to make request to control API; by default false + CONSTRAINT control_api_pname PRIMARY KEY (name) + ); + +-- Create control_api_parameters table +CREATE TABLE fledge.control_api_parameters ( + name character varying(255) NOT NULL , -- foreign key to fledge.control_api + parameter character varying(255) NOT NULL , -- name of parameter + value character varying(255) , -- value of parameter if constant otherwise default + constant boolean NOT NULL , -- parameter is either a constant or variable + FOREIGN KEY (name) REFERENCES control_api (name) + ); + +-- Create control_api_acl table +CREATE TABLE fledge.control_api_acl ( + name character varying(255) NOT NULL , -- foreign key to fledge.control_api + user character varying(255) NOT NULL , -- foreign key to fledge.users + FOREIGN KEY (name) REFERENCES control_api (name) , + FOREIGN KEY (user) REFERENCES users (uname) + ); + +-- Create monitors table +CREATE TABLE fledge.monitors ( + service character varying(255) NOT NULL, + monitor character varying(80) NOT NULL, + minimum integer, + maximum integer, + average integer, + samples integer, + ts DATETIME DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f+00:00', 'NOW')) + ); + +CREATE INDEX monitors_ix1 + ON monitors(service, monitor); + ---------------------------------------------------------------------- -- Initialization phase - DML ---------------------------------------------------------------------- @@ -640,7 +719,8 @@ INSERT INTO fledge.roles ( name, description ) VALUES ('admin', 'All CRUD privileges'), ('user', 'All CRUD operations and self profile management'), ('view', 'Only to view the configuration'), - ('data-view', 'Only read the data in buffer'); + ('data-view', 'Only read the data in buffer'), + ('control', 'Same as editor can do and also have access for control scripts and pipelines'); -- Users DELETE FROM fledge.users; @@ -692,7 +772,16 @@ INSERT INTO fledge.log_codes ( code, description ) ( 'ASTDP', 'Asset deprecated' ), ( 'ASTUN', 'Asset un-deprecated' ), ( 'PIPIN', 'Pip installation' ), - ( 'AUMRK', 'Audit Log Marker' ); + ( 'AUMRK', 'Audit Log Marker' ), + ( 'USRAD', 'User Added' ), + ( 'USRDL', 'User Deleted' ), + ( 'USRCH', 'User Changed' ), + ( 'USRRS', 'User Restored' ), + ( 'ACLAD', 'ACL Added' ),( 'ACLCH', 'ACL Changed' ),( 'ACLDL', 'ACL Deleted' ), + ( 'CTSAD', 'Control Script Added' ),( 'CTSCH', 'Control Script Changed' ),('CTSDL', 'Control Script Deleted' ), + ( 'CTPAD', 'Control Pipeline Added' ),( 'CTPCH', 'Control Pipeline Changed' ),('CTPDL', 'Control Pipeline Deleted' ), + ( 'CTEAD', 'Control Entrypoint Added' ),( 'CTECH', 'Control Entrypoint 
Changed' ),('CTEDL', 'Control Entrypoint Deleted' ) + ; -- -- Configuration parameters @@ -761,7 +850,7 @@ INSERT INTO fledge.schedules ( id, schedule_name, process_name, schedule_type, 'purge', -- process_name 3, -- schedule_type (interval) NULL, -- schedule_time - '01:00:00', -- schedule_interval (evey hour) + '00:10:00', -- schedule_interval (evey hour) 't', -- exclusive 't' -- enabled ); @@ -858,3 +947,23 @@ CREATE TABLE fledge.service_schema ( service character varying(255) NOT NULL, version integer NOT NULL, definition JSON); + +-- Control Source +DELETE FROM fledge.control_source; +INSERT INTO fledge.control_source ( name, description ) + VALUES ('Any', 'Any source.'), + ('Service', 'A named service in source of the control pipeline.'), + ('API', 'The control pipeline source is the REST API.'), + ('Notification', 'The control pipeline originated from a notification.'), + ('Schedule', 'The control request was triggered by a schedule.'), + ('Script', 'The control request has come from the named script.'); + +-- Control Destination +DELETE FROM fledge.control_destination; +INSERT INTO fledge.control_destination ( name, description ) + VALUES ('Any', 'Any destination.'), + ('Service', 'A name of service that is being controlled.'), + ('Asset', 'A name of asset that is being controlled.'), + ('Script', 'A name of script that will be executed.'), + ('Broadcast', 'No name is applied and pipeline will be considered for any control writes or operations to broadcast destinations.'); + diff --git a/scripts/plugins/storage/sqlite/upgrade/60.sql b/scripts/plugins/storage/sqlite/upgrade/60.sql new file mode 100644 index 0000000000..ed5a072205 --- /dev/null +++ b/scripts/plugins/storage/sqlite/upgrade/60.sql @@ -0,0 +1,54 @@ +-- Create control_source table +CREATE TABLE fledge.control_source ( + cpsid integer PRIMARY KEY AUTOINCREMENT, -- auto source id + name character varying(40) NOT NULL, -- source name + description character varying(120) NOT NULL -- source description + ); + +-- Create control_destination table +CREATE TABLE fledge.control_destination ( + cpdid integer PRIMARY KEY AUTOINCREMENT, -- auto destination id + name character varying(40) NOT NULL, -- destination name + description character varying(120) NOT NULL -- destination description + ); + +-- Create control_pipelines table +CREATE TABLE fledge.control_pipelines ( + cpid integer PRIMARY KEY AUTOINCREMENT, -- control pipeline id + name character varying(255) NOT NULL , -- control pipeline name + stype integer , -- source type id from control_source table + sname character varying(80) , -- source name from control_source table + dtype integer , -- destination type id from control_destination table + dname character varying(80) , -- destination name from control_destination table + enabled boolean NOT NULL DEFAULT 'f' , -- false = A given pipeline is disabled by default + execution character varying(20) NOT NULL DEFAULT 'shared' -- pipeline will be executed as with shared execution model by default + ); + +-- Create control_filters table +CREATE TABLE fledge.control_filters ( + fid integer PRIMARY KEY AUTOINCREMENT, -- auto filter id + cpid integer NOT NULL , -- control pipeline id + forder integer NOT NULL , -- filter order + fname character varying(255) NOT NULL , -- Name of the filter instance + CONSTRAINT control_filters_fk1 FOREIGN KEY (cpid) + REFERENCES control_pipelines (cpid) MATCH SIMPLE ON UPDATE NO ACTION ON DELETE NO ACTION + ); + +-- Insert predefined entries for Control Source +DELETE FROM fledge.control_source; 
+INSERT INTO fledge.control_source ( name, description ) + VALUES ('Any', 'Any source.'), + ('Service', 'A named service in source of the control pipeline.'), + ('API', 'The control pipeline source is the REST API.'), + ('Notification', 'The control pipeline originated from a notification.'), + ('Schedule', 'The control request was triggered by a schedule.'), + ('Script', 'The control request has come from the named script.'); + +-- Insert predefined entries for Control Destination +DELETE FROM fledge.control_destination; +INSERT INTO fledge.control_destination ( name, description ) + VALUES ('Any', 'Any destination.'), + ('Service', 'A name of service that is being controlled.'), + ('Asset', 'A name of asset that is being controlled.'), + ('Script', 'A name of script that will be executed.'), + ('Broadcast', 'No name is applied and pipeline will be considered for any control writes or operations to broadcast destinations.'); diff --git a/scripts/plugins/storage/sqlite/upgrade/61.sql b/scripts/plugins/storage/sqlite/upgrade/61.sql new file mode 100644 index 0000000000..f69e72ae07 --- /dev/null +++ b/scripts/plugins/storage/sqlite/upgrade/61.sql @@ -0,0 +1,6 @@ +INSERT INTO fledge.log_codes ( code, description ) + VALUES + ( 'USRAD', 'User Added' ), + ( 'USRDL', 'User Deleted' ), + ( 'USRCH', 'User Changed' ), + ( 'USRRS', 'User Restored' ); diff --git a/scripts/plugins/storage/sqlite/upgrade/62.sql b/scripts/plugins/storage/sqlite/upgrade/62.sql new file mode 100644 index 0000000000..fec0571be1 --- /dev/null +++ b/scripts/plugins/storage/sqlite/upgrade/62.sql @@ -0,0 +1,14 @@ + +CREATE TABLE fledge.monitors ( + service character varying(255) NOT NULL, + monitor character varying(80) NOT NULL, + minimum integer, + maximum integer, + average integer, + samples integer, + ts DATETIME DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f+00:00', 'NOW')) +); + + +CREATE INDEX monitors_ix1 + ON monitors(service, monitor); diff --git a/scripts/plugins/storage/sqlite/upgrade/63.sql b/scripts/plugins/storage/sqlite/upgrade/63.sql new file mode 100644 index 0000000000..8295ca306a --- /dev/null +++ b/scripts/plugins/storage/sqlite/upgrade/63.sql @@ -0,0 +1,3 @@ +-- Roles +INSERT INTO fledge.roles ( name, description ) + VALUES ('control', 'Same as editor can do and also have access for control scripts and pipelines'); diff --git a/scripts/plugins/storage/sqlite/upgrade/64.sql b/scripts/plugins/storage/sqlite/upgrade/64.sql new file mode 100644 index 0000000000..ab191fa824 --- /dev/null +++ b/scripts/plugins/storage/sqlite/upgrade/64.sql @@ -0,0 +1,28 @@ +-- Create control_api table +CREATE TABLE fledge.control_api ( + name character varying(255) NOT NULL , -- control API name + description character varying(255) NOT NULL , -- description of control API + type integer NOT NULL , -- 0 for write and 1 for operation + operation_name character varying(255) , -- name of the operation and only valid if type is operation + destination integer NOT NULL , -- destination of request; 0-broadcast, 1-service, 2-asset, 3-script + destination_arg character varying(255) , -- name of the destination and only used if destination is non-zero + anonymous boolean NOT NULL DEFAULT 'f' , -- anonymous callers to make request to control API; by default false + CONSTRAINT control_api_pname PRIMARY KEY (name) + ); + +-- Create control_api_parameters table +CREATE TABLE fledge.control_api_parameters ( + name character varying(255) NOT NULL , -- foreign key to fledge.control_api + parameter character varying(255) NOT NULL , -- name of parameter + 
value character varying(255) , -- value of parameter if constant otherwise default + constant boolean NOT NULL , -- parameter is either a constant or variable + FOREIGN KEY (name) REFERENCES control_api (name) + ); + +-- Create control_api_acl table +CREATE TABLE fledge.control_api_acl ( + name character varying(255) NOT NULL , -- foreign key to fledge.control_api + user character varying(255) NOT NULL , -- foreign key to fledge.users + FOREIGN KEY (name) REFERENCES control_api (name) , + FOREIGN KEY (user) REFERENCES users (uname) + ); diff --git a/scripts/plugins/storage/sqlite/upgrade/65.sql b/scripts/plugins/storage/sqlite/upgrade/65.sql new file mode 100644 index 0000000000..9a5e1f0435 --- /dev/null +++ b/scripts/plugins/storage/sqlite/upgrade/65.sql @@ -0,0 +1,4 @@ +INSERT INTO fledge.log_codes ( code, description ) + VALUES ( 'ACLAD', 'ACL Added' ),( 'ACLCH', 'ACL Changed' ),( 'ACLDL', 'ACL Deleted' ), + ( 'CTSAD', 'Control Script Added' ),( 'CTSCH', 'Control Script Changed' ),('CTSDL', 'Control Script Deleted' ), + ( 'CTPAD', 'Control Pipeline Added' ),( 'CTPCH', 'Control Pipeline Changed' ),('CTPDL', 'Control Pipeline Deleted' ); diff --git a/scripts/plugins/storage/sqlite/upgrade/66.sql b/scripts/plugins/storage/sqlite/upgrade/66.sql new file mode 100644 index 0000000000..d2b521c76f --- /dev/null +++ b/scripts/plugins/storage/sqlite/upgrade/66.sql @@ -0,0 +1,4 @@ +INSERT INTO fledge.log_codes ( code, description ) + VALUES ( 'CTEAD', 'Control Entrypoint Added' ), + ( 'CTECH', 'Control Entrypoint Changed' ), + ('CTEDL', 'Control Entrypoint Deleted' ); \ No newline at end of file diff --git a/scripts/plugins/storage/sqlitelb/downgrade/59.sql b/scripts/plugins/storage/sqlitelb/downgrade/59.sql new file mode 100644 index 0000000000..c1c032ab8a --- /dev/null +++ b/scripts/plugins/storage/sqlitelb/downgrade/59.sql @@ -0,0 +1,5 @@ +-- Drop control pipeline tables +DROP TABLE IF EXISTS fledge.control_source; +DROP TABLE IF EXISTS fledge.control_destination; +DROP TABLE IF EXISTS fledge.control_pipelines; +DROP TABLE IF EXISTS fledge.control_filters; diff --git a/scripts/plugins/storage/sqlitelb/downgrade/60.sql b/scripts/plugins/storage/sqlitelb/downgrade/60.sql new file mode 100644 index 0000000000..eb3c975713 --- /dev/null +++ b/scripts/plugins/storage/sqlitelb/downgrade/60.sql @@ -0,0 +1 @@ +DELETE FROM fledge.log_codes where code IN ('USRAD', 'USRDL', 'USRCH', 'USRRS' ); diff --git a/scripts/plugins/storage/sqlitelb/downgrade/61.sql b/scripts/plugins/storage/sqlitelb/downgrade/61.sql new file mode 100644 index 0000000000..66236db6c4 --- /dev/null +++ b/scripts/plugins/storage/sqlitelb/downgrade/61.sql @@ -0,0 +1,2 @@ +DROP TABLE IF EXISTS fledge.monitors; +DROP INDEX IF EXISTS fledge.monitors_ix1; diff --git a/scripts/plugins/storage/sqlitelb/downgrade/62.sql b/scripts/plugins/storage/sqlitelb/downgrade/62.sql new file mode 100644 index 0000000000..112b7c25ae --- /dev/null +++ b/scripts/plugins/storage/sqlitelb/downgrade/62.sql @@ -0,0 +1,5 @@ +-- Delete roles +DELETE FROM fledge.roles WHERE name IN ('view','control'); +-- Reset auto increment +-- You cannot use ALTER TABLE for that. The autoincrement counter is stored in a separate table named "sqlite_sequence". 
You can modify the value there +UPDATE sqlite_sequence SET seq=1 WHERE name="roles"; diff --git a/scripts/plugins/storage/sqlitelb/downgrade/63.sql b/scripts/plugins/storage/sqlitelb/downgrade/63.sql new file mode 100644 index 0000000000..5b68761180 --- /dev/null +++ b/scripts/plugins/storage/sqlitelb/downgrade/63.sql @@ -0,0 +1,4 @@ +-- Drop control flow tables +DROP TABLE IF EXISTS fledge.control_api_acl; +DROP TABLE IF EXISTS fledge.control_api_parameters; +DROP TABLE IF EXISTS fledge.control_api; \ No newline at end of file diff --git a/scripts/plugins/storage/sqlitelb/downgrade/64.sql b/scripts/plugins/storage/sqlitelb/downgrade/64.sql new file mode 100644 index 0000000000..42fa14f3d6 --- /dev/null +++ b/scripts/plugins/storage/sqlitelb/downgrade/64.sql @@ -0,0 +1 @@ +DELETE FROM fledge.log_codes where code IN ('ACLAD', 'ACLCH', 'ACLDL', 'CTSAD', 'CTSCH', 'CTSDL', 'CTPAD', 'CTPCH', 'CTPDL'); diff --git a/scripts/plugins/storage/sqlitelb/downgrade/65.sql b/scripts/plugins/storage/sqlitelb/downgrade/65.sql new file mode 100644 index 0000000000..60e1544ad2 --- /dev/null +++ b/scripts/plugins/storage/sqlitelb/downgrade/65.sql @@ -0,0 +1 @@ +DELETE FROM fledge.log_codes where code IN ('CTEAD', 'CTECH', 'CTEDL'); diff --git a/scripts/plugins/storage/sqlitelb/init.sql b/scripts/plugins/storage/sqlitelb/init.sql index 8da2ee2443..b7c2adce2c 100644 --- a/scripts/plugins/storage/sqlitelb/init.sql +++ b/scripts/plugins/storage/sqlitelb/init.sql @@ -630,6 +630,85 @@ CREATE TABLE fledge.acl_usage ( entity_name character varying(255) NOT NULL, -- associated entity name CONSTRAINT usage_acl_pkey PRIMARY KEY (name, entity_type, entity_name) ); +-- Create control_source table +CREATE TABLE fledge.control_source ( + cpsid integer PRIMARY KEY AUTOINCREMENT, -- auto source id + name character varying(40) NOT NULL, -- source name + description character varying(120) NOT NULL -- source description + ); + +-- Create control_destination table +CREATE TABLE fledge.control_destination ( + cpdid integer PRIMARY KEY AUTOINCREMENT, -- auto destination id + name character varying(40) NOT NULL, -- destination name + description character varying(120) NOT NULL -- destination description + ); + +-- Create control_pipelines table +CREATE TABLE fledge.control_pipelines ( + cpid integer PRIMARY KEY AUTOINCREMENT, -- control pipeline id + name character varying(255) NOT NULL , -- control pipeline name + stype integer , -- source type id from control_source table + sname character varying(80) , -- source name from control_source table + dtype integer , -- destination type id from control_destination table + dname character varying(80) , -- destination name from control_destination table + enabled boolean NOT NULL DEFAULT 'f' , -- false = A given pipeline is disabled by default + execution character varying(20) NOT NULL DEFAULT 'shared' -- pipeline will be executed as with shared execution model by default + ); + +-- Create control_filters table +CREATE TABLE fledge.control_filters ( + fid integer PRIMARY KEY AUTOINCREMENT, -- auto filter id + cpid integer NOT NULL , -- control pipeline id + forder integer NOT NULL , -- filter order + fname character varying(255) NOT NULL , -- Name of the filter instance + CONSTRAINT control_filters_fk1 FOREIGN KEY (cpid) + REFERENCES control_pipelines (cpid) MATCH SIMPLE ON UPDATE NO ACTION ON DELETE NO ACTION + ); + +-- Create control_api table +CREATE TABLE fledge.control_api ( + name character varying(255) NOT NULL , -- control API name + description character varying(255) NOT NULL , 
-- description of control API + type integer NOT NULL , -- 0 for write and 1 for operation + operation_name character varying(255) , -- name of the operation and only valid if type is operation + destination integer NOT NULL , -- destination of request; 0-broadcast, 1-service, 2-asset, 3-script + destination_arg character varying(255) , -- name of the destination and only used if destination is non-zero + anonymous boolean NOT NULL DEFAULT 'f' , -- anonymous callers to make request to control API; by default false + CONSTRAINT control_api_pname PRIMARY KEY (name) + ); + +-- Create control_api_parameters table +CREATE TABLE fledge.control_api_parameters ( + name character varying(255) NOT NULL , -- foreign key to fledge.control_api + parameter character varying(255) NOT NULL , -- name of parameter + value character varying(255) , -- value of parameter if constant otherwise default + constant boolean NOT NULL , -- parameter is either a constant or variable + FOREIGN KEY (name) REFERENCES control_api (name) + ); + +-- Create control_api_acl table +CREATE TABLE fledge.control_api_acl ( + name character varying(255) NOT NULL , -- foreign key to fledge.control_api + user character varying(255) NOT NULL , -- foreign key to fledge.users + FOREIGN KEY (name) REFERENCES control_api (name) , + FOREIGN KEY (user) REFERENCES users (uname) + ); + +-- Create monitors table +CREATE TABLE fledge.monitors ( + service character varying(255) NOT NULL, + monitor character varying(80) NOT NULL, + minimum integer, + maximum integer, + average integer, + samples integer, + ts DATETIME DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f+00:00', 'NOW')) + ); + +CREATE INDEX monitors_ix1 + ON monitors(service, monitor); + ---------------------------------------------------------------------- -- Initialization phase - DML ---------------------------------------------------------------------- @@ -640,7 +719,8 @@ INSERT INTO fledge.roles ( name, description ) VALUES ('admin', 'All CRUD privileges'), ('user', 'All CRUD operations and self profile management'), ('view', 'Only to view the configuration'), - ('data-view', 'Only read the data in buffer'); + ('data-view', 'Only read the data in buffer'), + ('control', 'Same as editor can do and also have access for control scripts and pipelines'); -- Users DELETE FROM fledge.users; @@ -692,7 +772,16 @@ INSERT INTO fledge.log_codes ( code, description ) ( 'ASTDP', 'Asset deprecated' ), ( 'ASTUN', 'Asset un-deprecated' ), ( 'PIPIN', 'Pip installation' ), - ( 'AUMRK', 'Audit Log Marker' ); + ( 'AUMRK', 'Audit Log Marker' ), + ( 'USRAD', 'User Added' ), + ( 'USRDL', 'User Deleted' ), + ( 'USRCH', 'User Changed' ), + ( 'USRRS', 'User Restored' ), + ( 'ACLAD', 'ACL Added' ),( 'ACLCH', 'ACL Changed' ),( 'ACLDL', 'ACL Deleted' ), + ( 'CTSAD', 'Control Script Added' ),( 'CTSCH', 'Control Script Changed' ),('CTSDL', 'Control Script Deleted' ), + ( 'CTPAD', 'Control Pipeline Added' ),( 'CTPCH', 'Control Pipeline Changed' ),('CTPDL', 'Control Pipeline Deleted' ), + ( 'CTEAD', 'Control Entrypoint Added' ),( 'CTECH', 'Control Entrypoint Changed' ),('CTEDL', 'Control Entrypoint Deleted' ) + ; -- -- Configuration parameters @@ -761,7 +850,7 @@ INSERT INTO fledge.schedules ( id, schedule_name, process_name, schedule_type, 'purge', -- process_name 3, -- schedule_type (interval) NULL, -- schedule_time - '01:00:00', -- schedule_interval (evey hour) + '00:10:00', -- schedule_interval (evey hour) 't', -- exclusive 't' -- enabled ); @@ -857,3 +946,23 @@ CREATE TABLE fledge.service_schema ( service character 
varying(255) NOT NULL, version integer NOT NULL, definition JSON); + +-- Control Source +DELETE FROM fledge.control_source; +INSERT INTO fledge.control_source ( name, description ) + VALUES ('Any', 'Any source.'), + ('Service', 'A named service in source of the control pipeline.'), + ('API', 'The control pipeline source is the REST API.'), + ('Notification', 'The control pipeline originated from a notification.'), + ('Schedule', 'The control request was triggered by a schedule.'), + ('Script', 'The control request has come from the named script.'); + +-- Control Destination +DELETE FROM fledge.control_destination; +INSERT INTO fledge.control_destination ( name, description ) + VALUES ('Any', 'Any destination.'), + ('Service', 'A name of service that is being controlled.'), + ('Asset', 'A name of asset that is being controlled.'), + ('Script', 'A name of script that will be executed.'), + ('Broadcast', 'No name is applied and pipeline will be considered for any control writes or operations to broadcast destinations.'); + diff --git a/scripts/plugins/storage/sqlitelb/upgrade/60.sql b/scripts/plugins/storage/sqlitelb/upgrade/60.sql new file mode 100644 index 0000000000..ed5a072205 --- /dev/null +++ b/scripts/plugins/storage/sqlitelb/upgrade/60.sql @@ -0,0 +1,54 @@ +-- Create control_source table +CREATE TABLE fledge.control_source ( + cpsid integer PRIMARY KEY AUTOINCREMENT, -- auto source id + name character varying(40) NOT NULL, -- source name + description character varying(120) NOT NULL -- source description + ); + +-- Create control_destination table +CREATE TABLE fledge.control_destination ( + cpdid integer PRIMARY KEY AUTOINCREMENT, -- auto destination id + name character varying(40) NOT NULL, -- destination name + description character varying(120) NOT NULL -- destination description + ); + +-- Create control_pipelines table +CREATE TABLE fledge.control_pipelines ( + cpid integer PRIMARY KEY AUTOINCREMENT, -- control pipeline id + name character varying(255) NOT NULL , -- control pipeline name + stype integer , -- source type id from control_source table + sname character varying(80) , -- source name from control_source table + dtype integer , -- destination type id from control_destination table + dname character varying(80) , -- destination name from control_destination table + enabled boolean NOT NULL DEFAULT 'f' , -- false = A given pipeline is disabled by default + execution character varying(20) NOT NULL DEFAULT 'shared' -- pipeline will be executed as with shared execution model by default + ); + +-- Create control_filters table +CREATE TABLE fledge.control_filters ( + fid integer PRIMARY KEY AUTOINCREMENT, -- auto filter id + cpid integer NOT NULL , -- control pipeline id + forder integer NOT NULL , -- filter order + fname character varying(255) NOT NULL , -- Name of the filter instance + CONSTRAINT control_filters_fk1 FOREIGN KEY (cpid) + REFERENCES control_pipelines (cpid) MATCH SIMPLE ON UPDATE NO ACTION ON DELETE NO ACTION + ); + +-- Insert predefined entries for Control Source +DELETE FROM fledge.control_source; +INSERT INTO fledge.control_source ( name, description ) + VALUES ('Any', 'Any source.'), + ('Service', 'A named service in source of the control pipeline.'), + ('API', 'The control pipeline source is the REST API.'), + ('Notification', 'The control pipeline originated from a notification.'), + ('Schedule', 'The control request was triggered by a schedule.'), + ('Script', 'The control request has come from the named script.'); + +-- Insert predefined entries for 
Control Destination +DELETE FROM fledge.control_destination; +INSERT INTO fledge.control_destination ( name, description ) + VALUES ('Any', 'Any destination.'), + ('Service', 'A name of service that is being controlled.'), + ('Asset', 'A name of asset that is being controlled.'), + ('Script', 'A name of script that will be executed.'), + ('Broadcast', 'No name is applied and pipeline will be considered for any control writes or operations to broadcast destinations.'); diff --git a/scripts/plugins/storage/sqlitelb/upgrade/61.sql b/scripts/plugins/storage/sqlitelb/upgrade/61.sql new file mode 100644 index 0000000000..f69e72ae07 --- /dev/null +++ b/scripts/plugins/storage/sqlitelb/upgrade/61.sql @@ -0,0 +1,6 @@ +INSERT INTO fledge.log_codes ( code, description ) + VALUES + ( 'USRAD', 'User Added' ), + ( 'USRDL', 'User Deleted' ), + ( 'USRCH', 'User Changed' ), + ( 'USRRS', 'User Restored' ); diff --git a/scripts/plugins/storage/sqlitelb/upgrade/62.sql b/scripts/plugins/storage/sqlitelb/upgrade/62.sql new file mode 100644 index 0000000000..9119862285 --- /dev/null +++ b/scripts/plugins/storage/sqlitelb/upgrade/62.sql @@ -0,0 +1,13 @@ + +CREATE TABLE fledge.monitors ( + service character varying(255) NOT NULL, + monitor character varying(80) NOT NULL, + minimum integer, + maximum integer, + average integer, + samples integer, + ts DATETIME DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f+00:00', 'NOW')) +); + +CREATE INDEX monitors_ix1 + ON monitors(service, monitor); diff --git a/scripts/plugins/storage/sqlitelb/upgrade/63.sql b/scripts/plugins/storage/sqlitelb/upgrade/63.sql new file mode 100644 index 0000000000..8295ca306a --- /dev/null +++ b/scripts/plugins/storage/sqlitelb/upgrade/63.sql @@ -0,0 +1,3 @@ +-- Roles +INSERT INTO fledge.roles ( name, description ) + VALUES ('control', 'Same as editor can do and also have access for control scripts and pipelines'); diff --git a/scripts/plugins/storage/sqlitelb/upgrade/64.sql b/scripts/plugins/storage/sqlitelb/upgrade/64.sql new file mode 100644 index 0000000000..ab191fa824 --- /dev/null +++ b/scripts/plugins/storage/sqlitelb/upgrade/64.sql @@ -0,0 +1,28 @@ +-- Create control_api table +CREATE TABLE fledge.control_api ( + name character varying(255) NOT NULL , -- control API name + description character varying(255) NOT NULL , -- description of control API + type integer NOT NULL , -- 0 for write and 1 for operation + operation_name character varying(255) , -- name of the operation and only valid if type is operation + destination integer NOT NULL , -- destination of request; 0-broadcast, 1-service, 2-asset, 3-script + destination_arg character varying(255) , -- name of the destination and only used if destination is non-zero + anonymous boolean NOT NULL DEFAULT 'f' , -- anonymous callers to make request to control API; by default false + CONSTRAINT control_api_pname PRIMARY KEY (name) + ); + +-- Create control_api_parameters table +CREATE TABLE fledge.control_api_parameters ( + name character varying(255) NOT NULL , -- foreign key to fledge.control_api + parameter character varying(255) NOT NULL , -- name of parameter + value character varying(255) , -- value of parameter if constant otherwise default + constant boolean NOT NULL , -- parameter is either a constant or variable + FOREIGN KEY (name) REFERENCES control_api (name) + ); + +-- Create control_api_acl table +CREATE TABLE fledge.control_api_acl ( + name character varying(255) NOT NULL , -- foreign key to fledge.control_api + user character varying(255) NOT NULL , -- foreign key to fledge.users + 
FOREIGN KEY (name) REFERENCES control_api (name) , + FOREIGN KEY (user) REFERENCES users (uname) + ); diff --git a/scripts/plugins/storage/sqlitelb/upgrade/65.sql b/scripts/plugins/storage/sqlitelb/upgrade/65.sql new file mode 100644 index 0000000000..9a5e1f0435 --- /dev/null +++ b/scripts/plugins/storage/sqlitelb/upgrade/65.sql @@ -0,0 +1,4 @@ +INSERT INTO fledge.log_codes ( code, description ) + VALUES ( 'ACLAD', 'ACL Added' ),( 'ACLCH', 'ACL Changed' ),( 'ACLDL', 'ACL Deleted' ), + ( 'CTSAD', 'Control Script Added' ),( 'CTSCH', 'Control Script Changed' ),('CTSDL', 'Control Script Deleted' ), + ( 'CTPAD', 'Control Pipeline Added' ),( 'CTPCH', 'Control Pipeline Changed' ),('CTPDL', 'Control Pipeline Deleted' ); diff --git a/scripts/plugins/storage/sqlitelb/upgrade/66.sql b/scripts/plugins/storage/sqlitelb/upgrade/66.sql new file mode 100644 index 0000000000..d2b521c76f --- /dev/null +++ b/scripts/plugins/storage/sqlitelb/upgrade/66.sql @@ -0,0 +1,4 @@ +INSERT INTO fledge.log_codes ( code, description ) + VALUES ( 'CTEAD', 'Control Entrypoint Added' ), + ( 'CTECH', 'Control Entrypoint Changed' ), + ('CTEDL', 'Control Entrypoint Deleted' ); \ No newline at end of file diff --git a/scripts/services/north_C b/scripts/services/north_C index b2dbbda0d1..80fa8a7968 100755 --- a/scripts/services/north_C +++ b/scripts/services/north_C @@ -10,5 +10,29 @@ if [ ! -d "${FLEDGE_ROOT}" ]; then exit 1 fi +runvalgrind=n +if [ "$VALGRIND_NORTH" != "" ]; then + for i in "$@"; do + case $i in + --name=*) + name="`echo $i | sed -e s/--name=//`" + ;; + esac + done + services=$(echo $VALGRIND_NORTH | tr ";" "\n") + for service in $services; do + if [ "$service" = "$name" ]; then + runvalgrind=y + fi + done +fi + cd "${FLEDGE_ROOT}/services" -./fledge.services.north "$@" +if [ "$runvalgrind" = "y" ]; then + file=${HOME}/north.${name}.valgrind.out + rm -f $file + valgrind --leak-check=full --trace-children=yes --show-leak-kinds=all --track-origins=yes --log-file=$file ./fledge.services.north "$@" +else + ./fledge.services.north "$@" +fi + diff --git a/scripts/services/south_c b/scripts/services/south_c index a3c0bfa5a8..6f3d2c13e9 100755 --- a/scripts/services/south_c +++ b/scripts/services/south_c @@ -11,5 +11,28 @@ fi cd "${FLEDGE_ROOT}/services" -./fledge.services.south "$@" +runvalgrind=n +if [ "$VALGRIND_SOUTH" != "" ]; then + for i in "$@"; do + case $i in + --name=*) + name=`echo $i | sed -e s/--name=//` + ;; + esac + done + services=$(echo $VALGRIND_SOUTH | tr ";" "\n") + for service in $services; do + if [ "$service" = "$name" ]; then + runvalgrind=y + fi + done +fi + +if [ "$runvalgrind" = "y" ]; then + file="$HOME/south.${name}.valgrind.out" + rm -f "$file" + valgrind --leak-check=full --trace-children=yes --log-file="$file" ./fledge.services.south "$@" +else + ./fledge.services.south "$@" +fi diff --git a/scripts/services/storage b/scripts/services/storage index dcea1179e9..0262751327 100755 --- a/scripts/services/storage +++ b/scripts/services/storage @@ -56,10 +56,17 @@ if [[ "$1" != "--plugin" ]]; then storagePlugin=${res[0]} managedEngine=${res[1]} # Call plugin check: this will create database if not set yet - ${pluginScriptPath}/${storagePlugin}.sh init ${FLEDGE_SCHEMA} ${managedEngine} -fi - -if [[ "$1" != "--readingsPlugin" ]]; then + ${pluginScriptPath}/${storagePlugin}.sh init ${FLEDGE_SCHEMA} ${managedEngine} + if [[ "$VALGRIND_STORAGE" = "y" ]]; then + write_log "" "scripts.services.storage" "warn" "Running storage service under valgrind" "logonly" "" + if [[ -f 
"$HOME/storage.valgrind.out" ]]; then + rm $HOME/storage.valgrind.out + fi + valgrind --leak-check=full --show-leak-kinds=all --trace-children=yes --log-file=$HOME/storage.valgrind.out ${storageExec} "$@" -d & + else + ${storageExec} "$@" + fi +elif [[ "$1" != "--readingsPlugin" ]]; then # Get db schema FLEDGE_VERSION_FILE="${FLEDGE_ROOT}/VERSION" FLEDGE_SCHEMA=`cat ${FLEDGE_VERSION_FILE} | tr -d ' ' | grep -i "FLEDGE_SCHEMA=" | sed -e 's/\(.*\)=\(.*\)/\2/g'` @@ -68,9 +75,17 @@ if [[ "$1" != "--readingsPlugin" ]]; then storagePlugin=${res[0]} managedEngine=${res[1]} # Call plugin check: this will create database if not set yet - ${pluginScriptPath}/${storagePlugin}.sh init ${FLEDGE_SCHEMA} ${managedEngine} + if [[ -x ${pluginScriptPath}/${storagePlugin}.sh ]]; then + ${pluginScriptPath}/${storagePlugin}.sh init ${FLEDGE_SCHEMA} ${managedEngine} + fi + ${storageExec} "$@" +elif [[ "$VALGRIND_STORAGE" = "y" ]]; then + write_log "" "scripts.services.storage" "warn" "Running storage service under valgrind" "logonly" "" + if [[ -f "$HOME/storage.valgrind.out" ]]; then + rm $HOME/storage.valgrind.out + fi + valgrind --leak-check=full --show-leak-kinds=all --trace-children=yes --log-file=$HOME/storage.valgrind.out ${storageExec} "$@" -d & +else + ${storageExec} "$@" fi - -# Run storage service -${storageExec} "$@" exit 0 diff --git a/scripts/tasks/statistics b/scripts/tasks/statistics index 1206e6fe6a..b1e2b1e9a4 100755 --- a/scripts/tasks/statistics +++ b/scripts/tasks/statistics @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Run a Fledge south service written in Python +# Run a Fledge task written in C if [ "${FLEDGE_ROOT}" = "" ]; then FLEDGE_ROOT=/usr/local/fledge fi @@ -9,20 +9,12 @@ if [ ! -d "${FLEDGE_ROOT}" ]; then exit 1 fi -if [ ! -d "${FLEDGE_ROOT}/python" ]; then - logger "Fledge home directory is missing the Python installation" - exit 1 -fi - -# We run the Python code from the python directory -cd "${FLEDGE_ROOT}/python" - os_name=`(grep -o '^NAME=.*' /etc/os-release | cut -f2 -d\" | sed 's/"//g')` if [[ $os_name == *"Raspbian"* ]]; then - cpulimit -l 40 -- python3 -m fledge.tasks.statistics "$@" + cpulimit -l 40 -- $FLEDGE_ROOT/tasks/statistics_history "$@" else # Standard execution on other platforms - python3 -m fledge.tasks.statistics "$@" + $FLEDGE_ROOT/tasks/statistics_history "$@" fi diff --git a/tests/system/lab/README.rst b/tests/system/lab/README.rst index 0fd6bf416c..80596b28e5 100644 --- a/tests/system/lab/README.rst +++ b/tests/system/lab/README.rst @@ -18,10 +18,39 @@ To run the test for required (say 10) iterations or until it fails - execute `./ **`run` and `run_until_fails` use the following scripts in its execution:** -**remove**: apt removes all fledge packages; deletes /usr/local/fledge; +- **remove**: apt removes all fledge packages; deletes /usr/local/fledge; -**install**: apt update; install fledge; install gui; install other fledge packages +- **install**: apt update; install fledge; install gui; install other fledge packages -**test**: curl commands to simulate all gui actions in the lab (except game) +- **test**: curl commands to simulate all gui actions in the lab (except game) -**reset**: Reset script is to stop fledge; reset the db and delete any python scripts. \ No newline at end of file +- **reset**: Reset script is to stop fledge; reset the db and delete any python scripts. + + +**`test.config` contains following variables that are used by `test` scripts in its execution:** + +- **FLEDGE_IP**: IP Address of the system on which fledge is running. 
+
+- **PI_IP**: IP Address of PI Web API.
+
+- **PI_USER**: Username used for accessing PI Web API.
+
+- **PI_PASSWORD**: Password used for PI Web API.
+
+- **PI_PORT**: Port number of PI Web API to which fledge will connect.
+
+- **PI_DB**: Database in which the PI Point is to be stored.
+
+- **MAX_RETRIES**: Number of retries when checking data and information via the API before declaring that the expected result was not seen.
+
+- **SLEEP_FIX**: Time to sleep to fix bugs. This should be zero.
+
+- **EXIT_EARLY**: A Boolean variable; if set to '1', the test stops execution as soon as any error occurs.
+
+- **ADD_NORTH_AS_SERVICE**: Defines whether North (OMF) is created as a task or a service.
+
+- **VERIFY_EGRESS_TO_PI**: A Boolean variable; if set to '1', North (OMF) is created and the data sent to PI Web API is verified.
+
+- **STORAGE**: Defines the storage plugin used by fledge, i.e. sqlite, sqlitelb or postgres.
+
+- **READING_PLUGIN_DB**: By default this contains "Use main plugin", which means the readings plugin will be the same as that set in the `STORAGE` variable. Apart from "Use main plugin", it may also be set to sqlite, sqlitelb, sqlite-in-memory or postgres.
diff --git a/tests/system/lab/reset b/tests/system/lab/reset
index 469b3d5a65..8967b1ba09 100755
--- a/tests/system/lab/reset
+++ b/tests/system/lab/reset
@@ -1,45 +1,35 @@
 #!/usr/bin/env bash
-_postgres() {
- sudo apt install -y postgresql
- sudo -u postgres createuser -d "$(whoami)"
- sudo sed -i 's/"plugin":{"value":"sqlite"/"plugin":{"value":"postgres"/g' /usr/local/fledge/data/etc/storage.json
- sudo sed -i 's/"readingPlugin":{"value":"sqlitememory"/"readingPlugin":{"value":""/g' /usr/local/fledge/data/etc/storage.json
-}
-_sqliteinmemory () {
- sudo sed -i 's/"plugin":{"value":"postgres"/"plugin":{"value":"sqlite"/g' /usr/local/fledge/data/etc/storage.json
- sudo sed -i 's/"readingPlugin":{"value":""/"readingPlugin":{"value":"sqlitememory"/g' /usr/local/fledge/data/etc/storage.json
-}
+FLEDGE_ROOT="/usr/local/fledge"
-_sqlite () {
- sudo sed -i 's/"plugin":{"value":"postgres"/"plugin":{"value":"sqlite"/g' /usr/local/fledge/data/etc/storage.json
- sudo sed -i 's/"readingPlugin":{"value":"sqlitememory"/"readingPlugin":{"value":""/g' /usr/local/fledge/data/etc/storage.json
+install_postgres() {
+ sudo apt install -y postgresql
+ sudo -u postgres createuser -d "$(whoami)"
 }
-_sqlitelb () {
- sudo sed -i 's/"plugin":{"value":"sqlite"/"plugin":{"value":"sqlitelb"/g' /usr/local/fledge/data/etc/storage.json
- sudo sed -i 's/"readingPlugin":{"value":"sqlitememory"/"readingPlugin":{"value":""/g' /usr/local/fledge/data/etc/storage.json
+_config_reading_db () {
+ if [[ "postgres" == @($1|$2) ]]
+ then
+ install_postgres
+ fi
+ [[ -f $FLEDGE_ROOT/data/etc/storage.json ]] && echo $(jq -c --arg STORAGE_PLUGIN_VAL "${1}" '.plugin.value=$STORAGE_PLUGIN_VAL' $FLEDGE_ROOT/data/etc/storage.json) > $FLEDGE_ROOT/data/etc/storage.json || true
+ [[ -f $FLEDGE_ROOT/data/etc/storage.json ]] && echo $(jq -c --arg READING_PLUGIN_VAL "${2}" '.readingPlugin.value=$READING_PLUGIN_VAL' $FLEDGE_ROOT/data/etc/storage.json) > $FLEDGE_ROOT/data/etc/storage.json || true
 }
 # check for storage plugin
 . 
./test.config -if [[ ${STORAGE} == "postgres" ]] -then - _postgres -elif [[ ${STORAGE} == "sqlite-in-memory" ]] -then - _sqliteinmemory -elif [[ ${STORAGE} == "sqlitelb" ]] +if [[ ${STORAGE} == @(sqlite|postgres|sqlitelb) && ${READING_PLUGIN_DB} == @(Use main plugin|sqlitememory|sqlite|postgres|sqlitelb) ]] then - _sqlitelb + _config_reading_db "${STORAGE}" "${READING_PLUGIN_DB}" else - _sqlite + echo "Invalid Storage Configuration" + exit 1 fi echo "Stopping Fledge using systemctl ..." sudo systemctl stop fledge -echo "YES" | /usr/local/fledge/bin/fledge reset || exit 1 +echo -e "YES\nYES" | $FLEDGE_ROOT/bin/fledge reset || exit 1 echo echo "Starting Fledge using systemctl ..." sudo systemctl start fledge diff --git a/tests/system/lab/run b/tests/system/lab/run index 835b853e44..3a4981d78f 100755 --- a/tests/system/lab/run +++ b/tests/system/lab/run @@ -11,5 +11,5 @@ fi ./remove ./install ${VERSION} -./reset +./reset || exit 1 ./test diff --git a/tests/system/lab/run_until_fails b/tests/system/lab/run_until_fails index 1d92e45286..f0afd387bc 100755 --- a/tests/system/lab/run_until_fails +++ b/tests/system/lab/run_until_fails @@ -25,11 +25,7 @@ for i in $(seq ${ITERATIONS}); do echo "Run $i" echo "***************" echo "***************" - ./reset - ./test - if [[ $? -ne 0 ]] - then - exit 1 - fi + ./reset || exit 1 + ./test || exit 1 done diff --git a/tests/system/lab/test b/tests/system/lab/test index 137630d483..c802b52b06 100755 --- a/tests/system/lab/test +++ b/tests/system/lab/test @@ -12,6 +12,8 @@ CRESET="${CPFX}0m" . ./test.config LAB_ASSET_NAME="PILAB-sinusoid" +AF_HIERARCHY_LEVEL="/$(date +%F | tr - _)_PIlabSinelvl1/PIlabSinelvl2/PIlabSinelvl3" + rm -f err.txt touch err.txt @@ -22,6 +24,7 @@ display_and_collect_err () { URL="http://$FLEDGE_IP:8081/fledge" +PROJECT_ROOT=$(git rev-parse --show-toplevel) sinusoid_config=$(cat < /dev/null; then + echo "Support Bundle Created" + rm -rf "$SUPPORT_BUNDLE_DIR" && mkdir -p "$SUPPORT_BUNDLE_DIR" && \ + cp -r /usr/local/fledge/data/support/* "$SUPPORT_BUNDLE_DIR"/. && \ + echo "Support bundle has been saved to path: $SUPPORT_BUNDLE_DIR" +else + echo "Failed to Create support bundle" + rm -rf "$SUPPORT_BUNDLE_DIR" && mkdir -p "$SUPPORT_BUNDLE_DIR" && \ + cp /var/log/syslog "$SUPPORT_BUNDLE_DIR"/. && \ + echo "Syslog Saved to $SUPPORT_BUNDLE_DIR" +fi +echo "===================== COLLECTED SUPPORT BUNDLE ============================" ERRORS="$(wc -c <"err.txt")" if [[ ${ERRORS} -ne 0 ]] - then +then echo "============================= TESTS FAILED! =============================" cat err.txt exit 1 - else +else echo "======================================================\ =================== S U C C E S S ====================\ ======================================================" diff --git a/tests/system/lab/test.config b/tests/system/lab/test.config index aee5e385ac..e0530db391 100755 --- a/tests/system/lab/test.config +++ b/tests/system/lab/test.config @@ -2,9 +2,12 @@ FLEDGE_IP=localhost PI_IP=192.168.4.41 PI_USER=Administrator PI_PASSWORD='xxx' +PI_PORT="443" +PI_DB="foglamp" MAX_RETRIES=100 SLEEP_FIX=10 # Time to sleep to fix bugs. This should be zero. 
EXIT_EARLY=0 ADD_NORTH_AS_SERVICE=true VERIFY_EGRESS_TO_PI=1 -STORAGE=sqlite # postgres, sqlite-in-memory, sqlitelb \ No newline at end of file +STORAGE=sqlite # postgres, sqlite-in-memory, sqlitelb +READING_PLUGIN_DB='Use main plugin' diff --git a/tests/system/lab/verify_clean_pi.py b/tests/system/lab/verify_clean_pi.py index 50f416824d..47f6e1990c 100644 --- a/tests/system/lab/verify_clean_pi.py +++ b/tests/system/lab/verify_clean_pi.py @@ -1,6 +1,7 @@ import argparse from pathlib import Path import sys +from datetime import datetime PROJECT_ROOT = Path(__file__).absolute().parent.parent.parent.parent sys.path.append('{}/tests/system/common'.format(PROJECT_ROOT)) @@ -11,6 +12,7 @@ data_from_pi = None retries = 6 wait_time = 10 +today = datetime.now().strftime("%Y_%m_%d") parser = argparse.ArgumentParser(description="PI server", formatter_class=argparse.ArgumentDefaultsHelpFormatter) @@ -34,7 +36,7 @@ pi_db = args["pi_db"] asset_name = args["asset_name"] -af_hierarchy_level = "PIlabSinelvl1/PIlabSinelvl2/PIlabSinelvl3" +af_hierarchy_level = "{}_PIlabSinelvl1/PIlabSinelvl2/PIlabSinelvl3".format(today) af_hierarchy_level_list = af_hierarchy_level.split("/") clear_pi_system_pi_web_api(pi_host, pi_admin, pi_passwd, pi_db, af_hierarchy_level_list, diff --git a/tests/system/memory_leak/config.sh b/tests/system/memory_leak/config.sh index 21c7505ccb..83e3d50d9c 100644 --- a/tests/system/memory_leak/config.sh +++ b/tests/system/memory_leak/config.sh @@ -1,2 +1,5 @@ FLEDGE_URL="http://localhost:8081/fledge" TEST_RUN_TIME=3600 +PI_IP="localhost" +PI_USER="Administrator" +PI_PASSWORD="password" \ No newline at end of file diff --git a/tests/system/memory_leak/scripts/reset b/tests/system/memory_leak/scripts/reset index 64e27cf867..2cc67c71f1 100755 --- a/tests/system/memory_leak/scripts/reset +++ b/tests/system/memory_leak/scripts/reset @@ -7,7 +7,7 @@ export FLEDGE_ROOT=$1 cd ${1}/scripts/ && ./fledge stop echo 'resetting fledge' -echo "YES" | ./fledge reset || exit 1 +echo -e "YES\nYES" | ./fledge reset || exit 1 echo echo "Starting Fledge" ./fledge start diff --git a/tests/system/memory_leak/scripts/setup b/tests/system/memory_leak/scripts/setup index 8eac77d792..f95fefbade 100755 --- a/tests/system/memory_leak/scripts/setup +++ b/tests/system/memory_leak/scripts/setup @@ -3,6 +3,7 @@ set -e BRANCH=${2:-develop} # here Branch means branch of fledge repository that is needed to be scanned through valgrind, default is develop +COLLECT_FILES=${3} OS_NAME=`(grep -o '^NAME=.*' /etc/os-release | cut -f2 -d\" | sed 's/"//g')` ID=$(cat /etc/os-release | grep -w ID | cut -f2 -d"=" | tr -d '"') @@ -17,6 +18,7 @@ echo "UNAME is "${UNAME} sudo apt -y install git # cloning fledge +echo "Cloning Fledge branch $BRANCH" git clone -b $BRANCH https://github.com/fledge-iot/fledge.git && cd fledge && chmod +x requirements.sh && sh -x requirements.sh ; echo 'Changing CMakelists' @@ -34,30 +36,44 @@ export FLEDGE_ROOT=`pwd` && cd ..; # modifying script echo 'fledge root path is set to ${FLEDGE_ROOT}' -valgrind_conf=' --tool=memcheck --fullpath-after= --xml=yes --log-file=\/tmp\/south_valgrind.log --child-silent-after-fork=no --leak-check=full --show-leak-kinds=all --track-origins=yes ' +valgrind_conf=' --tool=memcheck --leak-check=full --show-leak-kinds=all' psouth_c=${FLEDGE_ROOT}/scripts/services/south_c echo $psouth_c sudo sed -i 's#/usr/local/fledge#'"$FLEDGE_ROOT"'#g' ${psouth_c} -sudo sed -i '/.\/fledge.services.south.*/s/^/valgrind -v --xml-file=\/tmp\/south_valgrind_%p.xml '"$valgrind_conf"' /' ${psouth_c} +if 
[[ "${COLLECT_FILES}" == "LOGS" ]]; then + sudo sed -i '/.\/fledge.services.south.*/s/^/valgrind --log-file=\/tmp\/south_valgrind.log '"$valgrind_conf"' /' ${psouth_c} +else + sudo sed -i '/.\/fledge.services.south.*/s/^/valgrind --xml=yes --xml-file=\/tmp\/south_valgrind_%p.xml --track-origins=yes '"$valgrind_conf"' /' ${psouth_c} +fi pnorth_C=${FLEDGE_ROOT}/scripts/services/north_C echo $pnorth_C sudo sed -i 's#/usr/local/fledge#'"$FLEDGE_ROOT"'#g' ${pnorth_C} -sudo sed -i '/.\/fledge.services.north.*/s/^/valgrind -v --xml-file=\/tmp\/north_valgrind_%p.xml '"$valgrind_conf"' /' ${pnorth_C} +if [[ "${COLLECT_FILES}" == "LOGS" ]]; then + sudo sed -i '/.\/fledge.services.north.*/s/^/valgrind --log-file=\/tmp\/north_valgrind.log '"$valgrind_conf"' /' ${pnorth_C} +else + sudo sed -i '/.\/fledge.services.north.*/s/^/valgrind --xml=yes --xml-file=\/tmp\/north_valgrind_%p.xml --track-origins=yes '"$valgrind_conf"' /' ${pnorth_C} +fi pstorage=${FLEDGE_ROOT}/scripts/services/storage echo $pstorage sudo sed -i 's#/usr/local/fledge#'"$FLEDGE_ROOT"'#g' ${pstorage} -sudo sed -i '/\${storageExec} \"\$@\"/s/^/valgrind -v --xml-file=\/tmp\/storage_valgrind_%p.xml '"$valgrind_conf"' /' ${pstorage} +if [[ "${COLLECT_FILES}" == "LOGS" ]]; then + sudo sed -i '/\${storageExec} \"\$@\"/s/^/valgrind --log-file=\/tmp\/storage_valgrind.log '"$valgrind_conf"' /' ${pstorage} +else + sudo sed -i '/\${storageExec} \"\$@\"/s/^/valgrind --xml=yes --xml-file=\/tmp\/storage_valgrind_%p.xml --track-origins=yes '"$valgrind_conf"' /' ${pstorage} +fi # cloning plugins based on parameters passed to the script, Currently only installing sinusoid -for i in ${1} + +IFS=' ' read -ra plugin_list <<< "${1}" +for i in "${plugin_list[@]}" do echo $i git clone https://github.com/fledge-iot/${i}.git && cd ${i}; plugin_dir=`pwd` - # Cheking requirements.sh file exists or not, to install plugins dependencies + # Cheking requirements.sh file exists or not, to install plugins dependencies if [[ -f ${plugin_dir}/requirements.sh ]] then ./${plugin_dir}/requirements.sh @@ -69,7 +85,7 @@ do sed -i 's|c++11 -O3|c++11 -O0 -ggdb|g' ${plugin_dir}/CMakeLists.txt # building C based plugin echo 'Building C plugin' - mkdir -p build && cd build && cmake -DFLEDGE_INSTALL=${FLEDGE_ROOT} -DFLEDGE_ROOT=${FLEDGE_ROOT} .. && make && make install + mkdir -p build && cd build && cmake -DFLEDGE_INSTALL=${FLEDGE_ROOT} -DFLEDGE_ROOT=${FLEDGE_ROOT} .. && make && make install && cd .. else # Checking requirements.txt file exists or not, to install plugins dependencies (if any) if [[ -f ${plugin_dir}/requirements.txt ]] @@ -81,8 +97,7 @@ do sudo cp -r $plugin_dir/python $FLEDGE_ROOT/ echo 'Copied.' fi - cd ../.. + cd ../ done echo 'Current location - '; pwd; -echo 'End of setup' - +echo 'End of setup' \ No newline at end of file diff --git a/tests/system/memory_leak/test_memcheck.sh b/tests/system/memory_leak/test_memcheck.sh index 43d53d56b0..5d9d5308a4 100755 --- a/tests/system/memory_leak/test_memcheck.sh +++ b/tests/system/memory_leak/test_memcheck.sh @@ -7,18 +7,25 @@ source config.sh export FLEDGE_ROOT=$(pwd)/fledge -FLEDGE_TEST_BRANCH=$1 #here fledge_test_branch means branch of fledge repository that is needed to be scanned, default is devops +FLEDGE_TEST_BRANCH="$1" # here fledge_test_branch means branch of fledge repository that is needed to be scanned, default is develop + +COLLECT_FILES="${2:-LOGS}" + +if [[ ${COLLECT_FILES} != @(LOGS|XML|) ]] +then + echo "Invalid argument ${COLLECT_FILES}. Please provide valid arguments: XML or LOGS." 
+ exit 1 +fi cleanup(){ # Removing temporary files, fledge and its plugin repository cloned by previous build of the Job - echo "Removing Cloned repository and tmp files" - rm -rf /tmp/*valgrind*.log /tmp/*valgrind*.xml + echo "Removing Cloned repository and log files" rm -rf fledge* reports && echo 'Done.' } # Setting up Fledge and installing its plugin setup(){ - ./scripts/setup fledge-south-sinusoid ${FLEDGE_TEST_BRANCH} + ./scripts/setup "fledge-south-sinusoid fledge-south-random" "${FLEDGE_TEST_BRANCH}" "${COLLECT_FILES}" } reset_fledge(){ @@ -26,7 +33,7 @@ reset_fledge(){ } add_sinusoid(){ - echo -e INFO: "Add South" + echo -e INFO: "Add South Sinusoid" curl -sX POST "$FLEDGE_URL/service" -d \ '{ "name": "Sine", @@ -44,6 +51,25 @@ add_sinusoid(){ echo } +add_random(){ + echo -e INFO: "Add South Random" + curl -sX POST "$FLEDGE_URL/service" -d \ + '{ + "name": "Random", + "type": "south", + "plugin": "Random", + "enabled": true, + "config": {} + }' + echo + echo 'Updating Readings per second' + + sleep 60 + + curl -sX PUT "$FLEDGE_URL/category/RandomAdvanced" -d '{ "readingsPerSec": "100"}' + echo + +} setup_north_pi_egress () { # Add PI North as service echo 'Setting up North' @@ -58,16 +84,16 @@ setup_north_pi_egress () { "value": "PI Web API" }, "ServerHostname": { - "value": "'$2'" + "value": "'${PI_IP}'" }, "ServerPort": { "value": "443" }, "PIWebAPIUserId": { - "value": "'$3'" + "value": "'${PI_USER}'" }, "PIWebAPIPassword": { - "value": "'$4'" + "value": "'${PI_PASSWORD}'" }, "NamingScheme": { "value": "Backward compatibility" @@ -96,16 +122,18 @@ collect_data(){ generate_valgrind_logs(){ echo 'Creating reports directory'; - mkdir -p reports/test1 ; ls -lrth + mkdir -p reports/ ; ls -lrth echo 'copying reports ' - cp -rf /tmp/*valgrind*.log /tmp/*valgrind*.xml reports/test1/. && echo 'copied' - rm -rf fledge* + extension="xml" + if [[ "${COLLECT_FILES}" == "LOGS" ]]; then extension="log"; fi + cp -rf /tmp/*valgrind*.${extension} reports/. && echo 'copied' } cleanup setup reset_fledge add_sinusoid +add_random setup_north_pi_egress collect_data generate_valgrind_logs diff --git a/tests/system/python/api/control_service/test_entrypoint.py b/tests/system/python/api/control_service/test_entrypoint.py new file mode 100644 index 0000000000..e5730ea79b --- /dev/null +++ b/tests/system/python/api/control_service/test_entrypoint.py @@ -0,0 +1,198 @@ +import http.client +import json +import pytest +from urllib.parse import quote +from collections import OrderedDict + +__author__ = "Ashish Jabble" +__copyright__ = "Copyright (c) 2023 Dianomic Systems Inc." 
+__license__ = "Apache 2.0" +__version__ = "${VERSION}" + + +""" Control Flow Entrypoint API tests """ + +EP_1 = "EP #1" +EP_2 = "EP-1" +EP_3 = "EP #_2" +payload1 = {"name": EP_1, "type": "write", "description": "Entry Point 1", "operation_name": "", + "destination": "broadcast", "constants": {"c1": "100"}, "variables": {"v1": "100"}, "anonymous": False, + "allow": []} +payload2 = {"name": EP_2, "type": "operation", "description": "Operation 1", "operation_name": "distance", + "destination": "broadcast", "anonymous": False, "allow": []} +payload3 = {"name": EP_3, "type": "operation", "description": "Operation 2", "operation_name": "distance", + "destination": "broadcast", "constants": {"c1": "100"}, "variables": {"v1": "1200"}, "anonymous": True, + "allow": ["admin", "user"]} + +# TODO: add more tests +""" + a) authentication based + b) update request by installing external service +""" + + +class TestEntrypoint: + def test_empty_get_all(self, fledge_url, reset_and_start_fledge): + jdoc = self._get_all(fledge_url) + assert [] == jdoc + + @pytest.mark.parametrize("payload", [payload1, payload2, payload3]) + def test_create(self, fledge_url, payload): + ep_name = payload['name'] + conn = http.client.HTTPConnection(fledge_url) + conn.request('POST', '/fledge/control/manage', body=json.dumps(payload)) + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert len(jdoc), "Failed to create {} entrypoint!".format(ep_name) + assert 'message' in jdoc + assert '{} control entrypoint has been created successfully.'.format(ep_name) == jdoc['message'] + self.verify_details(conn, payload) + self.verify_audit_details(conn, ep_name, 'CTEAD') + + def test_get_all(self, fledge_url): + jdoc = self._get_all(fledge_url) + assert 3 == len(jdoc) + assert ['name', 'description', 'permitted'] == list(jdoc[0].keys()) + + def test_get_by_name(self, fledge_url): + conn = http.client.HTTPConnection(fledge_url) + conn.request("GET", '/fledge/control/manage/{}'.format(quote(EP_1))) + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert len(jdoc), "{} entrypoint found!".format(EP_1) + assert payload1 == jdoc + assert 'permitted' in jdoc + + @pytest.mark.parametrize("name, payload, old_info", [ + (EP_1, {"anonymous": True}, {"anonymous": False}), + (EP_2, {"description": "Updated", "type": "operation", "operation_name": "focus", "allow": ["user"]}, + {"description": "Operation 1", "type": "operation", "operation_name": "distance", "allow": []}), + (EP_3, {"constants": {"c1": "123", "c2": "100"}, "variables": {"v1": "900"}}, + {"constants": {"c1": "100"}, "variables": {"v1": "1200"}}) + ]) + def test_update(self, fledge_url, name, payload, old_info): + conn = http.client.HTTPConnection(fledge_url) + conn.request('PUT', '/fledge/control/manage/{}'.format(quote(name)), body=json.dumps(payload)) + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert 'message' in jdoc + assert '{} control entrypoint has been updated successfully.'.format(name) == jdoc['message'] + + source = 'CTECH' + conn.request("GET", '/fledge/audit?source={}'.format(source)) + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert 'audit' in jdoc + assert len(jdoc['audit']) + audit = jdoc['audit'][0] + assert 'INFORMATION' == audit['severity'] + assert source == audit['source'] + assert 'details' in audit + assert 'entrypoint' in audit['details'] + assert 
'old_entrypoint' in audit['details'] + audit_old = audit['details']['old_entrypoint'] + audit_new = audit['details']['entrypoint'] + assert name == audit_new['name'] + assert name == audit_old['name'] + + conn.request("GET", '/fledge/control/manage/{}'.format(quote(name))) + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert len(jdoc), "{} entrypoint found!".format(name) + assert name == jdoc['name'] + if name == EP_1: + assert old_info['anonymous'] == audit_old['anonymous'] + assert payload['anonymous'] == audit_new['anonymous'] + assert payload['anonymous'] == jdoc['anonymous'] + elif name == EP_2: + assert old_info['description'] == audit_old['description'] + assert payload['description'] == audit_new['description'] + assert old_info['type'] == audit_old['type'] + assert payload['type'] == audit_new['type'] + assert old_info['operation_name'] == audit_old['operation_name'] + assert payload['operation_name'] == audit_new['operation_name'] + assert old_info['allow'] == audit_old['allow'] + assert payload['allow'] == audit_new['allow'] + assert payload['description'] == jdoc['description'] + assert payload['type'] == jdoc['type'] + assert payload['operation_name'] == jdoc['operation_name'] + assert payload['allow'] == jdoc['allow'] + elif name == EP_3: + assert old_info['constants']['c1'] == audit_old['constants']['c1'] + assert 'c2' not in audit_old['constants'] + assert payload['constants']['c1'] == audit_new['constants']['c1'] + assert payload['constants']['c2'] == audit_new['constants']['c2'] + assert old_info['variables']['v1'] == audit_old['variables']['v1'] + assert payload['variables']['v1'] == audit_new['variables']['v1'] + assert payload['constants']['c1'] == jdoc['constants']['c1'] + assert payload['constants']['c2'] == jdoc['constants']['c2'] + assert payload['variables']['v1'] == jdoc['variables']['v1'] + else: + # Add more scenarios + pass + + @pytest.mark.parametrize("name, count", [(EP_1, 2), (EP_2, 1), (EP_3, 0)]) + def test_delete(self, fledge_url, name, count): + conn = http.client.HTTPConnection(fledge_url) + conn.request("DELETE", '/fledge/control/manage/{}'.format(quote(name))) + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert len(jdoc), "{} entrypoint found!".format(name) + assert 'message' in jdoc + assert '{} control entrypoint has been deleted successfully.'.format(name) == jdoc['message'] + self.verify_audit_details(conn, name, 'CTEDL') + jdoc = self._get_all(fledge_url) + assert count == len(jdoc) + + def verify_audit_details(self, conn, ep_name, source): + conn.request("GET", '/fledge/audit?source={}'.format(source)) + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert len(jdoc), "No audit record entry found!" 
+ assert 'audit' in jdoc + assert ep_name == jdoc['audit'][0]['details']['name'] + assert 'INFORMATION' == jdoc['audit'][0]['severity'] + assert source == jdoc['audit'][0]['source'] + + def verify_details(self, conn, data): + name = data['name'] + conn.request("GET", '/fledge/control/manage/{}'.format(quote(name))) + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert len(jdoc), "{} entrypoint found!".format(name) + data['permitted'] = True + if 'constants' not in data: + data['constants'] = {} + if 'variables' not in data: + data['variables'] = {} + + d1 = OrderedDict(sorted(data.items())) + d2 = OrderedDict(sorted(jdoc.items())) + assert d1 == d2 + + def _get_all(self, url): + conn = http.client.HTTPConnection(url) + conn.request("GET", '/fledge/control/manage') + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert len(jdoc), "No entrypoint found!" + assert 'controls' in jdoc + return jdoc['controls'] diff --git a/tests/system/python/api/test_audit.py b/tests/system/python/api/test_audit.py index 40740fb908..b8a4b1d328 100644 --- a/tests/system/python/api/test_audit.py +++ b/tests/system/python/api/test_audit.py @@ -18,16 +18,23 @@ __license__ = "Apache 2.0" __version__ = "${VERSION}" +DEFAULT_AUDIT_COUNT = 14 -class TestAudit: +class TestAudit: def test_get_log_codes(self, fledge_url, reset_and_start_fledge): expected_code_list = ['PURGE', 'LOGGN', 'STRMN', 'SYPRG', 'START', 'FSTOP', 'CONCH', 'CONAD', 'SCHCH', 'SCHAD', 'SRVRG', 'SRVUN', 'SRVFL', 'SRVRS', 'NHCOM', 'NHDWN', 'NHAVL', 'UPEXC', 'BKEXC', 'NTFDL', 'NTFAD', 'NTFSN', 'NTFCL', 'NTFST', 'NTFSD', 'PKGIN', 'PKGUP', 'PKGRM', 'DSPST', 'DSPSD', - 'ESSRT', 'ESSTP', 'ASTDP', 'ASTUN', 'PIPIN', 'AUMRK'] + 'ESSRT', 'ESSTP', 'ASTDP', 'ASTUN', 'PIPIN', 'AUMRK', + 'USRAD', 'USRDL', 'USRCH', 'USRRS', + 'ACLAD', 'ACLCH', 'ACLDL', + 'CTSAD', 'CTSCH', 'CTSDL', + 'CTPAD', 'CTPCH', 'CTPDL', + 'CTEAD', 'CTECH', 'CTEDL' + ] conn = http.client.HTTPConnection(fledge_url) conn.request("GET", '/fledge/audit/logcode') r = conn.getresponse() @@ -55,16 +62,16 @@ def test_get_severity(self, fledge_url): assert Counter([0, 1, 2, 4]) == Counter(index) @pytest.mark.parametrize("request_params, total_count, audit_count", [ - ('', 13, 13), - ('?limit=1', 13, 1), - ('?skip=4', 13, 9), - ('?limit=1&skip=8', 13, 1), + ('', DEFAULT_AUDIT_COUNT, DEFAULT_AUDIT_COUNT), + ('?limit=1', DEFAULT_AUDIT_COUNT, 1), + ('?skip=4', DEFAULT_AUDIT_COUNT, 10), + ('?limit=1&skip=8', DEFAULT_AUDIT_COUNT, 1), ('?source=START', 1, 1), - ('?source=CONAD', 12, 12), - ('?source=CONAD&limit=1', 12, 1), - ('?source=CONAD&skip=1', 12, 11), - ('?source=CONAD&skip=6&limit=1', 12, 1), - ('?severity=INFORMATION', 13, 13), + ('?source=CONAD', 13, 13), + ('?source=CONAD&limit=1', 13, 1), + ('?source=CONAD&skip=1', 13, 12), + ('?source=CONAD&skip=6&limit=1', 13, 1), + ('?severity=INFORMATION', DEFAULT_AUDIT_COUNT, DEFAULT_AUDIT_COUNT), ('?severity=failure', 0, 0), ('?source=CONAD&severity=failure', 0, 0), ('?source=START&severity=INFORMATION', 1, 1), @@ -88,10 +95,10 @@ def test_default_get_audit(self, fledge_url, wait_time, request_params, total_co assert audit_count == len(elems) @pytest.mark.parametrize("payload, total_count", [ - ({"source": "LOGGN", "severity": "warning", "details": {"message": "Engine oil pressure low"}}, 14), - ({"source": "NHCOM", "severity": "success", "details": {}}, 15), - ({"source": "START", "severity": "information", "details": {"message": "fledge started"}}, 16), - 
({"source": "CONCH", "severity": "failure", "details": {"message": "Scheduler configuration failed"}}, 17) + ({"source": "LOGGN", "severity": "warning", "details": {"message": "Engine oil pressure low"}}, 1), + ({"source": "NHCOM", "severity": "success", "details": {}}, 2), + ({"source": "START", "severity": "information", "details": {"message": "fledge started"}}, 3), + ({"source": "CONCH", "severity": "failure", "details": {"message": "Scheduler configuration failed"}}, 4) ]) def test_create_audit_entry(self, fledge_url, payload, total_count): conn = http.client.HTTPConnection(fledge_url) @@ -112,7 +119,7 @@ def test_create_audit_entry(self, fledge_url, payload, total_count): r = r.read().decode() jdoc = json.loads(r) assert len(jdoc), "No data found" - assert total_count == jdoc['totalCount'] + assert DEFAULT_AUDIT_COUNT + total_count == jdoc['totalCount'] @pytest.mark.parametrize("payload", [ ({"source": "LOGGN_X", "severity": "warning", "details": {"message": "Engine oil pressure low"}}), @@ -122,12 +129,12 @@ def test_create_nonexistent_log_code_audit_entry(self, fledge_url, payload, stor if storage_plugin == 'sqlite': pytest.skip('TODO: FOGL-2124 Enable foreign key constraint in SQLite') + msg = "Audit entry cannot be logged." conn = http.client.HTTPConnection(fledge_url) conn.request('POST', '/fledge/audit', body=json.dumps(payload)) r = conn.getresponse() assert 400 == r.status - assert 'Bad Request' in r.reason + assert msg in r.reason r = r.read().decode() jdoc = json.loads(r) - print(jdoc) - assert "Audit entry cannot be logged" in jdoc['message'] + assert msg in jdoc['message'] diff --git a/tests/system/python/api/test_authentication.py b/tests/system/python/api/test_authentication.py index 3eedb9d5bd..37507faf9d 100644 --- a/tests/system/python/api/test_authentication.py +++ b/tests/system/python/api/test_authentication.py @@ -114,29 +114,38 @@ def test_get_roles(self, fledge_url): assert {'roles': [{'description': 'All CRUD privileges', 'id': 1, 'name': 'admin'}, {'description': 'All CRUD operations and self profile management', 'id': 2, 'name': 'user'}, {'id': 3, 'name': 'view', 'description': 'Only to view the configuration'}, - {'id': 4, 'name': 'data-view', 'description': 'Only read the data in buffer'} + {'id': 4, 'name': 'data-view', 'description': 'Only read the data in buffer'}, + {'id': 5, 'name': 'control', 'description': + 'Same as editor can do and also have access for control scripts and pipelines'} ]} == jdoc @pytest.mark.parametrize(("form_data", "expected_values"), [ ({"username": "any1", "password": "User@123", "real_name": "AJ", "description": "Nerd user"}, {'user': {'userName': 'any1', 'userId': 3, 'roleId': 2, 'accessMethod': 'any', 'realName': 'AJ', - 'description': 'Nerd user'}, 'message': 'any1 user has been created successfully'}), + 'description': 'Nerd user'}, 'message': 'any1 user has been created successfully.'}), ({"username": "admin1", "password": "F0gl@mp!", "role_id": 1}, {'user': {'userName': 'admin1', 'userId': 4, 'roleId': 1, 'accessMethod': 'any', 'realName': '', - 'description': ''}, 'message': 'admin1 user has been created successfully'}), + 'description': ''}, 'message': 'admin1 user has been created successfully.'}), ({"username": "bogus", "password": "Fl3dG$", "role_id": 2}, {'user': {'userName': 'bogus', 'userId': 5, 'roleId': 2, 'accessMethod': 'any', 'realName': '', - 'description': ''}, 'message': 'bogus user has been created successfully'}), + 'description': ''}, 'message': 'bogus user has been created successfully.'}), 
({"username": "view", "password": "V!3w@1", "role_id": 3, "real_name": "View", "description": "Only to view the configuration"}, {'user': { 'userName': 'view', 'userId': 6, 'roleId': 3, 'accessMethod': 'any', 'realName': 'View', - 'description': 'Only to view the configuration'}, 'message': 'view user has been created successfully'}), + 'description': 'Only to view the configuration'}, 'message': 'view user has been created successfully.'}), ({"username": "dataView", "password": "DV!3w@1", "role_id": 4, "real_name": "DataView", "description": "Only read the data in buffer"}, {'user': { 'userName': 'dataview', 'userId': 7, 'roleId': 4, 'accessMethod': 'any', 'realName': 'DataView', - 'description': 'Only read the data in buffer'}, 'message': 'dataview user has been created successfully'}) + 'description': 'Only read the data in buffer'}, 'message': 'dataview user has been created successfully.'} + ), + ({"username": "control", "password": "C0ntrol!", "role_id": 5, "real_name": "Control", + "description": "Same as editor can do and also have access for control scripts and pipelines"}, + {'user': { + 'userName': 'control', 'userId': 8, 'roleId': 5, 'accessMethod': 'any', 'realName': 'Control', + 'description': 'Same as editor can do and also have access for control scripts and pipelines'}, + 'message': 'control user has been created successfully.'}) ]) def test_create_user(self, fledge_url, form_data, expected_values): conn = http.client.HTTPConnection(fledge_url) @@ -157,7 +166,7 @@ def test_update_password(self, fledge_url): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'message': 'Password has been updated successfully for user id:<{}>'.format(uid)} == jdoc + assert {'message': 'Password has been updated successfully for user ID:<{}>.'.format(uid)} == jdoc def test_update_user(self, fledge_url): uid = 5 @@ -213,7 +222,7 @@ def test_enable_user(self, fledge_url): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'message': 'User with id:<{}> has been disabled successfully'.format(uid)} == jdoc + assert {'message': 'User with ID:<{}> has been disabled successfully.'.format(uid)} == jdoc # Fetch users list again and check disabled user does not exist in the response conn.request("GET", "/fledge/user", headers={"authorization": TOKEN}) @@ -234,7 +243,7 @@ def test_reset_user(self, fledge_url): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'message': 'User with id:<{}> has been updated successfully'.format(uid)} == jdoc + assert {'message': 'User with ID:<{}> has been updated successfully.'.format(uid)} == jdoc def test_delete_user(self, fledge_url): conn = http.client.HTTPConnection(fledge_url) @@ -243,7 +252,7 @@ def test_delete_user(self, fledge_url): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'message': "User has been deleted successfully"} == jdoc + assert {'message': "User has been deleted successfully."} == jdoc def test_logout_all(self, fledge_url): conn = http.client.HTTPConnection(fledge_url) diff --git a/tests/system/python/api/test_configuration.py b/tests/system/python/api/test_configuration.py index 8662e31911..7c36dc4d97 100644 --- a/tests/system/python/api/test_configuration.py +++ b/tests/system/python/api/test_configuration.py @@ -90,6 +90,12 @@ def test_default(self, fledge_url, reset_and_start_fledge, wait_time, storage_pl 'description': 'Scheduler configuration', 'displayName': 'Scheduler', 'children': [] + }, + { + "key": "LOGGING", + "description": 
"Logging Level of Core Server", + "displayName": "Logging", + 'children': [] } ] }, diff --git a/tests/system/python/api/test_endpoints_with_different_user_types.py b/tests/system/python/api/test_endpoints_with_different_user_types.py index 084c6fe79d..a0180372f8 100644 --- a/tests/system/python/api/test_endpoints_with_different_user_types.py +++ b/tests/system/python/api/test_endpoints_with_different_user_types.py @@ -22,6 +22,8 @@ VIEW_PWD = "V!3w@1" DATA_VIEW_USERNAME = "dataview" DATA_VIEW_PWD = "DV!3w$" +CONTROL_USERNAME = "control" +CONTROL_PWD = "C0ntrol!" @pytest.fixture @@ -65,8 +67,7 @@ def test_setup(reset_and_start_fledge, change_to_auth_mandatory, fledge_url, wai assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert "{} user has been created successfully".format(VIEW_USERNAME) == jdoc["message"] - + assert "{} user has been created successfully.".format(VIEW_USERNAME) == jdoc["message"] # Create Data view user data_view_payload = {"username": DATA_VIEW_USERNAME, "password": DATA_VIEW_PWD, "role_id": 4, "real_name": "DataView", "description": "Only read the data in buffer"} @@ -76,7 +77,16 @@ def test_setup(reset_and_start_fledge, change_to_auth_mandatory, fledge_url, wai assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert "{} user has been created successfully".format(DATA_VIEW_USERNAME) == jdoc["message"] + assert "{} user has been created successfully.".format(DATA_VIEW_USERNAME) == jdoc["message"] + # Create Control user + control_payload = {"username": CONTROL_USERNAME, "password": CONTROL_PWD, "role_id": 5, "real_name": "Control", + "description": "Same as editor can do and also have access for control scripts and pipelines"} + conn.request("POST", "/fledge/admin/user", body=json.dumps(control_payload), headers={"authorization": admin_token}) + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert "{} user has been created successfully.".format(CONTROL_USERNAME) == jdoc["message"] class TestAPIEndpointsWithViewUserType: @@ -196,6 +206,14 @@ def test_login(self, fledge_url, wait_time): ("POST", "/fledge/ACL", 403), ("GET", "/fledge/ACL", 200), ("GET", "/fledge/ACL/foo", 404), ("PUT", "/fledge/ACL/foo", 403), ("DELETE", "/fledge/ACL/foo", 403), ("PUT", "/fledge/service/foo/ACL", 403), ("DELETE", "/fledge/service/foo/ACL", 403), + # control script + ("POST", "/fledge/control/script", 403), ("GET", "/fledge/control/script", 200), + ("GET", "/fledge/control/script/foo", 404), ("PUT", "/fledge/control/script/foo", 403), + ("DELETE", "/fledge/control/script/foo", 403), ("POST", "/fledge/control/script/foo/schedule", 403), + # control pipeline + ("POST", "/fledge/control/pipeline", 403), ("GET", "/fledge/control/lookup", 200), + ("GET", "/fledge/control/pipeline", 200), ("GET", "/fledge/control/pipeline/1", 404), + ("PUT", "/fledge/control/pipeline/1", 403), ("DELETE", "/fledge/control/pipeline/1", 403), # python packages ("GET", "/fledge/python/packages", 200), ("POST", "/fledge/python/package", 403), # notification @@ -207,10 +225,6 @@ def test_login(self, fledge_url, wait_time): ("DELETE", "/fledge/notification/N1/delivery/C1", 403) ]) def test_endpoints(self, fledge_url, method, route_path, http_status_code, storage_plugin): - # FIXME: Once below JIRA is resolved - if storage_plugin == 'postgres': - if route_path == '/fledge/statistics/rate?periods=1&statistics=FOO': - pytest.skip('Due to FOGL-7097') conn = http.client.HTTPConnection(fledge_url) conn.request(method, route_path, 
headers={"authorization": TOKEN}) r = conn.getresponse() @@ -342,6 +356,14 @@ def test_login(self, fledge_url, wait_time): ("POST", "/fledge/ACL", 403), ("GET", "/fledge/ACL", 403), ("GET", "/fledge/ACL/foo", 403), ("PUT", "/fledge/ACL/foo", 403), ("DELETE", "/fledge/ACL/foo", 403), ("PUT", "/fledge/service/foo/ACL", 403), ("DELETE", "/fledge/service/foo/ACL", 403), + # control script + ("POST", "/fledge/control/script", 403), ("GET", "/fledge/control/script", 403), + ("GET", "/fledge/control/script/foo", 403), ("PUT", "/fledge/control/script/foo", 403), + ("DELETE", "/fledge/control/script/foo", 403), ("POST", "/fledge/control/script/foo/schedule", 403), + # control pipeline + ("POST", "/fledge/control/pipeline", 403), ("GET", "/fledge/control/lookup", 403), + ("GET", "/fledge/control/pipeline", 403), ("GET", "/fledge/control/pipeline/1", 403), + ("PUT", "/fledge/control/pipeline/1", 403), ("DELETE", "/fledge/control/pipeline/1", 403), # python packages ("GET", "/fledge/python/packages", 403), ("POST", "/fledge/python/package", 403), # notification @@ -353,10 +375,161 @@ def test_login(self, fledge_url, wait_time): ("DELETE", "/fledge/notification/N1/delivery/C1", 403) ]) def test_endpoints(self, fledge_url, method, route_path, http_status_code, storage_plugin): - # FIXME: Once below JIRA is resolved - if storage_plugin == 'postgres': - if route_path == '/fledge/statistics/rate?periods=1&statistics=FOO': - pytest.skip('Due to FOGL-7097') + conn = http.client.HTTPConnection(fledge_url) + conn.request(method, route_path, headers={"authorization": TOKEN}) + r = conn.getresponse() + assert http_status_code == r.status + r.read().decode() + + def test_logout_me(self, fledge_url): + conn = http.client.HTTPConnection(fledge_url) + conn.request("PUT", '/fledge/logout', headers={"authorization": TOKEN}) + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert jdoc['logout'] + + +class TestAPIEndpointsWithControlUserType: + def test_login(self, fledge_url, wait_time): + time.sleep(wait_time * 2) + conn = http.client.HTTPConnection(fledge_url) + conn.request("POST", "/fledge/login", json.dumps({"username": CONTROL_USERNAME, "password": CONTROL_PWD})) + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert "Logged in successfully." 
== jdoc['message'] + assert "token" in jdoc + assert not jdoc['admin'] + global TOKEN + TOKEN = jdoc["token"] + + @pytest.mark.parametrize(("method", "route_path", "http_status_code"), [ + # common + ("GET", "/fledge/ping", 200), # ("PUT", "/fledge/shutdown", 200), ("PUT", "/fledge/restart", 200), + # health + ("GET", "/fledge/health/storage", 200), ("GET", "/fledge/health/logging", 200), + # user & roles + ("GET", "/fledge/user", 200), ("PUT", "/fledge/user", 500), ("PUT", "/fledge/user/1/password", 500), + ("PUT", "/fledge/user/3/password", 500), ("GET", "/fledge/user/role", 200), + # auth + ("POST", "/fledge/login", 500), ("PUT", "/fledge/31/logout", 401), + ("GET", "/fledge/auth/ott", 200), + # admin + ("POST", "/fledge/admin/user", 403), ("DELETE", "/fledge/admin/3/delete", 403), ("PUT", "/fledge/admin/3", 403), + ("PUT", "/fledge/admin/3/enable", 403), ("PUT", "/fledge/admin/3/reset", 403), + # category + ("GET", "/fledge/category", 200), ("POST", "/fledge/category", 400), ("GET", "/fledge/category/General", 200), + ("PUT", "/fledge/category/General", 400), ("DELETE", "/fledge/category/General", 400), + ("POST", "/fledge/category/General/children", 500), ("GET", "/fledge/category/General/children", 200), + ("DELETE", "/fledge/category/General/children/Advanced", 200), + ("DELETE", "/fledge/category/General/parent", 200), + ("GET", "/fledge/category/rest_api/allowPing", 200), ("PUT", "/fledge/category/rest_api/allowPing", 500), + ("DELETE", "/fledge/category/rest_api/allowPing/value", 200), + ("POST", "/fledge/category/rest_api/allowPing/upload", 400), + # schedule processes & schedules + ("GET", "/fledge/schedule/process", 200), ("POST", "/fledge/schedule/process", 500), + ("GET", "/fledge/schedule/process/purge", 200), + ("GET", "/fledge/schedule", 200), ("POST", "/fledge/schedule", 400), ("GET", "/fledge/schedule/type", 200), + ("GET", "/fledge/schedule/2176eb68-7303-11e7-8cf7-a6006ad3dba0", 200), + ("PUT", "/fledge/schedule/2176eb68-7303-11e7-8cf7-a6006ad3dba0/enable", 200), + ("PUT", "/fledge/schedule/2176eb68-7303-11e7-8cf7-a6006ad3dba0/disable", 200), + ("PUT", "/fledge/schedule/enable", 404), ("PUT", "/fledge/schedule/disable", 404), + ("POST", "/fledge/schedule/start/2176eb68-7303-11e7-8cf7-a6006ad3dba0", 200), + ("PUT", "/fledge/schedule/2176eb68-7303-11e7-8cf7-a6006ad3dba0", 400), + ("DELETE", "/fledge/schedule/d1631422-9ec6-11e7-abc4-cec278b6b50a", 200), + # tasks + ("GET", "/fledge/task", 200), ("GET", "/fledge/task/state", 200), ("GET", "/fledge/task/latest", 200), + ("GET", "/fledge/task/123", 404), ("PUT", "/fledge/task/123/cancel", 404), + ("POST", "/fledge/scheduled/task", 400), ("DELETE", "/fledge/scheduled/task/blah", 404), + # service + ("POST", "/fledge/service", 400), ("GET", "/fledge/service", 200), ("DELETE", "/fledge/service/blah", 404), + # ("GET", "/fledge/service/available", 200), -- checked manually and commented out only to avoid apt-update + ("GET", "/fledge/service/installed", 200), + ("PUT", "/fledge/service/Southbound/blah/update", 400), ("POST", "/fledge/service/blah/otp", 403), + # south & north + ("GET", "/fledge/south", 200), ("GET", "/fledge/north", 200), + # asset browse + ("GET", "/fledge/asset", 200), ("GET", "/fledge/asset/sinusoid", 200), + ("GET", "/fledge/asset/sinusoid/latest", 200), + ("GET", "/fledge/asset/sinusoid/summary", 404), ("GET", "/fledge/asset/sinusoid/sinusoid", 200), + ("GET", "/fledge/asset/sinusoid/sinusoid/summary", 404), ("GET", "/fledge/asset/sinusoid/sinusoid/series", 200), + ("GET", "/fledge/asset/sinusoid/bucket/1", 
200), ("GET", "/fledge/asset/sinusoid/sinusoid/bucket/1", 200), + ("GET", "/fledge/structure/asset", 200), ("DELETE", "/fledge/asset", 200), + ("DELETE", "/fledge/asset/sinusoid", 200), + # asset tracker + ("GET", "/fledge/track", 200), ("GET", "/fledge/track/storage/assets", 200), + ("PUT", "/fledge/track/service/foo/asset/bar/event/Ingest", 404), + # statistics + ("GET", "/fledge/statistics", 200), ("GET", "/fledge/statistics/history", 200), + ("GET", "/fledge/statistics/rate?periods=1&statistics=FOO", 200), + # audit trail + ("POST", "/fledge/audit", 500), ("GET", "/fledge/audit", 200), ("GET", "/fledge/audit/logcode", 200), + ("GET", "/fledge/audit/severity", 200), + # backup & restore + ("GET", "/fledge/backup", 200), # ("POST", "/fledge/backup", 200), -- checked manually + ("POST", "/fledge/backup/upload", 500), + ("GET", "/fledge/backup/status", 200), ("GET", "/fledge/backup/123", 404), + ("DELETE", "/fledge/backup/123", 404), ("GET", "/fledge/backup/123/download", 404), + ("PUT", "/fledge/backup/123/restore", 200), + # package update + # ("GET", "/fledge/update", 200), -- checked manually and commented out only to avoid apt-update run + # ("PUT", "/fledge/update", 200), -- checked manually + # certs store + ("GET", "/fledge/certificate", 200), ("POST", "/fledge/certificate", 400), + ("DELETE", "/fledge/certificate/user", 403), + # support bundle + ("GET", "/fledge/support", 200), ("GET", "/fledge/support/foo", 400), + # ("POST", "/fledge/support", 200), - checked manually + # syslogs & package logs + ("GET", "/fledge/syslog", 200), ("GET", "/fledge/package/log", 200), ("GET", "/fledge/package/log/foo", 400), + ("GET", "/fledge/package/install/status", 404), + # plugins + ("GET", "/fledge/plugins/installed", 200), + # ("GET", "/fledge/plugins/available", 200), -- checked manually and commented out only to avoid apt operations + # ("PUT", "/fledge/plugins/south/sinusoid/update", 200), + # ("DELETE", "/fledge/plugins/south/sinusoid", 404), + ("POST", "/fledge/plugins", 400), ("GET", "/fledge/service/foo/persist", 404), + ("GET", "/fledge/service/foo/plugin/omf/data", 404), ("POST", "/fledge/service/foo/plugin/omf/data", 404), + ("DELETE", "/fledge/service/foo/plugin/omf/data", 404), + # filters + ("POST", "/fledge/filter", 404), ("PUT", "/fledge/filter/foo/pipeline", 404), + ("GET", "/fledge/filter/foo/pipeline", 404), ("GET", "/fledge/filter/bar", 404), ("GET", "/fledge/filter", 200), + ("DELETE", "/fledge/filter/foo/pipeline", 500), ("DELETE", "/fledge/filter/bar", 404), + # snapshots + ("GET", "/fledge/snapshot/plugins", 403), ("POST", "/fledge/snapshot/plugins", 403), + ("PUT", "/fledge/snapshot/plugins/1", 403), ("DELETE", "/fledge/snapshot/plugins/1", 403), + ("GET", "/fledge/snapshot/category", 403), ("POST", "/fledge/snapshot/category", 403), + ("PUT", "/fledge/snapshot/category/1", 403), ("DELETE", "/fledge/snapshot/category/1", 403), + ("GET", "/fledge/snapshot/schedule", 403), ("POST", "/fledge/snapshot/schedule", 403), + ("PUT", "/fledge/snapshot/schedule/1", 403), ("DELETE", "/fledge/snapshot/schedule/1", 403), + # repository + ("POST", "/fledge/repository", 400), + # ACL + ("POST", "/fledge/ACL", 403), ("GET", "/fledge/ACL", 200), ("GET", "/fledge/ACL/foo", 404), + ("PUT", "/fledge/ACL/foo", 403), ("DELETE", "/fledge/ACL/foo", 403), ("PUT", "/fledge/service/foo/ACL", 403), + ("DELETE", "/fledge/service/foo/ACL", 403), + # control script + ("POST", "/fledge/control/script", 400), ("GET", "/fledge/control/script", 200), + ("GET", "/fledge/control/script/foo", 404), ("PUT", 
"/fledge/control/script/foo", 400), + ("DELETE", "/fledge/control/script/foo", 404), ("POST", "/fledge/control/script/foo/schedule", 404), + # control pipeline + ("POST", "/fledge/control/pipeline", 400), ("GET", "/fledge/control/lookup", 200), + ("GET", "/fledge/control/pipeline", 200), ("GET", "/fledge/control/pipeline/1", 404), + ("PUT", "/fledge/control/pipeline/1", 404), ("DELETE", "/fledge/control/pipeline/1", 404), + # python packages + ("GET", "/fledge/python/packages", 200), ("POST", "/fledge/python/package", 500), + # notification + ("GET", "/fledge/notification", 200), ("GET", "/fledge/notification/plugin", 404), + ("GET", "/fledge/notification/type", 200), ("GET", "/fledge/notification/N1", 400), + ("POST", "/fledge/notification", 404), ("PUT", "/fledge/notification/N1", 404), + ("DELETE", "/fledge/notification/N1", 404), ("GET", "/fledge/notification/N1/delivery", 404), + ("POST", "/fledge/notification/N1/delivery", 400), ("GET", "/fledge/notification/N1/delivery/C1", 404), + ("DELETE", "/fledge/notification/N1/delivery/C1", 404) + ]) + def test_endpoints(self, fledge_url, method, route_path, http_status_code, storage_plugin): conn = http.client.HTTPConnection(fledge_url) conn.request(method, route_path, headers={"authorization": TOKEN}) r = conn.getresponse() diff --git a/tests/system/python/api/test_notification.py b/tests/system/python/api/test_notification.py index 019fd6c581..00413ed119 100644 --- a/tests/system/python/api/test_notification.py +++ b/tests/system/python/api/test_notification.py @@ -23,7 +23,7 @@ SERVICE = "notification" SERVICE_NAME = "Notification Server #1" NOTIFY_PLUGIN = "slack" -NOTIFY_INBUILT_RULES = ["Threshold"] +NOTIFY_INBUILT_RULES = ["Threshold", "DataAvailability"] DATA = {"name": "Test - 1", "description": "Test4_Notification", "rule": NOTIFY_INBUILT_RULES[0], @@ -174,7 +174,9 @@ def test_create_valid_notification_instance(self, fledge_url): assert 2 == len(jdoc) assert NOTIFY_PLUGIN == jdoc['delivery'][0]['name'] assert "notify" == jdoc['delivery'][0]['type'] - assert 1 == len(jdoc['rules']) + assert 2 == len(jdoc['rules']) + assert NOTIFY_INBUILT_RULES[0] == jdoc['rules'][1]['name'] + assert NOTIFY_INBUILT_RULES[1] == jdoc['rules'][0]['name'] @pytest.mark.parametrize("test_input, expected_error", [ ({"rule": "+"}, '400: Invalid rule property in payload.'), diff --git a/tests/system/python/conftest.py b/tests/system/python/conftest.py index eca0c587ef..62c4c37edb 100644 --- a/tests/system/python/conftest.py +++ b/tests/system/python/conftest.py @@ -8,7 +8,6 @@ """ import subprocess import os -import platform import sys import fnmatch import http.client @@ -16,10 +15,11 @@ import base64 import ssl import shutil -import pytest from urllib.parse import quote from pathlib import Path -import sys +import time +import pytest +from helpers import utils __author__ = "Vaibhav Singhal" @@ -59,12 +59,12 @@ def reset_and_start_fledge(storage_plugin): assert os.environ.get('FLEDGE_ROOT') is not None subprocess.run(["$FLEDGE_ROOT/scripts/fledge kill"], shell=True, check=True) - if storage_plugin == 'postgres': - subprocess.run(["sed -i 's/sqlite/postgres/g' $FLEDGE_ROOT/data/etc/storage.json"], shell=True, check=True) - else: - subprocess.run(["sed -i 's/postgres/sqlite/g' $FLEDGE_ROOT/data/etc/storage.json"], shell=True, check=True) - - subprocess.run(["echo YES | $FLEDGE_ROOT/scripts/fledge reset"], shell=True, check=True) + storage_plugin_val = "postgres" if storage_plugin == 'postgres' else "sqlite" + subprocess.run( + ["echo $(jq -c --arg 
STORAGE_PLUGIN_VAL {} '.plugin.value=$STORAGE_PLUGIN_VAL' " + "$FLEDGE_ROOT/data/etc/storage.json) > $FLEDGE_ROOT/data/etc/storage.json".format(storage_plugin_val)], + shell=True, check=True) + subprocess.run(["echo 'YES\nYES' | $FLEDGE_ROOT/scripts/fledge reset"], shell=True, check=True) subprocess.run(["$FLEDGE_ROOT/scripts/fledge start"], shell=True) stat = subprocess.run(["$FLEDGE_ROOT/scripts/fledge status"], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -133,9 +133,7 @@ def clone_make_install(): clone_make_install() elif installation_type == 'package': try: - os_platform = platform.platform() - pkg_mgr = 'yum' if 'centos' in os_platform or 'redhat' in os_platform else 'apt' - subprocess.run(["sudo {} install -y fledge-south-{}".format(pkg_mgr, south_plugin)], shell=True, + subprocess.run(["sudo {} install -y fledge-south-{}".format(pytest.PKG_MGR, south_plugin)], shell=True, check=True) except subprocess.CalledProcessError: assert False, "{} package installation failed!".format(south_plugin) @@ -158,7 +156,7 @@ def clone_make_install(): @pytest.fixture def add_north(): def _add_fledge_north(fledge_url, north_plugin, north_branch, installation_type='make', north_instance_name="play", - config=None, + config=None, schedule_repeat_time=30, plugin_lang="python", use_pip_cache=True, enabled=True, plugin_discovery_name=None, is_task=True): """Add north plugin and start the service/task by default""" @@ -186,9 +184,7 @@ def clone_make_install(): clone_make_install() elif installation_type == 'package': try: - os_platform = platform.platform() - pkg_mgr = 'yum' if 'centos' in os_platform or 'redhat' in os_platform else 'apt' - subprocess.run(["sudo {} install -y fledge-north-{}".format(pkg_mgr, north_plugin)], shell=True, + subprocess.run(["sudo {} install -y fledge-north-{}".format(pytest.PKG_MGR, north_plugin)], shell=True, check=True) except subprocess.CalledProcessError: assert False, "{} package installation failed!".format(north_plugin) @@ -198,9 +194,10 @@ def clone_make_install(): if is_task: # Create north task - data = {"name": "{}".format(north_instance_name), "type": "North", + data = {"name": "{}".format(north_instance_name), "type": "north", "plugin": "{}".format(plugin_discovery_name), - "schedule_enabled": _enabled, "schedule_repeat": 30, "schedule_type": "3", "config": _config} + "schedule_enabled": _enabled, "schedule_repeat": "{}".format(schedule_repeat_time), "schedule_type": "3", "config": _config} + print(data) conn.request("POST", '/fledge/scheduled/task', json.dumps(data)) else: # Create north service @@ -218,6 +215,138 @@ def clone_make_install(): return _add_fledge_north +@pytest.fixture +def add_service(): + def _add_service(fledge_url, service, service_branch, retries, installation_type = "make", service_name = "svc@123", + enabled = True): + + """ + Fixture to add Service and start the start service by default + fledge_url: IP address or domain to access fledge + service: Service to be installed + service_branch: Branch of service to be installed + retries: Number of tries for polling + installation_type: Type of installation for service i.e. 
make or package
+        service_name: Name that will be given to the service being installed
+        enabled: Flag to enable or disable the service
+        """
+
+        # Check if the service is already installed
+        retval = utils.get_request(fledge_url, "/fledge/service")
+        for ele in retval["services"]:
+            if ele["type"].lower() == service:
+                return ele
+
+        PROJECT_ROOT = Path(__file__).parent.parent.parent.parent
+
+        # Install Service
+        def clone_make_install():
+            try:
+                subprocess.run(["{}/tests/system/python/scripts/install_c_service {} {}".format(
+                    PROJECT_ROOT, service_branch, service)], shell=True, check=True)
+            except subprocess.CalledProcessError:
+                assert False, "{} service installation failed".format(service)
+
+        if installation_type == 'make':
+            clone_make_install()
+        elif installation_type == 'package':
+            try:
+                subprocess.run(["sudo {} install -y fledge-service-{}".format(pytest.PKG_MGR, service)], shell=True,
+                               check=True)
+            except subprocess.CalledProcessError:
+                assert False, "{} package installation failed!".format(service)
+        else:
+            return("Skipped {} service installation. Installation mechanism is set to {}.".format(service, installation_type))
+
+        # Add Service
+        data = {"name": "{}".format(service_name), "type": "{}".format(service), "enabled": enabled}
+        retval = utils.post_request(fledge_url, "/fledge/service", data)
+        assert service_name == retval["name"]
+        return retval
+
+    return _add_service
+
+@pytest.fixture
+def add_notification_instance():
+    def _add_notification_instance(fledge_url, delivery_plugin, delivery_branch, rule_config={}, delivery_config={},
+                                   rule_plugin="Threshold", rule_branch=None, rule_plugin_discovery_name=None,
+                                   delivery_plugin_discovery_name=None, installation_type='make', notification_type="one shot",
+                                   notification_instance_name="noti@123", retrigger_time=30, enabled=True):
+        """
+        Fixture to add a notification instance and enable it by default
+        fledge_url: IP address or domain to access fledge
+        delivery_plugin: Notify or Delivery plugin to be installed
+        delivery_branch: Branch of Notify or Delivery plugin to be installed
+        rule_config: Configuration of Rule plugin
+        delivery_config: Configuration of Delivery plugin
+        rule_plugin: Rule plugin to be installed, by default Threshold and DataAvailability plugin is installed
+        rule_branch: Branch of Rule plugin to be installed
+        rule_plugin_discovery_name: Name to identify the Rule Plugin after installation
+        delivery_plugin_discovery_name: Name to identify the Delivery Plugin after installation
+        installation_type: Type of installation for plugins i.e. make or package
+        notification_type: Type of notification to be triggered i.e.
one_shot, retriggered, toggle + notification_instance_name: Name that will be given to notification instance to be created + retrigger_time: Interval between retriggered notifications + enabled: Flag to enable or disable notification instance + """ + PROJECT_ROOT = Path(__file__).parent.parent.parent.parent + + if rule_plugin_discovery_name is None: + rule_plugin_discovery_name = rule_plugin + + if delivery_plugin_discovery_name is None: + delivery_plugin_discovery_name = delivery_plugin + + def clone_make_install(plugin_branch, plugin_type, plugin): + try: + subprocess.run(["{}/tests/system/python/scripts/install_c_plugin {} {} {}".format( + PROJECT_ROOT, plugin_branch, plugin_type, plugin)], shell=True, check=True) + except subprocess.CalledProcessError: + assert False, "{} plugin installation failed".format(plugin) + + if installation_type == 'make': + # Install Rule Plugin if it is not Threshold or DataAvailability + if rule_plugin not in ("Threshold","DataAvailability"): + clone_make_install(rule_branch, "rule", rule_plugin) + + clone_make_install(delivery_branch, "notify", delivery_plugin) + + elif installation_type == 'package': + try: + if rule_plugin not in ["Threshold", "DataAvailability"]: + subprocess.run(["sudo {} install -y fledge-rule-{}".format(pytest.PKG_MGR, rule_plugin)], shell=True, + check=True) + + except subprocess.CalledProcessError: + assert False, "Package installation of {} failed!".format(rule_plugin) + + try : + subprocess.run(["sudo {} install -y fledge-notify-{}".format(pytest.PKG_MGR, delivery_plugin)], shell=True, + check=True) + + except subprocess.CalledProcessError: + assert False, "Package installation of {} failed!".format(delivery_plugin) + else: + return("Skipped {} and {} plugin installation. Installation mechanism is set to {}.".format(rule_plugin, delivery_plugin, + installation_type)) + + data = { + "name": notification_instance_name, + "description": "{} notification instance".format(notification_instance_name), + "rule_config": rule_config, + "rule": rule_plugin_discovery_name, + "delivery_config": delivery_config, + "channel": delivery_plugin_discovery_name, + "notification_type": notification_type, + "enabled": enabled, + "retrigger_time": "{}".format(retrigger_time), + } + + retval = utils.post_request(fledge_url, "/fledge/notification", data) + assert "Notification {} created successfully".format(notification_instance_name) == retval["result"] + return retval + + return _add_notification_instance @pytest.fixture def start_north_pi_v2(): @@ -429,6 +558,102 @@ def clear_pi_system_through_pi_web_api(): return clear_pi_system_pi_web_api +@pytest.fixture +def verify_hierarchy_and_get_datapoints_from_pi_web_api(): + def _verify_hierarchy_and_get_datapoints_from_pi_web_api(host, admin, password, pi_database, af_hierarchy_list, asset, sensor): + """ This method verifies hierarchy created in pi web api is correctly """ + + username_password = "{}:{}".format(admin, password) + username_password_b64 = base64.b64encode(username_password.encode('ascii')).decode("ascii") + headers = {'Authorization': 'Basic %s' % username_password_b64} + AF_HIERARCHY_LIST=af_hierarchy_list.split('/')[1:] + AF_HIERARCHY_COUNT=len(AF_HIERARCHY_LIST) + + try: + ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2) + ctx.options |= ssl.PROTOCOL_TLSv1_1 + # With ssl.CERT_NONE as verify_mode, validation errors such as untrusted or expired cert + # are ignored and do not abort the TLS/SSL handshake. 
+            ctx.verify_mode = ssl.CERT_NONE
+            conn = http.client.HTTPSConnection(host, context=ctx)
+            conn.request("GET", '/piwebapi/assetservers', headers=headers)
+            res = conn.getresponse()
+            r = json.loads(res.read().decode())
+            dbs_url = r['Items'][0]['Links']['Databases']
+            print(dbs_url)
+            if dbs_url is not None:
+                conn.request("GET", dbs_url, headers=headers)
+                res = conn.getresponse()
+                r = json.loads(res.read().decode())
+                items = r['Items']
+                CHECK_DATABASE_EXISTS = list(filter(lambda items: items['Name'] == pi_database, items))[0]
+
+                if len(CHECK_DATABASE_EXISTS) > 0:
+                    elements_url = CHECK_DATABASE_EXISTS['Links']['Elements']
+                else:
+                    raise Exception('Database does not exist')
+
+                if elements_url is not None:
+                    conn.request("GET", elements_url, headers=headers)
+                    res = conn.getresponse()
+                    r = json.loads(res.read().decode())
+                    items = r['Items']
+
+                    CHECK_AF_ELEMENT_EXISTS = list(filter(lambda items: items['Name'] == AF_HIERARCHY_LIST[0], items))[0]
+                    if len(CHECK_AF_ELEMENT_EXISTS) != 0:
+
+                        # Walk down the AF hierarchy one level at a time, verifying each element name
+                        counter = 0
+                        while counter < AF_HIERARCHY_COUNT:
+                            if CHECK_AF_ELEMENT_EXISTS['Name'] == AF_HIERARCHY_LIST[counter]:
+                                counter += 1
+                                elements_url = CHECK_AF_ELEMENT_EXISTS['Links']['Elements']
+                                conn.request("GET", elements_url, headers=headers)
+                                res = conn.getresponse()
+                                CHECK_AF_ELEMENT_EXISTS = json.loads(res.read().decode())['Items'][0]
+                            else:
+                                raise Exception("AF hierarchy is incorrect")
+
+                        # Collect the recorded data points of the asset at the bottom of the hierarchy
+                        record = dict()
+                        if CHECK_AF_ELEMENT_EXISTS['Name'] == asset:
+                            record_url = CHECK_AF_ELEMENT_EXISTS['Links']['RecordedData']
+                            get_record_url = quote("{}?limit=10000".format(record_url), safe='?,=&/.:')
+                            print(get_record_url)
+                            conn.request("GET", get_record_url, headers=headers)
+                            res = conn.getresponse()
+                            items = json.loads(res.read().decode())['Items']
+                            no_of_datapoint_in_pi_server = len(items)
+                            Item_matched = False
+                            count = 0
+                            if no_of_datapoint_in_pi_server == 0:
+                                raise Exception("Data points are not created in PI Server")
+                            else:
+                                for item in items:
+                                    count += 1
+                                    if item['Name'] in sensor:
+                                        print(item['Name'])
+                                        record[item['Name']] = list(map(lambda val: val['Value'], filter(lambda ele: isinstance(ele['Value'], int) or isinstance(ele['Value'], float), item['Items'])))
+                                        Item_matched = True
+                                    elif count == no_of_datapoint_in_pi_server and Item_matched == False:
+                                        raise Exception("Required data points are not present --> {}".format(sensor))
+                        else:
+                            raise Exception("Asset does not exist, although hierarchy is correct")
+
+                        return(record)
+
+                    else:
+                        raise Exception("AF root element does not exist")
+                else:
+                    raise Exception("Elements URL not found")
+            else:
+                raise Exception("Database URL not found")
+
+
+        except (KeyError, IndexError, Exception) as ex:
+            print("Failed to read data due to {}".format(ex))
+            return None
+
+    return(_verify_hierarchy_and_get_datapoints_from_pi_web_api)
 
 @pytest.fixture
 def read_data_from_pi_web_api():
@@ -513,10 +738,8 @@ def _add_filter(filter_plugin, filter_plugin_branch, filter_name, filter_config,
             assert False, "{} filter plugin installation failed".format(filter_plugin)
         elif installation_type == 'package':
             try:
-                os_platform = platform.platform()
-                pkg_mgr = 'yum' if 'centos' in os_platform or 'redhat' in os_platform else 'apt'
-                subprocess.run(["sudo {} install -y fledge-filter-{}".format(pkg_mgr, filter_plugin)], shell=True,
-                               check=True)
+                subprocess.run(["sudo {} install -y fledge-filter-{}".format(pytest.PKG_MGR, filter_plugin)],
+                               shell=True, check=True)
             except subprocess.CalledProcessError:
                 assert False, "{} package installation
failed!".format(filter_plugin) else: @@ -583,12 +806,16 @@ def _disable_sch(fledge_url, sch_name): def pytest_addoption(parser): parser.addoption("--storage-plugin", action="store", default="sqlite", help="Database plugin to use for tests") + parser.addoption("--readings-plugin", action="store", default="Use main plugin", + help="Readings plugin to use for tests") parser.addoption("--fledge-url", action="store", default="localhost:8081", help="Fledge client api url") parser.addoption("--use-pip-cache", action="store", default=False, help="use pip cache is requirement is available") parser.addoption("--wait-time", action="store", default=5, type=int, help="Generic wait time between processes to run") + parser.addoption("--wait-fix", action="store", default=0, type=int, + help="Extra wait time required for process to run") parser.addoption("--retries", action="store", default=3, type=int, help="Number of tries for polling") # TODO: Temporary fixture, to be used with value False for environments where PI Web API is not stable @@ -613,6 +840,7 @@ def pytest_addoption(parser): help="Name of the South Service") parser.addoption("--asset-name", action="store", default="SystemTest", help="Name of asset") + parser.addoption("--num-assets", action="store", default=300, type=int, help="Total No. of Assets to be created") # Filter Args parser.addoption("--filter-branch", action="store", default="develop", help="Filter plugin repo branch") @@ -708,12 +936,49 @@ def pytest_addoption(parser): parser.addoption("--start-north-as-service", action="store", type=bool, default=True, help="Whether start the north as a service.") + # Fogbench Config + parser.addoption("--fogbench-host", action="store", default="localhost", + help="FogBench Destination Host Address") + + parser.addoption("--fogbench-port", action="store", default="5683", type=int, + help="FogBench Destination Port") + + # Azure-IoT Config + parser.addoption("--azure-host", action="store", default="azure-server", + help="Azure-IoT Host Name") + + parser.addoption("--azure-device", action="store", default="azure-iot-device", + help="Azure-IoT Device ID") + + parser.addoption("--azure-key", action="store", default="azure-iot-key", + help="Azure-IoT SharedAccess key") + + parser.addoption("--azure-storage-account-url", action="store", default="azure-storage-account-url", + help="Azure Storage Account URL") + + parser.addoption("--azure-storage-account-key", action="store", default="azure-storage-account-key", + help="Azure Storage Account Access Key") + + parser.addoption("--azure-storage-container", action="store", default="azure_storage_container", + help="Container Name in Azure where data is stored") + + parser.addoption("--run-time", action="store", default="60", + help="The number of minute for which a test should run") +@pytest.fixture +def num_assets(request): + return request.config.getoption("--num-assets") + @pytest.fixture def storage_plugin(request): return request.config.getoption("--storage-plugin") +@pytest.fixture +def readings_plugin(request): + return request.config.getoption("--readings-plugin") + + @pytest.fixture def remote_user(request): return request.config.getoption("--remote-user") @@ -793,7 +1058,10 @@ def fledge_url(request): def wait_time(request): return request.config.getoption("--wait-time") - +@pytest.fixture +def wait_fix(request): + return request.config.getoption("--wait-fix") + @pytest.fixture def retries(request): return request.config.getoption("--retries") @@ -982,3 +1250,79 @@ def 
throttled_network_config(request): @pytest.fixture def start_north_as_service(request): return request.config.getoption("--start-north-as-service") + + +def read_os_release(): + """ General information to identifying the operating system """ + import ast + import re + os_details = {} + with open('/etc/os-release', encoding="utf-8") as f: + for line_number, line in enumerate(f, start=1): + line = line.rstrip() + if not line or line.startswith('#'): + continue + m = re.match(r'([A-Z][A-Z_0-9]+)=(.*)', line) + if m: + name, val = m.groups() + if val and val[0] in '"\'': + val = ast.literal_eval(val) + os_details.update({name: val}) + return os_details + + +def is_redhat_based(): + """ + To check if the Operating system is of Red Hat family or Not + Examples: + a) For an operating system with "ID=centos", an assignment of "ID_LIKE="rhel fedora"" is appropriate + b) For an operating system with "ID=ubuntu/raspbian", an assignment of "ID_LIKE=debian" is appropriate. + """ + os_release = read_os_release() + id_like = os_release.get('ID_LIKE') + if id_like is not None and any(x in id_like.lower() for x in ['centos', 'rhel', 'redhat', 'fedora']): + return True + return False + + +def pytest_configure(): + pytest.OS_PLATFORM_DETAILS = read_os_release() + pytest.IS_REDHAT = is_redhat_based() + pytest.PKG_MGR = 'yum' if pytest.IS_REDHAT else 'apt' + +@pytest.fixture +def fogbench_host(request): + return request.config.getoption("--fogbench-host") + + +@pytest.fixture +def fogbench_port(request): + return request.config.getoption("--fogbench-port") + +@pytest.fixture +def azure_host(request): + return request.config.getoption("--azure-host") + +@pytest.fixture +def azure_device(request): + return request.config.getoption("--azure-device") + +@pytest.fixture +def azure_key(request): + return request.config.getoption("--azure-key") + +@pytest.fixture +def azure_storage_account_url(request): + return request.config.getoption("--azure-storage-account-url") + +@pytest.fixture +def azure_storage_account_key(request): + return request.config.getoption("--azure-storage-account-key") + +@pytest.fixture +def azure_storage_container(request): + return request.config.getoption("--azure-storage-container") + +@pytest.fixture +def run_time(request): + return request.config.getoption("--run-time") \ No newline at end of file diff --git a/tests/system/python/e2e/test_e2e_notification_service_with_plugins.py b/tests/system/python/e2e/test_e2e_notification_service_with_plugins.py index 5a76a3399c..dc433ccd14 100644 --- a/tests/system/python/e2e/test_e2e_notification_service_with_plugins.py +++ b/tests/system/python/e2e/test_e2e_notification_service_with_plugins.py @@ -30,7 +30,7 @@ SERVICE = "notification" SERVICE_NAME = "NotificationServer #1" NOTIFY_PLUGIN = "python35" -NOTIFY_INBUILT_RULES = ["Threshold"] +NOTIFY_INBUILT_RULES = ["Threshold", "DataAvailability"] def _configure_and_start_service(service_branch, fledge_url, remove_directories): @@ -139,8 +139,9 @@ def test_get_default_notification_plugins(self, fledge_url, remove_directories): remove_directories(os.environ['FLEDGE_ROOT'] + 'cmake_build/C/plugins/notificationRule') jdoc = _get_result(fledge_url, '/fledge/notification/plugin') assert [] == jdoc['delivery'] - assert 1 == len(jdoc['rules']) - assert NOTIFY_INBUILT_RULES[0] == jdoc['rules'][0]['name'] + assert 2 == len(jdoc['rules']) + assert NOTIFY_INBUILT_RULES[0] == jdoc['rules'][1]['name'] + assert NOTIFY_INBUILT_RULES[1] == jdoc['rules'][0]['name'] class TestNotificationCRUD: @@ -164,8 +165,9 @@ def 
test_inbuilt_rule_plugin_and_notify_python35_delivery(self, fledge_url): jdoc = _get_result(fledge_url, '/fledge/notification/plugin') assert 1 == len(jdoc['delivery']) assert NOTIFY_PLUGIN == jdoc['delivery'][0]['name'] - assert 1 == len(jdoc['rules']) - assert NOTIFY_INBUILT_RULES[0] == jdoc['rules'][0]['name'] + assert 2 == len(jdoc['rules']) + assert NOTIFY_INBUILT_RULES[0] == jdoc['rules'][1]['name'] + assert NOTIFY_INBUILT_RULES[1] == jdoc['rules'][0]['name'] def test_get_notifications_and_audit_entry(self, fledge_url): jdoc = _get_result(fledge_url, '/fledge/notification') @@ -322,6 +324,7 @@ def test_sent_and_receive_notification(self, fledge_url, start_south, wait_time) class TestStartStopNotificationService: def test_shutdown_service_with_schedule_disable(self, fledge_url, disable_schedule, wait_time): disable_schedule(fledge_url, SERVICE_NAME) + pause_for_x_seconds(x=wait_time) _verify_service(fledge_url, status='shutdown') pause_for_x_seconds(x=wait_time) # After shutdown there should be 1 entry for NTFSD (shutdown) diff --git a/tests/system/python/iprpc/README.rst b/tests/system/python/iprpc/README.rst index bee9a917ba..993f3458a7 100644 --- a/tests/system/python/iprpc/README.rst +++ b/tests/system/python/iprpc/README.rst @@ -72,15 +72,6 @@ While testing following settings can be present. Running Fledge System tests involving iprpc =========================================== -Test Prerequisites ------------------- - -To install the dependencies required to run python tests, run the following two commands from FLEDGE_ROOT -:: - - cd $FLEDGE_ROOT/tests/system/python/iprpc - python3 -m pip install -r requirements-iprpc-test.txt --user - Test Execution -------------- diff --git a/tests/system/python/iprpc/requirements-iprpc-test.txt b/tests/system/python/iprpc/requirements-iprpc-test.txt deleted file mode 100644 index ac495ece00..0000000000 --- a/tests/system/python/iprpc/requirements-iprpc-test.txt +++ /dev/null @@ -1 +0,0 @@ -numpy==1.19.4 \ No newline at end of file diff --git a/tests/system/python/packages/test_authentication.py b/tests/system/python/packages/test_authentication.py index a20d77e0ed..4669da7782 100644 --- a/tests/system/python/packages/test_authentication.py +++ b/tests/system/python/packages/test_authentication.py @@ -11,10 +11,10 @@ import http.client import json import time -import pytest -from pathlib import Path import ssl -import platform +from pathlib import Path +import pytest +from pytest import PKG_MGR __author__ = "Yash Tatkondawar" __copyright__ = "Copyright (c) 2019 Dianomic Systems" @@ -34,6 +34,14 @@ context = ssl._create_unverified_context() LOGIN_SUCCESS_MSG = "Logged in successfully." 
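Throughout these package tests the per-file platform.platform() checks are replaced by the PKG_MGR constant that conftest.py now sets in pytest_configure(), so the operating system is detected once per session. A minimal sketch of the pattern, assuming a hypothetical helper in one of the package test modules (the plain from pytest import PKG_MGR works because pytest_configure() in conftest.py runs before the test modules are imported):

    import subprocess

    from pytest import PKG_MGR  # 'yum' on Red Hat family systems, 'apt' otherwise


    def install_example_plugin():
        # Hypothetical helper mirroring how these package tests install plugins;
        # the plugin package name is only an example.
        subprocess.run(["sudo {} install -y fledge-south-sinusoid".format(PKG_MGR)],
                       shell=True, check=True)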
+ROLES = {'roles': [ + {'description': 'All CRUD privileges', 'id': 1, 'name': 'admin'}, + {'description': 'All CRUD operations and self profile management', 'id': 2, 'name': 'user'}, + {'id': 3, 'name': 'view', 'description': 'Only to view the configuration'}, + {'id': 4, 'name': 'data-view', 'description': 'Only read the data in buffer'}, + {'id': 5, 'name': 'control', + 'description': 'Same as editor can do and also have access for control scripts and pipelines'} +]} def send_data_using_fogbench(wait_time): @@ -172,9 +180,7 @@ def remove_and_add_fledge_pkgs(package_build_version): assert False, "setup package script failed" try: - os_platform = platform.platform() - pkg_mgr = 'yum' if 'centos' in os_platform or 'redhat' in os_platform else 'apt' - subprocess.run(["sudo {} install -y fledge-south-http-south".format(pkg_mgr)], shell=True, check=True) + subprocess.run(["sudo {} install -y fledge-south-http-south".format(PKG_MGR)], shell=True, check=True) except subprocess.CalledProcessError: assert False, "installation of http-south package failed" @@ -566,11 +572,7 @@ def test_get_roles_with_password_token(self, fledge_url): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'roles': [{'description': 'All CRUD privileges', 'id': 1, 'name': 'admin'}, - {'description': 'All CRUD operations and self profile management', 'id': 2, 'name': 'user'}, - {'id': 3, 'name': 'view', 'description': 'Only to view the configuration'}, - {'id': 4, 'name': 'data-view', 'description': 'Only read the data in buffer'} - ]} == jdoc + assert ROLES == jdoc def test_get_roles_with_certificate_token(self, fledge_url): conn = http.client.HTTPConnection(fledge_url) @@ -579,19 +581,15 @@ def test_get_roles_with_certificate_token(self, fledge_url): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'roles': [{'description': 'All CRUD privileges', 'id': 1, 'name': 'admin'}, - {'description': 'All CRUD operations and self profile management', 'id': 2, 'name': 'user'}, - {'id': 3, 'name': 'view', 'description': 'Only to view the configuration'}, - {'id': 4, 'name': 'data-view', 'description': 'Only read the data in buffer'} - ]} == jdoc + assert ROLES == jdoc @pytest.mark.parametrize(("form_data", "expected_values"), [ ({"username": "any1", "password": "User@123", "real_name": "AJ", "description": "Nerd user"}, {'user': {'userName': 'any1', 'userId': 3, 'roleId': 2, 'accessMethod': 'any', 'realName': 'AJ', - 'description': 'Nerd user'}, 'message': 'any1 user has been created successfully'}), + 'description': 'Nerd user'}, 'message': 'any1 user has been created successfully.'}), ({"username": "admin1", "password": "F0gl@mp!", "role_id": 1}, {'user': {'userName': 'admin1', 'userId': 4, 'roleId': 1, 'accessMethod': 'any', 'realName': '', - 'description': ''}, 'message': 'admin1 user has been created successfully'}) + 'description': ''}, 'message': 'admin1 user has been created successfully.'}) ]) def test_create_user_with_password_token(self, fledge_url, form_data, expected_values): conn = http.client.HTTPConnection(fledge_url) @@ -606,10 +604,10 @@ def test_create_user_with_password_token(self, fledge_url, form_data, expected_v @pytest.mark.parametrize(("form_data", "expected_values"), [ ({"username": "any2", "password": "User@123", "real_name": "PG", "description": "Nerd user"}, {'user': {'userName': 'any2', 'userId': 5, 'roleId': 2, 'accessMethod': 'any', 'realName': 'PG', - 'description': 'Nerd user'}, 'message': 'any2 user has been created successfully'}), + 
'description': 'Nerd user'}, 'message': 'any2 user has been created successfully.'}), ({"username": "admin2", "password": "F0gl@mp!", "role_id": 1}, {'user': {'userName': 'admin2', 'userId': 6, 'roleId': 1, 'accessMethod': 'any', 'realName': '', - 'description': ''}, 'message': 'admin2 user has been created successfully'}) + 'description': ''}, 'message': 'admin2 user has been created successfully.'}) ]) def test_create_user_with_certificate_token(self, fledge_url, form_data, expected_values): conn = http.client.HTTPConnection(fledge_url) @@ -646,7 +644,7 @@ def test_update_password_with_password_token(self, fledge_url): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'message': 'Password has been updated successfully for user id:<{}>'.format(uid)} == jdoc + assert {'message': 'Password has been updated successfully for user ID:<{}>.'.format(uid)} == jdoc def test_update_password_with_certificate_token(self, fledge_url): uid = 5 @@ -658,7 +656,7 @@ def test_update_password_with_certificate_token(self, fledge_url): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'message': 'Password has been updated successfully for user id:<{}>'.format(uid)} == jdoc + assert {'message': 'Password has been updated successfully for user ID:<{}>.'.format(uid)} == jdoc @pytest.mark.parametrize(("form_data", "expected_values"), [ ({"username": "any1", "password": "F0gl@mp1"}, LOGIN_SUCCESS_MSG), @@ -681,7 +679,7 @@ def test_reset_user_with_password_token(self, fledge_url): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'message': 'User with id:<3> has been updated successfully'} == jdoc + assert {'message': 'User with ID:<3> has been updated successfully.'} == jdoc def test_reset_user_with_certificate_token(self, fledge_url): conn = http.client.HTTPConnection(fledge_url) @@ -691,7 +689,7 @@ def test_reset_user_with_certificate_token(self, fledge_url): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'message': 'User with id:<5> has been updated successfully'} == jdoc + assert {'message': 'User with ID:<5> has been updated successfully.'} == jdoc @pytest.mark.parametrize(("form_data", "expected_values"), [ ({"username": "any1", "password": "F0gl@mp!#1"}, LOGIN_SUCCESS_MSG), @@ -713,7 +711,7 @@ def test_delete_user_with_password_token(self, fledge_url): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'message': "User has been deleted successfully"} == jdoc + assert {'message': "User has been deleted successfully."} == jdoc def test_delete_user_with_certificate_token(self, fledge_url): conn = http.client.HTTPConnection(fledge_url) @@ -722,7 +720,7 @@ def test_delete_user_with_certificate_token(self, fledge_url): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'message': "User has been deleted successfully"} == jdoc + assert {'message': "User has been deleted successfully."} == jdoc @pytest.mark.parametrize(("form_data", "expected_values"), [ ({"username": "admin1", "password": "F0gl@mp!"}, ""), @@ -963,19 +961,15 @@ def test_get_roles(self, fledge_url): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'roles': [{'description': 'All CRUD privileges', 'id': 1, 'name': 'admin'}, - {'description': 'All CRUD operations and self profile management', 'id': 2, 'name': 'user'}, - {'id': 3, 'name': 'view', 'description': 'Only to view the configuration'}, - {'id': 4, 'name': 'data-view', 'description': 'Only read the data in 
buffer'} - ]} == jdoc + assert ROLES == jdoc @pytest.mark.parametrize(("form_data", "expected_values"), [ ({"username": "any1", "password": "User@123", "real_name": "AJ", "description": "Nerd user"}, {'user': {'userName': 'any1', 'userId': 3, 'roleId': 2, 'accessMethod': 'any', 'realName': 'AJ', - 'description': 'Nerd user'}, 'message': 'any1 user has been created successfully'}), + 'description': 'Nerd user'}, 'message': 'any1 user has been created successfully.'}), ({"username": "admin1", "password": "F0gl@mp!", "role_id": 1}, {'user': {'userName': 'admin1', 'userId': 4, 'roleId': 1, 'accessMethod': 'any', 'realName': '', - 'description': ''}, 'message': 'admin1 user has been created successfully'}) + 'description': ''}, 'message': 'admin1 user has been created successfully.'}) ]) def test_create_user(self, fledge_url, form_data, expected_values): conn = http.client.HTTPConnection(fledge_url) @@ -1010,7 +1004,7 @@ def test_update_password(self, fledge_url): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'message': 'Password has been updated successfully for user id:<{}>'.format(uid)} == jdoc + assert {'message': 'Password has been updated successfully for user ID:<{}>.'.format(uid)} == jdoc def test_login_with_updated_password(self, fledge_url): conn = http.client.HTTPConnection(fledge_url) @@ -1029,7 +1023,7 @@ def test_reset_user(self, fledge_url): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'message': 'User with id:<3> has been updated successfully'} == jdoc + assert {'message': 'User with ID:<3> has been updated successfully.'} == jdoc def test_login_with_resetted_password(self, fledge_url): conn = http.client.HTTPConnection(fledge_url) @@ -1047,7 +1041,7 @@ def test_delete_user(self, fledge_url): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'message': "User has been deleted successfully"} == jdoc + assert {'message': "User has been deleted successfully."} == jdoc def test_login_of_deleted_user(self, fledge_url): conn = http.client.HTTPConnection(fledge_url) @@ -1271,19 +1265,15 @@ def test_get_roles(self, fledge_url): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'roles': [{'description': 'All CRUD privileges', 'id': 1, 'name': 'admin'}, - {'description': 'All CRUD operations and self profile management', 'id': 2, 'name': 'user'}, - {'id': 3, 'name': 'view', 'description': 'Only to view the configuration'}, - {'id': 4, 'name': 'data-view', 'description': 'Only read the data in buffer'} - ]} == jdoc + assert ROLES == jdoc @pytest.mark.parametrize(("form_data", "expected_values"), [ ({"username": "any1", "password": "User@123", "real_name": "AJ", "description": "Nerd user"}, {'user': {'userName': 'any1', 'userId': 3, 'roleId': 2, 'accessMethod': 'any', 'realName': 'AJ', - 'description': 'Nerd user'}, 'message': 'any1 user has been created successfully'}), + 'description': 'Nerd user'}, 'message': 'any1 user has been created successfully.'}), ({"username": "admin1", "password": "F0gl@mp!", "role_id": 1}, {'user': {'userName': 'admin1', 'userId': 4, 'roleId': 1, 'accessMethod': 'any', 'realName': '', - 'description': ''}, 'message': 'admin1 user has been created successfully'}) + 'description': ''}, 'message': 'admin1 user has been created successfully.'}) ]) def test_create_user(self, fledge_url, form_data, expected_values): conn = http.client.HTTPConnection(fledge_url) @@ -1305,7 +1295,7 @@ def test_update_password(self, fledge_url): assert 200 == r.status r = 
r.read().decode() jdoc = json.loads(r) - assert {'message': 'Password has been updated successfully for user id:<{}>'.format(uid)} == jdoc + assert {'message': 'Password has been updated successfully for user ID:<{}>.'.format(uid)} == jdoc def test_reset_user(self, fledge_url): conn = http.client.HTTPConnection(fledge_url) @@ -1315,7 +1305,7 @@ def test_reset_user(self, fledge_url): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'message': 'User with id:<3> has been updated successfully'} == jdoc + assert {'message': 'User with ID:<3> has been updated successfully.'} == jdoc def test_delete_user(self, fledge_url): conn = http.client.HTTPConnection(fledge_url) @@ -1324,7 +1314,7 @@ def test_delete_user(self, fledge_url): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'message': "User has been deleted successfully"} == jdoc + assert {'message': "User has been deleted successfully."} == jdoc def test_logout_all(self, fledge_url): conn = http.client.HTTPConnection(fledge_url) @@ -1676,11 +1666,7 @@ def test_get_roles_with_password_token(self): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'roles': [{'description': 'All CRUD privileges', 'id': 1, 'name': 'admin'}, - {'description': 'All CRUD operations and self profile management', 'id': 2, 'name': 'user'}, - {'id': 3, 'name': 'view', 'description': 'Only to view the configuration'}, - {'id': 4, 'name': 'data-view', 'description': 'Only read the data in buffer'} - ]} == jdoc + assert ROLES == jdoc def test_get_roles_with_certificate_token(self): conn = http.client.HTTPSConnection("localhost", 1995, context=context) @@ -1689,19 +1675,15 @@ def test_get_roles_with_certificate_token(self): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'roles': [{'description': 'All CRUD privileges', 'id': 1, 'name': 'admin'}, - {'description': 'All CRUD operations and self profile management', 'id': 2, 'name': 'user'}, - {'id': 3, 'name': 'view', 'description': 'Only to view the configuration'}, - {'id': 4, 'name': 'data-view', 'description': 'Only read the data in buffer'} - ]} == jdoc + assert ROLES == jdoc @pytest.mark.parametrize(("form_data", "expected_values"), [ ({"username": "any1", "password": "User@123", "real_name": "AJ", "description": "Nerd user"}, {'user': {'userName': 'any1', 'userId': 3, 'roleId': 2, 'accessMethod': 'any', 'realName': 'AJ', - 'description': 'Nerd user'}, 'message': 'any1 user has been created successfully'}), + 'description': 'Nerd user'}, 'message': 'any1 user has been created successfully.'}), ({"username": "admin1", "password": "F0gl@mp!", "role_id": 1}, {'user': {'userName': 'admin1', 'userId': 4, 'roleId': 1, 'accessMethod': 'any', 'realName': '', - 'description': ''}, 'message': 'admin1 user has been created successfully'}) + 'description': ''}, 'message': 'admin1 user has been created successfully.'}) ]) def test_create_user_with_password_token(self, form_data, expected_values): conn = http.client.HTTPSConnection("localhost", 1995, context=context) @@ -1716,10 +1698,10 @@ def test_create_user_with_password_token(self, form_data, expected_values): @pytest.mark.parametrize(("form_data", "expected_values"), [ ({"username": "any2", "password": "User@123", "real_name": "PG", "description": "Nerd user"}, {'user': {'userName': 'any2', 'userId': 5, 'roleId': 2, 'accessMethod': 'any', 'realName': 'PG', - 'description': 'Nerd user'}, 'message': 'any2 user has been created successfully'}), + 'description': 'Nerd user'}, 
'message': 'any2 user has been created successfully.'}), ({"username": "admin2", "password": "F0gl@mp!", "role_id": 1}, {'user': {'userName': 'admin2', 'userId': 6, 'roleId': 1, 'accessMethod': 'any', 'realName': '', - 'description': ''}, 'message': 'admin2 user has been created successfully'}) + 'description': ''}, 'message': 'admin2 user has been created successfully.'}) ]) def test_create_user_with_certificate_token(self, form_data, expected_values): conn = http.client.HTTPSConnection("localhost", 1995, context=context) @@ -1756,7 +1738,7 @@ def test_update_password_with_password_token(self): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'message': 'Password has been updated successfully for user id:<{}>'.format(uid)} == jdoc + assert {'message': 'Password has been updated successfully for user ID:<{}>.'.format(uid)} == jdoc def test_update_password_with_certificate_token(self): uid = 5 @@ -1768,7 +1750,7 @@ def test_update_password_with_certificate_token(self): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'message': 'Password has been updated successfully for user id:<{}>'.format(uid)} == jdoc + assert {'message': 'Password has been updated successfully for user ID:<{}>.'.format(uid)} == jdoc @pytest.mark.parametrize(("form_data", "expected_values"), [ ({"username": "any1", "password": "F0gl@mp1"}, LOGIN_SUCCESS_MSG), @@ -1791,7 +1773,7 @@ def test_reset_user_with_password_token(self): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'message': 'User with id:<3> has been updated successfully'} == jdoc + assert {'message': 'User with ID:<3> has been updated successfully.'} == jdoc def test_reset_user_with_certificate_token(self): conn = http.client.HTTPSConnection("localhost", 1995, context=context) @@ -1801,7 +1783,7 @@ def test_reset_user_with_certificate_token(self): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'message': 'User with id:<5> has been updated successfully'} == jdoc + assert {'message': 'User with ID:<5> has been updated successfully.'} == jdoc @pytest.mark.parametrize(("form_data", "expected_values"), [ ({"username": "any1", "password": "F0gl@mp!#1"}, LOGIN_SUCCESS_MSG), @@ -1823,7 +1805,7 @@ def test_delete_user_with_password_token(self): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'message': "User has been deleted successfully"} == jdoc + assert {'message': "User has been deleted successfully."} == jdoc def test_delete_user_with_certificate_token(self): conn = http.client.HTTPSConnection("localhost", 1995, context=context) @@ -1832,7 +1814,7 @@ def test_delete_user_with_certificate_token(self): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'message': "User has been deleted successfully"} == jdoc + assert {'message': "User has been deleted successfully."} == jdoc @pytest.mark.parametrize(("form_data", "expected_values"), [ ({"username": "admin1", "password": "F0gl@mp!"}, ""), @@ -2077,19 +2059,15 @@ def test_get_roles(self): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'roles': [{'description': 'All CRUD privileges', 'id': 1, 'name': 'admin'}, - {'description': 'All CRUD operations and self profile management', 'id': 2, 'name': 'user'}, - {'id': 3, 'name': 'view', 'description': 'Only to view the configuration'}, - {'id': 4, 'name': 'data-view', 'description': 'Only read the data in buffer'} - ]} == jdoc + assert ROLES == jdoc 
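The repeated inline role lists in this file are now asserted against the module-level ROLES constant defined above, which also carries the newer data-view and control roles. A small sketch of the lookup those assertions wrap, with the endpoint path and authorization header assumed from the request pattern used elsewhere in this test module:

    import http.client
    import json


    def get_roles(fledge_url, token, expected_roles):
        # Assumed endpoint and auth header; in the tests expected_roles is the
        # module-level ROLES constant.
        conn = http.client.HTTPConnection(fledge_url)
        conn.request("GET", "/fledge/user/role", headers={"authorization": token})
        r = conn.getresponse()
        assert 200 == r.status
        jdoc = json.loads(r.read().decode())
        assert expected_roles == jdoc
        return jdoc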
@pytest.mark.parametrize(("form_data", "expected_values"), [ ({"username": "any1", "password": "User@123", "real_name": "AJ", "description": "Nerd user"}, {'user': {'userName': 'any1', 'userId': 3, 'roleId': 2, 'accessMethod': 'any', 'realName': 'AJ', - 'description': 'Nerd user'}, 'message': 'any1 user has been created successfully'}), + 'description': 'Nerd user'}, 'message': 'any1 user has been created successfully.'}), ({"username": "admin1", "password": "F0gl@mp!", "role_id": 1}, {'user': {'userName': 'admin1', 'userId': 4, 'roleId': 1, 'accessMethod': 'any', 'realName': '', - 'description': ''}, 'message': 'admin1 user has been created successfully'}) + 'description': ''}, 'message': 'admin1 user has been created successfully.'}) ]) def test_create_user(self, form_data, expected_values): conn = http.client.HTTPSConnection("localhost", 1995, context=context) @@ -2124,7 +2102,7 @@ def test_update_password(self): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'message': 'Password has been updated successfully for user id:<{}>'.format(uid)} == jdoc + assert {'message': 'Password has been updated successfully for user ID:<{}>.'.format(uid)} == jdoc def test_login_with_updated_password(self): conn = http.client.HTTPSConnection("localhost", 1995, context=context) @@ -2143,7 +2121,7 @@ def test_reset_user(self): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'message': 'User with id:<3> has been updated successfully'} == jdoc + assert {'message': 'User with ID:<3> has been updated successfully.'} == jdoc def test_login_with_resetted_password(self): conn = http.client.HTTPSConnection("localhost", 1995, context=context) @@ -2161,7 +2139,7 @@ def test_delete_user(self): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'message': "User has been deleted successfully"} == jdoc + assert {'message': "User has been deleted successfully."} == jdoc def test_login_of_deleted_user(self): conn = http.client.HTTPSConnection("localhost", 1995, context=context) @@ -2392,19 +2370,15 @@ def test_get_roles(self): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'roles': [{'description': 'All CRUD privileges', 'id': 1, 'name': 'admin'}, - {'description': 'All CRUD operations and self profile management', 'id': 2, 'name': 'user'}, - {'id': 3, 'name': 'view', 'description': 'Only to view the configuration'}, - {'id': 4, 'name': 'data-view', 'description': 'Only read the data in buffer'} - ]} == jdoc + assert ROLES == jdoc @pytest.mark.parametrize(("form_data", "expected_values"), [ ({"username": "any1", "password": "User@123", "real_name": "AJ", "description": "Nerd user"}, {'user': {'userName': 'any1', 'userId': 3, 'roleId': 2, 'accessMethod': 'any', 'realName': 'AJ', - 'description': 'Nerd user'}, 'message': 'any1 user has been created successfully'}), + 'description': 'Nerd user'}, 'message': 'any1 user has been created successfully.'}), ({"username": "admin1", "password": "F0gl@mp!", "role_id": 1}, {'user': {'userName': 'admin1', 'userId': 4, 'roleId': 1, 'accessMethod': 'any', 'realName': '', - 'description': ''}, 'message': 'admin1 user has been created successfully'}) + 'description': ''}, 'message': 'admin1 user has been created successfully.'}) ]) def test_create_user(self, form_data, expected_values): conn = http.client.HTTPSConnection("localhost", 1995, context=context) @@ -2426,7 +2400,7 @@ def test_update_password(self): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - 
assert {'message': 'Password has been updated successfully for user id:<{}>'.format(uid)} == jdoc + assert {'message': 'Password has been updated successfully for user ID:<{}>.'.format(uid)} == jdoc def test_reset_user(self): conn = http.client.HTTPSConnection("localhost", 1995, context=context) @@ -2436,7 +2410,7 @@ def test_reset_user(self): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'message': 'User with id:<3> has been updated successfully'} == jdoc + assert {'message': 'User with ID:<3> has been updated successfully.'} == jdoc def test_delete_user(self): conn = http.client.HTTPSConnection("localhost", 1995, context=context) @@ -2445,7 +2419,7 @@ def test_delete_user(self): assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) - assert {'message': "User has been deleted successfully"} == jdoc + assert {'message': "User has been deleted successfully."} == jdoc def test_logout_all(self): conn = http.client.HTTPSConnection("localhost", 1995, context=context) diff --git a/tests/system/python/packages/test_eds.py b/tests/system/python/packages/test_eds.py index dc71b43de6..6790f4a7a4 100644 --- a/tests/system/python/packages/test_eds.py +++ b/tests/system/python/packages/test_eds.py @@ -16,11 +16,11 @@ import http.client import json import time -import pytest from pathlib import Path -import utils from datetime import datetime -import platform +import pytest +import utils +from pytest import PKG_MGR __author__ = "Yash Tatkondawar" __copyright__ = "Copyright (c) 2020 Dianomic Systems, Inc." @@ -72,9 +72,7 @@ def remove_and_add_pkgs(package_build_version): assert False, "setup package script failed" try: - os_platform = platform.platform() - pkg_mgr = 'yum' if 'centos' in os_platform or 'redhat' in os_platform else 'apt' - subprocess.run(["sudo {} install -y fledge-south-sinusoid".format(pkg_mgr)], shell=True, check=True) + subprocess.run(["sudo {} install -y fledge-south-sinusoid".format(PKG_MGR)], shell=True, check=True) except subprocess.CalledProcessError: assert False, "installation of sinusoid package failed" diff --git a/tests/system/python/packages/test_gcp_gateway.py b/tests/system/python/packages/test_gcp_gateway.py index 5fcd6b67c1..229f1e517e 100644 --- a/tests/system/python/packages/test_gcp_gateway.py +++ b/tests/system/python/packages/test_gcp_gateway.py @@ -13,12 +13,12 @@ import http.client import json import time -import pytest from pathlib import Path -import utils from datetime import timezone, datetime -import itertools -import platform +import utils +import pytest +from pytest import PKG_MGR + __author__ = "Yash Tatkondawar" __copyright__ = "Copyright (c) 2020 Dianomic Systems Inc." 
@@ -67,9 +67,8 @@ def remove_and_add_pkgs(package_build_version): assert False, "setup package script failed" try: - os_platform = platform.platform() - pkg_mgr = 'yum' if 'centos' in os_platform or 'redhat' in os_platform else 'apt' - subprocess.run(["sudo {} install -y fledge-north-gcp fledge-south-sinusoid".format(pkg_mgr)], shell=True, check=True) + subprocess.run(["sudo {} install -y fledge-north-gcp fledge-south-sinusoid".format(PKG_MGR)], + shell=True, check=True) except subprocess.CalledProcessError: assert False, "installation of gcp-gateway and sinusoid packages failed" diff --git a/tests/system/python/packages/test_multiple_assets.py b/tests/system/python/packages/test_multiple_assets.py index 70e6528422..097c67aba8 100644 --- a/tests/system/python/packages/test_multiple_assets.py +++ b/tests/system/python/packages/test_multiple_assets.py @@ -13,7 +13,6 @@ import http.client import json import os -import platform import ssl import subprocess import time @@ -22,16 +21,16 @@ import pytest import utils +from pytest import PKG_MGR + # This gives the path of directory where fledge is cloned. test_file < packages < python < system < tests < ROOT PROJECT_ROOT = Path(__file__).parent.parent.parent.parent.parent SCRIPTS_DIR_ROOT = "{}/tests/system/python/packages/data/".format(PROJECT_ROOT) FLEDGE_ROOT = os.environ.get('FLEDGE_ROOT') BENCHMARK_SOUTH_SVC_NAME = "BenchMark #" -ASSET_NAME = "random_multiple_assets" -PER_BENCHMARK_ASSET_COUNT = 150 -AF_HIERARCHY_LEVEL = "multipleassets/multipleassetslvl2/multipleassetslvl3" - +ASSET_NAME = "{}_random_multiple_assets".format(time.strftime("%Y%m%d")) +AF_HIERARCHY_LEVEL = "{0}_multipleassets/{0}_multipleassetslvl2/{0}_multipleassetslvl3".format(time.strftime("%Y%m%d")) @pytest.fixture def reset_fledge(wait_time): @@ -60,15 +59,13 @@ def remove_and_add_pkgs(package_build_version): assert False, "setup package script failed" try: - os_platform = platform.platform() - pkg_mgr = 'yum' if 'centos' in os_platform or 'redhat' in os_platform else 'apt' - subprocess.run(["sudo {} install -y fledge-south-benchmark".format(pkg_mgr)], shell=True, check=True) + subprocess.run(["sudo {} install -y fledge-south-benchmark".format(PKG_MGR)], shell=True, check=True) except subprocess.CalledProcessError: assert False, "installation of benchmark package failed" @pytest.fixture -def start_north(start_north_omf_as_a_service, fledge_url, +def start_north(start_north_omf_as_a_service, fledge_url, num_assets, pi_host, pi_port, pi_admin, pi_passwd, clear_pi_system_through_pi_web_api, pi_db): global north_schedule_id @@ -79,8 +76,10 @@ def start_north(start_north_omf_as_a_service, fledge_url, asset_dict = {} no_of_services = 6 + num_assets_per_service=(num_assets//no_of_services) + # Creates assets dictionary for PI server cleanup for service_count in range(no_of_services): - for asst_count in range(PER_BENCHMARK_ASSET_COUNT): + for asst_count in range(num_assets_per_service): asset_name = ASSET_NAME + "-{}{}".format(service_count + 1, asst_count + 1) asset_dict[asset_name] = dp_list @@ -94,7 +93,7 @@ def start_north(start_north_omf_as_a_service, fledge_url, yield start_north -def add_benchmark(fledge_url, name, count): +def add_benchmark(fledge_url, name, count, num_assets_per_service): data = { "name": name, "type": "south", @@ -105,13 +104,21 @@ def add_benchmark(fledge_url, name, count): "value": "{}-{}".format(ASSET_NAME, count) }, "numAssets": { - "value": "{}".format(PER_BENCHMARK_ASSET_COUNT) + "value": "{}".format(num_assets_per_service) } } } post_url = 
"/fledge/service" utils.post_request(fledge_url, post_url, data) +def verify_restart(fledge_url, retries): + for i in range(retries): + time.sleep(30) + get_url = '/fledge/ping' + ping_result = utils.get_request(fledge_url, get_url) + if ping_result['uptime'] > 0: + return + assert ping_result['uptime'] > 0 def verify_service_added(fledge_url, name): get_url = "/fledge/south" @@ -119,7 +126,6 @@ def verify_service_added(fledge_url, name): assert len(result["services"]) assert name in [s["name"] for s in result["services"]] - def verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries): get_url = "/fledge/ping" ping_result = utils.get_request(fledge_url, get_url) @@ -144,10 +150,19 @@ def verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries): return ping_result -def verify_asset(fledge_url, total_assets): - get_url = "/fledge/asset" - result = utils.get_request(fledge_url, get_url) - assert len(result), "No asset found" +def verify_asset(fledge_url, total_assets, count, wait_time): + # Check whether "total_assets" are created or not by calling "/fledge/asset" endpoint for "count" number of iterations + # In each iteration sleep for wait_time * 6, i.e., 60 seconds.. + for i in range(count): + get_url = "/fledge/asset" + result = utils.get_request(fledge_url, get_url) + asset_created = len(result) + if (total_assets == asset_created): + print("Total {} asset created".format(asset_created)) + return + # Fledge takes 60 seconds to create 100 assets. + # Added sleep for "wait_time * 6", So that we can changes sleep time by changing value of wait_time from the jenkins job in future if required. + time.sleep(wait_time * 6) assert total_assets == len(result) @@ -164,16 +179,16 @@ def verify_asset_tracking_details(fledge_url, total_assets, total_benchmark_serv assert "Benchmark" in [s["plugin"] for s in tracking_details["track"]] -def _verify_egress(read_data_from_pi_web_api, pi_host, pi_admin, pi_passwd, pi_db, wait_time, retries, total_benchmark_services): +def _verify_egress(read_data_from_pi_web_api, pi_host, pi_admin, pi_passwd, pi_db, wait_time, retries, total_benchmark_services, num_assets_per_service): af_hierarchy_level_list = AF_HIERARCHY_LEVEL.split("/") type_id = 1 - for s in range(1,total_benchmark_services+1): - for a in range(1,PER_BENCHMARK_ASSET_COUNT+1): + for s in range(1, total_benchmark_services+1): + for a in range(1, num_assets_per_service+1): retry_count = 0 data_from_pi = None - asset_name = "random-" + str(s) + str(a) + asset_name = "random_multiple_assets-" + str(s) + str(a) print(asset_name) recorded_datapoint = "{}".format(asset_name) # Name of asset in the PI server @@ -192,8 +207,8 @@ def _verify_egress(read_data_from_pi_web_api, pi_host, pi_admin, pi_passwd, pi_d class TestMultiAssets: def test_multiple_assets_with_restart(self, remove_and_add_pkgs, reset_fledge, start_north, read_data_from_pi_web_api, - skip_verify_north_interface, fledge_url, - wait_time, retries, pi_host, pi_port, pi_admin, pi_passwd, pi_db): + skip_verify_north_interface, fledge_url, num_assets, wait_time, retries, pi_host, + pi_port, pi_admin, pi_passwd, pi_db): """ Test multiple benchmark services with multiple assets are created in fledge, also verifies assets after restarting fledge. 
remove_and_add_pkgs: Fixture to remove and install latest fledge packages @@ -204,33 +219,37 @@ def test_multiple_assets_with_restart(self, remove_and_add_pkgs, reset_fledge, s on endpoint GET /fledge/asset""" total_benchmark_services = 6 - total_assets = PER_BENCHMARK_ASSET_COUNT * total_benchmark_services + num_assets_per_service = (num_assets//total_benchmark_services) + # Total number of assets that would be created; total_assets is used instead of num_assets to handle the case where num_assets is not divisible by 3 or 6, since we create 3 or 6 services and each service should create an equal number of assets. + total_assets = num_assets_per_service * total_benchmark_services for count in range(total_benchmark_services): service_name = BENCHMARK_SOUTH_SVC_NAME + "{}".format(count + 1) - add_benchmark(fledge_url, service_name, count + 1) + add_benchmark(fledge_url, service_name, count + 1, num_assets_per_service) verify_service_added(fledge_url, service_name) - - # Wait until total_assets are created - time.sleep(PER_BENCHMARK_ASSET_COUNT + 2 * wait_time) + + # Sleep for a few seconds so that data from the south services can be ingested into Fledge + time.sleep(wait_time * 3) + verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries) - verify_asset(fledge_url, total_assets) + # num_assets//100 gives the integer iteration count passed to verify_asset + verify_asset(fledge_url, total_assets, num_assets//100, wait_time) put_url = "/fledge/restart" utils.put_request(fledge_url, urllib.parse.quote(put_url)) # Wait for fledge to restart - time.sleep(wait_time * 2) + verify_restart(fledge_url, retries) verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries) - verify_asset(fledge_url, total_assets) - - verify_asset_tracking_details(fledge_url, total_assets, total_benchmark_services, PER_BENCHMARK_ASSET_COUNT) + # num_assets//100 gives the integer iteration count passed to verify_asset + verify_asset(fledge_url, total_assets, num_assets//100, wait_time) + verify_asset_tracking_details(fledge_url, total_assets, total_benchmark_services, num_assets_per_service) old_ping_result = verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries) - # Wait for read and sent readings to increase - time.sleep(wait_time) - + # Sleep for a few seconds to verify that data ingestion into Fledge keeps increasing after restart. + time.sleep(wait_time * 3) + new_ping_result = verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries) # Verifies whether Read and Sent readings are increasing after restart assert old_ping_result['dataRead'] < new_ping_result['dataRead'] @@ -238,13 +257,13 @@ def test_multiple_assets_with_restart(self, remove_and_add_pkgs, reset_fledge, s if not skip_verify_north_interface: assert old_ping_result['dataSent'] < new_ping_result['dataSent'] _verify_egress(read_data_from_pi_web_api, pi_host, pi_admin, pi_passwd, pi_db, wait_time, retries, - total_benchmark_services) + total_benchmark_services, num_assets_per_service) # FIXME: If sleep is removed then the next test fails time.sleep(wait_time * 2) def test_add_multiple_assets_before_after_restart(self, reset_fledge, start_north, read_data_from_pi_web_api, - skip_verify_north_interface, fledge_url, + skip_verify_north_interface, fledge_url, num_assets, wait_time, retries, pi_host, pi_port, pi_admin, pi_passwd, pi_db): """ Test addition of multiple assets before and after restarting fledge. 
reset_fledge: Fixture to reset fledge @@ -254,45 +273,47 @@ def test_add_multiple_assets_before_after_restart(self, reset_fledge, start_nort on endpoint GET /fledge/asset""" total_benchmark_services = 3 - # Total number of assets that would be created - total_assets = PER_BENCHMARK_ASSET_COUNT * total_benchmark_services - + num_assets_per_service = (num_assets//(total_benchmark_services*2)) + # Total number of assets that would be created; total_assets is used instead of num_assets to handle the case where num_assets is not divisible by 3 or 6, since we create 3 or 6 services and each service should create an equal number of assets. + total_assets = num_assets_per_service * total_benchmark_services + for count in range(total_benchmark_services): service_name = BENCHMARK_SOUTH_SVC_NAME + "{}".format(count + 1) - add_benchmark(fledge_url, service_name, count + 1) + add_benchmark(fledge_url, service_name, count + 1, num_assets_per_service) verify_service_added(fledge_url, service_name) - # Wait until total_assets are created - time.sleep(PER_BENCHMARK_ASSET_COUNT + 2 * wait_time) + + # Sleep for a few seconds so that data from the south services can be ingested into Fledge. + time.sleep(wait_time * 3) + verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries) - verify_asset(fledge_url, total_assets) + # num_assets//100 gives the integer iteration count passed to verify_asset + verify_asset(fledge_url, total_assets, num_assets//100, wait_time) - verify_asset_tracking_details(fledge_url, total_assets, total_benchmark_services, PER_BENCHMARK_ASSET_COUNT) + verify_asset_tracking_details(fledge_url, total_assets, total_benchmark_services, num_assets_per_service) put_url = "/fledge/restart" utils.put_request(fledge_url, urllib.parse.quote(put_url)) # Wait for fledge to restart - time.sleep(wait_time * 3) + verify_restart(fledge_url, retries) # We are adding more total_assets number of assets total_assets = total_assets * 2 for count in range(total_benchmark_services): service_name = BENCHMARK_SOUTH_SVC_NAME + "{}".format(count + 4) - add_benchmark(fledge_url, service_name, count + 4) + add_benchmark(fledge_url, service_name, count + 4, num_assets_per_service) verify_service_added(fledge_url, service_name) - # Wait until total_assets are created - time.sleep(PER_BENCHMARK_ASSET_COUNT + 2 * wait_time) verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries) - verify_asset(fledge_url, total_assets) - - verify_asset_tracking_details(fledge_url, total_assets, total_benchmark_services * 2, PER_BENCHMARK_ASSET_COUNT) + # num_assets//100 gives the integer iteration count passed to verify_asset + verify_asset(fledge_url, total_assets, num_assets//100, wait_time) + verify_asset_tracking_details(fledge_url, total_assets, total_benchmark_services * 2, num_assets_per_service) old_ping_result = verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries) - # Wait for read and sent readings to increase - time.sleep(wait_time) + # Sleep for a few seconds to verify that data ingestion into Fledge keeps increasing after adding more services. 
+ time.sleep(wait_time * 3) new_ping_result = verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries) # Verifies whether Read and Sent readings are increasing after restart @@ -300,11 +321,12 @@ def test_add_multiple_assets_before_after_restart(self, reset_fledge, start_nort if not skip_verify_north_interface: assert old_ping_result['dataSent'] < new_ping_result['dataSent'] + # Initially total_benchmark_services is 3 but after the restart 3 more south services are added, so total_benchmark_services * 2 is 6 _verify_egress(read_data_from_pi_web_api, pi_host, pi_admin, pi_passwd, pi_db, wait_time, retries, - total_benchmark_services) - - def test_multiple_assets_with_reconfig(self, reset_fledge, start_north, read_data_from_pi_web_api, skip_verify_north_interface, - fledge_url, + total_benchmark_services * 2, num_assets_per_service) + + def test_multiple_assets_with_reconfig(self, reset_fledge, start_north, read_data_from_pi_web_api, + skip_verify_north_interface, fledge_url, num_assets, wait_time, retries, pi_host, pi_port, pi_admin, pi_passwd, pi_db): """ Test addition of multiple assets with reconfiguration of south service. reset_fledge: Fixture to reset fledge @@ -314,24 +336,27 @@ def test_multiple_assets_with_reconfig(self, reset_fledge, start_north, read_dat on endpoint GET /fledge/asset""" total_benchmark_services = 3 - num_assets = 2 * PER_BENCHMARK_ASSET_COUNT - # Total number of assets that would be created - total_assets = PER_BENCHMARK_ASSET_COUNT * total_benchmark_services + num_assets_per_service = (num_assets//(total_benchmark_services*2)) + # total_assets is used instead of num_assets to handle the case where num_assets is not divisible by 3 or 6, since we create 3 or 6 services and each service should create an equal number of assets. 
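To make the divisibility note above concrete, here is a quick worked sketch using the default --num-assets value of 300 from conftest.py and the 3 benchmark services this reconfig test creates:

    # Worked example, assuming the default --num-assets of 300
    num_assets = 300
    total_benchmark_services = 3

    num_assets_per_service = num_assets // (total_benchmark_services * 2)  # 50
    total_assets = num_assets_per_service * total_benchmark_services       # 150 before reconfig
    total_assets = total_assets * 2                                        # 300 after numAssets is doubled

    # With --num-assets 100 instead, 100 // 6 == 16, so the expected totals would be
    # 48 and 96; this is why the assertions are driven by num_assets_per_service
    # rather than by num_assets directly.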
+ # Number of assets that would be created initially + total_assets = num_assets_per_service * total_benchmark_services for count in range(total_benchmark_services): service_name = BENCHMARK_SOUTH_SVC_NAME + "{}".format(count + 1) - add_benchmark(fledge_url, service_name, count + 1) + add_benchmark(fledge_url, service_name, count + 1, num_assets_per_service) verify_service_added(fledge_url, service_name) - # Wait until total_assets are created - time.sleep(PER_BENCHMARK_ASSET_COUNT + 2 * wait_time) + # Sleep for a few seconds so that data from the south services can be ingested into Fledge + time.sleep(wait_time * 3) + verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries) - verify_asset(fledge_url, total_assets) - - verify_asset_tracking_details(fledge_url, total_assets, total_benchmark_services, PER_BENCHMARK_ASSET_COUNT) + # num_assets//100 gives the integer iteration count passed to verify_asset + verify_asset(fledge_url, total_assets, num_assets//100, wait_time) + verify_asset_tracking_details(fledge_url, total_assets, total_benchmark_services, num_assets_per_service) # With reconfig, number of assets are doubled in each south service - payload = {"numAssets": "{}".format(num_assets)} + num_assets_per_service = 2 * num_assets_per_service + payload = {"numAssets": "{}".format(num_assets_per_service)} for count in range(total_benchmark_services): service_name = BENCHMARK_SOUTH_SVC_NAME + "{}".format(count + 1) put_url = "/fledge/category/{}".format(service_name) @@ -340,17 +365,15 @@ def test_multiple_assets_with_reconfig(self, reset_fledge, start_north, read_dat # In reconfig number of assets are doubled total_assets = total_assets * 2 - # Wait until total_assets are created - time.sleep(num_assets + 2 * wait_time) verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries) - verify_asset(fledge_url, total_assets) - - verify_asset_tracking_details(fledge_url, total_assets, total_benchmark_services, num_assets) + + verify_asset(fledge_url, total_assets, num_assets//100, wait_time) + verify_asset_tracking_details(fledge_url, total_assets, total_benchmark_services, num_assets_per_service) old_ping_result = verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries) - - # Wait for read and sent readings to increase - time.sleep(wait_time) + + # Sleep for a few seconds to verify that data ingestion into Fledge keeps increasing after reconfig of the south services. 
+ time.sleep(wait_time * 3) new_ping_result = verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries) # Verifies whether Read and Sent readings are increasing after restart @@ -359,4 +382,4 @@ def test_multiple_assets_with_reconfig(self, reset_fledge, start_north, read_dat if not skip_verify_north_interface: assert old_ping_result['dataSent'] < new_ping_result['dataSent'] _verify_egress(read_data_from_pi_web_api, pi_host, pi_admin, pi_passwd, pi_db, wait_time, retries, - total_benchmark_services) + total_benchmark_services, num_assets_per_service) diff --git a/tests/system/python/packages/test_north_azure.py b/tests/system/python/packages/test_north_azure.py new file mode 100644 index 0000000000..38bc668e31 --- /dev/null +++ b/tests/system/python/packages/test_north_azure.py @@ -0,0 +1,729 @@ +# -*- coding: utf-8 -*- + +# FLEDGE_BEGIN +# See: http://fledge-iot.readthedocs.io/ +# FLEDGE_END + +""" Test sending data to Azure-IoT-Hub using fledge-north-azure plugin + +""" + +__author__ = "Mohit Singh Tomar" +__copyright__ = "Copyright (c) 2023 Dianomic Systems Inc" +__license__ = "Apache 2.0" +__version__ = "${VERSION}" + +import subprocess +import http.client +import pytest +import os +import time +import utils +from pathlib import Path +import urllib.parse +import json +import sys +import datetime + +try: + subprocess.run(["python3 -m pip install azure-storage-blob==12.13.1"], shell=True, check=True) +except subprocess.CalledProcessError: + assert False, "Failed to install azure-storage-blob module" + +from azure.storage.blob import BlobServiceClient + +# This gives the path of directory where fledge is cloned. test_file < packages < python < system < tests < ROOT +PROJECT_ROOT = subprocess.getoutput("git rev-parse --show-toplevel") +SCRIPTS_DIR_ROOT = "{}/tests/system/python/scripts/package/".format(PROJECT_ROOT) +SOUTH_SERVICE_NAME = "FOGL-7352_sysinfo" +SOUTH_PLUGIN = "systeminfo" +NORTH_SERVICE_NAME = "FOGL-7352_azure" +NORTH_PLUGIN_NAME = "azure-iot" +NORTH_PLUGIN_DISCOVERY_NAME = "azure_iot" +LOCALJSONFILE = "azure.json" +FILTER = "expression" + +@pytest.fixture +def reset_fledge(wait_time): + try: + subprocess.run(["cd {} && ./reset" + .format(SCRIPTS_DIR_ROOT)], shell=True, check=True) + except subprocess.CalledProcessError: + assert False, "reset package script failed!" 
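The tests in this new file configure fledge-north-azure with a device connection string assembled from the --azure-* options added to conftest.py; the add_south_north_service and add_south_north_task fixtures further down pass the same structure to the north plugin. A minimal sketch of that assembly, with placeholder values standing in for real credentials:

    # Sketch with placeholder (non-real) Azure values; the real ones come from the
    # azure_host, azure_device and azure_key fixtures.
    azure_host = "example-hub.azure-devices.net"
    azure_device = "example-device"
    azure_key = "example-shared-access-key"

    north_config = {
        "primaryConnectionString": {
            "value": "HostName={};DeviceId={};SharedAccessKey={}".format(
                azure_host, azure_device, azure_key)
        }
    }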
+ + +def read_data_from_azure_storage_container(azure_storage_account_url,azure_storage_account_key, azure_storage_container): + + try: + t1=time.time() + blob_service_client_instance = BlobServiceClient(account_url=azure_storage_account_url, credential=azure_storage_account_key) + + container_client = blob_service_client_instance.get_container_client(container=azure_storage_container) + + blob_list = container_client.list_blobs() + + for blob in blob_list: + BLOBNAME = blob.name + print(f"Name: {blob.name}") + + + blob_client_instance = blob_service_client_instance.get_blob_client(azure_storage_container, BLOBNAME, snapshot=None) + with open(LOCALJSONFILE, "wb") as my_blob: + blob_data = blob_client_instance.download_blob() + blob_data.readinto(my_blob) + t2=time.time() + print(("It takes %s seconds to download "+BLOBNAME) % (t2 - t1)) + + with open(LOCALJSONFILE) as handler: + data = handler.readlines() + + return data + + except (Exception) as ex: + print("Failed to read data due to {}".format(ex)) + return None + +def verify_north_stats_on_invalid_config(fledge_url): + get_url = "/fledge/ping" + ping_result = utils.get_request(fledge_url, get_url) + assert "dataRead" in ping_result + assert ping_result['dataRead'] > 0, "South data NOT seen in ping header" + assert "dataSent" in ping_result + assert ping_result['dataSent'] < 1, "Data sent to Azure Iot Hub" + +def verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries): + get_url = "/fledge/ping" + ping_result = utils.get_request(fledge_url, get_url) + assert "dataRead" in ping_result + assert "dataSent" in ping_result + assert 0 < ping_result['dataRead'], "South data NOT seen in ping header" + + retry_count = 1 + sent = 0 + if not skip_verify_north_interface: + while retries > retry_count: + sent = ping_result["dataSent"] + if sent >= 1: + break + else: + time.sleep(wait_time) + + retry_count += 1 + ping_result = utils.get_request(fledge_url, get_url) + + assert 1 <= sent, "Failed to send data to Azure-IoT-Hub" + return ping_result + + +def verify_statistics_map(fledge_url, skip_verify_north_interface): + get_url = "/fledge/statistics" + jdoc = utils.get_request(fledge_url, get_url) + actual_stats_map = utils.serialize_stats_map(jdoc) + assert 1 <= actual_stats_map["{}-Ingest".format(SOUTH_SERVICE_NAME)] + assert 1 <= actual_stats_map['READINGS'] + if not skip_verify_north_interface: + assert 1 <= actual_stats_map['Readings Sent'] + assert 1 <= actual_stats_map[NORTH_SERVICE_NAME] + + +def verify_asset(fledge_url, ASSET): + get_url = "/fledge/asset" + result = utils.get_request(fledge_url, get_url) + assert len(result), "No asset found" + assert any(filter(lambda x: ASSET in x, [s["assetCode"] for s in result])) + + +def verify_asset_tracking_details(fledge_url, skip_verify_north_interface, ASSET): + tracking_details = utils.get_asset_tracking_details(fledge_url, "Ingest") + assert len(tracking_details["track"]), "Failed to track Ingest event" + tracked_item = tracking_details["track"][0] + assert ASSET in tracked_item["asset"] + assert "systeminfo" == tracked_item["plugin"] + + egress_tracking_details = utils.get_asset_tracking_details(fledge_url, "Egress") + assert len(egress_tracking_details["track"]), "Failed to track Egress event" + tracked_item = egress_tracking_details["track"][0] + assert ASSET in tracked_item["asset"] + assert NORTH_PLUGIN_DISCOVERY_NAME == tracked_item["plugin"] + + +def _verify_egress(azure_storage_account_url, azure_storage_account_key, azure_storage_container, wait_time, retries, ASSET): + 
retry_count = 0 + data_from_azure = None + + while (data_from_azure is None or len(data_from_azure) == 0) and retry_count < retries: + data_from_azure = read_data_from_azure_storage_container(azure_storage_account_url,azure_storage_account_key, azure_storage_container) + + if data_from_azure is None: + retry_count += 1 + time.sleep(wait_time) + + if data_from_azure is None or retry_count == retries: + assert False, "Failed to read data from Azure IoT Hub" + + asset_collected = list() + for ele in data_from_azure: + asset_collected.extend(list(map(lambda d: d['asset'], json.loads(ele)["Body"]))) + + assert any(filter(lambda x: ASSET in x, asset_collected)) + +@pytest.fixture +def add_south_north_task(add_south, add_north, fledge_url, azure_host, azure_device, azure_key): + """ This fixture + add_south: Fixture that adds a south service with given configuration + add_north: Fixture that adds a north service with given configuration + + """ + + # south_branch does not matter as these are archives.fledge-iot.org version install + add_south(SOUTH_PLUGIN, None, fledge_url, service_name=SOUTH_SERVICE_NAME, start_service=False, installation_type='package') + + _config = { + "primaryConnectionString": {"value":"HostName={};DeviceId={};SharedAccessKey={}".format(azure_host, azure_device, azure_key)} + } + # north_branch does not matter as these are archives.fledge-iot.org version install + add_north(fledge_url, NORTH_PLUGIN_NAME, None, installation_type='package', north_instance_name=NORTH_SERVICE_NAME, + config=_config, schedule_repeat_time=10, enabled=False, plugin_discovery_name=NORTH_PLUGIN_DISCOVERY_NAME, is_task=True) + +@pytest.fixture +def add_south_north_service(add_south, add_north, fledge_url, azure_host, azure_device, azure_key): + """ This fixture + add_south: Fixture that adds a south service with given configuration + add_north: Fixture that adds a north service with given configuration + + """ + # south_branch does not matter as these are archives.fledge-iot.org version install + add_south(SOUTH_PLUGIN, None, fledge_url, service_name=SOUTH_SERVICE_NAME, start_service=False, installation_type='package') + + _config = { + "primaryConnectionString": {"value":"HostName={};DeviceId={};SharedAccessKey={}".format(azure_host, azure_device, azure_key)} + } + # north_branch does not matter as these are archives.fledge-iot.org version install + add_north(fledge_url, NORTH_PLUGIN_NAME, None, installation_type='package', north_instance_name=NORTH_SERVICE_NAME, + config=_config, enabled=False, plugin_discovery_name=NORTH_PLUGIN_DISCOVERY_NAME, is_task=False) + +def config_south(fledge_url, ASSET): + payload = {"assetNamePrefix": "{}".format(ASSET)} + put_url = "/fledge/category/{}".format(SOUTH_SERVICE_NAME) + utils.put_request(fledge_url, urllib.parse.quote(put_url), payload) + +def update_filter_config(fledge_url, plugin, mode): + data = {"enable": "{}".format(mode)} + put_url = "/fledge/category/{}_{}_exp".format(NORTH_SERVICE_NAME, plugin) + utils.put_request(fledge_url, urllib.parse.quote(put_url, safe='?,=,&,/'), data) + +def add_expression_filter(add_filter, fledge_url, NORTH_PLUGIN_NAME): + filter_cfg = {"enable": "true", "expression": "log(1K-blocks)".format(), "name": "{}_exp".format(NORTH_PLUGIN_NAME)} + add_filter("{}".format(FILTER), None, "{}_exp".format(NORTH_PLUGIN_NAME), filter_cfg, fledge_url, "{}".format(NORTH_SERVICE_NAME), installation_type='package') + +class TestNorthAzureIoTHubDevicePlugin: + + def test_send(self, clean_setup_fledge_packages, reset_fledge, 
add_south_north_service, fledge_url, enable_schedule, + disable_schedule, azure_host, azure_device, azure_key, wait_time, retries, skip_verify_north_interface, + azure_storage_account_url, azure_storage_account_key, azure_storage_container): + + """ Test that checks whether data is inserted in Fledge and sent to Azure-IoT Hub. + clean_setup_fledge_packages: Fixture for removing fledge from system completely if it is already present + and reinstalling it based on command line arguments. + reset_fledge: Fixture that reset and cleanup the fledge + add_south_north_service: Fixture that add south and north instance in disable mode + enable_schedule: Fixture for enabling schedules or services + disable_schedule: Fixture for disabling schedules or services + azure_host: Fixture that provide Hostname of Azure IoT Hub + azure_device: Fixture that provide ID of Device deployed in Azure IoT Hub + azure_key: Fixture that provide access key of Azure IoT Hub + azure_storage_account_url: Fixture that provide URL for accessing Storage Blob of Azure + azure_storage_account_key: Fixture that provide access key for accessing Storage Blob + azure_storage_container: Fixture that provides name of container deployed in Azure + """ + # Update Asset name + ASSET = "test1_FOGL-7352_system" + config_south(fledge_url, ASSET) + + # Enable South Service for 10 Seconds + enable_schedule(fledge_url, SOUTH_SERVICE_NAME) + time.sleep(wait_time) + disable_schedule(fledge_url, SOUTH_SERVICE_NAME) + + # Enable North Service for sending data to Azure-IOT-Hub + enable_schedule(fledge_url, NORTH_SERVICE_NAME) + + verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries) + verify_asset(fledge_url, ASSET) + verify_statistics_map(fledge_url, skip_verify_north_interface) + verify_asset_tracking_details(fledge_url, skip_verify_north_interface, ASSET) + + # Storage blob JSON will be created every 2 minutes + time.sleep(150) + + + _verify_egress(azure_storage_account_url, azure_storage_account_key, azure_storage_container, wait_time, retries, ASSET) + + + def test_mqtt_over_websocket_reconfig(self, reset_fledge, add_south_north_service, fledge_url, enable_schedule, disable_schedule, + azure_host, azure_device, azure_key, azure_storage_account_url, azure_storage_account_key, + azure_storage_container, wait_time, retries, skip_verify_north_interface): + + """ Test that enables MQTT over websocket, then checks whether data is inserted into Fledge and sent to Azure-IoT Hub. 
+ + reset_fledge: Fixture that reset and cleanup the fledge + add_south_north_service: Fixture that add south and north instance in disable mode + enable_schedule: Fixture for enabling schedules or services + disable_schedule: Fixture for disabling schedules or services + azure_host: Fixture that provide Hostname of Azure IoT Hub + azure_device: Fixture that provide ID of Device deployed in Azure IoT Hub + azure_key: Fixture that provide access key of Azure IoT Hub + azure_storage_account_url: Fixture that provide URL for accessing Storage Blob of Azure + azure_storage_account_key: Fixture that provide access key for accessing Storage Blob + azure_storage_container: Fixture that provides name of container deployed in Azure + """ + # Update Asset name + ASSET = "test2_FOGL-7352_system" + config_south(fledge_url, ASSET) + + # Enable South Service for 10 Seconds + enable_schedule(fledge_url, SOUTH_SERVICE_NAME) + time.sleep(wait_time) + disable_schedule(fledge_url, SOUTH_SERVICE_NAME) + + # Enable MQTT over websocket + payload = {"websockets": "true"} + put_url = "/fledge/category/{}".format(NORTH_SERVICE_NAME) + utils.put_request(fledge_url, urllib.parse.quote(put_url), payload) + + # Enable North Service for sending data to Azure-IOT-Hub + enable_schedule(fledge_url, NORTH_SERVICE_NAME) + + verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries) + verify_asset(fledge_url, ASSET) + verify_statistics_map(fledge_url, skip_verify_north_interface) + verify_asset_tracking_details(fledge_url, skip_verify_north_interface, ASSET) + + # Storage blob JSON will be created every 2 minutes + time.sleep(150) + + + _verify_egress(azure_storage_account_url, azure_storage_account_key, azure_storage_container, wait_time, retries, ASSET) + + + def test_disable_enable(self, reset_fledge, add_south_north_service, fledge_url, enable_schedule, disable_schedule, + azure_host, azure_device, azure_key, azure_storage_account_url, azure_storage_account_key, + azure_storage_container, wait_time, retries, skip_verify_north_interface): + + """ Test that enables and disables the south and north services periodically, then + checks whether data is inserted into Fledge and sent to Azure-IoT Hub. 
+ + reset_fledge: Fixture that reset and cleanup the fledge + add_south_north_service: Fixture that add south and north instance in disable mode + enable_schedule: Fixture for enabling schedules or services + disable_schedule: Fixture for disabling schedules or services + azure_host: Fixture that provide Hostname of Azure IoT Hub + azure_device: Fixture that provide ID of Device deployed in Azure IoT Hub + azure_key: Fixture that provide access key of Azure IoT Hub + azure_storage_account_url: Fixture that provide URL for accessing Storage Blob of Azure + azure_storage_account_key: Fixture that provide access key for accessing Storage Blob + azure_storage_container: Fixture that provides name of container deployed in Azure + """ + + for i in range(2): + # Update Asset name + ASSET = "test3.{}_FOGL-7352_system".format(i) + config_south(fledge_url, ASSET) + + # Enable South Service for 10 Seonds + enable_schedule(fledge_url, SOUTH_SERVICE_NAME) + time.sleep(wait_time) + disable_schedule(fledge_url, SOUTH_SERVICE_NAME) + + # Enable North Service for sending data to Azure-IOT-Hub + enable_schedule(fledge_url, NORTH_SERVICE_NAME) + + verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries) + verify_asset(fledge_url, ASSET) + verify_statistics_map(fledge_url, skip_verify_north_interface) + + # Storage blob JSON will be created every 2 minutes + time.sleep(150) + disable_schedule(fledge_url, NORTH_SERVICE_NAME) + + _verify_egress(azure_storage_account_url, azure_storage_account_key, azure_storage_container, wait_time, retries, ASSET) + + + def test_send_with_filter(self, reset_fledge, add_south_north_service, fledge_url, enable_schedule, disable_schedule, + azure_host, azure_device, azure_key, azure_storage_account_url, azure_storage_account_key, + azure_storage_container, wait_time, retries, skip_verify_north_interface, add_filter): + + """ Test that attach filters to North service and enable and disable filter periodically + then check data inserted into Fledge and sent to Azure-IoT Hub or not. 
+ + reset_fledge: Fixture that reset and cleanup the fledge + add_south_north_service: Fixture that add south and north instance in disable mode + enable_schedule: Fixture for enabling schedules or services + disable_schedule: Fixture for disabling schedules or services + azure_host: Fixture that provide Hostname of Azure IoT Hub + azure_device: Fixture that provide ID of Device deployed in Azure IoT Hub + azure_key: Fixture that provide access key of Azure IoT Hub + azure_storage_account_url: Fixture that provide URL for accessing Storage Blob of Azure + azure_storage_account_key: Fixture that provide access key for accessing Storage Blob + azure_storage_container: Fixture that provides name of container deployed in Azure + add_filter:Fixture that add filter to south and north Instances + """ + # Update Asset name + ASSET = "test4_FOGL-7352_system" + config_south(fledge_url, ASSET) + + # Add Expression filter to North Service + add_expression_filter(add_filter, fledge_url, NORTH_PLUGIN_NAME) + + # Enable South Service for 10 Seonds + enable_schedule(fledge_url, SOUTH_SERVICE_NAME) + + # Enable North Service for sending data to Azure-IOT-Hub + enable_schedule(fledge_url, NORTH_SERVICE_NAME) + + print("On/Off of filter starts") + count = 0 + while count<3: + # For Disabling filter + update_filter_config(fledge_url, NORTH_PLUGIN_NAME, 'false') + time.sleep(wait_time*2) + + # For enabling filter + update_filter_config(fledge_url, NORTH_PLUGIN_NAME, 'true') + time.sleep(wait_time*2) + count+=1 + + verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries) + verify_asset(fledge_url, ASSET) + verify_statistics_map(fledge_url, skip_verify_north_interface) + verify_asset_tracking_details(fledge_url, skip_verify_north_interface, ASSET) + + # Storage blob JSON will be created every 2 minutes + time.sleep(150) + + + _verify_egress(azure_storage_account_url, azure_storage_account_key, azure_storage_container, wait_time, retries, ASSET) + + +class TestNorthAzureIoTHubDevicePluginTask: + + def test_send_as_a_task(self, reset_fledge, add_south_north_task, fledge_url, enable_schedule, disable_schedule, + azure_host, azure_device, azure_key, azure_storage_account_url, azure_storage_account_key, + azure_storage_container, wait_time, retries, skip_verify_north_interface): + + """ Test that creates south and north bound as task and check data is inserted in Fledge and sent to Azure-IoT Hub or not. 
+ + reset_fledge: Fixture that reset and cleanup the fledge + add_south_north_task: Fixture that add south and north instance in disable mode + enable_schedule: Fixture for enabling schedules or services + disable_schedule: Fixture for disabling schedules or services + azure_host: Fixture that provide Hostname of Azure IoT Hub + azure_device: Fixture that provide ID of Device deployed in Azure IoT Hub + azure_key: Fixture that provide access key of Azure IoT Hub + azure_storage_account_url: Fixture that provide URL for accessing Storage Blob of Azure + azure_storage_account_key: Fixture that provide access key for accessing Storage Blob + azure_storage_container: Fixture that provides name of container deployed in Azure + """ + # Update Asset name + ASSET = "test5_FOGL-7352_system" + config_south(fledge_url, ASSET) + + # Enable South Service for 10 Seonds + enable_schedule(fledge_url, SOUTH_SERVICE_NAME) + time.sleep(wait_time) + disable_schedule(fledge_url, SOUTH_SERVICE_NAME) + + # Enable North Service for sending data to Azure-IOT-Hub + enable_schedule(fledge_url, NORTH_SERVICE_NAME) + + verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries) + verify_asset(fledge_url, ASSET) + verify_statistics_map(fledge_url, skip_verify_north_interface) + verify_asset_tracking_details(fledge_url, skip_verify_north_interface, ASSET) + + # Storage blob JSON will be created every 2 minutes + time.sleep(150) + + + _verify_egress(azure_storage_account_url, azure_storage_account_key, azure_storage_container, wait_time, retries, ASSET) + + + def test_mqtt_over_websocket_reconfig_task(self, reset_fledge, add_south_north_task, fledge_url, enable_schedule, disable_schedule, + azure_host, azure_device, azure_key, azure_storage_account_url, azure_storage_account_key, + azure_storage_container, wait_time, retries, skip_verify_north_interface): + + """ Test that creates south and north bound as task as well as enable MQTT over websocket then + check data inserted in Fledge and sent to Azure-IoT Hub or not. 
+ + reset_fledge: Fixture that reset and cleanup the fledge + add_south_north_task: Fixture that add south and north instance in disable mode + enable_schedule: Fixture for enabling schedules or services + disable_schedule: Fixture for disabling schedules or services + azure_host: Fixture that provide Hostname of Azure IoT Hub + azure_device: Fixture that provide ID of Device deployed in Azure IoT Hub + azure_key: Fixture that provide access key of Azure IoT Hub + azure_storage_account_url: Fixture that provide URL for accessing Storage Blob of Azure + azure_storage_account_key: Fixture that provide access key for accessing Storage Blob + azure_storage_container: Fixture that provides name of container deployed in Azure + """ + # Update Asset name + ASSET = "test6_FOGL-7352_system" + config_south(fledge_url, ASSET) + + # Enable South Service for 10 Seonds + enable_schedule(fledge_url, SOUTH_SERVICE_NAME) + time.sleep(wait_time) + disable_schedule(fledge_url, SOUTH_SERVICE_NAME) + + # Enable MQTT over websocket + payload = {"websockets": "true"} + put_url = "/fledge/category/{}".format(NORTH_SERVICE_NAME) + utils.put_request(fledge_url, urllib.parse.quote(put_url), payload) + + # Enable North Service for sending data to Azure-IOT-Hub + enable_schedule(fledge_url, NORTH_SERVICE_NAME) + + verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries) + verify_asset(fledge_url, ASSET) + verify_statistics_map(fledge_url, skip_verify_north_interface) + verify_asset_tracking_details(fledge_url, skip_verify_north_interface, ASSET) + + # Storage blob JSON will be created every 2 minutes + time.sleep(150) + + + _verify_egress(azure_storage_account_url, azure_storage_account_key, azure_storage_container, wait_time, retries, ASSET) + + + def test_disable_enable_task(self, reset_fledge, add_south_north_task, fledge_url, enable_schedule, disable_schedule, + azure_host, azure_device, azure_key, azure_storage_account_url, azure_storage_account_key, + azure_storage_container, wait_time, retries, skip_verify_north_interface): + + """ Test that creates south and north bound as task as enable and disable them periodically then + check data inserted in Fledge and sent to Azure-IoT Hub or not. 
+ + reset_fledge: Fixture that reset and cleanup the fledge + add_south_north_task: Fixture that add south and north instance in disable mode + enable_schedule: Fixture for enabling schedules or services + disable_schedule: Fixture for disabling schedules or services + azure_host: Fixture that provide Hostname of Azure IoT Hub + azure_device: Fixture that provide ID of Device deployed in Azure IoT Hub + azure_key: Fixture that provide access key of Azure IoT Hub + azure_storage_account_url: Fixture that provide URL for accessing Storage Blob of Azure + azure_storage_account_key: Fixture that provide access key for accessing Storage Blob + azure_storage_container: Fixture that provides name of container deployed in Azure + """ + for i in range(2): + # Update Asset name + ASSET = "test7.{}_FOGL-7352_system".format(i) + config_south(fledge_url, ASSET) + + # Enable South Service for 10 Seonds + enable_schedule(fledge_url, SOUTH_SERVICE_NAME) + time.sleep(wait_time) + disable_schedule(fledge_url, SOUTH_SERVICE_NAME) + + # Enable North Service for sending data to Azure-IOT-Hub + enable_schedule(fledge_url, NORTH_SERVICE_NAME) + + verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries) + verify_asset(fledge_url, ASSET) + verify_statistics_map(fledge_url, skip_verify_north_interface) + + # Storage blob JSON will be created every 2 minutes + time.sleep(150) + disable_schedule(fledge_url, NORTH_SERVICE_NAME) + + _verify_egress(azure_storage_account_url, azure_storage_account_key, azure_storage_container, wait_time, retries, ASSET) + + def test_send_with_filter_task(self, reset_fledge, add_south_north_task, fledge_url, enable_schedule, disable_schedule, + azure_host, azure_device, azure_key, azure_storage_account_url, azure_storage_account_key, + azure_storage_container, wait_time, retries, skip_verify_north_interface, add_filter): + + """ Test that creates south and north bound as task and attach filters to North Bound as well as + enable and disable filters periodically then check data inserted in Fledge and sent to Azure-IoT Hub or not. + + reset_fledge: Fixture that reset and cleanup the fledge + add_south_north_task: Fixture that add south and north instance in disable mode + enable_schedule: Fixture for enabling schedules or services + disable_schedule: Fixture for disabling schedules or services + azure_host: Fixture that provide Hostname of Azure IoT Hub + azure_device: Fixture that provide ID of Device deployed in Azure IoT Hub + azure_key: Fixture that provide access key of Azure IoT Hub + azure_storage_account_url: Fixture that provide URL for accessing Storage Blob of Azure + azure_storage_account_key: Fixture that provide access key for accessing Storage Blob + azure_storage_container: Fixture that provides name of container deployed in Azure + add_filter: Fixture that add fiter to south or north instances. 
+ """ + # Update Asset name + ASSET = "test8_FOGL-7352_system" + config_south(fledge_url, ASSET) + + # Add Expression filter to North Service + add_expression_filter(add_filter, fledge_url, NORTH_PLUGIN_NAME) + + # Enable South Service for 10 Seonds + enable_schedule(fledge_url, SOUTH_SERVICE_NAME) + + # Enable North Service for sending data to Azure-IOT-Hub + enable_schedule(fledge_url, NORTH_SERVICE_NAME) + + print("On/Off of filter starts") + count = 0 + while count<3: + # For Disabling filter + update_filter_config(fledge_url, NORTH_PLUGIN_NAME, 'false') + time.sleep(wait_time*2) + + # For enabling filter + update_filter_config(fledge_url, NORTH_PLUGIN_NAME, 'true') + time.sleep(wait_time*2) + count+=1 + + verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries) + verify_asset(fledge_url, ASSET) + verify_statistics_map(fledge_url, skip_verify_north_interface) + verify_asset_tracking_details(fledge_url, skip_verify_north_interface, ASSET) + + # Storage blob JSON will be created every 2 minutes + time.sleep(150) + + + _verify_egress(azure_storage_account_url, azure_storage_account_key, azure_storage_container, wait_time, retries, ASSET) + + + +class TestNorthAzureIoTHubDevicePluginInvalidConfig: + + def test_invalid_connstr(self, reset_fledge, add_south, add_north, fledge_url, enable_schedule, disable_schedule, wait_time, retries): + + """ Test that checks connection string of north azure plugin is invalid or not. + + reset_fledge: Fixture that reset and cleanup the fledge + add_south: Fixture that south Instance in disable mode + add_north: Fixture that add north instance in disable mode + enable_schedule: Fixture for enabling schedules or services + disable_schedule: Fixture for disabling schedules or services + """ + # Add South and North + add_south(SOUTH_PLUGIN, None, fledge_url, service_name=SOUTH_SERVICE_NAME, start_service=False, installation_type='package') + + _config = { + "primaryConnectionString": {"value":"InvalidConfig"} + } + # north_branch does not matter as these are archives.fledge-iot.org version install + add_north(fledge_url, NORTH_PLUGIN_NAME, None, installation_type='package', north_instance_name=NORTH_SERVICE_NAME, + config=_config, enabled=False, plugin_discovery_name=NORTH_PLUGIN_DISCOVERY_NAME, is_task=False) + + # Update Asset name + ASSET = "test9_FOGL-7352_system" + config_south(fledge_url, ASSET) + + # Enable South Service for 10 Seonds + enable_schedule(fledge_url, SOUTH_SERVICE_NAME) + time.sleep(wait_time) + disable_schedule(fledge_url, SOUTH_SERVICE_NAME) + + # Enable North Service for sending data to Azure-IOT-Hub + enable_schedule(fledge_url, NORTH_SERVICE_NAME) + + verify_north_stats_on_invalid_config(fledge_url) + + + def test_invalid_connstr_sharedkey(self, reset_fledge, add_south, add_north, fledge_url, enable_schedule, disable_schedule, + wait_time, retries, azure_host, azure_device, azure_key): + + """ Test that checks shared key passed to connection string of north azure plugin is invalid or not. 
+
+ reset_fledge: Fixture that reset and cleanup the fledge
+ add_south: Fixture that add south instance in disable mode
+ add_north: Fixture that add north instance in disable mode
+ enable_schedule: Fixture for enabling schedules or services
+ disable_schedule: Fixture for disabling schedules or services
+ azure_host: Fixture that provide Hostname of Azure IoT Hub
+ azure_device: Fixture that provide ID of Device deployed in Azure IoT Hub
+ azure_key: Fixture that provide access key of Azure IoT Hub
+ """
+ # Add South and North
+ add_south(SOUTH_PLUGIN, None, fledge_url, service_name=SOUTH_SERVICE_NAME, start_service=False, installation_type='package')
+
+ _config = {
+ "primaryConnectionString": {"value":"HostName={};DeviceId={};SharedAccessKey={}".format(azure_host, azure_device, azure_key[:-5])}
+ }
+ # north_branch does not matter as these are archives.fledge-iot.org version install
+ add_north(fledge_url, NORTH_PLUGIN_NAME, None, installation_type='package', north_instance_name=NORTH_SERVICE_NAME,
+ config=_config, enabled=False, plugin_discovery_name=NORTH_PLUGIN_DISCOVERY_NAME, is_task=False)
+
+ # Update Asset name
+ ASSET = "test10_FOGL-7352_system"
+ config_south(fledge_url, ASSET)
+
+ # Enable South Service for 10 Seconds
+ enable_schedule(fledge_url, SOUTH_SERVICE_NAME)
+ time.sleep(wait_time)
+ disable_schedule(fledge_url, SOUTH_SERVICE_NAME)
+
+ # Enable North Service for sending data to Azure-IOT-Hub
+ enable_schedule(fledge_url, NORTH_SERVICE_NAME)
+
+ verify_north_stats_on_invalid_config(fledge_url)
+
+
+class TestNorthAzureIoTHubDevicePluginLongRun:
+
+ def test_send_long_run(self, clean_setup_fledge_packages, reset_fledge, add_south_north_service, fledge_url, enable_schedule,
+ disable_schedule, azure_host, azure_device, azure_key, wait_time, retries, skip_verify_north_interface,
+ azure_storage_account_url, azure_storage_account_key, azure_storage_container, run_time):
+
+ """ Test that checks whether data is inserted in Fledge and sent to Azure-IoT Hub for a long duration, based on the parameter passed.
+
+ clean_setup_fledge_packages: Fixture for removing fledge from system completely if it is already present
+ and reinstall it based on command line arguments.
+ reset_fledge: Fixture that reset and cleanup the fledge
+ add_south_north_service: Fixture that add south and north instance in disable mode
+ enable_schedule: Fixture for enabling schedules or services
+ disable_schedule: Fixture for disabling schedules or services
+ azure_host: Fixture that provide Hostname of Azure IoT Hub
+ azure_device: Fixture that provide ID of Device deployed in Azure IoT Hub
+ azure_key: Fixture that provide access key of Azure IoT Hub
+ azure_storage_account_url: Fixture that provide URL for accessing Storage Blob of Azure
+ azure_storage_account_key: Fixture that provide access key for accessing Storage Blob
+ azure_storage_container: Fixture that provides name of container deployed in Azure
+ run_time: Fixture that defines the duration for which this test will be executed.
+ """ + START_TIME = datetime.datetime.now() + current_iteration = 1 + # Update Asset name + ASSET = "test11_FOGL-7352_system" + config_south(fledge_url, ASSET) + + + # Enable South Service for ingesting data into fledge + enable_schedule(fledge_url, SOUTH_SERVICE_NAME) + + time.sleep(wait_time) + # Enable North Service for sending data to Azure-IOT-Hub + enable_schedule(fledge_url, NORTH_SERVICE_NAME) + + while (datetime.datetime.now() - START_TIME).seconds <= (int(run_time) * 60): + verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries) + verify_asset(fledge_url, ASSET) + verify_statistics_map(fledge_url, skip_verify_north_interface) + verify_asset_tracking_details(fledge_url, skip_verify_north_interface, ASSET) + + # Storage blob JSON will be created every 2 minutes + time.sleep(150) + + + _verify_egress(azure_storage_account_url, azure_storage_account_key, azure_storage_container, wait_time, retries, ASSET) + + print('Successfully ran {} iterations'.format(current_iteration), datetime.datetime.now()) + current_iteration += 1 + current_duration = (datetime.datetime.now() - START_TIME).seconds + + # Disable South Service + disable_schedule(fledge_url, SOUTH_SERVICE_NAME) + + # Disable North Service + disable_schedule(fledge_url, NORTH_SERVICE_NAME) + \ No newline at end of file diff --git a/tests/system/python/packages/test_omf_north_service.py b/tests/system/python/packages/test_omf_north_service.py index 77d4a24fe1..f62e0ce042 100644 --- a/tests/system/python/packages/test_omf_north_service.py +++ b/tests/system/python/packages/test_omf_north_service.py @@ -636,7 +636,7 @@ def test_omf_service_with_delete_add_filter(self, reset_fledge, start_south_nort delete_url = "/fledge/filter/{}".format(filter1_name) resp = utils.delete_request(fledge_url, delete_url) - assert "Filter {} deleted successfully".format(filter1_name) == resp['result'] + assert "Filter {} deleted successfully.".format(filter1_name) == resp['result'] filter_cfg_scale = {"enable": "true"} add_filter("scale", None, filter1_name, filter_cfg_scale, fledge_url, north_service_name, diff --git a/tests/system/python/packages/test_opcua.py b/tests/system/python/packages/test_opcua.py index b292aca1b7..5f48ff323a 100644 --- a/tests/system/python/packages/test_opcua.py +++ b/tests/system/python/packages/test_opcua.py @@ -31,11 +31,12 @@ import subprocess import time -import utils -import pytest -import platform import urllib.parse from typing import Tuple +import utils +import pytest +from pytest import PKG_MGR + """ First FL instance IP Address """ FL1_INSTANCE_IP = "192.168.1.8" @@ -140,9 +141,7 @@ def install_pkg(): """ Fixture used for to install packages and only used in First FL instance """ try: - os_platform = platform.platform() - pkg_mgr = 'yum' if 'centos' in os_platform or 'redhat' in os_platform else 'apt' - subprocess.run(["sudo {} install -y {}".format(pkg_mgr, PKG_LIST)], shell=True, check=True) + subprocess.run(["sudo {} install -y {}".format(PKG_MGR, PKG_LIST)], shell=True, check=True) except subprocess.CalledProcessError: assert False, "{} one of installation package failed".format(PKG_LIST) diff --git a/tests/system/python/packages/test_pi_webapi.py b/tests/system/python/packages/test_pi_webapi.py index b2be96c9e1..a4180a8298 100644 --- a/tests/system/python/packages/test_pi_webapi.py +++ b/tests/system/python/packages/test_pi_webapi.py @@ -165,7 +165,7 @@ def start_south_north(add_south, start_north_task_omf_web_api, remove_data_file, class TestPackagesCoAP_PI_WebAPI: def test_omf_task(self, 
clean_setup_fledge_packages, reset_fledge, start_south_north, read_data_from_pi_web_api, - fledge_url, pi_host, pi_admin, pi_passwd, pi_db, + fledge_url, pi_host, pi_admin, pi_passwd, pi_db, fogbench_host, fogbench_port, wait_time, retries, skip_verify_north_interface, asset_name=ASSET): """ Test that data is inserted in Fledge and sent to PI start_south_north: Fixture that add south and north instance @@ -179,8 +179,10 @@ def test_omf_task(self, clean_setup_fledge_packages, reset_fledge, start_south_n data received from PI is same as data sent""" conn = http.client.HTTPConnection(fledge_url) + # Time to get CoAP service started + time.sleep(2) subprocess.run( - ["cd $FLEDGE_ROOT/extras/python; python3 -m fogbench -t ../../data/{}; cd -".format(TEMPLATE_NAME)], + ["cd $FLEDGE_ROOT/extras/python; python3 -m fogbench -t ../../data/{} --host {} --port {}; cd -".format(TEMPLATE_NAME, fogbench_host, fogbench_port)], shell=True, check=True) time.sleep(wait_time) @@ -195,7 +197,7 @@ def test_omf_task(self, clean_setup_fledge_packages, reset_fledge, start_south_n asset_name) def test_omf_task_with_reconfig(self, reset_fledge, start_south_north, read_data_from_pi_web_api, - skip_verify_north_interface, fledge_url, + skip_verify_north_interface, fledge_url, fogbench_host, fogbench_port, wait_time, retries, pi_host, pi_port, pi_admin, pi_passwd, pi_db, asset_name=ASSET): """ Test OMF as a North task by reconfiguring it. @@ -210,8 +212,10 @@ def test_omf_task_with_reconfig(self, reset_fledge, start_south_north, read_data on endpoint GET /fledge/track""" conn = http.client.HTTPConnection(fledge_url) + # Time to get CoAP service started + time.sleep(2) subprocess.run( - ["cd $FLEDGE_ROOT/extras/python; python3 -m fogbench -t ../../data/{}; cd -".format(TEMPLATE_NAME)], + ["cd $FLEDGE_ROOT/extras/python; python3 -m fogbench -t ../../data/{} --host {} --port {}; cd -".format(TEMPLATE_NAME, fogbench_host, fogbench_port)], shell=True, check=True) time.sleep(wait_time) @@ -231,7 +235,7 @@ def test_omf_task_with_reconfig(self, reset_fledge, start_south_north, read_data conn = http.client.HTTPConnection(fledge_url) subprocess.run( - ["cd $FLEDGE_ROOT/extras/python; python3 -m fogbench -t ../../data/{}; cd -".format(TEMPLATE_NAME)], + ["cd $FLEDGE_ROOT/extras/python; python3 -m fogbench -t ../../data/{} --host {} --port {}; cd -".format(TEMPLATE_NAME, fogbench_host, fogbench_port)], shell=True, check=True) # Wait for the OMF schedule to run. @@ -257,7 +261,7 @@ def test_omf_task_with_reconfig(self, reset_fledge, start_south_north, read_data conn = http.client.HTTPConnection(fledge_url) subprocess.run( - ["cd $FLEDGE_ROOT/extras/python; python3 -m fogbench -t ../../data/{}; cd -".format(TEMPLATE_NAME)], + ["cd $FLEDGE_ROOT/extras/python; python3 -m fogbench -t ../../data/{} --host {} --port {}; cd -".format(TEMPLATE_NAME, fogbench_host, fogbench_port)], shell=True, check=True) # Wait for the OMF schedule to run. 
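The tests in this changeset drive Fledge entirely through its REST API via small utils.get_request / utils.put_request helpers that are imported but not part of this diff. The sketch below shows what such helpers could look like, assuming http.client and JSON bodies; the names and behaviour are illustrative only and are not copied from the project's actual utils module.

import http.client
import json


def get_request(fledge_url, get_url):
    """Issue a GET against the Fledge REST API and return the parsed JSON body."""
    conn = http.client.HTTPConnection(fledge_url)
    conn.request("GET", get_url)
    resp = conn.getresponse()
    assert resp.status == 200, "GET {} failed with HTTP {}".format(get_url, resp.status)
    return json.loads(resp.read().decode())


def put_request(fledge_url, put_url, payload):
    """Issue a PUT with a JSON payload (e.g. a category config change) and return the parsed response."""
    conn = http.client.HTTPConnection(fledge_url)
    conn.request("PUT", put_url, json.dumps(payload))
    resp = conn.getresponse()
    assert resp.status == 200, "PUT {} failed with HTTP {}".format(put_url, resp.status)
    return json.loads(resp.read().decode())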
diff --git a/tests/system/python/packages/test_pi_webapi_linked_data_type.py b/tests/system/python/packages/test_pi_webapi_linked_data_type.py new file mode 100644 index 0000000000..46f19a937e --- /dev/null +++ b/tests/system/python/packages/test_pi_webapi_linked_data_type.py @@ -0,0 +1,361 @@ +# -*- coding: utf-8 -*- + +# FLEDGE_BEGIN +# See: http://fledge-iot.readthedocs.io/ +# FLEDGE_END + +""" Test sending data to PI using Web API + +""" + +__author__ = "Mohit Singh Tomar" +__copyright__ = "Copyright (c) 2023 Dianomic Systems Inc" +__license__ = "Apache 2.0" +__version__ = "${VERSION}" + +import subprocess +import http.client +import pytest +import os +import time +import utils +import json +from pathlib import Path +import urllib.parse +import base64 +import ssl +import csv + +ASSET = "FOGL-7303" +ASSET_DICT = {ASSET: ['sinusoid', 'randomwalk', 'sinusoid_exp', 'randomwalk_exp']} +SOUTH_PLUGINS_LIST = ["sinusoid", "randomwalk"] +NORTH_INSTANCE_NAME = "NorthReadingsToPI_WebAPI" +FILTER = "expression" +print("Asset Dict -->", ASSET_DICT) +# This gives the path of directory where fledge is cloned. test_file < packages < python < system < tests < ROOT +PROJECT_ROOT = Path(__file__).parent.parent.parent.parent.parent +SCRIPTS_DIR_ROOT = "{}/tests/system/python/scripts/package/".format(PROJECT_ROOT) +DATA_DIR_ROOT = "{}/tests/system/python/packages/data/".format(PROJECT_ROOT) +AF_HIERARCHY_LEVEL = '/testpilinkeddata/testpilinkeddatalvl2/testpilinkeddatalvl3' +AF_HIERARCHY_LEVEL_LIST = AF_HIERARCHY_LEVEL.split("/")[1:] +print('AF HEIR -->', AF_HIERARCHY_LEVEL_LIST) + +@pytest.fixture +def reset_fledge(wait_time): + try: + subprocess.run(["cd {} && ./reset".format(SCRIPTS_DIR_ROOT)], shell=True, check=True) + except subprocess.CalledProcessError: + assert False, "reset package script failed!" 
+ +def verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries): + get_url = "/fledge/ping" + ping_result = utils.get_request(fledge_url, get_url) + assert "dataRead" in ping_result + assert "dataSent" in ping_result + assert 0 < ping_result['dataRead'], "South data NOT seen in ping header" + + retry_count = 1 + sent = 0 + if not skip_verify_north_interface: + while retries > retry_count: + sent = ping_result["dataSent"] + if sent >= 1: + break + else: + time.sleep(wait_time) + + retry_count += 1 + ping_result = utils.get_request(fledge_url, get_url) + + assert 1 <= sent, "Failed to send data via PI Web API using Basic auth" + return ping_result + +def verify_asset(fledge_url): + get_url = "/fledge/asset" + result = utils.get_request(fledge_url, get_url) + assert len(result), "No asset found" + assert ASSET in [s["assetCode"] for s in result] + +def verify_statistics_map(fledge_url, skip_verify_north_interface): + get_url = "/fledge/statistics" + jdoc = utils.get_request(fledge_url, get_url) + actual_stats_map = utils.serialize_stats_map(jdoc) + assert 1 <= actual_stats_map[ASSET.upper()] + assert 1 <= actual_stats_map['READINGS'] + if not skip_verify_north_interface: + assert 1 <= actual_stats_map['Readings Sent'] + assert 1 <= actual_stats_map[NORTH_INSTANCE_NAME] + +def verify_asset_tracking_details(fledge_url, skip_verify_north_interface): + tracking_details = utils.get_asset_tracking_details(fledge_url, "Ingest") + assert len(tracking_details["track"]), "Failed to track Ingest event" + for item in tracking_details["track"]: + tracked_item= item + assert ASSET == tracked_item["asset"] + assert tracked_item["plugin"].lower() in SOUTH_PLUGINS_LIST + + if not skip_verify_north_interface: + egress_tracking_details = utils.get_asset_tracking_details(fledge_url, "Egress") + assert len(egress_tracking_details["track"]), "Failed to track Egress event" + tracked_item = egress_tracking_details["track"][0] + assert ASSET == tracked_item["asset"] + assert "OMF" == tracked_item["plugin"] + +def get_data_from_fledge(fledge_url, PLUGINS_LIST): + record = dict() + get_url = "/fledge/asset/{}?limit=10000".format(ASSET) + jdoc = utils.get_request(fledge_url, urllib.parse.quote(get_url, safe='?,=,&,/')) + for plugin in PLUGINS_LIST: + record[plugin] = list(map(lambda val: val['reading'][plugin], filter(lambda item: (len(item['reading'].keys())==1 and list(item['reading'].keys())[0] == plugin) or (len(item['reading'].keys())==2 and (plugin in list(item['reading'].keys()))), jdoc) )) + return(record) + +def verify_equality_between_fledge_and_pi(data_from_fledge, data_from_pi, PLUGINS_LIST): + matched = "" + for plugin in PLUGINS_LIST: + listA = sorted(data_from_fledge[plugin]) + listB = sorted(data_from_pi[plugin]) + if listA == listB: + matched = "Equal" + else: + matched = "Data of {} is unequal".format(plugin) + break + return(matched) + +def verify_filter_added(fledge_url): + get_url = "/fledge/filter" + result = utils.get_request(fledge_url, get_url)["filters"] + assert len(result) + list_of_filters = list(map(lambda val: val['name'], result)) + for plugin in SOUTH_PLUGINS_LIST: + assert "{}_exp".format(plugin) in list_of_filters + +def verify_service_added(fledge_url, ): + get_url = "/fledge/south" + result = utils.get_request(fledge_url, urllib.parse.quote(get_url, safe='?,=,&,/'))['services'] + assert len(result) + list_of_southbounds = list(map(lambda val: val['name'], result)) + for plugin in SOUTH_PLUGINS_LIST: + assert "{}_{}".format(ASSET, plugin) in list_of_southbounds + + 
get_url = "/fledge/north" + result = utils.get_request(fledge_url, get_url) + assert len(result) + list_of_northbounds = list(map(lambda val: val['name'], result)) + assert NORTH_INSTANCE_NAME in list_of_northbounds + + get_url = "/fledge/service" + result = utils.get_request(fledge_url, urllib.parse.quote(get_url, safe='?,=,&,/'))['services'] + assert len(result) + list_of_services = list(map(lambda val: val['name'], result)) + for plugin in SOUTH_PLUGINS_LIST: + assert "{}_{}".format(ASSET, plugin) in list_of_services + assert NORTH_INSTANCE_NAME in list_of_services + +def verify_data_between_fledge_and_piwebapi(fledge_url, pi_host, pi_admin, pi_passwd, pi_db, AF_HIERARCHY_LEVEL, ASSET, PLUGINS_LIST, verify_hierarchy_and_get_datapoints_from_pi_web_api, wait_time): + # Wait until All data loaded to PI server properly + time.sleep(wait_time) + # Checking if hierarchy created properly or not, and retrieveing data from PI Server + data_from_pi = verify_hierarchy_and_get_datapoints_from_pi_web_api(pi_host, pi_admin, pi_passwd, pi_db, AF_HIERARCHY_LEVEL, ASSET, ASSET_DICT[ASSET]) + assert len(data_from_pi) > 0, "Datapoint does not exist" + print('Data from PI Web API') + print(data_from_pi) + # Getting Data from fledge + data_from_fledge = dict() + data_from_fledge = get_data_from_fledge(fledge_url, PLUGINS_LIST) + print('data fledge retrieved') + print(data_from_fledge) + + # For verifying data send to PI Server from fledge is same. + check_data = verify_equality_between_fledge_and_pi(data_from_fledge, data_from_pi, PLUGINS_LIST) + assert check_data == 'Equal', "Data is not equal" + +def update_filter_config(fledge_url, plugin, mode): + data = {"enable": "{}".format(mode)} + put_url = "/fledge/category/{0}_{1}_{1}_exp".format(ASSET, plugin) + utils.put_request(fledge_url, urllib.parse.quote(put_url, safe='?,=,&,/'), data) + +def add_configure_filter(add_filter, fledge_url, south_plugin): + filter_cfg = {"enable": "true", "expression": "log({})".format(south_plugin), "name": "{}_exp".format(south_plugin)} + add_filter("expression", None, "{}_exp".format(south_plugin), filter_cfg, fledge_url, "{}_{}".format(ASSET, south_plugin), installation_type='package') + +@pytest.fixture +def start_south_north(add_south, start_north_task_omf_web_api, add_filter, remove_data_file, + fledge_url, pi_host, pi_port, pi_admin, pi_passwd, pi_db, + start_north_omf_as_a_service, asset_name=ASSET): + """ This fixture starts two south plugins,i.e. sinusoid and randomwalk., and one north plugin OMF for PIWebAPI. Also puts a filter + to insert reading id as a datapoint when we send the data to north. 
+ clean_setup_fledge_packages: purge the fledge* packages and install latest for given repo url + add_south: Fixture that adds a south service with given configuration + start_north_task_omf_web_api: Fixture that starts PI north task + remove_data_file: Fixture that remove data file created during the tests """ + + poll_rate=1 + + _config = {"assetName": {"value": "{}".format(ASSET)}} + for south_plugin in SOUTH_PLUGINS_LIST: + add_south(south_plugin, None, fledge_url, config=_config, + service_name="{0}_{1}".format(ASSET, south_plugin), installation_type='package', start_service=False) + # Wait for 10 seconds, SO that Services can be added + time.sleep(10) + + data = {"readingsPerSec": "{}".format(poll_rate)} + put_url="/fledge/category/{0}_{1}Advanced".format(ASSET, south_plugin) + utils.put_request(fledge_url, urllib.parse.quote(put_url, safe='?,=,&,/'), data) + + poll_rate+=5 + + start_north_omf_as_a_service(fledge_url, pi_host, pi_port, pi_user=pi_admin, pi_pwd=pi_passwd, pi_use_legacy="false", + service_name=NORTH_INSTANCE_NAME, default_af_location=AF_HIERARCHY_LEVEL) + +class Test_linked_data_PIWebAPI: + # @pytest.mark.skip(reason="no way of currently testing this") + def test_linked_data(self, clean_setup_fledge_packages, reset_fledge, start_south_north, fledge_url, + pi_host, pi_admin, pi_passwd, pi_db, wait_time, retries, pi_port, enable_schedule, disable_schedule, + verify_hierarchy_and_get_datapoints_from_pi_web_api, clear_pi_system_through_pi_web_api, + skip_verify_north_interface, asset_name=ASSET): + + """ Test that check data is inserted in Fledge and sent to PI are equal + clean_setup_fledge_packages: Fixture for removing fledge from system completely if it is already present + and reinstall it baased on commandline arguments. + reset_fledge: Fixture that reset and cleanup the fledge + start_south_north: Fixture that add south and north instance + enable_schedule: Fixture for enabling schedules or services + disable_schedule: Fixture for disabling schedules or services + verify_hierarchy_and_get_datapoints_from_pi_web_api: Fixture to read data from PI and Verify hierarchy + clear_pi_system_through_pi_web_api: Fixture for cleaning up PI Server + skip_verify_north_interface: Flag for assertion of data using PI web API + + Assertions: + on endpoint GET /fledge/statistics + on endpoint GET /fledge/service + on endpoint GET /fledge/asset/ + data received from PI is same as data sent""" + + clear_pi_system_through_pi_web_api(pi_host, pi_admin, pi_passwd, pi_db, AF_HIERARCHY_LEVEL_LIST, ASSET_DICT) + + for south_plugin in SOUTH_PLUGINS_LIST: + enable_schedule(fledge_url, "{0}_{1}".format(ASSET, south_plugin)) + + # Wait until south, north services are created and some data is loaded + time.sleep(wait_time) + + for south_plugin in SOUTH_PLUGINS_LIST: + disable_schedule(fledge_url,"{}_{}".format(ASSET, south_plugin)) + + verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries) + verify_service_added(fledge_url) + verify_asset(fledge_url) + verify_statistics_map(fledge_url, skip_verify_north_interface) + verify_asset_tracking_details(fledge_url, skip_verify_north_interface) + + # Verify Data from fledge sent to PI Server is same. 
+ verify_data_between_fledge_and_piwebapi(fledge_url, pi_host, pi_admin, pi_passwd, pi_db, AF_HIERARCHY_LEVEL, ASSET, SOUTH_PLUGINS_LIST, verify_hierarchy_and_get_datapoints_from_pi_web_api, wait_time) + + # @pytest.mark.skip(reason="no way of currently testing this") + def test_linked_data_with_filter(self, reset_fledge, start_south_north, fledge_url, pi_host, pi_admin, pi_passwd, add_filter, pi_db, wait_time, + retries, pi_port, enable_schedule, disable_schedule, verify_hierarchy_and_get_datapoints_from_pi_web_api, + clear_pi_system_through_pi_web_api, skip_verify_north_interface, asset_name=ASSET): + + """ Test that apply filter and check data is inserted in Fledge and sent to PI are equal. + reset_fledge: Fixture that reset and cleanup the fledge + start_south_north: Fixture that add south and north instance + add_filter: Fixture that adds filter to the Services + enable_schedule: Fixture for enabling schedules or services + disable_schedule: Fixture for disabling schedules or services + verify_hierarchy_and_get_datapoints_from_pi_web_api: Fixture to read data from PI and Verify hierarchy + clear_pi_system_through_pi_web_api: Fixture for cleaning up PI Server + skip_verify_north_interface: Flag for assertion of data using PI web API + + Assertions: + on endpoint GET /fledge/statistics + on endpoint GET /fledge/service + on endpoint GET /fledge/asset/ + on endpoint GET /fledge/filter + data received from PI is same as data sent""" + + clear_pi_system_through_pi_web_api(pi_host, pi_admin, pi_passwd, pi_db, AF_HIERARCHY_LEVEL_LIST, ASSET_DICT) + + for south_plugin in SOUTH_PLUGINS_LIST: + add_configure_filter(add_filter, fledge_url, south_plugin) + enable_schedule(fledge_url, "{0}_{1}".format(ASSET, south_plugin)) + + # Wait until south, north services and filters are created and some data is loaded + time.sleep(wait_time) + + for south_plugin in SOUTH_PLUGINS_LIST: + disable_schedule(fledge_url,"{}_{}".format(ASSET, south_plugin)) + + verify_asset(fledge_url) + verify_service_added(fledge_url) + verify_statistics_map(fledge_url, skip_verify_north_interface) + verify_asset_tracking_details(fledge_url, skip_verify_north_interface) + verify_filter_added(fledge_url) + + # Verify Data from fledge sent to PI Server is same. + verify_data_between_fledge_and_piwebapi(fledge_url, pi_host, pi_admin, pi_passwd, pi_db, AF_HIERARCHY_LEVEL, ASSET, ASSET_DICT[ASSET], verify_hierarchy_and_get_datapoints_from_pi_web_api, wait_time) + + # @pytest.mark.skip(reason="no way of currently testing this") + def test_linked_data_with_onoff_filter(self, reset_fledge, start_south_north, fledge_url, pi_host, pi_admin, pi_passwd, add_filter, pi_db, wait_time, + retries, pi_port, enable_schedule, disable_schedule, verify_hierarchy_and_get_datapoints_from_pi_web_api, + clear_pi_system_through_pi_web_api, skip_verify_north_interface, asset_name=ASSET): + + """ Test that apply filter and check data is inserted in Fledge and sent to PI are equal. 
+ reset_fledge: Fixture that reset and cleanup the fledge + start_south_north: Fixture that add south and north instance + add_filter: Fixture that adds filter to the Services + enable_schedule: Fixture for enabling schedules or services + disable_schedule: Fixture for disabling schedules or services + verify_hierarchy_and_get_datapoints_from_pi_web_api: Fixture to read data from PI and Verify hierarchy + clear_pi_system_through_pi_web_api: Fixture for cleaning up PI Server + skip_verify_north_interface: Flag for assertion of data using PI web API + + Assertions: + on endpoint GET /fledge/statistics + on endpoint GET /fledge/service + on endpoint GET /fledge/asset/ + on endpoint GET /fledge/filter + data received from PI is same as data sent""" + + clear_pi_system_through_pi_web_api(pi_host, pi_admin, pi_passwd, pi_db, AF_HIERARCHY_LEVEL_LIST, ASSET_DICT) + + for south_plugin in SOUTH_PLUGINS_LIST: + add_configure_filter(add_filter, fledge_url, south_plugin) + enable_schedule(fledge_url, "{0}_{1}".format(ASSET, south_plugin)) + + # Wait until south, north services and filters are created and some data is loaded + time.sleep(wait_time) + + for south_plugin in SOUTH_PLUGINS_LIST: + disable_schedule(fledge_url,"{}_{}".format(ASSET, south_plugin)) + + verify_asset(fledge_url) + verify_service_added(fledge_url) + verify_statistics_map(fledge_url, skip_verify_north_interface) + verify_asset_tracking_details(fledge_url, skip_verify_north_interface) + verify_filter_added(fledge_url) + + old_ping_result = verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries) + + for south_plugin in SOUTH_PLUGINS_LIST: + enable_schedule(fledge_url, "{0}_{1}".format(ASSET, south_plugin)) + time.sleep(wait_time) + + print("On/Off of filter starts") + count = 0 + while count<3: + for south_plugin in SOUTH_PLUGINS_LIST: + # For Disabling filter + update_filter_config(fledge_url, south_plugin, 'false') + time.sleep(wait_time*2) + for south_plugin in SOUTH_PLUGINS_LIST: + # For enabling filter + update_filter_config(fledge_url, south_plugin, 'true') + time.sleep(wait_time*2) + count+=1 + + for south_plugin in SOUTH_PLUGINS_LIST: + disable_schedule(fledge_url,"{}_{}".format(ASSET, south_plugin)) + + # Verify Data from fledge sent to PI Server is same. + verify_data_between_fledge_and_piwebapi(fledge_url, pi_host, pi_admin, pi_passwd, pi_db, AF_HIERARCHY_LEVEL, ASSET, ASSET_DICT[ASSET], verify_hierarchy_and_get_datapoints_from_pi_web_api, wait_time) + \ No newline at end of file diff --git a/tests/system/python/packages/test_rule_data_availability.py b/tests/system/python/packages/test_rule_data_availability.py new file mode 100644 index 0000000000..38faa3b97a --- /dev/null +++ b/tests/system/python/packages/test_rule_data_availability.py @@ -0,0 +1,275 @@ +# FLEDGE_BEGIN +# See: http://fledge-iot.readthedocs.io/ +# FLEDGE_END + +""" Test data availability notification rule system tests: + Creates notification instance with data availability rule + and notify asset plugin for triggering the notifications based on CONCH. +""" + +__author__ = "Yash Tatkondawar" +__copyright__ = "Copyright (c) 2023 Dianomic Systems, Inc." + + +import os +import subprocess +import time +import urllib.parse +import json +from pathlib import Path +import http +from datetime import datetime +import pytest +import utils +from pytest import PKG_MGR + +# This gives the path of directory where fledge is cloned. 
test_file < packages < python < system < tests < ROOT +PROJECT_ROOT = Path(__file__).parent.parent.parent.parent.parent +SCRIPTS_DIR_ROOT = "{}/tests/system/python/packages/data/".format(PROJECT_ROOT) +FLEDGE_ROOT = os.environ.get('FLEDGE_ROOT') +SOUTH_SERVICE_NAME = "Sine #1" +SOUTH_DP_NAME="sinusoid" +SOUTH_ASSET_NAME = "{}_sinusoid_assets".format(time.strftime("%Y%m%d")) +NORTH_PLUGIN = "OMF" +TASK_NAME = "EDS #1" +NOTIF_SERVICE_NAME = "notification" +NOTIF_INSTANCE_NAME = "notify #1" +AF_HIERARCHY_LEVEL = "{0}_teststatslvl1/{0}_teststatslvl2/{0}_teststatslvl3".format(time.strftime("%Y%m%d")) + +@pytest.fixture +def reset_fledge(wait_time): + try: + subprocess.run(["cd {}/tests/system/python/scripts/package && ./reset" + .format(PROJECT_ROOT)], shell=True, check=True) + except subprocess.CalledProcessError: + assert False, "reset package script failed!" + + # Wait for fledge server to start + time.sleep(wait_time) + +@pytest.fixture +def reset_eds(): + eds_reset_url = "/api/v1/Administration/Storage/Reset" + con = http.client.HTTPConnection("localhost", 5590) + con.request("POST", eds_reset_url, "") + resp = con.getresponse() + assert 204 == resp.status + +@pytest.fixture +def check_eds_installed(): + dpkg_list = os.popen('dpkg --list osisoft.edgedatastore >/dev/null; echo $?') + ls_output = dpkg_list.read() + assert ls_output == "0\n", "EDS not installed. Please install it first!" + eds_data_url = "/api/v1/diagnostics/productinformation" + con = http.client.HTTPConnection("localhost", 5590) + con.request("GET", eds_data_url) + resp = con.getresponse() + r = json.loads(resp.read().decode()) + assert len(r) != 0, "EDS not installed. Please install it first!" + +@pytest.fixture +def start_south(add_south, fledge_url): + south_plugin = "sinusoid" + config = {"assetName": {"value": SOUTH_ASSET_NAME}} + # south_branch does not matter as these are archives.fledge-iot.org version install + add_south(south_plugin, None, fledge_url, service_name=SOUTH_SERVICE_NAME, installation_type='package', config=config) + + +@pytest.fixture +def start_north(fledge_url, enabled=True): + conn = http.client.HTTPConnection(fledge_url) + data = {"name": TASK_NAME, + "plugin": NORTH_PLUGIN, + "type": "north", + "enabled": enabled, + "config": {"PIServerEndpoint": {"value": "Edge Data Store"}, + "NamingScheme": {"value": "Backward compatibility"}} + } + post_url = "/fledge/service" + utils.post_request(fledge_url, post_url, data) + +@pytest.fixture +def start_notification(fledge_url, add_service, add_notification_instance,wait_time, retries): + + # Install and Add Notification Service + add_service(fledge_url, "notification", None, retries, installation_type='package', service_name=NOTIF_SERVICE_NAME) + + # Wait and verify service created or not + time.sleep(wait_time) + verify_service_added(fledge_url, NOTIF_SERVICE_NAME) + + # Add Notification Instance + rule_config = {"auditCode": "CONAD,SCHAD"} + delivery_config = {"enable": "true"} + add_notification_instance(fledge_url, "asset", None, rule_config=rule_config, delivery_config=delivery_config, + rule_plugin="DataAvailability", installation_type='package', notification_type="retriggered", + notification_instance_name="test #1", retrigger_time=5) + + # Verify Notification Instance created or not + notification_url = "/fledge/notification" + resp = utils.get_request(fledge_url, notification_url) + assert "test #1" in [s["name"] for s in resp["notifications"]] + +def verify_service_added(fledge_url, name): + get_url = "/fledge/service" + result = 
utils.get_request(fledge_url, get_url) + assert len(result["services"]) + assert name in [s["name"] for s in result["services"]] + +def verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries): + get_url = "/fledge/ping" + ping_result = utils.get_request(fledge_url, get_url) + assert "dataRead" in ping_result + assert "dataSent" in ping_result + assert 0 < ping_result['dataRead'], "South data NOT seen in ping header" + + retry_count = 1 + sent = 0 + if not skip_verify_north_interface: + while retries > retry_count: + sent = ping_result["dataSent"] + if sent >= 1: + break + else: + time.sleep(wait_time) + + retry_count += 1 + ping_result = utils.get_request(fledge_url, get_url) + + assert 1 <= sent, "Failed to send data to Edge Data Store" + return ping_result + +def verify_eds_data(): + eds_data_url = "/api/v1/tenants/default/namespaces/default/streams/1measurement_{}/Data/Last".format(SOUTH_ASSET_NAME) + print (eds_data_url) + con = http.client.HTTPConnection("localhost", 5590) + con.request("GET", eds_data_url) + resp = con.getresponse() + r = json.loads(resp.read().decode()) + return r + +class TestDataAvailabilityAuditBasedNotificationRuleOnIngress: + def test_data_availability_multiple_audit(self, clean_setup_fledge_packages, reset_fledge, start_notification, + start_south, fledge_url, skip_verify_north_interface, wait_time, retries): + """ Test NTFSN triggered or not with CONAD, SCHAD. + clean_setup_fledge_packages: Fixture to remove and install latest fledge packages + reset_fledge: Fixture to reset fledge + start_south: Fixtures to add and start south services + start_notification: Fixture to add and start notification service with rule and delivery plugins + Assertions: + on endpoint GET /fledge/audit + on endpoint GET /fledge/ping + on endpoint GET /fledge/category """ + time.sleep(wait_time) + + verify_ping(fledge_url, True, wait_time, retries) + + get_url = "/fledge/audit?source=NTFSN" + resp1 = utils.get_request(fledge_url, get_url) + print (len(resp1['audit'])) + assert len(resp1['audit']) + + assert "test #1" in [s["details"]["name"] for s in resp1["audit"]] + for audit_detail in resp1['audit']: + if "test #1" == audit_detail['details']['name']: + assert "NTFSN" == audit_detail['source'], "ERROR: NTFSN not triggered properly on CONAD or SCHAD" + + def test_data_availability_single_audit(self, fledge_url, skip_verify_north_interface, wait_time, retries): + """ Test NTFSN triggered or not with CONCH in sinusoid plugin. + on endpoint GET /fledge/audit + on endpoint GET /fledge/ping + on endpoint GET /fledge/category """ + get_url = "/fledge/audit?source=NTFSN" + resp1 = utils.get_request(fledge_url, get_url) + + # Change the configuration of rule plugin + put_url = "/fledge/category/ruletest #1" + data = {"auditCode": "CONCH"} + utils.put_request(fledge_url, urllib.parse.quote(put_url), data) + + # Change the configuration of sinusoid plugin + put_url = "/fledge/category/Sine #1Advanced" + data = {"readingsPerSec": "10"} + utils.put_request(fledge_url, urllib.parse.quote(put_url), data) + + time.sleep(wait_time) + get_url = "/fledge/audit?source=NTFSN" + resp2 = utils.get_request(fledge_url, get_url) + assert len(resp2['audit']) - len(resp1['audit']) == 1, "ERROR: NTFSN not triggered properly on CONCH" + + def test_data_availability_all_audit(self, fledge_url, add_south, skip_verify_north_interface, wait_time, retries): + """ Test NTFSN triggered or not with all audit changes. 
+ on endpoint GET /fledge/audit + on endpoint GET /fledge/ping + on endpoint GET /fledge/category """ + get_url = "/fledge/audit?source=NTFSN" + resp1 = utils.get_request(fledge_url, get_url) + + # Change the configuration of rule plugin + put_url = "/fledge/category/ruletest #1" + data = {"auditCode": "*"} + utils.put_request(fledge_url, urllib.parse.quote(put_url), data) + + # Add new service + south_plugin = "sinusoid" + config = {"assetName": {"value": "sine-test"}} + # south_branch does not matter as these are archives.fledge-iot.org version install + add_south(south_plugin, None, fledge_url, service_name="sine-test", installation_type='package', config=config) + + time.sleep(wait_time) + get_url = "/fledge/audit?source=NTFSN" + resp2 = utils.get_request(fledge_url, get_url) + assert len(resp2['audit']) > len(resp1['audit']), "ERROR: NTFSN not triggered properly with * audit code" + +class TestDataAvailabilityAssetBasedNotificationRuleOnIngress: + def test_data_availability_asset(self, fledge_url, add_south, skip_verify_north_interface, wait_time, retries): + """ Test NTFSN triggered or not with all audit changes. + on endpoint GET /fledge/audit + on endpoint GET /fledge/ping + on endpoint GET /fledge/category """ + get_url = "/fledge/audit?source=NTFSN" + resp1 = utils.get_request(fledge_url, get_url) + + # Change the configuration of rule plugin + put_url = "/fledge/category/ruletest #1" + data = {"auditCode": "", "assetCode": SOUTH_ASSET_NAME} + utils.put_request(fledge_url, urllib.parse.quote(put_url), data) + + time.sleep(wait_time) + get_url = "/fledge/audit?source=NTFSN" + resp2 = utils.get_request(fledge_url, get_url) + assert len(resp2['audit']) > len(resp1['audit']), "ERROR: NTFSN not triggered properly with asset code" + +class TestDataAvailabilityBasedNotificationRuleOnEgress: + def test_data_availability_north(self, check_eds_installed, reset_fledge, start_notification, reset_eds, + start_north, fledge_url, wait_time, skip_verify_north_interface, add_south, retries): + """ Test NTFSN triggered or not with configuration change in north EDS plugin. + start_north: Fixtures to add and start south services + Assertions: + on endpoint GET /fledge/audit + on endpoint GET /fledge/ping """ + + # Change the configuration of rule plugin + put_url = "/fledge/category/ruletest #1" + data = {"auditCode": "", "assetCode": SOUTH_ASSET_NAME} + utils.put_request(fledge_url, urllib.parse.quote(put_url), data) + + # Add new service + south_plugin = "sinusoid" + config = {"assetName": {"value": SOUTH_ASSET_NAME}} + # south_branch does not matter as these are archives.fledge-iot.org version install + add_south(south_plugin, None, fledge_url, service_name="sine-test", installation_type='package', config=config) + + get_url = "/fledge/audit?source=NTFSN" + resp1 = utils.get_request(fledge_url, get_url) + + get_url = "/fledge/audit?source=NTFSN" + resp2 = utils.get_request(fledge_url, get_url) + assert len(resp2['audit']) > len(resp1['audit']), "ERROR: NTFSN not triggered properly with asset code" + + time.sleep(wait_time) + verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries) + r = verify_eds_data() + assert SOUTH_DP_NAME in r, "Data in EDS not found!" + ts = r.get("Time") + assert ts.find(datetime.now().strftime("%Y-%m-%d")) != -1, "Latest data not found in EDS!" 
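The data-availability tests above assert on NTFSN audit entries after fixed sleeps. Where timing proves flaky, a small polling helper along the following lines could be used instead; wait_for_ntfsn_count is a hypothetical name, and the sketch only assumes the same utils.get_request helper and GET /fledge/audit?source=NTFSN endpoint already used by these tests.

import time

import utils  # test helper module imported by these system tests


def wait_for_ntfsn_count(fledge_url, minimum, wait_time, retries):
    """Poll the NTFSN audit log until at least `minimum` entries are seen or retries run out."""
    get_url = "/fledge/audit?source=NTFSN"
    for _ in range(retries):
        resp = utils.get_request(fledge_url, get_url)
        if len(resp["audit"]) >= minimum:
            return resp
        time.sleep(wait_time)
    assert False, "Expected at least {} NTFSN audit entries".format(minimum)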
diff --git a/tests/system/python/packages/test_statistics_history_notification_rule.py b/tests/system/python/packages/test_statistics_history_notification_rule.py new file mode 100644 index 0000000000..83619eb3d2 --- /dev/null +++ b/tests/system/python/packages/test_statistics_history_notification_rule.py @@ -0,0 +1,279 @@ +# FLEDGE_BEGIN +# See: http://fledge-iot.readthedocs.io/ +# FLEDGE_END + +""" Test statistics history notification rule system tests: + Creates notification instance with source as statistics history in threshold rule + and notify asset plugin for triggering the notifications based on rules. +""" + +__author__ = "Yash Tatkondawar" +__copyright__ = "Copyright (c) 2023 Dianomic Systems, Inc." + +import base64 +import http.client +import json +import os +import ssl +import subprocess +import time +import urllib.parse +from pathlib import Path + +import pytest +import utils +from pytest import PKG_MGR + +# This gives the path of directory where fledge is cloned. test_file < packages < python < system < tests < ROOT +PROJECT_ROOT = Path(__file__).parent.parent.parent.parent.parent +SCRIPTS_DIR_ROOT = "{}/tests/system/python/packages/data/".format(PROJECT_ROOT) +FLEDGE_ROOT = os.environ.get('FLEDGE_ROOT') +SOUTH_SERVICE_NAME = "Sine #1" +SOUTH_ASSET_NAME = "{}_sinusoid_assets".format(time.strftime("%Y%m%d")) +NOTIF_SERVICE_NAME = "notification" +NOTIF_INSTANCE_NAME = "notify #1" +AF_HIERARCHY_LEVEL = "{0}_teststatslvl1/{0}_teststatslvl2/{0}_teststatslvl3".format(time.strftime("%Y%m%d")) + +@pytest.fixture +def reset_fledge(wait_time): + try: + subprocess.run(["cd {}/tests/system/python/scripts/package && ./reset" + .format(PROJECT_ROOT)], shell=True, check=True) + except subprocess.CalledProcessError: + assert False, "reset package script failed!" 
+ + # Wait for fledge server to start + time.sleep(wait_time) + + +@pytest.fixture +def start_south(add_south, fledge_url): + south_plugin = "sinusoid" + config = {"assetName": {"value": SOUTH_ASSET_NAME}} + # south_branch does not matter as these are archives.fledge-iot.org version install + add_south(south_plugin, None, fledge_url, service_name=SOUTH_SERVICE_NAME, installation_type='package', config=config) + + +@pytest.fixture +def start_north(start_north_omf_as_a_service, fledge_url, + pi_host, pi_port, pi_admin, pi_passwd, clear_pi_system_through_pi_web_api, pi_db): + + af_hierarchy_level_list = AF_HIERARCHY_LEVEL.split("/") + dp_list = ['sinusoid'] + asset_dict = {} + asset_dict[SOUTH_ASSET_NAME] = dp_list + clear_pi_system_through_pi_web_api(pi_host, pi_admin, pi_passwd, pi_db, + af_hierarchy_level_list, asset_dict) + + response = start_north_omf_as_a_service(fledge_url, pi_host, pi_port, pi_user=pi_admin, pi_pwd=pi_passwd, + default_af_location=AF_HIERARCHY_LEVEL) + + yield start_north + +@pytest.fixture +def start_notification(fledge_url, add_service, add_notification_instance,wait_time, retries): + + # Install and Add Notification Service + add_service(fledge_url, "notification", None, retries, installation_type='package', service_name=NOTIF_SERVICE_NAME) + + # Wait and verify service created or not + time.sleep(wait_time) + verify_service_added(fledge_url, NOTIF_SERVICE_NAME) + + # Add Notification Instance + rule_config = { + "source": "Statistics History", + "asset": "READINGS", + "trigger_value": "10.0", + } + delivery_config = {"enable": "true"} + add_notification_instance(fledge_url, "asset", None, rule_config=rule_config, delivery_config=delivery_config, installation_type='package', + notification_type="retriggered", notification_instance_name="test #1", retrigger_time=30) + + # Verify Notification Instance created or not + notification_url = "/fledge/notification" + resp = utils.get_request(fledge_url, notification_url) + assert "test #1" in [s["name"] for s in resp["notifications"]] + +def verify_service_added(fledge_url, name): + get_url = "/fledge/service" + result = utils.get_request(fledge_url, get_url) + assert len(result["services"]) + assert name in [s["name"] for s in result["services"]] + +def verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries): + get_url = "/fledge/ping" + ping_result = utils.get_request(fledge_url, get_url) + assert "dataRead" in ping_result + assert "dataSent" in ping_result + assert 0 < ping_result['dataRead'], "South data NOT seen in ping header" + + retry_count = 1 + sent = 0 + if not skip_verify_north_interface: + while retries > retry_count: + sent = ping_result["dataSent"] + if sent >= 1: + break + else: + time.sleep(wait_time) + + retry_count += 1 + ping_result = utils.get_request(fledge_url, get_url) + + assert 1 <= sent, "Failed to send data via PI Web API using Basic auth" + return ping_result + +def _verify_egress(read_data_from_pi_web_api, pi_host, pi_admin, pi_passwd, pi_db, wait_time, retries): + + af_hierarchy_level_list = AF_HIERARCHY_LEVEL.split("/") + retry_count = 0 + data_from_pi = None + # Name of asset in the PI server + pi_asset_name = "{}".format(SOUTH_ASSET_NAME) + + while (data_from_pi is None or data_from_pi == []) and retry_count < retries: + data_from_pi = read_data_from_pi_web_api(pi_host, pi_admin, pi_passwd, pi_db, af_hierarchy_level_list, + pi_asset_name, '') + if data_from_pi is None: + retry_count += 1 + time.sleep(wait_time) + + if data_from_pi is None or retry_count == retries: + 
assert False, "Failed to read data from PI" + + +class TestStatisticsHistoryBasedNotificationRuleOnIngress: + def test_stats_readings_south(self, clean_setup_fledge_packages, reset_fledge, start_south, start_notification, fledge_url, + skip_verify_north_interface, wait_time, retries): + """ Test NTFSN triggered or not with source as statistics history and name as READINGS in threshold rule. + clean_setup_fledge_packages: Fixture to remove and install latest fledge packages + reset_fledge: Fixture to reset fledge + start_south: Fixtures to add and start south services + start_notification: Fixture to add and start notification service with rule and delivery plugins + Assertions: + on endpoint GET /fledge/audit + on endpoint GET /fledge/ping + on endpoint GET /fledge/statistics/history """ + time.sleep(wait_time * 4) + + verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries) + + # When rule is triggered, there should be audit entries for NTFSN + get_url = "/fledge/audit?source=NTFSN" + resp1 = utils.get_request(fledge_url, get_url) + assert len(resp1['audit']) + assert "test #1" in [s["details"]["name"] for s in resp1["audit"]] + for audit_detail in resp1['audit']: + if "test #1" == audit_detail['details']['name']: + assert "NTFSN" == audit_detail['source'] + # Waiting for 60 sec to get 2 more NTFSN entries if rule is triggered properly + time.sleep(60) + resp2 = utils.get_request(fledge_url, get_url) + assert len(resp2['audit']) - len(resp1['audit']) == 2, "ERROR: NTFSN not triggered properly" + + get_url = "/fledge/statistics/history?minutes=10" + r = utils.get_request(fledge_url, get_url) + if "READINGS" in r["statistics"][0]: + assert 0 < r["statistics"][0]["READINGS"] + + def test_stats_south_asset_ingest(self, fledge_url, wait_time, skip_verify_north_interface, retries): + """ Test NTFSN triggered or not with source as statistics history and name as ingested south asset in threshold rule. + Assertions: + on endpoint GET /fledge/audit + on endpoint GET /fledge/ping + on endpoint GET /fledge/statistics/history """ + # Change the config of threshold, name of statistics - READINGS replaced with statistics key name - Sine #1-Ingest + put_url = "/fledge/category/ruletest #1" + data = {"asset": "Sine #1-Ingest"} + utils.put_request(fledge_url, urllib.parse.quote(put_url), data) + + verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries) + + # When rule is triggered, there should be audit entries for NTFSN + get_url = "/fledge/audit?source=NTFSN" + resp1 = utils.get_request(fledge_url, get_url) + assert len(resp1['audit']) + assert "test #1" in [s["details"]["name"] for s in resp1["audit"]] + for audit_detail in resp1['audit']: + if "test #1" == audit_detail['details']['name']: + assert "NTFSN" == audit_detail['source'] + # Waiting for 60 sec to get more NTFSN entries + time.sleep(60) + resp2 = utils.get_request(fledge_url, get_url) + assert len(resp2['audit']) - len(resp1['audit']) == 2, "ERROR: NTFSN not triggered properly" + + get_url = "/fledge/statistics/history?minutes=10" + r = utils.get_request(fledge_url, get_url) + if "Sine #1-Ingest" in r["statistics"][0]: + assert 0 < r["statistics"][0]["Sine #1-Ingest"] + + def test_stats_south_asset(self, fledge_url, wait_time, skip_verify_north_interface, retries): + """ Test NTFSN triggered or not with source as statistics history and name as south asset name in threshold rule. 
+ Assertions: + on endpoint GET /fledge/audit + on endpoint GET /fledge/ping + on endpoint GET /fledge/statistics/history """ + # Change the config of threshold, name of statistics - Sine #1-Ingest replaced with statistics key name - 20230420_SINUSOID_ASSETS + put_url = "/fledge/category/ruletest #1" + data = {"asset": SOUTH_ASSET_NAME.upper()} + utils.put_request(fledge_url, urllib.parse.quote(put_url), data) + + verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries) + + # When rule is triggered, there should be audit entries for NTFSN + get_url = "/fledge/audit?source=NTFSN" + resp1 = utils.get_request(fledge_url, get_url) + assert len(resp1['audit']) + assert "test #1" in [s["details"]["name"] for s in resp1["audit"]] + for audit_detail in resp1['audit']: + if "test #1" == audit_detail['details']['name']: + assert "NTFSN" == audit_detail['source'] + # Waiting for 60 sec to get more NTFSN entries + time.sleep(60) + resp2 = utils.get_request(fledge_url, get_url) + assert len(resp2['audit']) - len(resp1['audit']) == 2, "ERROR: NTFSN not triggered properly" + + get_url = "/fledge/statistics/history?minutes=10" + r = utils.get_request(fledge_url, get_url) + if SOUTH_ASSET_NAME.upper() in r["statistics"][0]: + assert 0 < r["statistics"][0][SOUTH_ASSET_NAME.upper()] + + +class TestStatisticsHistoryBasedNotificationRuleOnEgress: + def test_stats_readings_north(self, start_north, fledge_url, wait_time, skip_verify_north_interface, retries, + read_data_from_pi_web_api, pi_host, pi_admin, pi_passwd, pi_db): + """ Test NTFSN triggered or not with source as statistics history and name as READINGS in threshold rule. + clean_setup_fledge_packages: Fixture to remove and install latest fledge packages + reset_fledge: Fixture to reset fledge + start_south_north: Fixtures to add and start south and north services + Assertions: + on endpoint GET /fledge/audit + on endpoint GET /fledge/ping + on endpoint GET /fledge/statistics/history """ + # Change the config of threshold, name of statistics - Sine #1-Ingest replaced with statistics key name - 20230420_SINUSOID_ASSETS + put_url = "/fledge/category/ruletest #1" + data = {"asset": "Readings Sent"} + utils.put_request(fledge_url, urllib.parse.quote(put_url), data) + + verify_ping(fledge_url, skip_verify_north_interface, wait_time, retries) + + # When rule is triggered, there should be audit entries for NTFSN + get_url = "/fledge/audit?source=NTFSN" + resp1 = utils.get_request(fledge_url, get_url) + assert len(resp1['audit']) + assert "test #1" in [s["details"]["name"] for s in resp1["audit"]] + for audit_detail in resp1['audit']: + if "test #1" == audit_detail['details']['name']: + assert "NTFSN" == audit_detail['source'] + # Waiting for 60 sec to get more NTFSN entries + time.sleep(60) + resp2 = utils.get_request(fledge_url, get_url) + assert len(resp2['audit']) - len(resp1['audit']) == 2, "ERROR: NTFSN for north not triggered properly" + + get_url = "/fledge/statistics/history?minutes=10" + r = utils.get_request(fledge_url, get_url) + if "Readings Sent" in r["statistics"][0]: + assert 0 < r["statistics"][0]["Readings Sent"] + + _verify_egress(read_data_from_pi_web_api, pi_host, pi_admin, pi_passwd, pi_db, wait_time, retries) diff --git a/tests/system/python/pair/test_c_north_service_pair.py b/tests/system/python/pair/test_c_north_service_pair.py index 8f086d4f8f..8f1cf5ddb5 100644 --- a/tests/system/python/pair/test_c_north_service_pair.py +++ b/tests/system/python/pair/test_c_north_service_pair.py @@ -667,7 +667,7 @@ def 
test_north_C_service_with_delete_add_filter(self, setup_local, setup_remote, delete_url = "/fledge/filter/{}".format(filter_name) resp = utils.delete_request(fledge_url, urllib.parse.quote(delete_url)) - assert "Filter {} deleted successfully".format(filter_name) == resp['result'] + assert "Filter {} deleted successfully.".format(filter_name) == resp['result'] # Re-add filter in enabled mode filter_cfg_scale = {"enable": "true"} diff --git a/tests/system/python/pair/test_e2e_fledge_pair.py b/tests/system/python/pair/test_e2e_fledge_pair.py index 8996bd1971..4d9e2cf41e 100644 --- a/tests/system/python/pair/test_e2e_fledge_pair.py +++ b/tests/system/python/pair/test_e2e_fledge_pair.py @@ -80,12 +80,15 @@ def reset_and_start_fledge_remote(self, storage_plugin, remote_user, remote_ip, subprocess.run(["ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i {} {}@{} 'export FLEDGE_ROOT={};$FLEDGE_ROOT/scripts/fledge kill'".format(key_path, remote_user, remote_ip, remote_fledge_path)], shell=True, check=True) - if storage_plugin == 'postgres': - subprocess.run(["ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i {} {}@{} sed -i 's/sqlite/postgres/g' {}/data/etc/storage.json".format(key_path, remote_user, remote_ip, remote_fledge_path)], shell=True, check=True) - else: - subprocess.run(["ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i {} {}@{} sed -i 's/postgres/sqlite/g' {}/data/etc/storage.json".format(key_path, remote_user, remote_ip, remote_fledge_path)], shell=True, check=True) - - subprocess.run(["ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i {} {}@{} 'export FLEDGE_ROOT={};echo YES | $FLEDGE_ROOT/scripts/fledge reset'".format(key_path, remote_user, remote_ip, remote_fledge_path)], shell=True, check=True) + storage_plugin_val = "postgres" if storage_plugin == 'postgres' else "sqlite" + # Check whether storage.json file exist on remote machine or not, if it doesn't exist then raise assertion otherwise update its storage plugin value. 
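In outline, the replacement lines that follow read the remote storage.json over ssh, fail the test if the file is missing, rewrite the storage plugin value, and push the file back. A simplified standalone sketch of that read-modify-write (the key path, user, host and Fledge install path are placeholder parameters, not values from the test):

import json
import subprocess

def set_remote_storage_plugin(key_path, user, host, fledge_path, plugin):
    ssh_base = ["ssh", "-o", "UserKnownHostsFile=/dev/null",
                "-o", "StrictHostKeyChecking=no", "-i", key_path,
                "{}@{}".format(user, host)]
    # Read the current storage configuration from the remote machine
    out = subprocess.run(ssh_base + ["cat {}/data/etc/storage.json".format(fledge_path)],
                         stdout=subprocess.PIPE, check=True).stdout
    config = json.loads(out.decode())
    # Switch the main storage plugin and write the file back over ssh
    config["plugin"]["value"] = plugin
    subprocess.run(ssh_base + ["echo '{}' > {}/data/etc/storage.json".format(
        json.dumps(config), fledge_path)], check=True)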
+ ssh = subprocess.Popen(["ssh", "-o", "UserKnownHostsFile=/dev/null", "-o", "StrictHostKeyChecking=no", "-i", "{}".format(key_path), "{}@{}".format(remote_user, remote_ip), "cat {}/data/etc/storage.json".format(remote_fledge_path)], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + result = ssh.stdout.readlines() + assert [] != result, "storage.json file not found on the remote machine {}".format(remote_ip) + data = json.loads(result[0]) + data['plugin']['value'] = storage_plugin_val + ssh = subprocess.Popen(["ssh", "-o", "UserKnownHostsFile=/dev/null", "-o", "StrictHostKeyChecking=no", "-i", "{}".format(key_path), "{}@{}".format(remote_user, remote_ip), "echo '" + json.dumps(data) + "' > {}/data/etc/storage.json".format(remote_fledge_path) ], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + subprocess.run(["ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i {} {}@{} 'export FLEDGE_ROOT={};echo \"YES\nYES\" | $FLEDGE_ROOT/scripts/fledge reset'".format(key_path, remote_user, remote_ip, remote_fledge_path)], shell=True, check=True) subprocess.run(["ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i {} {}@{} 'export FLEDGE_ROOT={};$FLEDGE_ROOT/scripts/fledge start'".format(key_path, remote_user, remote_ip, remote_fledge_path)], shell=True) stat = subprocess.run(["ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i {} {}@{} 'export FLEDGE_ROOT={}; $FLEDGE_ROOT/scripts/fledge status'".format(key_path, remote_user, remote_ip, remote_fledge_path)], shell=True, stdout=subprocess.PIPE) assert "Fledge not running." not in stat.stdout.decode("utf-8") diff --git a/tests/system/python/pair/test_pyton_north_service_pair.py b/tests/system/python/pair/test_pyton_north_service_pair.py index d2a4884a4b..818372fef6 100644 --- a/tests/system/python/pair/test_pyton_north_service_pair.py +++ b/tests/system/python/pair/test_pyton_north_service_pair.py @@ -669,7 +669,7 @@ def test_north_python_service_with_delete_add_filter(self, setup_local, setup_re delete_url = "/fledge/filter/{}".format(filter_name) resp = utils.delete_request(fledge_url, urllib.parse.quote(delete_url)) - assert "Filter {} deleted successfully".format(filter_name) == resp['result'] + assert "Filter {} deleted successfully.".format(filter_name) == resp['result'] # Re-add filter in enabled mode filter_cfg_scale = {"enable": "true"} diff --git a/tests/system/python/scripts/install_c_plugin b/tests/system/python/scripts/install_c_plugin index fdedbff529..4ec8c0689a 100755 --- a/tests/system/python/scripts/install_c_plugin +++ b/tests/system/python/scripts/install_c_plugin @@ -19,13 +19,15 @@ PLUGIN_NAME=$3 [[ -z "${PLUGIN_TYPE}" ]] && echo "Plugin type not found." && exit 1 [[ -z "${PLUGIN_NAME}" ]] && echo "Plugin name not found." 
&& exit 1 -os_name=`(grep -o '^NAME=.*' /etc/os-release | cut -f2 -d\" | sed 's/"//g')` +os_name=$(grep -o '^NAME=.*' /etc/os-release | cut -f2 -d\" | sed 's/"//g') +os_version=$(grep -o '^VERSION_ID=.*' /etc/os-release | cut -f2 -d\" | sed 's/"//g') REPO_NAME=fledge-${PLUGIN_TYPE}-${PLUGIN_NAME,,} -if [[ "${PLUGIN_TYPE}" = "rule" ]]; then rm -rf /tmp/fledge-service-notification; fi +if [[ "${PLUGIN_TYPE}" = "rule" || "${PLUGIN_TYPE}" == "notify" ]]; then rm -rf /tmp/fledge-service-notification; fi clean () { rm -rf /tmp/${REPO_NAME} + if [[ "${PLUGIN_TYPE}" = "rule" ]]; then rm -rf ${FLEDGE_ROOT}/plugins/notificationRule/${PLUGIN_NAME} ; elif [[ "${PLUGIN_TYPE}" == "notify" ]]; then rm -rf ${FLEDGE_ROOT}/plugins/notificationDelivery/${PLUGIN_NAME} ; fi rm -rf ${FLEDGE_ROOT}/plugins/${PLUGIN_TYPE}/${PLUGIN_NAME} } @@ -39,22 +41,25 @@ install_requirement (){ } install_binary_file () { - if [[ "${PLUGIN_TYPE}" = "rule" ]] + if [[ "${PLUGIN_TYPE}" = "rule" || "${PLUGIN_TYPE}" == "notify" ]] then + # fledge-service-notification repo is required to build notificationRule Plugins service_repo_name='fledge-service-notification' git clone -b ${BRANCH_NAME} --single-branch https://github.com/fledge-iot/${service_repo_name}.git /tmp/${service_repo_name} - export NOTIFICATION_SERVICE_INCLUDE_DIRS=/tmp/${service_repo_name}/C/services/common/include + export NOTIFICATION_SERVICE_INCLUDE_DIRS=/tmp/${service_repo_name}/C/services/notification/include fi if [ -f /tmp/${REPO_NAME}/build.sh ]; then cd /tmp/${REPO_NAME}; ./build.sh -DFLEDGE_INSTALL=${FLEDGE_ROOT}; cd build && make install; else if [[ $os_name == *"Red Hat"* || $os_name == *"CentOS"* ]]; then - set +e - source scl_source enable rh-postgresql13 - source scl_source enable devtoolset-7 - set -e + if [[ ${os_version} -eq "7" ]]; then + set +e + source scl_source enable rh-postgresql13 + source scl_source enable devtoolset-7 + set -e + fi fi mkdir -p /tmp/${REPO_NAME}/build; cd /tmp/${REPO_NAME}/build; cmake -DFLEDGE_INSTALL=${FLEDGE_ROOT} ..; make -j4 && make install; cd - fi diff --git a/tests/system/python/scripts/install_c_service b/tests/system/python/scripts/install_c_service index bfe42bef60..cdea645f90 100755 --- a/tests/system/python/scripts/install_c_service +++ b/tests/system/python/scripts/install_c_service @@ -17,7 +17,8 @@ SERVICE_NAME=$2 [[ -z "${BRANCH_NAME}" ]] && echo "Branch name not found." && exit 1 [[ -z "${SERVICE_NAME}" ]] && echo "Service name not found." 
&& exit 1 -os_name=`(grep -o '^NAME=.*' /etc/os-release | cut -f2 -d\" | sed 's/"//g')` +os_name=$(grep -o '^NAME=.*' /etc/os-release | cut -f2 -d\" | sed 's/"//g') +os_version=$(grep -o '^VERSION_ID=.*' /etc/os-release | cut -f2 -d\" | sed 's/"//g') REPO_NAME=fledge-service-${SERVICE_NAME} clean () { @@ -35,10 +36,12 @@ install_binary_file () { cd /tmp/${REPO_NAME}; ./build.sh -DFLEDGE_INSTALL=${FLEDGE_ROOT}; cd build && make install; else if [[ $os_name == *"Red Hat"* || $os_name == *"CentOS"* ]]; then - set +e - source scl_source enable rh-postgresql13 - source scl_source enable devtoolset-7 - set -e + if [[ ${os_version} -eq "7" ]]; then + set +e + source scl_source enable rh-postgresql13 + source scl_source enable devtoolset-7 + set -e + fi fi mkdir -p /tmp/${REPO_NAME}/build; cd /tmp/${REPO_NAME}/build; cmake -DFLEDGE_INSTALL=${FLEDGE_ROOT} ..; make -j4 && make install; cd - fi diff --git a/tests/system/python/scripts/package/reset b/tests/system/python/scripts/package/reset index 0da7a13a92..f5353104e8 100755 --- a/tests/system/python/scripts/package/reset +++ b/tests/system/python/scripts/package/reset @@ -2,7 +2,7 @@ if [ "${FLEDGE_ENVIRONMENT}" == "docker" ]; then /usr/local/fledge/bin/fledge stop - echo "YES" | /usr/local/fledge/bin/fledge reset || exit 1 + echo -e "YES\nYES" | /usr/local/fledge/bin/fledge reset || exit 1 echo /usr/local/fledge/bin/fledge start echo "Fledge Status" @@ -10,7 +10,7 @@ if [ "${FLEDGE_ENVIRONMENT}" == "docker" ]; then else echo "Stopping Fledge using systemctl ..." sudo systemctl stop fledge - echo "YES" | /usr/local/fledge/bin/fledge reset || exit 1 + echo -e "YES\nYES" | /usr/local/fledge/bin/fledge reset || exit 1 echo echo "Starting Fledge using systemctl ..." sudo systemctl start fledge diff --git a/tests/system/python/scripts/package/setup b/tests/system/python/scripts/package/setup index f26cff35ea..6d61f86ed2 100755 --- a/tests/system/python/scripts/package/setup +++ b/tests/system/python/scripts/package/setup @@ -15,46 +15,47 @@ PACKAGE_BUILD_VERSION=$1 [[ -z "${PACKAGE_BUILD_VERSION}" ]] && echo "Build Version not found." 
&& exit 1 -OS_NAME=`(grep -o '^NAME=.*' /etc/os-release | cut -f2 -d\" | sed 's/"//g')` +OS_NAME=$(grep -o '^NAME=.*' /etc/os-release | cut -f2 -d\" | sed 's/"//g') ID=$(cat /etc/os-release | grep -w ID | cut -f2 -d"=" | tr -d '"') -UNAME=`uname -m` +UNAME=$(uname -m) VERSION_ID=$(cat /etc/os-release | grep -w VERSION_ID | cut -f2 -d"=" | tr -d '"') -echo "version id is "${VERSION_ID} +echo "Version ID is ${VERSION_ID}" if [[ ${OS_NAME} == *"Red Hat"* || ${OS_NAME} == *"CentOS"* ]]; then - ID="${ID}7" - echo "Build version is "${PACKAGE_BUILD_VERSION} - echo "ID is "${ID} - echo "uname is "${UNAME} - + if [[ ${VERSION_ID} -eq "7" ]]; then ARCHIVE_PKG_OS="${ID}7"; else ARCHIVE_PKG_OS="${ID}-stream-9"; fi + + echo "Build version is ${PACKAGE_BUILD_VERSION}" + echo "ID is ${ID} and Archive package OS is ${ARCHIVE_PKG_OS}" + echo "Uname is ${UNAME}" + sudo cp -f /etc/yum.repos.d/fledge.repo /etc/yum.repos.d/fledge.repo.bak | true - sudo yum update -y - echo "==================== DONE update, upgrade ============================" - + # Configure Fledge repo echo -e "[fledge]\n\ name=fledge Repository\n\ -baseurl=http://archives.fledge-iot.org/${PACKAGE_BUILD_VERSION}/${ID}/${UNAME}/\n\ +baseurl=http://archives.fledge-iot.org/${PACKAGE_BUILD_VERSION}/${ARCHIVE_PKG_OS}/${UNAME}/\n\ enabled=1\n\ gpgkey=http://archives.fledge-iot.org/RPM-GPG-KEY-fledge\n\ gpgcheck=1" | sudo tee /etc/yum.repos.d/fledge.repo - - sudo yum update -y - + # Install prerequisites if [[ ${ID} = "centos" ]]; then - sudo yum install -y centos-release-scl-rh - sudo yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm + if [[ ${VERSION_ID} -eq "7" ]]; then + sudo yum install -y centos-release-scl-rh + sudo yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm + fi elif [[ ${ID} = "rhel" ]]; then sudo yum-config-manager --enable 'Red Hat Software Collections RPMs for Red Hat Enterprise Linux 7 Server from RHUI' fi - sudo yum update -y - + + sudo yum -y check-update && sudo yum -y update + echo "==================== DONE check-update, update ============================" + time sudo yum install -y fledge echo "==================== DONE INSTALLING Fledge ==================" - - if [ "${FLEDGE_ENVIRONMENT}" != "docker" ]; then + + if [ "${FLEDGE_ENVIRONMENT}" != "docker" ]; then time sudo yum install -y fledge-gui echo "==================== DONE INSTALLING Fledge GUI ======================" fi diff --git a/tests/unit/C/CMakeLists.txt b/tests/unit/C/CMakeLists.txt index 4460dd7a2c..53ba41dd3a 100644 --- a/tests/unit/C/CMakeLists.txt +++ b/tests/unit/C/CMakeLists.txt @@ -96,6 +96,7 @@ set(LIB_NAME OMF) file(GLOB OMF_LIB_SOURCES ../../../C/plugins/north/OMF/omf.cpp ../../../C/plugins/north/OMF/omfhints.cpp + ../../../C/plugins/north/OMF/OMFError.cpp ../../../C/plugins/north/OMF/linkdata.cpp) add_library(${LIB_NAME} SHARED ${OMF_LIB_SOURCES}) diff --git a/tests/unit/C/cmake_sqliteM/CMakeLists.txt b/tests/unit/C/cmake_sqliteM/CMakeLists.txt index 0acdcdbfb1..71d2683926 100644 --- a/tests/unit/C/cmake_sqliteM/CMakeLists.txt +++ b/tests/unit/C/cmake_sqliteM/CMakeLists.txt @@ -18,6 +18,7 @@ include_directories(../../../../C/services/common/include) include_directories(../../../../C/plugins/storage/common/include) include_directories(../../../../C/plugins/storage/sqlitelb/include) include_directories(../../../../C/plugins/storage/sqlitelb/common/include) +include_directories(../../../../C/plugins/storage/sqlite/common/include) # Check Sqlite3 required version 
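The package setup and install scripts above now key their behaviour off NAME and VERSION_ID from /etc/os-release, for example only enabling the rh-postgresql13 and devtoolset-7 software collections on CentOS/RHEL 7 and picking the matching archive repository path. Purely as an illustration of that lookup (an informal Python equivalent of the grep/cut/sed pipeline in the shell scripts, not code from this change):

def read_os_release(path="/etc/os-release"):
    # Parse KEY=value pairs, stripping surrounding quotes, e.g. NAME, VERSION_ID
    info = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if "=" in line and not line.startswith("#"):
                key, _, value = line.partition("=")
                info[key] = value.strip('"')
    return info

# Example: decide whether the CentOS 7 software collections are needed
osinfo = read_os_release()
needs_scl = "CentOS" in osinfo.get("NAME", "") and osinfo.get("VERSION_ID") == "7"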
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}") diff --git a/tests/unit/C/cmake_sqlitelb/CMakeLists.txt b/tests/unit/C/cmake_sqlitelb/CMakeLists.txt index 7ce3b95857..568e90eba9 100644 --- a/tests/unit/C/cmake_sqlitelb/CMakeLists.txt +++ b/tests/unit/C/cmake_sqlitelb/CMakeLists.txt @@ -17,6 +17,7 @@ include_directories(../../../../C/plugins/storage/common/include) include_directories(../../../../C/plugins/storage/sqlite/schema/include) include_directories(../../../../C/plugins/storage/sqlitelb/include) include_directories(../../../../C/plugins/storage/sqlitelb/common/include) +include_directories(../../../../C/plugins/storage/sqlite/common/include) # Check Sqlite3 required version set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}") diff --git a/tests/unit/C/common/test_config_category.cpp b/tests/unit/C/common/test_config_category.cpp index 43cd08cb0c..0bbce88f8a 100644 --- a/tests/unit/C/common/test_config_category.cpp +++ b/tests/unit/C/common/test_config_category.cpp @@ -47,6 +47,8 @@ const char *myCategory_quoted = "{\"description\": {" "\"default\": {\"first\" : \"Fledge\", \"second\" : \"json\" }," "\"description\": \"A JSON configuration parameter\"}}"; +const char *myCategory_quotedSpecial = R"QQ({"description": { "value": "The \"Fledge\" admini\\strative API", "type": "string", "default": "The \"Fledge\" administra\tive API", "description": "The description of this \"Fledge\" service"}, "name": { "value": "\"Fledge\"", "type": "string", "default": "\"Fledge\"", "description": "The name of this \"Fledge\" service"}, "complex": { "value": { "first" : "Fledge", "second" : "json" }, "type": "json", "default": {"first" : "Fledge", "second" : "json" }, "description": "A JSON configuration parameter"} })QQ"; + const char *myCategoryDisplayName = "{\"description\": {" "\"value\": \"The Fledge administrative API\"," "\"type\": \"string\"," @@ -309,6 +311,36 @@ const char* bigCategory = "\"description\": \"Defines\"} " \ "}"; +const char *optionals = + "{\"item1\" : { "\ + "\"type\": \"integer\", \"displayName\": \"Item1\", " \ + "\"value\": \"3\", \"default\": \"3\", " \ + "\"description\": \"First Item\", " \ + "\"group\" : \"Group1\", " \ + "\"rule\" : \"1 = 0\", " \ + "\"deprecated\" : \"false\", " \ + "\"order\": \"10\"} " + "}"; + +const char *json_quotedSpecial = R"QS({ "key" : "test \"a\"", "description" : "Test \"description\"", "value" : {"description" : { "description" : "The description of this \"Fledge\" service", "type" : "string", "value" : "The \"Fledge\" admini\\strative API", "default" : "The \"Fledge\" administra\tive API" }, "name" : { "description" : "The name of this \"Fledge\" service", "type" : "string", "value" : "\"Fledge\"", "default" : "\"Fledge\"" }, "complex" : { "description" : "A JSON configuration parameter", "type" : "json", "value" : {"first":"Fledge","second":"json"}, "default" : {"first":"Fledge","second":"json"} }} })QS"; + +const char *json_parse_error = "{\"description\": {" + "\"value\": \"The Fledge administrative API\"," + "\"type\": \"string\"," + "\"default\": \"The Fledge administrative API\"," + "\"description\": \"The description of this Fledge service\"}," + "\"name\": {" + "\"value\": \"Fledge\"," + "\"type\": \"string\"," + "\"default\": \"Fledge\"," + "\"description\": \"The name of this Fledge service\"}," + "error : here," + "\"complex\": {" \ + "\"value\": { \"first\" : \"Fledge\", \"second\" : \"json\" }," + "\"type\": \"json\"," + "\"default\": {\"first\" : \"Fledge\", \"second\" : 
\"json\" }," + "\"description\": \"A JSON configuration parameter\"}}"; + TEST(CategoriesTest, Count) { ConfigCategories confCategories(categories); @@ -615,3 +647,32 @@ TEST(CategoryTest, categoryValues) ASSERT_EQ(true, complex.getValue("plugin").compare("OMF") == 0); ASSERT_EQ(true, complex.getValue("OMFMaxRetry").compare("3") == 0); } + + +/** + * Test optional attributes + */ +TEST(CategoryTest, optionalItems) +{ + ConfigCategory category("optional", optionals); + ASSERT_EQ(0, category.getItemAttribute("item1", ConfigCategory::GROUP_ATTR).compare("Group1")); + ASSERT_EQ(0, category.getItemAttribute("item1", ConfigCategory::DEPRECATED_ATTR).compare("false")); + ASSERT_EQ(0, category.getItemAttribute("item1", ConfigCategory::RULE_ATTR).compare("1 = 0")); + ASSERT_EQ(0, category.getItemAttribute("item1", ConfigCategory::DISPLAY_NAME_ATTR).compare("Item1")); +} + +/** + * Special quotes for \\s and \\t + */ + +TEST(CategoryTestQuoted, toJSONQuotedSpecial) +{ + ConfigCategory confCategory("test \"a\"", myCategory_quotedSpecial); + confCategory.setDescription("Test \"description\""); + ASSERT_EQ(0, confCategory.toJSON().compare(json_quotedSpecial)); +} + +TEST(Categorytest, parseError) +{ + EXPECT_THROW(ConfigCategory("parseTest", json_parse_error), ConfigMalformed*); +} diff --git a/tests/unit/C/common/test_default_config_category.cpp b/tests/unit/C/common/test_default_config_category.cpp index cb54dd6450..bc3caba683 100644 --- a/tests/unit/C/common/test_default_config_category.cpp +++ b/tests/unit/C/common/test_default_config_category.cpp @@ -46,6 +46,9 @@ const char *default_myCategory_quoted = "{\"description\": {" "\"value\": {\"first\" : \"Fledge\", \"second\" : \"json\" }," "\"default\": {\"first\" : \"Fledge\", \"second\" : \"json\" }," "\"description\": \"A JSON configuration parameter\"}}"; + +const char *default_myCategory_quotedSpecial = R"DQS({ "description": { "type": "string", "value": "The \"Fledge\" administra\tive API", "default": "The \"Fledge\" admini\\strative API", "description": "The description of this \"Fledge\" service"}, "name": { "type": "string", "value": "\"Fledge\"", "default": "\"Fledge\"", "description": "The name of this \"Fledge\" service"}, "complex": { "type": "json", "value": {"first" : "Fledge", "second" : "json" }, "default": {"first" : "Fledge", "second" : "json" }, "description": "A JSON configuration parameter"}})DQS"; + /** * The JSON output from DefaulltCategory::toJSON has "default" values olny */ @@ -141,6 +144,8 @@ const char *myDefCategoryRemoveItems = "{" \ "}"; +const char *default_json_quotedSpecial = R"SDQ({ "key" : "test \"a\"", "description" : "Test \"description\"", "value" : {"description" : { "description" : "The description of this \"Fledge\" service", "type" : "string", "default" : "The \"Fledge\" admini\\strative API" }, "name" : { "description" : "The name of this \"Fledge\" service", "type" : "string", "default" : "\"Fledge\"" }, "complex" : { "description" : "A JSON configuration parameter", "type" : "json", "default" : "{\"first\":\"Fledge\",\"second\":\"json\"}" }} })SDQ"; + TEST(DefaultCategoriesTest, Count) { ConfigCategories confCategories(default_categories); @@ -325,4 +330,15 @@ TEST(DefaultCategoryTest, removeItemsType) } +/** + * Test special quoted chars + */ + +TEST(DefaultCategoryTestQuoted, toJSONQuotedSpecial) +{ + DefaultConfigCategory confCategory("test \"a\"", default_myCategory_quotedSpecial); + confCategory.setDescription("Test \"description\""); + // Only "default" value in the output + ASSERT_EQ(0, 
confCategory.toJSON().compare(default_json_quotedSpecial)); +} diff --git a/tests/unit/C/common/test_reading_array.cpp b/tests/unit/C/common/test_reading_array.cpp new file mode 100644 index 0000000000..e52ef16371 --- /dev/null +++ b/tests/unit/C/common/test_reading_array.cpp @@ -0,0 +1,91 @@ +/* + * unit tests - FOGL-7748 : Support array data in reading json + * + * Copyright (c) 2023 Dianomic Systems, Inc. + * + * Released under the Apache 2.0 Licence + * + * Author: Devki Nandan Ghildiyal + */ + +#include +#include +#include +#include +#include +#include + +using namespace std; + + +const char *ReadingJSON = R"( + { + "floor1":30.25, "floor2":34.28, "floor3":[38.25,60.89,40.28] + } +)"; + +const char *unsupportedReadingJSON = R"( + { + "floor1":[38,"error",40] + } +)"; + +const char *NestedReadingJSON = R"( +{ + "pressure": {"floor1":30, "floor2":34, "floor3":[38,60,40] } +} +)"; + +TEST(TESTReading, TestUnspportedReadingForListType ) +{ + try + { + vector readings; + readings.push_back(new Reading("test", unsupportedReadingJSON)); + vector&dp = readings[0]->getReadingData(); + + ASSERT_EQ(readings[0]->getDatapointCount(),1); + ASSERT_EQ(readings[0]->getAssetName(),"test"); + } + catch(exception& ex) + { + string msg(ex.what()); + ASSERT_EQ(msg,"Only numeric lists are currently supported in datapoints"); + } + + +} + +TEST(TESTReading, TestReadingForListType ) +{ + vector readings; + readings.push_back(new Reading("test", ReadingJSON)); + vector&dp = readings[0]->getReadingData(); + + ASSERT_EQ(readings[0]->getDatapointCount(),3); + ASSERT_EQ(readings[0]->getAssetName(),"test"); + + ASSERT_EQ(dp[0]->getName(),"floor1"); + ASSERT_EQ(dp[0]->getData().toDouble(),30.25); + + ASSERT_EQ(dp[1]->getName(),"floor2"); + ASSERT_EQ(dp[1]->getData().toDouble(),34.28); + + ASSERT_EQ(dp[2]->getName(),"floor3"); + ASSERT_EQ(dp[2]->getData().toString(),"[38.25, 60.89, 40.28]"); +} + +TEST(TESTReading, TestReadingForNestedListType ) +{ + vector readings; + readings.push_back(new Reading("test", NestedReadingJSON)); + vector&dp = readings[0]->getReadingData(); + + ASSERT_EQ(readings[0]->getDatapointCount(),1); + ASSERT_EQ(readings[0]->getAssetName(),"test"); + + ASSERT_EQ(dp[0]->getName(),"pressure"); + ASSERT_EQ(dp[0]->getData().toString(),"{\"floor1\":30, \"floor2\":34, \"floor3\":[38, 60, 40]}"); + +} + diff --git a/tests/unit/C/common/test_string_utils.cpp b/tests/unit/C/common/test_string_utils.cpp index 4bcc557a90..57042ff689 100644 --- a/tests/unit/C/common/test_string_utils.cpp +++ b/tests/unit/C/common/test_string_utils.cpp @@ -215,4 +215,15 @@ TEST(TestIsRegex, AllCases) - +TEST(TestAround, Extract) +{ + string longString("not shownpreamble123This part is after the location"); + string s = StringAround(longString, 19); + EXPECT_STREQ(s.c_str(), "preamble123This part is after the locati"); + s = StringAround(longString, 19, 10); + EXPECT_STREQ(s.c_str(), "preamble123This part"); + s = StringAround(longString, 19, 10, 5); + EXPECT_STREQ(s.c_str(), "ble123This part"); + s = StringAround(longString, 5); + EXPECT_STREQ(s.c_str(), "not shownpreamble123This part is after t"); +} diff --git a/tests/unit/C/plugins/common/test_omf_translation.cpp b/tests/unit/C/plugins/common/test_omf_translation.cpp index 42c70ad399..90eecc6065 100644 --- a/tests/unit/C/plugins/common/test_omf_translation.cpp +++ b/tests/unit/C/plugins/common/test_omf_translation.cpp @@ -318,7 +318,7 @@ TEST(OMF_transation, OneReading) TEST(OMF_transation, SuperSet) { SimpleHttps sender("0.0.0.0:0", 10, 10, 10, 1); - OMF omf(sender, "/", 
1, "ABC"); + OMF omf("test", sender, "/", 1, "ABC"); // Build a ReadingSet from JSON ReadingSet readingSet(readings_with_different_datapoints); vectorreadings = readingSet.getAllReadings(); @@ -444,7 +444,7 @@ TEST(OMF_AfHierarchy, HandleAFMapNamesGood) // Dummy initializations SimpleHttps sender("0.0.0.0:0", 10, 10, 10, 1); - OMF omf(sender, "/", 1, "ABC"); + OMF omf("test", sender, "/", 1, "ABC"); omf.setAFMap(af_hierarchy_test01); @@ -474,7 +474,7 @@ TEST(OMF_AfHierarchy, HandleAFMapEmpty) // Dummy initializations SimpleHttps sender("0.0.0.0:0", 10, 10, 10, 1); - OMF omf(sender, "/", 1, "ABC"); + OMF omf("test", sender, "/", 1, "ABC"); // Test omf.setAFMap(af_hierarchy_test02); @@ -494,7 +494,7 @@ TEST(OMF_AfHierarchy, HandleAFMapNamesBad) // Dummy initializations SimpleHttps sender("0.0.0.0:0", 10, 10, 10, 1); - OMF omf(sender, "/", 1, "ABC"); + OMF omf("test", sender, "/", 1, "ABC"); omf.setAFMap(af_hierarchy_test01); MetadataRulesExist = omf.getMetadataRulesExist(); @@ -510,7 +510,7 @@ TEST(PiServer_NamingRules, NamingRulesCheck) // Dummy initializations SimpleHttps sender("0.0.0.0:0", 10, 10, 10, 1); - OMF omf(sender, "/", 1, "ABC"); + OMF omf("test", sender, "/", 1, "ABC"); ASSERT_EQ(omf.ApplyPIServerNamingRulesInvalidChars("asset_1", &changed), "asset_1"); ASSERT_EQ(changed, false); @@ -547,7 +547,7 @@ TEST(PiServer_NamingRules, Suffix) string assetName; // Dummy initializations SimpleHttps sender("0.0.0.0:0", 10, 10, 10, 1); - OMF omf(sender, "/", 1, "ABC"); + OMF omf("test", sender, "/", 1, "ABC"); assetName = "asset_1"; @@ -578,7 +578,7 @@ TEST(PiServer_NamingRules, Prefix) // Dummy initializations SimpleHttps sender("0.0.0.0:0", 10, 10, 10, 1); - OMF omf(sender, "/", 1, "ABC"); + OMF omf("test", sender, "/", 1, "ABC"); asset="asset_1"; @@ -777,4 +777,4 @@ TEST(OMF_hints, variableExtract) ASSERT_EQ (variable, "${Orange:unknown12}"); ASSERT_EQ (value, "Orange"); ASSERT_EQ (deafult, "unknown12"); -} \ No newline at end of file +} diff --git a/tests/unit/C/scripts/RunAllTests.sh b/tests/unit/C/scripts/RunAllTests.sh index 572b8eb484..ec8e31e02f 100755 --- a/tests/unit/C/scripts/RunAllTests.sh +++ b/tests/unit/C/scripts/RunAllTests.sh @@ -34,8 +34,8 @@ if [ ! -d results ] ; then fi if [ -f "./CMakeLists.txt" ] ; then - echo -n "Compiling libraries..." - (rm -rf build && mkdir -p build && cd build && cmake -DCMAKE_BUILD_TYPE=Debug .. && make ${jobs} && cd ..) > /dev/null + echo "Compiling libraries..." + (rm -rf build && mkdir -p build && cd build && cmake -DCMAKE_BUILD_TYPE=Debug .. && make ${jobs} && cd ..) echo "done" fi diff --git a/tests/unit/C/services/core/reading_set_copy.cpp b/tests/unit/C/services/core/reading_set_copy.cpp new file mode 100644 index 0000000000..489e4c5fbd --- /dev/null +++ b/tests/unit/C/services/core/reading_set_copy.cpp @@ -0,0 +1,173 @@ +/* + * unit tests - FOGL-7353 Fledge Copy ReadingSet + * + * Copyright (c) 2023 Dianomic Systems, Inc. 
+ * + * Released under the Apache 2.0 Licence + * + * Author: Devki Nandan Ghildiyal + */ + +#include +#include +#include +#include + +using namespace std; + +const char *ReadingJSON = R"( + { + "count" : 1, "rows" : [ + { + "id": 1, "asset_code": "temperature", + "reading": { "degrees": 200.65 }, + "user_ts": "2023-02-06 14:00:08.532958", + "ts": "2023-02-06 14:47:18.872708" + } + ] + } +)"; + +const char *NestedReadingJSON = R"( + { + "count" : 1, "rows" : [ + { + "id": 1, "asset_code": "SiteStatus", + "reading": { "degrees": [200.65,34.45,500.36],"pressure": {"floor1":30, "floor2":34, "floor3":36 } }, + "user_ts": "2023-02-06 14:00:08.532958", + "ts": "2023-02-06 14:47:18.872708" + } + ] + } +)"; + +TEST(READINGSET, DeepCopyCheckReadingFromNestedJSON) +{ + ReadingSet *readingSet1 = new ReadingSet(NestedReadingJSON); + ReadingSet *readingSet2 = new ReadingSet(); + readingSet2->copy(*readingSet1); + + auto r1 = readingSet1->getAllReadings(); + auto dp1 = r1[0]->getReadingData(); + + // Fetch nested datapoints + ASSERT_EQ(dp1[0]->getName(), "degrees"); + ASSERT_EQ(dp1[0]->getData().toString(), "[200.65, 34.45, 500.36]"); + ASSERT_EQ(dp1[1]->getName(), "pressure"); + ASSERT_EQ(dp1[1]->getData().toString(), "{\"floor1\":30, \"floor2\":34, \"floor3\":36}"); + + auto r2 = readingSet2->getAllReadings(); + auto dp2 = r2[0]->getReadingData(); + ASSERT_EQ(dp2[0]->getName(), "degrees"); + ASSERT_EQ(dp2[0]->getData().toString(), "[200.65, 34.45, 500.36]"); + ASSERT_EQ(dp2[1]->getName(), "pressure"); + ASSERT_EQ(dp2[1]->getData().toString(), "{\"floor1\":30, \"floor2\":34, \"floor3\":36}"); + + // Check the address of datapoints + ASSERT_NE(dp1[0], dp2[0]); + ASSERT_NE(dp1[1], dp2[1]); + + // Confirm there is no error of double delete + delete readingSet1; + delete readingSet2; +} + +TEST(READINGSET, DeepCopyCheckReadingFromJSON) +{ + ReadingSet *readingSet1 = new ReadingSet(ReadingJSON); + ReadingSet *readingSet2 = new ReadingSet(); + readingSet2->copy(*readingSet1); + + delete readingSet1; + + // Fetch value after deleting readingSet1 to check readingSet2 is pointing to different memory location + for (auto reading : readingSet2->getAllReadings()) + { + for (auto &dp : reading->getReadingData()) + { + std::string dataPointName = dp->getName(); + DatapointValue dv = dp->getData(); + ASSERT_EQ(dataPointName, "degrees"); + ASSERT_EQ(dv.toDouble(), 200.65); + } + } + + // Confirm there is no error of double delete + delete readingSet2; +} + +TEST(READINGSET, DeepCopyCheckReadingFromVector) +{ + vector *readings1 = new vector; + long integerValue = 100; + DatapointValue dpv(integerValue); + Datapoint *value = new Datapoint("kPa", dpv); + Reading *in = new Reading("Pressure", value); + readings1->push_back(in); + + ReadingSet *readingSet1 = new ReadingSet(readings1); + ReadingSet *readingSet2 = new ReadingSet(); + readingSet2->copy(*readingSet1); + + delete readingSet1; + + // Fetch value after deleting readingSet1 to check readingSet2 is pointing to different memory location + for (auto reading : readingSet2->getAllReadings()) + { + for (auto &dp : reading->getReadingData()) + { + std::string dataPointName = dp->getName(); + DatapointValue dv = dp->getData(); + ASSERT_EQ(dataPointName, "kPa"); + ASSERT_EQ(dv.toInt(), 100); + } + } + // Confirm there is no error of double delete + delete readingSet2; +} + +TEST(READINGSET, DeepCopyCheckAppend) +{ + vector *readings1 = new vector; + long integerValue = 100; + DatapointValue dpv(integerValue); + Datapoint *value = new Datapoint("kPa", dpv); + Reading *in = 
new Reading("Pressure", value); + readings1->push_back(in); + ReadingSet *readingSet1 = new ReadingSet(readings1); + + vector *readings2 = new vector; + long integerValue2 = 400; + DatapointValue dpv2(integerValue2); + Datapoint *value2 = new Datapoint("kPa", dpv2); + Reading *in2 = new Reading("Pressure", value2); + readings2->push_back(in2); + ReadingSet *readingSet2 = new ReadingSet(readings2); + + readingSet2->copy(*readingSet1); + + int size = readingSet2->getAllReadings().size(); + ASSERT_EQ(size, 2); +} + +TEST(READINGSET, DeepCopyCheckAddress) +{ + vector *readings1 = new vector; + long integerValue = 100; + DatapointValue dpv(integerValue); + Datapoint *value = new Datapoint("kPa", dpv); + Reading *in = new Reading("Pressure", value); + readings1->push_back(in); + + ReadingSet *readingSet1 = new ReadingSet(readings1); + ReadingSet *readingSet2 = new ReadingSet(); + readingSet2->copy(*readingSet1); + + auto r1 = readingSet1->getAllReadings(); + auto dp1 = r1[0]->getReadingData(); + + auto r2 = readingSet2->getAllReadings(); + auto dp2 = r2[0]->getReadingData(); + + ASSERT_NE(dp1, dp2); +} diff --git a/tests/unit/C/services/storage/postgres/CMakeLists.txt b/tests/unit/C/services/storage/postgres/CMakeLists.txt index 2a218ee9cb..975af18a90 100644 --- a/tests/unit/C/services/storage/postgres/CMakeLists.txt +++ b/tests/unit/C/services/storage/postgres/CMakeLists.txt @@ -7,7 +7,11 @@ set(GCOVR_PATH "$ENV{HOME}/.local/bin/gcovr") project(RunTests) set(CMAKE_CXX_FLAGS "-std=c++11 -O0") - + +set(COMMON_LIB common-lib) +set(SERVICE_COMMON_LIB services-common-lib) +set(PLUGINS_COMMON_LIB plugins-common-lib) + include(CodeCoverage) append_coverage_compiler_flags() @@ -20,17 +24,69 @@ include_directories(../../../../../../C/common/include) include_directories(../../../../../../C/thirdparty/rapidjson/include) file(GLOB test_sources "../../../../../../C/services/storage/configuration.cpp") -file(GLOB logger_sources "../../../../../../C/common/logger.cpp") -file(GLOB config_sources "../../../../../../C/common/config_category.cpp") -file(GLOB utils_sources "../../../../../../C/common/json_utils.cpp") + +link_directories(${PROJECT_BINARY_DIR}/../../../../lib) +# Find python3.x dev/lib package +find_package(PkgConfig REQUIRED) +if(${CMAKE_VERSION} VERSION_LESS "3.12.0") + pkg_check_modules(PYTHON REQUIRED python3) +else() + if("${OS_NAME}" STREQUAL "mendel") + # We will explicitly set include path later for NumPy. + find_package(Python3 REQUIRED COMPONENTS Interpreter Development ) + else() + find_package(Python3 REQUIRED COMPONENTS Interpreter Development NumPy) + endif() +endif() + +# Add Python 3.x header files +if(${CMAKE_VERSION} VERSION_LESS "3.12.0") + include_directories(${PYTHON_INCLUDE_DIRS}) +else() + if("${OS_NAME}" STREQUAL "mendel") + # The following command gets the location of NumPy. + execute_process( + COMMAND python3 + -c "import numpy; print(numpy.get_include())" + OUTPUT_VARIABLE Python3_NUMPY_INCLUDE_DIRS + OUTPUT_STRIP_TRAILING_WHITESPACE + ) + # Now we can add include directories as usual. 
+ include_directories(${Python3_INCLUDE_DIRS} ${Python3_NUMPY_INCLUDE_DIRS}) + else() + include_directories(${Python3_INCLUDE_DIRS} ${Python3_NUMPY_INCLUDE_DIRS}) + endif() +endif() + +if(${CMAKE_VERSION} VERSION_LESS "3.12.0") + link_directories(${PYTHON_LIBRARY_DIRS}) +else() + link_directories(${Python3_LIBRARY_DIRS}) +endif() + # Link runTests with what we want to test and the GTest and pthread library -add_executable(RunTests ${test_sources} ${logger_sources} ${config_sources} ${utils_sources} tests.cpp) +add_executable(RunTests ${test_sources} tests.cpp) #setting BOOST_COMPONENTS to use pthread library only set(BOOST_COMPONENTS thread) find_package(Boost 1.53.0 COMPONENTS ${BOOST_COMPONENTS} REQUIRED) target_link_libraries(RunTests ${GTEST_LIBRARIES} pthread) +target_link_libraries(RunTests ${COMMON_LIB}) +target_link_libraries(RunTests ${SERVICE_COMMON_LIB}) +target_link_libraries(RunTests ${PLUGINS_COMMON_LIB}) + +# Add Python 3.x library +if(${CMAKE_VERSION} VERSION_LESS "3.12.0") + target_link_libraries(RunTests ${PYTHON_LIBRARIES}) +else() + if("${OS_NAME}" STREQUAL "mendel") + target_link_libraries(${PROJECT_NAME} ${Python3_LIBRARIES}) + else() + target_link_libraries(${PROJECT_NAME} ${Python3_LIBRARIES} Python3::NumPy) + endif() +endif() + setup_target_for_coverage_gcovr_html( NAME CoverageHtml EXECUTABLE ${PROJECT_NAME} diff --git a/tests/unit/C/services/storage/postgres/etc/storage.json b/tests/unit/C/services/storage/postgres/etc/storage.json index dabc26a8cf..bfa3e91799 100644 --- a/tests/unit/C/services/storage/postgres/etc/storage.json +++ b/tests/unit/C/services/storage/postgres/etc/storage.json @@ -1 +1 @@ -{"plugin":{"value":"postgres","default":"postgres","description":"The main storage plugin to load","type":"string","displayName":"Storage Plugin","order":"1"},"readingPlugin":{"value":"","default":"","description":"The storage plugin to load for readings data. 
If blank the main storage plugin is used.","type":"string","displayName":"Readings Plugin","order":"2"},"threads":{"value":"1","default":"1","description":"The number of threads to run","type":"integer","displayName":"Database threads","order":"3"},"managedStatus":{"value":"false","default":"false","description":"Control if Fledge should manage the storage provider","type":"boolean","displayName":"Manage Storage","order":"4"},"port":{"value":"8080","default":"0","description":"The port to listen on","type":"integer","displayName":"Service Port","order":"5"},"managementPort":{"value":"1081","default":"0","description":"The management port to listen on.","type":"integer","displayName":"Management Port","order":"6"}} \ No newline at end of file +{"plugin":{"value":"postgres","default":"postgres","description":"The main storage plugin to load","type":"enumeration","options":["sqlite","sqlitelb","postgres"],"displayName":"Storage Plugin","order":"1"},"readingPlugin":{"value":"","default":"Use main plugin","description":"The storage plugin to load for readings data.","type":"enumeration","options":["Use main plugin","sqlite","sqlitelb","sqlitememory","postgres"],"displayName":"Readings Plugin","order":"2"},"threads":{"value":"1","default":"1","description":"The number of threads to run","type":"integer","displayName":"Database threads","order":"3"},"managedStatus":{"value":"false","default":"false","description":"Control if Fledge should manage the storage provider","type":"boolean","displayName":"Manage Storage","order":"4"},"port":{"value":"8080","default":"0","description":"The port to listen on","type":"integer","displayName":"Service Port","order":"5"},"managementPort":{"value":"1081","default":"0","description":"The management port to listen on.","type":"integer","displayName":"Management Port","order":"6"},"logLevel":{"value":"warning","default":"warning","description":"Minimum level of messages to log","type":"enumeration","displayName":"Log Level","options":["error","warning","info","debug"],"order":"7"}} \ No newline at end of file diff --git a/tests/unit/python/fledge/common/microservice_management_client/test_microservice_management_client.py b/tests/unit/python/fledge/common/microservice_management_client/test_microservice_management_client.py index 1f94cdde15..ad17fd7e28 100644 --- a/tests/unit/python/fledge/common/microservice_management_client/test_microservice_management_client.py +++ b/tests/unit/python/fledge/common/microservice_management_client/test_microservice_management_client.py @@ -60,8 +60,8 @@ def test_register_service_no_id(self): ms_mgt_client.register_service({}) assert excinfo.type is KeyError assert 1 == log_exc.call_count - log_exc.assert_called_once_with('Could not register the microservice, From request %s, Reason: %s', '{}' - , "'id'") + args = log_exc.call_args + assert 'Could not register the microservice, From request {}'.format('{}') == args[0][1] response_patch.assert_called_once_with() request_patch.assert_called_once_with(body='{}', method='POST', url='/fledge/service') @@ -117,10 +117,10 @@ def test_unregister_service_no_id(self): with patch.object(_logger, "error") as log_error: with pytest.raises(Exception) as excinfo: ms_mgt_client.unregister_service('someid') - assert excinfo.type is KeyError - assert 1 == log_error.call_count - log_error.assert_called_once_with('Could not unregister the micro-service having ' - 'uuid %s, Reason: %s', 'someid', "'id'", exc_info=True) + assert excinfo.type is KeyError + assert 1 == log_error.call_count + args = 
log_error.call_args + assert 'Could not unregister the microservice having UUID {}'.format('someid') == args[0][1] response_patch.assert_called_once_with() request_patch.assert_called_once_with(method='DELETE', url='/fledge/service/someid') @@ -169,25 +169,24 @@ def test_register_interest_good_id(self): def test_register_interest_no_id(self): microservice_management_host = 'host1' microservice_management_port = 1 - ms_mgt_client = MicroserviceManagementClient( - microservice_management_host, microservice_management_port) + ms_mgt_client = MicroserviceManagementClient(microservice_management_host, microservice_management_port) response_mock = MagicMock(type=HTTPResponse) undecoded_data_mock = MagicMock() response_mock.read.return_value = undecoded_data_mock undecoded_data_mock.decode.return_value = json.dumps({'notid': 'bla'}) response_mock.status = 200 + payload = '{"category": "cat", "service": "msid"}' with patch.object(HTTPConnection, 'request') as request_patch: with patch.object(HTTPConnection, 'getresponse', return_value=response_mock) as response_patch: with patch.object(_logger, "error") as log_error: with pytest.raises(Exception) as excinfo: ms_mgt_client.register_interest('cat', 'msid') - assert excinfo.type is KeyError + assert excinfo.type is KeyError assert 1 == log_error.call_count - log_error.assert_called_once_with('Could not register interest, for request payload %s, Reason: %s', - '{"category": "cat", "service": "msid"}', "'id'", exc_info=True) + args = log_error.call_args + assert 'Could not register interest, for request payload {}'.format(payload) == args[0][1] response_patch.assert_called_once_with() - request_patch.assert_called_once_with(body='{"category": "cat", "service": "msid"}', method='POST', - url='/fledge/interest') + request_patch.assert_called_once_with(body=payload, method='POST', url='/fledge/interest') @pytest.mark.parametrize("status_code, host", [(450, 'Client'), (550, 'Server')]) def test_register_interest_status_client_err(self, status_code, host): @@ -244,8 +243,8 @@ def test_unregister_interest_no_id(self): ms_mgt_client.unregister_interest('someid') assert excinfo.type is KeyError assert 1 == log_error.call_count - log_error.assert_called_once_with('Could not unregister interest for %s, Reason: %s', 'someid', - "'id'", exc_info=True) + args = log_error.call_args + assert 'Could not unregister interest for {}'.format('someid') == args[0][1] response_patch.assert_called_once_with() request_patch.assert_called_once_with(method='DELETE', url='/fledge/interest/someid') @@ -313,8 +312,9 @@ def test_get_services_no_services(self): ms_mgt_client.get_services('foo', 'bar') assert excinfo.type is KeyError assert 1 == log_error.call_count - log_error.assert_called_once_with('Could not find the micro-service for requested url %s, Reason: %s', - '/fledge/service?name=foo&type=bar', "'services'", exc_info=True) + args = log_error.call_args + assert 'Could not find the microservice for requested url {}'.format( + '/fledge/service?name=foo&type=bar') == args[0][1] response_patch.assert_called_once_with() request_patch.assert_called_once_with(method='GET', url='/fledge/service?name=foo&type=bar') @@ -354,7 +354,7 @@ def test_get_configuration_category(self): test_dict = { 'ping_timeout': { 'type': 'integer', - 'description': 'Timeout for a response from any given micro-service. (must be greater than 0)', + 'description': 'Timeout for a response from any given microservice. 
(must be greater than 0)', 'value': '1', 'default': '1'}, 'sleep_interval': { @@ -406,7 +406,7 @@ def test_get_configuration_item(self): response_mock.read.return_value = undecoded_data_mock test_dict = { 'type': 'integer', - 'description': 'Timeout for a response from any given micro-service. (must be greater than 0)', + 'description': 'Timeout for a response from any given microservice. (must be greater than 0)', 'value': '1', 'default': '1' } @@ -457,7 +457,7 @@ def test_create_configuration_category(self): 'value': { 'ping_timeout': { 'type': 'integer', - 'description': 'Timeout for a response from any given micro-service. (must be greater than 0)', + 'description': 'Timeout for a response from any given microservice. (must be greater than 0)', 'value': '1', 'default': '1'}, 'sleep_interval': { @@ -495,7 +495,7 @@ def test_create_configuration_category_exception(self, status_code, host): 'value': { 'ping_timeout': { 'type': 'integer', - 'description': 'Timeout for a response from any given micro-service. (must be greater than 0)', + 'description': 'Timeout for a response from any given microservice. (must be greater than 0)', 'value': '1', 'default': '1'}, 'sleep_interval': { @@ -540,7 +540,7 @@ def test_create_configuration_category_keep_original(self): 'value': { 'ping_timeout': { 'type': 'integer', - 'description': 'Timeout for a response from any given micro-service. (must be greater than 0)', + 'description': 'Timeout for a response from any given microservice. (must be greater than 0)', 'value': '1', 'default': '1'}, 'sleep_interval': { @@ -559,7 +559,7 @@ def test_create_configuration_category_keep_original(self): 'value': { 'ping_timeout': { 'type': 'integer', - 'description': 'Timeout for a response from any given micro-service. (must be greater than 0)', + 'description': 'Timeout for a response from any given microservice. 
(must be greater than 0)', 'value': '1', 'default': '1'}, 'sleep_interval': { diff --git a/tests/unit/python/fledge/common/storage_client/data/payload_select_alias_with_timezone.json b/tests/unit/python/fledge/common/storage_client/data/payload_select_alias_with_timezone.json new file mode 100644 index 0000000000..32e791fae1 --- /dev/null +++ b/tests/unit/python/fledge/common/storage_client/data/payload_select_alias_with_timezone.json @@ -0,0 +1,7 @@ +{ + "return": [{ + "column": "user_ts", + "alias": "timestamp", + "timezone": "utc" + }] +} \ No newline at end of file diff --git a/tests/unit/python/fledge/common/storage_client/test_payload_builder.py b/tests/unit/python/fledge/common/storage_client/test_payload_builder.py index 49a831f682..dff213bb1c 100644 --- a/tests/unit/python/fledge/common/storage_client/test_payload_builder.py +++ b/tests/unit/python/fledge/common/storage_client/test_payload_builder.py @@ -49,7 +49,8 @@ def test_select_payload_with_alias1(self, test_input, expected): @pytest.mark.parametrize("test_input, expected", [ (("reading", "user_ts"), - {"return": ["reading", {"format": "YYYY-MM-DD HH24:MI:SS.MS", "column": "user_ts", "alias": "timestamp"}]}) + {"return": ["reading", {"format": "YYYY-MM-DD HH24:MI:SS.MS", "column": "user_ts", + "alias": "timestamp", "timezone": "utc"}]}) ]) def test_select_payload_with_alias_and_format(self, test_input, expected): res = PayloadBuilder().SELECT(test_input).ALIAS('return', ('user_ts', 'timestamp')).\ @@ -70,6 +71,13 @@ def test_select_payload_with_alias3(self, test_input, expected): res = PayloadBuilder().SELECT(test_input).ALIAS('return', ('name', 'my_name'), ('id', 'my_id')).payload() assert expected == json.loads(res) + @pytest.mark.parametrize("test_input, expected", [ + ("user_ts", _payload("data/payload_select_alias_with_timezone.json")) + ]) + def test_select_payload_with_alias_with_timezone(self, test_input, expected): + res = PayloadBuilder().SELECT(test_input).ALIAS('return', ('user_ts', 'timestamp'), ('timezone', 'utc')).payload() + assert expected == json.loads(res) + @pytest.mark.parametrize("test_input, expected", [ ("test", _payload("data/payload_from1.json")), ("test, test2", _payload("data/payload_from2.json")) diff --git a/tests/unit/python/fledge/common/storage_client/test_storage_client.py b/tests/unit/python/fledge/common/storage_client/test_storage_client.py index 0223d13245..d87f921d43 100644 --- a/tests/unit/python/fledge/common/storage_client/test_storage_client.py +++ b/tests/unit/python/fledge/common/storage_client/test_storage_client.py @@ -672,6 +672,7 @@ async def test_purge(self, event_loop): assert "{}:{}".format(HOST, PORT) == rsc.base_url RETAINALL_FLAG = "retainall" + PURGE = "purge" with pytest.raises(Exception) as excinfo: kwargs = dict(flag='blah', age=1, sent_id=0, size=None) await rsc.purge(**kwargs) @@ -717,21 +718,17 @@ async def test_purge(self, event_loop): assert excinfo.type is ValueError assert "invalid literal for int() with base 10" in str(excinfo.value) - with pytest.raises(Exception) as excinfo: - with patch.object(_LOGGER, "error") as log_e: - kwargs = dict(age=-1, sent_id=1, size=None, flag=RETAINALL_FLAG) - await rsc.purge(**kwargs) - log_e.assert_called_once_with('PUT url %s, Error code: %d, reason: %s, details: %s', - '/storage/reading/purge?age=-1&sent=1&flags=retain', 400, 'age should not be less than 0', {"key": "value"}) - assert excinfo.type is aiohttp.client_exceptions.ContentTypeError + with patch.object(_LOGGER, "error") as log_e: + kwargs = dict(age=-1, sent_id=1, 
size=None, flag=RETAINALL_FLAG) + result = await rsc.purge(**kwargs) + assert result is None + log_e.assert_called() - with pytest.raises(Exception) as excinfo: - with patch.object(_LOGGER, "error") as log_e: - kwargs = dict(age=None, sent_id=1, size=4294967296, flag=RETAINALL_FLAG) - await rsc.purge(**kwargs) - log_e.assert_called_once_with('PUT url %s, Error code: %d, reason: %s, details: %s', - '/storage/reading/purge?size=4294967296&sent=1&flags=retain', 500, 'unsigned int range', {"key": "value"}) - assert excinfo.type is aiohttp.client_exceptions.ContentTypeError + with patch.object(_LOGGER, "error") as log_e: + kwargs = dict(age=None, sent_id=1, size=4294967296, flag=RETAINALL_FLAG) + result = await rsc.purge(**kwargs) + assert result is None + log_e.assert_called() kwargs = dict(age=1, sent_id=1, size=0, flag=RETAINALL_FLAG) response = await rsc.purge(**kwargs) @@ -749,4 +746,24 @@ async def test_purge(self, event_loop): response = await rsc.purge(**kwargs) assert 1 == response["called"] + with patch.object(_LOGGER, "error") as log_e: + kwargs = dict(age=-1, sent_id=1, size=None, flag=PURGE) + result = await rsc.purge(**kwargs) + assert result is None + log_e.assert_called() + + with patch.object(_LOGGER, "error") as log_e: + kwargs = dict(age=None, sent_id=1, size=4294967296, flag=PURGE) + result = await rsc.purge(**kwargs) + assert result is None + log_e.assert_called() + + kwargs = dict(age=10, sent_id=1, size=None, flag=PURGE) + response = await rsc.purge(**kwargs) + assert 1 == response["called"] + + kwargs = dict(age=None, sent_id=1, size=100, flag=PURGE) + response = await rsc.purge(**kwargs) + assert 1 == response["called"] + await fake_storage_srvr.stop() diff --git a/tests/unit/python/fledge/common/test_configuration_manager.py b/tests/unit/python/fledge/common/test_configuration_manager.py index c26546d62f..5048a75f03 100644 --- a/tests/unit/python/fledge/common/test_configuration_manager.py +++ b/tests/unit/python/fledge/common/test_configuration_manager.py @@ -1258,15 +1258,23 @@ async def async_mock(return_value): with patch.object(ConfigurationManager, '_validate_category_val', side_effect=[_se, _se]) as valpatch: with patch.object(ConfigurationManager, '_read_category_val', return_value=_rv1) as readpatch: - with patch.object(ConfigurationManager, '_read_all_category_names', return_value=_rv2) as read_all_patch: + with patch.object(ConfigurationManager, '_read_all_category_names', + return_value=_rv2) as read_all_patch: with patch.object(ConfigurationManager, '_merge_category_vals', return_value=_rv3) as mergepatch: with patch.object(ConfigurationManager, '_run_callbacks', return_value=_rv4) as callbackpatch: - with patch.object(ConfigurationManager, '_update_category', return_value=_rv4) as updatepatch: - with patch.object(ConfigurationManager, 'search_for_ACL_recursive_from_cat_name', - return_value=_sr) as searchaclpatch: - cat = await c_mgr.create_category('catname', 'catvalue', 'catdesc') - assert cat is None - searchaclpatch.assert_called_once_with('catname') + with patch.object(ConfigurationManager, '_update_category', + return_value=_rv4) as updatepatch: + with patch.object(AuditLogger, '__init__', return_value=None): + with patch.object(AuditLogger, 'information', return_value=_rv4) as auditinfopatch: + with patch.object(ConfigurationManager, + 'search_for_ACL_recursive_from_cat_name', + return_value=_sr) as searchaclpatch: + cat = await c_mgr.create_category('catname', 'catvalue', 'catdesc') + assert cat is None + 
searchaclpatch.assert_called_once_with('catname') + auditinfopatch.assert_called_once_with( + 'CONCH', {'category': 'catname', 'item': 'configurationChange', 'oldValue': {}, + 'newValue': {'bla': 'bla'}}) updatepatch.assert_called_once_with('catname', {'bla': 'bla'}, 'catdesc', 'catname') callbackpatch.assert_called_once_with('catname') mergepatch.assert_called_once_with({}, {}, False, 'catname') @@ -1746,7 +1754,7 @@ async def test_get_category_all_items_bad(self, reset_singleton): await c_mgr.get_category_all_items(category_name) readpatch.assert_called_once_with(category_name) assert 1 == log_exc.call_count - log_exc.assert_called_once_with('Unable to get all category names based on category_name %s', 'catname') + log_exc.assert_called_once_with('Unable to get all category items of {} category.'.format(category_name)) async def test_get_category_item_good(self, reset_singleton): diff --git a/tests/unit/python/fledge/common/test_jqfilter.py b/tests/unit/python/fledge/common/test_jqfilter.py index 5cab91b79d..5cf87ca336 100644 --- a/tests/unit/python/fledge/common/test_jqfilter.py +++ b/tests/unit/python/fledge/common/test_jqfilter.py @@ -10,7 +10,7 @@ from unittest.mock import patch import pytest import pyjq -from fledge.common import logger +from fledge.common.logger import FLCoreLogger from fledge.common.jqfilter import JQFilter __author__ = "Vaibhav Singhal" @@ -23,9 +23,9 @@ @pytest.allure.story("common", "jqfilter") class TestJQFilter: def test_init(self): - with patch.object(logger, "setup") as log: + with patch.object(FLCoreLogger, "get_logger") as log: jqfilter_instance = JQFilter() - assert isinstance(jqfilter_instance, JQFilter) + assert isinstance(jqfilter_instance, JQFilter) log.assert_called_once_with("JQFilter") @pytest.mark.parametrize("input_filter_string, input_reading_block, expected_return", [ @@ -39,14 +39,16 @@ def test_transform(self, input_filter_string, input_reading_block, expected_retu mock_pyjq.assert_called_once_with(input_reading_block, input_filter_string) @pytest.mark.parametrize("input_filter_string, input_reading_block, expected_error, expected_log", [ - (".", '{"a" 1}', TypeError, 'Invalid JSON passed, exception %s'), - ("..", '{"a" 1}', ValueError, 'Failed to transform, please check the transformation rule, exception %s') + (".", '{"a" 1}', TypeError, 'Invalid JSON passed during jq transform.'), + ("..", '{"a" 1}', ValueError, 'Failed to transform, please check the transformation rule.') ]) def test_transform_exceptions(self, input_filter_string, input_reading_block, expected_error, expected_log): jqfilter_instance = JQFilter() with patch.object(pyjq, "all", side_effect=expected_error) as mock_pyjq: - with patch.object(jqfilter_instance._logger, "error") as log: + with patch.object(jqfilter_instance._logger, "error") as patch_log: with pytest.raises(expected_error): jqfilter_instance.transform(input_filter_string, input_reading_block) + args = patch_log.call_args + assert expected_error == args[0][0].__class__ + assert expected_log == args[0][1] mock_pyjq.assert_called_once_with(input_reading_block, input_filter_string) - log.assert_called_once_with(expected_log, '') diff --git a/tests/unit/python/fledge/common/test_plugin_discovery.py b/tests/unit/python/fledge/common/test_plugin_discovery.py index 039b9db0d5..1af95266b6 100644 --- a/tests/unit/python/fledge/common/test_plugin_discovery.py +++ b/tests/unit/python/fledge/common/test_plugin_discovery.py @@ -559,25 +559,25 @@ def test_bad_fetch_c_north_plugin_installed(self, info, exc_count): assert 
exc_count == patch_log_exc.call_count @pytest.mark.parametrize("exc_name, log_exc_name, msg", [ - (FileNotFoundError, "error", 'Plugin "modbus" import problem from path "modbus".'), - (Exception, "exception", 'Plugin "modbus" raised exception "" while fetching config') + (FileNotFoundError, "error", 'Import problem from path "modbus" for modbus plugin.'), + (Exception, "exception", 'Failed to fetch config for modbus plugin.') ]) def test_bad_get_south_plugin_config(self, exc_name, log_exc_name, msg): with patch.object(_logger, log_exc_name) as patch_log_exc: with patch.object(common, 'load_and_fetch_python_plugin_info', side_effect=[exc_name]): PluginDiscovery.get_plugin_config("modbus", "south", "south", False) assert 1 == patch_log_exc.call_count - args, kwargs = patch_log_exc.call_args - assert msg in args[0] + args = patch_log_exc.call_args + assert msg == args[0][1] @pytest.mark.parametrize("exc_name, log_exc_name, msg", [ - (FileNotFoundError, "error", 'Plugin "http" import problem from path "http".'), - (Exception, "exception", 'Plugin "http" raised exception "" while fetching config') + (FileNotFoundError, "error", 'Import problem from path "http" for http plugin.'), + (Exception, "exception", 'Failed to fetch config for http plugin.') ]) def test_bad_get_north_plugin_config(self, exc_name, log_exc_name, msg): with patch.object(_logger, log_exc_name) as patch_log_exc: with patch.object(common, 'load_and_fetch_python_plugin_info', side_effect=[exc_name]): PluginDiscovery.get_plugin_config("http", "north", "north", False) assert 1 == patch_log_exc.call_count - args, kwargs = patch_log_exc.call_args - assert msg in args[0] + args = patch_log_exc.call_args + assert msg == args[0][1] diff --git a/tests/unit/python/fledge/common/test_process.py b/tests/unit/python/fledge/common/test_process.py index 3176709baf..c979c02fdd 100644 --- a/tests/unit/python/fledge/common/test_process.py +++ b/tests/unit/python/fledge/common/test_process.py @@ -5,6 +5,7 @@ from unittest.mock import patch +from fledge.common import process from fledge.common.storage_client.storage_client import ReadingsStorageClientAsync, StorageClientAsync from fledge.common.process import FledgeProcess, ArgumentParserError from fledge.common.microservice_management_client.microservice_management_client import MicroserviceManagementClient @@ -39,9 +40,11 @@ def run(self): pass with patch.object(sys, 'argv', argslist): with pytest.raises(ArgumentParserError) as excinfo: - fp = FledgeProcessImp() - assert '' in str( - excinfo.value) + with patch.object(process._logger, "error") as patch_logger: + fp = FledgeProcessImp() + assert 1 == patch_logger.call_count + patch_logger.assert_called_once_with() + assert '' in str(excinfo.value) def test_constructor_good(self): class FledgeProcessImp(FledgeProcess): diff --git a/tests/unit/python/fledge/common/test_statistics.py b/tests/unit/python/fledge/common/test_statistics.py index dd67112d03..1276b31eb8 100644 --- a/tests/unit/python/fledge/common/test_statistics.py +++ b/tests/unit/python/fledge/common/test_statistics.py @@ -170,8 +170,8 @@ async def mock_coro(): with patch.object(statistics._logger, 'exception') as logger_exception: with patch.object(s._storage, 'query_tbl_with_payload', return_value=_rv): await s._load_keys() - args, kwargs = logger_exception.call_args - assert args[0] == 'Failed to retrieve statistics keys, %s' + args = logger_exception.call_args + assert args[0][1] == 'Failed to retrieve statistics keys' async def test_update(self): storage_client_mock = 
MagicMock(spec=StorageClientAsync) @@ -212,13 +212,13 @@ async def test_update_with_invalid_params(self, key, value_increment, exception_ async def test_update_exception(self): storage_client_mock = MagicMock(spec=StorageClientAsync) s = statistics.Statistics(storage_client_mock) - msg = 'Unable to update statistics value based on statistics_key %s and value_increment %d,' \ - ' error %s', 'BUFFERED', 5, '' + msg = 'Unable to update statistics value based on statistics_key {} and value_increment {}'.format('BUFFERED', 5) with patch.object(s._storage, 'update_tbl', side_effect=Exception()): with pytest.raises(Exception): with patch.object(statistics._logger, 'exception') as logger_exception: await s.update('BUFFERED', 5) - logger_exception.assert_called_once_with(*msg) + args = logger_exception.call_args + assert msg == args[0][1] async def test_add_update(self): stat_dict = {'FOGBENCH/TEMPERATURE': 1} diff --git a/tests/unit/python/fledge/services/common/test_microservice.py b/tests/unit/python/fledge/services/common/test_microservice.py index e6132968b3..ba6824c379 100644 --- a/tests/unit/python/fledge/services/common/test_microservice.py +++ b/tests/unit/python/fledge/services/common/test_microservice.py @@ -160,7 +160,8 @@ async def add_track(self): with patch.object(_logger, 'exception') as logger_patch: with pytest.raises(Exception) as excinfo: fm = FledgeMicroserviceImp() - logger_patch.assert_called_once_with('Unable to intialize FledgeMicroservice due to exception %s', '') + args = logger_patch.call_args + assert 'Unable to initialize FledgeMicroservice' == args[0][1] @pytest.mark.asyncio async def test_ping(self, loop): diff --git a/tests/unit/python/fledge/services/common/test_services_common_utils.py b/tests/unit/python/fledge/services/common/test_services_common_utils.py index 0112e7f73b..338eea8679 100644 --- a/tests/unit/python/fledge/services/common/test_services_common_utils.py +++ b/tests/unit/python/fledge/services/common/test_services_common_utils.py @@ -57,15 +57,14 @@ async def test_ping_service_pass(self, aiohttp_server, loop): await server.start_server(loop=loop) # WHEN the service is pinged with a valid URL - with patch.object(utils._logger, "info") as log: + with patch.object(utils._logger, "debug") as patch_logger: service = ServiceRecord("d", "test", "Southbound", "http", server.host, 1, server.port) url_ping = "{}://{}:{}/fledge/service/ping".format(service._protocol, service._address, service._management_port) log_params = 'Ping received for Service %s id %s at url %s', service._name, service._id, url_ping resp = await utils.ping_service(service, loop=loop) - - # THEN ping response is received - assert resp is True - log.assert_called_once_with(*log_params) + # THEN ping response is received + assert resp is True + patch_logger.assert_called_once_with(*log_params) async def test_ping_service_fail_bad_url(self, aiohttp_server, loop): # GIVEN a service is running at a given URL @@ -86,9 +85,8 @@ async def test_ping_service_fail_bad_url(self, aiohttp_server, loop): log_params = 'Ping not received for Service %s id %s at url %s attempt_count %s', service._name, service._id, \ url_ping, utils._MAX_ATTEMPTS+1 resp = await utils.ping_service(service, loop=loop) - - # THEN ping response is NOT received - assert resp is False + # THEN ping response is NOT received + assert resp is False log.assert_called_once_with(*log_params) async def test_shutdown_service_pass(self, aiohttp_server, loop): @@ -107,14 +105,12 @@ async def test_shutdown_service_pass(self, 
aiohttp_server, loop): service = ServiceRecord("d", "test", "Southbound", "http", server.host, 1, server.port) url_shutdown = "{}://{}:{}/fledge/service/shutdown".format(service._protocol, service._address, service._management_port) - log_params1 = "Shutting down the %s service %s ...", service._type, service._name - log_params2 = 'Service %s, id %s at url %s successfully shutdown', service._name, service._id, url_shutdown + log_params = 'Service %s, id %s at url %s successfully shutdown', service._name, service._id, url_shutdown resp = await utils.shutdown_service(service, loop=loop) - - # THEN shutdown returns success - assert resp is True - log.assert_called_with(*log_params2) + # THEN shutdown returns success + assert resp is True assert 2 == log.call_count + log.assert_called_with(*log_params) async def test_shutdown_service_fail_bad_url(self, aiohttp_server, loop): # GIVEN a service is running at a given URL @@ -133,8 +129,8 @@ async def test_shutdown_service_fail_bad_url(self, aiohttp_server, loop): service = ServiceRecord("d", "test", "Southbound", "http", server.host, 1, server.port+1) log_params1 = "Shutting down the %s service %s ...", service._type, service._name resp = await utils.shutdown_service(service, loop=loop) - - # THEN shutdown fails - assert resp is False + # THEN shutdown fails + assert resp is False + assert log2.called is True + assert log1.called is True log1.assert_called_with(*log_params1) - assert log2.called is True diff --git a/tests/unit/python/fledge/services/core/api/control_service/test_acl_management.py b/tests/unit/python/fledge/services/core/api/control_service/test_acl_management.py index c610e71a57..0953357de7 100644 --- a/tests/unit/python/fledge/services/core/api/control_service/test_acl_management.py +++ b/tests/unit/python/fledge/services/core/api/control_service/test_acl_management.py @@ -5,6 +5,7 @@ import pytest from aiohttp import web +from fledge.common.audit_logger import AuditLogger from fledge.common.configuration_manager import ConfigurationManager from fledge.common.storage_client.storage_client import StorageClientAsync from fledge.common.web import middleware @@ -138,19 +139,24 @@ async def test_good_add_acl(self, client): if sys.version_info >= (3, 8): value = await mock_coro(result) insert_value = await mock_coro(insert_result) + _rv = await mock_coro(None) else: value = asyncio.ensure_future(mock_coro(result)) insert_value = asyncio.ensure_future(mock_coro(insert_result)) + _rv = asyncio.ensure_future(mock_coro(None)) storage_client_mock = MagicMock(StorageClientAsync) with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): with patch.object(storage_client_mock, 'query_tbl_with_payload', return_value=value) as query_tbl_patch: with patch.object(storage_client_mock, 'insert_into_tbl', return_value=insert_value ) as insert_tbl_patch: - resp = await client.post('/fledge/ACL', data=json.dumps(request_payload)) - assert 200 == resp.status - result = await resp.text() - json_response = json.loads(result) - assert {'name': acl_name, 'service': [], 'url': []} == json_response + with patch.object(AuditLogger, '__init__', return_value=None): + with patch.object(AuditLogger, 'information', return_value=_rv) as audit_info_patch: + resp = await client.post('/fledge/ACL', data=json.dumps(request_payload)) + assert 200 == resp.status + result = await resp.text() + json_response = json.loads(result) + assert {'name': acl_name, 'service': [], 'url': []} == json_response + audit_info_patch.assert_called_once_with('ACLAD', 
request_payload) args, _ = insert_tbl_patch.call_args_list[0] assert 'control_acl' == args[0] assert {'name': acl_name, 'service': '[]', 'url': '[]'} == json.loads(args[1]) @@ -177,7 +183,8 @@ async def test_update_acl_not_found(self, client): req_payload = {"service": []} result = {"count": 0, "rows": []} value = await mock_coro(result) if sys.version_info >= (3, 8) else asyncio.ensure_future(mock_coro(result)) - query_payload = {"return": ["name"], "where": {"column": "name", "condition": "=", "value": acl_name}} + query_payload = {"return": ["name", "service", "url"], "where": { + "column": "name", "condition": "=", "value": acl_name}} message = "ACL with name {} is not found.".format(acl_name) storage_client_mock = MagicMock(StorageClientAsync) with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): @@ -205,21 +212,18 @@ async def test_update_acl(self, client, payload): acl_q_result = {"count": 0, "rows": []} update_result = {"response": "updated", "rows_affected": 1} query_tbl_result = {"count": 1, "rows": [{"name": acl_name, "service": [], "url": []}]} - query_payload = {"return": ["name"], "where": {"column": "name", "condition": "=", "value": acl_name}} + query_payload = {"return": ["name", "service", "url"], "where": {"column": "name", "condition": "=", "value": acl_name}} if sys.version_info >= (3, 8): - rv = await mock_coro(query_tbl_result) + arv = await mock_coro(None) update_value = await mock_coro(update_result) else: - rv = asyncio.ensure_future(mock_coro(query_tbl_result)) + arv = asyncio.ensure_future(mock_coro(None)) update_value = asyncio.ensure_future(mock_coro(update_result)) storage_client_mock = MagicMock(StorageClientAsync) - acl_query_payload_service = {"return": ["entity_name"], "where": {"column": "entity_type", - "condition": "=", - "value": "service", - "and": - {"column": "name", - "condition": "=", - "value": "{}".format(acl_name)}}} + acl_query_payload_service = {"return": ["entity_name"], + "where": {"column": "entity_type", "condition": "=", "value": "service", + "and": {"column": "name", "condition": "=", "value": "{}".format( + acl_name)}}} @asyncio.coroutine def q_result(*args): @@ -234,13 +238,22 @@ def q_result(*args): return {} with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): - with patch.object(storage_client_mock, 'query_tbl_with_payload', side_effect=q_result) as patch_query_tbl: - with patch.object(storage_client_mock, 'update_tbl', return_value=update_value) as patch_update_tbl: - resp = await client.put('/fledge/ACL/{}'.format(acl_name), data=json.dumps(payload)) - assert 200 == resp.status - result = await resp.text() - json_response = json.loads(result) - assert {"message": "ACL {} updated successfully.".format(acl_name)} == json_response + with patch.object(storage_client_mock, 'query_tbl_with_payload', side_effect=q_result): + with patch.object(storage_client_mock, 'update_tbl', return_value=update_value + ) as patch_update_tbl: + with patch.object(AuditLogger, '__init__', return_value=None): + with patch.object(AuditLogger, 'information', return_value=arv) as audit_info_patch: + resp = await client.put('/fledge/ACL/{}'.format(acl_name), data=json.dumps(payload)) + assert 200 == resp.status + result = await resp.text() + json_response = json.loads(result) + assert {"message": "ACL {} updated successfully.".format(acl_name)} == json_response + args, _ = audit_info_patch.call_args + assert 'ACLCH' == args[0] + if 'url' not in payload: + payload['url'] = None + payload['name'] = 
acl_name + assert {"acl": payload, "old_acl": query_tbl_result['rows'][0]} == args[1] update_args, _ = patch_update_tbl.call_args assert 'control_acl' == update_args[0] @@ -272,12 +285,13 @@ async def test_delete_acl(self, client): payload = {"return": ["name"], "where": {"column": "name", "condition": "=", "value": acl_name}} delete_payload = {"where": {"column": "name", "condition": "=", "value": acl_name}} delete_result = {"response": "deleted", "rows_affected": 1} + message = '{} ACL deleted successfully.'.format(acl_name) if sys.version_info >= (3, 8): - value = await mock_coro(result) del_value = await mock_coro(delete_result) + arv = await mock_coro(None) else: - value = asyncio.ensure_future(mock_coro(result)) del_value = asyncio.ensure_future(mock_coro(delete_result)) + arv = asyncio.ensure_future(mock_coro(None)) acl_query_payload_service = {"return": ["entity_name"], "where": {"column": "entity_type", "condition": "=", @@ -310,13 +324,17 @@ def q_result(*args): return {} with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): - with patch.object(storage_client_mock, 'query_tbl_with_payload', side_effect=q_result) as query_tbl_patch: - with patch.object(storage_client_mock, 'delete_from_tbl', return_value=del_value) as patch_delete_tbl: - resp = await client.delete('/fledge/ACL/{}'.format(acl_name)) - assert 200 == resp.status - result = await resp.text() - json_response = json.loads(result) - assert {'message': '{} ACL deleted successfully.'.format(acl_name)} == json_response + with patch.object(storage_client_mock, 'query_tbl_with_payload', side_effect=q_result): + with patch.object(storage_client_mock, 'delete_from_tbl', return_value=del_value + ) as patch_delete_tbl: + with patch.object(AuditLogger, '__init__', return_value=None): + with patch.object(AuditLogger, 'information', return_value=arv) as audit_info_patch: + resp = await client.delete('/fledge/ACL/{}'.format(acl_name)) + assert 200 == resp.status + result = await resp.text() + json_response = json.loads(result) + assert {'message': message} == json_response + audit_info_patch.assert_called_once_with('ACLDL', {'message': message, "name": acl_name}) delete_args, _ = patch_delete_tbl.call_args assert 'control_acl' == delete_args[0] assert delete_payload == json.loads(delete_args[1]) diff --git a/tests/unit/python/fledge/services/core/api/control_service/test_entrypoint.py b/tests/unit/python/fledge/services/core/api/control_service/test_entrypoint.py new file mode 100644 index 0000000000..13c842ffdc --- /dev/null +++ b/tests/unit/python/fledge/services/core/api/control_service/test_entrypoint.py @@ -0,0 +1,550 @@ +import asyncio +import json +import sys + +from unittest.mock import MagicMock, patch +import pytest +from aiohttp import web + +from fledge.common.audit_logger import AuditLogger +from fledge.common.storage_client.storage_client import StorageClientAsync +from fledge.common.web import middleware +from fledge.services.core import connect, routes +from fledge.services.core.api.control_service import entrypoint + + +__author__ = "Ashish Jabble" +__copyright__ = "Copyright (c) 2023 Dianomic Systems Inc." 
+__license__ = "Apache 2.0" +__version__ = "${VERSION}" + + +async def mock_coro(*args): + return None if len(args) == 0 else args[0] + + +@pytest.allure.feature("unit") +@pytest.allure.story("api", "entrypoint") +class TestEntrypoint: + """ Control Flow Entrypoint API tests """ + + @pytest.fixture + def client(self, loop, test_client): + app = web.Application(loop=loop, middlewares=[middleware.optional_auth_middleware]) + routes.setup(app) + return loop.run_until_complete(test_client(app)) + + async def test_get_all_entrypoints(self, client): + storage_client_mock = MagicMock(StorageClientAsync) + storage_result = {'count': 3, 'rows': [ + {'name': 'EP1', 'description': 'EP1', 'type': 1, 'operation_name': 'OP1', 'destination': 0, + 'destination_arg': '', 'anonymous': 't'}, + {'name': 'EP2', 'description': 'Ep2', 'type': 0, 'operation_name': '', 'destination': 0, + 'destination_arg': '', 'anonymous': 'f'}, + {'name': 'EP3', 'description': 'EP3', 'type': 1, 'operation_name': 'OP2', 'destination': 0, + 'destination_arg': '', 'anonymous': 'f'}]} + expected_api_response = {"controls": [{"name": "EP1", "description": "EP1", "permitted": True}, + {"name": "EP2", "description": "Ep2", "permitted": True}, + {"name": "EP3", "description": "EP3", "permitted": True}]} + rv = await mock_coro(storage_result) if sys.version_info >= (3, 8) else asyncio.ensure_future( + mock_coro(storage_result)) + with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): + with patch.object(storage_client_mock, 'query_tbl', return_value=rv) as patch_query_tbl: + resp = await client.get('/fledge/control/manage') + assert 200 == resp.status + json_response = json.loads(await resp.text()) + assert 'controls' in json_response + assert expected_api_response == json_response + patch_query_tbl.assert_called_once_with('control_api') + + @pytest.mark.parametrize("exception, message, status_code", [ + (ValueError, 'name should be in string.', 400), + (KeyError, 'EP control entrypoint not found.', 404), + (KeyError, '', 404), + (Exception, 'Interval Server error.', 500) + ]) + async def test_bad_get_entrypoint_by_name(self, client, exception, message, status_code): + ep_name = "EP" + with patch.object(entrypoint, '_get_entrypoint', side_effect=exception(message)): + with patch.object(entrypoint._logger, 'error') as patch_logger: + resp = await client.get('/fledge/control/manage/{}'.format(ep_name)) + assert status_code == resp.status + assert message == resp.reason + result = await resp.text() + json_response = json.loads(result) + assert {"message": message} == json_response + if exception == Exception: + patch_logger.assert_called() + + async def test_get_entrypoint_by_name(self, client): + ep_name = "EP" + storage_result = {'name': ep_name, 'description': 'EP1', 'type': 'operation', 'operation_name': 'OP1', + 'destination': 'broadcast', 'anonymous': True, 'constants': {'x': '640', 'y': '480'}, + 'variables': {'rpm': '800', 'distance': '138'}, 'allow': ['admin', 'user']} + if sys.version_info >= (3, 8): + rv1 = await mock_coro(storage_result) + rv2 = await mock_coro(True) + else: + rv1 = asyncio.ensure_future(mock_coro(storage_result)) + rv2 = asyncio.ensure_future(mock_coro(True)) + with patch.object(entrypoint, '_get_entrypoint', return_value=rv1) as patch_entrypoint: + with patch.object(entrypoint, '_get_permitted', return_value=rv2) as patch_permitted: + resp = await client.get('/fledge/control/manage/{}'.format(ep_name)) + assert 200 == resp.status + json_response = json.loads(await resp.text()) + 
assert 'permitted' in json_response + assert storage_result == json_response + assert 1 == patch_permitted.call_count + patch_entrypoint.assert_called_once_with(ep_name) + + async def test_create_entrypoint_in_use(self, client): + ep_name = "SetLatheSpeed" + payload = {"name": ep_name, "description": "Set the speed of the lathe", "type": "write", + "destination": "asset", "asset": "lathe", "constants": {"units": "spin"}, + "variables": {"rpm": "100"}, "allow": [], "anonymous": False} + storage_client_mock = MagicMock(StorageClientAsync) + storage_result = {"count": 1, "rows": [{"name": ep_name}]} + rv = await mock_coro(storage_result) if sys.version_info >= (3, 8) else asyncio.ensure_future( + mock_coro(storage_result)) + with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): + with patch.object(storage_client_mock, 'query_tbl', return_value=rv) as patch_query_tbl: + resp = await client.post('/fledge/control/manage', data=json.dumps(payload)) + assert 400 == resp.status + result = await resp.text() + json_response = json.loads(result) + assert {'message': '{} control entrypoint is already in use.'.format(ep_name)} == json_response + patch_query_tbl.assert_called_once_with('control_api') + + async def test_create_entrypoint(self, client): + ep_name = "SetLatheSpeed" + payload = {"name": ep_name, "description": "Set the speed of the lathe", "type": "write", + "destination": "asset", "asset": "lathe", "constants": {"units": "spin"}, + "variables": {"rpm": "100"}, "allow": [], "anonymous": False} + storage_client_mock = MagicMock(StorageClientAsync) + storage_result = {"count": 0, "rows": []} + insert_result = {"response": "inserted", "rows_affected": 1} + + @asyncio.coroutine + def i_result(*args): + table = args[0] + insert_payload = args[1] + if table == 'control_api': + p = {'name': payload['name'], 'description': payload['description'], 'type': 0, 'operation_name': '', + 'destination': 2, 'destination_arg': payload['asset'], + 'anonymous': 'f' if payload['anonymous'] is False else 't'} + assert p == json.loads(insert_payload) + elif table == 'control_api_parameters': + if json.loads(insert_payload)['constant'] == 't': + assert {'name': ep_name, 'parameter': 'units', 'value': 'spin', 'constant': 't' + } == json.loads(insert_payload) + else: + assert {'name': ep_name, 'parameter': 'rpm', 'value': '100', 'constant': 'f' + } == json.loads(insert_payload) + elif table == 'control_api_acl': + pass + return insert_result + + if sys.version_info >= (3, 8): + rv = await mock_coro(storage_result) + arv = await mock_coro(None) + else: + rv = asyncio.ensure_future(mock_coro(storage_result)) + arv = asyncio.ensure_future(mock_coro(None)) + with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): + with patch.object(storage_client_mock, 'query_tbl', return_value=rv) as patch_query_tbl: + with patch.object(storage_client_mock, 'insert_into_tbl', side_effect=i_result + ) as patch_insert_tbl: + with patch.object(AuditLogger, '__init__', return_value=None): + with patch.object(AuditLogger, 'information', return_value=arv) as audit_info_patch: + resp = await client.post('/fledge/control/manage', data=json.dumps(payload)) + assert 200 == resp.status + result = await resp.text() + json_response = json.loads(result) + assert {'message': '{} control entrypoint has been created successfully.'.format(ep_name) + } == json_response + audit_info_patch.assert_called_once_with('CTEAD', payload) + assert 3 == patch_insert_tbl.call_count + 
patch_query_tbl.assert_called_once_with('control_api') + + async def test_update_entrypoint_not_found(self, client): + ep_name = "EP" + message = '{} control entrypoint not found.'.format(ep_name) + payload = {"where": {"column": "name", "condition": "=", "value": ep_name}} + storage_client_mock = MagicMock(StorageClientAsync) + storage_result = {"count": 0, "rows": []} + rv = await mock_coro(storage_result) if sys.version_info >= (3, 8) else asyncio.ensure_future( + mock_coro(storage_result)) + with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): + with patch.object(storage_client_mock, 'query_tbl_with_payload', return_value=rv + ) as patch_query_tbl: + resp = await client.put('/fledge/control/manage/{}'.format(ep_name)) + assert 404 == resp.status + assert message == resp.reason + result = await resp.text() + json_response = json.loads(result) + assert {"message": message} == json_response + args, kwargs = patch_query_tbl.call_args + assert 'control_api' == args[0] + assert payload == json.loads(args[1]) + + async def test_update_entrypoint(self, client): + storage_client_mock = MagicMock(StorageClientAsync) + ep_name = "SetLatheSpeed" + payload = {"description": "Updated"} + query_payload = '{"where": {"column": "name", "condition": "=", "value": "SetLatheSpeed"}}' + storage_result = {"count": 1, "rows": [{"name": ep_name}]} + ep_info = {'name': ep_name, 'description': 'Perform speed of lathe', 'type': 'operation', + 'operation_name': 'Speed', 'destination': 'broadcast', 'anonymous': False, + 'constants': {'x': '640', 'y': '480'}, 'variables': {'rpm': '800', 'distance': '138'}, 'allow': []} + new_ep_info = {'name': ep_name, 'description': payload['description'], 'type': 'operation', + 'operation_name': 'Speed', 'destination': 'broadcast', 'anonymous': False, + 'constants': {'x': '640', 'y': '480'}, 'variables': {'rpm': '800', 'distance': '138'}, + 'allow': []} + + update_payload = ('{"values": {"description": "Updated"}, ' + '"where": {"column": "name", "condition": "=", "value": "SetLatheSpeed"}}') + update_result = {"response": "updated", "rows_affected": 1} + if sys.version_info >= (3, 8): + rv1 = await mock_coro(storage_result) + rv2 = await mock_coro(ep_info) + rv3 = await mock_coro(new_ep_info) + rv4 = await mock_coro(update_result) + arv = await mock_coro(None) + else: + rv1 = asyncio.ensure_future(mock_coro(storage_result)) + arv = asyncio.ensure_future(mock_coro(None)) + rv2 = asyncio.ensure_future(mock_coro(ep_info)) + rv3 = asyncio.ensure_future(mock_coro(new_ep_info)) + rv4 = asyncio.ensure_future(mock_coro(update_result)) + with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): + with patch.object(storage_client_mock, 'query_tbl_with_payload', return_value=rv1 + ) as patch_query_tbl: + with patch.object(entrypoint, '_get_entrypoint', side_effect=[rv2, rv3]) as patch_entrypoint: + with patch.object(storage_client_mock, 'update_tbl', return_value=rv4) as patch_update_tbl: + with patch.object(AuditLogger, '__init__', return_value=None): + with patch.object(AuditLogger, 'information', return_value=arv + ) as audit_info_patch: + resp = await client.put('/fledge/control/manage/{}'.format(ep_name), + data=json.dumps(payload)) + assert 200 == resp.status + result = await resp.text() + json_response = json.loads(result) + assert {'message': '{} control entrypoint has been updated successfully.'.format( + ep_name)} == json_response + audit_info_patch.assert_called_once_with( + 'CTECH', {"entrypoint": new_ep_info, 
"old_entrypoint": ep_info}) + patch_update_tbl.assert_called_once_with('control_api', update_payload) + assert 2 == patch_entrypoint.call_count + patch_query_tbl.assert_called_once_with('control_api', query_payload) + + async def test_delete_entrypoint_not_found(self, client): + ep_name = "EP" + message = '{} control entrypoint not found.'.format(ep_name) + payload = {"where": {"column": "name", "condition": "=", "value": ep_name}} + storage_client_mock = MagicMock(StorageClientAsync) + storage_result = {"count": 0, "rows": []} + rv = await mock_coro(storage_result) if sys.version_info >= (3, 8) else asyncio.ensure_future( + mock_coro(storage_result)) + with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): + with patch.object(storage_client_mock, 'query_tbl_with_payload', return_value=rv + ) as patch_query_tbl: + resp = await client.delete('/fledge/control/manage/{}'.format(ep_name)) + assert 404 == resp.status + assert message == resp.reason + result = await resp.text() + json_response = json.loads(result) + assert {"message": message} == json_response + args, kwargs = patch_query_tbl.call_args + assert 'control_api' == args[0] + assert payload == json.loads(args[1]) + + async def test_delete_entrypoint(self, client): + ep_name = "EP" + payload = {"where": {"column": "name", "condition": "=", "value": ep_name}} + storage_result = {"count": 0, "rows": [ + {'name': ep_name, 'description': 'EP1', 'type': 'operation', 'operation_name': 'OP1', + 'destination': 'broadcast', 'anonymous': True, 'constants': {'x': '640', 'y': '480'}, + 'variables': {'rpm': '800', 'distance': '138'}, 'allow': ['admin', 'user']}]} + message = "{} control entrypoint has been deleted successfully.".format(ep_name) + if sys.version_info >= (3, 8): + rv1 = await mock_coro(storage_result) + rv2 = await mock_coro(None) + arv = await mock_coro(None) + else: + rv1 = asyncio.ensure_future(mock_coro(storage_result)) + rv2 = asyncio.ensure_future(mock_coro(None)) + arv = asyncio.ensure_future(mock_coro(None)) + storage_client_mock = MagicMock(StorageClientAsync) + del_payload = {"where": {"column": "name", "condition": "=", "value": ep_name}} + with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): + with patch.object(storage_client_mock, 'query_tbl_with_payload', return_value=rv1 + ) as patch_query_tbl: + with patch.object(storage_client_mock, 'delete_from_tbl', return_value=rv2 + ) as patch_delete_tbl: + with patch.object(AuditLogger, '__init__', return_value=None): + with patch.object(AuditLogger, 'information', return_value=arv) as audit_info_patch: + resp = await client.delete('/fledge/control/manage/{}'.format(ep_name)) + assert 200 == resp.status + result = await resp.text() + json_response = json.loads(result) + assert {"message": message} == json_response + audit_info_patch.assert_called_once_with('CTEDL', {'message': message, "name": ep_name}) + assert 3 == patch_delete_tbl.call_count + del_args = patch_delete_tbl.call_args_list + args1, _ = del_args[0] + assert 'control_api_acl' == args1[0] + assert del_payload == json.loads(args1[1]) + args2, _ = del_args[1] + assert 'control_api_parameters' == args2[0] + assert del_payload == json.loads(args2[1]) + args3, _ = del_args[2] + assert 'control_api' == args3[0] + assert del_payload == json.loads(args3[1]) + args, kwargs = patch_query_tbl.call_args + assert 'control_api' == args[0] + assert payload == json.loads(args[1]) + + @pytest.mark.parametrize("ep_type", ["operation", "write"]) + async def 
test_update_request_entrypoint(self, client, ep_type): + from fledge.services.core.service_registry.service_registry import ServiceRegistry + from fledge.common.service_record import ServiceRecord + + ServiceRegistry._registry = [] + + with patch.object(ServiceRegistry._logger, 'info'): + ServiceRegistry.register('Fledge Storage', 'Storage', '127.0.0.1', 1, 1, 'http') + ServiceRegistry.register('Dispatcher Service', 'Dispatcher', '127.0.0.1', 8, 8, 'http') + + ep_name = "SetLatheSpeed" + if ep_type == "operation": + storage_result = {'name': ep_name, 'description': 'Perform speed of lathe', 'type': 'operation', + 'operation_name': 'Speed', 'destination': 'broadcast', 'anonymous': False, + 'constants': {'x': '640', 'y': '480'}, 'variables': {'rpm': '800', 'distance': '138'}, + 'allow': []} + dispatch_payload = {'destination': 'broadcast', 'source': 'API', 'source_name': 'Anonymous', + 'operation': {'Speed': {'x': '420', 'y': '480', 'rpm': '800', 'distance': '200'}}} + payload = {"x": "420", "distance": "200"} + dispatch_endpoint = 'dispatch/operation' + else: + storage_result = {'name': ep_name, 'description': 'Perform speed of lathe', 'type': 'write', + 'destination': 'broadcast', 'anonymous': False, 'constants': {'x': '640', 'y': '480'}, + 'variables': {'rpm': '800', 'distance': '138'}, 'allow': ['admin', 'user']} + payload = {"rpm": "1200"} + dispatch_endpoint = 'dispatch/write' + dispatch_payload = {'destination': 'broadcast', 'source': 'API', 'source_name': 'Anonymous', + 'write': {'x': '640', 'y': '480', 'rpm': '1200', 'distance': '138'}} + + svc_info = (ServiceRecord("d607c5be-792f-4993-96b7-b513674e7d3b", + ep_name, "Dispatcher", "http", "127.0.0.1", "8118", "8118"), "Token") + + if sys.version_info >= (3, 8): + rv1 = await mock_coro(storage_result) + rv2 = await mock_coro(svc_info) + rv3 = await mock_coro(None) + else: + rv1 = asyncio.ensure_future(mock_coro(storage_result)) + rv2 = asyncio.ensure_future(mock_coro(svc_info)) + rv3 = asyncio.ensure_future(mock_coro(None)) + + with patch.object(entrypoint, '_get_entrypoint', return_value=rv1): + with patch.object(entrypoint, '_get_service_record_info_along_with_bearer_token', + return_value=rv2) as patch_service: + with patch.object(entrypoint, '_call_dispatcher_service_api', + return_value=rv3) as patch_call_service: + resp = await client.put('/fledge/control/request/{}'.format(ep_name), data=json.dumps(payload)) + assert 200 == resp.status + result = await resp.text() + json_response = json.loads(result) + assert {'message': '{} control entrypoint URL called.'.format(ep_name)} == json_response + if ep_type == "operation": + op = dispatch_payload['operation']['Speed'] + assert storage_result['constants']['x'] != op['x'] + assert storage_result['constants']['y'] == op['y'] + assert storage_result['variables']['distance'] != op['distance'] + assert storage_result['variables']['rpm'] == op['rpm'] + else: + write = dispatch_payload['write'] + assert storage_result['constants']['x'] == write['x'] + assert storage_result['constants']['y'] == write['y'] + assert storage_result['variables']['distance'] == write['distance'] + assert storage_result['variables']['rpm'] != write['rpm'] + patch_call_service.assert_called_once_with('http', '127.0.0.1', 8118, dispatch_endpoint, + svc_info[1], dispatch_payload) + patch_service.assert_called_once_with() + + @pytest.mark.parametrize("identifier, identifier_value", [ + (0, 'write'), + (1, 'operation'), + ('write', 0), + ('operation', 1) + ]) + async def test__get_type(self, identifier, 
identifier_value): + assert identifier_value == await entrypoint._get_type(identifier) + + @pytest.mark.parametrize("identifier, identifier_value", [ + (0, 'broadcast'), + (1, 'service'), + (2, 'asset'), + (3, 'script'), + ('broadcast', 0), + ('service', 1), + ('asset', 2), + ('script', 3) + ]) + async def test__get_destination(self, identifier, identifier_value): + assert identifier_value == await entrypoint._get_destination(identifier) + + async def test__update_params(self): + ep_name = "SetLatheSpeed" + old = {'x': '640', 'y': '480'} + new = {'x': '180', 'z': '90'} + is_constant = 't' + storage_client_mock = MagicMock(StorageClientAsync) + rows_affected = {"response": "updated", "rows_affected": 1} + rv = await mock_coro(rows_affected) if sys.version_info >= (3, 8) else ( + asyncio.ensure_future(mock_coro(rows_affected))) + tbl_name = 'control_api_parameters' + delete_payload = {"where": {"column": "name", "condition": "=", "value": ep_name, + "and": {"column": "constant", "condition": "=", "value": "t", + "and": {"column": "parameter", "condition": "=", "value": list(old)[1]}}}} + insert_payload = {'name': ep_name, 'parameter': 'z', 'value': new['z'], 'constant': 't'} + update_payload = {"where": {"column": "name", "condition": "=", "value": ep_name, + "and": {"column": "constant", "condition": "=", "value": "t", + "and": {"column": "parameter", "condition": "=", "value": "x"}}}, + "values": {"value": new['x']}} + with patch.object(storage_client_mock, 'update_tbl', return_value=rv) as patch_update_tbl: + with patch.object(storage_client_mock, 'delete_from_tbl', return_value=rv) as patch_delete_tbl: + with patch.object(storage_client_mock, 'insert_into_tbl', return_value=rv) as patch_insert_tbl: + await entrypoint._update_params(ep_name, old, new, is_constant, storage_client_mock) + args, _ = patch_insert_tbl.call_args + assert tbl_name == args[0] + assert insert_payload == json.loads(args[1]) + args, _ = patch_delete_tbl.call_args + assert tbl_name == args[0] + assert delete_payload == json.loads(args[1]) + args, _ = patch_update_tbl.call_args + assert tbl_name == args[0] + assert update_payload == json.loads(args[1]) + + async def test__get_entrypoint(self): + ep_name = "SetLatheSpeed" + storage_client_mock = MagicMock(StorageClientAsync) + payload = {"where": {"column": "name", "condition": "=", "value": ep_name}} + storage_result1 = {"count": 1, "rows": [ + {'name': ep_name, 'description': 'Perform lathe Speed', 'type': 'operation', 'operation_name': 'Speed', + 'destination': 'broadcast', 'destination_arg': '', 'anonymous': True, + 'constants': {}, 'variables': {}, + 'allow': []}]} + storage_result2 = {"count": 0, "rows": []} + if sys.version_info >= (3, 8): + rv1 = await mock_coro(storage_result1) + rv2 = await mock_coro(storage_result2) + rv3 = await mock_coro(storage_result2) + else: + rv1 = asyncio.ensure_future(mock_coro(storage_result1)) + rv2 = asyncio.ensure_future(mock_coro(storage_result2)) + rv3 = asyncio.ensure_future(mock_coro(storage_result2)) + with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): + with patch.object(storage_client_mock, 'query_tbl_with_payload', side_effect=[rv1, rv2, rv3] + ) as patch_query_tbl: + await entrypoint._get_entrypoint(ep_name) + assert 3 == patch_query_tbl.call_count + args1 = patch_query_tbl.call_args_list[0] + assert 'control_api' == args1[0][0] + assert payload == json.loads(args1[0][1]) + args2 = patch_query_tbl.call_args_list[1] + assert 'control_api_parameters' == args2[0][0] + assert payload == 
json.loads(args2[0][1]) + args3 = patch_query_tbl.call_args_list[2] + assert 'control_api_acl' == args3[0][0] + assert payload == json.loads(args3[0][1]) + + @pytest.mark.parametrize("payload", [ + {'name': 'FocusCamera', 'description': 'Perform focus on camera', 'type': 'operation', + 'operation_name': 'OP', 'destination': 'script', 'script': 'S1', 'anonymous': False}, + {'name': 'FocusCamera', 'description': 'Perform focus on camera', 'type': 'write', + 'destination': 'script', 'script': 'S1', 'constants': {'unit': 'cm'}, 'variables': {'aperture': 'f/11'}}, + {'name': 'FocusCamera', 'description': 'Perform focus on camera', 'type': 'operation', + 'operation_name': 'OP', 'destination': 'asset', 'asset': 'AS'}, + {'name': 'FocusCamera', 'description': 'Perform focus on camera', 'type': 'write', + 'destination': 'asset', 'asset': 'AS', 'constants': {'unit': 'cm'}, 'variables': {'aperture': 'f/11'}}, + {'name': 'FocusCamera', 'description': 'Perform focus on camera', 'type': 'operation', + 'operation_name': 'OP', 'destination': 'broadcast'}, + {'name': 'FocusCamera', 'description': 'Perform focus on camera', 'type': 'write', + 'destination': 'broadcast', 'constants': {'unit': 'cm'}, 'variables': {'aperture': 'f/11'}, 'anonymous': True}, + {'name': 'FocusCamera', 'description': 'Perform focus on camera', 'type': 'operation', + 'operation_name': 'OP', 'destination': 'service', 'service': 'Camera'}, + {'name': 'FocusCamera', 'description': 'Perform focus on camera', 'type': 'write', + 'destination': 'service', 'service': 'Camera', 'constants': {'unit': 'cm'}, 'variables': {'aperture': 'f/11'}}, + {'name': 'FocusCamera', 'description': 'Perform focus on camera', 'type': 'operation', + 'operation_name': 'OP', 'destination': 'script', 'script': 'S1', 'anonymous': False, + 'constants': {'unit': 'cm'}, 'variables': {'aperture': 'f/16'}} + ]) + async def test__check_parameters(self, payload): + cols = await entrypoint._check_parameters(payload) + assert isinstance(cols, dict) + + @pytest.mark.parametrize("payload", [ + {'name': 'FocusCamera', 'description': 'Perform focus on camera', 'type': 'operation', + 'operation_name': 'OP', 'destination': 'script', 'script': 'S1', 'anonymous': False}, + {'name': 'FocusCamera', 'description': 'Perform focus on camera', 'type': 'write', + 'destination': 'script', 'script': 'S1', 'constants': {'unit': 'cm'}, 'variables': {'aperture': 'f/11'}}, + {'anonymous': True}, + {'description': 'updated'}, + {'type': 'operation', 'operation_name': 'Distance'}, + {'type': 'operation', 'operation_name': 'Test', 'constants': {'unit': 'cm'}, 'variables': {'aperture': 'f/11'}}, + {'type': 'write', 'constants': {'unit': 'cm'}, 'variables': {'aperture': 'f/11'}}, + {'destination': 'asset', 'asset': 'AS'}, + {'constants': {'unit': 'cm'}}, + {'variables': {'aperture': 'f/11'}} + ]) + async def test__check_parameters_without_required_keys(self, payload): + cols = await entrypoint._check_parameters(payload, skip_required=True) + assert isinstance(cols, dict) + + @pytest.mark.parametrize("payload, exception_name, error_msg", [ + # ({"a": 1}, KeyError, + # "{'name', 'type', 'destination', 'description'} required keys are missing in request payload.") + ({"name": 1}, ValueError, "Control entrypoint name should be in string."), + ({"name": ""}, ValueError, "Control entrypoint name cannot be empty."), + ({"description": 1}, ValueError, "Control entrypoint description should be in string."), + ({"description": ""}, ValueError, "Control entrypoint description cannot be empty."), + ({"type": 
1}, ValueError, "Control entrypoint type should be in string."), + ({"type": ""}, ValueError, "Control entrypoint type cannot be empty."), + ({"type": "Blah"}, ValueError, "Possible types are: ['write', 'operation']."), + ({"type": "operation"}, KeyError, "operation_name KV pair is missing."), + ({"type": "operation", "operation_name": ""}, ValueError, "Control entrypoint operation name cannot be empty."), + ({"type": "operation", "operation_name": 1}, ValueError, + "Control entrypoint operation name should be in string."), + ({"destination": ""}, ValueError, "Control entrypoint destination cannot be empty."), + ({"destination": 1}, ValueError, "Control entrypoint destination should be in string."), + ({"destination": "Blah"}, ValueError, + "Possible destination values are: ['broadcast', 'service', 'asset', 'script']."), + ({"destination": "script", "destination_arg": ""}, KeyError, "script destination argument is missing."), + ({"destination": "script", "script": 1}, ValueError, + "Control entrypoint destination argument should be in string."), + ({"destination": "script", "script": ""}, ValueError, + "Control entrypoint destination argument cannot be empty."), + ({"anonymous": "t"}, ValueError, "anonymous should be a bool."), + ({"constants": "t"}, ValueError, "constants should be a dictionary."), + ({"type": "write", "constants": {}}, ValueError, "constants should not be empty."), + ({"type": "write", "constants": None}, ValueError, + "For type write constants must have passed in payload and cannot have empty value."), + ({"variables": "t"}, ValueError, "variables should be a dictionary."), + ({"type": "write", "constants": {"unit": "cm"}, "variables": {}}, ValueError, "variables should not be empty."), + ({"type": "write", "constants": {"unit": "cm"}, "variables": None}, ValueError, + "For type write variables must have passed in payload and cannot have empty value."), + ({"allow": "user"}, ValueError, "allow should be an array of list of users.") + ]) + async def test_bad__check_parameters(self, payload, exception_name, error_msg): + with pytest.raises(Exception) as exc_info: + await entrypoint._check_parameters(payload, skip_required=True) + assert exc_info.type is exception_name + assert exc_info.value.args[0] == error_msg + + # TODO: add more tests + """ + a) authentication based + b) allow + c) exception handling tests + """ diff --git a/tests/unit/python/fledge/services/core/api/control_service/test_script_management.py b/tests/unit/python/fledge/services/core/api/control_service/test_script_management.py index 3267ea86ee..b7d2b34517 100644 --- a/tests/unit/python/fledge/services/core/api/control_service/test_script_management.py +++ b/tests/unit/python/fledge/services/core/api/control_service/test_script_management.py @@ -1,4 +1,5 @@ import asyncio +import copy import json import sys import uuid @@ -7,6 +8,7 @@ import pytest from aiohttp import web +from fledge.common.audit_logger import AuditLogger from fledge.common.configuration_manager import ConfigurationManager from fledge.common.storage_client.storage_client import StorageClientAsync from fledge.common.web import middleware @@ -82,6 +84,7 @@ async def test_get_all_scripts(self, client): with patch.object(c_mgr, 'get_category_all_items', return_value=get_cat) as patch_get_all_items: resp = await client.get('/fledge/control/script') assert 200 == resp.status + server.Server.scheduler = None result = await resp.text() json_response = json.loads(result) assert 'scripts' in json_response @@ -160,6 +163,7 @@ async def 
mock_manual_schedule(name): with patch.object(server.Server.scheduler, 'get_schedule_by_name', return_value=get_sch) as patch_get_schedule_by_name: resp = await client.get('/fledge/control/script/{}'.format(script_name)) + server.Server.scheduler = None assert 200 == resp.status result = await resp.text() json_response = json.loads(result) @@ -268,19 +272,25 @@ async def test_good_add_script(self, client): if sys.version_info >= (3, 8): value = await mock_coro(result) insert_value = await mock_coro(insert_result) + arv = await mock_coro(None) else: value = asyncio.ensure_future(mock_coro(result)) insert_value = asyncio.ensure_future(mock_coro(insert_result)) + arv = asyncio.ensure_future(mock_coro(None)) storage_client_mock = MagicMock(StorageClientAsync) with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): - with patch.object(storage_client_mock, 'query_tbl_with_payload', return_value=value) as query_tbl_patch: + with patch.object(storage_client_mock, 'query_tbl_with_payload', return_value=value + ) as query_tbl_patch: with patch.object(storage_client_mock, 'insert_into_tbl', return_value=insert_value ) as insert_tbl_patch: - resp = await client.post('/fledge/control/script', data=json.dumps(request_payload)) - assert 200 == resp.status - result = await resp.text() - json_response = json.loads(result) - assert {'name': script_name, 'steps': []} == json_response + with patch.object(AuditLogger, '__init__', return_value=None): + with patch.object(AuditLogger, 'information', return_value=arv) as audit_info_patch: + resp = await client.post('/fledge/control/script', data=json.dumps(request_payload)) + assert 200 == resp.status + result = await resp.text() + json_response = json.loads(result) + assert {'name': script_name, 'steps': []} == json_response + audit_info_patch.assert_called_once_with('CTSAD', request_payload) args, _ = insert_tbl_patch.call_args_list[0] assert 'control_script' == args[0] expected = json.loads(args[1]) @@ -299,8 +309,7 @@ async def test_good_add_script_with_acl(self, client): insert_result = {"response": "inserted", "rows_affected": 1} script_query_payload = {"return": ["name"], "where": {"column": "name", "condition": "=", "value": script_name}} acl_query_payload = {"return": ["name"], "where": {"column": "name", "condition": "=", "value": acl_name}} - insert_value = await mock_coro(insert_result) if sys.version_info >= (3, 8) else \ - asyncio.ensure_future(mock_coro(insert_result)) + arv = await mock_coro(None) if sys.version_info >= (3, 8) else asyncio.ensure_future(mock_coro(None)) @asyncio.coroutine def q_result(*args): @@ -323,19 +332,22 @@ def i_result(*args): payload = args[1] if table == 'control_script': assert {'name': script_name, 'steps': '[]', 'acl': acl_name} == json.loads(payload) - return insert_result elif table == "acl_usage": - assert {'name': acl_name, 'entity_type': 'script', - 'entity_name': script_name} == json.loads(payload) - return insert_result + assert {'name': acl_name, 'entity_type': 'script', 'entity_name': script_name} == json.loads(payload) + return insert_result storage_client_mock = MagicMock(StorageClientAsync) with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): with patch.object(storage_client_mock, 'query_tbl_with_payload', side_effect=q_result): - with patch.object(storage_client_mock, 'insert_into_tbl', side_effect=i_result - ) as insert_tbl_patch: - resp = await client.post('/fledge/control/script', data=json.dumps(request_payload)) - assert 200 == resp.status + 
with patch.object(storage_client_mock, 'insert_into_tbl', side_effect=i_result): + with patch.object(AuditLogger, '__init__', return_value=None): + with patch.object(AuditLogger, 'information', return_value=arv) as audit_info_patch: + resp = await client.post('/fledge/control/script', data=json.dumps(request_payload)) + assert 200 == resp.status + result = await resp.text() + json_response = json.loads(result) + assert request_payload == json_response + audit_info_patch.assert_called_once_with('CTSAD', request_payload) @pytest.mark.parametrize("payload, message", [ ({}, "Nothing to update for the given payload."), @@ -367,7 +379,8 @@ async def test_update_script_not_found(self, client): req_payload = {"steps": []} result = {"count": 0, "rows": []} value = await mock_coro(result) if sys.version_info >= (3, 8) else asyncio.ensure_future(mock_coro(result)) - query_payload = {"return": ["name"], "where": {"column": "name", "condition": "=", "value": script_name}} + query_payload = {"return": ["name", "steps", "acl"], + "where": {"column": "name", "condition": "=", "value": script_name}} message = "No such {} script found.".format(script_name) storage_client_mock = MagicMock(StorageClientAsync) with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): @@ -387,7 +400,8 @@ async def test_update_script_when_acl_not_found(self, client): acl_name = "blah" payload = {"steps": [{"write": {"order": 1, "speed": 420}}], "acl": acl_name} script_result = {"count": 1, "rows": [{"name": script_name, "steps": [{"write": {"order": 1, "speed": 420}}]}]} - script_query_payload = {"return": ["name"], "where": {"column": "name", "condition": "=", "value": script_name}} + script_query_payload = {"return": ["name", "steps", "acl"], + "where": {"column": "name", "condition": "=", "value": script_name}} acl_query_payload = {"return": ["name"], "where": {"column": "name", "condition": "=", "value": acl_name}} acl_result = {"count": 0, "rows": []} @@ -420,12 +434,15 @@ def q_result(*args): async def test_update_script(self, client, payload): script_name = "test" acl_name = "testACL" - script_result = {"count": 1, "rows": [{"name": script_name, "steps": [{"write": {"order": 1, "speed": 420}}]}]} + new_script = copy.deepcopy(payload) + new_script['name'] = script_name + script_result = {"count": 1, "rows": [{"steps": [{"write": {"order": 1, "speed": 420}}]}]} update_result = {"response": "updated", "rows_affected": 1} steps_payload = payload["steps"] update_value = await mock_coro(update_result) if sys.version_info >= (3, 8) else \ asyncio.ensure_future(mock_coro(update_result)) - script_query_payload = {"return": ["name"], "where": {"column": "name", "condition": "=", "value": script_name}} + script_query_payload = {"return": ["name", "steps", "acl"], + "where": {"column": "name", "condition": "=", "value": script_name}} acl_query_payload = {"return": ["name"], "where": {"column": "name", "condition": "=", "value": acl_name}} acl_result = {"count": 1, "rows": [{"name": acl_name, "service": [], "url": []}]} insert_result = {"response": "inserted", "rows_affected": 1} @@ -453,22 +470,28 @@ def i_result(*args): 'entity_name': script_name} == json.loads(payload_ins) return insert_result + arv = await mock_coro(None) if sys.version_info >= (3, 8) else asyncio.ensure_future(mock_coro(None)) storage_client_mock = MagicMock(StorageClientAsync) with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): with patch.object(storage_client_mock, 'query_tbl_with_payload', 
side_effect=q_result): with patch.object(storage_client_mock, 'update_tbl', return_value=update_value) as patch_update_tbl: with patch.object(storage_client_mock, 'insert_into_tbl', side_effect=i_result): - resp = await client.put('/fledge/control/script/{}'.format(script_name), - data=json.dumps(payload)) - assert 200 == resp.status - result = await resp.text() - json_response = json.loads(result) - assert {"message": "Control script {} updated successfully.".format(script_name)}\ - == json_response + with patch.object(AuditLogger, '__init__', return_value=None): + with patch.object(AuditLogger, 'information', return_value=arv) as audit_info_patch: + resp = await client.put('/fledge/control/script/{}'.format(script_name), + data=json.dumps(payload)) + assert 200 == resp.status + result = await resp.text() + json_response = json.loads(result) + assert {"message": "Control script {} updated successfully.".format(script_name)}\ + == json_response + args, _ = audit_info_patch.call_args + audit_info_patch.assert_called_once_with( + 'CTSCH', {"script": new_script, "old_script": script_result['rows'][0]}) update_args, _ = patch_update_tbl.call_args assert 'control_script' == update_args[0] - update_payload = {"values": payload, "where": {"column": "name", "condition": "=", - "value": script_name}} + update_payload = {"values": payload, + "where": {"column": "name", "condition": "=", "value": script_name}} update_payload["values"]["steps"] = str(steps_payload) assert update_payload == json.loads(update_args[1]) @@ -503,6 +526,9 @@ async def test_delete_script_along_with_category_and_schedule(self, client): q_result = {"count": 0, "rows": [ {"name": script_name, "steps": [{"delay": {"order": 0, "duration": 9003}}], "acl": ""}]} q_payload = {"return": ["name"], "where": {"column": "name", "condition": "=", "value": script_name}} + q_tbl_payload = {"return": ["name"], "where": {"column": "entity_type", "condition": "=", "value": "script", + "and": {"column": "entity_name", "condition": "=", + "value": script_name}}} delete_payload = {"where": {"column": "name", "condition": "=", "value": script_name}} delete_result = {"response": "deleted", "rows_affected": 1} disable_sch_result = (True, "Schedule successfully disabled") @@ -534,11 +560,13 @@ def d_schedule(*args): get_sch = await mock_schedule(script_name) disable_sch = await mock_coro(disable_sch_result) delete_sch = await mock_coro(delete_sch_result) + arv = await mock_coro(None) else: del_cat_and_child = asyncio.ensure_future(mock_coro(delete_result)) get_sch = asyncio.ensure_future(mock_schedule(script_name)) disable_sch = asyncio.ensure_future(mock_coro(disable_sch_result)) delete_sch = asyncio.ensure_future(mock_coro(delete_sch_result)) + arv = asyncio.ensure_future(mock_coro(None)) with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): with patch.object(c_mgr, 'delete_category_and_children_recursively', @@ -553,11 +581,22 @@ def d_schedule(*args): side_effect=query_schedule) as patch_query_tbl: with patch.object(storage_client_mock, 'delete_from_tbl', side_effect=d_schedule) as patch_delete_tbl: - resp = await client.delete('/fledge/control/script/{}'.format(script_name)) - assert 200 == resp.status - result = await resp.text() - json_response = json.loads(result) - assert {'message': message} == json_response + with patch.object(AuditLogger, '__init__', return_value=None): + with patch.object(AuditLogger, 'information', + return_value=arv) as audit_info_patch: + resp = await 
client.delete('/fledge/control/script/{}'.format(script_name)) + server.Server.scheduler = None + assert 200 == resp.status + result = await resp.text() + json_response = json.loads(result) + assert {'message': message} == json_response + audit_info_patch.assert_called_once_with( + 'CTSDL', {'message': message, "name": script_name}) + patch_delete_tbl.assert_called_once_with( + 'control_script', json.dumps(delete_payload)) + args, _ = patch_query_tbl.call_args + assert 'acl_usage' == args[0] + assert json.dumps(q_tbl_payload) == args[1] patch_delete_sch.assert_called_once_with(uuid.UUID(schedule_id)) patch_disable_sch.assert_called_once_with(uuid.UUID(schedule_id)) patch_get_schedules.assert_called_once_with() @@ -572,6 +611,10 @@ async def test_delete_script_acl_not_attached(self, client): q_payload = {"return": ["name"], "where": {"column": "name", "condition": "=", "value": script_name}} delete_payload = {"where": {"column": "name", "condition": "=", "value": script_name}} delete_result = {"response": "deleted", "rows_affected": 1} + message = '{} script deleted successfully.'.format(script_name) + q_tbl_payload = {"return": ["name"], "where": {"column": "entity_type", "condition": "=", "value": "script", + "and": {"column": "entity_name", "condition": "=", + "value": script_name}}} @asyncio.coroutine def query_result(*args): @@ -593,17 +636,28 @@ def d_result(*args): elif table == "acl_usage": return delete_result + arv = await mock_coro(None) if sys.version_info >= (3, 8) else asyncio.ensure_future(mock_coro(None)) + with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): with patch.object(c_mgr, 'delete_category_and_children_recursively', side_effect=Exception): with patch.object(storage_client_mock, 'query_tbl_with_payload', side_effect=query_result) as patch_query_tbl: with patch.object(storage_client_mock, 'delete_from_tbl', side_effect=d_result) as patch_delete_tbl: - resp = await client.delete('/fledge/control/script/{}'.format(script_name)) - assert 200 == resp.status - result = await resp.text() - json_response = json.loads(result) - assert {'message': '{} script deleted successfully.'.format(script_name)} == json_response + with patch.object(AuditLogger, '__init__', return_value=None): + with patch.object(AuditLogger, 'information', + return_value=arv) as audit_info_patch: + resp = await client.delete('/fledge/control/script/{}'.format(script_name)) + assert 200 == resp.status + result = await resp.text() + json_response = json.loads(result) + assert {'message': message} == json_response + audit_info_patch.assert_called_once_with( + 'CTSDL', {'message': message, "name": script_name}) + patch_delete_tbl.assert_called_once_with('control_script', json.dumps(delete_payload)) + args, _ = patch_query_tbl.call_args + assert 'acl_usage' == args[0] + assert json.dumps(q_tbl_payload) == args[1] @pytest.mark.parametrize("payload, message", [ ({}, "parameters field is required."), @@ -688,6 +742,7 @@ async def test_schedule_found_for_configuration_script(self, client): with patch.object(server.Server.scheduler, 'get_schedules', return_value=get_sch) as patch_get_schedules: resp = await client.post('/fledge/control/script/{}/schedule'.format(script_name)) + server.Server.scheduler = None assert 400 == resp.status result = await resp.text() json_response = json.loads(result) @@ -722,7 +777,7 @@ async def test_schedule_configuration_for_script(self, client): query_payload = {"return": ["name", "steps", "acl"], "where": {"column": "name", "condition": "=", "value": 
script_name}} - message = "Schedule and configuration is created for an automation script with name {}".format(script_name) + message = "Schedule and configuration is created for control script {}".format(script_name) storage_client_mock = MagicMock(StorageClientAsync) c_mgr = ConfigurationManager(storage_client_mock) with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): @@ -737,6 +792,7 @@ async def test_schedule_configuration_for_script(self, client): with patch.object(server.Server.scheduler, 'queue_task', return_value=queue) as patch_queue_task: resp = await client.post('/fledge/control/script/{}/schedule'.format(script_name)) + server.Server.scheduler = None assert 200 == resp.status result = await resp.text() json_response = json.loads(result) diff --git a/tests/unit/python/fledge/services/core/api/plugins/test_discovery.py b/tests/unit/python/fledge/services/core/api/plugins/test_discovery.py index 591f10c82e..eac44ae114 100644 --- a/tests/unit/python/fledge/services/core/api/plugins/test_discovery.py +++ b/tests/unit/python/fledge/services/core/api/plugins/test_discovery.py @@ -170,7 +170,7 @@ async def test_bad_type_get_plugins_available(self, client): async def test_bad_get_plugins_available(self, client): log_path = "log/190801-12-01-05.log" - msg = "Fetch available plugins package request failed" + msg = "Fetch available plugins package request failed." with patch.object(common, 'fetch_available_packages', side_effect=PackageError(log_path)) as patch_fetch_available_package: resp = await client.get('/fledge/plugins/available') assert 400 == resp.status diff --git a/tests/unit/python/fledge/services/core/api/plugins/test_remove.py b/tests/unit/python/fledge/services/core/api/plugins/test_remove.py index 3068360b01..e105ccb6b3 100644 --- a/tests/unit/python/fledge/services/core/api/plugins/test_remove.py +++ b/tests/unit/python/fledge/services/core/api/plugins/test_remove.py @@ -12,12 +12,12 @@ from aiohttp import web -from fledge.services.core import routes -from fledge.services.core import connect +from fledge.common.plugin_discovery import PluginDiscovery +from fledge.common.storage_client.storage_client import StorageClientAsync +from fledge.services.core import connect, routes +from fledge.services.core.api import common from fledge.services.core.api.plugins import remove as plugins_remove from fledge.services.core.api.plugins.exceptions import * -from fledge.common.storage_client.storage_client import StorageClientAsync -from fledge.common.plugin_discovery import PluginDiscovery __author__ = "Ashish Jabble" @@ -36,22 +36,25 @@ def client(self, loop, test_client): routes.setup(app) return loop.run_until_complete(test_client(app)) - @pytest.mark.parametrize("_type", [ - "blah", - 1, - "notificationDelivery" - "notificationRule" - ]) + RUN_TESTS_BEFORE_210_VERSION = False if common.get_version() <= "2.1.0" else True + + @pytest.mark.skipif(RUN_TESTS_BEFORE_210_VERSION, reason="requires lesser or equal to core 2.1.0 version") + @pytest.mark.parametrize("_type", ["blah", 1, "notificationDelivery", "notificationRule"]) async def test_bad_type_plugin(self, client, _type): resp = await client.delete('/fledge/plugins/{}/name'.format(_type), data=None) assert 400 == resp.status assert "Invalid plugin type. 
Please provide valid type: ['north', 'south', 'filter', 'notify', 'rule']" == \ resp.reason - @pytest.mark.parametrize("name", [ - "http-south", - "random" - ]) + @pytest.mark.skipif(RUN_TESTS_BEFORE_210_VERSION, reason="requires lesser or equal to core 2.1.0 version") + @pytest.mark.parametrize("name", ["OMF", "omf", "Omf"]) + async def test_bad_update_of_inbuilt_plugin(self, client, name): + resp = await client.delete('/fledge/plugins/north/{}'.format(name), data=None) + assert 400 == resp.status + assert "Cannot delete an inbuilt OMF plugin." == resp.reason + + @pytest.mark.skipif(RUN_TESTS_BEFORE_210_VERSION, reason="requires lesser or equal to core 2.1.0 version") + @pytest.mark.parametrize("name", ["http-south", "random"]) async def test_bad_name_plugin(self, client, name): plugin_installed = [{"name": "sinusoid", "type": "south", "description": "Sinusoid Poll Plugin", "version": "1.8.1", "installedDirectory": "south/sinusoid", @@ -67,13 +70,14 @@ async def test_bad_name_plugin(self, client, name): ) as plugin_installed_patch: resp = await client.delete('/fledge/plugins/south/{}'.format(name), data=None) assert 404 == resp.status - expected_msg = "'Invalid plugin name {} or plugin is not installed'".format(name) + expected_msg = "'Invalid plugin name {} or plugin is not installed.'".format(name) assert expected_msg == resp.reason result = await resp.text() response = json.loads(result) assert {'message': expected_msg} == response plugin_installed_patch.assert_called_once_with('south', False) + @pytest.mark.skipif(RUN_TESTS_BEFORE_210_VERSION, reason="requires lesser or equal to core 2.1.0 version") async def test_plugin_in_use(self, client): async def async_mock(return_value): return return_value @@ -98,19 +102,20 @@ async def async_mock(return_value): with patch.object(PluginDiscovery, 'get_plugins_installed', return_value=plugin_installed ) as plugin_installed_patch: with patch.object(plugins_remove, '_check_plugin_usage', return_value=_rv) as plugin_usage_patch: - with patch.object(plugins_remove._logger, "error") as log_err_patch: + with patch.object(plugins_remove._logger, "warning") as patch_logger: resp = await client.delete('/fledge/plugins/{}/{}'.format(_type, name), data=None) assert 400 == resp.status - expected_msg = "{} cannot be removed. This is being used by {} instances".format(name, svc_list) + expected_msg = "{} cannot be removed. 
This is being used by {} instances.".format(name, svc_list) assert expected_msg == resp.reason result = await resp.text() response = json.loads(result) assert {'message': expected_msg} == response - assert 1 == log_err_patch.call_count - log_err_patch.assert_called_once_with(expected_msg) + assert 1 == patch_logger.call_count + patch_logger.assert_called_once_with(expected_msg) plugin_usage_patch.assert_called_once_with(_type, name) plugin_installed_patch.assert_called_once_with(_type, False) + @pytest.mark.skipif(RUN_TESTS_BEFORE_210_VERSION, reason="requires lesser or equal to core 2.1.0 version") async def test_notify_plugin_in_use(self, client): async def async_mock(return_value): return return_value @@ -135,28 +140,29 @@ async def async_mock(return_value): with patch.object(PluginDiscovery, 'get_plugins_installed', return_value=plugin_installed ) as plugin_installed_patch: with patch.object(plugins_remove, '_check_plugin_usage_in_notification_instances', return_value=_rv) as plugin_usage_patch: - with patch.object(plugins_remove._logger, "error") as log_err_patch: + with patch.object(plugins_remove._logger, "warning") as patch_logger: resp = await client.delete('/fledge/plugins/{}/{}'.format(plugin_type, plugin_installed_dirname), data=None) assert 400 == resp.status - expected_msg = "{} cannot be removed. This is being used by {} instances".format( + expected_msg = "{} cannot be removed. This is being used by {} instances.".format( plugin_installed_dirname, notify_instances_list) assert expected_msg == resp.reason result = await resp.text() response = json.loads(result) assert {'message': expected_msg} == response - assert 1 == log_err_patch.call_count - log_err_patch.assert_called_once_with(expected_msg) + assert 1 == patch_logger.call_count + patch_logger.assert_called_once_with(expected_msg) plugin_usage_patch.assert_called_once_with(plugin_installed_dirname) - plugin_installed_patch.assert_called_once_with(plugin_type_installed_dir, False) + plugin_installed_patch.assert_called_once_with(plugin_type, False) + @pytest.mark.skipif(RUN_TESTS_BEFORE_210_VERSION, reason="requires lesser or equal to core 2.1.0 version") async def test_package_already_in_progress(self, client): async def async_mock(return_value): return return_value _type = "south" name = 'http_south' - pkg_name = "fledge-south-http-south" + pkg_name = "fledge-south-http" payload = {"return": ["status"], "where": {"column": "action", "condition": "=", "value": "purge", "and": {"column": "name", "condition": "=", "value": pkg_name}}} select_row_resp = {'count': 1, 'rows': [{ @@ -166,14 +172,14 @@ async def async_mock(return_value): "status": -1, "log_file_uri": "" }]} - expected_msg = '{} package purge already in progress'.format(pkg_name) + expected_msg = '{} package purge already in progress.'.format(pkg_name) storage_client_mock = MagicMock(StorageClientAsync) plugin_installed = [{"name": "sinusoid", "type": _type, "description": "Sinusoid Poll Plugin", "version": "1.8.1", "installedDirectory": "{}/{}".format(_type, name), "packageName": "fledge-{}-sinusoid".format(_type)}, {"name": name, "type": _type, "description": "HTTP Listener South Plugin", "version": "1.8.1", "installedDirectory": "{}/{}".format(_type, name), - "packageName": "fledge-{}-{}".format(_type, name)} + "packageName": pkg_name} ] # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. 
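
Editor's note: the recurring `_rv = await mock_coro(...) if sys.version_info >= (3, 8) else asyncio.ensure_future(mock_coro(...))` idiom in these hunks exists because, from Python 3.8 onward, unittest.mock.patch() replaces an async target with an AsyncMock whose return_value is awaited directly, whereas older interpreters get a synchronous MagicMock and therefore need a real awaitable as the return value. A minimal, self-contained sketch of the same idea follows; StorageStub, make_awaitable and the table names are illustrative stand-ins, not part of this change set.

import asyncio
import sys
from unittest.mock import patch


class StorageStub:
    """Stand-in for an async storage client such as StorageClientAsync."""
    async def query_tbl_with_payload(self, table, payload):
        raise AssertionError("real storage should never be reached in a unit test")


async def make_awaitable(value):
    # Mirrors the mock_coro() helper used throughout these tests.
    return value


async def main():
    storage = StorageStub()
    rows = {"count": 0, "rows": []}
    if sys.version_info >= (3, 8):
        # patch() swaps the async method for an AsyncMock, so a plain value
        # can be passed as return_value and is awaited transparently.
        return_value = rows
    else:
        # Before 3.8 the replacement is a synchronous MagicMock, so the
        # return value itself has to be awaitable (a coroutine or Future).
        return_value = asyncio.ensure_future(make_awaitable(rows))
    with patch.object(storage, "query_tbl_with_payload", return_value=return_value):
        result = await storage.query_tbl_with_payload("packages", "{}")
        assert result == rows


asyncio.run(main())
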
@@ -203,10 +209,11 @@ async def async_mock(return_value): assert 1 == log_info_patch.call_count log_info_patch.assert_called_once_with( 'No entry found for http_south plugin in asset tracker; ' - 'or {} plugin may have been added in disabled state & never used'.format(name)) + 'or {} plugin may have been added in disabled state & never used.'.format(name)) plugin_usage_patch.assert_called_once_with(_type, name) plugin_installed_patch.assert_called_once_with(_type, False) + @pytest.mark.skipif(RUN_TESTS_BEFORE_210_VERSION, reason="requires lesser or equal to core 2.1.0 version") async def test_package_when_not_in_use(self, client): async def async_mock(return_value): @@ -214,7 +221,7 @@ async def async_mock(return_value): _type = "south" name = 'http_south' - pkg_name = "fledge-south-http-south" + pkg_name = "fledge-south-http" payload = {"return": ["status"], "where": {"column": "action", "condition": "=", "value": "purge", "and": {"column": "name", "condition": "=", "value": pkg_name}}} select_row_resp = {'count': 1, 'rows': [{ @@ -241,7 +248,7 @@ async def async_mock(return_value): "packageName": "fledge-{}-sinusoid".format(_type)}, {"name": name, "type": _type, "description": "HTTP Listener South Plugin", "version": "1.8.1", "installedDirectory": "{}/{}".format(_type, name), - "packageName": "fledge-{}-{}".format(_type, name)} + "packageName": pkg_name} ] # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. @@ -276,7 +283,7 @@ async def async_mock(return_value): result = await resp.text() response = json.loads(result) assert 'id' in response - assert '{} plugin purge started.'.format(name) == response['message'] + assert '{} plugin remove started.'.format(name) == response['message'] assert response['statusLink'].startswith('fledge/package/purge/status?id=') args, kwargs = insert_tbl_patch.call_args_list[0] assert 'packages' == args[0] @@ -295,6 +302,6 @@ async def async_mock(return_value): assert 1 == log_info_patch.call_count log_info_patch.assert_called_once_with( 'No entry found for http_south plugin in asset tracker; ' - 'or {} plugin may have been added in disabled state & never used'.format(name)) + 'or {} plugin may have been added in disabled state & never used.'.format(name)) plugin_usage_patch.assert_called_once_with(_type, name) plugin_installed_patch.assert_called_once_with(_type, False) diff --git a/tests/unit/python/fledge/services/core/api/plugins/test_update.py b/tests/unit/python/fledge/services/core/api/plugins/test_update.py index 301058ef64..d5a7fb22a5 100644 --- a/tests/unit/python/fledge/services/core/api/plugins/test_update.py +++ b/tests/unit/python/fledge/services/core/api/plugins/test_update.py @@ -13,15 +13,14 @@ from aiohttp import web -from fledge.services.core import routes -from fledge.services.core import server -from fledge.services.core import connect +from fledge.common.configuration_manager import ConfigurationManager +from fledge.common.plugin_discovery import PluginDiscovery +from fledge.common.storage_client.storage_client import StorageClientAsync +from fledge.services.core import connect, routes, server +from fledge.services.core.api import common from fledge.services.core.api.plugins import update as plugins_update from fledge.services.core.api.plugins.exceptions import * from fledge.services.core.scheduler.scheduler import Scheduler -from fledge.common.storage_client.storage_client import StorageClientAsync -from fledge.common.plugin_discovery import PluginDiscovery -from 
fledge.common.configuration_manager import ConfigurationManager __author__ = "Ashish Jabble" @@ -40,17 +39,23 @@ def client(self, loop, test_client): routes.setup(app) return loop.run_until_complete(test_client(app)) - @pytest.mark.parametrize("param", [ - "blah", - 1, - "notificationDelivery" - "notificationRule" - ]) + RUN_TESTS_BEFORE_210_VERSION = False if common.get_version() <= "2.1.0" else True + + @pytest.mark.skipif(RUN_TESTS_BEFORE_210_VERSION, reason="requires lesser or equal to core 2.1.0 version") + @pytest.mark.parametrize("param", ["blah", 1, "notificationDelivery", "notificationRule"]) async def test_bad_type_plugin(self, client, param): resp = await client.put('/fledge/plugins/{}/name/update'.format(param), data=None) assert 400 == resp.status assert "Invalid plugin type. Must be one of 'south' , north', 'filter', 'notify' or 'rule'" == resp.reason + @pytest.mark.skipif(RUN_TESTS_BEFORE_210_VERSION, reason="requires lesser or equal to core 2.1.0 version") + @pytest.mark.parametrize("name", ["OMF", "omf", "Omf"]) + async def test_bad_update_of_inbuilt_plugin(self, client, name): + resp = await client.put('/fledge/plugins/north/{}/update'.format(name), data=None) + assert 400 == resp.status + assert "Cannot update an inbuilt OMF plugin." == resp.reason + + @pytest.mark.skipif(RUN_TESTS_BEFORE_210_VERSION, reason="requires lesser or equal to core 2.1.0 version") @pytest.mark.parametrize("_type, plugin_installed_dirname", [ ('south', 'Random'), ('north', 'http_north') @@ -70,7 +75,7 @@ async def async_mock(return_value): "status": -1, "log_file_uri": "" }]} - msg = '{} package update already in progress'.format(pkg_name) + msg = '{} package update already in progress.'.format(pkg_name) storage_client_mock = MagicMock(StorageClientAsync) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. 
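
Editor's note: the hunks above and below gate each test behind a module-level flag computed once at collection time from common.get_version(), so the class only runs against a core at or below version 2.1.0. A minimal sketch of that skipif pattern is shown here, assuming get_version() returns a plain version string as in the diff; get_version is a local stub and test_inbuilt_plugin_is_rejected is an illustrative name, not a test from this change set.

import pytest


def get_version():
    # Stand-in for fledge.services.core.api.common.get_version(); the real
    # function reports the installed core version.
    return "2.1.0"


# Evaluated once at import/collection time, like the RUN_TESTS_BEFORE_210_VERSION
# flag added in these hunks. Note this is a plain string comparison, exactly as
# in the diff, so the tests are skipped whenever the reported version sorts
# after "2.1.0".
SKIP_ON_NEWER_CORE = False if get_version() <= "2.1.0" else True


@pytest.mark.skipif(SKIP_ON_NEWER_CORE, reason="requires core version 2.1.0 or earlier")
@pytest.mark.parametrize("name", ["OMF", "omf", "Omf"])
def test_inbuilt_plugin_is_rejected(name):
    # Placeholder assertion; the real tests issue a PUT/DELETE request against
    # the plugins API and check for a 400 response.
    assert name.lower() == "omf"
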
@@ -78,58 +83,47 @@ async def async_mock(return_value): _rv = await async_mock(select_row_resp) else: _rv = asyncio.ensure_future(async_mock(select_row_resp)) - - with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): - with patch.object(storage_client_mock, 'query_tbl_with_payload', - return_value=_rv) as query_tbl_patch: - resp = await client.put('/fledge/plugins/{}/{}/update'.format(_type, plugin_installed_dirname), - data=None) - assert 429 == resp.status - assert msg == resp.reason - r = await resp.text() - actual = json.loads(r) - assert {'message': msg} == actual - args, kwargs = query_tbl_patch.call_args_list[0] - assert 'packages' == args[0] - assert payload == json.loads(args[1]) + plugin_installed = [{"name": plugin_installed_dirname, "type": _type, "description": "{} plugin".format(_type), + "version": "2.1.0", "installedDirectory": "{}/{}".format(_type, plugin_installed_dirname), + "packageName": pkg_name}] + with patch.object(PluginDiscovery, 'get_plugins_installed', + return_value=plugin_installed) as plugin_installed_patch: + with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): + with patch.object(storage_client_mock, 'query_tbl_with_payload', + return_value=_rv) as query_tbl_patch: + resp = await client.put('/fledge/plugins/{}/{}/update'.format(_type, plugin_installed_dirname), + data=None) + assert 429 == resp.status + assert msg == resp.reason + r = await resp.text() + actual = json.loads(r) + assert {'message': msg} == actual + args, kwargs = query_tbl_patch.call_args_list[0] + assert 'packages' == args[0] + assert payload == json.loads(args[1]) + plugin_installed_patch.assert_called_once_with(_type, False) + + @pytest.mark.skipif(RUN_TESTS_BEFORE_210_VERSION, reason="requires lesser or equal to core 2.1.0 version") @pytest.mark.parametrize("_type, plugin_installed_dirname", [ ('south', 'Random'), ('north', 'http_north') ]) async def test_plugin_not_found(self, client, _type, plugin_installed_dirname): - async def async_mock(return_value): - return return_value - plugin_name = 'sinusoid' pkg_name = "fledge-{}-{}".format(_type, plugin_installed_dirname.lower().replace("_", "-")) - payload = {"return": ["status"], "where": {"column": "action", "condition": "=", "value": "update", - "and": {"column": "name", "condition": "=", "value": pkg_name}}} plugin_installed = [{"name": plugin_name, "type": _type, "description": "{} plugin".format(_type), "version": "1.8.1", "installedDirectory": "{}/{}".format(_type, plugin_name), "packageName": pkg_name}] - storage_client_mock = MagicMock(StorageClientAsync) - - # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. - if sys.version_info.major == 3 and sys.version_info.minor >= 8: - _rv = await async_mock({'count': 0, 'rows': []}) - else: - _rv = asyncio.ensure_future(async_mock({'count': 0, 'rows': []})) - - with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): - with patch.object(storage_client_mock, 'query_tbl_with_payload', return_value=_rv) as query_tbl_patch: - with patch.object(PluginDiscovery, 'get_plugins_installed', return_value=plugin_installed - ) as plugin_installed_patch: - resp = await client.put('/fledge/plugins/{}/{}/update'.format(_type, plugin_installed_dirname), - data=None) - assert 404 == resp.status - assert "'{} plugin is not yet installed. 
So update is not possible.'".format( - plugin_installed_dirname) == resp.reason - plugin_installed_patch.assert_called_once_with(_type, False) - args, kwargs = query_tbl_patch.call_args_list[0] - assert 'packages' == args[0] - assert payload == json.loads(args[1]) + with patch.object(PluginDiscovery, 'get_plugins_installed', + return_value=plugin_installed) as plugin_installed_patch: + resp = await client.put('/fledge/plugins/{}/{}/update'.format(_type, plugin_installed_dirname), data=None) + assert 404 == resp.status + assert "'{} plugin is not yet installed. So update is not possible.'".format( + plugin_installed_dirname) == resp.reason + plugin_installed_patch.assert_called_once_with(_type, False) + @pytest.mark.skipif(RUN_TESTS_BEFORE_210_VERSION, reason="requires lesser or equal to core 2.1.0 version") @pytest.mark.parametrize("_type, plugin_installed_dirname", [ ('south', 'Random'), ('north', 'http_north') @@ -208,6 +202,7 @@ async def async_mock(return_value): assert 'packages' == args[0] assert payload == json.loads(args[1]) + @pytest.mark.skipif(RUN_TESTS_BEFORE_210_VERSION, reason="requires lesser or equal to core 2.1.0 version") @pytest.mark.parametrize("_type, plugin_installed_dirname", [ ('south', 'Random'), ('north', 'http_north') @@ -311,6 +306,7 @@ async def async_mock(return_value): assert 'packages' == args[0] assert payload == json.loads(args[1]) + @pytest.mark.skipif(RUN_TESTS_BEFORE_210_VERSION, reason="requires lesser or equal to core 2.1.0 version") async def test_filter_plugin_update_when_not_in_use(self, client, _type='filter', plugin_installed_dirname='delta'): async def async_mock(return_value): return return_value @@ -389,6 +385,7 @@ async def async_mock(return_value): assert 'packages' == args[0] assert payload == json.loads(args[1]) + @pytest.mark.skipif(RUN_TESTS_BEFORE_210_VERSION, reason="requires lesser or equal to core 2.1.0 version") async def test_filter_update_when_in_use(self, client, _type='filter', plugin_installed_dirname='delta'): async def async_mock(return_value): return return_value @@ -492,6 +489,7 @@ async def async_mock(return_value): assert 'packages' == args[0] assert payload == json.loads(args[1]) + @pytest.mark.skipif(RUN_TESTS_BEFORE_210_VERSION, reason="requires lesser or equal to core 2.1.0 version") @pytest.mark.parametrize("_type, plugin_installed_dirname", [ ('notify', 'Telegram'), ('rule', 'OutOfBound') @@ -556,11 +554,12 @@ async def async_mock(return_value): assert 'update' == actual['action'] assert -1 == actual['status'] assert '' == actual['log_file_uri'] - plugin_installed_patch.assert_called_once_with(plugin_type_installed_dir, False) + plugin_installed_patch.assert_called_once_with(_type, False) args, kwargs = query_tbl_patch.call_args_list[0] assert 'packages' == args[0] assert payload == json.loads(args[1]) + @pytest.mark.skipif(RUN_TESTS_BEFORE_210_VERSION, reason="requires lesser or equal to core 2.1.0 version") @pytest.mark.parametrize("_type, plugin_installed_dirname", [ ('notify', 'alexa'), ('rule', 'OutOfBound') @@ -662,7 +661,7 @@ async def async_mock(return_value): notification_name, plugin_installed_dirname, _type)) cat_value_patch.assert_called_once_with(notification_name) child_cat_patch.assert_called_once_with(parent_name) - plugin_installed_patch.assert_called_once_with(plugin_type_installed_dir, False) + plugin_installed_patch.assert_called_once_with(_type, False) args, kwargs = query_tbl_patch.call_args_list[0] assert 'packages' == args[0] assert payload == json.loads(args[1]) diff --git 
a/tests/unit/python/fledge/services/core/api/test_api_utils.py b/tests/unit/python/fledge/services/core/api/test_api_utils.py index 229a437623..fd09101a82 100644 --- a/tests/unit/python/fledge/services/core/api/test_api_utils.py +++ b/tests/unit/python/fledge/services/core/api/test_api_utils.py @@ -15,10 +15,10 @@ @pytest.allure.story("api", "utils") class TestUtils: - @pytest.mark.parametrize("direction", ['south', 'north']) + @pytest.mark.parametrize("direction", ['south', 'north', 'filter', 'notificationDelivery', 'notificationRule']) def test_find_c_plugin_libs_if_empty(self, direction): - with patch('os.walk') as mockwalk: - mockwalk.return_value = [([], [], [])] + with patch('os.listdir') as mockwalk: + mockwalk.return_value = [] assert [] == utils.find_c_plugin_libs(direction) @pytest.mark.parametrize("direction, plugin_name, plugin_type, libs", [ @@ -35,49 +35,42 @@ def test_find_c_plugin_libs(self, direction, plugin_name, plugin_type, libs): def test_get_plugin_info_value_error(self): plugin_name = 'Random' - with patch.object(utils, '_find_c_util', return_value='plugins/utils/get_plugin_info') as patch_util: - with patch.object(utils, '_find_c_lib', return_value=None) as patch_lib: - with patch.object(utils._logger, 'error') as patch_logger: - assert {} == utils.get_plugin_info(plugin_name, dir='south') - assert 1 == patch_logger.call_count - args, kwargs = patch_logger.call_args - assert 'The plugin {} does not exist'.format(plugin_name) == args[0] - patch_lib.assert_called_once_with(plugin_name, 'south') - patch_util.assert_called_once_with('get_plugin_info') + with patch.object(utils, '_find_c_lib', return_value=None) as patch_lib: + with patch.object(utils._logger, 'error') as patch_logger: + assert {} == utils.get_plugin_info(plugin_name, dir='south') + assert 1 == patch_logger.call_count + args = patch_logger.call_args + assert '{} C plugin get info failed.'.format(plugin_name) == args[0][1] + patch_lib.assert_called_once_with(plugin_name, 'south') @pytest.mark.parametrize("exc_name", [Exception, OSError, subprocess.CalledProcessError]) def test_get_plugin_info_exception(self, exc_name): plugin_name = 'OMF' plugin_lib_path = 'fledge/plugins/north/{}/lib{}'.format(plugin_name, plugin_name) - with patch.object(utils, '_find_c_util', return_value='plugins/utils/get_plugin_info') as patch_util: - with patch.object(utils, '_find_c_lib', return_value=plugin_lib_path) as patch_lib: - with patch.object(utils.subprocess, "Popen", side_effect=exc_name): - with patch.object(utils._logger, 'error') as patch_logger: - assert {} == utils.get_plugin_info(plugin_name, dir='south') - assert 1 == patch_logger.call_count - args, kwargs = patch_logger.call_args - assert '%s C plugin get info failed due to %s' == args[0] - assert plugin_name == args[1] - patch_lib.assert_called_once_with(plugin_name, 'south') - patch_util.assert_called_once_with('get_plugin_info') + with patch.object(utils, '_find_c_lib', return_value=plugin_lib_path) as patch_lib: + with patch.object(utils.subprocess, "Popen", side_effect=exc_name): + with patch.object(utils._logger, 'error') as patch_logger: + assert {} == utils.get_plugin_info(plugin_name, dir='south') + assert 1 == patch_logger.call_count + args = patch_logger.call_args + assert '{} C plugin get info failed.'.format(plugin_name) == args[0][1] + patch_lib.assert_called_once_with(plugin_name, 'south') @patch('subprocess.Popen') def test_get_plugin_info(self, mock_subproc_popen): - with patch.object(utils, '_find_c_util', 
return_value='plugins/utils/get_plugin_info') as patch_util: - with patch.object(utils, '_find_c_lib', return_value='fledge/plugins/south/Random/libRandom') as patch_lib: - process_mock = MagicMock() - attrs = {'communicate.return_value': (b'{"name": "Random", "version": "1.0.0", "type": "south", ' - b'"interface": "1.0.0", "config": {"plugin" : ' - b'{ "description" : "Random C south plugin", "type" : "string", ' - b'"default" : "Random" }, "asset" : { "description" : ' - b'"Asset name", "type" : "string", ' - b'"default" : "Random" } } }\n', 'error')} - process_mock.configure_mock(**attrs) - mock_subproc_popen.return_value = process_mock - j = utils.get_plugin_info('Random', dir='south') - assert {'name': 'Random', 'type': 'south', 'version': '1.0.0', 'interface': '1.0.0', - 'config': {'plugin': {'description': 'Random C south plugin', 'type': 'string', - 'default': 'Random'}, - 'asset': {'description': 'Asset name', 'type': 'string', 'default': 'Random'}}} == j - patch_lib.assert_called_once_with('Random', 'south') - patch_util.assert_called_once_with('get_plugin_info') + with patch.object(utils, '_find_c_lib', return_value='fledge/plugins/south/Random/libRandom') as patch_lib: + process_mock = MagicMock() + attrs = {'communicate.return_value': (b'{"name": "Random", "version": "1.0.0", "type": "south", ' + b'"interface": "1.0.0", "config": {"plugin" : ' + b'{ "description" : "Random C south plugin", "type" : "string", ' + b'"default" : "Random" }, "asset" : { "description" : ' + b'"Asset name", "type" : "string", ' + b'"default" : "Random" } } }\n', 'error')} + process_mock.configure_mock(**attrs) + mock_subproc_popen.return_value = process_mock + j = utils.get_plugin_info('Random', dir='south') + assert {'name': 'Random', 'type': 'south', 'version': '1.0.0', 'interface': '1.0.0', + 'config': {'plugin': {'description': 'Random C south plugin', 'type': 'string', + 'default': 'Random'}, + 'asset': {'description': 'Asset name', 'type': 'string', 'default': 'Random'}}} == j + patch_lib.assert_called_once_with('Random', 'south') diff --git a/tests/unit/python/fledge/services/core/api/test_asset_tracker_api.py b/tests/unit/python/fledge/services/core/api/test_asset_tracker_api.py index 9914b115cf..c1f55dcc14 100644 --- a/tests/unit/python/fledge/services/core/api/test_asset_tracker_api.py +++ b/tests/unit/python/fledge/services/core/api/test_asset_tracker_api.py @@ -86,8 +86,10 @@ async def test_bad_deprecate_entry(self, client): storage_client_mock = MagicMock(StorageClientAsync) with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): with patch.object(storage_client_mock, 'query_tbl_with_payload', return_value=_rv): - resp = await client.put('/fledge/track/service/XXX/asset/XXX/event/XXXX') - assert 500 == resp.status + with patch.object(_logger, 'error') as patch_logger: + resp = await client.put('/fledge/track/service/XXX/asset/XXX/event/XXXX') + assert 500 == resp.status + assert 1 == patch_logger.call_count async def test_deprecate_entry_not_found(self, client): result = {"count": 0, "rows": []} diff --git a/tests/unit/python/fledge/services/core/api/test_audit.py b/tests/unit/python/fledge/services/core/api/test_audit.py index 8975e6a5e0..5417fe0ab3 100644 --- a/tests/unit/python/fledge/services/core/api/test_audit.py +++ b/tests/unit/python/fledge/services/core/api/test_audit.py @@ -180,9 +180,11 @@ async def async_mock_log(): async def test_get_audit_http_exception(self, client): msg = 'Internal Server Error' with patch.object(connect, 'get_storage_async', 
side_effect=Exception(msg)): - resp = await client.get('/fledge/audit') - assert 500 == resp.status - assert msg == resp.reason + with patch.object(audit._logger, 'error') as patch_logger: + resp = await client.get('/fledge/audit') + assert 500 == resp.status + assert msg == resp.reason + assert 1 == patch_logger.call_count async def test_create_audit_entry(self, client, loop): request_data = {"source": "LMTR", "severity": "warning", "details": {"message": "Engine oil pressure low"}} @@ -225,17 +227,16 @@ async def test_create_audit_entry_with_bad_data(self, client, request_data, expe async def test_create_audit_entry_with_attribute_error(self, client): request_data = {"source": "LMTR", "severity": "blah", "details": {"message": "Engine oil pressure low"}} - with patch.object(audit._logger, "error", return_value=None) as audit_logger_patch: - with patch.object(AuditLogger, "__init__", return_value=None): - resp = await client.post('/fledge/audit', data=json.dumps(request_data)) - assert 404 == resp.status - assert 'severity type blah is not supported' == resp.reason - args, kwargs = audit_logger_patch.call_args - assert ('Error in create_audit_entry(): %s | %s', 'severity type blah is not supported', "'AuditLogger' object has no attribute 'blah'") == args + with patch.object(AuditLogger, "__init__", return_value=None): + resp = await client.post('/fledge/audit', data=json.dumps(request_data)) + assert 404 == resp.status + assert 'severity type blah is not supported' == resp.reason async def test_create_audit_entry_with_exception(self, client): request_data = {"source": "LMTR", "severity": "blah", "details": {"message": "Engine oil pressure low"}} with patch.object(AuditLogger, "__init__", return_value=""): - resp = await client.post('/fledge/audit', data=json.dumps(request_data)) - assert 500 == resp.status - assert "__init__() should return None, not 'str'" == resp.reason + with patch.object(audit._logger, 'error') as patch_logger: + resp = await client.post('/fledge/audit', data=json.dumps(request_data)) + assert 500 == resp.status + assert "__init__() should return None, not 'str'" == resp.reason + assert 1 == patch_logger.call_count diff --git a/tests/unit/python/fledge/services/core/api/test_auth_mandatory.py b/tests/unit/python/fledge/services/core/api/test_auth_mandatory.py index d454ac655d..dc8976c27d 100644 --- a/tests/unit/python/fledge/services/core/api/test_auth_mandatory.py +++ b/tests/unit/python/fledge/services/core/api/test_auth_mandatory.py @@ -11,14 +11,13 @@ import pytest import sys -from fledge.common.web import middleware -from fledge.services.core import routes -from fledge.services.core import connect +from fledge.common.audit_logger import AuditLogger from fledge.common.storage_client.storage_client import StorageClientAsync -from fledge.services.core.user_model import User -from fledge.services.core.api import auth -from fledge.services.core import server +from fledge.common.web import middleware from fledge.common.web.ssl_wrapper import SSLVerifier +from fledge.services.core import connect, routes, server +from fledge.services.core.api import auth +from fledge.services.core.user_model import User __author__ = "Ashish Jabble" __copyright__ = "Copyright (c) 2017 OSIsoft, LLC" @@ -27,7 +26,8 @@ ADMIN_USER_HEADER = {'content-type': 'application/json', 'Authorization': 'admin_user_token'} NORMAL_USER_HEADER = {'content-type': 'application/json', 'Authorization': 'normal_user_token'} - +PASSWORD_ERROR_MSG = 'Password must contain at least one digit, one lowercase, one 
uppercase & one special character ' \ + 'and length of minimum 6 characters.' async def mock_coro(*args, **kwargs): return None if len(args) == 0 else args[0] @@ -58,79 +58,61 @@ async def auth_token_fixture(self, mocker, is_admin=True): _rv1 = asyncio.ensure_future(mock_coro(user['id'])) _rv2 = asyncio.ensure_future(mock_coro(None)) _rv3 = asyncio.ensure_future(mock_coro(user)) - patch_logger_info = mocker.patch.object(middleware._logger, 'info') + patch_logger_debug = mocker.patch.object(middleware._logger, 'debug') patch_validate_token = mocker.patch.object(User.Objects, 'validate_token', return_value=_rv1) patch_refresh_token = mocker.patch.object(User.Objects, 'refresh_token_expiry', return_value=_rv2) patch_user_get = mocker.patch.object(User.Objects, 'get', return_value=_rv3) - - return patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get + return patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get @pytest.mark.parametrize("payload, msg", [ - ({}, "Username is required to create user"), - ({"username": 1}, "Values should be passed in string"), - ({"username": "bla"}, "Username should be of minimum 4 characters"), - ({"username": " b"}, "Username should be of minimum 4 characters"), - ({"username": "b "}, "Username should be of minimum 4 characters"), - ({"username": " b la"}, "Username should be of minimum 4 characters"), - ({"username": "b l A "}, "Username should be of minimum 4 characters"), - ({"username": "Bla"}, "Username should be of minimum 4 characters"), - ({"username": "BLA"}, "Username should be of minimum 4 characters"), - ({"username": "aj!aj"}, "Dot, hyphen, underscore special characters are allowed for username"), - ({"username": "aj.aj", "access_method": "PEM"}, "Invalid access method. 
Must be 'any' or 'cert' or 'pwd'"), - ({"username": "aj.aj", "access_method": 1}, "Values should be passed in string"), - ({"username": "aj.aj", "access_method": 'pwd'}, "Password should not be an empty"), - ({"username": "aj_123!"}, "Dot, hyphen, underscore special characters are allowed for username"), - ({"username": "aj_123", "password": 1}, "Password must contain at least one digit, one lowercase, one uppercase" - " & one special character and length of minimum 6 characters"), - ({"username": "12-aj", "password": "blah"}, "Password must contain at least one digit, one lowercase, one " - "uppercase & one special character and length of minimum 6 " - "characters"), - ({"username": "12-aj", "password": "12B l"}, "Password must contain at least one digit, one lowercase, one " - "uppercase & one special character and length of minimum 6 " - "characters"), - ({"username": "aj.123", "password": "a!23"}, "Password must contain at least one digit, one lowercase, " - "one uppercase & one special character and length of minimum 6 " - "characters"), - ({"username": "aj.123", "password": "A!23"}, "Password must contain at least one digit, one lowercase, " - "one uppercase & one special character and length of minimum 6 " - "characters"), - ({"username": "aj.aj", "access_method": "any", "password": "blah"}, "Password must contain at least " - "one digit, one lowercase, one uppercase " - "& one special character and length " - "of minimum 6 characters"), - ({"username": "aj.aj", "access_method": "pwd", "password": "blah"}, "Password must contain at least one digit," - " one lowercase, one uppercase & one " - "special character and length of minimum " - "6 characters") + ({}, "Username is required to create user."), + ({"username": 1}, "Values should be passed in string."), + ({"username": "bla"}, "Username should be of minimum 4 characters."), + ({"username": " b"}, "Username should be of minimum 4 characters."), + ({"username": "b "}, "Username should be of minimum 4 characters."), + ({"username": " b la"}, "Username should be of minimum 4 characters."), + ({"username": "b l A "}, "Username should be of minimum 4 characters."), + ({"username": "Bla"}, "Username should be of minimum 4 characters."), + ({"username": "BLA"}, "Username should be of minimum 4 characters."), + ({"username": "aj!aj"}, "Dot, hyphen, underscore special characters are allowed for username."), + ({"username": "aj.aj", "access_method": "PEM"}, "Invalid access method. 
Must be 'any' or 'cert' or 'pwd'."), + ({"username": "aj.aj", "access_method": 1}, "Values should be passed in string."), + ({"username": "aj.aj", "access_method": 'pwd'}, "Password should not be an empty."), + ({"username": "aj_123!"}, "Dot, hyphen, underscore special characters are allowed for username."), + ({"username": "aj_123", "password": 1}, PASSWORD_ERROR_MSG), + ({"username": "12-aj", "password": "blah"}, PASSWORD_ERROR_MSG), + ({"username": "12-aj", "password": "12B l"}, PASSWORD_ERROR_MSG), + ({"username": "aj.123", "password": "a!23"}, PASSWORD_ERROR_MSG), + ({"username": "aj.123", "password": "A!23"}, PASSWORD_ERROR_MSG), + ({"username": "aj.aj", "access_method": "any", "password": "blah"}, PASSWORD_ERROR_MSG), + ({"username": "aj.aj", "access_method": "pwd", "password": "blah"}, PASSWORD_ERROR_MSG) ]) async def test_create_bad_user(self, client, mocker, payload, msg): ret_val = [{'id': '1'}] - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. _rv = await mock_coro(ret_val) if sys.version_info >= (3, 8) else asyncio.ensure_future(mock_coro(ret_val)) with patch.object(User.Objects, 'get_role_id_by_name', return_value=_rv) as patch_role_id: - with patch.object(auth._logger, 'error') as patch_logger_error: - resp = await client.post('/fledge/admin/user', data=json.dumps(payload), headers=ADMIN_USER_HEADER) - assert 400 == resp.status - assert msg == resp.reason - result = await resp.text() - json_response = json.loads(result) - assert {"message": msg} == json_response - patch_logger_error.assert_called_once_with(msg) + resp = await client.post('/fledge/admin/user', data=json.dumps(payload), headers=ADMIN_USER_HEADER) + assert 400 == resp.status + assert msg == resp.reason + result = await resp.text() + json_response = json.loads(result) + assert {"message": msg} == json_response patch_role_id.assert_called_once_with('admin') patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/admin/user') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/admin/user') @pytest.mark.parametrize("request_data", [ {"username": "AdMin", "password": "F0gl@mp", "role_id": -3}, {"username": "aj.aj", "password": "F0gl@mp", "role_id": "blah"} ]) async def test_create_user_with_bad_role(self, client, mocker, request_data): - msg = "Invalid role id" - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + msg = "Invalid role ID." + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. 
if sys.version_info.major == 3 and sys.version_info.minor >= 8: @@ -141,67 +123,68 @@ async def test_create_user_with_bad_role(self, client, mocker, request_data): _rv2 = asyncio.ensure_future(mock_coro(False)) with patch.object(User.Objects, 'get_role_id_by_name', return_value=_rv1) as patch_role_id: with patch.object(auth, 'is_valid_role', return_value=_rv2) as patch_role: - with patch.object(auth._logger, 'error') as patch_logger_err: - resp = await client.post('/fledge/admin/user', data=json.dumps(request_data), - headers=ADMIN_USER_HEADER) - assert 400 == resp.status - assert msg == resp.reason - result = await resp.text() - json_response = json.loads(result) - assert {"message": msg} == json_response - patch_logger_err.assert_called_once_with(msg) + resp = await client.post('/fledge/admin/user', data=json.dumps(request_data), headers=ADMIN_USER_HEADER) + assert 400 == resp.status + assert msg == resp.reason + result = await resp.text() + json_response = json.loads(result) + assert {"message": msg} == json_response patch_role.assert_called_once_with(request_data['role_id']) patch_role_id.assert_called_once_with('admin') patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/admin/user') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/admin/user') async def test_create_dupe_user_name(self, client): - msg = "Username already exists" - request_data = {"username": "ajtest", "password": "F0gl@mp"} + msg = "Username already exists." + request_data = {"username": "dviewer", "password": "F0gl@mp"} valid_user = {'id': 1, 'uname': 'admin', 'role_id': '1'} + users = [{'id': 1, 'uname': 'admin', 'real_name': 'Admin user', 'role_id': 1, 'description': 'admin user', + 'enabled': 't', 'access_method': 'any'}, + {'id': 2, 'uname': 'user', 'real_name': 'Normal user', 'role_id': 2, 'description': 'normal user', + 'enabled': 'f', 'access_method': 'any'}, + {'id': 3, 'uname': 'dviewer', 'real_name': 'Data Viewer', 'role_id': 4, 'description': 'Test', + 'enabled': 'f', 'access_method': 'any'}] # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. 
if sys.version_info.major == 3 and sys.version_info.minor >= 8: _rv1 = await mock_coro(valid_user['id']) _rv2 = await mock_coro(None) _rv3 = await mock_coro([{'id': '1'}]) _rv4 = await mock_coro(True) - _se1 = await mock_coro(valid_user) - _se2 = await mock_coro({'role_id': '2', 'uname': 'ajtest', 'id': '2'}) + _rv5 = await mock_coro(valid_user) + _rv6 = await mock_coro(users) else: _rv1 = asyncio.ensure_future(mock_coro(valid_user['id'])) _rv2 = asyncio.ensure_future(mock_coro(None)) _rv3 = asyncio.ensure_future(mock_coro([{'id': '1'}])) _rv4 = asyncio.ensure_future(mock_coro(True)) - _se1 = asyncio.ensure_future(mock_coro(valid_user)) - _se2 = asyncio.ensure_future(mock_coro({'role_id': '2', 'uname': 'ajtest', 'id': '2'})) + _rv5 = asyncio.ensure_future(mock_coro(valid_user)) + _rv6 = asyncio.ensure_future(mock_coro(users)) - with patch.object(middleware._logger, 'info') as patch_logger_info: + with patch.object(middleware._logger, 'debug') as patch_logger_debug: with patch.object(User.Objects, 'validate_token', return_value=_rv1) as patch_validate_token: with patch.object(User.Objects, 'refresh_token_expiry', return_value=_rv2) as patch_refresh_token: - with patch.object(User.Objects, 'get', side_effect=[_se1, _se2]) as patch_user_get: - with patch.object(User.Objects, 'get_role_id_by_name', return_value=_rv3) as patch_role_id: - with patch.object(auth, 'is_valid_role', return_value=_rv4) as patch_role: - with patch.object(auth._logger, 'warning') as patch_logger_warning: - resp = await client.post('/fledge/admin/user', data=json.dumps(request_data), - headers=ADMIN_USER_HEADER) - assert 409 == resp.status - assert msg == resp.reason - result = await resp.text() - json_response = json.loads(result) - assert {"message": msg} == json_response - patch_logger_warning.assert_called_once_with(msg) - patch_role.assert_called_once_with(2) - patch_role_id.assert_called_once_with('admin') - assert 2 == patch_user_get.call_count - args, kwargs = patch_user_get.call_args_list[0] - assert {'uid': valid_user['id']} == kwargs - args, kwargs = patch_user_get.call_args_list[1] - assert {'username': request_data['username']} == kwargs + with patch.object(User.Objects, 'get', return_value=_rv5) as patch_user_get: + with patch.object(User.Objects, 'all', return_value=_rv6) as patch_user_all: + with patch.object(User.Objects, 'get_role_id_by_name', return_value=_rv3) as patch_role_id: + with patch.object(auth, 'is_valid_role', return_value=_rv4) as patch_role: + with patch.object(auth._logger, 'warning') as patch_logger_warning: + resp = await client.post('/fledge/admin/user', data=json.dumps(request_data), + headers=ADMIN_USER_HEADER) + assert 409 == resp.status + assert msg == resp.reason + result = await resp.text() + json_response = json.loads(result) + assert {"message": msg} == json_response + patch_logger_warning.assert_called_once_with(msg) + patch_role.assert_called_once_with(2) + patch_role_id.assert_called_once_with('admin') + patch_user_all.assert_called_once_with() + patch_user_get.assert_called_once_with(uid=valid_user['id']) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/admin/user') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/admin/user') async def test_create_user(self, client): request_data = {"username": "aj123", "password": "F0gl@mp"} @@ 
-209,8 +192,14 @@ async def test_create_user(self, client): 'real_name': '', 'description': ''} expected = {} expected.update(data) + users = [{'id': 1, 'uname': 'admin', 'real_name': 'Admin user', 'role_id': 1, 'description': 'admin user', + 'enabled': 't', 'access_method': 'any'}, + {'id': 2, 'uname': 'user', 'real_name': 'Normal user', 'role_id': 2, 'description': 'normal user', + 'enabled': 'f', 'access_method': 'any'}, + {'id': 3, 'uname': 'dviewer', 'real_name': 'Data Viewer', 'role_id': 4, 'description': 'Test', + 'enabled': 'f', 'access_method': 'any'}] ret_val = {"response": "inserted", "rows_affected": 1} - msg = '{} user has been created successfully'.format(request_data['username']) + msg = '{} user has been created successfully.'.format(request_data['username']) valid_user = {'id': 1, 'uname': 'admin', 'role_id': '1'} # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. if sys.version_info.major == 3 and sys.version_info.minor >= 8: @@ -219,6 +208,7 @@ async def test_create_user(self, client): _rv3 = await mock_coro([{'id': '1'}]) _rv4 = await mock_coro(True) _rv5 = await mock_coro(ret_val) + _rv6 = await mock_coro(users) _se1 = await mock_coro(valid_user) _se2 = await mock_coro(data) else: @@ -227,117 +217,125 @@ async def test_create_user(self, client): _rv3 = asyncio.ensure_future(mock_coro([{'id': '1'}])) _rv4 = asyncio.ensure_future(mock_coro(True)) _rv5 = asyncio.ensure_future(mock_coro(ret_val)) + _rv6 = asyncio.ensure_future(mock_coro(users)) _se1 = asyncio.ensure_future(mock_coro(valid_user)) _se2 = asyncio.ensure_future(mock_coro(data)) - with patch.object(middleware._logger, 'info') as patch_logger_info: + with patch.object(middleware._logger, 'debug') as patch_logger_debug: with patch.object(User.Objects, 'validate_token', return_value=_rv1) as patch_validate_token: with patch.object(User.Objects, 'refresh_token_expiry', return_value=_rv2) as patch_refresh_token: - with patch.object(User.Objects, 'get', side_effect=[_se1, User.DoesNotExist, _se2]) as patch_user_get: - with patch.object(User.Objects, 'get_role_id_by_name', return_value=_rv3) as patch_role_id: - with patch.object(auth, 'is_valid_role', return_value=_rv4) as patch_role: - with patch.object(User.Objects, 'create', return_value=_rv5) as patch_create_user: - with patch.object(auth._logger, 'info') as patch_auth_logger_info: - resp = await client.post('/fledge/admin/user', data=json.dumps(request_data), - headers=ADMIN_USER_HEADER) - assert 200 == resp.status - r = await resp.text() - actual = json.loads(r) - assert msg == actual['message'] - assert expected['id'] == actual['user']['userId'] - assert expected['uname'] == actual['user']['userName'] - assert expected['role_id'] == actual['user']['roleId'] - patch_auth_logger_info.assert_called_once_with(msg) - patch_create_user.assert_called_once_with(request_data['username'], - request_data['password'], - int(expected['role_id']), 'any', '', '') - patch_role.assert_called_once_with(int(expected['role_id'])) - patch_role_id.assert_called_once_with('admin') - assert 3 == patch_user_get.call_count + with patch.object(User.Objects, 'get', side_effect=[_se1, _se2]) as patch_user_get: + with patch.object(User.Objects, 'all', return_value=_rv6) as patch_user_all: + with patch.object(User.Objects, 'get_role_id_by_name', return_value=_rv3) as patch_role_id: + with patch.object(auth, 'is_valid_role', return_value=_rv4) as patch_role: + with patch.object(User.Objects, 'create', return_value=_rv5) as patch_create_user: + with 
patch.object(auth._logger, 'info') as patch_auth_logger_info: + resp = await client.post('/fledge/admin/user', + data=json.dumps(request_data), + headers=ADMIN_USER_HEADER) + assert 200 == resp.status + r = await resp.text() + actual = json.loads(r) + assert msg == actual['message'] + assert expected['id'] == actual['user']['userId'] + assert expected['uname'] == actual['user']['userName'] + assert expected['role_id'] == actual['user']['roleId'] + patch_auth_logger_info.assert_called_once_with(msg) + patch_create_user.assert_called_once_with(request_data['username'], + request_data['password'], + int(expected['role_id']), 'any', '', '') + patch_role.assert_called_once_with(int(expected['role_id'])) + patch_role_id.assert_called_once_with('admin') + patch_user_all.assert_called_once_with() + assert 2 == patch_user_get.call_count args, kwargs = patch_user_get.call_args_list[0] assert {'uid': valid_user['id']} == kwargs args, kwargs = patch_user_get.call_args_list[1] - assert {'username': request_data['username']} == kwargs - args, kwargs = patch_user_get.call_args_list[2] assert {'username': expected['uname']} == kwargs patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/admin/user') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/admin/user') async def test_create_user_unknown_exception(self, client): request_data = {"username": "ajtest", "password": "F0gl@mp"} exc_msg = "Internal Server Error" valid_user = {'id': 1, 'uname': 'admin', 'role_id': '1'} - + users = [{'id': 1, 'uname': 'admin', 'real_name': 'Admin user', 'role_id': 1, 'description': 'admin user', + 'enabled': 't', 'access_method': 'any'}] # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. 
if sys.version_info.major == 3 and sys.version_info.minor >= 8: _rv1 = await mock_coro(valid_user['id']) _rv2 = await mock_coro(None) _rv3 = await mock_coro([{'id': '1'}]) _rv4 = await mock_coro(True) - _se1 = await mock_coro(valid_user) + _rv5 = await mock_coro(valid_user) + _rv6 = await mock_coro(users) else: _rv1 = asyncio.ensure_future(mock_coro(valid_user['id'])) _rv2 = asyncio.ensure_future(mock_coro(None)) _rv3 = asyncio.ensure_future(mock_coro([{'id': '1'}])) _rv4 = asyncio.ensure_future(mock_coro(True)) - _se1 = asyncio.ensure_future(mock_coro(valid_user)) - - with patch.object(middleware._logger, 'info') as patch_logger_info: + _rv5 = asyncio.ensure_future(mock_coro(valid_user)) + _rv6 = asyncio.ensure_future(mock_coro(users)) + with patch.object(middleware._logger, 'debug') as patch_logger_debug: with patch.object(User.Objects, 'validate_token', return_value=_rv1) as patch_validate_token: with patch.object(User.Objects, 'refresh_token_expiry', return_value=_rv2) as patch_refresh_token: - with patch.object(User.Objects, 'get', side_effect=[_se1, User.DoesNotExist]) as patch_user_get: - with patch.object(User.Objects, 'get_role_id_by_name', return_value=_rv3) as patch_role_id: - with patch.object(auth, 'is_valid_role', return_value=_rv4) as patch_role: - with patch.object(User.Objects, 'create', side_effect=Exception(exc_msg)) as patch_create_user: - with patch.object(auth._logger, 'exception') as patch_audit_logger_exc: - resp = await client.post('/fledge/admin/user', data=json.dumps(request_data), - headers=ADMIN_USER_HEADER) - assert 500 == resp.status - assert exc_msg == resp.reason - result = await resp.text() - json_response = json.loads(result) - assert {"message": exc_msg} == json_response - patch_audit_logger_exc.assert_called_once_with(exc_msg) - patch_create_user.assert_called_once_with(request_data['username'], - request_data['password'], 2, 'any', '', '') - patch_role.assert_called_once_with(2) - patch_role_id.assert_called_once_with('admin') - assert 2 == patch_user_get.call_count - args, kwargs = patch_user_get.call_args_list[0] - assert {'uid': valid_user['id']} == kwargs - args, kwargs = patch_user_get.call_args_list[1] - assert {'username': request_data['username']} == kwargs + with patch.object(User.Objects, 'get', return_value=_rv5) as patch_user_get: + with patch.object(User.Objects, 'all', return_value=_rv6) as patch_user_all: + with patch.object(User.Objects, 'get_role_id_by_name', return_value=_rv3) as patch_role_id: + with patch.object(auth, 'is_valid_role', return_value=_rv4) as patch_role: + with patch.object(User.Objects, 'create', side_effect=Exception( + exc_msg)) as patch_create_user: + with patch.object(auth._logger, 'error') as patch_logger: + resp = await client.post('/fledge/admin/user', + data=json.dumps(request_data), + headers=ADMIN_USER_HEADER) + assert 500 == resp.status + assert exc_msg == resp.reason + result = await resp.text() + json_response = json.loads(result) + assert {"message": exc_msg} == json_response + args = patch_logger.call_args + assert 'Failed to create user.' 
== args[0][1] + patch_create_user.assert_called_once_with( + request_data['username'], request_data['password'], 2, 'any', '', '') + patch_role.assert_called_once_with(2) + patch_role_id.assert_called_once_with('admin') + patch_user_all.assert_called_once_with() + patch_user_get.assert_called_once_with(uid=valid_user['id']) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/admin/user') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/admin/user') async def test_create_user_value_error(self, client): valid_user = {'id': 1, 'uname': 'admin', 'role_id': '1'} request_data = {"username": "ajtest", "password": "F0gl@mp"} exc_msg = "Value Error occurred" - + users = [{'id': 1, 'uname': 'admin', 'real_name': 'Admin user', 'role_id': 1, 'description': 'admin user', + 'enabled': 't', 'access_method': 'any'}] # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. if sys.version_info.major == 3 and sys.version_info.minor >= 8: _rv1 = await mock_coro(valid_user['id']) _rv2 = await mock_coro(None) _rv3 = await mock_coro([{'id': '1'}]) _rv4 = await mock_coro(True) - _se1 = await mock_coro(valid_user) + _rv5 = await mock_coro(valid_user) + _rv6 = await mock_coro(users) else: _rv1 = asyncio.ensure_future(mock_coro(valid_user['id'])) _rv2 = asyncio.ensure_future(mock_coro(None)) _rv3 = asyncio.ensure_future(mock_coro([{'id': '1'}])) _rv4 = asyncio.ensure_future(mock_coro(True)) - _se1 = asyncio.ensure_future(mock_coro(valid_user)) - - with patch.object(middleware._logger, 'info') as patch_logger_info: + _rv5 = asyncio.ensure_future(mock_coro(valid_user)) + _rv6 = asyncio.ensure_future(mock_coro(users)) + with patch.object(middleware._logger, 'debug') as patch_logger_debug: with patch.object(User.Objects, 'validate_token', return_value=_rv1) as patch_validate_token: with patch.object(User.Objects, 'refresh_token_expiry', return_value=_rv2) as patch_refresh_token: - with patch.object(User.Objects, 'get', side_effect=[_se1, User.DoesNotExist]) as patch_user_get: - with patch.object(User.Objects, 'get_role_id_by_name', return_value=_rv3) as patch_role_id: - with patch.object(auth, 'is_valid_role', return_value=_rv4) as patch_role: - with patch.object(User.Objects, 'create', side_effect=ValueError(exc_msg)) as patch_create_user: - with patch.object(auth._logger, 'error') as patch_audit_logger_error: + with patch.object(User.Objects, 'get', return_value=_rv5) as patch_user_get: + with patch.object(User.Objects, 'all', return_value=_rv6) as patch_user_all: + with patch.object(User.Objects, 'get_role_id_by_name', return_value=_rv3) as patch_role_id: + with patch.object(auth, 'is_valid_role', return_value=_rv4) as patch_role: + with patch.object(User.Objects, 'create', side_effect=ValueError( + exc_msg)) as patch_create_user: resp = await client.post('/fledge/admin/user', data=json.dumps(request_data), headers=ADMIN_USER_HEADER) assert 400 == resp.status @@ -345,27 +343,23 @@ async def test_create_user_value_error(self, client): result = await resp.text() json_response = json.loads(result) assert {"message": exc_msg} == json_response - patch_audit_logger_error.assert_called_once_with(exc_msg) - patch_create_user.assert_called_once_with(request_data['username'], - request_data['password'], 2, 'any', '', '') - 
patch_role.assert_called_once_with(2) - patch_role_id.assert_called_once_with('admin') - assert 2 == patch_user_get.call_count - args, kwargs = patch_user_get.call_args_list[0] - assert {'uid': valid_user['id']} == kwargs - args, kwargs = patch_user_get.call_args_list[1] - assert {'username': request_data['username']} == kwargs + patch_create_user.assert_called_once_with( + request_data['username'], request_data['password'], 2, 'any', '', '') + patch_role.assert_called_once_with(2) + patch_role_id.assert_called_once_with('admin') + patch_user_all.assert_called_once_with() + patch_user_get.assert_called_once_with(uid=valid_user['id']) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/admin/user') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/admin/user') @pytest.mark.parametrize("payload, status_reason", [ - ({"realname": "dd"}, 'Nothing to update'), - ({"real_name": ""}, 'Real Name should not be empty'), - ({"real_name": " "}, 'Real Name should not be empty') + ({"realname": "dd"}, 'Nothing to update.'), + ({"real_name": ""}, 'Real Name should not be empty.'), + ({"real_name": " "}, 'Real Name should not be empty.') ]) async def test_bad_update_me(self, client, mocker, payload, status_reason): - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) user_info = {'role_id': '1', 'id': '2', 'uname': 'user', 'access_method': 'any', 'real_name': 'Sat', 'description': 'Normal User'} @@ -379,13 +373,13 @@ async def test_bad_update_me(self, client, mocker, payload, status_reason): patch_get_user.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/user') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/user') @pytest.mark.parametrize("payload", [ {"real_name": "AJ"}, {"real_name": " AJ "}, {"real_name": "AJ "}, {"real_name": " AJ"} ]) async def test_update_me(self, client, mocker, payload): - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) user_info = {'role_id': '1', 'id': '2', 'uname': 'user', 'access_method': 'any', 'real_name': 'AJ', 'description': 'Normal User'} @@ -418,18 +412,18 @@ async def test_update_me(self, client, mocker, payload): patch_get_user.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/user') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/user') @pytest.mark.parametrize("payload, status_reason", [ - ({"realname": "dd"}, 'Nothing to update'), - ({"real_name": ""}, 'Real Name should not be empty'), - ({"real_name": " "}, 'Real Name should not 
be empty'), - ({"access_method": ""}, 'Access method should not be empty'), - ({"access_method": "blah"}, "Accepted access method values are ('any', 'pwd', 'cert')") + ({"realname": "dd"}, 'Nothing to update.'), + ({"real_name": ""}, 'Real Name should not be empty.'), + ({"real_name": " "}, 'Real Name should not be empty.'), + ({"access_method": ""}, 'Access method should not be empty.'), + ({"access_method": "blah"}, "Accepted access method values are ('any', 'pwd', 'cert').") ]) async def test_bad_update_user(self, client, mocker, payload, status_reason): uid = 2 - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) user_info = {'role_id': '1', 'id': str(uid), 'uname': 'user', 'access_method': 'any', 'real_name': 'Sat', 'description': 'Normal User'} @@ -453,7 +447,7 @@ async def test_bad_update_user(self, client, mocker, payload, status_reason): patch_role_id.assert_called_once_with('admin') patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/admin/{}'.format(uid)) + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/admin/{}'.format(uid)) @pytest.mark.parametrize("payload, exp_result", [ ({"real_name": "Sat"}, {'role_id': '2', 'id': '2', 'uname': 'user', 'access_method': 'any', @@ -469,7 +463,7 @@ async def test_bad_update_user(self, client, mocker, payload, status_reason): ]) async def test_update_user(self, client, mocker, payload, exp_result): uid = 2 - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. 
if sys.version_info.major == 3 and sys.version_info.minor >= 8: @@ -493,98 +487,95 @@ async def test_update_user(self, client, mocker, payload, exp_result): patch_role_id.assert_called_once_with('admin') patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/admin/{}'.format( + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/admin/{}'.format( uid)) @pytest.mark.parametrize("request_data, msg", [ - ({}, "Current or new password is missing"), - ({"invalid": 1}, "Current or new password is missing"), - ({"current_password": 1}, "Current or new password is missing"), - ({"current_password": "fledge"}, "Current or new password is missing"), - ({"new_password": 1}, "Current or new password is missing"), - ({"new_password": "fledge"}, "Current or new password is missing"), - ({"current_pwd": "fledge", "new_pwd": "fledge1"}, "Current or new password is missing"), - ({"current_password": "F0gl@mp", "new_password": "F0gl@mp"}, "New password should not be same as current password"), - ({"current_password": "F0gl@mp", "new_password": "fledge"}, "Password must contain at least one digit, one lowercase, one uppercase & one special character and length of minimum 6 characters"), - ({"current_password": "F0gl@mp", "new_password": 1}, "Password must contain at least one digit, one lowercase, one uppercase & one special character and length of minimum 6 characters") + ({}, "Current or new password is missing."), + ({"invalid": 1}, "Current or new password is missing."), + ({"current_password": 1}, "Current or new password is missing."), + ({"current_password": "fledge"}, "Current or new password is missing."), + ({"new_password": 1}, "Current or new password is missing."), + ({"new_password": "fledge"}, "Current or new password is missing."), + ({"current_pwd": "fledge", "new_pwd": "fledge1"}, "Current or new password is missing."), + ({"current_password": "F0gl@mp", "new_password": "F0gl@mp"}, + "New password should not be the same as current password."), + ({"current_password": "F0gl@mp", "new_password": "fledge"}, PASSWORD_ERROR_MSG), + ({"current_password": "F0gl@mp", "new_password": 1}, PASSWORD_ERROR_MSG) ]) async def test_update_password_with_bad_data(self, client, request_data, msg): uid = 2 - with patch.object(middleware._logger, 'info') as patch_logger_info: - with patch.object(auth._logger, 'warning') as patch_logger_warning: - resp = await client.put('/fledge/user/{}/password'.format(uid), data=json.dumps(request_data)) - assert 400 == resp.status - assert msg == resp.reason - patch_logger_warning.assert_called_once_with(msg) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/user/{}/password' - .format(uid)) + with patch.object(middleware._logger, 'debug') as patch_logger_debug: + resp = await client.put('/fledge/user/{}/password'.format(uid), data=json.dumps(request_data)) + assert 400 == resp.status + assert msg == resp.reason + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'PUT', + '/fledge/user/{}/password'.format(uid)) async def test_update_password_with_invalid_current_password(self, client): request_data = {"current_password": "blah", "new_password": "F0gl@mp"} uid = 2 - msg = 'Invalid current password' + msg = 'Invalid current password.' 
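# A note on PASSWORD_ERROR_MSG used in the parametrised cases above: it is assumed to be
# a module-level constant carrying the same password-rule text the removed literals
# spelled out; its exact wording lives in the test module and is only sketched here:
PASSWORD_ERROR_MSG = ("Password must contain at least one digit, one lowercase, one uppercase & "
                      "one special character and length of minimum 6 characters")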
# Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. _rv = await mock_coro(None) if sys.version_info >= (3, 8) else asyncio.ensure_future(mock_coro(None)) - with patch.object(middleware._logger, 'info') as patch_logger_info: + with patch.object(middleware._logger, 'debug') as patch_logger_debug: with patch.object(User.Objects, 'is_user_exists', return_value=_rv) as patch_user_exists: - with patch.object(auth._logger, 'warning') as patch_logger_warning: - resp = await client.put('/fledge/user/{}/password'.format(uid), data=json.dumps(request_data)) - assert 404 == resp.status - assert msg == resp.reason - patch_logger_warning.assert_called_once_with(msg) + resp = await client.put('/fledge/user/{}/password'.format(uid), data=json.dumps(request_data)) + assert 404 == resp.status + assert msg == resp.reason patch_user_exists.assert_called_once_with(str(uid), request_data['current_password']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'PUT', - '/fledge/user/{}/password'.format(uid)) + patch_logger_debug.assert_called_once_with('Received %s request for %s', + 'PUT', '/fledge/user/{}/password'.format(uid)) @pytest.mark.parametrize("exception_name, status_code, msg", [ (ValueError, 400, 'None'), - (User.DoesNotExist, 404, 'User with id:<2> does not exist'), - (User.PasswordAlreadyUsed, 400, 'The new password should be different from previous 3 used') + (User.DoesNotExist, 404, 'User with ID:<2> does not exist.'), + (User.PasswordAlreadyUsed, 400, 'The new password should be different from previous 3 used.') ]) async def test_update_password_exceptions(self, client, exception_name, status_code, msg): request_data = {"current_password": "fledge", "new_password": "F0gl@mp"} uid = 2 # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. _rv = await mock_coro(uid) if sys.version_info >= (3, 8) else asyncio.ensure_future(mock_coro(uid)) - with patch.object(middleware._logger, 'info') as patch_logger_info: + with patch.object(middleware._logger, 'debug') as patch_logger_debug: with patch.object(User.Objects, 'is_user_exists', return_value=_rv) as patch_user_exists: with patch.object(User.Objects, 'update', side_effect=exception_name(msg)) as patch_update: - with patch.object(auth._logger, 'warning') as patch_logger_warning: - resp = await client.put('/fledge/user/{}/password'.format(uid), data=json.dumps(request_data)) - assert status_code == resp.status - assert msg == resp.reason - patch_logger_warning.assert_called_once_with(msg) + resp = await client.put('/fledge/user/{}/password'.format(uid), data=json.dumps(request_data)) + assert status_code == resp.status + assert msg == resp.reason patch_update.assert_called_once_with(2, {'password': request_data['new_password']}) patch_user_exists.assert_called_once_with(str(uid), request_data['current_password']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/user/{}/password' - .format(uid)) + patch_logger_debug.assert_called_once_with('Received %s request for %s', + 'PUT', '/fledge/user/{}/password'.format(uid)) async def test_update_password_unknown_exception(self, client): request_data = {"current_password": "fledge", "new_password": "F0gl@mp"} uid = 2 msg = 'Something went wrong' + logger_msg = 'Failed to update the user ID:<{}>.'.format(uid) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. 
_rv = await mock_coro(uid) if sys.version_info >= (3, 8) else asyncio.ensure_future(mock_coro(uid)) - with patch.object(middleware._logger, 'info') as patch_logger_info: + with patch.object(middleware._logger, 'debug') as patch_logger_debug: with patch.object(User.Objects, 'is_user_exists', return_value=_rv) as patch_user_exists: with patch.object(User.Objects, 'update', side_effect=Exception(msg)) as patch_update: - with patch.object(auth._logger, 'exception') as patch_logger_exception: + with patch.object(auth._logger, 'error') as patch_logger: resp = await client.put('/fledge/user/{}/password'.format(uid), data=json.dumps(request_data)) assert 500 == resp.status assert msg == resp.reason - patch_logger_exception.assert_called_once_with(msg) + args = patch_logger.call_args + assert logger_msg == args[0][1] patch_update.assert_called_once_with(2, {'password': request_data['new_password']}) patch_user_exists.assert_called_once_with(str(uid), request_data['current_password']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/user/{}/password' - .format(uid)) + patch_logger_debug.assert_called_once_with('Received %s request for %s', + 'PUT', '/fledge/user/{}/password'.format(uid)) async def test_update_password(self, client): request_data = {"current_password": "fledge", "new_password": "F0gl@mp"} ret_val = {'response': 'updated', 'rows_affected': 1} uname = 'aj' user_id = 2 - msg = "Password has been updated successfully for user id:<{}>".format(user_id) + msg = "Password has been updated successfully for user ID:<{}>.".format(user_id) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. if sys.version_info.major == 3 and sys.version_info.minor >= 8: @@ -594,7 +585,7 @@ async def test_update_password(self, client): _rv1 = asyncio.ensure_future(mock_coro(user_id)) _rv2 = asyncio.ensure_future(mock_coro(ret_val)) - with patch.object(middleware._logger, 'info') as patch_logger_info: + with patch.object(middleware._logger, 'debug') as patch_logger_debug: with patch.object(User.Objects, 'is_user_exists', return_value=_rv1) as patch_user_exists: with patch.object(User.Objects, 'update', return_value=_rv2) as patch_update: with patch.object(auth._logger, 'info') as patch_auth_logger_info: @@ -606,56 +597,51 @@ async def test_update_password(self, client): patch_auth_logger_info.assert_called_once_with(msg) patch_update.assert_called_once_with(user_id, {'password': request_data['new_password']}) patch_user_exists.assert_called_once_with(str(user_id), request_data['current_password']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/user/{}/password' - .format(user_id)) + patch_logger_debug.assert_called_once_with('Received %s request for %s', + 'PUT', '/fledge/user/{}/password'.format(user_id)) - @pytest.mark.parametrize("request_data", [ - 'blah', - '123blah' - ]) + @pytest.mark.parametrize("request_data", ['blah', '123blah']) async def test_delete_bad_user(self, client, mocker, request_data): msg = "invalid literal for int() with base 10: '{}'".format(request_data) ret_val = [{'id': '1'}] - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. 
_rv = await mock_coro(ret_val) if sys.version_info >= (3, 8) else asyncio.ensure_future(mock_coro(ret_val)) with patch.object(User.Objects, 'get_role_id_by_name', return_value=_rv) as patch_role_id: - with patch.object(auth._logger, 'warning') as patch_auth_logger_warn: - resp = await client.delete('/fledge/admin/{}/delete'.format(request_data), headers=ADMIN_USER_HEADER) - assert 400 == resp.status - assert msg == resp.reason - patch_auth_logger_warn.assert_called_once_with(msg) + resp = await client.delete('/fledge/admin/{}/delete'.format(request_data), headers=ADMIN_USER_HEADER) + assert 400 == resp.status + assert msg == resp.reason patch_role_id.assert_called_once_with('admin') patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'DELETE', - '/fledge/admin/{}/delete'.format(request_data)) + patch_logger_debug.assert_called_once_with('Received %s request for %s', + 'DELETE', '/fledge/admin/{}/delete'.format(request_data)) async def test_delete_admin_user(self, client, mocker): - msg = "Super admin user can not be deleted" + msg = "Super admin user can not be deleted." ret_val = [{'id': '1'}] - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. _rv = await mock_coro(ret_val) if sys.version_info >= (3, 8) else asyncio.ensure_future(mock_coro(ret_val)) with patch.object(User.Objects, 'get_role_id_by_name', return_value=_rv) as patch_role_id: with patch.object(auth._logger, 'warning') as patch_auth_logger_warn: resp = await client.delete('/fledge/admin/1/delete', headers=ADMIN_USER_HEADER) - assert 406 == resp.status + assert 403 == resp.status assert msg == resp.reason patch_auth_logger_warn.assert_called_once_with(msg) patch_role_id.assert_called_once_with('admin') patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'DELETE', '/fledge/admin/1/delete') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'DELETE', '/fledge/admin/1/delete') async def test_delete_own_account(self, client, mocker): - msg = "You can not delete your own account" + msg = "You can not delete your own account." ret_val = [{'id': '2'}] - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker, is_admin=False) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. 
_rv = await mock_coro(ret_val) if sys.version_info >= (3, 8) else asyncio.ensure_future(mock_coro(ret_val)) @@ -669,12 +655,12 @@ async def test_delete_own_account(self, client, mocker): patch_user_get.assert_called_once_with(uid=2) patch_refresh_token.assert_called_once_with(NORMAL_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(NORMAL_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'DELETE', '/fledge/admin/2/delete') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'DELETE', '/fledge/admin/2/delete') async def test_delete_invalid_user(self, client, mocker): ret_val = {"response": "deleted", "rows_affected": 0} - msg = 'User with id:<2> does not exist' - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + msg = 'User with ID:<2> does not exist.' + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. if sys.version_info.major == 3 and sys.version_info.minor >= 8: @@ -685,22 +671,20 @@ async def test_delete_invalid_user(self, client, mocker): _rv2 = asyncio.ensure_future(mock_coro(ret_val)) with patch.object(User.Objects, 'get_role_id_by_name', return_value=_rv1) as patch_role_id: - with patch.object(auth._logger, 'warning') as patch_auth_logger_warning: - with patch.object(User.Objects, 'delete', return_value=_rv2) as patch_user_delete: - resp = await client.delete('/fledge/admin/2/delete', headers=ADMIN_USER_HEADER) - assert 404 == resp.status - assert msg == resp.reason - patch_user_delete.assert_called_once_with(2) - patch_auth_logger_warning.assert_called_once_with(msg) + with patch.object(User.Objects, 'delete', return_value=_rv2) as patch_user_delete: + resp = await client.delete('/fledge/admin/2/delete', headers=ADMIN_USER_HEADER) + assert 404 == resp.status + assert msg == resp.reason + patch_user_delete.assert_called_once_with(2) patch_role_id.assert_called_once_with('admin') patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'DELETE', '/fledge/admin/2/delete') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'DELETE', '/fledge/admin/2/delete') async def test_delete_user(self, client, mocker): ret_val = {"response": "deleted", "rows_affected": 1} - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. 
if sys.version_info.major == 3 and sys.version_info.minor >= 8: @@ -716,63 +700,62 @@ async def test_delete_user(self, client, mocker): resp = await client.delete('/fledge/admin/2/delete', headers=ADMIN_USER_HEADER) assert 200 == resp.status r = await resp.text() - assert {'message': 'User has been deleted successfully'} == json.loads(r) + assert {'message': 'User has been deleted successfully.'} == json.loads(r) patch_user_delete.assert_called_once_with(2) - patch_auth_logger_info.assert_called_once_with('User with id:<2> has been deleted successfully.') + patch_auth_logger_info.assert_called_once_with('User with ID:<2> has been deleted successfully.') patch_role_id.assert_called_once_with('admin') patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'DELETE', '/fledge/admin/2/delete') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'DELETE', '/fledge/admin/2/delete') @pytest.mark.parametrize("exception_name, code, msg", [ (ValueError, 400, 'None'), - (User.DoesNotExist, 404, 'User with id:<2> does not exist') + (User.DoesNotExist, 404, 'User with ID:<2> does not exist.') ]) async def test_delete_user_exceptions(self, client, mocker, exception_name, code, msg): ret_val = [{'id': '1'}] - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. 
_rv = await mock_coro(ret_val) if sys.version_info >= (3, 8) else asyncio.ensure_future(mock_coro(ret_val)) with patch.object(User.Objects, 'get_role_id_by_name', return_value=_rv) as patch_role_id: - with patch.object(auth._logger, 'warning') as patch_auth_logger_warn: - with patch.object(User.Objects, 'delete', side_effect=exception_name(msg)) as patch_user_delete: - resp = await client.delete('/fledge/admin/2/delete', headers=ADMIN_USER_HEADER) - assert code == resp.status - assert msg == resp.reason - patch_user_delete.assert_called_once_with(2) - patch_auth_logger_warn.assert_called_once_with(msg) + with patch.object(User.Objects, 'delete', side_effect=exception_name(msg)) as patch_user_delete: + resp = await client.delete('/fledge/admin/2/delete', headers=ADMIN_USER_HEADER) + assert code == resp.status + assert msg == resp.reason + patch_user_delete.assert_called_once_with(2) patch_role_id.assert_called_once_with('admin') patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'DELETE', '/fledge/admin/2/delete') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'DELETE', '/fledge/admin/2/delete') async def test_delete_user_unknown_exception(self, client, mocker): msg = 'Something went wrong' ret_val = [{'id': '1'}] - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. _rv = await mock_coro(ret_val) if sys.version_info >= (3, 8) else asyncio.ensure_future(mock_coro(ret_val)) with patch.object(User.Objects, 'get_role_id_by_name', return_value=_rv) as patch_role_id: - with patch.object(auth._logger, 'exception') as patch_auth_logger_exc: + with patch.object(auth._logger, 'error') as patch_logger: with patch.object(User.Objects, 'delete', side_effect=Exception(msg)) as patch_user_delete: resp = await client.delete('/fledge/admin/2/delete', headers=ADMIN_USER_HEADER) assert 500 == resp.status assert msg == resp.reason patch_user_delete.assert_called_once_with(2) - patch_auth_logger_exc.assert_called_once_with(msg) + args = patch_logger.call_args + assert 'Failed to delete the user ID:<2>.' == args[0][1] patch_role_id.assert_called_once_with('admin') patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'DELETE', '/fledge/admin/2/delete') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'DELETE', '/fledge/admin/2/delete') async def test_logout(self, client, mocker): ret_val = {'response': 'deleted', 'rows_affected': 1} - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. 
_rv = await mock_coro(ret_val) if sys.version_info >= (3, 8) else asyncio.ensure_future(mock_coro(ret_val)) @@ -783,25 +766,23 @@ async def test_logout(self, client, mocker): r = await resp.text() assert {'logout': True} == json.loads(r) patch_delete_user_token.assert_called_once_with("2") - patch_auth_logger_info.assert_called_once_with('User with id:<2> has been logged out successfully') + patch_auth_logger_info.assert_called_once_with('User with ID:<2> has been logged out successfully.') patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/2/logout') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/2/logout') async def test_logout_with_bad_user(self, client, mocker): ret_val = {'response': 'deleted', 'rows_affected': 0} user_id = 111 - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. _rv = await mock_coro(ret_val) if sys.version_info >= (3, 8) else asyncio.ensure_future(mock_coro(ret_val)) with patch.object(User.Objects, 'delete_user_tokens', return_value=_rv) as patch_delete_user_token: - with patch.object(auth._logger, 'warning') as patch_logger: - resp = await client.put('/fledge/{}/logout'.format(user_id), headers=ADMIN_USER_HEADER) - assert 404 == resp.status - assert 'Not Found' == resp.reason - patch_logger.assert_called_once_with('Logout requested with bad user') + resp = await client.put('/fledge/{}/logout'.format(user_id), headers=ADMIN_USER_HEADER) + assert 404 == resp.status + assert 'Not Found' == resp.reason patch_delete_user_token.assert_called_once_with(str(user_id)) patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) @@ -809,7 +790,7 @@ async def test_logout_with_bad_user(self, client, mocker): async def test_logout_me(self, client, mocker): ret_val = {'response': 'deleted', 'rows_affected': 1} - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. 
_rv = await mock_coro(ret_val) if sys.version_info >= (3, 8) else asyncio.ensure_future(mock_coro(ret_val)) @@ -820,33 +801,32 @@ async def test_logout_me(self, client, mocker): r = await resp.text() assert {'logout': True} == json.loads(r) patch_delete_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_auth_logger_info.assert_called_once_with('User has been logged out successfully') + patch_auth_logger_info.assert_called_once_with('User has been logged out successfully.') patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/logout') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/logout') async def test_logout_me_with_bad_token(self, client, mocker): ret_val = {'response': 'deleted', 'rows_affected': 0} - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. _rv = await mock_coro(ret_val) if sys.version_info >= (3, 8) else asyncio.ensure_future(mock_coro(ret_val)) - with patch.object(auth._logger, 'warning') as patch_auth_logger_warn: + with patch.object(auth._logger, 'error') as patch_auth_logger: with patch.object(User.Objects, 'delete_token', return_value=_rv) as patch_delete_token: resp = await client.put('/fledge/logout', headers=ADMIN_USER_HEADER) assert 404 == resp.status patch_delete_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_auth_logger_warn.assert_called_once_with('Logout requested with bad user token') patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/logout') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/logout') async def test_enable_with_super_admin_user(self, client, mocker): - msg = 'Restricted for Super Admin user' + msg = 'Restricted for Super Admin user.' ret_val = [{'id': '1'}] - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. 
_rv = await mock_coro(ret_val) if sys.version_info >= (3, 8) else asyncio.ensure_future(mock_coro(ret_val)) @@ -854,7 +834,7 @@ async def test_enable_with_super_admin_user(self, client, mocker): with patch.object(auth._logger, 'warning') as patch_logger_warning: resp = await client.put('/fledge/admin/1/enable', data=json.dumps({'role_id': 2}), headers=ADMIN_USER_HEADER) - assert 406 == resp.status + assert 403 == resp.status assert msg == resp.reason r = await resp.text() assert {'message': msg} == json.loads(r) @@ -863,16 +843,16 @@ async def test_enable_with_super_admin_user(self, client, mocker): patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/admin/1/enable') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/admin/1/enable') @pytest.mark.parametrize("request_data, msg", [ - ({}, "Nothing to enable user update"), - ({"enable": 1}, "Nothing to enable user update"), - ({"enabled": 1}, "Accepted values are True/False only"), + ({}, "Nothing to enable user update."), + ({"enable": 1}, "Nothing to enable user update."), + ({"enabled": 1}, "Accepted values are True/False only."), ]) async def test_enable_with_bad_data(self, client, mocker, request_data, msg): ret_val = [{'id': '1'}] - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. 
_rv = await mock_coro(ret_val) if sys.version_info >= (3, 8) else asyncio.ensure_future(mock_coro(ret_val)) @@ -887,7 +867,7 @@ async def test_enable_with_bad_data(self, client, mocker, request_data, msg): patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/admin/2/enable') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/admin/2/enable') @pytest.mark.parametrize("request_data", [ {"enabled": 'true'}, {"enabled": 'True'}, {"enabled": 'TRUE'}, {"enabled": 'tRUe'}, @@ -895,27 +875,36 @@ async def test_enable_with_bad_data(self, client, mocker, request_data, msg): ]) async def test_enable_user(self, client, mocker, request_data): uid = 2 - user_record = {'rows': [{'id': uid, 'role_id': '1', 'uname': 'AJ'}], 'count': 1} - update_user_record = {'rows': [{'id': uid, 'role_id': '1', 'uname': 'AJ', 'enabled': request_data['enabled']}], - 'count': 1} + if request_data['enabled'].lower() == 'true': + _modified_enabled_val = 't' + _text = 'enabled' + _payload = '{"values": {"enabled": "t"}, "where": {"column": "id", "condition": "=", "value": "2"}}' + else: + _modified_enabled_val = 'f' + _text = 'disabled' + _payload = '{"values": {"enabled": "f"}, "where": {"column": "id", "condition": "=", "value": "2"}}' + + user_record = {'rows': [{'id': uid, 'role_id': '1', 'uname': 'AJ', 'enabled': 't'}], 'count': 1} + update_user_record = {'rows': [{'id': uid, 'role_id': '1', 'uname': 'AJ', + 'enabled': _modified_enabled_val}], 'count': 1} update_result = {"rows_affected": 1, "response": "updated"} - update_payload = '{"values": {"enabled": "t"}, "where": {"column": "id", "condition": "=", "value": "2"}}' - _text, _enable, _payload = ('enabled', 't', '{"values": {"enabled": "t"}, ' - '"where": {"column": "id", "condition": "=", "value": "2"}}') \ - if str(request_data['enabled']).lower() == 'true' else ( - 'disabled', 'f', '{"values": {"enabled": "f"}, "where": {"column": "id", "condition": "=", "value": "2"}}') - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) + audit_details = {'user_id': uid, 'old_value': {'enabled': 't'}, + 'new_value': {'enabled': _modified_enabled_val}, + 'message': "'AJ' user has been {}.".format(_text)} storage_client_mock = MagicMock(StorageClientAsync) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. 
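# The _payload string built above is the JSON document this test expects the route to
# pass to the storage layer's update_tbl call. Purely as an illustration (not how the
# test constructs it), the same literal can be derived with json.dumps, since dict
# insertion order and the default ", " / ": " separators reproduce it byte-for-byte:
import json

def enable_payload(uid, enabled):
    value = 't' if enabled else 'f'
    return json.dumps({"values": {"enabled": value},
                       "where": {"column": "id", "condition": "=", "value": str(uid)}})

assert enable_payload(2, True) == (
    '{"values": {"enabled": "t"}, "where": {"column": "id", "condition": "=", "value": "2"}}')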
if sys.version_info.major == 3 and sys.version_info.minor >= 8: _rv1 = await mock_coro([{'id': '1'}]) _rv2 = await mock_coro(update_result) + _rv3 = await mock_coro(None) _se1 = await mock_coro(user_record) _se2 = await mock_coro(update_user_record) else: _rv1 = asyncio.ensure_future(mock_coro([{'id': '1'}])) _rv2 = asyncio.ensure_future(mock_coro(update_result)) + _rv3 = asyncio.ensure_future(mock_coro(None)) _se1 = asyncio.ensure_future(mock_coro(user_record)) _se2 = asyncio.ensure_future(mock_coro(update_user_record)) @@ -925,11 +914,15 @@ async def test_enable_user(self, client, mocker, request_data): side_effect=[_se1, _se2]) as q_tbl_patch: with patch.object(storage_client_mock, 'update_tbl', return_value=_rv2) as update_tbl_patch: - resp = await client.put('/fledge/admin/{}/enable'.format(uid), data=json.dumps(request_data), - headers=ADMIN_USER_HEADER) - assert 200 == resp.status - r = await resp.text() - assert {"message": "User with id:<2> has been {} successfully".format(_text)} == json.loads(r) + with patch.object(AuditLogger, '__init__', return_value=None): + with patch.object(AuditLogger, 'information', return_value=_rv3) as patch_audit: + resp = await client.put('/fledge/admin/{}/enable'.format(uid), data=json.dumps( + request_data), headers=ADMIN_USER_HEADER) + assert 200 == resp.status + r = await resp.text() + assert {"message": "User with ID:<2> has been {} successfully.".format(_text) + } == json.loads(r) + patch_audit.assert_called_once_with('USRCH', audit_details) update_tbl_patch.assert_called_once_with('users', _payload) assert 2 == q_tbl_patch.call_count args, kwargs = q_tbl_patch.call_args_list[0] @@ -942,12 +935,12 @@ async def test_enable_user(self, client, mocker, request_data): patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/admin/2/enable') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/admin/2/enable') async def test_reset_super_admin(self, client, mocker): - msg = 'Restricted for Super Admin user' + msg = 'Restricted for Super Admin user.' ret_val = [{'id': '1'}] - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. 
_rv = await mock_coro(ret_val) if sys.version_info >= (3, 8) else asyncio.ensure_future(mock_coro(ret_val)) @@ -955,44 +948,41 @@ async def test_reset_super_admin(self, client, mocker): with patch.object(auth._logger, 'warning') as patch_logger_warning: resp = await client.put('/fledge/admin/1/reset', data=json.dumps({'role_id': 2}), headers=ADMIN_USER_HEADER) - assert 406 == resp.status + assert 403 == resp.status assert msg == resp.reason patch_logger_warning.assert_called_once_with(msg) patch_role_id.assert_called_once_with('admin') patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/admin/1/reset') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/admin/1/reset') @pytest.mark.parametrize("request_data, msg", [ - ({}, "Nothing to update the user"), - ({"invalid": 1}, "Nothing to update the user"), - ({"password": "fledge"}, "Password must contain at least one digit, one lowercase, one uppercase & one special character and length of minimum 6 characters"), - ({"password": 1}, "Password must contain at least one digit, one lowercase, one uppercase & one special character and length of minimum 6 characters") + ({}, "Nothing to update the user."), + ({"invalid": 1}, "Nothing to update the user."), + ({"password": "fledge"}, PASSWORD_ERROR_MSG), + ({"password": 1}, PASSWORD_ERROR_MSG) ]) async def test_reset_with_bad_data(self, client, mocker, request_data, msg): ret_val = [{'id': '1'}] - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. _rv = await mock_coro(ret_val) if sys.version_info >= (3, 8) else asyncio.ensure_future(mock_coro(ret_val)) with patch.object(User.Objects, 'get_role_id_by_name', return_value=_rv) as patch_role_id: - with patch.object(auth._logger, 'warning') as patch_logger_warning: - resp = await client.put('/fledge/admin/2/reset', data=json.dumps(request_data), - headers=ADMIN_USER_HEADER) - assert 400 == resp.status - assert msg == resp.reason - patch_logger_warning.assert_called_once_with(msg) + resp = await client.put('/fledge/admin/2/reset', data=json.dumps(request_data), headers=ADMIN_USER_HEADER) + assert 400 == resp.status + assert msg == resp.reason patch_role_id.assert_called_once_with('admin') patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/admin/2/reset') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/admin/2/reset') async def test_reset_with_bad_role(self, client, mocker): request_data = {"role_id": "blah"} - msg = "Invalid or bad role id" - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + msg = "Invalid or bad role id." 
+ patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. if sys.version_info.major == 3 and sys.version_info.minor >= 8: @@ -1004,28 +994,26 @@ async def test_reset_with_bad_role(self, client, mocker): with patch.object(User.Objects, 'get_role_id_by_name', return_value=_rv1) as patch_role_id: with patch.object(auth, 'is_valid_role', return_value=_rv2) as patch_role: - with patch.object(auth._logger, 'warning') as patch_logger_warning: - resp = await client.put('/fledge/admin/2/reset', data=json.dumps(request_data), - headers=ADMIN_USER_HEADER) - assert 400 == resp.status - assert msg == resp.reason - patch_logger_warning.assert_called_once_with(msg) + resp = await client.put('/fledge/admin/2/reset', data=json.dumps(request_data), + headers=ADMIN_USER_HEADER) + assert 400 == resp.status + assert msg == resp.reason patch_role.assert_called_once_with(request_data['role_id']) patch_role_id.assert_called_once_with('admin') patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/admin/2/reset') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/admin/2/reset') @pytest.mark.parametrize("exception_name, status_code, msg", [ (ValueError, 400, 'None'), - (User.DoesNotExist, 404, 'User with id:<2> does not exist'), - (User.PasswordAlreadyUsed, 400, 'The new password should be different from previous 3 used') + (User.DoesNotExist, 404, 'User with ID:<2> does not exist.'), + (User.PasswordAlreadyUsed, 400, 'The new password should be different from previous 3 used.') ]) async def test_reset_exceptions(self, client, mocker, exception_name, status_code, msg): request_data = {'role_id': '2'} user_id = 2 - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. 
if sys.version_info.major == 3 and sys.version_info.minor >= 8: @@ -1038,25 +1026,28 @@ async def test_reset_exceptions(self, client, mocker, exception_name, status_cod with patch.object(User.Objects, 'get_role_id_by_name', return_value=_rv1) as patch_role_id: with patch.object(auth, 'is_valid_role', return_value=_rv2) as patch_role: with patch.object(User.Objects, 'update', side_effect=exception_name(msg)) as patch_update: - with patch.object(auth._logger, 'warning') as patch_logger_warning: + with patch.object(auth._logger, 'warning') as patch_logger: resp = await client.put('/fledge/admin/{}/reset'.format(user_id), data=json.dumps(request_data), headers=ADMIN_USER_HEADER) assert status_code == resp.status assert msg == resp.reason - patch_logger_warning.assert_called_once_with(msg) + if exception_name == User.PasswordAlreadyUsed: + patch_logger.assert_called_once_with(msg) patch_update.assert_called_once_with(str(user_id), request_data) patch_role.assert_called_once_with(request_data['role_id']) patch_role_id.assert_called_once_with('admin') patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/admin/2/reset') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/admin/2/reset') async def test_reset_unknown_exception(self, client, mocker): request_data = {'role_id': '2'} user_id = 2 msg = 'Something went wrong' - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + logger_msg = 'Failed to reset the user ID:<{}>.'.format(user_id) + + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. 
if sys.version_info.major == 3 and sys.version_info.minor >= 8: @@ -1069,26 +1060,27 @@ async def test_reset_unknown_exception(self, client, mocker): with patch.object(User.Objects, 'get_role_id_by_name', return_value=_rv1) as patch_role_id: with patch.object(auth, 'is_valid_role', return_value=_rv2) as patch_role: with patch.object(User.Objects, 'update', side_effect=Exception(msg)) as patch_update: - with patch.object(auth._logger, 'exception') as patch_logger_exception: + with patch.object(auth._logger, 'error') as patch_logger: resp = await client.put('/fledge/admin/{}/reset'.format(user_id), data=json.dumps(request_data), headers=ADMIN_USER_HEADER) assert 500 == resp.status assert msg == resp.reason - patch_logger_exception.assert_called_once_with(msg) + args = patch_logger.call_args + assert logger_msg == args[0][1] patch_update.assert_called_once_with(str(user_id), request_data) patch_role.assert_called_once_with(request_data['role_id']) patch_role_id.assert_called_once_with('admin') patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/admin/2/reset') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/admin/2/reset') async def test_reset_role_and_password(self, client, mocker): request_data = {'role_id': '2', 'password': 'Test@123'} user_id = 2 - msg = 'User with id:<{}> has been updated successfully'.format(user_id) + msg = 'User with ID:<{}> has been updated successfully.'.format(user_id) ret_val = {'response': 'updated', 'rows_affected': 1} - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. 
@@ -1117,7 +1109,7 @@ async def test_reset_role_and_password(self, client, mocker): patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/admin/2/reset') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/admin/2/reset') @pytest.mark.parametrize("auth_method, request_data, ret_val", [ ("certificate", "-----BEGIN CERTIFICATE----- Test -----END CERTIFICATE-----", (2, "token2", False)) @@ -1158,12 +1150,13 @@ async def async_get_user(): @pytest.mark.skip(reason="Request mock required") @pytest.mark.parametrize("auth_method, request_data, ret_val, expected", [ - ("certificate", {"username": "admin", "password": "fledge"}, (1, "token1", True), "Invalid authentication method, use certificate instead."), + ("certificate", {"username": "admin", "password": "fledge"}, (1, "token1", True), + "Invalid authentication method, use certificate instead."), ]) async def test_login_auth_exception1(self, client, auth_method, request_data, ret_val, expected): async def async_mock(): return ret_val - with patch.object(middleware._logger, 'info') as patch_logger_info: + with patch.object(middleware._logger, 'info') as patch_logger_debug: with patch.object(server.Server, "auth_method", auth_method) as patch_auth_method: req_data = json.dumps(request_data) if isinstance(request_data, dict) else request_data resp = await client.post('/fledge/login', data=req_data) @@ -1173,14 +1166,15 @@ async def async_mock(): @pytest.mark.skip(reason="Request mock required") @pytest.mark.parametrize("auth_method, request_data, ret_val, expected", [ - ("password", "-----BEGIN CERTIFICATE----- Test -----END CERTIFICATE-----", (2, "token2", False), "Invalid authentication method, use password instead.") + ("password", "-----BEGIN CERTIFICATE----- Test -----END CERTIFICATE-----", + (2, "token2", False), "Invalid authentication method, use password instead.") ]) async def test_login_auth_exception2(self, client, auth_method, request_data, ret_val, expected): TEXT_HEADER = {'content-type': 'text/plain'} async def async_mock(): return ret_val - with patch.object(middleware._logger, 'info') as patch_logger_info: + with patch.object(middleware._logger, 'info') as patch_logger_debug: with patch.object(server.Server, "auth_method", auth_method) as patch_auth_method: req_data = request_data resp = await client.post('/fledge/login', data=req_data, headers=TEXT_HEADER) diff --git a/tests/unit/python/fledge/services/core/api/test_auth_optional.py b/tests/unit/python/fledge/services/core/api/test_auth_optional.py index 69d7554b8e..0412f09751 100644 --- a/tests/unit/python/fledge/services/core/api/test_auth_optional.py +++ b/tests/unit/python/fledge/services/core/api/test_auth_optional.py @@ -52,36 +52,44 @@ async def test_get_roles(self, client): else: _rv = asyncio.ensure_future(mock_coro([])) - with patch.object(middleware._logger, 'info') as patch_logger_info: + with patch.object(middleware._logger, 'debug') as patch_logger: with patch.object(User.Objects, 'get_roles', return_value=_rv) as patch_user_obj: resp = await client.get('/fledge/user/role') assert 200 == resp.status r = await resp.text() assert {'roles': []} == json.loads(r) patch_user_obj.assert_called_once_with() - patch_logger_info.assert_called_once_with('Received %s request for %s', 'GET', 
'/fledge/user/role') + patch_logger.assert_called_once_with('Received %s request for %s', 'GET', '/fledge/user/role') @pytest.mark.parametrize("ret_val, exp_result", [ ([], []), - ([{'uname': 'admin', 'role_id': '1', 'access_method': 'any', 'id': '1', 'real_name': 'Admin', 'description': 'Admin user'}, {'uname': 'user', 'role_id': '2', 'access_method': 'any', 'id': '2', 'real_name': 'Non-admin', 'description': 'Normal user'}], - [{"userId": "1", "userName": "admin", "roleId": "1", "accessMethod": "any", "realName": "Admin", "description": "Admin user"}, {"userId": "2", "userName": "user", "roleId": "2", "accessMethod": "any", "realName": "Non-admin", "description": "Normal user"}]) + ([{'uname': 'admin', 'role_id': '1', 'access_method': 'any', 'id': '1', 'real_name': 'Admin', + 'description': 'Admin user', 'enabled': 't'}, + {'uname': 'user', 'role_id': '2', 'access_method': 'any', 'id': '2', 'real_name': 'Non-admin', + 'description': 'Normal user', 'enabled': 't'}, + {'uname': 'dviewer', 'role_id': '3', 'access_method': 'any', 'id': '3', 'real_name': 'Data-Viewer', + 'description': 'Data user', 'enabled': 'f'} + ], + [{"userId": "1", "userName": "admin", "roleId": "1", "accessMethod": "any", "realName": "Admin", + "description": "Admin user"}, + {"userId": "2", "userName": "user", "roleId": "2", "accessMethod": "any", "realName": "Non-admin", + "description": "Normal user"}]) ]) async def test_get_all_users(self, client, ret_val, exp_result): - # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. if sys.version_info.major == 3 and sys.version_info.minor >= 8: _rv = await mock_coro(ret_val) else: _rv = asyncio.ensure_future(mock_coro(ret_val)) - with patch.object(middleware._logger, 'info') as patch_logger_info: + with patch.object(middleware._logger, 'debug') as patch_logger: with patch.object(User.Objects, 'all', return_value=_rv) as patch_user_obj: resp = await client.get('/fledge/user') assert 200 == resp.status r = await resp.text() assert {'users': exp_result} == json.loads(r) patch_user_obj.assert_called_once_with() - patch_logger_info.assert_called_once_with('Received %s request for %s', 'GET', '/fledge/user') + patch_logger.assert_called_once_with('Received %s request for %s', 'GET', '/fledge/user') @pytest.mark.parametrize("request_params, exp_result, arg1, arg2", [ ('?id=1', {'uname': 'admin', 'role_id': '1', 'id': '1', 'access_method': 'any', 'real_name': 'Admin', 'description': 'Admin user'}, 1, None), @@ -100,7 +108,7 @@ async def test_get_user_by_param(self, client, request_params, exp_result, arg1, else: _rv = asyncio.ensure_future(mock_coro(result)) - with patch.object(middleware._logger, 'info') as patch_logger_info: + with patch.object(middleware._logger, 'debug') as patch_logger: with patch.object(User.Objects, 'get', return_value=_rv) as patch_user_obj: resp = await client.get('/fledge/user{}'.format(request_params)) assert 200 == resp.status @@ -113,32 +121,28 @@ async def test_get_user_by_param(self, client, request_params, exp_result, arg1, assert actual['realName'] == exp_result['real_name'] assert actual['description'] == exp_result['description'] patch_user_obj.assert_called_once_with(arg1, arg2) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'GET', '/fledge/user') + patch_logger.assert_called_once_with('Received %s request for %s', 'GET', '/fledge/user') @pytest.mark.parametrize("request_params, error_msg, arg1, arg2", [ ('?id=10', 'User with id:<10> does not exist', 10, None), 
('?username=blah', 'User with name: does not exist', None, 'blah') ]) async def test_get_user_exception_by_param(self, client, request_params, error_msg, arg1, arg2): - with patch.object(middleware._logger, 'info') as patch_logger_info: + with patch.object(middleware._logger, 'debug') as patch_logger: with patch.object(User.Objects, 'get', side_effect=User.DoesNotExist(error_msg)) as patch_user_get: - with patch.object(auth._logger, 'warning') as patch_logger: - resp = await client.get('/fledge/user{}'.format(request_params)) - assert 404 == resp.status - assert error_msg == resp.reason - patch_logger.assert_called_once_with(error_msg) + resp = await client.get('/fledge/user{}'.format(request_params)) + assert 404 == resp.status + assert error_msg == resp.reason patch_user_get.assert_called_once_with(arg1, arg2) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'GET', '/fledge/user') + patch_logger.assert_called_once_with('Received %s request for %s', 'GET', '/fledge/user') @pytest.mark.parametrize("request_params", ['?id=0', '?id=blah', '?id=-1']) async def test_get_bad_user_id_param_exception(self, client, request_params): - with patch.object(middleware._logger, 'info') as patch_logger_info: - with patch.object(auth._logger, 'warning') as patch_logger: - resp = await client.get('/fledge/user{}'.format(request_params)) - assert 400 == resp.status - assert 'Bad user id' == resp.reason - patch_logger.assert_called_once_with('Get user requested with bad user id') - patch_logger_info.assert_called_once_with('Received %s request for %s', 'GET', '/fledge/user') + with patch.object(middleware._logger, 'debug') as patch_logger: + resp = await client.get('/fledge/user{}'.format(request_params)) + assert 400 == resp.status + assert 'Bad user ID' == resp.reason + patch_logger.assert_called_once_with('Received %s request for %s', 'GET', '/fledge/user') @pytest.mark.parametrize("request_data", [ {}, @@ -151,20 +155,19 @@ async def test_get_bad_user_id_param_exception(self, client, request_params): {"uname": "blah", "password": "blah"}, ]) async def test_bad_login(self, client, request_data): - with patch.object(middleware._logger, 'info') as patch_logger_info: - with patch.object(auth._logger, 'warning') as patch_logger: - resp = await client.post('/fledge/login', data=json.dumps(request_data)) - assert 400 == resp.status - assert 'Username or password is missing' == resp.reason - patch_logger.assert_called_once_with('Username and password are required to login') - patch_logger_info.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/login') + with patch.object(middleware._logger, 'debug') as patch_logger: + resp = await client.post('/fledge/login', data=json.dumps(request_data)) + assert 400 == resp.status + assert 'Username or password is missing' == resp.reason + patch_logger.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/login') @pytest.mark.parametrize("request_data, status_code, exception_name, msg", [ ({"username": "blah", "password": "blah"}, 404, User.DoesNotExist, 'User does not exist'), ({"username": "admin", "password": "blah"}, 404, User.PasswordDoesNotMatch, 'Username or Password do not match'), ({"username": "admin", "password": 123}, 404, User.PasswordDoesNotMatch, 'Username or Password do not match'), ({"username": 1, "password": 1}, 404, ValueError, 'Username should be a valid string'), - ({"username": "user", "password": "fledge"}, 401, User.PasswordExpired, 'Your password has been expired. 
Please set your password again') + ({"username": "user", "password": "fledge"}, 401, User.PasswordExpired, + 'Your password has been expired. Please set your password again.') ]) async def test_login_exception(self, client, request_data, status_code, exception_name, msg): @@ -174,14 +177,15 @@ async def test_login_exception(self, client, request_data, status_code, exceptio else: _rv = asyncio.ensure_future(mock_coro([])) - with patch.object(middleware._logger, 'info') as patch_logger_info: + with patch.object(middleware._logger, 'debug') as patch_logger: with patch.object(User.Objects, 'login', side_effect=exception_name(msg)) as patch_user_login: with patch.object(User.Objects, 'delete_user_tokens', return_value=_rv) as patch_delete_token: - with patch.object(auth._logger, 'warning') as patch_logger: + with patch.object(auth._logger, 'warning') as patch_auth_logger: resp = await client.post('/fledge/login', data=json.dumps(request_data)) assert status_code == resp.status assert msg == resp.reason - patch_logger.assert_called_once_with(msg) + if status_code == 401: + patch_auth_logger.assert_called_once_with(msg) if status_code == 401: patch_delete_token.assert_called_once_with(msg) # TODO: host arg patch transport.request.extra_info @@ -189,7 +193,7 @@ async def test_login_exception(self, client, request_data, status_code, exceptio assert str(request_data['username']) == args[0] assert request_data['password'] == args[1] # patch_user_login.assert_called_once_with() - patch_logger_info.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/login') + patch_logger.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/login') @pytest.mark.parametrize("request_data, ret_val", [ ({"username": "admin", "password": "fledge"}, (1, "token1", True)), @@ -205,9 +209,9 @@ async def async_mock(): else: _rv = asyncio.ensure_future(async_mock()) - with patch.object(middleware._logger, 'info') as patch_logger_info: + with patch.object(middleware._logger, 'debug') as patch_logger: with patch.object(User.Objects, 'login', return_value=_rv) as patch_user_login: - with patch.object(auth._logger, 'info') as patch_logger: + with patch.object(auth._logger, 'info') as patch_auth_logger: resp = await client.post('/fledge/login', data=json.dumps(request_data)) assert 200 == resp.status r = await resp.text() @@ -215,86 +219,87 @@ async def async_mock(): assert ret_val[0] == actual['uid'] assert ret_val[1] == actual['token'] assert ret_val[2] == actual['admin'] - patch_logger.assert_called_once_with('User with username:<{}> logged in successfully.'.format(request_data['username'])) + patch_auth_logger.assert_called_once_with('User with username:<{}> logged in successfully.'.format( + request_data['username'])) # TODO: host arg patch transport.request.extra_info args, kwargs = patch_user_login.call_args assert request_data['username'] == args[0] assert request_data['password'] == args[1] # patch_user_login.assert_called_once_with() - patch_logger_info.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/login') + patch_logger.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/login') async def test_logout(self, client): - with patch.object(middleware._logger, 'info') as patch_logger_info: + with patch.object(middleware._logger, 'debug') as patch_logger: with patch.object(auth._logger, 'warning') as patch_logger_warning: resp = await client.put('/fledge/2/logout') assert 403 == resp.status assert FORBIDDEN == resp.reason 
patch_logger_warning.assert_called_once_with(WARN_MSG) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/2/logout') + patch_logger.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/2/logout') async def test_update_password(self, client): - with patch.object(middleware._logger, 'info') as patch_logger_info: + with patch.object(middleware._logger, 'debug') as patch_logger: with patch.object(auth._logger, 'warning') as patch_logger_warning: resp = await client.put('/fledge/user/1/password') assert 403 == resp.status assert FORBIDDEN == resp.reason patch_logger_warning.assert_called_once_with(WARN_MSG) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/user/1/password') + patch_logger.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/user/1/password') async def test_update_me(self, client): - with patch.object(middleware._logger, 'info') as patch_logger_info: + with patch.object(middleware._logger, 'debug') as patch_logger: with patch.object(auth._logger, 'warning') as patch_logger_warning: resp = await client.put('/fledge/user') assert 403 == resp.status assert FORBIDDEN == resp.reason patch_logger_warning.assert_called_once_with(WARN_MSG) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/user') + patch_logger.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/user') async def test_update_user(self, client): - with patch.object(middleware._logger, 'info') as patch_logger_info: + with patch.object(middleware._logger, 'debug') as patch_logger: with patch.object(auth._logger, 'warning') as patch_logger_warning: resp = await client.put('/fledge/admin/1') assert 403 == resp.status assert FORBIDDEN == resp.reason patch_logger_warning.assert_called_once_with(WARN_MSG) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/admin/1') + patch_logger.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/admin/1') async def test_delete_user(self, client): - with patch.object(middleware._logger, 'info') as patch_logger_info: + with patch.object(middleware._logger, 'debug') as patch_logger: with patch.object(auth._logger, 'warning') as patch_auth_logger_warn: resp = await client.delete('/fledge/admin/1/delete') assert 403 == resp.status assert FORBIDDEN == resp.reason patch_auth_logger_warn.assert_called_once_with(WARN_MSG) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'DELETE', '/fledge/admin/1/delete') + patch_logger.assert_called_once_with('Received %s request for %s', 'DELETE', '/fledge/admin/1/delete') async def test_create_user(self, client): request_data = {"username": "ajtest", "password": "F0gl@mp"} - with patch.object(middleware._logger, 'info') as patch_logger_info: + with patch.object(middleware._logger, 'debug') as patch_logger: with patch.object(auth._logger, 'warning') as patch_logger_warning: resp = await client.post('/fledge/admin/user', data=json.dumps(request_data)) assert 403 == resp.status assert FORBIDDEN == resp.reason patch_logger_warning.assert_called_once_with(WARN_MSG) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/admin/user') + patch_logger.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/admin/user') async def test_enable_user(self, client): - with patch.object(middleware._logger, 'info') as patch_logger_info: + with patch.object(middleware._logger, 'debug') 
as patch_logger: with patch.object(auth._logger, 'warning') as patch_logger_warning: resp = await client.put('/fledge/admin/2/enable') assert 403 == resp.status assert FORBIDDEN == resp.reason patch_logger_warning.assert_called_once_with(WARN_MSG) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/admin/2/enable') + patch_logger.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/admin/2/enable') async def test_reset(self, client): - with patch.object(middleware._logger, 'info') as patch_logger_info: + with patch.object(middleware._logger, 'debug') as patch_logger: with patch.object(auth._logger, 'warning') as patch_logger_warning: resp = await client.put('/fledge/admin/2/reset') assert 403 == resp.status assert FORBIDDEN == resp.reason patch_logger_warning.assert_called_once_with(WARN_MSG) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/admin/2/reset') + patch_logger.assert_called_once_with('Received %s request for %s', 'PUT', '/fledge/admin/2/reset') @pytest.mark.parametrize("role_id, expected", [ (1, True), diff --git a/tests/unit/python/fledge/services/core/api/test_backup_restore.py b/tests/unit/python/fledge/services/core/api/test_backup_restore.py index 990a7e4a7e..b27c529b8c 100644 --- a/tests/unit/python/fledge/services/core/api/test_backup_restore.py +++ b/tests/unit/python/fledge/services/core/api/test_backup_restore.py @@ -111,9 +111,11 @@ async def test_get_backups_bad_data(self, client, request_params, response_code, async def test_get_backups_exceptions(self, client): msg = "Internal Server Error" with patch.object(connect, 'get_storage_async', side_effect=Exception(msg)): - resp = await client.get('/fledge/backup') - assert 500 == resp.status - assert msg == resp.reason + with patch.object(backup_restore._logger, 'error') as patch_logger: + resp = await client.get('/fledge/backup') + assert 500 == resp.status + assert msg == resp.reason + assert 1 == patch_logger.call_count async def test_create_backup(self, client): async def mock_create(): @@ -135,9 +137,11 @@ async def mock_create(): async def test_create_backup_exception(self, client): msg = "Internal Server Error" with patch.object(connect, 'get_storage_async', side_effect=Exception(msg)): - resp = await client.post('/fledge/backup') - assert 500 == resp.status - assert msg == resp.reason + with patch.object(backup_restore._logger, 'error') as patch_logger: + resp = await client.post('/fledge/backup') + assert 500 == resp.status + assert msg == resp.reason + assert 1 == patch_logger.call_count async def test_get_backup_details(self, client): storage_client_mock = MagicMock(StorageClientAsync) @@ -167,9 +171,12 @@ async def test_get_backup_details_exceptions(self, client, input_exception, resp storage_client_mock = MagicMock(StorageClientAsync) with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): with patch.object(Backup, 'get_backup_details', side_effect=input_exception): - resp = await client.get('/fledge/backup/{}'.format(8)) - assert response_code == resp.status - assert response_message == resp.reason + with patch.object(backup_restore._logger, 'error') as patch_logger: + resp = await client.get('/fledge/backup/{}'.format(8)) + assert response_code == resp.status + assert response_message == resp.reason + if response_code == 500: + assert 1 == patch_logger.call_count async def test_get_backup_details_bad_data(self, client): resp = await client.get('/fledge/backup/{}'.format('BLA')) @@ 
-201,9 +208,12 @@ async def test_delete_backup_exceptions(self, client, input_exception, response_ storage_client_mock = MagicMock(StorageClientAsync) with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): with patch.object(Backup, 'delete_backup', side_effect=input_exception): - resp = await client.delete('/fledge/backup/{}'.format(8)) - assert response_code == resp.status - assert response_message == resp.reason + with patch.object(backup_restore._logger, 'error') as patch_logger: + resp = await client.delete('/fledge/backup/{}'.format(8)) + assert response_code == resp.status + assert response_message == resp.reason + if response_code == 500: + assert 1 == patch_logger.call_count async def test_delete_backup_bad_data(self, client): resp = await client.delete('/fledge/backup/{}'.format('BLA')) @@ -235,12 +245,15 @@ async def test_get_backup_download_exceptions(self, client, input_exception, res with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): with patch.object(Backup, 'get_backup_details', side_effect=input_exception): with patch('os.path.isfile', return_value=False): - resp = await client.get('/fledge/backup/{}/download'.format(8)) - assert response_code == resp.status - assert response_message == resp.reason - result = await resp.text() - json_response = json.loads(result) - assert {"message": response_message} == json_response + with patch.object(backup_restore._logger, 'error') as patch_logger: + resp = await client.get('/fledge/backup/{}/download'.format(8)) + assert response_code == resp.status + assert response_message == resp.reason + result = await resp.text() + json_response = json.loads(result) + assert {"message": response_message} == json_response + if response_code == 500: + assert 1 == patch_logger.call_count async def test_get_backup_download(self, client): # FIXME: py3.9 fails to recognise this in default installed mimetypes known-file @@ -308,6 +321,9 @@ async def test_restore_backup_exceptions(self, client, backup_id, input_exceptio storage_client_mock = MagicMock(StorageClientAsync) with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): with patch.object(Restore, 'restore_backup', side_effect=input_exception): - resp = await client.put('/fledge/backup/{}/restore'.format(backup_id)) - assert code == resp.status - assert message == resp.reason + with patch.object(backup_restore._logger, 'error') as patch_logger: + resp = await client.put('/fledge/backup/{}/restore'.format(backup_id)) + assert code == resp.status + assert message == resp.reason + if code == 500: + assert 1 == patch_logger.call_count diff --git a/tests/unit/python/fledge/services/core/api/test_browser_assets.py b/tests/unit/python/fledge/services/core/api/test_browser_assets.py index 93c0518233..dd8b65d35d 100644 --- a/tests/unit/python/fledge/services/core/api/test_browser_assets.py +++ b/tests/unit/python/fledge/services/core/api/test_browser_assets.py @@ -30,8 +30,8 @@ '/fledge/asset/fogbench%2fhumidity/temperature/series'] PAYLOADS = ['{"aggregate": {"column": "*", "alias": "count", "operation": "count"}, "group": "asset_code"}', - '{"return": ["reading", {"column": "user_ts", "alias": "timestamp"}], "where": {"column": "asset_code", "condition": "=", "value": "fogbench/humidity"}, "limit": 20, "sort": {"column": "user_ts", "direction": "desc"}}', - '{"return": [{"column": "user_ts", "alias": "timestamp"}, {"json": {"properties": "temperature", "column": "reading"}, "alias": "temperature"}], "where": {"column": 
"asset_code", "condition": "=", "value": "fogbench/humidity"}, "limit": 20, "sort": {"column": "user_ts", "direction": "desc"}}', + '{"return": ["reading", {"column": "user_ts", "alias": "timestamp", "timezone": "utc"}], "where": {"column": "asset_code", "condition": "=", "value": "fogbench/humidity"}, "limit": 20, "sort": {"column": "user_ts", "direction": "desc"}}', + '{"return": [{"column": "user_ts", "alias": "timestamp", "timezone": "utc"}, {"json": {"properties": "temperature", "column": "reading"}, "alias": "temperature"}], "where": {"column": "asset_code", "condition": "=", "value": "fogbench/humidity"}, "limit": 20, "sort": {"column": "user_ts", "direction": "desc"}}', '{"aggregate": [{"operation": "min", "alias": "min", "json": {"properties": "temperature", "column": "reading"}}, {"operation": "max", "alias": "max", "json": {"properties": "temperature", "column": "reading"}}, {"operation": "avg", "alias": "average", "json": {"properties": "temperature", "column": "reading"}}], "where": {"column": "asset_code", "condition": "=", "value": "fogbench/humidity"}, "group": {"format": "YYYY-MM-DD HH24:MI:SS", "column": "user_ts", "alias": "timestamp"}, "limit": 20, "sort": {"column": "user_ts", "direction": "desc"}}' ] RESULTS = [{'rows': [{'count': 10, 'asset_code': 'TI sensorTag/luxometer'}], 'count': 1}, @@ -49,9 +49,9 @@ ] FILTERING_IMAGE_PAYLOADS = [ - '{"return": ["reading", {"column": "user_ts", "alias": "timestamp"}], "where": {"column": "asset_code", "condition": "=", "value": "testcard"}, "limit": 20, "sort": {"column": "user_ts", "direction": "desc"}}', + '{"return": ["reading", {"column": "user_ts", "alias": "timestamp", "timezone": "utc"}], "where": {"column": "asset_code", "condition": "=", "value": "testcard"}, "limit": 20, "sort": {"column": "user_ts", "direction": "desc"}}', '{"return": ["reading"], "where": {"column": "asset_code", "condition": "=", "value": "testcard"}, "limit": 20, "sort": {"column": "user_ts", "direction": "desc"}}', - '{"return": [{"column": "user_ts", "alias": "timestamp"}, {"json": {"column": "reading", "properties": "testcard"}, "alias": "testcard"}], "where": {"column": "asset_code", "condition": "=", "value": "testcard"}, "limit": 20, "sort": {"column": "user_ts", "direction": "desc"}}', + '{"return": [{"column": "user_ts", "alias": "timestamp", "timezone": "utc"}, {"json": {"column": "reading", "properties": "testcard"}, "alias": "testcard"}], "where": {"column": "asset_code", "condition": "=", "value": "testcard"}, "limit": 20, "sort": {"column": "user_ts", "direction": "desc"}}', '{"aggregate": [{"operation": "min", "json": {"column": "reading", "properties": "testcard"}, "alias": "min"}, {"operation": "max", "json": {"column": "reading", "properties": "testcard"}, "alias": "max"}, {"operation": "avg", "json": {"column": "reading", "properties": "testcard"}, "alias": "average"}], "where": {"column": "asset_code", "condition": "=", "value": "testcard"}}', '{"aggregate": [{"operation": "min", "json": {"column": "reading", "properties": "testcard"}, "alias": "min"}, {"operation": "max", "json": {"column": "reading", "properties": "testcard"}, "alias": "max"}, {"operation": "avg", "json": {"column": "reading", "properties": "testcard"}, "alias": "average"}], "where": {"column": "asset_code", "condition": "=", "value": "testcard"}, "limit": 20, "group": {"column": "user_ts", "alias": "timestamp", "format": "YYYY-MM-DD HH24:MI:SS"}, "sort": {"column": "user_ts", "direction": "desc"}}' ] @@ -333,16 +333,16 @@ async def 
test_request_params_with_bad_data(self, client, request_param, respons assert response_message == resp.reason @pytest.mark.parametrize("request_params, payload", [ - ('?limit=5', '{"return": [{"alias": "timestamp", "column": "user_ts"}, {"json": {"properties": "temperature", "column": "reading"}, "alias": "temperature"}], "where": {"column": "asset_code", "condition": "=", "value": "fogbench/humidity"}, "limit": 5, "sort": {"column": "user_ts", "direction": "desc"}}'), - ('?skip=1', '{"return": [{"alias": "timestamp", "column": "user_ts"}, {"json": {"properties": "temperature", "column": "reading"}, "alias": "temperature"}], "where": {"column": "asset_code", "condition": "=", "value": "fogbench/humidity"}, "limit": 20, "skip": 1, "sort": {"column": "user_ts", "direction": "desc"}}'), - ('?limit=5&skip=1', '{"return": [{"alias": "timestamp", "column": "user_ts"}, {"json": {"properties": "temperature", "column": "reading"}, "alias": "temperature"}], "where": {"column": "asset_code", "condition": "=", "value": "fogbench/humidity"}, "limit": 5, "skip": 1, "sort": {"column": "user_ts", "direction": "desc"}}'), - ('?seconds=3600', '{"return": [{"alias": "timestamp", "column": "user_ts"}, {"json": {"properties": "temperature", "column": "reading"}, "alias": "temperature"}], "where": {"column": "asset_code", "condition": "=", "value": "fogbench/humidity", "and": {"column": "user_ts", "condition": "newer", "value": 3600}}, "sort": {"column": "user_ts", "direction": "desc"}}'), - ('?minutes=20', '{"return": [{"alias": "timestamp", "column": "user_ts"}, {"json": {"properties": "temperature", "column": "reading"}, "alias": "temperature"}], "where": {"column": "asset_code", "condition": "=", "value": "fogbench/humidity", "and": {"column": "user_ts", "condition": "newer", "value": 1200}}, "sort": {"column": "user_ts", "direction": "desc"}}'), - ('?hours=3', '{"return": [{"alias": "timestamp", "column": "user_ts"}, {"json": {"properties": "temperature", "column": "reading"}, "alias": "temperature"}], "where": {"column": "asset_code", "condition": "=", "value": "fogbench/humidity", "and": {"column": "user_ts", "condition": "newer", "value": 10800}}, "sort": {"column": "user_ts", "direction": "desc"}}'), - ('?seconds=60&minutes=10', '{"return": [{"alias": "timestamp", "column": "user_ts"}, {"json": {"properties": "temperature", "column": "reading"}, "alias": "temperature"}], "where": {"column": "asset_code", "condition": "=", "value": "fogbench/humidity", "and": {"column": "user_ts", "condition": "newer", "value": 60}}, "sort": {"column": "user_ts", "direction": "desc"}}'), - ('?seconds=600&hours=1', '{"return": [{"alias": "timestamp", "column": "user_ts"}, {"json": {"properties": "temperature", "column": "reading"}, "alias": "temperature"}], "where": {"column": "asset_code", "condition": "=", "value": "fogbench/humidity", "and": {"column": "user_ts", "condition": "newer", "value": 600}}, "sort": {"column": "user_ts", "direction": "desc"}}'), - ('?minutes=20&hours=1', '{"return": [{"alias": "timestamp", "column": "user_ts"}, {"json": {"properties": "temperature", "column": "reading"}, "alias": "temperature"}], "where": {"column": "asset_code", "condition": "=", "value": "fogbench/humidity", "and": {"column": "user_ts", "condition": "newer", "value": 1200}}, "sort": {"column": "user_ts", "direction": "desc"}}'), - ('?seconds=10&minutes=10&hours=1', '{"return": [{"alias": "timestamp", "column": "user_ts"}, {"json": {"properties": "temperature", "column": "reading"}, "alias": "temperature"}], "where": 
{"column": "asset_code", "condition": "=", "value": "fogbench/humidity", "and": {"column": "user_ts", "condition": "newer", "value": 10}}, "sort": {"column": "user_ts", "direction": "desc"}}') + ('?limit=5', '{"return": [{"alias": "timestamp", "column": "user_ts", "timezone": "utc"}, {"json": {"properties": "temperature", "column": "reading"}, "alias": "temperature"}], "where": {"column": "asset_code", "condition": "=", "value": "fogbench/humidity"}, "limit": 5, "sort": {"column": "user_ts", "direction": "desc"}}'), + ('?skip=1', '{"return": [{"alias": "timestamp", "column": "user_ts", "timezone": "utc"}, {"json": {"properties": "temperature", "column": "reading"}, "alias": "temperature"}], "where": {"column": "asset_code", "condition": "=", "value": "fogbench/humidity"}, "limit": 20, "skip": 1, "sort": {"column": "user_ts", "direction": "desc"}}'), + ('?limit=5&skip=1', '{"return": [{"alias": "timestamp", "column": "user_ts", "timezone": "utc"}, {"json": {"properties": "temperature", "column": "reading"}, "alias": "temperature"}], "where": {"column": "asset_code", "condition": "=", "value": "fogbench/humidity"}, "limit": 5, "skip": 1, "sort": {"column": "user_ts", "direction": "desc"}}'), + ('?seconds=3600', '{"return": [{"alias": "timestamp", "column": "user_ts", "timezone": "utc"}, {"json": {"properties": "temperature", "column": "reading"}, "alias": "temperature"}], "where": {"column": "asset_code", "condition": "=", "value": "fogbench/humidity", "and": {"column": "user_ts", "condition": "newer", "value": 3600}}, "sort": {"column": "user_ts", "direction": "desc"}}'), + ('?minutes=20', '{"return": [{"alias": "timestamp", "column": "user_ts", "timezone": "utc"}, {"json": {"properties": "temperature", "column": "reading"}, "alias": "temperature"}], "where": {"column": "asset_code", "condition": "=", "value": "fogbench/humidity", "and": {"column": "user_ts", "condition": "newer", "value": 1200}}, "sort": {"column": "user_ts", "direction": "desc"}}'), + ('?hours=3', '{"return": [{"alias": "timestamp", "column": "user_ts", "timezone": "utc"}, {"json": {"properties": "temperature", "column": "reading"}, "alias": "temperature"}], "where": {"column": "asset_code", "condition": "=", "value": "fogbench/humidity", "and": {"column": "user_ts", "condition": "newer", "value": 10800}}, "sort": {"column": "user_ts", "direction": "desc"}}'), + ('?seconds=60&minutes=10', '{"return": [{"alias": "timestamp", "column": "user_ts", "timezone": "utc"}, {"json": {"properties": "temperature", "column": "reading"}, "alias": "temperature"}], "where": {"column": "asset_code", "condition": "=", "value": "fogbench/humidity", "and": {"column": "user_ts", "condition": "newer", "value": 60}}, "sort": {"column": "user_ts", "direction": "desc"}}'), + ('?seconds=600&hours=1', '{"return": [{"alias": "timestamp", "column": "user_ts", "timezone": "utc"}, {"json": {"properties": "temperature", "column": "reading"}, "alias": "temperature"}], "where": {"column": "asset_code", "condition": "=", "value": "fogbench/humidity", "and": {"column": "user_ts", "condition": "newer", "value": 600}}, "sort": {"column": "user_ts", "direction": "desc"}}'), + ('?minutes=20&hours=1', '{"return": [{"alias": "timestamp", "column": "user_ts", "timezone": "utc"}, {"json": {"properties": "temperature", "column": "reading"}, "alias": "temperature"}], "where": {"column": "asset_code", "condition": "=", "value": "fogbench/humidity", "and": {"column": "user_ts", "condition": "newer", "value": 1200}}, "sort": {"column": "user_ts", "direction": 
"desc"}}'), + ('?seconds=10&minutes=10&hours=1', '{"return": [{"alias": "timestamp", "column": "user_ts", "timezone": "utc"}, {"json": {"properties": "temperature", "column": "reading"}, "alias": "temperature"}], "where": {"column": "asset_code", "condition": "=", "value": "fogbench/humidity", "and": {"column": "user_ts", "condition": "newer", "value": 10}}, "sort": {"column": "user_ts", "direction": "desc"}}') ]) async def test_limit_skip_time_units_payload(self, client, request_params, payload): readings_storage_client_mock = MagicMock(ReadingsStorageClientAsync) @@ -536,19 +536,19 @@ async def test_bad_asset_bucket_size_and_optional_params(self, client, url, code @pytest.mark.parametrize("request_params, payload", [ ('?limit=5&skip=1&order=asc', - '{"return": ["reading", {"column": "user_ts", "alias": "timestamp"}],' + '{"return": ["reading", {"column": "user_ts", "alias": "timestamp", "timezone": "utc"}],' ' "where": {"column": "asset_code", "condition": "=", "value": "fogbench/humidity"},' ' "skip": 1, "limit": 5, ' '"sort": {"column": "user_ts", "direction": "asc"}}' ), ('?limit=5&skip=1&order=desc', - '{"return": ["reading", {"column": "user_ts", "alias": "timestamp"}],' + '{"return": ["reading", {"column": "user_ts", "alias": "timestamp", "timezone": "utc"}],' ' "where": {"column": "asset_code", "condition": "=", "value": "fogbench/humidity"},' ' "skip": 1,"limit": 5, ' '"sort": {"column": "user_ts", "direction": "desc"}}' ), ('?limit=5&skip=1', - '{"return": ["reading", {"column": "user_ts", "alias": "timestamp"}],' + '{"return": ["reading", {"column": "user_ts", "alias": "timestamp", "timezone": "utc"}],' ' "where": {"column": "asset_code", "condition": "=", "value": "fogbench/humidity"},' ' "skip": 1,"limit": 5, ' '"sort": {"column": "user_ts", "direction": "desc"}}' @@ -622,15 +622,15 @@ async def test_filtering_image_data(self, client, request_url, payload, result): query_patch.assert_called_once_with(args[0]) @pytest.mark.parametrize("request_url, payload, is_image_excluded", [ - ('fledge/asset/testcard?images=include', '{"return": ["reading", {"column": "user_ts", "alias": "timestamp"}], ' - '"where": {"column": "asset_code", "condition": "=", ' - '"value": "testcard"}, "limit": 20, "sort": {"column": "user_ts", ' - '"direction": "desc"}}', + ('fledge/asset/testcard?images=include', + '{"return": ["reading", {"column": "user_ts", "alias": "timestamp", "timezone": "utc"}], ' + '"where": {"column": "asset_code", "condition": "=", "value": "testcard"}, "limit": 20, ' + '"sort": {"column": "user_ts", "direction": "desc"}}', True), - ('fledge/asset/testcard?images=exclude', '{"return": ["reading", {"column": "user_ts", "alias": "timestamp"}], ' - '"where": {"column": "asset_code", "condition": "=", ' - '"value": "testcard"}, "limit": 20, "sort": {"column": "user_ts", ' - '"direction": "desc"}}', + ('fledge/asset/testcard?images=exclude', + '{"return": ["reading", {"column": "user_ts", "alias": "timestamp", "timezone": "utc"}], ' + '"where": {"column": "asset_code", "condition": "=", "value": "testcard"}, "limit": 20, ' + '"sort": {"column": "user_ts", "direction": "desc"}}', False) ]) async def test_data_with_images_request_param(self, client, request_url, payload, is_image_excluded): diff --git a/tests/unit/python/fledge/services/core/api/test_certificate_store.py b/tests/unit/python/fledge/services/core/api/test_certificate_store.py index f32a1cf9c0..1f30bb1551 100644 --- a/tests/unit/python/fledge/services/core/api/test_certificate_store.py +++ 
b/tests/unit/python/fledge/services/core/api/test_certificate_store.py @@ -284,16 +284,16 @@ async def auth_token_fixture(self, mocker, is_admin=True): _rv1 = asyncio.ensure_future(mock_coro(user['id'])) _rv2 = asyncio.ensure_future(mock_coro(None)) _rv3 = asyncio.ensure_future(mock_coro(user)) - patch_logger_info = mocker.patch.object(middleware._logger, 'info') + patch_logger_debug = mocker.patch.object(middleware._logger, 'debug') patch_validate_token = mocker.patch.object(User.Objects, 'validate_token', return_value=_rv1) patch_refresh_token = mocker.patch.object(User.Objects, 'refresh_token_expiry', return_value=_rv2) patch_user_get = mocker.patch.object(User.Objects, 'get', return_value=_rv3) - return patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get + return patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get async def test_bad_upload_when_admin_role_is_required(self, client, certs_path, mocker): files = {'key': open(str(certs_path / 'certs/fledge.key'), 'rb'), 'cert': open(str(certs_path / 'certs/fledge.cert'), 'rb')} - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker, is_admin=False) msg = 'admin role permissions required to overwrite the default installed auth/TLS certificates.' with patch.object(certificate_store._logger, 'warning') as patch_logger: @@ -307,11 +307,11 @@ async def test_bad_upload_when_admin_role_is_required(self, client, certs_path, patch_user_get.assert_called_once_with(uid=2) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/certificate') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/certificate') async def test_bad_upload_when_cert_in_use_and_with_non_admin_role(self, client, certs_path, mocker): files = {'cert': open(str(certs_path / 'certs/test.cer'), 'rb')} - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker, is_admin=False) msg = 'Certificate with name test.cer is configured to be used, ' \ 'An `admin` role permissions required to add/overwrite.' 
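
The recurring change across these hunks is the middleware request-log level: the middleware now logs each request at debug, so tests patch middleware._logger.debug and assert the standard message. A minimal sketch of that assertion pattern, assuming the same aiohttp client fixture used by the surrounding tests (handler-side mocks and status checks are omitted here):

from unittest.mock import patch

from fledge.common.web import middleware


async def test_request_logged_at_debug(client):
    # Patch the debug method so the per-request middleware log call can be asserted.
    with patch.object(middleware._logger, 'debug') as patch_logger:
        await client.get('/fledge/user/role')
    patch_logger.assert_called_once_with('Received %s request for %s', 'GET', '/fledge/user/role')
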
@@ -335,12 +335,12 @@ async def test_bad_upload_when_cert_in_use_and_with_non_admin_role(self, client, patch_user_get.assert_called_once_with(uid=2) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/certificate') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/certificate') async def test_upload_as_admin(self, client, certs_path, mocker): files = {'key': open(str(certs_path / 'certs/fledge.key'), 'rb'), 'cert': open(str(certs_path / 'certs/fledge.cert'), 'rb')} - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) with patch.object(certificate_store, '_get_certs_dir', return_value=certs_path / 'certs'): with patch.object(certificate_store, '_find_file', return_value=[]) as patch_find_file: @@ -357,12 +357,12 @@ async def test_upload_as_admin(self, client, certs_path, mocker): patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/certificate') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/certificate') @pytest.mark.parametrize("filename", ["fledge.pem", "fledge.cert", "test.cer", "test.crt"]) async def test_upload_with_cert_only(self, client, certs_path, mocker, filename): files = {'cert': open(str(certs_path / 'certs/{}'.format(filename)), 'rb')} - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) with patch.object(certificate_store, '_get_certs_dir', return_value=certs_path / 'certs/pem'): with patch.object(certificate_store, '_find_file', return_value=[]) as patch_find_file: @@ -377,13 +377,13 @@ async def test_upload_with_cert_only(self, client, certs_path, mocker, filename) patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/certificate') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/certificate') async def test_file_upload_with_overwrite(self, client, certs_path, mocker): files = {'key': open(str(certs_path / 'certs/fledge.key'), 'rb'), 'cert': open(str(certs_path / 'certs/fledge.cert'), 'rb'), 'overwrite': '1'} - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) with patch.object(certificate_store, '_get_certs_dir', return_value=certs_path / 'certs'): with patch.object(certificate_store, '_find_file', return_value=[]) as patch_find_file: @@ -400,13 +400,13 @@ async def test_file_upload_with_overwrite(self, client, certs_path, mocker): 
patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/certificate') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/certificate') async def test_bad_extension_key_file_upload(self, client, certs_path, mocker): key_valid_extensions = ('.key', '.pem') files = {'cert': open(str(certs_path / 'certs/fledge.cert'), 'rb'), 'key': open(str(certs_path / 'certs/fledge.txt'), 'rb')} - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) resp = await client.post('/fledge/certificate', data=files, headers=self.AUTH_HEADER) assert 400 == resp.status @@ -414,12 +414,12 @@ async def test_bad_extension_key_file_upload(self, client, certs_path, mocker): patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/certificate') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/certificate') async def test_upload_with_existing_and_no_overwrite(self, client, certs_path, mocker): files = {'key': open(str(certs_path / 'certs/fledge.key'), 'rb'), 'cert': open(str(certs_path / 'certs/fledge.cert'), 'rb')} - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) with patch.object(certificate_store, '_get_certs_dir', return_value=certs_path / 'certs'): with patch.object(certificate_store, '_find_file', return_value=["v"]) as patch_file: @@ -433,7 +433,7 @@ async def test_upload_with_existing_and_no_overwrite(self, client, certs_path, m patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/certificate') + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'POST', '/fledge/certificate') @pytest.allure.feature("unit") @@ -460,11 +460,11 @@ async def auth_token_fixture(self, mocker, is_admin=True): _rv1 = asyncio.ensure_future(mock_coro(user['id'])) _rv2 = asyncio.ensure_future(mock_coro(None)) _rv3 = asyncio.ensure_future(mock_coro(user)) - patch_logger_info = mocker.patch.object(middleware._logger, 'info') + patch_logger_debug = mocker.patch.object(middleware._logger, 'debug') patch_validate_token = mocker.patch.object(User.Objects, 'validate_token', return_value=_rv1) patch_refresh_token = mocker.patch.object(User.Objects, 'refresh_token_expiry', return_value=_rv2) patch_user_get = mocker.patch.object(User.Objects, 'get', return_value=_rv3) - return patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get + return patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get @pytest.mark.parametrize("cert_name, 
actual_code, actual_reason", [ ('root.pem', 404, "Certificate with name root.pem does not exist"), @@ -473,7 +473,7 @@ async def auth_token_fixture(self, mocker, is_admin=True): async def test_bad_delete_cert_with_invalid_filename(self, client, mocker, cert_name, actual_code, actual_reason): storage_client_mock = MagicMock(StorageClientAsync) c_mgr = ConfigurationManager(storage_client_mock) - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. if sys.version_info.major == 3 and sys.version_info.minor >= 8: @@ -496,7 +496,7 @@ async def test_bad_delete_cert_with_invalid_filename(self, client, mocker, cert_ patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'DELETE', + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'DELETE', '/fledge/certificate/{}'.format(cert_name)) @pytest.mark.parametrize("cert_name, actual_code, actual_reason", [ @@ -504,7 +504,7 @@ async def test_bad_delete_cert_with_invalid_filename(self, client, mocker, cert_ "('.cert', '.cer', '.csr', '.crl', '.crt', '.der', '.json', '.key', '.pem', '.p12', '.pfx')") ]) async def test_bad_delete_cert(self, client, mocker, cert_name, actual_code, actual_reason): - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. _payload = [{'id': '1'}] @@ -519,7 +519,7 @@ async def test_bad_delete_cert(self, client, mocker, cert_name, actual_code, act patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'DELETE', + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'DELETE', '/fledge/certificate/{}'.format(cert_name)) async def test_delete_cert_if_configured_to_use(self, client, mocker): @@ -528,7 +528,7 @@ async def test_delete_cert_if_configured_to_use(self, client, mocker): cert_name = 'fledge.cert' msg = 'Certificate with name {} is configured for use, you can not delete but overwrite if required.'.format( cert_name) - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. 
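
For the authenticated variants above, auth_token_fixture patches token validation, token-expiry refresh, user lookup and the debug request log in one place and hands the patch objects back. A sketch of how a test consumes those handles; the ADMIN_USER_HEADER constant and a GET /fledge/certificate listing route are assumptions taken from the surrounding tests:

async def test_authorised_request_uses_token_fixture(self, client, mocker):
    # The fixture returns the four patch handles so each call can be asserted afterwards.
    patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = \
        await self.auth_token_fixture(mocker)
    await client.get('/fledge/certificate', headers=ADMIN_USER_HEADER)
    # An authenticated request validates the token, refreshes its expiry, resolves the
    # admin user record, and logs the request at debug level.
    patch_user_get.assert_called_once_with(uid=1)
    patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization'])
    patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization'])
    patch_logger_debug.assert_called_once_with('Received %s request for %s', 'GET', '/fledge/certificate')
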
if sys.version_info.major == 3 and sys.version_info.minor >= 8: @@ -553,7 +553,7 @@ async def test_delete_cert_if_configured_to_use(self, client, mocker): patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'DELETE', + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'DELETE', '/fledge/certificate/{}'.format(cert_name)) async def test_bad_type_delete_cert(self, client, mocker): @@ -561,7 +561,7 @@ async def test_bad_type_delete_cert(self, client, mocker): c_mgr = ConfigurationManager(storage_client_mock) cert_name = 'server.cert' msg = 'Only cert and key are allowed for the value of type param' - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. if sys.version_info.major == 3 and sys.version_info.minor >= 8: @@ -586,7 +586,7 @@ async def test_bad_type_delete_cert(self, client, mocker): patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'DELETE', + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'DELETE', '/fledge/certificate/{}'.format(cert_name)) @pytest.mark.parametrize("cert_name, param", [ @@ -599,7 +599,7 @@ async def test_bad_type_delete_cert(self, client, mocker): async def test_delete_cert_with_type(self, client, mocker, cert_name, param): storage_client_mock = MagicMock(StorageClientAsync) c_mgr = ConfigurationManager(storage_client_mock) - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. cat_info = {'certificateName': {'value': 'foo'}, 'authCertificateName': {'value': 'ca'}} @@ -626,13 +626,13 @@ async def test_delete_cert_with_type(self, client, mocker, cert_name, param): patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'DELETE', + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'DELETE', '/fledge/certificate/{}'.format(cert_name)) async def test_delete_cert(self, client, mocker, certs_path, cert_name='server.cert'): storage_client_mock = MagicMock(StorageClientAsync) c_mgr = ConfigurationManager(storage_client_mock) - patch_logger_info, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( + patch_logger_debug, patch_validate_token, patch_refresh_token, patch_user_get = await self.auth_token_fixture( mocker) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. 
if sys.version_info.major == 3 and sys.version_info.minor >= 8: @@ -662,7 +662,7 @@ async def test_delete_cert(self, client, mocker, certs_path, cert_name='server.c patch_user_get.assert_called_once_with(uid=1) patch_refresh_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) patch_validate_token.assert_called_once_with(ADMIN_USER_HEADER['Authorization']) - patch_logger_info.assert_called_once_with('Received %s request for %s', 'DELETE', + patch_logger_debug.assert_called_once_with('Received %s request for %s', 'DELETE', '/fledge/certificate/{}'.format(cert_name)) diff --git a/tests/unit/python/fledge/services/core/api/test_common_ping.py b/tests/unit/python/fledge/services/core/api/test_common_ping.py index fcd7cf2811..f957e8839e 100644 --- a/tests/unit/python/fledge/services/core/api/test_common_ping.py +++ b/tests/unit/python/fledge/services/core/api/test_common_ping.py @@ -85,7 +85,7 @@ async def mock_coro(*args, **kwargs): host_name, ip_addresses = get_machine_detail attrs = {"query_tbl_with_payload.return_value": await mock_coro()} mock_storage_client_async = MagicMock(spec=StorageClientAsync, **attrs) - with patch.object(middleware._logger, 'info') as logger_info: + with patch.object(middleware._logger, 'debug') as logger_info: with patch.object(connect, 'get_storage_async', return_value=mock_storage_client_async): with patch.object(mock_storage_client_async, 'query_tbl_with_payload', return_value=_rv) as query_patch: app = web.Application(loop=loop, middlewares=[middleware.optional_auth_middleware]) @@ -144,7 +144,7 @@ async def mock_coro(*args, **kwargs): host_name, ip_addresses = get_machine_detail mock_storage_client_async = MagicMock(StorageClientAsync) - with patch.object(middleware._logger, 'info') as logger_info: + with patch.object(middleware._logger, 'debug') as logger_info: with patch.object(connect, 'get_storage_async', return_value=mock_storage_client_async): with patch.object(mock_storage_client_async, 'query_tbl_with_payload', return_value=_rv) as query_patch: app = web.Application(loop=loop, middlewares=[middleware.optional_auth_middleware]) @@ -207,7 +207,7 @@ async def mock_get_category_item(): host_name, ip_addresses = get_machine_detail mock_storage_client_async = MagicMock(StorageClientAsync) - with patch.object(middleware._logger, 'info') as logger_info: + with patch.object(middleware._logger, 'debug') as logger_info: with patch.object(connect, 'get_storage_async', return_value=mock_storage_client_async): with patch.object(mock_storage_client_async, 'query_tbl_with_payload', return_value=_rv1) as query_patch: with patch.object(ConfigurationManager, "get_category_item", return_value=_rv2) as mock_get_cat: @@ -270,7 +270,7 @@ async def mock_get_category_item(): _rv2 = asyncio.ensure_future(mock_get_category_item()) mock_storage_client_async = MagicMock(StorageClientAsync) - with patch.object(middleware._logger, 'info') as logger_info: + with patch.object(middleware._logger, 'debug') as logger_info: with patch.object(connect, 'get_storage_async', return_value=mock_storage_client_async): with patch.object(mock_storage_client_async, 'query_tbl_with_payload', return_value=_rv1) as query_patch: with patch.object(ConfigurationManager, "get_category_item", return_value=_rv2) as mock_get_cat: @@ -319,7 +319,7 @@ def mock_coro(*args, **kwargs): host_name, ip_addresses = get_machine_detail mock_storage_client_async = MagicMock(StorageClientAsync) - with patch.object(middleware._logger, 'info') as logger_info: + with patch.object(middleware._logger, 'debug') 
as logger_info: with patch.object(connect, 'get_storage_async', return_value=mock_storage_client_async): with patch.object(mock_storage_client_async, 'query_tbl_with_payload', return_value=_rv) as query_patch: app = web.Application(loop=loop, middlewares=[middleware.optional_auth_middleware]) @@ -390,7 +390,7 @@ def mock_coro(*args, **kwargs): host_name, ip_addresses = get_machine_detail mock_storage_client_async = MagicMock(StorageClientAsync) - with patch.object(middleware._logger, 'info') as logger_info: + with patch.object(middleware._logger, 'debug') as logger_info: with patch.object(connect, 'get_storage_async', return_value=mock_storage_client_async): with patch.object(mock_storage_client_async, 'query_tbl_with_payload', return_value=_rv) as query_patch: app = web.Application(loop=loop, middlewares=[middleware.optional_auth_middleware]) @@ -460,7 +460,7 @@ async def mock_get_category_item(): host_name, ip_addresses = get_machine_detail mock_storage_client_async = MagicMock(StorageClientAsync) - with patch.object(middleware._logger, 'info') as logger_info: + with patch.object(middleware._logger, 'debug') as logger_info: with patch.object(connect, 'get_storage_async', return_value=mock_storage_client_async): with patch.object(mock_storage_client_async, 'query_tbl_with_payload', return_value=_rv1) as query_patch: with patch.object(ConfigurationManager, "get_category_item", return_value=_rv2) as mock_get_cat: @@ -535,7 +535,7 @@ async def mock_get_category_item(): _rv2 = asyncio.ensure_future(mock_get_category_item()) mock_storage_client_async = MagicMock(StorageClientAsync) - with patch.object(middleware._logger, 'info') as logger_info: + with patch.object(middleware._logger, 'debug') as logger_info: with patch.object(connect, 'get_storage_async', return_value=mock_storage_client_async): with patch.object(mock_storage_client_async, 'query_tbl_with_payload', return_value=_rv1) as query_patch: with patch.object(ConfigurationManager, "get_category_item", return_value=_rv2) as mock_get_cat: diff --git a/tests/unit/python/fledge/services/core/api/test_configuration.py b/tests/unit/python/fledge/services/core/api/test_configuration.py index 8872a0f901..92cf71a21d 100644 --- a/tests/unit/python/fledge/services/core/api/test_configuration.py +++ b/tests/unit/python/fledge/services/core/api/test_configuration.py @@ -13,11 +13,11 @@ import pytest from fledge.common.audit_logger import AuditLogger -from fledge.common.storage_client.storage_client import StorageClientAsync from fledge.common.configuration_manager import ConfigurationManager, ConfigurationManagerSingleton, _logger +from fledge.common.storage_client.storage_client import StorageClientAsync from fledge.common.web import middleware -from fledge.services.core import routes -from fledge.services.core import connect +from fledge.services.core import connect, routes +from fledge.services.core.api import configuration __author__ = "Ashish Jabble" @@ -569,8 +569,8 @@ async def async_mock(return_value): ({"key": "test", "description": "des"}, "\"'value' param required to create a category\""), ({"key": "test", "value": "val"}, "\"'description' param required to create a category\""), ({"description": "desc", "value": "val"}, "\"'key' param required to create a category\""), - ({"key":"test", "description":"test", "value": {"test1": {"type": "string", "description": "d", "default": "", - "mandatory": "true"}}}, + ({"key": "test", "description": "test", "value": + {"test1": {"type": "string", "description": "d", "default": "", "mandatory": 
"true"}}}, "For test category, A default value must be given for test1"), ({"key": "", "description": "test", "value": "val"}, "Key should not be empty"), ({"key": " ", "description": "test", "value": "val"}, "Key should not be empty") @@ -578,9 +578,10 @@ async def async_mock(return_value): async def test_create_category_bad_request(self, client, payload, message): storage_client_mock = MagicMock(StorageClientAsync) with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): - resp = await client.post('/fledge/category', data=json.dumps(payload)) - assert 400 == resp.status - assert message == resp.reason + with patch.object(_logger, 'error'): + resp = await client.post('/fledge/category', data=json.dumps(payload)) + assert 400 == resp.status + assert message == resp.reason @pytest.mark.parametrize("payload, hide_password", [ ({"key": "T1", "description": "Test"}, False), @@ -682,9 +683,11 @@ async def test_create_category_http_exception(self, client, name="test_cat", des payload = {"key": name, "description": desc, "value": info} msg = 'Something went wrong' with patch.object(connect, 'get_storage_async', side_effect=Exception(msg)): - resp = await client.post('/fledge/category', data=json.dumps(payload)) - assert 500 == resp.status - assert msg == resp.reason + with patch.object(configuration._logger, 'error') as patch_logger: + resp = await client.post('/fledge/category', data=json.dumps(payload)) + assert 500 == resp.status + assert msg == resp.reason + assert 1 == patch_logger.call_count @pytest.mark.parametrize("payload, message", [ # FIXME: keys order mismatch assertion @@ -813,9 +816,11 @@ async def test_unknown_exception_for_add_config_item(self, client): data = {"default": "d", "description": "Test description", "type": "boolean"} msg = 'Internal Server Error' with patch.object(connect, 'get_storage_async', side_effect=Exception(msg)): - resp = await client.post('/fledge/category/{}/{}'.format("blah", "blah"), data=json.dumps(data)) - assert 500 == resp.status - assert msg == resp.reason + with patch.object(configuration._logger, 'error') as patch_logger: + resp = await client.post('/fledge/category/{}/{}'.format("blah", "blah"), data=json.dumps(data)) + assert 500 == resp.status + assert msg == resp.reason + assert 1 == patch_logger.call_count async def test_get_child_category(self, client): @asyncio.coroutine @@ -972,9 +977,12 @@ async def test_update_bulk_config_exception(self, client, code, exception_name, c_mgr = ConfigurationManager(storage_client_mock) with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): with patch.object(c_mgr, 'get_category_item', side_effect=exception_name) as patch_get_cat_item: - resp = await client.put('/fledge/category/{}'.format(category_name), data=json.dumps(payload)) - assert code == resp.status - assert resp.reason is '' + with patch.object(configuration._logger, 'error') as patch_logger: + resp = await client.put('/fledge/category/{}'.format(category_name), data=json.dumps(payload)) + assert code == resp.status + assert resp.reason is '' + if code == 500: + assert 1 == patch_logger.call_count patch_get_cat_item.assert_called_once_with(category_name, config_item_name) async def test_update_bulk_config_item_not_found(self, client, category_name='rest_api'): diff --git a/tests/unit/python/fledge/services/core/api/test_filters.py b/tests/unit/python/fledge/services/core/api/test_filters.py index 5eff7d9180..08eb7c649c 100644 --- a/tests/unit/python/fledge/services/core/api/test_filters.py +++ 
b/tests/unit/python/fledge/services/core/api/test_filters.py @@ -11,14 +11,13 @@ import pytest import sys -from fledge.services.core import routes -from fledge.services.core import connect -from fledge.common.storage_client.storage_client import StorageClientAsync +from fledge.common.configuration_manager import ConfigurationManager from fledge.common.storage_client.exceptions import StorageServerError -from fledge.services.core.api import filters +from fledge.common.storage_client.storage_client import StorageClientAsync +from fledge.services.core import connect, routes +from fledge.services.core.api import filters, utils as apiutils from fledge.services.core.api.filters import _LOGGER -from fledge.common.configuration_manager import ConfigurationManager -from fledge.services.core.api import utils as apiutils +from fledge.services.core.api.plugins import common __author__ = "Ashish Jabble" __copyright__ = "Copyright (c) 2018 OSIsoft, LLC" @@ -64,13 +63,15 @@ async def get_filters(): async def test_get_filters_storage_exception(self, client): storage_client_mock = MagicMock(StorageClientAsync) with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): - with patch.object(storage_client_mock, 'query_tbl', side_effect=StorageServerError(None, None, error='something went wrong')) as query_tbl_patch: - with patch.object(_LOGGER, 'exception') as log_exc: + with patch.object(storage_client_mock, 'query_tbl', side_effect=StorageServerError( + None, None, error='something went wrong')) as query_tbl_patch: + with patch.object(_LOGGER, 'error') as patch_logger: resp = await client.get('/fledge/filter') assert 500 == resp.status assert "something went wrong" == resp.reason - assert 1 == log_exc.call_count - log_exc.assert_called_once_with('Get filters, caught exception: %s', 'something went wrong') + assert 1 == patch_logger.call_count + args, kwargs = patch_logger.call_args + assert 'Get all filters, caught storage exception: {}'.format('something went wrong') in args[0] query_tbl_patch.assert_called_once_with('filters') async def test_get_filters_exception(self, client): @@ -87,9 +88,11 @@ async def get_filters(): with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): with patch.object(storage_client_mock, 'query_tbl', return_value=_rv) as query_tbl_patch: - resp = await client.get('/fledge/filter') - assert 500 == resp.status - assert "'rows'" == resp.reason + with patch.object(_LOGGER, 'error') as patch_logger: + resp = await client.get('/fledge/filter') + assert 500 == resp.status + assert "'rows'" == resp.reason + assert 1 == patch_logger.call_count query_tbl_patch.assert_called_once_with('filters') async def test_get_filter_by_name(self, client): @@ -181,13 +184,16 @@ async def test_get_filter_by_name_storage_error(self, client): storage_client_mock = MagicMock(StorageClientAsync) cf_mgr = ConfigurationManager(storage_client_mock) with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): - with patch.object(cf_mgr, 'get_category_all_items', side_effect=StorageServerError(None, None, error='something went wrong')) as get_cat_info_patch: - with patch.object(_LOGGER, 'exception') as log_exc: + with patch.object(cf_mgr, 'get_category_all_items', side_effect=StorageServerError( + None, None, error='something went wrong')) as get_cat_info_patch: + with patch.object(_LOGGER, 'error') as patch_logger: resp = await client.get('/fledge/filter/{}'.format(filter_name)) assert 500 == resp.status assert "something went wrong" == 
resp.reason - assert 1 == log_exc.call_count - log_exc.assert_called_once_with('Get filter: %s, caught exception: %s', filter_name, 'something went wrong') + assert 1 == patch_logger.call_count + args, kwargs = patch_logger.call_args + assert 'Failed to get filter name: {}. Storage error occurred: {}'.format( + filter_name, 'something went wrong') in args[0] get_cat_info_patch.assert_called_once_with(filter_name) async def test_get_filter_by_name_type_error(self, client): @@ -208,9 +214,11 @@ async def test_get_filter_by_name_exception(self, client): cf_mgr = ConfigurationManager(storage_client_mock) with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): with patch.object(cf_mgr, 'get_category_all_items', side_effect=Exception) as get_cat_info_patch: - resp = await client.get('/fledge/filter/{}'.format(filter_name)) - assert 500 == resp.status - assert resp.reason is '' + with patch.object(_LOGGER, 'error') as patch_logger: + resp = await client.get('/fledge/filter/{}'.format(filter_name)) + assert 500 == resp.status + assert resp.reason is '' + assert 1 == patch_logger.call_count get_cat_info_patch.assert_called_once_with(filter_name) @pytest.mark.parametrize("data", [ @@ -220,12 +228,10 @@ async def test_get_filter_by_name_exception(self, client): {"blah": "blah"} ]) async def test_bad_create_filter(self, client, data): - with patch.object(_LOGGER, 'exception') as log_exc: - resp = await client.post('/fledge/filter'.format("bench"), data=json.dumps(data)) - assert 400 == resp.status - assert 'Filter name, plugin name are mandatory.' == resp.reason - assert 1 == log_exc.call_count - log_exc.assert_called_once_with('Add filter, caught exception: Filter name, plugin name are mandatory.') + msg = "Filter name, plugin name are mandatory." 
+ resp = await client.post('/fledge/filter'.format("bench"), data=json.dumps(data)) + assert 400 == resp.status + assert msg == resp.reason async def test_create_filter_value_error_1(self, client): storage_client_mock = MagicMock(StorageClientAsync) @@ -236,15 +242,13 @@ async def test_create_filter_value_error_1(self, client): _rv = await self.async_mock({"result": "test"}) else: _rv = asyncio.ensure_future(self.async_mock({"result": "test"})) - + msg = "This 'test' filter already exists" with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): with patch.object(cf_mgr, 'get_category_all_items', return_value=_rv) as get_cat_info_patch: - with patch.object(_LOGGER, 'exception') as log_exc: - resp = await client.post('/fledge/filter'.format("bench"), data=json.dumps({"name": "test", "plugin": "benchmark"})) - assert 404 == resp.status - assert "This 'test' filter already exists" == resp.reason - assert 1 == log_exc.call_count - log_exc.assert_called_once_with("Add filter, caught exception: This 'test' filter already exists") + resp = await client.post('/fledge/filter'.format("bench"), data=json.dumps( + {"name": "test", "plugin": "benchmark"})) + assert 404 == resp.status + assert msg == resp.reason get_cat_info_patch.assert_called_once_with(category_name='test') async def test_create_filter_value_error_2(self, client): @@ -257,16 +261,16 @@ async def test_create_filter_value_error_2(self, client): _rv = await self.async_mock(None) else: _rv = asyncio.ensure_future(self.async_mock(None)) - + msg = "Can not get 'plugin_info' detail from plugin '{}'".format(plugin_name) with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): with patch.object(cf_mgr, 'get_category_all_items', return_value=_rv) as get_cat_info_patch: with patch.object(apiutils, 'get_plugin_info', return_value=None) as api_utils_patch: - with patch.object(_LOGGER, 'exception') as log_exc: - resp = await client.post('/fledge/filter'.format("bench"), data=json.dumps({"name": "test", "plugin": plugin_name})) + with patch.object(common._logger, 'warning') as patch_logger: + resp = await client.post('/fledge/filter'.format("bench"), data=json.dumps( + {"name": "test", "plugin": plugin_name})) assert 404 == resp.status - assert "Can not get 'plugin_info' detail from plugin '{}'".format(plugin_name) == resp.reason - assert 1 == log_exc.call_count - log_exc.assert_called_once_with("Add filter, caught exception: Can not get 'plugin_info' detail from plugin '{}'".format(plugin_name)) + assert msg == resp.reason + assert 2 == patch_logger.call_count api_utils_patch.assert_called_once_with(plugin_name, dir='filter') get_cat_info_patch.assert_called_once_with(category_name='test') @@ -280,16 +284,19 @@ async def test_create_filter_value_error_3(self, client): _rv = await self.async_mock(None) else: _rv = asyncio.ensure_future(self.async_mock(None)) - + msg = "Loaded plugin 'python35', type 'south', doesn't match the specified one '{}', type 'filter'".format( + plugin_name) + ret_val = {"config": {'plugin': {'description': 'Python 3.5 filter plugin', 'type': 'string', + 'default': 'python35'}}, "type": "south"} with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): with patch.object(cf_mgr, 'get_category_all_items', return_value=_rv) as get_cat_info_patch: - with patch.object(apiutils, 'get_plugin_info', return_value={"config": {'plugin': {'description': 'Python 3.5 filter plugin', 'type': 'string', 'default': 'python35'}}, "type": "south"}) as api_utils_patch: 
- with patch.object(_LOGGER, 'exception') as log_exc: - resp = await client.post('/fledge/filter'.format("bench"), data=json.dumps({"name": "test", "plugin": plugin_name})) + with patch.object(apiutils, 'get_plugin_info', return_value=ret_val) as api_utils_patch: + with patch.object(common._logger, 'warning') as patch_logger: + resp = await client.post('/fledge/filter'.format("bench"), data=json.dumps( + {"name": "test", "plugin": plugin_name})) assert 404 == resp.status - assert "Loaded plugin 'python35', type 'south', doesn't match the specified one '{}', type 'filter'".format(plugin_name) == resp.reason - assert 1 == log_exc.call_count - log_exc.assert_called_once_with("Add filter, caught exception: Loaded plugin 'python35', type 'south', doesn't match the specified one '{}', type 'filter'".format(plugin_name)) + assert msg == resp.reason + assert 2 == patch_logger.call_count api_utils_patch.assert_called_once_with(plugin_name, dir='filter') get_cat_info_patch.assert_called_once_with(category_name='test') @@ -305,20 +312,24 @@ async def test_create_filter_value_error_4(self, client): else: _rv1 = asyncio.ensure_future(self.async_mock(None)) _rv2 = asyncio.ensure_future(self.async_mock({'count': 0, 'rows': []})) - + msg = "filter_config must be a JSON object" with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): with patch.object(cf_mgr, 'get_category_all_items', return_value=_rv1) as get_cat_info_patch: with patch.object(apiutils, 'get_plugin_info', return_value={"config": {'plugin': {'description': 'Python 3.5 filter plugin', 'type': 'string', 'default': 'filter'}}, "type": "filter"}) as api_utils_patch: with patch.object(storage_client_mock, 'query_tbl_with_payload', return_value=_rv2) as query_tbl_patch: with patch.object(storage_client_mock, 'insert_into_tbl', return_value=_rv1) as insert_tbl_patch: with patch.object(cf_mgr, 'create_category', return_value=_rv1) as create_cat_patch: - with patch.object(_LOGGER, 'exception') as log_exc: - resp = await client.post('/fledge/filter'.format("bench"), data=json.dumps({"name": "test", "plugin": plugin_name, "filter_config": "blah"})) + with patch.object(common._logger, 'warning') as patch_logger: + resp = await client.post('/fledge/filter'.format("bench"), data=json.dumps( + {"name": "test", "plugin": plugin_name, "filter_config": "blah"})) assert 404 == resp.status - assert "filter_config must be a JSON object" == resp.reason - assert 1 == log_exc.call_count - log_exc.assert_called_once_with("Add filter, caught exception: filter_config must be a JSON object") - create_cat_patch.assert_called_once_with(category_description="Configuration of 'test' filter for plugin 'filter'", category_name='test', category_value={'plugin': {'description': 'Python 3.5 filter plugin', 'type': 'string', 'default': 'filter'}}, keep_original_items=True) + assert msg == resp.reason + assert 2 == patch_logger.call_count + create_cat_patch.assert_called_once_with( + category_description="Configuration of 'test' filter for plugin 'filter'", + category_name='test', category_value= + {'plugin': {'description': 'Python 3.5 filter plugin', 'type': 'string', + 'default': 'filter'}}, keep_original_items=True) args, kwargs = insert_tbl_patch.call_args_list[0] assert 'filters' == args[0] assert {"name": "test", "plugin": "filter"} == json.loads(args[1]) @@ -344,15 +355,20 @@ async def test_create_filter_storage_error(self, client): with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): with patch.object(cf_mgr, 
'get_category_all_items', return_value=_rv) as get_cat_info_patch: - with patch.object(apiutils, 'get_plugin_info', return_value={"config": {'plugin': {'description': 'Python 3.5 filter plugin', 'type': 'string', 'default': 'filter'}}, "type": "filter"}) as api_utils_patch: - with patch.object(storage_client_mock, 'query_tbl_with_payload', side_effect=StorageServerError(None, None, error='something went wrong')): + with patch.object(apiutils, 'get_plugin_info', return_value={ + "config": {'plugin': {'description': 'Python 3.5 filter plugin', 'type': 'string', + 'default': 'filter'}}, "type": "filter"}) as api_utils_patch: + with patch.object(storage_client_mock, 'query_tbl_with_payload', side_effect=StorageServerError( + None, None, error='something went wrong')): with patch.object(filters, '_delete_configuration_category', return_value=_rv) as _delete_cfg_patch: - with patch.object(_LOGGER, 'exception') as log_exc: - resp = await client.post('/fledge/filter'.format("bench"), data=json.dumps({"name": name, "plugin": plugin_name})) - assert 500 == resp.status - assert 'Failed to create filter.' == resp.reason - assert 1 == log_exc.call_count - log_exc.assert_called_once_with('Failed to create filter. %s', 'something went wrong') + with patch.object(_LOGGER, 'error') as patch_logger: + with patch.object(common._logger, 'warning') as patch_logger2: + resp = await client.post('/fledge/filter'.format("bench"), data=json.dumps( + {"name": name, "plugin": plugin_name})) + assert 500 == resp.status + assert 'something went wrong' == resp.reason + assert 2 == patch_logger2.call_count + assert 1 == patch_logger.call_count args, kwargs = _delete_cfg_patch.call_args assert name == args[1] api_utils_patch.assert_called_once_with(plugin_name, dir='filter') @@ -365,12 +381,14 @@ async def test_create_filter_exception(self, client): name = 'test' with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): with patch.object(cf_mgr, 'get_category_all_items', side_effect=Exception) as get_cat_info_patch: - with patch.object(_LOGGER, 'exception') as log_exc: - resp = await client.post('/fledge/filter'.format("bench"), data=json.dumps({"name": name, "plugin": plugin_name})) + with patch.object(_LOGGER, 'error') as patch_logger: + resp = await client.post('/fledge/filter'.format("bench"), data=json.dumps( + {"name": name, "plugin": plugin_name})) assert 500 == resp.status assert resp.reason is '' - assert 1 == log_exc.call_count - log_exc.assert_called_once_with('Add filter, caught exception: %s', '') + assert 1 == patch_logger.call_count + args = patch_logger.call_args + assert 'Add filter failed.' 
== args[0][1] get_cat_info_patch.assert_called_once_with(category_name=name) async def test_create_filter(self, client): @@ -398,11 +416,14 @@ async def test_create_filter(self, client): with patch.object(storage_client_mock, 'insert_into_tbl', return_value=_rv1) as insert_tbl_patch: with patch.object(cf_mgr, 'create_category', return_value=_rv1) as create_cat_patch: with patch.object(cf_mgr, 'update_configuration_item_bulk', return_value=_rv1) as update_cfg_bulk_patch: - resp = await client.post('/fledge/filter'.format("bench"), data=json.dumps({"name": name, "plugin": plugin_name, "filter_config": {}})) - assert 200 == resp.status - r = await resp.text() - json_response = json.loads(r) - assert {'filter': name, 'description': "Configuration of 'test' filter for plugin 'filter'", 'value': {}} == json_response + with patch.object(common._logger, 'warning') as patch_logger2: + resp = await client.post('/fledge/filter'.format("bench"), data=json.dumps( + {"name": name, "plugin": plugin_name, "filter_config": {}})) + assert 200 == resp.status + r = await resp.text() + json_response = json.loads(r) + assert {'filter': name, 'description': "Configuration of 'test' filter for plugin 'filter'", 'value': {}} == json_response + assert 2 == patch_logger2.call_count update_cfg_bulk_patch.assert_called_once_with(name, {}) create_cat_patch.assert_called_once_with(category_description="Configuration of 'test' filter for plugin 'filter'", category_name='test', category_value={'plugin': {'description': 'Python 3.5 filter plugin', 'type': 'string', 'default': 'filter'}}, keep_original_items=True) args, kwargs = insert_tbl_patch.call_args_list[0] @@ -459,12 +480,13 @@ def q_result(*args): assert 200 == resp.status r = await resp.text() json_response = json.loads(r) - assert {'result': 'Filter AssetFilter deleted successfully'} == json_response + assert {'result': 'Filter AssetFilter deleted successfully.'} == json_response args, kwargs = update_tbl_patch.call_args assert 'asset_tracker' == args[0] args, kwargs = delete_cfg_patch.call_args assert filter_name == args[1] - delete_tbl_patch.assert_called_once_with('filters', '{"where": {"column": "name", "condition": "=", "value": "AssetFilter"}}') + delete_tbl_patch.assert_called_once_with( + 'filters', '{"where": {"column": "name", "condition": "=", "value": "AssetFilter"}}') async def test_delete_filter_value_error(self, client): storage_client_mock = MagicMock(StorageClientAsync) @@ -504,22 +526,24 @@ async def test_delete_filter_storage_error(self, client): storage_client_mock = MagicMock(StorageClientAsync) filter_name = "AssetFilter" with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): - with patch.object(storage_client_mock, 'query_tbl_with_payload', side_effect=StorageServerError(None, None, error='something went wrong')) as get_cat_info_patch: - with patch.object(_LOGGER, 'exception') as log_exc: + with patch.object(storage_client_mock, 'query_tbl_with_payload', side_effect=StorageServerError( + None, None, error='something went wrong')) as get_cat_info_patch: + with patch.object(_LOGGER, 'exception') as patch_logger: resp = await client.delete('/fledge/filter/{}'.format(filter_name)) assert 500 == resp.status assert "something went wrong" == resp.reason - assert 1 == log_exc.call_count - log_exc.assert_called_once_with('Delete filter: %s, caught exception: %s', filter_name, 'something went wrong') - get_cat_info_patch.assert_called_once_with('filters', '{"where": {"column": "name", "condition": "=", "value": 
"AssetFilter"}}') + assert 1 == patch_logger.call_count + patch_logger.assert_called_once_with('Delete {} filter, caught storage exception: {}'.format( + filter_name, 'something went wrong')) + get_cat_info_patch.assert_called_once_with( + 'filters', '{"where": {"column": "name", "condition": "=", "value": "AssetFilter"}}') async def test_add_filter_pipeline_type_error(self, client): - with patch.object(_LOGGER, 'exception') as log_exc: - resp = await client.put('/fledge/filter/{}/pipeline'.format("bench"), data=json.dumps({"pipeline": "AssetFilter"})) - assert 400 == resp.status - assert "Pipeline must be a list of filters or an empty value" == resp.reason - assert 1 == log_exc.call_count - log_exc.assert_called_once_with('Add filters pipeline, caught exception: %s', 'Pipeline must be a list of filters or an empty value') + msg = "Pipeline must be a list of filters or an empty value" + resp = await client.put('/fledge/filter/{}/pipeline'.format("bench"), data=json.dumps( + {"pipeline": "AssetFilter"})) + assert 400 == resp.status + assert msg == resp.reason @pytest.mark.parametrize("request_param, param, val", [ ('?append_filter=T', 'append_filter', 't'), @@ -537,11 +561,10 @@ async def test_add_filter_pipeline_type_error(self, client): ]) async def test_add_filter_pipeline_bad_request_param_val(self, client, request_param, param, val): user = "bench" - with patch.object(_LOGGER, 'exception') as log_exc: - resp = await client.put('/fledge/filter/{}/pipeline{}'.format(user, request_param), data=json.dumps({"pipeline": ["AssetFilter"]})) - assert 404 == resp.status - assert "Only 'true' and 'false' are allowed for {}. {} given.".format(param, val) == resp.reason - assert 1 == log_exc.call_count + resp = await client.put('/fledge/filter/{}/pipeline{}'.format(user, request_param), data=json.dumps( + {"pipeline": ["AssetFilter"]})) + assert 404 == resp.status + assert "Only 'true' and 'false' are allowed for {}. 
{} given.".format(param, val) == resp.reason async def test_add_filter_pipeline_value_error_1(self, client): user = "bench" @@ -553,14 +576,13 @@ async def test_add_filter_pipeline_value_error_1(self, client): _rv = await self.async_mock(None) else: _rv = asyncio.ensure_future(self.async_mock(None)) - + msg = "No such '{}' category found.".format(user) with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): with patch.object(cf_mgr, 'get_category_all_items', return_value=_rv) as get_cat_info_patch: - with patch.object(_LOGGER, 'exception') as log_exc: - resp = await client.put('/fledge/filter/{}/pipeline'.format(user), data=json.dumps({"pipeline": ["AssetFilter"]})) - assert 404 == resp.status - assert 1 == log_exc.call_count - log_exc.assert_called_once_with('Add filters pipeline, caught exception: %s', "No such '{}' category found.".format(user)) + resp = await client.put('/fledge/filter/{}/pipeline'.format(user), data=json.dumps( + {"pipeline": ["AssetFilter"]})) + assert 404 == resp.status + assert msg == resp.reason get_cat_info_patch.assert_called_once_with(category_name=user) async def test_add_filter_pipeline_value_error_2(self, client): @@ -573,14 +595,13 @@ async def test_add_filter_pipeline_value_error_2(self, client): _rv = await self.async_mock(None) else: _rv = asyncio.ensure_future(self.async_mock(None)) - + msg = "No such '{}' category found.".format(user) with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): with patch.object(cf_mgr, 'get_category_all_items', return_value=_rv) as get_cat_info_patch: - with patch.object(_LOGGER, 'exception') as log_exc: - resp = await client.put('/fledge/filter/{}/pipeline'.format(user), data=json.dumps({"pipeline": ["AssetFilter"]})) - assert 404 == resp.status - assert 1 == log_exc.call_count - log_exc.assert_called_once_with('Add filters pipeline, caught exception: %s', "No such '{}' category found.".format(user)) + resp = await client.put('/fledge/filter/{}/pipeline'.format(user), data=json.dumps( + {"pipeline": ["AssetFilter"]})) + assert 404 == resp.status + assert msg == resp.reason get_cat_info_patch.assert_called_once_with(category_name=user) async def test_add_filter_pipeline_value_error_3(self, client): @@ -598,17 +619,16 @@ async def test_add_filter_pipeline_value_error_3(self, client): else: _rv1 = asyncio.ensure_future(self.async_mock(cat_info)) _rv2 = asyncio.ensure_future(self.async_mock({'count': 1, 'rows': []})) - + msg = "No such 'AssetFilter' filter found in filters table." with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): with patch.object(cf_mgr, 'get_category_all_items', return_value=_rv1) as get_cat_info_patch: with patch.object(storage_client_mock, 'query_tbl_with_payload', return_value=_rv2) as query_tbl_patch: - with patch.object(_LOGGER, 'exception') as log_exc: - resp = await client.put('/fledge/filter/{}/pipeline'.format(user), data=json.dumps({"pipeline": ["AssetFilter"]})) - assert 404 == resp.status - assert "No such 'AssetFilter' filter found in filters table." 
== resp.reason - assert 1 == log_exc.call_count - log_exc.assert_called_once_with('Add filters pipeline, caught exception: %s', "No such 'AssetFilter' filter found in filters table.") - query_tbl_patch.assert_called_once_with('filters', '{"where": {"column": "name", "condition": "=", "value": "AssetFilter"}}') + resp = await client.put('/fledge/filter/{}/pipeline'.format(user), data=json.dumps( + {"pipeline": ["AssetFilter"]})) + assert 404 == resp.status + assert msg == resp.reason + query_tbl_patch.assert_called_once_with( + 'filters', '{"where": {"column": "name", "condition": "=", "value": "AssetFilter"}}') get_cat_info_patch.assert_called_once_with(category_name=user) async def test_add_filter_pipeline_value_error_4(self, client): @@ -632,7 +652,7 @@ async def test_add_filter_pipeline_value_error_4(self, client): _rv1 = asyncio.ensure_future(self.async_mock(cat_info)) _rv2 = asyncio.ensure_future(self.async_mock(query_tbl_payload_res)) _rv3 = asyncio.ensure_future(self.async_mock(None)) - + msg = 'No detail found for user: {} and filter: filter'.format(user) with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): with patch.object(cf_mgr, 'get_category_all_items', return_value=_rv1) as get_cat_info_patch: @@ -645,13 +665,10 @@ async def test_add_filter_pipeline_value_error_4(self, client): with patch.object(filters, '_add_child_filters', return_value=_rv3) as _add_child_patch: with patch.object(cf_mgr, 'get_category_item', return_value=_rv3) as get_cat_item_patch: - with patch.object(_LOGGER, 'exception') as log_exc: - resp = await client.put('/fledge/filter/{}/pipeline'.format(user), - data=json.dumps({"pipeline": ["AssetFilter"]})) - assert 404 == resp.status - assert 'No detail found for user: {} and filter: filter'.format(user) == resp.reason - assert 1 == log_exc.call_count - log_exc.assert_called_once_with('Add filters pipeline, caught exception: %s', 'No detail found for user: bench and filter: filter') + resp = await client.put('/fledge/filter/{}/pipeline'.format(user), + data=json.dumps({"pipeline": ["AssetFilter"]})) + assert 404 == resp.status + assert msg == resp.reason get_cat_item_patch.assert_called_once_with(user, 'filter') args, kwargs = _add_child_patch.call_args assert user == args[2] @@ -682,12 +699,11 @@ async def test_add_filter_pipeline_storage_error(self, client): with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): with patch.object(cf_mgr, 'get_category_all_items', return_value=_rv) as get_cat_info_patch: with patch.object(storage_client_mock, 'query_tbl_with_payload', side_effect=StorageServerError(None, None, error='something went wrong')): - with patch.object(_LOGGER, 'exception') as log_exc: + with patch.object(_LOGGER, 'error') as patch_logger: resp = await client.put('/fledge/filter/{}/pipeline'.format(user), data=json.dumps({"pipeline": ["AssetFilter"]})) assert 500 == resp.status assert "something went wrong" == resp.reason - assert 1 == log_exc.call_count - log_exc.assert_called_once_with('Add filters pipeline, caught exception: %s', 'something went wrong') + assert 1 == patch_logger.call_count get_cat_info_patch.assert_called_once_with(category_name=user) async def test_add_filter_pipeline(self, client): @@ -858,15 +874,12 @@ async def test_get_filter_pipeline_key_error(self, client): _rv = await self.async_mock({}) else: _rv = asyncio.ensure_future(self.async_mock({})) - + msg = "No filter pipeline exists for {}.".format(user) with patch.object(connect, 'get_storage_async', 
return_value=storage_client_mock): with patch.object(cf_mgr, 'get_category_all_items', return_value=_rv) as get_cat_info_patch: - with patch.object(_LOGGER, 'info') as log_exc: - resp = await client.get('/fledge/filter/{}/pipeline'.format(user)) - assert 404 == resp.status - assert "No filter pipeline exists for {}".format(user) == resp.reason - assert 1 == log_exc.call_count - log_exc.assert_called_once_with('No filter pipeline exists for {}'.format(user)) + resp = await client.get('/fledge/filter/{}/pipeline'.format(user)) + assert 404 == resp.status + assert msg == resp.reason get_cat_info_patch.assert_called_once_with(category_name=user) async def test_get_filter_pipeline_storage_error(self, client): @@ -874,13 +887,16 @@ async def test_get_filter_pipeline_storage_error(self, client): cf_mgr = ConfigurationManager(storage_client_mock) user = "Random" with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): - with patch.object(cf_mgr, 'get_category_all_items', side_effect=StorageServerError(None, None, error='something went wrong')) as get_cat_info_patch: - with patch.object(_LOGGER, 'exception') as log_exc: + with patch.object(cf_mgr, 'get_category_all_items', side_effect=StorageServerError( + None, None, error='something went wrong')) as get_cat_info_patch: + with patch.object(_LOGGER, 'error') as patch_logger: resp = await client.get('/fledge/filter/{}/pipeline'.format(user)) assert 500 == resp.status assert "something went wrong" == resp.reason - assert 1 == log_exc.call_count - log_exc.assert_called_once_with('Get pipeline: %s, caught exception: %s', user, 'something went wrong') + assert 1 == patch_logger.call_count + patch_logger.assert_called_once_with( + 'Failed to delete filter pipeline {}. Storage error occurred: {}'.format( + user, 'something went wrong'), exc_info=True) get_cat_info_patch.assert_called_once_with(category_name=user) async def test_get_filter_pipeline_exception(self, client): @@ -889,9 +905,11 @@ async def test_get_filter_pipeline_exception(self, client): cf_mgr = ConfigurationManager(storage_client_mock) with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): with patch.object(cf_mgr, 'get_category_all_items', side_effect=Exception) as get_cat_info_patch: - resp = await client.get('/fledge/filter/{}/pipeline'.format(user)) - assert 500 == resp.status - assert resp.reason is '' + with patch.object(_LOGGER, 'error') as patch_logger: + resp = await client.get('/fledge/filter/{}/pipeline'.format(user)) + assert 500 == resp.status + assert resp.reason is '' + assert 1 == patch_logger.call_count get_cat_info_patch.assert_called_once_with(category_name=user) @pytest.mark.skip(reason='Incomplete') diff --git a/tests/unit/python/fledge/services/core/api/test_package_log.py b/tests/unit/python/fledge/services/core/api/test_package_log.py index 28fa6b2719..1bdf93a4d2 100644 --- a/tests/unit/python/fledge/services/core/api/test_package_log.py +++ b/tests/unit/python/fledge/services/core/api/test_package_log.py @@ -43,8 +43,17 @@ def logs_path(self): return "{}/logs".format(pathlib.Path(__file__).parent) async def test_get_logs(self, client, logs_path): - files = ["190801-13-21-56.log", "190801-13-18-02-fledge-north-httpc-install.log", - "190801-14-55-25-fledge-south-sinusoid-install.log", "191024-04-21-56-list.log"] + files = ["190801-13-21-56.log", + "190801-13-18-02-fledge-north-httpc-install.log", + "190801-14-55-25-fledge-south-sinusoid-install.log", + "191024-04-21-56-list.log", + 
"230619-10-20-31-fledge-south-http-south-remove.log", + "230619-10-17-36-fledge-south-s2opcua-update.log", + "trace.log", + "20230609_093006_Trace_00000.log", + "trace.txt", + "syslog" + ] with patch.object(package_log, '_get_logs_dir', side_effect=[logs_path]): with patch('os.walk') as mockwalk: mockwalk.return_value = [(str(logs_path), [], files)] @@ -53,19 +62,39 @@ async def test_get_logs(self, client, logs_path): res = await resp.text() jdict = json.loads(res) logs = jdict["logs"] - assert 4 == len(logs) - assert files[0] == logs[0]['filename'] - assert "2019-08-01 13:21:56" == logs[0]['timestamp'] - assert "" == logs[0]['name'] - assert files[1] == logs[1]['filename'] - assert "2019-08-01 13:18:02" == logs[1]['timestamp'] - assert "fledge-north-httpc-install" == logs[1]['name'] - assert files[2] == logs[2]['filename'] - assert "2019-08-01 14:55:25" == logs[2]['timestamp'] - assert "fledge-south-sinusoid-install" == logs[2]['name'] - assert files[3] == logs[3]['filename'] - assert "2019-10-24 04:21:56" == logs[3]['timestamp'] - assert "list" == logs[3]['name'] + assert len(files) - 2 == len(logs) + obj = logs[0] + assert files[0] == obj['filename'] + assert "2019-08-01 13:21:56" == obj['timestamp'] + assert "190801-13-21-56" == obj['name'] + obj = logs[1] + assert files[1] == obj['filename'] + assert "2019-08-01 13:18:02" == obj['timestamp'] + assert "fledge-north-httpc-install" == obj['name'] + obj = logs[2] + assert files[2] == obj['filename'] + assert "2019-08-01 14:55:25" == obj['timestamp'] + assert "fledge-south-sinusoid-install" == obj['name'] + obj = logs[3] + assert files[3] == obj['filename'] + assert "2019-10-24 04:21:56" == obj['timestamp'] + assert "list" == obj['name'] + obj = logs[4] + assert files[4] == obj['filename'] + assert "2023-06-19 10:20:31" == obj['timestamp'] + assert "fledge-south-http-south-remove" == obj['name'] + obj = logs[5] + assert files[5] == obj['filename'] + assert "2023-06-19 10:17:36" == obj['timestamp'] + assert "fledge-south-s2opcua-update" == obj['name'] + obj = logs[6] + assert files[6] == obj['filename'] + assert len(obj['timestamp']) > 0 + assert "trace" == obj['name'] + obj = logs[7] + assert files[7] == obj['filename'] + assert len(obj['timestamp']) > 0 + assert "20230609_093006_Trace_00000" == obj['name'] mockwalk.assert_called_once_with(logs_path) async def test_get_log_by_name_with_invalid_extension(self, client): diff --git a/tests/unit/python/fledge/services/core/api/test_scheduler_api.py b/tests/unit/python/fledge/services/core/api/test_scheduler_api.py index 4425d8e0b4..ea03613d73 100644 --- a/tests/unit/python/fledge/services/core/api/test_scheduler_api.py +++ b/tests/unit/python/fledge/services/core/api/test_scheduler_api.py @@ -7,11 +7,13 @@ import asyncio import json -from unittest.mock import MagicMock, patch, call from datetime import timedelta, datetime +from unittest.mock import MagicMock, patch, call +from uuid import UUID + +import sys import uuid import pytest -import sys from aiohttp import web from fledge.services.core import routes @@ -586,7 +588,7 @@ async def mock_schedules(): @pytest.mark.parametrize("request_data, expected_response", [ ({"name": "new"}, {'schedule': {'id': '{}'.format(_random_uuid), 'time': 0, 'processName': 'bar', 'repeat': 30.0, - 'exclusive': True, 'enabled': True, 'type': 'STARTUP', 'day': None, 'name': 'new'}}), + 'exclusive': True, 'enabled': True, 'type': 'INTERVAL', 'day': None, 'name': 'new'}}) ]) async def test_update_schedule(self, client, request_data, expected_response): async def 
mock_coro(): @@ -618,13 +620,13 @@ async def mock_schedules(): return [schedule1, schedule2, schedule3] async def mock_schedule(*args): - schedule = StartUpSchedule() + schedule = IntervalSchedule() schedule.schedule_id = self._random_uuid schedule.exclusive = True schedule.enabled = True schedule.process_name = "bar" schedule.repeat = timedelta(seconds=30) - schedule.time = None + schedule.time = 0 schedule.day = None schedule.name = "foo" if args[0] == 1 else "new" return schedule @@ -659,7 +661,7 @@ async def mock_schedule(*args): assert 2 == patch_get_schedule.call_count assert call(uuid.UUID(str(self._random_uuid))) == patch_get_schedule.call_args arguments, kwargs = patch_save_schedule.call_args - assert isinstance(arguments[0], StartUpSchedule) + assert isinstance(arguments[0], IntervalSchedule) patch_get_schedules.assert_called_once_with() async def test_update_schedule_bad_param(self, client): @@ -671,6 +673,131 @@ async def test_update_schedule_bad_param(self, client): json_response = json.loads(result) assert {'message': error_msg} == json_response + @pytest.mark.parametrize("payload, status_code, message", [ + ({"name": "Updated"}, 400, "South Service is a STARTUP schedule type and cannot be renamed."), + ({"type": 3}, 400, "South Service is a STARTUP schedule type and cannot be changed its type."), + ({"name": "Updated", "type": 3}, 400, "South Service is a STARTUP schedule type and cannot be renamed."), + ({"name": "Updated", "enabled": False}, 400, "South Service is a STARTUP schedule type and cannot be renamed."), + ({"type": 4, "enabled": False}, 400, "South Service is a STARTUP schedule type and cannot be changed its type.") + ]) + async def test_bad_update_startup_schedule(self, client, payload, status_code, message): + uuid = "5affb5d1-96bb-4334-96ea-f91904cacc9b" + + async def mock_schedule(): + schedule = StartUpSchedule() + schedule.schedule_id = self._random_uuid + schedule.exclusive = True + schedule.enabled = True + schedule.process_name = "south_c" + schedule.repeat = 0 + schedule.time = 0 + schedule.day = None + schedule.name = "South Service" + return schedule + + _rv1 = await mock_schedule() if sys.version_info.major == 3 and sys.version_info.minor >= 8 \ + else asyncio.ensure_future(mock_schedule()) + with patch.object(server.Server.scheduler, 'get_schedule', return_value=_rv1) as patch_get_schedule: + resp = await client.put('/fledge/schedule/{}'.format(uuid), data=json.dumps(payload)) + assert status_code == resp.status + assert message == resp.reason + patch_get_schedule.assert_called_once_with(UUID(uuid)) + + @pytest.mark.parametrize("payload", [ + ({"enabled": True}), + ({"enabled": False}), + ({"exclusive": False}), + ({"exclusive": True}), + ({"exclusive": True, "enabled": False}), + ({"exclusive": False, "enabled": True}), + ({"exclusive": False, "enabled": False}), + ({"exclusive": True, "enabled": True}), + ]) + async def test_good_update_startup_schedule(self, client, payload): + startup_uuid = "5affb5d1-96bb-4334-96ea-f91904cacc9b" + + async def mock_coro(): + return "" + + async def mock_schedules(): + schedule1 = ManualSchedule() + schedule1.schedule_id = self._random_uuid + schedule1.exclusive = True + schedule1.enabled = True + schedule1.name = "purge" + schedule1.process_name = "purge" + + schedule2 = StartUpSchedule() + schedule2.schedule_id = startup_uuid + schedule2.exclusive = True + schedule2.enabled = True + schedule2.name = "South Service" + schedule2.process_name = "south_c" + schedule2.repeat = 0 + schedule2.time = 0 + schedule2.day 
= None + + schedule3 = IntervalSchedule() + schedule3.schedule_id = self._random_uuid + schedule3.repeat = timedelta(seconds=15) + schedule3.exclusive = True + schedule3.enabled = True + schedule3.name = "stats collection" + schedule3.process_name = "stats collector" + + return [schedule1, schedule2, schedule3] + + async def mock_schedule(): + sch = await mock_schedules() + return sch[1] + + async def final_schedule(): + schedule = StartUpSchedule() + schedule.schedule_id = startup_uuid + schedule.exclusive = payload['exclusive'] if 'exclusive' in payload else True + schedule.enabled = payload['enabled'] if 'enabled' in payload else True + schedule.name = "South Service" + schedule.process_name = "south_c" + schedule.repeat = 0 + schedule.time = 0 + schedule.day = None + return schedule + + storage_client_mock = MagicMock(StorageClientAsync) + response = {'rows': [{'name': 'SCH'}], 'count': 1} + # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. + if sys.version_info.major == 3 and sys.version_info.minor >= 8: + _rv0 = await mock_coro_response(response) + _rv1 = await mock_schedule() + _rv11 = await final_schedule() + _rv2 = await mock_coro() + _rv3 = await mock_schedules() + else: + _rv0 = asyncio.ensure_future(mock_coro_response(response)) + _rv1 = asyncio.ensure_future(mock_schedule()) + _rv11 = asyncio.ensure_future(final_schedule()) + _rv2 = asyncio.ensure_future(mock_coro()) + _rv3 = asyncio.ensure_future(mock_schedules()) + + with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): + with patch.object(storage_client_mock, 'query_tbl_with_payload', return_value=_rv0): + with patch.object(server.Server.scheduler, 'get_schedule', side_effect=[_rv1, _rv11]): + with patch.object(server.Server.scheduler, 'save_schedule', return_value=_rv2 + ) as patch_save_schedule: + with patch.object(server.Server.scheduler, 'get_schedules', return_value=_rv3 + ) as patch_get_schedules: + resp = await client.put('/fledge/schedule/{}'.format(startup_uuid), data=json.dumps(payload)) + assert 200 == resp.status + result = await resp.text() + json_response = json.loads(result) + if 'exclusive' in payload: + assert payload['exclusive'] == json_response['schedule']['exclusive'] + if 'enabled' in payload: + assert payload['enabled'] == json_response['schedule']['enabled'] + assert 1 == patch_get_schedules.call_count + arguments, kwargs = patch_save_schedule.call_args + assert isinstance(arguments[0], StartUpSchedule) + async def test_update_schedule_data_not_exist(self, client): async def mock_coro(): return "" @@ -715,14 +842,14 @@ async def mock_coro(): ]) async def test_update_schedule_bad_data(self, client, request_data, response_code, error_message, storage_return): async def mock_coro(): - schedule = StartUpSchedule() + schedule = IntervalSchedule() schedule.schedule_id = self._random_uuid schedule.exclusive = True schedule.enabled = True schedule.name = "foo" schedule.process_name = "bar" schedule.repeat = timedelta(seconds=30) - schedule.time = None + schedule.time = 0 schedule.day = None return schedule @@ -775,13 +902,13 @@ async def mock_schedules(): return [schedule1, schedule2] async def mock_schedule(*args): - schedule = StartUpSchedule() + schedule = ManualSchedule() schedule.schedule_id = self._random_uuid schedule.exclusive = True schedule.enabled = True schedule.process_name = "bar" - schedule.repeat = timedelta(seconds=30) - schedule.time = None + schedule.repeat = 0 + schedule.time = 0 schedule.day = None schedule.name = 
"foo" if args[0] == 1 else "new" return schedule diff --git a/tests/unit/python/fledge/services/core/api/test_service.py b/tests/unit/python/fledge/services/core/api/test_service.py index 794cbd03f5..767954568b 100644 --- a/tests/unit/python/fledge/services/core/api/test_service.py +++ b/tests/unit/python/fledge/services/core/api/test_service.py @@ -266,7 +266,7 @@ def q_result(*arg): _rv = asyncio.ensure_future(self.async_mock(None)) with patch.object(common, 'load_and_fetch_python_plugin_info', side_effect=[mock_plugin_info]): - with patch.object(service._logger, 'exception') as ex_logger: + with patch.object(service._logger, 'error') as patch_logger: with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): with patch.object(c_mgr, 'get_category_all_items', return_value=_rv) as patch_get_cat_info: @@ -284,7 +284,7 @@ def q_result(*arg): 'value': '[\"services/south_c\"]'}} } == p2 patch_get_cat_info.assert_called_once_with(category_name=data['name']) - assert 1 == ex_logger.call_count + assert 1 == patch_logger.call_count async def test_dupe_category_name_add_service(self, client): mock_plugin_info = { @@ -1233,7 +1233,7 @@ def q_result(*arg): async def test_bad_type_update_package(self, client, param): resp = await client.put('/fledge/service/{}/name/update'.format(param), data=None) assert 400 == resp.status - assert "Invalid service type. Must be 'notification'" == resp.reason + assert "Invalid service type." == resp.reason async def test_bad_update_package(self, client, _type="notification", name="notification"): svc_list = ["storage", "south"] diff --git a/tests/unit/python/fledge/services/core/api/test_statistics_api.py b/tests/unit/python/fledge/services/core/api/test_statistics_api.py index 6b4bd6f338..fe93989a48 100644 --- a/tests/unit/python/fledge/services/core/api/test_statistics_api.py +++ b/tests/unit/python/fledge/services/core/api/test_statistics_api.py @@ -492,39 +492,42 @@ async def test_bad_get_statistics_rate(self, client, params, msg): assert 400 == resp.status assert msg == resp.reason - async def test_get_statistics_rate(self, client, params='?periods=1,5&statistics=readings'): - output = {'rates': {'readings': {'1': 120.52585669781932, '5': 120.52585669781932}}} - p1 = {'where': {'value': 'stats collector', 'condition': '=', 'column': 'process_name'}, - 'return': ['schedule_interval']} - p2 = {"return": ["key"], "aggregate": [{"operation": "sum", "column": "value"}, - {"operation": "count", "column": "value"}], - "where": {"column": "history_ts", "condition": ">=", "value": "1590126369.123255", - "and": {"column": "key", "condition": "=", "value": "READINGS"}}, "group": "key"} - p3 = {"return": ["key"], "aggregate": [{"operation": "sum", "column": "value"}, - {"operation": "count", "column": "value"}], - "where": {"column": "history_ts", "condition": ">=", "value": "1590126369.123255", - "and": {"column": "key", "condition": "=", "value": "READINGS"}}, "group": "key"} - - @asyncio.coroutine - def q_result(*args): - table = args[0] - payload = args[1] - - if table == 'schedules': - assert p1 == json.loads(payload) - return {"rows": [{"schedule_interval": "00:00:15"}]} - - if table == 'statistics_history': - # TODO: datetime patch required which is a bit tricky - # assert p2 == json.loads(payload) - return {"rows": [{'sum_value': 96722, 'count_value': 3210, "key": "READINGS"}], "count": 1} + async def test_get_statistics_rate(self, client, params='?periods=1,5&statistics=READINGS'): + output = {'rates': {'READINGS': {'1': 45.0, '5': 9.0}}} + 
p1 = ({"where": {"value": "stats collector", "condition": "=", "column": "process_name"}, + "return": ["schedule_interval"]}) + p2 = {"return": ["value"], "where": {"column": "key", "condition": "=", "value": "READINGS"}, + "sort": {"column": "history_ts", "direction": "desc"}, "limit": 4} + p3 = {"return": ["value"], "where": {"column": "key", "condition": "=", "value": "READINGS"}, + "sort": {"column": "history_ts", "direction": "desc"}, "limit": 20} + + async def async_mock(return_value): + return return_value + + storage_rows = {"rows": [{"value": 15}, {"value": 10}, {"value": 5}, {"value": 15}], "count": 4} + if sys.version_info.major == 3 and sys.version_info.minor >= 8: + _rv1 = await async_mock({"rows": [{"schedule_interval": "00:00:15"}]}) + _rv2 = await async_mock(storage_rows) + else: + _rv1 = asyncio.ensure_future(async_mock({"rows": [{"schedule_interval": "00:00:15"}]})) + _rv2 = asyncio.ensure_future(async_mock(storage_rows)) mock_async_storage_client = MagicMock(StorageClientAsync) with patch.object(connect, 'get_storage_async', return_value=mock_async_storage_client): - with patch.object(mock_async_storage_client, 'query_tbl_with_payload', side_effect=q_result) as query_patch: + with patch.object(mock_async_storage_client, 'query_tbl_with_payload', + side_effect=[_rv1, _rv2, _rv2]) as query_patch: resp = await client.get("/fledge/statistics/rate{}".format(params)) assert 200 == resp.status r = await resp.text() assert output == json.loads(r) assert query_patch.called assert 3 == query_patch.call_count + args, _ = query_patch.call_args_list[0] + assert 'schedules' == args[0] + assert p1 == json.loads(args[1]) + args, _ = query_patch.call_args_list[1] + assert 'statistics_history' == args[0] + assert p2 == json.loads(args[1]) + args, _ = query_patch.call_args_list[2] + assert 'statistics_history' == args[0] + assert p3 == json.loads(args[1]) diff --git a/tests/unit/python/fledge/services/core/api/test_support.py b/tests/unit/python/fledge/services/core/api/test_support.py index 2257a81709..4b60a8d4be 100644 --- a/tests/unit/python/fledge/services/core/api/test_support.py +++ b/tests/unit/python/fledge/services/core/api/test_support.py @@ -133,11 +133,16 @@ async def mock_build(): assert {"bundle created": "support-180301-13-35-23.tar.gz"} == jdict async def test_create_support_bundle_exception(self, client): + msg = "Failed to create support bundle." with patch.object(SupportBuilder, "__init__", return_value=None): with patch.object(SupportBuilder, "build", side_effect=RuntimeError("blah")): - resp = await client.post('/fledge/support') - assert 500 == resp.status - assert "Support bundle could not be created. 
blah" == resp.reason + with patch.object(support._logger, "error") as patch_logger: + resp = await client.post('/fledge/support') + assert 500 == resp.status + assert msg == resp.reason + assert 1 == patch_logger.call_count + args = patch_logger.call_args + assert msg == args[0][1] async def test_get_syslog_entries_all_ok(self, client): def mock_syslog(): @@ -253,12 +258,14 @@ async def test_bad_limit_and_offset_in_get_syslog_entries(self, client, param, m async def test_get_syslog_entries_cmd_exception(self, client): msg = 'Internal Server Error' with patch.object(subprocess, "Popen", side_effect=Exception(msg)): - resp = await client.get('/fledge/syslog') - assert 500 == resp.status - assert msg == resp.reason - res = await resp.text() - jdict = json.loads(res) - assert {"message": msg} == jdict + with patch.object(support._logger, "error") as patch_logger: + resp = await client.get('/fledge/syslog') + assert 500 == resp.status + assert msg == resp.reason + res = await resp.text() + jdict = json.loads(res) + assert {"message": msg} == jdict + assert 1 == patch_logger.call_count async def test_get_syslog_entries_from_name(self, client): def mock_syslog(): diff --git a/tests/unit/python/fledge/services/core/api/test_task.py b/tests/unit/python/fledge/services/core/api/test_task.py index 5e765cf217..4f4345aa2f 100644 --- a/tests/unit/python/fledge/services/core/api/test_task.py +++ b/tests/unit/python/fledge/services/core/api/test_task.py @@ -22,7 +22,7 @@ from fledge.common.configuration_manager import ConfigurationManager from fledge.services.core.api import task from fledge.services.core.api.plugins import common -from fledge.services.core.api.service import _logger +from fledge.services.core.api.task import _logger __author__ = "Amarendra K Sinha" __copyright__ = "Copyright (c) 2017 OSIsoft, LLC" @@ -107,7 +107,7 @@ def q_result(*arg): with patch.object(common, 'load_and_fetch_python_plugin_info', side_effect=[mock_plugin_info]): with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): - with patch.object(_logger, 'exception') as ex_logger: + with patch.object(_logger, 'error') as patch_logger: with patch.object(c_mgr, 'get_category_all_items', return_value=_rv) as patch_get_cat_info: with patch.object(storage_client_mock, 'query_tbl_with_payload', side_effect=q_result): @@ -115,8 +115,8 @@ def q_result(*arg): resp = await client.post('/fledge/scheduled/task', data=json.dumps(data)) assert 500 == resp.status assert 'Failed to create north instance.' 
== resp.reason - assert 1 == ex_logger.call_count patch_get_cat_info.assert_called_once_with(category_name=data['name']) + assert 1 == patch_logger.call_count async def test_dupe_category_name_add_task(self, client): @@ -304,20 +304,11 @@ async def q_result(*arg): assert p['script'] == '["tasks/north_c"]' patch_get_cat_info.assert_called_once_with(category_name=data['name']) - @pytest.mark.parametrize( - "expected_count," - "expected_http_code," - "expected_message", - [ - ( 1, 400, '400: Unable to reuse name north bound, already used by a previous task.'), - (10, 400, '400: Unable to reuse name north bound, already used by a previous task.') - ] - ) - async def test_add_task_twice(self, - client, - expected_count, - expected_http_code, - expected_message): + @pytest.mark.parametrize("expected_count, expected_http_code, expected_message", [ + (1, 400, '400: Unable to reuse name north bound, already used by a previous task.'), + (10, 400, '400: Unable to reuse name north bound, already used by a previous task.') + ]) + async def test_add_task_twice(self, client, expected_count, expected_http_code, expected_message): @asyncio.coroutine def q_result(*arg): @@ -351,15 +342,16 @@ def q_result(*arg): } storage_client_mock = MagicMock(StorageClientAsync) - with patch.object(_logger, 'exception') as ex_logger: + with patch.object(_logger, 'warning') as patch_logger: with patch.object(common, 'load_and_fetch_python_plugin_info', side_effect=[mock_plugin_info]): with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): - with patch.object(storage_client_mock, 'query_tbl_with_payload', side_effect=q_result): - resp = await client.post('/fledge/scheduled/task', data=json.dumps(data)) - result = await resp.text() - assert resp.status == expected_http_code - assert result == expected_message - assert 1 == ex_logger.call_count + with patch.object(storage_client_mock, 'query_tbl_with_payload', side_effect=q_result): + resp = await client.post('/fledge/scheduled/task', data=json.dumps(data)) + result = await resp.text() + assert resp.status == expected_http_code + assert result == expected_message + print(expected_message) + assert 1 == patch_logger.call_count async def test_add_task_with_config(self, client): async def async_mock_get_schedule(): @@ -580,9 +572,13 @@ async def test_delete_task_exception(self, mocker, client): mocker.patch.object(connect, 'get_storage_async') mocker.patch.object(task, "get_schedule", side_effect=Exception) - resp = await client.delete("/fledge/scheduled/task/Test") - assert 500 == resp.status - assert resp.reason is '' + with patch.object(_logger, 'error') as patch_logger: + resp = await client.delete("/fledge/scheduled/task/Test") + assert 500 == resp.status + assert resp.reason is '' + assert 1 == patch_logger.call_count + args = patch_logger.call_args + assert 'Failed to delete Test north task.' 
== args[0][1] async def mock_bad_result(): return {"count": 0, "rows": []} diff --git a/tests/unit/python/fledge/services/core/interest_registry/test_change_callback.py b/tests/unit/python/fledge/services/core/interest_registry/test_change_callback.py index c3666f0021..c984f87573 100644 --- a/tests/unit/python/fledge/services/core/interest_registry/test_change_callback.py +++ b/tests/unit/python/fledge/services/core/interest_registry/test_change_callback.py @@ -257,9 +257,11 @@ async def async_mock(return_value): with patch.object(ConfigurationManager, 'get_category_all_items', return_value=_rv) as cm_get_patch: with patch.object(aiohttp.ClientSession, 'post', side_effect=Exception) as post_patch: - with patch.object(cb._LOGGER, 'exception') as exception_patch: + with patch.object(cb._LOGGER, 'exception') as patch_logger: await cb.run('catname1') - exception_patch.assert_called_once_with( - 'Unable to notify microservice with uuid %s due to exception: %s', s_id_1, '') - post_patch.assert_has_calls([call('http://saddress1:1/fledge/change', data='{"category": "catname1", "items": null}', headers={'content-type': 'application/json'})]) + args = patch_logger.call_args + assert 'Unable to notify microservice with uuid {}'.format(s_id_1) == args[0][1] + post_patch.assert_has_calls( + [call('http://saddress1:1/fledge/change', data='{"category": "catname1", "items": null}', + headers={'content-type': 'application/json'})]) cm_get_patch.assert_called_once_with('catname1') diff --git a/tests/unit/python/fledge/services/core/scheduler/test_scheduler.py b/tests/unit/python/fledge/services/core/scheduler/test_scheduler.py index 7c9ab84756..2d97fea859 100644 --- a/tests/unit/python/fledge/services/core/scheduler/test_scheduler.py +++ b/tests/unit/python/fledge/services/core/scheduler/test_scheduler.py @@ -6,6 +6,7 @@ import asyncio import datetime +import logging import uuid import time import json @@ -28,6 +29,7 @@ async def mock_task(): return "" + async def mock_process(): m = MagicMock() m.pid = 9999 @@ -38,6 +40,7 @@ async def mock_process(): @pytest.allure.feature("unit") @pytest.allure.story("scheduler") class TestScheduler: + async def scheduler_fixture(self, mocker): # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. 
if sys.version_info.major == 3 and sys.version_info.minor >= 8: @@ -46,6 +49,7 @@ async def scheduler_fixture(self, mocker): _rv = asyncio.ensure_future(mock_process()) scheduler = Scheduler() + scheduler._logger.level = logging.INFO scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') @@ -253,6 +257,7 @@ async def test__check_schedules(self, mocker): # TODO: Mandatory - Add negative tests for full code coverage # GIVEN scheduler = Scheduler() + scheduler._logger.level = logging.INFO scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, "info") @@ -605,10 +610,9 @@ async def test_stop(self, mocker): calls = [call('Processing stop request'), call('Stopped')] log_info.assert_has_calls(calls, any_order=True) - # TODO: Find why these exceptions are being raised despite mocking _purge_tasks_task, _scheduler_loop_task - calls = [call('An exception was raised by Scheduler._purge_tasks %s', "object MagicMock can't be used in 'await' expression"), - call('An exception was raised by Scheduler._scheduler_loop %s', "object MagicMock can't be used in 'await' expression")] - log_exception.assert_has_calls(calls) + # FIXME: Find why exception is being raised despite mocking _scheduler_loop_task + args = log_exception.call_args + assert 'An exception was raised by Scheduler._scheduler_loop' == args[0][1] @pytest.mark.asyncio async def test_get_scheduled_processes(self, mocker): @@ -754,7 +758,6 @@ def mock_coro(): audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) first_task = mocker.patch.object(scheduler, '_schedule_first_task') resume_sch = mocker.patch.object(scheduler, '_resume_check_schedules') - log_info = mocker.patch.object(scheduler._logger, "info") enable_schedule = mocker.patch.object(scheduler, "enable_schedule", return_value=mock_coro()) disable_schedule = mocker.patch.object(scheduler, "disable_schedule", return_value=mock_coro()) @@ -804,7 +807,6 @@ def mock_coro(): audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) first_task = mocker.patch.object(scheduler, '_schedule_first_task') resume_sch = mocker.patch.object(scheduler, '_resume_check_schedules') - log_info = mocker.patch.object(scheduler._logger, "info") schedule_id = uuid.UUID("2b614d26-760f-11e7-b5a5-be2e44b06b34") # OMF to PI North schedule_row = scheduler._ScheduleRow( id=schedule_id, @@ -828,9 +830,12 @@ def mock_coro(): # THEN assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) assert 1 == audit_logger.call_count - calls = [call('SCHCH', {'schedule': {'name': 'Test Schedule', 'enabled': True, 'repeat': 30.0, - 'exclusive': False, 'day': 1, 'time': '0:0:0', - 'processName': 'TestProcess', 'type': Schedule.Type.TIMED}})] + + new = {'schedule': {'name': 'Test Schedule', 'enabled': True, 'repeat': 30.0, 'exclusive': False, 'day': 1, + 'time': '0:0:0', 'processName': 'TestProcess', 'type': Schedule.Type.TIMED}} + old = {'old_schedule': {'enabled': True, 'exclusive': True, 'name': 'OMF to PI north', + 'processName': 'North Readings to PI', 'repeat': 30.0, 'type': Schedule.Type.INTERVAL}} + calls = [call('SCHCH', {**new, **old})] 
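            # The expected audit payload is assembled with dict unpacking: {**new, **old}
            # merges the 'schedule' (updated values) and 'old_schedule' (previous values)
            # entries into a single dict, matching the SCHCH audit entry the scheduler now
            # writes with both the new and the old state. A small sketch of the merge
            # semantics (the keys are disjoint, so neither entry is overwritten):
            #
            #     merged = {**new, **old}
            #     assert set(merged) == {'schedule', 'old_schedule'}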
audit_logger.assert_has_calls(calls, any_order=True) assert 1 == first_task.call_count assert 1 == resume_sch.call_count @@ -848,7 +853,6 @@ def mock_coro(): audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) first_task = mocker.patch.object(scheduler, '_schedule_first_task') resume_sch = mocker.patch.object(scheduler, '_resume_check_schedules') - log_info = mocker.patch.object(scheduler._logger, "info") schedule_id = uuid.UUID("2b614d26-760f-11e7-b5a5-be2e44b06b34") # OMF to PI North schedule_row = scheduler._ScheduleRow( id=schedule_id, @@ -872,9 +876,11 @@ def mock_coro(): # THEN assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) assert 1 == audit_logger.call_count - calls = [call('SCHCH', {'schedule': {'name': 'Test Schedule', 'enabled': True, 'repeat': 30.0, - 'exclusive': False, 'day': 1, 'time': '0:0:0', - 'processName': 'TestProcess', 'type': Schedule.Type.TIMED}})] + new = {'schedule': {'name': 'Test Schedule', 'enabled': True, 'repeat': 30.0, 'exclusive': False, 'day': 1, + 'time': '0:0:0', 'processName': 'TestProcess', 'type': Schedule.Type.TIMED}} + old = {'old_schedule': {'enabled': True, 'exclusive': True, 'name': 'OMF to PI north', + 'processName': 'North Readings to PI', 'repeat': 30.0, 'type': Schedule.Type.INTERVAL}} + calls = [call('SCHCH', {**new, **old})] audit_logger.assert_has_calls(calls, any_order=True) assert 1 == first_task.call_count assert 1 == resume_sch.call_count @@ -981,9 +987,13 @@ async def test_disable_schedule(self, mocker): '2b614d26-760f-11e7-b5a5-be2e44b06b34', 'North Readings to PI')] log_info.assert_has_calls(calls) assert 1 == audit_logger.call_count - calls = [call('SCHCH', {'schedule': {'name': 'OMF to PI north', 'repeat': 30.0, 'enabled': False, + new = {'schedule': {'name': 'OMF to PI north', 'repeat': 30.0, 'enabled': False, 'type': Schedule.Type.INTERVAL, 'exclusive': True, - 'processName': 'North Readings to PI'}})] + 'processName': 'North Readings to PI'}} + old = {'old_schedule': {'name': 'OMF to PI north', 'repeat': 30.0, 'enabled': True, + 'type': Schedule.Type.INTERVAL, 'exclusive': True, + 'processName': 'North Readings to PI'}} + calls = [call('SCHCH', {**new, **old})] audit_logger.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio @@ -1050,7 +1060,11 @@ async def test_enable_schedule(self, mocker): calls = [call("Enabled Schedule '%s/%s' process '%s'\n", 'backup hourly', 'd1631422-9ec6-11e7-abc4-cec278b6b50a', 'backup')] log_info.assert_has_calls(calls, any_order=True) assert 1 == audit_logger.call_count - calls = [call('SCHCH', {'schedule': {'name': 'backup hourly', 'type': Schedule.Type.INTERVAL, 'processName': 'backup', 'exclusive': True, 'repeat': 3600.0, 'enabled': True}})] + new = {'schedule': {'name': 'backup hourly', 'type': Schedule.Type.INTERVAL, 'processName': 'backup', + 'exclusive': True, 'repeat': 3600.0, 'enabled': True}} + old = {'old_schedule': {'name': 'backup hourly', 'type': Schedule.Type.INTERVAL, 'processName': 'backup', + 'exclusive': True, 'repeat': 3600.0, 'enabled': False}} + calls = [call('SCHCH', {**new, **old})] audit_logger.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio @@ -1091,7 +1105,6 @@ async def test_queue_task(self, mocker): scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') - # log_info = 
mocker.patch.object(scheduler._logger, "info") await scheduler._get_schedules() sch_id = uuid.UUID("cea17db8-6ccc-11e7-907b-a6006ad3dba0") # backup @@ -1437,6 +1450,12 @@ async def test_not_ready_and_paused(self, mocker): async def test__terminate_child_processes(self, mocker): pass + @pytest.mark.asyncio + async def test_cleanup(self): + scheduler = Scheduler() + scheduler._logger.level = logging.WARNING + + class MockStorage(StorageClientAsync): def __init__(self, core_management_host=None, core_management_port=None): super().__init__(core_management_host, core_management_port) @@ -1452,6 +1471,7 @@ def _get_storage_service(self, host, port): "protocol": "http" } + class MockStorageAsync(StorageClientAsync): schedules = [ { diff --git a/tests/unit/python/fledge/services/core/service_registry/test_monitor.py b/tests/unit/python/fledge/services/core/service_registry/test_monitor.py index 5c4f843788..3cf251b85a 100644 --- a/tests/unit/python/fledge/services/core/service_registry/test_monitor.py +++ b/tests/unit/python/fledge/services/core/service_registry/test_monitor.py @@ -94,7 +94,6 @@ class TestMonitorException(Exception): """ print(ServiceRegistry.get(idx=s_id_1)[0]._status) - @pytest.mark.asyncio async def test__monitor_exceed_attempts(self, mocker): class AsyncSessionContextManagerMock(MagicMock): @@ -113,8 +112,14 @@ class TestMonitorException(Exception): pass # register a service - s_id_1 = ServiceRegistry.register( - 'sname1', 'Storage', 'saddress1', 1, 1, 'protocol1') + with patch.object(ServiceRegistry._logger, 'info') as log_info: + s_id_1 = ServiceRegistry.register( + 'sname1', 'Storage', 'saddress1', 1, 1, 'protocol1') + assert 1 == log_info.call_count + args, kwargs = log_info.call_args + assert args[0].startswith('Registered service instance id=') + assert args[0].endswith(': ') monitor = Monitor() monitor._sleep_interval = Monitor._DEFAULT_SLEEP_INTERVAL monitor._max_attempts = Monitor._DEFAULT_MAX_ATTEMPTS diff --git a/tests/unit/python/fledge/services/core/test_connect.py b/tests/unit/python/fledge/services/core/test_connect.py index 840689d88c..c5ce8ea601 100644 --- a/tests/unit/python/fledge/services/core/test_connect.py +++ b/tests/unit/python/fledge/services/core/test_connect.py @@ -44,7 +44,7 @@ def test_exception_when_no_storage(self, mock_logger): with pytest.raises(DoesNotExist) as excinfo: connect.get_storage_async() assert str(excinfo).endswith('DoesNotExist') - mock_logger.exception.assert_called_once_with('') + assert 1 == mock_logger.error.call_count @patch('fledge.services.core.connect._logger') def test_exception_when_non_fledge_storage(self, mock_logger): @@ -55,8 +55,7 @@ def test_exception_when_non_fledge_storage(self, mock_logger): assert args[0].startswith('Registered service instance id=') assert args[0].endswith(': ') - with pytest.raises(DoesNotExist) as excinfo: connect.get_storage_async() assert str(excinfo).endswith('DoesNotExist') - mock_logger.exception.assert_called_once_with('') + assert 1 == mock_logger.error.call_count diff --git a/tests/unit/python/fledge/services/core/test_server.py b/tests/unit/python/fledge/services/core/test_server.py index c7c151daf8..07f92eb9d2 100644 --- a/tests/unit/python/fledge/services/core/test_server.py +++ b/tests/unit/python/fledge/services/core/test_server.py @@ -632,16 +632,19 @@ async def test_service_not_registered(self, client): assert (request_data['name'], request_data['type'], request_data['address'], request_data['service_port'], request_data['management_port'], 'http', None) == args async def 
test_register_service(self, client): - async def async_mock(return_value): - return return_value + async def async_mock(): + return "" Server._storage_client = MagicMock(StorageClientAsync) Server._storage_client_async = MagicMock(StorageClientAsync) - request_data = {"type": "Storage", "name": "Storage Services", "address": "127.0.0.1", "service_port": 8090, "management_port": 1090} + request_data = {"type": "Storage", "name": "Storage Services", "address": "127.0.0.1", "service_port": 8090, + "management_port": 1090} + _rv = await async_mock() if sys.version_info.major == 3 and sys.version_info.minor >= 8 else \ + asyncio.ensure_future(async_mock()) with patch.object(ServiceRegistry, 'getStartupToken', return_value=None): with patch.object(ServiceRegistry, 'register', return_value='1') as patch_register: with patch.object(AuditLogger, '__init__', return_value=None): - with patch.object(AuditLogger, 'information', return_value=(await async_mock(None))) as audit_info_patch: + with patch.object(AuditLogger, 'information', return_value=_rv) as audit_info_patch: resp = await client.post('/fledge/service', data=json.dumps(request_data)) assert 200 == resp.status r = await resp.text() @@ -651,7 +654,8 @@ async def async_mock(return_value): assert 'SRVRG' == args[0] assert {'name': request_data['name']} == args[1] args, _ = patch_register.call_args - assert (request_data['name'], request_data['type'], request_data['address'], request_data['service_port'], request_data['management_port'], 'http', None) == args + assert (request_data['name'], request_data['type'], request_data['address'], + request_data['service_port'], request_data['management_port'], 'http', None) == args async def test_service_not_found_when_unregister(self, client): with patch.object(ServiceRegistry, 'get', side_effect=service_registry_exceptions.DoesNotExist) as patch_unregister: @@ -677,10 +681,12 @@ async def async_mock(): data.append(record) Server._storage_client = MagicMock(StorageClientAsync) Server._storage_client_async = MagicMock(StorageClientAsync) + _rv = await async_mock() if sys.version_info.major == 3 and sys.version_info.minor >= 8 else\ + asyncio.ensure_future(async_mock()) with patch.object(ServiceRegistry, 'get', return_value=data) as patch_get_unregister: with patch.object(ServiceRegistry, 'unregister') as patch_unregister: with patch.object(AuditLogger, '__init__', return_value=None): - with patch.object(AuditLogger, 'information', return_value=(await async_mock())) as audit_info_patch: + with patch.object(AuditLogger, 'information', return_value=_rv) as audit_info_patch: resp = await client.delete('/fledge/service/{}'.format(service_id)) assert 200 == resp.status r = await resp.text() diff --git a/tests/unit/python/fledge/services/core/test_user_model.py b/tests/unit/python/fledge/services/core/test_user_model.py index a62eaed502..7392942f53 100644 --- a/tests/unit/python/fledge/services/core/test_user_model.py +++ b/tests/unit/python/fledge/services/core/test_user_model.py @@ -1,20 +1,21 @@ # -*- coding: utf-8 -*- - # FLEDGE_BEGIN # See: http://fledge-iot.readthedocs.io/ # FLEDGE_END +import copy import json import asyncio from unittest.mock import MagicMock, patch -import pytest import sys +import pytest -from fledge.services.core import connect +from fledge.common.audit_logger import AuditLogger +from fledge.common.configuration_manager import ConfigurationManager from fledge.common.storage_client.storage_client import StorageClientAsync from fledge.common.storage_client.exceptions import 
StorageServerError +from fledge.services.core import connect from fledge.services.core.user_model import User -from fledge.common.configuration_manager import ConfigurationManager __author__ = "Ashish Jabble" __copyright__ = "Copyright (c) 2017 OSIsoft, LLC" @@ -87,7 +88,6 @@ async def test_get_role_id_by_name(self): async def test_get_all(self): expected = {'rows': [], 'count': 0} - payload = '{"return": ["id", "uname", "role_id", "access_method", "real_name", "description"], "where": {"column": "enabled", "condition": "=", "value": "t"}}' storage_client_mock = MagicMock(StorageClientAsync) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. @@ -97,10 +97,10 @@ async def test_get_all(self): _rv = asyncio.ensure_future(mock_coro(expected)) with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): - with patch.object(storage_client_mock, 'query_tbl_with_payload', return_value=_rv) as query_tbl_patch: + with patch.object(storage_client_mock, 'query_tbl', return_value=_rv) as query_tbl_patch: actual = await User.Objects.all() assert actual == expected['rows'] - query_tbl_patch.assert_called_once_with('users', payload) + query_tbl_patch.assert_called_once_with('users') @pytest.mark.parametrize("kwargs, payload", [ ({'username': None, 'uid': None}, '{"return": ["id", "uname", "role_id", "access_method", "real_name", "description"], "where": {"column": "enabled", "condition": "=", "value": "t"}}'), @@ -173,19 +173,27 @@ async def test_create_user(self): expected = {'rows_affected': 1, "response": "inserted"} payload = {"pwd": "dd7171406eaf4baa8bc805857f719bca", "role_id": 1, "uname": "aj", 'access_method': 'any', 'description': '', 'real_name': ''} + audit_details = copy.deepcopy(payload) + audit_details.pop('pwd', None) + audit_details['message'] = "'{}' username created for '{}' user.".format(payload['uname'], payload['real_name']) storage_client_mock = MagicMock(StorageClientAsync) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. if sys.version_info.major == 3 and sys.version_info.minor >= 8: _rv = await mock_coro(expected) + _rv2 = await mock_coro(None) else: - _rv = asyncio.ensure_future(mock_coro(expected)) + _rv = asyncio.ensure_future(mock_coro(expected)) + _rv2 = asyncio.ensure_future(mock_coro(None)) with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): with patch.object(User.Objects, 'hash_password', return_value=hashed_password) as hash_pwd_patch: with patch.object(storage_client_mock, 'insert_into_tbl', return_value=_rv) as insert_tbl_patch: - actual = await User.Objects.create("aj", "fledge", 1) - assert actual == expected + with patch.object(AuditLogger, '__init__', return_value=None): + with patch.object(AuditLogger, 'information', return_value=_rv2) as patch_audit: + actual = await User.Objects.create("aj", "fledge", 1) + assert actual == expected + patch_audit.assert_called_once_with('USRAD', audit_details) assert 1 == insert_tbl_patch.call_count assert insert_tbl_patch.called is True args, kwargs = insert_tbl_patch.call_args @@ -226,20 +234,26 @@ async def test_delete_user(self): r2 = {'response': 'updated', 'rows_affected': 1} storage_client_mock = MagicMock(StorageClientAsync) - + user_id = 2 + audit_details = {"user_id": user_id, "message": "User ID: <{}> has been disabled.".format(user_id)} # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. 
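        # Note on the pattern below: _rv3 is the pre-resolved None handed to the patched
        # AuditLogger.information, and AuditLogger.__init__ is patched to return None as
        # well, so the USRDL audit entry that User.Objects.delete now writes can be
        # asserted via patch_audit without a real storage connection.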
if sys.version_info.major == 3 and sys.version_info.minor >= 8: _rv1 = await mock_coro(r1) _rv2 = await mock_coro(r2) + _rv3 = await mock_coro(None) else: _rv1 = asyncio.ensure_future(mock_coro(r1)) _rv2 = asyncio.ensure_future(mock_coro(r2)) - + _rv3 = asyncio.ensure_future(mock_coro(None)) + with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): with patch.object(storage_client_mock, 'delete_from_tbl', return_value=_rv1) as delete_tbl_patch: with patch.object(storage_client_mock, 'update_tbl', return_value=_rv2) as update_tbl_patch: - actual = await User.Objects.delete(2) - assert r2 == actual + with patch.object(AuditLogger, '__init__', return_value=None): + with patch.object(AuditLogger, 'information', return_value=_rv3) as patch_audit: + actual = await User.Objects.delete(user_id) + assert r2 == actual + patch_audit.assert_called_once_with('USRDL', audit_details) update_tbl_patch.assert_called_once_with('users', p2) delete_tbl_patch.assert_called_once_with('user_logins', p1) @@ -275,25 +289,38 @@ async def test_delete_user_exception(self): async def test_update_user_role(self, user_data, payload): expected = {'response': 'updated', 'rows_affected': 1} storage_client_mock = MagicMock(StorageClientAsync) - + user_id = 2 + user_info = {'id': user_id, 'uname': 'dianomic', 'role_id': 4, 'access_method': 'cert', 'real_name': 'D System', + 'description': ''} + audit_details = {'user_id': user_id, 'old_value': {'role_id': 4}, + 'message': "'dianomic' user has been changed.", 'new_value': user_data} # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. if sys.version_info.major == 3 and sys.version_info.minor >= 8: + _rv0 = await mock_coro(user_info) _rv1 = await mock_coro() _rv2 = await mock_coro(expected) + _rv3 = await mock_coro(None) else: + _rv0 = asyncio.ensure_future(mock_coro(user_info)) _rv1 = asyncio.ensure_future(mock_coro()) _rv2 = asyncio.ensure_future(mock_coro(expected)) - - with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): - with patch.object(storage_client_mock, 'update_tbl', return_value=_rv2) as update_tbl_patch: - with patch.object(User.Objects, 'delete_user_tokens', return_value=_rv1) as delete_token_patch: - actual = await User.Objects.update(2, user_data) - assert actual is True - delete_token_patch.assert_called_once_with(2) - args, kwargs = update_tbl_patch.call_args - assert 'users' == args[0] - p = json.loads(args[1]) - assert payload == p + _rv3 = asyncio.ensure_future(mock_coro(None)) + + with patch.object(User.Objects, 'get', return_value=_rv0) as patch_get: + with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): + with patch.object(storage_client_mock, 'update_tbl', return_value=_rv2) as update_tbl_patch: + with patch.object(User.Objects, 'delete_user_tokens', return_value=_rv1) as delete_token_patch: + with patch.object(AuditLogger, '__init__', return_value=None): + with patch.object(AuditLogger, 'information', return_value=_rv3) as patch_audit: + actual = await User.Objects.update(user_id, user_data) + assert actual is True + patch_audit.assert_called_once_with('USRCH', audit_details) + delete_token_patch.assert_called_once_with(user_id) + args, kwargs = update_tbl_patch.call_args + assert 'users' == args[0] + p = json.loads(args[1]) + assert payload == p + patch_get.assert_called_once_with(uid=user_id) @pytest.mark.parametrize("user_data, payload", [ ({'password': "Test@123"}, {"values": {"pwd": "HASHED_PASSWORD"}, "where": 
{"column": "id", "condition": "=", "value": 2}}) @@ -301,59 +328,96 @@ async def test_update_user_role(self, user_data, payload): async def test_update_user_password(self, user_data, payload): expected = {'response': 'updated', 'rows_affected': 1} storage_client_mock = MagicMock(StorageClientAsync) - + user_id = 2 + user_info = {'id': user_id, 'uname': 'dianomic', 'role_id': 4, 'access_method': 'cert', 'real_name': 'D System', + 'description': ''} + audit_details = {'user_id': user_id, 'old_value': {'pwd': '****'}, + 'new_value': {'pwd': 'Password has been updated.'}, + 'message': "'dianomic' user has been changed."} + # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. if sys.version_info.major == 3 and sys.version_info.minor >= 8: + _rv0 = await mock_coro(user_info) _rv1 = await mock_coro() _rv2 = await mock_coro(expected) _rv3 = await mock_coro(['HASHED_PWD']) + _rv4 = await mock_coro(None) else: + _rv0 = asyncio.ensure_future(mock_coro(user_info)) _rv1 = asyncio.ensure_future(mock_coro()) _rv2 = asyncio.ensure_future(mock_coro(expected)) _rv3 = asyncio.ensure_future(mock_coro(['HASHED_PWD'])) - - with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): - with patch.object(User.Objects, 'hash_password', return_value='HASHED_PWD') as hash_pwd_patch: - with patch.object(User.Objects, '_get_password_history', return_value=_rv3) as pwd_list_patch: - with patch.object(storage_client_mock, 'update_tbl', return_value=_rv2) as update_tbl_patch: - with patch.object(User.Objects, 'delete_user_tokens', return_value=_rv1) as delete_token_patch: - with patch.object(User.Objects, '_insert_pwd_history_with_oldest_pwd_deletion_if_count_exceeds', return_value=_rv1) as pwd_history_patch: - actual = await User.Objects.update(2, user_data) - assert actual is True - pwd_history_patch.assert_called_once_with(storage_client_mock, 2, 'HASHED_PWD', ['HASHED_PWD']) - delete_token_patch.assert_called_once_with(2) - args, kwargs = update_tbl_patch.call_args - assert 'users' == args[0] - # FIXME: payload ordering issue after datetime patch - # update_tbl_patch.assert_called_once_with('users', payload) - pwd_list_patch.assert_called_once_with(storage_client_mock, 2, user_data) - hash_pwd_patch.assert_called_once_with(user_data['password']) + _rv4 = asyncio.ensure_future(mock_coro(None)) + + with patch.object(User.Objects, 'get', return_value=_rv0) as patch_get: + with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): + with patch.object(User.Objects, 'hash_password', return_value='HASHED_PWD') as hash_pwd_patch: + with patch.object(User.Objects, '_get_password_history', return_value=_rv3) as pwd_list_patch: + with patch.object(storage_client_mock, 'update_tbl', return_value=_rv2) as update_tbl_patch: + with patch.object(User.Objects, 'delete_user_tokens', return_value=_rv1) as delete_token_patch: + with patch.object(User.Objects, + '_insert_pwd_history_with_oldest_pwd_deletion_if_count_exceeds', + return_value=_rv1) as pwd_history_patch: + with patch.object(AuditLogger, '__init__', return_value=None): + with patch.object(AuditLogger, 'information', return_value=_rv4) as patch_audit: + actual = await User.Objects.update(user_id, user_data) + assert actual is True + patch_audit.assert_called_once_with('USRCH', audit_details) + pwd_history_patch.assert_called_once_with( + storage_client_mock, user_id, 'HASHED_PWD', ['HASHED_PWD']) + delete_token_patch.assert_called_once_with(user_id) + args, kwargs = 
update_tbl_patch.call_args + assert 'users' == args[0] + # FIXME: payload ordering issue after datetime patch + # update_tbl_patch.assert_called_once_with('users', payload) + pwd_list_patch.assert_called_once_with(storage_client_mock, user_id, user_data) + hash_pwd_patch.assert_called_once_with(user_data['password']) + patch_get.assert_called_once_with(uid=user_id) async def test_update_user_storage_exception(self): expected = {'message': 'Something went wrong', 'retryable': False, 'entryPoint': 'update'} - payload = '{"values": {"role_id": 2}, "where": {"column": "id", "condition": "=", "value": 2, "and": {"column": "enabled", "condition": "=", "value": "t"}}}' - storage_client_mock = MagicMock(StorageClientAsync) - with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): - with patch.object(storage_client_mock, 'update_tbl', side_effect=StorageServerError(code=400, reason="blah", error=expected)) as update_tbl_patch: - with pytest.raises(ValueError) as excinfo: - await User.Objects.update(2, {'role_id': 2}) - assert str(excinfo.value) == expected['message'] - update_tbl_patch.assert_called_once_with('users', payload) + payload = '{"values": {"role_id": 2}, "where": {"column": "id", "condition": "=", "value": 2, ' \ + '"and": {"column": "enabled", "condition": "=", "value": "t"}}}' + user_id = 2 + user_info = {'id': user_id, 'uname': 'dianomic', 'role_id': 4, 'access_method': 'cert', 'real_name': 'D System', + 'description': ''} + _rv0 = await mock_coro(user_info) if sys.version_info.major == 3 and sys.version_info.minor >= 8 else \ + asyncio.ensure_future(mock_coro(user_info)) + storage_client_mock = MagicMock(StorageClientAsync) + with patch.object(User.Objects, 'get', return_value=_rv0) as patch_get: + with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): + with patch.object(storage_client_mock, 'update_tbl', side_effect=StorageServerError( + code=400, reason="blah", error=expected)) as update_tbl_patch: + with pytest.raises(ValueError) as excinfo: + await User.Objects.update(user_id, {'role_id': 2}) + assert str(excinfo.value) == expected['message'] + update_tbl_patch.assert_called_once_with('users', payload) + patch_get.assert_called_once_with(uid=user_id) async def test_update_user_exception(self): - payload = '{"values": {"role_id": "blah"}, "where": {"column": "id", "condition": "=", "value": 2, "and": {"column": "enabled", "condition": "=", "value": "t"}}}' + payload = '{"values": {"role_id": "blah"}, "where": {"column": "id", "condition": "=", "value": 2, ' \ + '"and": {"column": "enabled", "condition": "=", "value": "t"}}}' msg = 'Bad role id' storage_client_mock = MagicMock(StorageClientAsync) - with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): - with patch.object(storage_client_mock, 'update_tbl', side_effect=ValueError(msg)) as update_tbl_patch: - with pytest.raises(Exception) as excinfo: - await User.Objects.update(2, {'role_id': 'blah'}) - assert excinfo.type is ValueError - assert str(excinfo.value) == msg - update_tbl_patch.assert_called_once_with('users', payload) + user_id = 2 + user_info = {'id': user_id, 'uname': 'dianomic', 'role_id': 4, 'access_method': 'cert', 'real_name': 'D System', + 'description': ''} + _rv0 = await mock_coro(user_info) if sys.version_info.major == 3 and sys.version_info.minor >= 8 else \ + asyncio.ensure_future(mock_coro(user_info)) + with patch.object(User.Objects, 'get', return_value=_rv0) as patch_get: + with patch.object(connect, 'get_storage_async', 
return_value=storage_client_mock): + with patch.object(storage_client_mock, 'update_tbl', side_effect=ValueError(msg)) as update_tbl_patch: + with pytest.raises(Exception) as excinfo: + await User.Objects.update(user_id, {'role_id': 'blah'}) + assert excinfo.type is ValueError + assert str(excinfo.value) == msg + update_tbl_patch.assert_called_once_with('users', payload) + patch_get.assert_called_once_with(uid=user_id) @pytest.mark.parametrize("user_data", [ - {'real_name': 'MSD'}, {'description': 'Captain Cool'}, {'real_name': 'MSD', 'description': 'Captain Cool'}, + {'real_name': 'MSD'}, + {'description': 'Captain Cool'}, + {'real_name': 'MSD', 'description': 'Captain Cool'}, {'access_method': 'pwd'} ]) async def test_update_user_other_fields(self, user_data): @@ -362,25 +426,42 @@ async def test_update_user_other_fields(self, user_data): 'and': {'column': 'enabled', 'condition': '=', 'value': 't'}}} expected_payload.update({'values': user_data}) storage_client_mock = MagicMock(StorageClientAsync) - + user_id = 2 + user_info = {'id': user_id, 'uname': 'dianomic', 'role_id': 4, 'access_method': 'cert', 'real_name': 'D System', + 'description': ''} + + audit_details = {'user_id': user_id, 'new_value': user_data, 'message': "'dianomic' user has been changed."} + temp = {} + for u in user_data.keys(): + temp[u] = user_info[u] + audit_details['old_value'] = temp # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. if sys.version_info.major == 3 and sys.version_info.minor >= 8: + _rv0 = await mock_coro(user_info) _rv1 = await mock_coro() _rv2 = await mock_coro(expected) + _rv3 = await mock_coro(None) else: + _rv0 = asyncio.ensure_future(mock_coro(user_info)) _rv1 = asyncio.ensure_future(mock_coro()) _rv2 = asyncio.ensure_future(mock_coro(expected)) - - with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): - with patch.object(storage_client_mock, 'update_tbl', return_value=_rv2) as update_tbl_patch: - with patch.object(User.Objects, 'delete_user_tokens', return_value=_rv1) as delete_token_patch: - actual = await User.Objects.update(2, user_data) - assert actual is True - delete_token_patch.assert_not_called() - args, kwargs = update_tbl_patch.call_args - assert 'users' == args[0] - p = json.loads(args[1]) - assert expected_payload == p + _rv3 = asyncio.ensure_future(mock_coro(None)) + + with patch.object(User.Objects, 'get', return_value=_rv0) as patch_get: + with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): + with patch.object(storage_client_mock, 'update_tbl', return_value=_rv2) as update_tbl_patch: + with patch.object(User.Objects, 'delete_user_tokens', return_value=_rv1) as delete_token_patch: + with patch.object(AuditLogger, '__init__', return_value=None): + with patch.object(AuditLogger, 'information', return_value=_rv3) as patch_audit: + actual = await User.Objects.update(user_id, user_data) + assert actual is True + patch_audit.assert_called_once_with('USRCH', audit_details) + delete_token_patch.assert_not_called() + args, kwargs = update_tbl_patch.call_args + assert 'users' == args[0] + p = json.loads(args[1]) + assert expected_payload == p + patch_get.assert_called_once_with(uid=user_id) async def test_login_if_no_user_exists(self): async def mock_get_category_item(): diff --git a/tests/unit/python/fledge/tasks/purge/test_purge.py b/tests/unit/python/fledge/tasks/purge/test_purge.py index 1fd5282096..d211667e0d 100644 --- a/tests/unit/python/fledge/tasks/purge/test_purge.py +++ 
b/tests/unit/python/fledge/tasks/purge/test_purge.py @@ -9,14 +9,14 @@ import asyncio import sys from unittest.mock import patch, call, MagicMock -from fledge.common import logger +from fledge.common.audit_logger import AuditLogger +from fledge.common.configuration_manager import ConfigurationManager +from fledge.common.logger import FLCoreLogger +from fledge.common.process import FledgeProcess +from fledge.common.storage_client.exceptions import * from fledge.common.storage_client.storage_client import StorageClientAsync, ReadingsStorageClientAsync from fledge.common.statistics import Statistics from fledge.tasks.purge.purge import Purge -from fledge.common.process import FledgeProcess -from fledge.common.configuration_manager import ConfigurationManager -from fledge.common.audit_logger import AuditLogger -from fledge.common.storage_client.exceptions import * __author__ = "Vaibhav Singhal" @@ -49,7 +49,7 @@ def test_init(self): mock_storage_client_async = MagicMock(spec=StorageClientAsync) mock_audit_logger = AuditLogger(mock_storage_client_async) with patch.object(FledgeProcess, "__init__") as mock_process: - with patch.object(logger, "setup") as log: + with patch.object(FLCoreLogger, "get_logger") as log: with patch.object(mock_audit_logger, "__init__", return_value=None): p = Purge() assert isinstance(p, Purge) @@ -106,7 +106,7 @@ async def test_set_configuration(self): async def store_purge(self, **kwargs): if kwargs.get('age') == '-1' or kwargs.get('size') == '-1': raise StorageServerError(400, "Bla", "Some Error") - return {"readings": 10, "removed": 1, "unsentPurged": 2, "unsentRetained": 7} + return {"readings": 10, "removed": 1, "unsentPurged": 2, "unsentRetained": 7, "duration": 100, "method":"mock"} config = {"purgeAgeSize": {"retainUnsent": {"value": "purge unsent"}, "age": {"value": "72"}, "size": {"value": "20"}}, "purgeAge": {"retainUnsent": {"value": "purge unsent"}, "age": {"value": "72"}, "size": {"value": "0"}}, @@ -146,7 +146,7 @@ async def test_purge_data(self, conf, expected_return, expected_calls): with patch.object(FledgeProcess, '__init__'): with patch.object(mock_audit_logger, "__init__", return_value=None): p = Purge() - p._logger = logger + p._logger = FLCoreLogger p._logger.info = MagicMock() p._logger.error = MagicMock() p._logger.debug = MagicMock() @@ -190,7 +190,7 @@ async def test_purge_data_no_data_purged(self, conf, expected_return): with patch.object(FledgeProcess, '__init__'): with patch.object(mock_audit_logger, "__init__", return_value=None): p = Purge() - p._logger = logger + p._logger = FLCoreLogger p._logger.info = MagicMock() p._logger.error = MagicMock() p._storage_async = MagicMock(spec=StorageClientAsync) @@ -225,7 +225,7 @@ async def test_purge_error_storage_response(self, conf, expected_return): with patch.object(FledgeProcess, '__init__'): with patch.object(mock_audit_logger, "__init__", return_value=None): p = Purge() - p._logger = logger + p._logger = FLCoreLogger p._logger.info = MagicMock() p._logger.error = MagicMock() p._storage_async = MagicMock(spec=StorageClientAsync) @@ -260,7 +260,7 @@ async def test_purge_data_invalid_conf(self, conf, expected_error_key): with patch.object(FledgeProcess, '__init__'): with patch.object(mock_audit_logger, "__init__", return_value=None): p = Purge() - p._logger = logger + p._logger = FLCoreLogger p._logger.info = MagicMock() p._logger.error = MagicMock() p._storage_async = MagicMock(spec=StorageClientAsync) @@ -330,4 +330,5 @@ async def mock_purge(x): with patch.object(p, 'write_statistics'): 
await p.run() # Test the negative case when function purge_data raise some exception - p._logger.exception.assert_called_once_with("") + assert 1 == p._logger.exception.call_count + diff --git a/tests/unit/python/fledge/tasks/statistics/test_statistics_history.py b/tests/unit/python/fledge/tasks/statistics/test_statistics_history.py index 22f4064bec..2f77c0d9e4 100644 --- a/tests/unit/python/fledge/tasks/statistics/test_statistics_history.py +++ b/tests/unit/python/fledge/tasks/statistics/test_statistics_history.py @@ -12,10 +12,10 @@ import sys import ast -from fledge.common import logger -from fledge.common.storage_client.storage_client import StorageClientAsync -from fledge.tasks.statistics.statistics_history import StatisticsHistory +from fledge.common.logger import FLCoreLogger from fledge.common.process import FledgeProcess +from fledge.tasks.statistics.statistics_history import StatisticsHistory +from fledge.common.storage_client.storage_client import StorageClientAsync __author__ = "Vaibhav Singhal" __copyright__ = "Copyright (c) 2017 OSIsoft, LLC" @@ -42,7 +42,7 @@ class TestStatisticsHistory: async def test_init(self): """Test that creating an instance of StatisticsHistory calls init of FledgeProcess and creates loggers""" with patch.object(FledgeProcess, "__init__") as mock_process: - with patch.object(logger, "setup") as log: + with patch.object(FLCoreLogger, "get_logger") as log: sh = StatisticsHistory() assert isinstance(sh, StatisticsHistory) log.assert_called_once_with("StatisticsHistory") @@ -56,7 +56,7 @@ async def test_update_previous_value(self): _rv = asyncio.ensure_future(mock_coro(None)) with patch.object(FledgeProcess, '__init__'): - with patch.object(logger, "setup"): + with patch.object(FLCoreLogger, "get_logger"): sh = StatisticsHistory() sh._storage_async = MagicMock(spec=StorageClientAsync) payload = {'updates': [{'where': {'value': 'Bla', 'condition': '=', 'column': 'key'}, 'values': {'previous_value': 1}}]} @@ -70,7 +70,7 @@ async def test_update_previous_value(self): async def test_run(self): with patch.object(FledgeProcess, '__init__'): - with patch.object(logger, "setup"): + with patch.object(FLCoreLogger, "get_logger"): sh = StatisticsHistory() sh._storage_async = MagicMock(spec=StorageClientAsync) retval = {'count': 2,