diff --git a/C/common/asset_tracking.cpp b/C/common/asset_tracking.cpp index 2f6e9dedc7..8e040affa1 100644 --- a/C/common/asset_tracking.cpp +++ b/C/common/asset_tracking.cpp @@ -43,7 +43,7 @@ AssetTracker *AssetTracker::getAssetTracker() * @param service Service name */ AssetTracker::AssetTracker(ManagementClient *mgtClient, string service) - : m_mgtClient(mgtClient), m_service(service) + : m_mgtClient(mgtClient), m_service(service), m_updateInterval(MIN_ASSET_TRACKER_UPDATE) { instance = this; m_shutdown = false; @@ -338,6 +338,27 @@ void AssetTracker::queue(TrackingTuple *tuple) m_cv.notify_all(); } +/** + * Set the update interval for the asset tracker. + * + * @param interval The number of milliseconds between updates of the asset tracker + * @return bool Was the update accepted + */ +bool AssetTracker::tune(unsigned long interval) +{ + unique_lock<mutex> lck(m_mutex); + if (interval >= MIN_ASSET_TRACKER_UPDATE) + { + m_updateInterval = interval; + } + else + { + Logger::getLogger()->error("Attempt to set asset tracker update to less than minimum interval"); + return false; + } + return true; +} + /** * The worker thread that will flush any pending asset tuples to * the database. @@ -347,7 +368,7 @@ void AssetTracker::workerThread() unique_lock<mutex> lck(m_mutex); while (m_pending.empty() && m_shutdown == false) { - m_cv.wait_for(lck, chrono::milliseconds(500)); + m_cv.wait_for(lck, chrono::milliseconds(m_updateInterval)); processQueue(); } // Process any items left in the queue at shutdown diff --git a/C/common/base64databuffer.cpp b/C/common/base64databuffer.cpp index 5e91d8b170..a785da66ac 100644 --- a/C/common/base64databuffer.cpp +++ b/C/common/base64databuffer.cpp @@ -16,6 +16,7 @@ using namespace std; */ Base64DataBuffer::Base64DataBuffer(const string& encoded) { + m_data = NULL; m_itemSize = encoded[0] - '0'; size_t in_len = encoded.size() - 1; if (in_len % 4 != 0) diff --git a/C/common/config_category.cpp b/C/common/config_category.cpp index 1dbeb64b1b..2356636b4b 100755 --- a/C/common/config_category.cpp +++ b/C/common/config_category.cpp @@ -144,7 +144,7 @@ ConfigCategory::ConfigCategory(const string& name, const string& json) : m_name( Logger::getLogger()->error("Configuration parse error in category '%s', %s: %s at %d, '%s'", name.c_str(), json.c_str(), GetParseError_En(doc.GetParseError()), (unsigned)doc.GetErrorOffset(), - StringAround(json, (unsigned)doc.GetErrorOffset())); + StringAround(json, (unsigned)doc.GetErrorOffset()).c_str()); throw new ConfigMalformed(); } @@ -439,6 +439,114 @@ string ConfigCategory::getValue(const string& name) const throw new ConfigItemNotFound(); } +/** + * Return the value of a configuration category list item. This + * is a convenience function used when simple lists are defined + * and allows for central processing of the list values + * + * @param name The name of the configuration item to return + * @return vector<string> The list of values of the configuration item + * @throws exception if the item does not exist in the category + */ +vector<string> ConfigCategory::getValueList(const string& name) const +{ + for (unsigned int i = 0; i < m_items.size(); i++) + { + if (name.compare(m_items[i]->m_name) == 0) + { + if (m_items[i]->m_type.compare("list")) + { + throw new ConfigItemNotAList(); + } + Document d; + vector<string> list; + d.Parse(m_items[i]->m_value.c_str()); + if (d.HasParseError()) + { + Logger::getLogger()->error("The JSON value for a list item %s has a parse error: %s, %s", + name.c_str(), GetParseError_En(d.GetParseError()), m_items[i]->m_value.c_str()); + return list; + } + 
if (d.IsArray()) + { + for (auto& v : d.GetArray()) + { + if (v.IsString()) + { + list.push_back(v.GetString()); + } + } + } + else + { + Logger::getLogger()->error("The value of the list item %s should be a JSON array and it is not", name.c_str()); + } + return list; + } + } + throw new ConfigItemNotFound(); +} + +/** + * Return the value of a configuration category kvlist item. This + * is a convenience function used when key/value lists are defined + * and allows for central processing of the list values + * + * @param name The name of the configuration item to return + * @return map<string, string> The key/value pairs of the configuration item + * @throws exception if the item does not exist in the category + */ +map<string, string> ConfigCategory::getValueKVList(const string& name) const +{ + for (unsigned int i = 0; i < m_items.size(); i++) + { + if (name.compare(m_items[i]->m_name) == 0) + { + if (m_items[i]->m_type.compare("kvlist")) + { + throw new ConfigItemNotAList(); + } + map<string, string> list; + Document d; + d.Parse(m_items[i]->m_value.c_str()); + if (d.HasParseError()) + { + Logger::getLogger()->error("The JSON value for a kvlist item %s has a parse error: %s, %s", + name.c_str(), GetParseError_En(d.GetParseError()), m_items[i]->m_value.c_str()); + return list; + } + for (auto& v : d.GetObject()) + { + string key = v.name.GetString(); + string value = to_string(v.value); + list.insert(pair<string, string>(key, value)); + } + return list; + } + } + throw new ConfigItemNotFound(); +} + +/** + * Convert a RapidJSON value to a string + * + * @param v The RapidJSON value + */ +std::string ConfigCategory::to_string(const rapidjson::Value& v) const +{ + if (v.IsString()) + { + return { v.GetString(), v.GetStringLength() }; + } + else + { + StringBuffer strbuf; + Writer<StringBuffer> writer(strbuf); + v.Accept(writer); + return { strbuf.GetString(), strbuf.GetLength() }; + } +} + /** * Return the requested attribute of a configuration category item * @@ -478,6 +586,10 @@ string ConfigCategory::getItemAttribute(const string& itemName, return m_items[i]->m_rule; case BUCKET_PROPERTIES_ATTR: return m_items[i]->m_bucketProperties; + case LIST_SIZE_ATTR: + return m_items[i]->m_listSize; + case ITEM_TYPE_ATTR: + return m_items[i]->m_listItemType; default: throw new ConfigItemAttributeNotFound(); } @@ -546,6 +658,12 @@ bool ConfigCategory::setItemAttribute(const string& itemName, case BUCKET_PROPERTIES_ATTR: m_items[i]->m_bucketProperties = value; return true; + case LIST_SIZE_ATTR: + m_items[i]->m_listSize = value; + return true; + case ITEM_TYPE_ATTR: + m_items[i]->m_listItemType = value; + return true; default: return false; } @@ -882,6 +1000,44 @@ bool ConfigCategory::isDeprecated(const string& name) const throw new ConfigItemNotFound(); } +/** + * Return if the configuration item is a list item + * + * @param name The name of the item to test + * @return bool True if the item is a list type item + * @throws exception If the item was not found in the configuration category + */ +bool ConfigCategory::isList(const string& name) const +{ + for (unsigned int i = 0; i < m_items.size(); i++) + { + if (name.compare(m_items[i]->m_name) == 0) + { + return (m_items[i]->m_type.compare("list") == 0); + } + } + throw new ConfigItemNotFound(); +} + +/** + * Return if the configuration item is a kvlist item + * + * @param name The name of the item to test + * @return bool True if the item is a kvlist type item + * @throws exception If the item was not found in the configuration category + */ +bool ConfigCategory::isKVList(const string& name) const +{ + for (unsigned int i = 0; i < 
m_items.size(); i++) + { + if (name.compare(m_items[i]->m_name) == 0) + { + return (m_items[i]->m_type.compare("kvlist") == 0); + } + } + throw new ConfigItemNotFound(); +} + /** * Set the description for the configuration category * @@ -1047,6 +1203,14 @@ ConfigCategory::CategoryItem::CategoryItem(const string& name, { m_itemType = BucketItem; } + if (m_type.compare("list") == 0) + { + m_itemType = ListItem; + } + if (m_type.compare("kvlist") == 0) + { + m_itemType = KVListItem; + } if (item.HasMember("deprecated")) { @@ -1131,6 +1295,33 @@ ConfigCategory::CategoryItem::CategoryItem(const string& name, } } + if (item.HasMember("items")) + { + if (item["items"].IsString()) + { + m_listItemType = item["items"].GetString(); + } + else + { + throw new runtime_error("Items configuration item property is not a string"); + } + } + else if (m_itemType == ListItem || m_itemType == KVListItem) + { + throw new runtime_error("List configuration item is missing the \"items\" attribute"); + } + if (item.HasMember("listSize")) + { + if (item["listSize"].IsString()) + { + m_listSize = item["listSize"].GetString(); + } + else + { + throw new runtime_error("ListSize configuration item property is not a string"); + } + } + std::string m_typeUpperCase = m_type; for (auto & c: m_typeUpperCase) c = toupper(c); @@ -1414,6 +1605,8 @@ ConfigCategory::CategoryItem::CategoryItem(const CategoryItem& rhs) m_group = rhs.m_group; m_rule = rhs.m_rule; m_bucketProperties = rhs.m_bucketProperties; + m_listSize = rhs.m_listSize; + m_listItemType = rhs.m_listItemType; } /** @@ -1447,7 +1640,10 @@ ostringstream convert; if (m_itemType == StringItem || m_itemType == BoolItem || - m_itemType == EnumerationItem) + m_itemType == EnumerationItem || + m_itemType == BucketItem || + m_itemType == ListItem || + m_itemType == KVListItem) { convert << "\"value\" : \"" << JSONescape(m_value) << "\", "; convert << "\"default\" : \"" << JSONescape(m_default) << "\""; @@ -1461,6 +1657,10 @@ ostringstream convert; convert << "\"value\" : " << m_value << ", "; convert << "\"default\" : " << m_default; } + else + { + Logger::getLogger()->error("Unknown item type in configuration category"); + } if (full) { @@ -1518,6 +1718,15 @@ ostringstream convert; { convert << ", \"file\" : \"" << m_file << "\""; } + + if (!m_listSize.empty()) + { + convert << ", \"listSize\" : \"" << m_listSize << "\""; + } + if (!m_listItemType.empty()) + { + convert << ", \"items\" : \"" << m_listItemType << "\""; + } } convert << " }"; @@ -1605,10 +1814,21 @@ ostringstream convert; } convert << "]"; } + if (!m_listSize.empty()) + { + convert << ", \"listSize\" : \"" << m_listSize << "\""; + } + if (!m_listItemType.empty()) + { + convert << ", \"items\" : \"" << m_listItemType << "\""; + } if (m_itemType == StringItem || m_itemType == EnumerationItem || - m_itemType == BoolItem) + m_itemType == BoolItem || + m_itemType == BucketItem || + m_itemType == ListItem || + m_itemType == KVListItem) { convert << ", \"default\" : \"" << JSONescape(m_default) << "\" }"; } diff --git a/C/common/filter_pipeline.cpp b/C/common/filter_pipeline.cpp index d6229cc654..ad046bd64b 100755 --- a/C/common/filter_pipeline.cpp +++ b/C/common/filter_pipeline.cpp @@ -134,31 +134,48 @@ bool FilterPipeline::loadFilters(const string& categoryName) // Try loading all filter plugins: abort on any error for (Value::ConstValueIterator itr = filterList.Begin(); itr != filterList.End(); ++itr) { - // Get "plugin" item fromn filterCategoryName - string filterCategoryName = itr->GetString(); - 
ConfigCategory filterDetails = mgtClient->getCategory(filterCategoryName); - if (!filterDetails.itemExists("plugin")) + if (itr->IsString()) { - string errMsg("loadFilters: 'plugin' item not found "); - errMsg += "in " + filterCategoryName + " category"; - Logger::getLogger()->fatal(errMsg.c_str()); - throw runtime_error(errMsg); + // Get "plugin" item fromn filterCategoryName + string filterCategoryName = itr->GetString(); + ConfigCategory filterDetails = mgtClient->getCategory(filterCategoryName); + if (!filterDetails.itemExists("plugin")) + { + string errMsg("loadFilters: 'plugin' item not found "); + errMsg += "in " + filterCategoryName + " category"; + Logger::getLogger()->fatal(errMsg.c_str()); + throw runtime_error(errMsg); + } + string filterName = filterDetails.getValue("plugin"); + PLUGIN_HANDLE filterHandle; + // Load filter plugin only: we don't call any plugin method right now + filterHandle = loadFilterPlugin(filterName); + if (!filterHandle) + { + string errMsg("Cannot load filter plugin '" + filterName + "'"); + Logger::getLogger()->fatal(errMsg.c_str()); + throw runtime_error(errMsg); + } + else + { + // Save filter handler: key is filterCategoryName + filterInfo.push_back(pair + (filterCategoryName, filterHandle)); + } } - string filterName = filterDetails.getValue("plugin"); - PLUGIN_HANDLE filterHandle; - // Load filter plugin only: we don't call any plugin method right now - filterHandle = loadFilterPlugin(filterName); - if (!filterHandle) + else if (itr->IsArray()) { - string errMsg("Cannot load filter plugin '" + filterName + "'"); - Logger::getLogger()->fatal(errMsg.c_str()); - throw runtime_error(errMsg); + // Sub pipeline + Logger::getLogger()->warn("This version of Fledge does not support branching of pipelines. The branch will be ignored."); + } + else if (itr->IsObject()) + { + // An object, probably the write destination + Logger::getLogger()->warn("This version of Fledge does not support pipelines with different destinations. 
The destination will be ignored and the data written to the default storage service."); } else { - // Save filter handler: key is filterCategoryName - filterInfo.push_back(pair - (filterCategoryName, filterHandle)); + Logger::getLogger()->error("Unexpected object in pipeline definition %s, ignoring", categoryName.c_str()); } } @@ -261,6 +278,11 @@ bool FilterPipeline::setupFiltersPipeline(void *passToOnwardFilter, void *useFil // Fetch up to date filter configuration updatedCfg = mgtClient->getCategory(filterCategoryName); + // Pass Management client IP:Port to filter so that it may connect to bucket service + updatedCfg.addItem("mgmt_client_url_base", "Management client host and port", + "string", "127.0.0.1:0", + mgtClient->getUrlbase()); + // Add filter category name under service/process config name children.push_back(filterCategoryName); mgtClient->addChildCategories(serviceName, children); diff --git a/C/common/include/asset_tracking.h b/C/common/include/asset_tracking.h index 5445bfca4c..9b70cb6683 100644 --- a/C/common/include/asset_tracking.h +++ b/C/common/include/asset_tracking.h @@ -21,6 +21,8 @@ #include #include +#define MIN_ASSET_TRACKER_UPDATE 500 // The minimum interval for asset tracker updates + /** * Tracking abstract base class to be passed in the process data queue */ @@ -268,6 +270,7 @@ class AssetTracker { void updateCache(std::set dpSet, StorageAssetTrackingTuple* ptr); std::set *getStorageAssetTrackingCacheData(StorageAssetTrackingTuple* tuple); + bool tune(unsigned long updateInterval); private: std::string @@ -292,6 +295,7 @@ class AssetTracker { std::string m_fledgeName; StorageClient *m_storageClient; StorageAssetCacheMap storageAssetTrackerTuplesCache; + unsigned int m_updateInterval; }; /** diff --git a/C/common/include/config_category.h b/C/common/include/config_category.h index b95220e653..60ff0c9a0e 100755 --- a/C/common/include/config_category.h +++ b/C/common/include/config_category.h @@ -13,6 +13,7 @@ #include #include +#include #include #include @@ -65,7 +66,9 @@ class ConfigCategory { ScriptItem, CategoryType, CodeItem, - BucketItem + BucketItem, + ListItem, + KVListItem }; ConfigCategory(const std::string& name, const std::string& json); @@ -93,6 +96,8 @@ class ConfigCategory { bool itemExists(const std::string& name) const; bool setItemDisplayName(const std::string& name, const std::string& displayName); std::string getValue(const std::string& name) const; + std::vector getValueList(const std::string& name) const; + std::map getValueKVList(const std::string& name) const; std::string getType(const std::string& name) const; std::string getDescription(const std::string& name) const; std::string getDefault(const std::string& name) const; @@ -110,6 +115,8 @@ class ConfigCategory { bool isBool(const std::string& name) const; bool isNumber(const std::string& name) const; bool isDouble(const std::string& name) const; + bool isList(const std::string& name) const; + bool isKVList(const std::string& name) const; bool isDeprecated(const std::string& name) const; std::string toJSON(const bool full=false) const; std::string itemsToJSON(const bool full=false) const; @@ -118,6 +125,7 @@ class ConfigCategory { void setItemsValueFromDefault(); void checkDefaultValuesOnly() const; std::string itemToJSON(const std::string& itemName) const; + std::string to_string(const rapidjson::Value& v) const; enum ItemAttribute { ORDER_ATTR, READONLY_ATTR, @@ -131,7 +139,9 @@ class ConfigCategory { DISPLAY_NAME_ATTR, DEPRECATED_ATTR, RULE_ATTR, - BUCKET_PROPERTIES_ATTR + 
BUCKET_PROPERTIES_ATTR, + LIST_SIZE_ATTR, + ITEM_TYPE_ATTR }; std::string getItemAttribute(const std::string& itemName, ItemAttribute itemAttribute) const; @@ -180,6 +190,8 @@ class ConfigCategory { std::string m_group; std::string m_rule; std::string m_bucketProperties; + std::string m_listSize; + std::string m_listItemType; }; std::vector m_items; std::string m_name; @@ -276,4 +288,16 @@ class ConfigItemAttributeNotFound : public std::exception { return "Configuration item attribute not found in configuration category"; } }; + +/** + * An attempt has been made to access a configuration item as a list when the + * item is not of type list + */ +class ConfigItemNotAList : public std::exception { + public: + virtual const char *what() const throw() + { + return "Configuration item is not a list type item"; + } +}; #endif diff --git a/C/common/include/management_client.h b/C/common/include/management_client.h index 9fc8bf2123..97f16e7c53 100644 --- a/C/common/include/management_client.h +++ b/C/common/include/management_client.h @@ -117,6 +117,8 @@ class ManagementClient { const std::string& event); int validateDatapoints(std::string dp1, std::string dp2); AssetTrackingTable *getDeprecatedAssetTrackingTuples(); + std::string getAlertByKey(const std::string& key); + bool raiseAlert(const std::string& key, const std::string& message, const std::string& urgency="normal"); private: std::ostringstream m_urlbase; diff --git a/C/common/include/pipeline_element.h b/C/common/include/pipeline_element.h new file mode 100644 index 0000000000..1badf33403 --- /dev/null +++ b/C/common/include/pipeline_element.h @@ -0,0 +1,54 @@ +#ifndef _PIPELINE_ELEMENT_H +#define _PIPELINE_ELEMENT_H +/* + * Fledge filter pipeline elements. + * + * Copyright (c) 2024 Dianomic Systems + * + * Released under the Apache 2.0 Licence + * + * Author: Mark Riddoch + */ + +/** + * The base pipeline element class + */ +class PipelineElement { + public: + PipelineElement(); + void setNext(PipelineElement *next) + { + m_next = next; + } + private: + PipelineElement *m_next; + +}; + +/** + * A pipeline element the runs a filter plugin + */ +class PipelineFilter : public PipelineElement { + public: + PipelineFilter(const std::string& name); + private: + FilterPlugin *m_plugin; +}; + +/** + * A pipeline element that represents a branch in the pipeline + */ +class PipelineBranch : public PipelineElement { + public: + PipelineBranch(); +}; + +/** + * A pipeline element that writes to a storage service or buffer + */ +class PipelineWriter : public PipelineElement { + public: + PipelineWriter(); +}; + +#endif diff --git a/C/common/json_utils.cpp b/C/common/json_utils.cpp index a8fb57fb27..705f26eb9c 100644 --- a/C/common/json_utils.cpp +++ b/C/common/json_utils.cpp @@ -83,44 +83,47 @@ string escaped = subject; } return escaped; } + /** * Return unescaped version of a JSON string * * Routine removes \" inside the string * and leading and trailing " * - * @param subject Input string + * @param input Input string * @return Unescaped string */ -std::string JSONunescape(const std::string& subject) +std::string JSONunescape(const std::string& input) { - size_t pos = 0; - string replace(""); - string json = subject; + std::string output; + output.reserve(input.size()); - // Replace '\"' with '"' - while ((pos = json.find("\\\"", pos)) != std::string::npos) - { - json.replace(pos, 1, ""); - } - // Remove leading '"' - if (json[0] == '\"') - { - json.erase(0, 1); - } - // Remove trailing '"' - if (json[json.length() - 1] == '\"') - { - 
json.erase(json.length() - 1, 1); - } + for (size_t i = 0; i < input.size(); ++i) + { + // skip leading or trailing " + if ((i == 0 || i == input.size() -1) && input[i] == '"') + { + continue; + } - // Where we had escaped " characters we now have \\" - // replace this with \" - pos = 0; - while ((pos = json.find("\\\\\"", pos)) != std::string::npos) - { - json.replace(pos, 3, "\\\""); - } - return json; -} + // \\" -> \" + if (input[i] == '\\' && i + 2 < input.size() && input[i + 1] == '\\' && input[i + 2] == '"') + { + output.push_back('\\'); + output.push_back('"'); + i += 2; + } + // \" -> " + else if (input[i] == '\\' && i + 1 < input.size() && input[i + 1] == '"') + { + output.push_back('"'); + ++i; + } + else + { + output.push_back(input[i]); + } + } + return output; +} diff --git a/C/common/management_client.cpp b/C/common/management_client.cpp index 55c0ebfa79..9057c90da2 100644 --- a/C/common/management_client.cpp +++ b/C/common/management_client.cpp @@ -602,8 +602,12 @@ ConfigCategory ManagementClient::getCategory(const string& categoryName) try { string url = "/fledge/service/category/" + urlEncode(categoryName); auto res = this->getHttpClient()->request("GET", url.c_str()); - Document doc; string response = res->content.string(); + if (res->status_code.compare("200 OK") == 0) + { + return ConfigCategory(categoryName, response); + } + Document doc; doc.Parse(response.c_str()); if (doc.HasParseError()) { @@ -613,7 +617,7 @@ ConfigCategory ManagementClient::getCategory(const string& categoryName) categoryName.c_str(), response.c_str()); throw new exception(); } - else if (doc.HasMember("message")) + else if (doc.HasMember("message") && doc["message"].IsString()) { m_logger->error("Failed to fetch configuration category: %s.", doc["message"].GetString()); @@ -621,7 +625,9 @@ ConfigCategory ManagementClient::getCategory(const string& categoryName) } else { - return ConfigCategory(categoryName, response); + m_logger->error("Failed to fetch configuration category: %s.", + response.c_str()); + throw new exception(); } } catch (const SimpleWeb::system_error &e) { m_logger->error("Get config category failed %s.", e.what()); @@ -649,6 +655,10 @@ string ManagementClient::setCategoryItemValue(const string& categoryName, auto res = this->getHttpClient()->request("PUT", url.c_str(), payload); Document doc; string response = res->content.string(); + if (res->status_code.compare("200 OK") == 0) + { + return response; + } doc.Parse(response.c_str()); if (doc.HasParseError()) { @@ -666,7 +676,9 @@ string ManagementClient::setCategoryItemValue(const string& categoryName, } else { - return response; + m_logger->error("Failed to set configuration category item value: %s.", + response.c_str()); + throw new exception(); } } catch (const SimpleWeb::system_error &e) { m_logger->error("Get config category failed %s.", e.what()); @@ -2059,3 +2071,68 @@ int ManagementClient::validateDatapoints(std::string dp1, std::string dp2) return temp.compare(dp2); } + +/** + * Get an alert by specific key + * + * @param key Key to get alert + * @return string Alert + */ +std::string ManagementClient::getAlertByKey(const std::string& key) +{ + std::string response = "Status: 404 Not found"; + try + { + std::string url = "/fledge/alert/" + urlEncode(key) ; + auto res = this->getHttpClient()->request("GET", url.c_str()); + std::string statusCode = res->status_code; + if (statusCode.compare("200 OK")) + { + m_logger->error("Get alert failed %s.", statusCode.c_str()); + response = "Status: " + statusCode; + return 
response; + } + + response = res->content.string(); + } + catch (const SimpleWeb::system_error &e) { + m_logger->error("Get alert failed %s.", e.what()); + } + return response; +} + + +/** + * Raise an alert + * + * @param key Alert key + * @param message Alert message + * @param urgency Alert urgency + * @return whether operation was successful + */ +bool ManagementClient::raiseAlert(const std::string& key, const std::string& message, const std::string& urgency) +{ + try + { + std::string url = "/fledge/alert" ; + ostringstream payload; + payload << "{\"key\":\"" << key << "\"," + << "\"message\":\"" << message << "\"," + << "\"urgency\":\"" << urgency << "\"}"; + + auto res = this->getHttpClient()->request("POST", url.c_str(), payload.str()); + std::string statusCode = res->status_code; + if (statusCode.compare("200 OK")) + { + m_logger->error("Raise alert failed %s.", statusCode.c_str()); + return false; + } + + return true; + } + catch (const SimpleWeb::system_error &e) { + m_logger->error("Raise alert failed %s.", e.what()); + return false; + } +} + diff --git a/C/common/reading_set.cpp b/C/common/reading_set.cpp index df01437359..9033aeb742 100755 --- a/C/common/reading_set.cpp +++ b/C/common/reading_set.cpp @@ -468,7 +468,11 @@ JSONReading::JSONReading(const Value& json) // Add 'reading' values for (auto &m : json["reading"].GetObject()) { - addDatapoint(datapoint(m.name.GetString(), m.value)); + Datapoint *dp = datapoint(m.name.GetString(), m.value); + if (dp) + { + addDatapoint(dp); + } } } else @@ -550,15 +554,25 @@ Datapoint *rval = NULL; size_t pos = str.find_first_of(':'); if (str.compare(2, 10, "DATABUFFER") == 0) { - DataBuffer *databuffer = new Base64DataBuffer(str.substr(pos + 1)); - DatapointValue value(databuffer); - rval = new Datapoint(name, value); + try { + DataBuffer *databuffer = new Base64DataBuffer(str.substr(pos + 1)); + DatapointValue value(databuffer); + rval = new Datapoint(name, value); + } catch (exception& e) { + Logger::getLogger()->error("Unable to create datapoint %s as the base 64 encoded data is incorrect, %s", + name.c_str(), e.what()); + } } else if (str.compare(2, 7, "DPIMAGE") == 0) { - DPImage *image = new Base64DPImage(str.substr(pos + 1)); - DatapointValue value(image); - rval = new Datapoint(name, value); + try { + DPImage *image = new Base64DPImage(str.substr(pos + 1)); + DatapointValue value(image); + rval = new Datapoint(name, value); + } catch (exception& e) { + Logger::getLogger()->error("Unable to create datapoint %s as the base 64 encoded data is incorrect, %s", + name.c_str(), e.what()); + } } } @@ -648,7 +662,11 @@ Datapoint *rval = NULL; vector *obj = new vector; for (auto &mo : item.GetObject()) { - obj->push_back(datapoint(mo.name.GetString(), mo.value)); + Datapoint *dp = datapoint(mo.name.GetString(), mo.value); + if (dp) + { + obj->push_back(dp); + } } DatapointValue value(obj, true); rval = new Datapoint(name, value); diff --git a/C/plugins/north/OMF/include/omf.h b/C/plugins/north/OMF/include/omf.h index 68a382bf37..9c47972dbf 100644 --- a/C/plugins/north/OMF/include/omf.h +++ b/C/plugins/north/OMF/include/omf.h @@ -17,6 +17,7 @@ #include #include #include +#include #include #define OMF_HINT "OMFHint" @@ -146,7 +147,7 @@ class OMF // Method with vector (by reference) of readings uint32_t sendToServer(const std::vector& readings, - bool skipSentDataTypes = true); + bool skipSentDataTypes = true); // never called // Method with vector (by reference) of reading pointers uint32_t sendToServer(const std::vector& readings, @@ 
-154,11 +155,11 @@ class OMF // Send a single reading (by reference) uint32_t sendToServer(const Reading& reading, - bool skipSentDataTypes = true); + bool skipSentDataTypes = true); // never called // Send a single reading pointer uint32_t sendToServer(const Reading* reading, - bool skipSentDataTypes = true); + bool skipSentDataTypes = true); // never called // Set saved OMF formats void setFormatType(const std::string &key, std::string &value); @@ -231,9 +232,6 @@ class OMF bool getAFMapEmptyNames() const { return m_AFMapEmptyNames; }; bool getAFMapEmptyMetadata() const { return m_AFMapEmptyMetadata; }; - bool getConnected() const { return m_connected; }; - void setConnected(const bool connectionStatus); - void setLegacyMode(bool legacy) { m_legacy = legacy; }; static std::string ApplyPIServerNamingRulesObj(const std::string &objName, bool *changed); @@ -356,6 +354,7 @@ class OMF // Start of support for using linked containers bool sendBaseTypes(); + bool sendAFLinks(Reading& reading, OMFHints *hints); // End of support for using linked containers // string createAFLinks(Reading &reading, OMFHints *hints); @@ -395,7 +394,6 @@ class OMF bool m_AFMapEmptyMetadata; std::string m_AFHierarchyLevel; std::string m_prefixAFAsset; - bool m_connected; // true if calls to PI Web API are working vector m_afhHierarchyAlreadyCreated={ @@ -520,19 +518,23 @@ class OMF * The OMFData class. * A reading is formatted with OMF specifications using the original * type creation scheme implemented by the OMF plugin + * + * There is no good reason to retain this class any more, it is here + * mostly to reduce the scope of the change when introducting the OMFBuffer */ class OMFData { public: - OMFData(const Reading& reading, + OMFData(OMFBuffer & payload, + const Reading& reading, string measurementId, + bool needDelim, const OMF_ENDPOINT PIServerEndpoint = ENDPOINT_CR, const std::string& DefaultAFLocation = std::string(), OMFHints *hints = NULL); - - const std::string& OMFdataVal() const; + bool hasData() { return m_hasData; }; private: - std::string m_value; + bool m_hasData; }; #endif diff --git a/C/plugins/north/OMF/include/omfbuffer.h b/C/plugins/north/OMF/include/omfbuffer.h new file mode 100644 index 0000000000..0a8b8aabf8 --- /dev/null +++ b/C/plugins/north/OMF/include/omfbuffer.h @@ -0,0 +1,58 @@ +#ifndef _OMF_BUFFER_H +#define _OMF_BUFFER_H +/* + * Fledge OMF North plugin buffer class + * + * Copyright (c) 2023 Dianomic Systems + * + * Released under the Apache 2.0 Licence + * + * Author: Mark Riddoch + */ + +#include +#include + +#define BUFFER_CHUNK 8192 + +/** + * Buffer class designed to hold OMF payloads that can + * grow as required but have minimal copy semantics. 
+ * + * TODO Add a coalesce and compress public entry point + */ +class OMFBuffer { + class Buffer { + public: + Buffer(); + Buffer(unsigned int); + ~Buffer(); + char *detach(); + char *data; + unsigned int offset; + unsigned int length; + bool attached; + }; + + public: + OMFBuffer(); + ~OMFBuffer(); + + bool isEmpty() { return buffers.empty() || (buffers.size() == 1 && buffers.front()->offset == 0); } + void append(const char); + void append(const char *); + void append(const int); + void append(const unsigned int); + void append(const long); + void append(const unsigned long); + void append(const double); + void append(const std::string&); + void quote(const std::string&); + const char *coalesce(); + void clear(); + + private: + std::list buffers; +}; + +#endif diff --git a/C/plugins/north/OMF/include/omflinkeddata.h b/C/plugins/north/OMF/include/omflinkeddata.h index c25d1ba38a..dca25696d4 100644 --- a/C/plugins/north/OMF/include/omflinkeddata.h +++ b/C/plugins/north/OMF/include/omflinkeddata.h @@ -13,6 +13,7 @@ #include #include #include +#include #include /** @@ -42,7 +43,7 @@ class OMFLinkedData m_doubleFormat("float64"), m_integerFormat("int64") {}; - std::string processReading(const Reading& reading, + bool processReading(OMFBuffer& payload, bool needDelim, const Reading& reading, const std::string& DefaultAFLocation = std::string(), OMFHints *hints = NULL); void buildLookup(const std::vector& reading); diff --git a/C/plugins/north/OMF/linkdata.cpp b/C/plugins/north/OMF/linkdata.cpp index 5e1a83cc85..f78326f95c 100644 --- a/C/plugins/north/OMF/linkdata.cpp +++ b/C/plugins/north/OMF/linkdata.cpp @@ -28,7 +28,7 @@ /** * In order to cut down on the number of string copies made whilst building * the OMF message for a reading we reseeve a number of bytes in a string and - * each time we get close to filling the string we reserve mode. The value below + * each time we get close to filling the string we reserve more. The value below * defines the increment we use to grow the string reservation. 
*/ #define RESERVE_INCREMENT 100 @@ -62,17 +62,17 @@ static std::string DataPointNamesAsString(const Reading& reading) /** * OMFLinkedData constructor, generates the OMF message containing the data * + * @param payload The buffer into which to populate the payload * @param reading Reading for which the OMF message must be generated * @param AFHierarchyPrefix Unused at the current stage * @param hints OMF hints for the specific reading for changing the behaviour of the operation + * @param delim Add a delimiter before outputting anything * */ -string OMFLinkedData::processReading(const Reading& reading, const string& AFHierarchyPrefix, OMFHints *hints) +bool OMFLinkedData::processReading(OMFBuffer& payload, bool delim, const Reading& reading, const string& AFHierarchyPrefix, OMFHints *hints) { - string outData; + bool rval = false; bool changed; - int reserved = RESERVE_INCREMENT * 2; - outData.reserve(reserved); string assetName = reading.getAssetName(); @@ -110,7 +110,7 @@ string OMFLinkedData::processReading(const Reading& reading, const string& AFHi assetName = OMF::ApplyPIServerNamingRulesObj(assetName, NULL); - bool needDelim = false; + bool needDelim = delim; auto assetLookup = m_linkedAssetState->find(originalAssetName + "."); if (assetLookup == m_linkedAssetState->end()) { @@ -120,11 +120,14 @@ string OMFLinkedData::processReading(const Reading& reading, const string& AFHi } if (m_sendFullStructure && assetLookup->second.assetState(assetName) == false) { + if (needDelim) + payload.append(','); // Send the data message to create the asset instance - outData.append("{ \"typeid\":\"FledgeAsset\", \"values\":[ { \"AssetId\":\""); - outData.append(assetName + "\",\"Name\":\""); - outData.append(assetName + "\""); - outData.append("} ] }"); + payload.append("{ \"typeid\":\"FledgeAsset\", \"values\":[ { \"AssetId\":\""); + payload.append(assetName + "\",\"Name\":\""); + payload.append(assetName + "\""); + payload.append("} ] }"); + rval = true; needDelim = true; assetLookup->second.assetSent(assetName); } @@ -136,11 +139,6 @@ string OMFLinkedData::processReading(const Reading& reading, const string& AFHi for (vector::const_iterator it = data.begin(); it != data.end(); ++it) { Datapoint *dp = *it; - if (reserved - outData.size() < RESERVE_INCREMENT / 2) - { - reserved += RESERVE_INCREMENT; - outData.reserve(reserved); - } string dpName = dp->getName(); if (dpName.compare(OMF_HINT) == 0) { @@ -157,7 +155,7 @@ string OMFLinkedData::processReading(const Reading& reading, const string& AFHi { if (needDelim) { - outData.append(","); + payload.append(','); } else { @@ -224,29 +222,32 @@ string OMFLinkedData::processReading(const Reading& reading, const string& AFHi } if (m_sendFullStructure && dpLookup->second.linkState(assetName) == false) { - outData.append("{ \"typeid\":\"__Link\","); - outData.append("\"values\":[ { \"source\" : {"); - outData.append("\"typeid\": \"FledgeAsset\","); - outData.append("\"index\":\"" + assetName); - outData.append("\" }, \"target\" : {"); - outData.append("\"containerid\" : \""); - outData.append(link); - outData.append("\" } } ] },"); + payload.append("{ \"typeid\":\"__Link\","); + payload.append("\"values\":[ { \"source\" : {"); + payload.append("\"typeid\": \"FledgeAsset\","); + payload.append("\"index\":\"" + assetName); + payload.append("\" }, \"target\" : {"); + payload.append("\"containerid\" : \""); + payload.append(link); + payload.append("\" } } ] },"); + + rval = true; dpLookup->second.linkSent(assetName); } // Convert reading data into the OMF JSON 
string - outData.append("{\"containerid\": \"" + link); - outData.append("\", \"values\": [{"); + payload.append("{\"containerid\": \"" + link); + payload.append("\", \"values\": [{"); // Base type we are using for this data point - outData.append("\"" + baseType + "\": "); + payload.append("\"" + baseType + "\": "); // Add datapoint Value - outData.append(dp->getData().toString()); - outData.append(", "); + payload.append(dp->getData().toString()); + payload.append(", "); // Append Z to getAssetDateTime(FMT_STANDARD) - outData.append("\"Time\": \"" + reading.getAssetDateUserTime(Reading::FMT_STANDARD) + "Z" + "\""); - outData.append("} ] }"); + payload.append("\"Time\": \"" + reading.getAssetDateUserTime(Reading::FMT_STANDARD) + "Z" + "\""); + payload.append("} ] }"); + rval = true; } } if (skippedDatapoints.size() > 0) @@ -267,8 +268,7 @@ string OMFLinkedData::processReading(const Reading& reading, const string& AFHi string msg = "The asset " + assetName + " had a number of datapoints, " + points + " that are not supported by OMF and have been omitted"; OMF::reportAsset(assetName, "warn", msg); } - Logger::getLogger()->debug("Created data messages %s", outData.c_str()); - return outData; + return rval; } /** @@ -378,7 +378,7 @@ string OMFLinkedData::getBaseType(Datapoint *dp, const string& format) } /** - * Send the container message for the linked datapoint + * Create a container message for the linked datapoint * * @param linkName The name to use for the container * @param dp The datapoint to process @@ -490,7 +490,7 @@ void OMFLinkedData::sendContainer(string& linkName, Datapoint *dp, OMFHints * hi /** * Flush the container definitions that have been built up * - * @return true if the containers where succesfully flushed + * @return true if the containers were successfully flushed */ bool OMFLinkedData::flushContainers(HttpSender& sender, const string& path, vector >& header) { @@ -558,7 +558,7 @@ bool OMFLinkedData::flushContainers(HttpSender& sender, const string& path, vect catch (const std::exception& e) { - Logger::getLogger()->error("An exception occurred when sending container information the OMF endpoint, %s - %s %s", + Logger::getLogger()->error("An exception occurred when sending container information to the OMF endpoint, %s - %s %s", e.what(), sender.getHostPort().c_str(), path.c_str()); diff --git a/C/plugins/north/OMF/omf.cpp b/C/plugins/north/OMF/omf.cpp index f7ddc040be..cdd6ebb7a1 100755 --- a/C/plugins/north/OMF/omf.cpp +++ b/C/plugins/north/OMF/omf.cpp @@ -142,9 +142,8 @@ const char *AF_HIERARCHY_1LEVEL_LINK = QUOTE( * @param hints OMF hints for the specific reading for changing the behaviour of the operation * */ -OMFData::OMFData(const Reading& reading, string measurementId, const OMF_ENDPOINT PIServerEndpoint,const string& AFHierarchyPrefix, OMFHints *hints) +OMFData::OMFData(OMFBuffer& payload, const Reading& reading, string measurementId, bool delim, const OMF_ENDPOINT PIServerEndpoint,const string& AFHierarchyPrefix, OMFHints *hints) { - string outData; bool changed; Logger::getLogger()->debug("%s - measurementId :%s: ", __FUNCTION__, measurementId.c_str()); @@ -168,19 +167,11 @@ OMFData::OMFData(const Reading& reading, string measurementId, const OMF_ENDPOIN } } - // Convert reading data into the OMF JSON string - outData.append("{\"containerid\": \"" + measurementId); - outData.append("\", \"values\": [{"); - - // Get reading data const vector data = reading.getReadingData(); - unsigned long skipDatapoints = 0; - /** - * This loop creates: - * "dataName": 
{"type": "dataType"}, - */ + m_hasData = false; + // Check if there are any datapoints to send for (vector::const_iterator it = data.begin(); it != data.end(); ++it) { string dpName = (*it)->getName(); @@ -189,39 +180,56 @@ OMFData::OMFData(const Reading& reading, string measurementId, const OMF_ENDPOIN // Don't send the OMF Hint to the PI Server continue; } - if (!isTypeSupported((*it)->getData())) + if (isTypeSupported((*it)->getData())) { - skipDatapoints++;; - continue; + m_hasData = true; + break; } - else + } + + if (m_hasData) + { + if (delim) { - // Add datapoint Name - outData.append("\"" + OMF::ApplyPIServerNamingRulesObj(dpName, nullptr) + "\": " + (*it)->getData().toString()); - outData.append(", "); + payload.append(", "); } - } + // Convert reading data into the OMF JSON string + payload.append("{\"containerid\": \"" + measurementId); + payload.append("\", \"values\": [{"); - // Append Z to getAssetDateTime(FMT_STANDARD) - outData.append("\"Time\": \"" + reading.getAssetDateUserTime(Reading::FMT_STANDARD) + "Z" + "\""); - outData.append("}]}"); - // Append all, some or no datapoins - if (!skipDatapoints || - skipDatapoints < data.size()) - { - m_value.append(outData); + /** + * This loop creates: + * "dataName": {"type": "dataType"}, + */ + for (vector::const_iterator it = data.begin(); it != data.end(); ++it) + { + string dpName = (*it)->getName(); + if (dpName.compare(OMF_HINT) == 0) + { + // Don't send the OMF Hint to the PI Server + continue; + } + if (!isTypeSupported((*it)->getData())) + { + continue; + } + else + { + // Add datapoint Name + payload.append("\"" + OMF::ApplyPIServerNamingRulesObj(dpName, nullptr) + "\": " + (*it)->getData().toString()); + payload.append(", "); + } + } + + // Append Z to getAssetDateTime(FMT_STANDARD) + payload.append("\"Time\": \"" + reading.getAssetDateUserTime(Reading::FMT_STANDARD) + "Z" + "\""); + + payload.append("}]}"); } } -/** - * Return the (reference) JSON data in m_value - */ -const string& OMFData::OMFdataVal() const -{ - return m_value; -} /** * OMF constructor @@ -244,7 +252,6 @@ OMF::OMF(const string& name, m_changeTypeId = false; m_OMFDataTypes = NULL; m_OMFVersion = "1.0"; - m_connected = false; } /** @@ -272,7 +279,6 @@ OMF::OMF(const string& name, m_lastError = false; m_changeTypeId = false; - m_connected = false; } // Destructor @@ -421,7 +427,6 @@ bool OMF::sendDataTypes(const Reading& row, OMFHints *hints) string msg = "An error occurred sending the dataType message for the asset " + assetName + ". " + errorMsg; reportAsset(assetName, "error", msg); - m_connected = false; return false; } @@ -488,10 +493,9 @@ bool OMF::sendDataTypes(const Reading& row, OMFHints *hints) { string errorMsg = errorMessageHandler(e.what()); - string msg = "An error occurred sending the dataType message for the asset " + assetName + string msg = "An error occurred sending the dataType container message for the asset " + assetName + ". " + errorMsg; reportAsset(assetName, "error", msg); - m_connected = false; return false; } @@ -519,7 +523,7 @@ bool OMF::sendDataTypes(const Reading& row, OMFHints *hints) return false; } } - // Exception raised fof HTTP 400 Bad Request + // Exception raised for HTTP 400 Bad Request catch (const BadRequest& e) { OMFError error(m_sender.getHTTPResponse()); @@ -558,7 +562,6 @@ bool OMF::sendDataTypes(const Reading& row, OMFHints *hints) string msg = "An error occurred sending the dataType staticData message for the asset " + assetName + ". 
" + errorMsg; reportAsset(assetName, "debug", msg); - m_connected = false; return false; } @@ -650,7 +653,7 @@ bool OMF::sendDataTypes(const Reading& row, OMFHints *hints) { string errorMsg = errorMessageHandler(e.what()); - string msg = "An error occurred sending the dataType staticData message for the asset " + assetName + string msg = "An error occurred sending the dataType link message for the asset " + assetName + ". " + errorMsg; reportAsset(assetName, "debug", msg); return false; @@ -711,7 +714,6 @@ bool OMF::AFHierarchySendMessage(const string& msgType, string& jsonData, const { success = false; errorMessage = ex.what(); - m_connected = false; } if (! success) @@ -957,7 +959,7 @@ bool OMF::sendAFHierarchy(string AFHierarchy) */ bool OMF::sendAFHierarchyLevels(string parentPath, string path, std::string &lastLevel) { - bool success; + bool success = true; std::string level; std::string previousLevel; @@ -1001,10 +1003,14 @@ bool OMF::sendAFHierarchyLevels(string parentPath, string path, std::string &las levelPath = StringSlashFix(levelPath); prefixId = generateUniquePrefixId(levelPath); - success = sendAFHierarchyTypes(level, prefixId); - if (success) + if (!sendAFHierarchyTypes(level, prefixId)) { - success = sendAFHierarchyStatic(level, prefixId); + return false; + } + + if (!sendAFHierarchyStatic(level, prefixId)) + { + return false; } // Creates the link between the AF level @@ -1013,7 +1019,10 @@ bool OMF::sendAFHierarchyLevels(string parentPath, string path, std::string &las parentPathFixed = StringSlashFix(previousLevelPath); prefixIdParent = generateUniquePrefixId(parentPathFixed); - sendAFHierarchyLink(previousLevel, level, prefixIdParent, prefixId); + if (!sendAFHierarchyLink(previousLevel, level, prefixIdParent, prefixId)) + { + return false; + } } previousLevelPath = levelPath; previousLevel = level; @@ -1088,16 +1097,13 @@ uint32_t OMF::sendToServer(const vector& readings, bool compression, bool skipSentDataTypes) { bool AFHierarchySent = false; + bool sendLinkedTypes = false; bool sendDataTypes; string keyComplete; string AFHierarchyPrefix; string AFHierarchyLevel; string measurementId; - string varValue; - string varDefault; - bool variablePresent; - #if INSTRUMENT ostringstream threadId; threadId << std::this_thread::get_id(); @@ -1159,8 +1165,6 @@ uint32_t OMF::sendToServer(const vector& readings, string OMFHintAFHierarchyTmp; string OMFHintAFHierarchy; - bool legacyType = m_legacy; - // Create the class that deals with the linked data generation OMFLinkedData linkedData(&m_linkedAssetState, m_PIServerEndpoint); linkedData.setSendFullStructure(m_sendFullStructure); @@ -1170,8 +1174,9 @@ uint32_t OMF::sendToServer(const vector& readings, linkedData.buildLookup(readings); bool pendingSeparator = false; - ostringstream jsonData; - jsonData << "["; + + OMFBuffer payload; + payload.append('['); // Fetch Reading* data for (vector::const_iterator elem = readings.begin(); elem != readings.end(); @@ -1196,14 +1201,8 @@ uint32_t OMF::sendToServer(const vector& readings, Logger::getLogger()->info("Using OMF Tag hint: %s", (*it)->getHint().c_str()); keyComplete.append("_" + (*it)->getHint()); usingTagHint = true; - break; } - - varValue=""; - varDefault=""; - variablePresent=false; - - if (typeid(**it) == typeid(OMFAFLocationHint)) + else if (typeid(**it) == typeid(OMFAFLocationHint)) { OMFHintAFHierarchyTmp = (*it)->getHint(); OMFHintAFHierarchy = variableValueHandle(*reading, OMFHintAFHierarchyTmp); @@ -1213,14 +1212,9 @@ uint32_t OMF::sendToServer(const vector& readings, 
,OMFHintAFHierarchyTmp.c_str() ,OMFHintAFHierarchy.c_str() ); } - } - for (auto it = omfHints.cbegin(); it != omfHints.cend(); it++) - { - if (typeid(**it) == typeid(OMFLegacyTypeHint)) + else if (typeid(**it) == typeid(OMFLegacyTypeHint)) { - Logger::getLogger()->info("Using OMF Legacy Type hint: %s", (*it)->getHint().c_str()); - legacyType = true; - break; + Logger::getLogger()->warn("OMFHint LegacyType has been deprecated. The hint value '%s' will be ignored.", (*it)->getHint().c_str()); } } } @@ -1243,15 +1237,7 @@ uint32_t OMF::sendToServer(const vector& readings, // hint is present it will override any default AFLocation or AF Location rules defined in the north plugin configuration. if ( ! createAFHierarchyOmfHint(m_assetName, OMFHintAFHierarchy) ) { - if (m_connected) - { - if (!evaluateAFHierarchyRules(m_assetName, *reading)) - { - m_lastError = true; - return 0; - } - } - else + if (!evaluateAFHierarchyRules(m_assetName, *reading)) { m_lastError = true; return 0; @@ -1282,11 +1268,10 @@ uint32_t OMF::sendToServer(const vector& readings, setAFHierarchy(); } - string outData; // Use old style complex types if the user has forced it via configuration, // we are running against an EDS endpoint or Connector Relay or we have types defined for this // asset already - if (legacyType || m_PIServerEndpoint == ENDPOINT_EDS || + if (m_legacy || m_PIServerEndpoint == ENDPOINT_EDS || m_PIServerEndpoint == ENDPOINT_CR || m_OMFDataTypes->find(keyComplete) != m_OMFDataTypes->end()) { @@ -1348,7 +1333,7 @@ uint32_t OMF::sendToServer(const vector& readings, { // The AF hierarchy is created/recreated if an OMF type message is sent // it sends the hierarchy once - if (sendDataTypes and ! AFHierarchySent) + if (sendDataTypes and !AFHierarchySent) { if (!handleAFHierarchy()) { @@ -1398,7 +1383,12 @@ uint32_t OMF::sendToServer(const vector& readings, measurementId = generateMeasurementId(m_assetName); - outData = OMFData(*reading, measurementId, m_PIServerEndpoint, AFHierarchyPrefix, hints ).OMFdataVal(); + if (OMFData(payload, *reading, measurementId, pendingSeparator, m_PIServerEndpoint, AFHierarchyPrefix, hints).hasData()) + { + pendingSeparator = true; + } + + sendLinkedTypes = false; } else { @@ -1406,33 +1396,10 @@ uint32_t OMF::sendToServer(const vector& readings, // in the processReading call auto lookup = m_linkedAssetState.find(m_assetName + "."); // Send data for this reading using the new mechanism - outData = linkedData.processReading(*reading, AFHierarchyPrefix, hints); - if (m_sendFullStructure && lookup->second.afLinkState() == false) - { - // If the hierarchy has not already been sent then send it - if (! AFHierarchySent) - { - if (!handleAFHierarchy()) - { - m_lastError = true; - return 0; - } - AFHierarchySent = true; - } + if (linkedData.processReading(payload, pendingSeparator, *reading, AFHierarchyPrefix, hints)) + pendingSeparator = true; - string af = createAFLinks(*reading, hints); - if (! af.empty()) - { - outData.append(","); - outData.append(af); - } - lookup->second.afLinkSent(); - } - } - if (!outData.empty()) - { - jsonData << (pendingSeparator ? 
", " : "") << outData; - pendingSeparator = true; + sendLinkedTypes = true; } if (hints) @@ -1448,15 +1415,11 @@ uint32_t OMF::sendToServer(const vector& readings, // Remove all assets supersetDataPoints OMF::unsetMapObjectTypes(m_SuperSetDataPoints); - jsonData << "]"; - - string json = jsonData.str(); - json_not_compressed = json; + payload.append(']'); - if (compression) - { - json = compress_string(json); - } + // TODO Improve this with coalesceCompressed call and avoid string on the stack + // and avoid copy into a string + const char *omfData = payload.coalesce(); #if INSTRUMENT gettimeofday(&t3, NULL); @@ -1486,7 +1449,7 @@ uint32_t OMF::sendToServer(const vector& readings, int res = m_sender.sendRequest("POST", m_path, readingData, - json); + compression ? compress_string(omfData) : omfData); if ( ! (res >= 200 && res <= 299) ) { Logger::getLogger()->error("Sending JSON readings , " @@ -1495,6 +1458,7 @@ uint32_t OMF::sendToServer(const vector& readings, m_sender.getHostPort().c_str(), m_path.c_str() ); + delete[] omfData; m_lastError = true; return 0; } @@ -1524,22 +1488,20 @@ uint32_t OMF::sendToServer(const vector& readings, timersub(&t5, &t4, &tm); timeT5 = tm.tv_sec + ((double)tm.tv_usec / 1000000); - Logger::getLogger()->warn("Timing seconds - thread :%s: - superSet :%6.3f: - Loop :%6.3f: - compress :%6.3f: - send data :%6.3f: - readings |%d| - msg size |%d| - msg size compressed |%d| ", + Logger::getLogger()->warn("Timing seconds - thread %s - superSet %6.3f - Loop %6.3f - compress %6.3f - send data %6.3f - readings %d - msg size %d", threadId.str().c_str(), timeT1, timeT2, timeT3, timeT4, readings.size(), - json_not_compressed.length(), - json.length() + strlen(omfData) ); #endif - // Return number of sent readings to the caller - return readings.size(); + delete[] omfData; } // Exception raised for HTTP 400 Bad Request catch (const BadRequest& e) @@ -1610,6 +1572,7 @@ uint32_t OMF::sendToServer(const vector& readings, ); } + delete[] omfData; // Reset error indicator m_lastError = false; @@ -1626,6 +1589,7 @@ uint32_t OMF::sendToServer(const vector& readings, m_sender.getHostPort().c_str(), m_path.c_str() ); + delete[] omfData; } // Failure m_lastError = true; @@ -1643,9 +1607,53 @@ uint32_t OMF::sendToServer(const vector& readings, // Failure m_lastError = true; - m_connected = false; + delete[] omfData; return 0; } + + // Create the AF Links between assets if AF structure creation with linked types is requested + if (sendLinkedTypes && m_sendFullStructure) + { + for (Reading *reading : readings) + { + OMFHints *hints = NULL; + Datapoint *hintsdp = reading->getDatapoint("OMFHint"); + if (hintsdp) + { + hints = new OMFHints(hintsdp->getData().toString()); + } + + m_assetName = ApplyPIServerNamingRulesObj(reading->getAssetName(), nullptr); + auto lookup = m_linkedAssetState.find(m_assetName + "."); + if (lookup->second.afLinkState() == false) + { + // If the hierarchy has not already been sent then send it + if (!AFHierarchySent) + { + if (!handleAFHierarchy()) + { + m_lastError = true; + delete hints; + return 0; + } + AFHierarchySent = true; + } + + if (!sendAFLinks(*reading, hints)) + { + m_lastError = true; + delete hints; + return 0; + } + lookup->second.afLinkSent(); + } + + delete hints; + } + } + + // Return number of sent readings to the caller + return readings.size(); } /** @@ -1686,9 +1694,9 @@ uint32_t OMF::sendToServer(const vector& readings, * - transform a reading to OMF format * - add OMF data to new vector */ - ostringstream jsonData; string measurementId; 
- jsonData << "["; + OMFBuffer payload; + payload.append('['); // Fetch Reading data for (vector::const_iterator elem = readings.begin(); @@ -1726,19 +1734,22 @@ uint32_t OMF::sendToServer(const vector& readings, } // Add into JSON string the OMF transformed Reading data - jsonData << OMFData(*elem, measurementId, m_PIServerEndpoint, m_AFHierarchyLevel, hints).OMFdataVal() << (elem < (readings.end() -1 ) ? ", " : ""); + if (OMFData(payload, *elem, measurementId, false, m_PIServerEndpoint, m_AFHierarchyLevel, hints).hasData()) + if (elem < (readings.end() -1 )) + payload.append(','); } - jsonData << "]"; + payload.append(']'); // Build headers for Readings data vector> readingData = OMF::createMessageHeader("Data"); + const char *omfData = payload.coalesce(); // Build an HTTPS POST with 'readingData headers and 'allReadings' JSON payload // Then get HTTPS POST ret code and return 0 to client on error try { - int res = m_sender.sendRequest("POST", m_path, readingData, jsonData.str()); + int res = m_sender.sendRequest("POST", m_path, readingData, omfData); if ( ! (res >= 200 && res <= 299) ) { Logger::getLogger()->error("Sending JSON readings data " @@ -1746,8 +1757,9 @@ uint32_t OMF::sendToServer(const vector& readings, res, m_sender.getHostPort().c_str(), m_path.c_str(), - jsonData.str().c_str() ); + omfData); + delete[] omfData; m_lastError = true; return 0; } @@ -1759,11 +1771,12 @@ uint32_t OMF::sendToServer(const vector& readings, e.what(), m_sender.getHostPort().c_str(), m_path.c_str(), - jsonData.str().c_str() ); + omfData); - m_connected = false; + delete[] omfData; return false; } + delete[] omfData; m_lastError = false; @@ -1794,9 +1807,9 @@ uint32_t OMF::sendToServer(const Reading& reading, uint32_t OMF::sendToServer(const Reading* reading, bool skipSentDataTypes) { - ostringstream jsonData; string measurementId; - jsonData << "["; + OMFBuffer payload; + payload.append('['); m_assetName = ApplyPIServerNamingRulesObj(reading->getAssetName(), nullptr); @@ -1818,18 +1831,19 @@ uint32_t OMF::sendToServer(const Reading* reading, long typeId = OMF::getAssetTypeId(m_assetName); // Add into JSON string the OMF transformed Reading data - jsonData << OMFData(*reading, measurementId, m_PIServerEndpoint, m_AFHierarchyLevel, hints).OMFdataVal(); - jsonData << "]"; + OMFData(payload, *reading, measurementId, false, m_PIServerEndpoint, m_AFHierarchyLevel, hints); + payload.append(']'); // Build headers for Readings data vector> readingData = OMF::createMessageHeader("Data"); + const char *omfData = payload.coalesce(); // Build an HTTPS POST with 'readingData headers and 'allReadings' JSON payload // Then get HTTPS POST ret code and return 0 to client on error try { - int res = m_sender.sendRequest("POST", m_path, readingData, jsonData.str()); + int res = m_sender.sendRequest("POST", m_path, readingData, omfData); if ( ! 
(res >= 200 && res <= 299) ) { @@ -1838,7 +1852,8 @@ uint32_t OMF::sendToServer(const Reading* reading, res, m_sender.getHostPort().c_str(), m_path.c_str(), - jsonData.str().c_str() ); + omfData); + delete[] omfData; return 0; } @@ -1853,10 +1868,11 @@ uint32_t OMF::sendToServer(const Reading* reading, m_sender.getHostPort().c_str(), m_path.c_str() ); - m_connected = false; + delete[] omfData; return false; } + delete[] omfData; // Return number of sent readings to the caller return 1; } @@ -2120,8 +2136,8 @@ const std::string OMF::createContainerData(const Reading& reading, OMFHints *hin /** * Generate the container id for the given asset * - * @param assetName Asset for quick the container id should be generated - * @return Container it for the requested asset + * @param assetName Asset for which the container id should be generated + * @return Container id for the requested asset */ std::string OMF::generateMeasurementId(const string& assetName) { @@ -2174,7 +2190,7 @@ std::string OMF::generateMeasurementId(const string& assetName) /** * Generate a suffix for the given asset in relation to the selected naming schema and the value of the type id * - * @param assetName Asset for quick the suffix should be generated + * @param assetName Asset for which the suffix should be generated * @param typeId Type id of the asset * @return Suffix to be used for the given asset */ @@ -2284,8 +2300,8 @@ const std::string OMF::createStaticData(const Reading& reading) * Note: type is 'Data' * * @param reading A reading data - * @param AFHierarchyLevel The AF eleemnt we are placing the reading in - * @param AFHierarchyPrefix The prefix we use for thr AF Eleement + * @param AFHierarchyLevel The AF element we are placing the reading in + * @param AFHierarchyPrefix The prefix we use for the AF Element * @param objectPrefix The object prefix we are using for this asset * @param legacy We are using legacy, complex types for this reading * @return Type JSON message as string @@ -2507,7 +2523,7 @@ void OMF::retrieveAFHierarchyFullPrefixAssetName(const string& assetName, string } /** - * Handle the OMF hint AFLocation to defined a position of the asset into the AF hierarchy + * Handle the OMF hint AFLocation to define a position of the asset into the AF hierarchy * * @param assetName AssetName to handle * @param OmfHintHierarchy Position of the asset into the AF hierarchy @@ -2668,12 +2684,12 @@ bool OMF::extractVariable(string &strToHandle, string &variable, string &value, } /** - * Evaulate the AF hierarchy provided and expand the variables in the form ${room:unknown} + * Evaluate the AF hierarchy provided and expand the variables in the form ${room:unknown} * - * @param reading Asset reading that should be considered from which extract the metadata values + * @param reading Asset reading that should be considered from which to extract the metadata values * @param AFHierarchy AF hierarchy containing the variable to be expanded * - * @return True if variable were found and expanded + * @return True if variables were found and expanded */ std::string OMF::variableValueHandle(const Reading& reading, std::string &AFHierarchy) { @@ -2806,6 +2822,10 @@ bool OMF::evaluateAFHierarchyRules(const string& assetName, const Reading& readi ,path.c_str() ,it->second.c_str()); } + else + { + return false; + } } else { Logger::getLogger()->debug( "%s - m_NamesRules skipped pathInitial :%s: path :%s: stored :%s:" @@ -2858,6 +2878,10 @@ bool OMF::evaluateAFHierarchyRules(const string& assetName, const Reading& readi 
m_AssetNamePrefix[assetName].push_back(item); Logger::getLogger()->debug("%s - m_MetadataRulesExist asset :%s: path added :%s: :%s:" , __FUNCTION__, assetName.c_str(), pathInitial.c_str() , path.c_str() ); } + else + { + return false; + } } else { Logger::getLogger()->debug("%s - m_MetadataRulesExist already created asset :%s: path added :%s: :%s:" , __FUNCTION__, assetName.c_str(), pathInitial.c_str() , path.c_str() ); } @@ -2911,6 +2935,10 @@ bool OMF::evaluateAFHierarchyRules(const string& assetName, const Reading& readi m_AssetNamePrefix[assetName].push_back(item); Logger::getLogger()->debug("%s - m_MetadataRulesNonExist - asset :%s: path added :%s: :%s:" , __FUNCTION__, assetName.c_str(), pathInitial.c_str() , path.c_str() ); } + else + { + return false; + } } else { Logger::getLogger()->debug("%s - m_MetadataRulesNonExist - already created asset :%s: path added :%s: :%s:" , __FUNCTION__, assetName.c_str(), pathInitial.c_str() , path.c_str() ); } @@ -2976,6 +3004,10 @@ bool OMF::evaluateAFHierarchyRules(const string& assetName, const Reading& readi m_AssetNamePrefix[assetName].push_back(item); Logger::getLogger()->debug("%s - m_MetadataRulesEqual asset :%s: path added :%s: :%s:" , __FUNCTION__, assetName.c_str(), pathInitial.c_str() , path.c_str() ); } + else + { + return false; + } } else { Logger::getLogger()->debug("%s - m_MetadataRulesEqual already created asset :%s: path added :%s: :%s:" , __FUNCTION__, assetName.c_str(), pathInitial.c_str() , path.c_str() ); } @@ -3044,6 +3076,10 @@ bool OMF::evaluateAFHierarchyRules(const string& assetName, const Reading& readi m_AssetNamePrefix[assetName].push_back(item); Logger::getLogger()->debug("%s - m_MetadataRulesNotEqual asset :%s: path added :%s: :%s:" , __FUNCTION__, assetName.c_str(), pathInitial.c_str() , path.c_str() ); } + else + { + return false; + } } else { Logger::getLogger()->debug("%s - m_MetadataRulesNotEqual already created asset :%s: path added :%s: :%s:" , __FUNCTION__, assetName.c_str(), pathInitial.c_str() , path.c_str() ); } @@ -3861,7 +3897,7 @@ long OMF::getAssetTypeId(const string& assetName) * Retrieve the naming scheme for the given asset in relation to the end point selected the default naming scheme selected * and the naming scheme of the asset itself * - * @param assetName Asset for quick the naming schema should be retrieved + * @param assetName Asset for which the naming schema should be retrieved * @return Naming schema of the given asset */ long OMF::getNamingScheme(const string& assetName) @@ -3924,7 +3960,7 @@ long OMF::getNamingScheme(const string& assetName) /** * Retrieve the hash for the given asset in relation to the end point selected * - * @param assetName Asset for quick the hash should be retrieved + * @param assetName Asset for which the hash should be retrieved * @return Hash of the given asset */ string OMF::getHashStored(const string& assetName) @@ -3979,7 +4015,7 @@ string OMF::getHashStored(const string& assetName) /** * Retrieve the current AF hierarchy for the given asset * - * @param assetName Asset for quick the path should be retrieved + * @param assetName Asset for which the path should be retrieved * @return Path of the given asset */ string OMF::getPathStored(const string& assetName) @@ -4033,7 +4069,7 @@ string OMF::getPathStored(const string& assetName) /** * Retrieve the AF hierarchy in which given asset was created * - * @param assetName Asset for quick the path should be retrieved + * @param assetName Asset for which the path should be retrieved * @return Path of the given 
asset */ string OMF::getPathOrigStored(const string& assetName) @@ -4088,7 +4124,7 @@ string OMF::getPathOrigStored(const string& assetName) /** * Stores the current AF hierarchy for the given asset * - * @param assetName Asset for quick the path should be retrieved + * @param assetName Asset for which the path should be retrieved * @param afHierarchy Current AF hierarchy of the asset * * @return True if the operation has success @@ -4633,11 +4669,6 @@ std::string OMF::ApplyPIServerNamingRulesObj(const std::string &objName, bool *c nameFixed = StringTrim(objName); - if (objName.compare(nameFixed) != 0) - { - Logger::getLogger()->debug("%s - original :%s: trimmed :%s:", __FUNCTION__, objName.c_str(), nameFixed.c_str()); - } - if (nameFixed.empty ()) { Logger::getLogger()->debug("%s - object name empty", __FUNCTION__); @@ -4670,7 +4701,10 @@ std::string OMF::ApplyPIServerNamingRulesObj(const std::string &objName, bool *c *changed = true; } - Logger::getLogger()->debug("%s - final :%s: ", __FUNCTION__, nameFixed.c_str()); + if (objName.compare(nameFixed) != 0) + { + Logger::getLogger()->debug("%s - original :%s: trimmed :%s:", __FUNCTION__, objName.c_str(), nameFixed.c_str()); + } return (nameFixed); } @@ -4688,7 +4722,7 @@ std::string OMF::ApplyPIServerNamingRulesObj(const std::string &objName, bool *c * Names on PI-Server side are not case sensitive * * @param objName The object name to verify - * @param changed if not null, it is set to true if a change occur + * @param changed if not null, it is set to true if a change occurred * @return Object name following the PI Server naming rules */ std::string OMF::ApplyPIServerNamingRulesPath(const std::string &objName, bool *changed) @@ -4700,8 +4734,6 @@ std::string OMF::ApplyPIServerNamingRulesPath(const std::string &objName, bool * nameFixed = StringTrim(objName); - Logger::getLogger()->debug("%s - original :%s: trimmed :%s:", __FUNCTION__, objName.c_str(), nameFixed.c_str()); - if (nameFixed.empty ()) { Logger::getLogger()->debug("%s - path empty", __FUNCTION__); @@ -4741,12 +4773,14 @@ std::string OMF::ApplyPIServerNamingRulesPath(const std::string &objName, bool * } - Logger::getLogger()->debug("%s - final :%s: ", __FUNCTION__, nameFixed.c_str()); + if (objName.compare(nameFixed) != 0) + { + Logger::getLogger()->debug("%s - original :%s: trimmed :%s:", __FUNCTION__, objName.c_str(), nameFixed.c_str()); + } return (nameFixed); } - /** * Send the base types that we use to define all the data point values * @@ -4813,7 +4847,6 @@ bool OMF::sendBaseTypes() errorMsg.c_str(), m_sender.getHostPort().c_str(), m_path.c_str()); - m_connected = false; return false; } Logger::getLogger()->debug("Base types successfully sent"); @@ -4821,10 +4854,80 @@ bool OMF::sendBaseTypes() } /** - * Create the messages to link the asset into the right place in the AF structure + * Send a message to link the asset into the right place in the AF structure * * @param reading The reading being sent * @param hints OMF Hints for this reading + * @return true if the message was sent correctly, otherwise false. + */ +bool OMF::sendAFLinks(Reading &reading, OMFHints *hints) +{ + bool success = true; + std::string afLinks = createAFLinks(reading, hints); + if (afLinks.empty()) + { + return success; + } + + try + { + std::string action = (this->m_OMFVersion.compare("1.2") == 0) ? 
"update" : "create"; + vector> messageHeader = OMF::createMessageHeader("Data", action); + afLinks = "[" + afLinks + "]"; + + int res = m_sender.sendRequest("POST", + m_path, + messageHeader, + afLinks); + if (res >= 200 && res <= 299) + { + Logger::getLogger()->debug("AF Link message sent successfully: %s", afLinks.c_str()); + success = true; + } + else + { + Logger::getLogger()->error("Sending AF Link Data message, HTTP code %d - %s %s", + res, + m_sender.getHostPort().c_str(), + m_path.c_str()); + success = false; + } + } + catch (const BadRequest &e) // HTTP 400 + { + OMFError error(m_sender.getHTTPResponse()); + if (error.hasErrors()) + { + Logger::getLogger()->warn("The OMF endpoint reported a bad request when sending AF Link: %d messages", + error.messageCount()); + for (unsigned int i = 0; i < error.messageCount(); i++) + { + Logger::getLogger()->warn("Message %d: %s, %s, %s", + i, error.getEventSeverity(i).c_str(), error.getMessage(i).c_str(), error.getEventReason(i).c_str()); + } + } + success = false; + } + catch (const std::exception &e) + { + string errorMsg = errorMessageHandler(e.what()); + + Logger::getLogger()->error("AF Link send message exception, %s - %s %s", + errorMsg.c_str(), + m_sender.getHostPort().c_str(), + m_path.c_str()); + success = false; + } + + return success; +} + +/** + * Create the messages to link the asset holding the container to its parent asset + * + * @param reading The reading being sent + * @param hints OMF Hints for this reading + * @return OMF JSON snippet to create the AF Link */ string OMF::createAFLinks(Reading& reading, OMFHints *hints) { @@ -4867,12 +4970,11 @@ string AFDataMessage; // Create data for Static Data message AFDataMessage = OMF::createLinkData(reading, AFHierarchyLevel, prefix, objectPrefix, hints, false); - } } else { - Logger::getLogger()->error("AF hiererachy is not defined for the asset Name |%s|", assetName.c_str()); + Logger::getLogger()->error("AF hierarchy is not defined for the asset Name |%s|", assetName.c_str()); } } return AFDataMessage; @@ -4902,26 +5004,3 @@ void OMF::reportAsset(const string& asset, const string& level, const string& ms Logger::getLogger()->debug(msg); } } - -/** - * Set the connection state - * - * @param connectionStatus The target connection status - */ -void OMF::setConnected(const bool connectionStatus) -{ - if (connectionStatus != m_connected) - { - // Send an audit event for the change of state - string data = "{ \"plugin\" : \"OMF\", \"service\" : \"" + m_name + "\" }"; - if (!connectionStatus) - { - AuditLogger::auditLog("NHDWN", "ERROR", data); - } - else - { - AuditLogger::auditLog("NHAVL", "INFORMATION", data); - } - } - m_connected = connectionStatus; -} diff --git a/C/plugins/north/OMF/omfbuffer.cpp b/C/plugins/north/OMF/omfbuffer.cpp new file mode 100644 index 0000000000..4648330980 --- /dev/null +++ b/C/plugins/north/OMF/omfbuffer.cpp @@ -0,0 +1,347 @@ +/* + * Fledge OMF north plugin buffer class + * + * Copyright (c) 2023 Dianomic Systems + * + * Released under the Apache 2.0 Licence + * + * Author: Mark Riddoch + */ +#include +#include +#include + +using namespace std; +/** + * Buffer class designed to hold OMF payloads that can + * as required but have minimal copy semantics. 
+ */ + +/** + * OMFBuffer constructor + */ +OMFBuffer::OMFBuffer() +{ + buffers.push_front(new OMFBuffer::Buffer()); +} + +/** + * OMFBuffer destructor + */ +OMFBuffer::~OMFBuffer() +{ + for (list::iterator it = buffers.begin(); it != buffers.end(); ++it) + { + delete *it; + } +} + +/** + * Clear all the buffers from the OMFBuffer and allow it to be reused + */ +void OMFBuffer::clear() +{ + for (list::iterator it = buffers.begin(); it != buffers.end(); ++it) + { + delete *it; + } + buffers.clear(); + buffers.push_front(new OMFBuffer::Buffer()); +} + +/** + * Append a character to a buffer + * + * @param data The character to append to the buffer + */ +void OMFBuffer::append(const char data) +{ +OMFBuffer::Buffer *buffer = buffers.back(); + + if (buffer->offset == buffer->length) + { + buffer = new OMFBuffer::Buffer(); + buffers.push_back(buffer); + } + buffer->data[buffer->offset] = data; + buffer->data[buffer->offset + 1] = 0; + buffer->offset++; +} + +/** + * Append a character string to a buffer + * + * @param data The string to append to the buffer + */ +void OMFBuffer::append(const char *data) +{ +unsigned int len = strlen(data); +OMFBuffer::Buffer *buffer = buffers.back(); + + if (buffer->offset + len >= buffer->length) + { + if (len > BUFFER_CHUNK) + { + buffer = new OMFBuffer::Buffer(len + BUFFER_CHUNK); + } + else + { + buffer = new OMFBuffer::Buffer(); + } + buffers.push_back(buffer); + } + memcpy(&buffer->data[buffer->offset], data, len); + buffer->offset += len; + buffer->data[buffer->offset] = 0; +} + +/** + * Append an integer to a buffer + * + * @param value The value to append to the buffer + */ +void OMFBuffer::append(const int value) +{ +char tmpbuf[80]; +unsigned int len; +OMFBuffer::Buffer *buffer = buffers.back(); + + len = (unsigned int)snprintf(tmpbuf, 80, "%d", value); + if (buffer->offset + len >= buffer->length) + { + buffer = new OMFBuffer::Buffer(); + buffers.push_back(buffer); + } + memcpy(&buffer->data[buffer->offset], tmpbuf, len); + buffer->offset += len; + buffer->data[buffer->offset] = 0; +} + +/** + * Append a long to a buffer + * + * @param value The long value to append to the buffer + */ +void OMFBuffer::append(const long value) +{ +char tmpbuf[80]; +unsigned int len; +OMFBuffer::Buffer *buffer = buffers.back(); + + len = (unsigned int)snprintf(tmpbuf, 80, "%ld", value); + if (buffer->offset + len >= buffer->length) + { + buffer = new OMFBuffer::Buffer(); + buffers.push_back(buffer); + } + memcpy(&buffer->data[buffer->offset], tmpbuf, len); + buffer->offset += len; + buffer->data[buffer->offset] = 0; +} + +/** + * Append an unsigned integer to a buffer + * + * @param value The unsigned integer value to append to the buffer + */ +void OMFBuffer::append(const unsigned int value) +{ +char tmpbuf[80]; +unsigned int len; +OMFBuffer::Buffer *buffer = buffers.back(); + + len = (unsigned int)snprintf(tmpbuf, 80, "%u", value); + if (buffer->offset + len >= buffer->length) + { + buffer = new OMFBuffer::Buffer(); + buffers.push_back(buffer); + } + memcpy(&buffer->data[buffer->offset], tmpbuf, len); + buffer->offset += len; + buffer->data[buffer->offset] = 0; +} + +/** + * Append an unsigned long to a buffer + * + * @param value The value to append to the buffer + */ +void OMFBuffer::append(const unsigned long value) +{ +char tmpbuf[80]; +unsigned int len; +OMFBuffer::Buffer *buffer = buffers.back(); + + len = (unsigned int)snprintf(tmpbuf, 80, "%lu", value); + if (buffer->offset + len >= buffer->length) + { + buffer = new OMFBuffer::Buffer(); +
buffers.push_back(buffer); + } + memcpy(&buffer->data[buffer->offset], tmpbuf, len); + buffer->offset += len; + buffer->data[buffer->offset] = 0; +} + +/** + * Append a double to a buffer + * + * @param value The double value to append to the buffer + */ +void OMFBuffer::append(const double value) +{ +char tmpbuf[80]; +unsigned int len; +OMFBuffer::Buffer *buffer = buffers.back(); + + len = (unsigned int)snprintf(tmpbuf, 80, "%f", value); + if (buffer->offset + len >= buffer->length) + { + buffer = new OMFBuffer::Buffer(); + buffers.push_back(buffer); + } + memcpy(&buffer->data[buffer->offset], tmpbuf, len); + buffer->offset += len; + buffer->data[buffer->offset] = 0; +} + +/** + * Append a string to a buffer + * + * @param str The string to be appended to the buffer + */ +void OMFBuffer::append(const string& str) +{ +const char *cstr = str.c_str(); +unsigned int len = strlen(cstr); +OMFBuffer::Buffer *buffer = buffers.back(); + + if (buffer->offset + len >= buffer->length) + { + if (len > BUFFER_CHUNK) + { + buffer = new OMFBuffer::Buffer(len + BUFFER_CHUNK); + } + else + { + buffer = new OMFBuffer::Buffer(); + } + buffers.push_back(buffer); + } + memcpy(&buffer->data[buffer->offset], cstr, len); + buffer->offset += len; + buffer->data[buffer->offset] = 0; +} + +/** + * Quote and append a string to a buffer + * + * @param str The string to quote and append to the buffer + */ +void OMFBuffer::quote(const string& str) +{ +string esc = str; +StringEscapeQuotes(esc); +const char *cstr = esc.c_str(); +unsigned int len = strlen(cstr) + 2; +OMFBuffer::Buffer *buffer = buffers.back(); + + if (buffer->offset + len >= buffer->length) + { + if (len > BUFFER_CHUNK) + { + buffer = new OMFBuffer::Buffer(len + BUFFER_CHUNK); + } + else + { + buffer = new OMFBuffer::Buffer(); + } + buffers.push_back(buffer); + } + buffer->data[buffer->offset] = '"'; + memcpy(&buffer->data[buffer->offset + 1], cstr, len - 2); + buffer->data[buffer->offset + len - 1] = '"'; + buffer->offset += len; + buffer->data[buffer->offset] = 0; +} + +/** + * Create a coalesced buffer from the buffer chain + * + * The buffer returned has been created using the new[] operator and must be + * deleted by the caller. + * @return char* The OMF payload in a single buffer + */ +const char *OMFBuffer::coalesce() +{ +unsigned int length = 0, offset = 0; +char *buffer = 0; + + if (buffers.size() == 1) + { + return buffers.back()->detach(); + } + for (list::iterator it = buffers.begin(); it != buffers.end(); ++it) + { + length += (*it)->offset; + } + buffer = new char[length+1]; + for (list::iterator it = buffers.begin(); it != buffers.end(); ++it) + { + memcpy(&buffer[offset], (*it)->data, (*it)->offset); + offset += (*it)->offset; + } + buffer[offset] = 0; + return buffer; +} + +/** + * Construct a buffer with a standard size initial buffer. + */ +OMFBuffer::Buffer::Buffer() : offset(0), length(BUFFER_CHUNK), attached(true) +{ + data = new char[BUFFER_CHUNK+1]; + data[0] = 0; +} + +/** + * Construct a large buffer, passing the size of buffer required. This is useful + * if you know your buffer requirements are large and you wish to reduce the amount + * of allocation required. + * + * @param size The size of the initial buffer to allocate. + */ +OMFBuffer::Buffer::Buffer(unsigned int size) : offset(0), length(size), attached(true) +{ + data = new char[size+1]; + data[0] = 0; +} + +/** + * Buffer destructor, the buffer itself is also deleted by this + * call and any reference to it must no longer be used. 
+ */ +OMFBuffer::Buffer::~Buffer() +{ + if (attached) + { + delete[] data; + data = 0; + } +} + +/** + * Detach the buffer from the OMFBuffer. The reference to the buffer + * is removed from the OMFBuffer but the buffer itself is not deleted. + * This allows the buffer ownership to be taken by external code + * whilst allowing the OMFBuffer to allocate a new buffer. + */ +char *OMFBuffer::Buffer::detach() +{ +char *rval = data; + + attached = false; + length = 0; + data = 0; + return rval; +} diff --git a/C/plugins/north/OMF/omfinfo.cpp b/C/plugins/north/OMF/omfinfo.cpp index ed3c5b7b98..3297f04f73 100644 --- a/C/plugins/north/OMF/omfinfo.cpp +++ b/C/plugins/north/OMF/omfinfo.cpp @@ -461,7 +461,6 @@ uint32_t OMFInformation::send(const vector& readings) { // Created a new sender after a connection failure m_omf->setSender(*m_sender); - m_omf->setConnected(false); } } @@ -479,7 +478,6 @@ uint32_t OMFInformation::send(const vector& readings) m_omf = new OMF(m_name, *m_sender, m_path, m_assetsDataTypes, m_producerToken); - m_omf->setConnected(m_connected); m_omf->setSendFullStructure(m_sendFullStructure); // Set PIServerEndpoint configuration @@ -529,15 +527,6 @@ uint32_t OMFInformation::send(const vector& readings) TYPE_ID_KEY, m_typeId); } - - // Write a warning if the connection to PI Web API has been lost - bool updatedConnected = m_omf->getConnected(); - if (m_PIServerEndpoint == ENDPOINT_PIWEB_API && m_connected && !updatedConnected) - { - Logger::getLogger()->warn("Connection to PI Web API at %s has been lost", m_hostAndPort.c_str()); - } - m_connected = updatedConnected; - #if INSTRUMENT Logger::getLogger()->debug("plugin_send elapsed time: %6.3f seconds, NumValues: %u", GetElapsedTime(&startTime), ret); @@ -1339,51 +1328,52 @@ double OMFInformation::GetElapsedTime(struct timeval *startTime) } /** - * Check if the PI Web API server is available by reading the product version + * Check if the PI Web API server is available by reading the product version every 60 seconds. + * Log a message if the connection state changes. * * @return Connection status */ bool OMFInformation::IsPIWebAPIConnected() { - static std::chrono::steady_clock::time_point nextCheck; - static bool reported = false; // Has the state been reported yet - static bool reportedState; // What was the last reported state + static std::chrono::steady_clock::time_point nextCheck(std::chrono::steady_clock::time_point::duration::zero()); + static bool lastConnected = m_connected; // Previous value of m_connected - if (!m_connected && m_PIServerEndpoint == ENDPOINT_PIWEB_API) + if (m_PIServerEndpoint == ENDPOINT_PIWEB_API) { std::chrono::steady_clock::time_point now = std::chrono::steady_clock::now(); if (now >= nextCheck) { int httpCode = PIWebAPIGetVersion(false); - if (httpCode >= 400) + Logger::getLogger()->debug("PIWebAPIGetVersion: %s HTTP Code: %d Connected: %s LastConnected: %s", + m_hostAndPort.c_str(), + httpCode, + m_connected ? "true" : "false", + lastConnected ? "true" : "false"); + + if ((httpCode < 200) || (httpCode >= 400)) { m_connected = false; - now = std::chrono::steady_clock::now(); - nextCheck = now + std::chrono::seconds(60); - Logger::getLogger()->debug("PI Web API %s is not available. 
HTTP Code: %d", m_hostAndPort.c_str(), httpCode); - if (reported == false || reportedState == true) - { - reportedState = false; - reported = true; - Logger::getLogger()->error("The PI Web API service %s is not available", - m_hostAndPort.c_str()); + if (lastConnected == true) + { + Logger::getLogger()->error("The PI Web API service %s is not available. HTTP Code: %d", + m_hostAndPort.c_str(), httpCode); + lastConnected = false; } } else { m_connected = true; SetOMFVersion(); - Logger::getLogger()->info("%s reconnected to %s OMF Version: %s", - m_RestServerVersion.c_str(), m_hostAndPort.c_str(), m_omfversion.c_str()); - if (reported == true || reportedState == false) + if (lastConnected == false) { - reportedState = true; - reported = true; - Logger::getLogger()->warn("The PI Web API service %s has become available", - m_hostAndPort.c_str()); + Logger::getLogger()->warn("%s reconnected to %s OMF Version: %s", + m_RestServerVersion.c_str(), m_hostAndPort.c_str(), m_omfversion.c_str()); + lastConnected = true; } } + + nextCheck = now + std::chrono::seconds(60); } } else diff --git a/C/plugins/storage/postgres/connection.cpp b/C/plugins/storage/postgres/connection.cpp index 357cb5a008..65b53c86e6 100644 --- a/C/plugins/storage/postgres/connection.cpp +++ b/C/plugins/storage/postgres/connection.cpp @@ -353,6 +353,7 @@ Connection::Connection() : m_maxReadingRows(INSERT_ROW_LIMIT) PQerrorMessage(dbConnection)); connectErrorTime = time(0); } + throw runtime_error("Unable to connect to PostgreSQL database"); } logSQL("Set", "session time zone 'UTC' "); @@ -3301,6 +3302,12 @@ const string Connection::escape_double_quotes(const string& str) *p2++ = '\"'; p1++; } + else if (*p1 == '\\' ) // Take care of previously escaped quotes + { + *p2++ = '\\'; + *p2++ = '\\'; + p1++; + } else { *p2++ = *p1++; diff --git a/C/plugins/storage/postgres/connection_manager.cpp b/C/plugins/storage/postgres/connection_manager.cpp index 025f168033..5d9586b8a0 100644 --- a/C/plugins/storage/postgres/connection_manager.cpp +++ b/C/plugins/storage/postgres/connection_manager.cpp @@ -9,6 +9,8 @@ */ #include #include +#include +#include ConnectionManager *ConnectionManager::instance = 0; @@ -58,12 +60,16 @@ void ConnectionManager::growPool(unsigned int delta) { while (delta-- > 0) { - Connection *conn = new Connection(); - conn->setTrace(m_logSQL); - conn->setMaxReadingRows(m_maxReadingRows); - idleLock.lock(); - idle.push_back(conn); - idleLock.unlock(); + try { + Connection *conn = new Connection(); + conn->setTrace(m_logSQL); + conn->setMaxReadingRows(m_maxReadingRows); + idleLock.lock(); + idle.push_back(conn); + idleLock.unlock(); + } catch (std::exception& e) { + Logger::getLogger()->error("Failed to create storage connection: %s", e.what()); + } } } diff --git a/C/plugins/storage/postgres/plugin.cpp b/C/plugins/storage/postgres/plugin.cpp index 3892a7b44f..20270d9d38 100644 --- a/C/plugins/storage/postgres/plugin.cpp +++ b/C/plugins/storage/postgres/plugin.cpp @@ -107,6 +107,12 @@ int plugin_common_insert(PLUGIN_HANDLE handle, char *schema, char *table, char * ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); + if (connection == NULL) + { + Logger::getLogger()->fatal("No database connections available"); + return 0; + } + int result = connection->insert(std::string(OR_DEFAULT_SCHEMA(schema)) + "." 
+ std::string(table), std::string(data)); manager->release(connection); @@ -122,6 +128,12 @@ ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); std::string results; + if (connection == NULL) + { + Logger::getLogger()->fatal("No database connections available"); + return NULL; + } + bool rval = connection->retrieve(schema, std::string(OR_DEFAULT_SCHEMA(schema)) + "." + std::string(table), std::string(query), results); manager->release(connection); if (rval) @@ -139,6 +151,12 @@ int plugin_common_update(PLUGIN_HANDLE handle, char *schema, char *table, char * ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); + if (connection == NULL) + { + Logger::getLogger()->fatal("No database connections available"); + return 0; + } + int result = connection->update(std::string(OR_DEFAULT_SCHEMA(schema)) + "." + std::string(table), std::string(data)); manager->release(connection); return result; @@ -152,6 +170,12 @@ int plugin_common_delete(PLUGIN_HANDLE handle, char *schema , char *table, char ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); + if (connection == NULL) + { + Logger::getLogger()->fatal("No database connections available"); + return 0; + } + int result = connection->deleteRows(std::string(OR_DEFAULT_SCHEMA(schema)) + "." + std::string(table), std::string(condition)); manager->release(connection); return result; @@ -165,9 +189,15 @@ int plugin_reading_append(PLUGIN_HANDLE handle, char *readings) ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); + if (connection == NULL) + { + Logger::getLogger()->fatal("No database connections available"); + return 0; + } + int result = connection->appendReadings(readings); manager->release(connection); - return result;; + return result; } /** @@ -179,6 +209,12 @@ ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); std::string resultSet; + if (connection == NULL) + { + Logger::getLogger()->fatal("No database connections available"); + return NULL; + } + connection->fetchReadings(id, blksize, resultSet); manager->release(connection); return strdup(resultSet.c_str()); @@ -193,6 +229,12 @@ ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); std::string results; + if (connection == NULL) + { + Logger::getLogger()->fatal("No database connections available"); + return NULL; + } + connection->retrieveReadings(std::string(condition), results); manager->release(connection); return strdup(results.c_str()); @@ -208,6 +250,12 @@ Connection *connection = manager->allocate(); std::string results; unsigned long age, size; + if (connection == NULL) + { + Logger::getLogger()->fatal("No database connections available"); + return NULL; + } + if (flags & STORAGE_PURGE_SIZE) { (void)connection->purgeReadingsByRows(param, flags, sent, results); @@ -270,6 +318,12 @@ int plugin_create_table_snapshot(PLUGIN_HANDLE handle, ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); + if (connection == NULL) + { + Logger::getLogger()->fatal("No database connections available"); + return -1; + } + int result = connection->create_table_snapshot(std::string(table), std::string(id)); manager->release(connection); @@ -291,6 +345,12 @@ int plugin_load_table_snapshot(PLUGIN_HANDLE handle, ConnectionManager *manager = (ConnectionManager 
*)handle; Connection *connection = manager->allocate(); + if (connection == NULL) + { + Logger::getLogger()->fatal("No database connections available"); + return -1; + } + int result = connection->load_table_snapshot(std::string(table), std::string(id)); manager->release(connection); @@ -313,6 +373,12 @@ int plugin_delete_table_snapshot(PLUGIN_HANDLE handle, ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); + if (connection == NULL) + { + Logger::getLogger()->fatal("No database connections available"); + return -1; + } + int result = connection->delete_table_snapshot(std::string(table), std::string(id)); manager->release(connection); @@ -333,6 +399,12 @@ ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); std::string results; + if (connection == NULL) + { + Logger::getLogger()->fatal("No database connections available"); + return NULL; + } + bool rval = connection->get_table_snapshots(std::string(table), results); manager->release(connection); @@ -353,6 +425,12 @@ int plugin_createSchema(PLUGIN_HANDLE handle, ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); + if (connection == NULL) + { + Logger::getLogger()->fatal("No database connections available"); + return -1; + } + int result = connection->create_schema(std::string(payload)); manager->release(connection); return result; @@ -364,6 +442,12 @@ int plugin_schema_update(PLUGIN_HANDLE handle, ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); + if (connection == NULL) + { + Logger::getLogger()->fatal("No database connections available"); + return 0; + } + // create_schema handles both create and update schema // schema value gets parsed from the payload int result = connection->create_schema(std::string(payload)); @@ -380,6 +464,12 @@ unsigned int plugin_reading_purge_asset(PLUGIN_HANDLE handle, char *asset) ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); + if (connection == NULL) + { + Logger::getLogger()->fatal("No database connections available"); + return 0; + } + unsigned int deleted = connection->purgeReadingsAsset(asset); manager->release(connection); return deleted; diff --git a/C/plugins/storage/sqlite/common/connection_manager.cpp b/C/plugins/storage/sqlite/common/connection_manager.cpp index d841322ac5..d20141478d 100644 --- a/C/plugins/storage/sqlite/common/connection_manager.cpp +++ b/C/plugins/storage/sqlite/common/connection_manager.cpp @@ -489,7 +489,7 @@ int ConnectionManager::SQLExec(sqlite3 *dbHandle, const char *sqlCmd, char **err /** * Background thread used to execute periodic tasks and oversee the database activity. 
* - * We will runt he SQLite vacuum command periodically to allow space to be reclaimed + * We will run the SQLite vacuum command periodically to allow space to be reclaimed */ void ConnectionManager::background() { diff --git a/C/plugins/storage/sqlitelb/common/connection.cpp b/C/plugins/storage/sqlitelb/common/connection.cpp index d271811078..aaf947cf56 100644 --- a/C/plugins/storage/sqlitelb/common/connection.cpp +++ b/C/plugins/storage/sqlitelb/common/connection.cpp @@ -426,7 +426,7 @@ bool retCode; /** * Create a SQLite3 database connection */ -Connection::Connection() +Connection::Connection() : m_purgeBlockSize(10000) { string dbPath, dbPathReadings; const char *defaultConnection = getenv("DEFAULT_SQLITE_DB_FILE"); diff --git a/C/plugins/storage/sqlitelb/common/connection_manager.cpp b/C/plugins/storage/sqlitelb/common/connection_manager.cpp index 4da5090368..466b246cdf 100644 --- a/C/plugins/storage/sqlitelb/common/connection_manager.cpp +++ b/C/plugins/storage/sqlitelb/common/connection_manager.cpp @@ -25,7 +25,7 @@ static void managerBackground(void *arg) /** * Default constructor for the connection manager. */ -ConnectionManager::ConnectionManager() : m_shutdown(false), m_vacuumInterval(6 * 60 * 60) +ConnectionManager::ConnectionManager() : m_shutdown(false), m_vacuumInterval(6 * 60 * 60), m_purgeBlockSize(10000) { lastError.message = NULL; lastError.entryPoint = NULL; @@ -61,6 +61,20 @@ ConnectionManager *ConnectionManager::getInstance() return instance; } +/** + * Set the purge block size in each of the connections + * + * @param purgeBlockSize The requested purgeBlockSize + */ +void ConnectionManager::setPurgeBlockSize(unsigned long purgeBlockSize) +{ + m_purgeBlockSize = purgeBlockSize; + idleLock.lock(); + for (auto& c : idle) + c->setPurgeBlockSize(purgeBlockSize); + idleLock.unlock(); +} + /** * Grow the connection pool by the number of connections * specified. 
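The purgeBlockSize plumbing above (stored on the manager, pushed to each idle connection, and applied to connections created later by growPool) bounds how many rows a single DELETE removes during a purge pass. A minimal, illustrative sketch of the kind of block-wise purge loop this supports follows; deleteOldestReadings is a hypothetical stand-in for the plugin's real SQL, not an actual Fledge API:

	// Purge up to 'rowsToPurge' readings in bounded blocks so that each
	// DELETE statement holds the database lock only briefly.
	// Assumes <algorithm> for std::min and <thread> for std::this_thread.
	unsigned long purged = 0;
	while (purged < rowsToPurge)
	{
		unsigned long block = std::min(purgeBlockSize, rowsToPurge - purged);
		unsigned long affected = deleteOldestReadings(block);	// e.g. DELETE ... LIMIT block
		if (affected == 0)
			break;				// nothing left to remove
		purged += affected;
		std::this_thread::yield();	// give other connections a chance to run
	}

Smaller blocks shorten each lock hold at the cost of issuing more statements; the configuration item added later in this change constrains the value to the 1000 to 100000 range.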
@@ -72,6 +86,7 @@ void ConnectionManager::growPool(unsigned int delta) while (delta-- > 0) { Connection *conn = new Connection(); + conn->setPurgeBlockSize(m_purgeBlockSize); if (m_trace) conn->setTrace(true); idleLock.lock(); diff --git a/C/plugins/storage/sqlitelb/common/include/connection.h b/C/plugins/storage/sqlitelb/common/include/connection.h index ad1ac00aa4..b22d23697d 100644 --- a/C/plugins/storage/sqlitelb/common/include/connection.h +++ b/C/plugins/storage/sqlitelb/common/include/connection.h @@ -124,6 +124,10 @@ class Connection { bool loadDatabase(const std::string& filname); bool saveDatabase(const std::string& filname); #endif + void setPurgeBlockSize(unsigned long purgeBlockSize) + { + m_purgeBlockSize = purgeBlockSize; + }; private: #ifndef MEMORY_READING_PLUGIN @@ -132,6 +136,7 @@ class Connection { bool m_streamOpenTransaction; int m_queuing; std::mutex m_qMutex; + unsigned long m_purgeBlockSize; std::string operation(const char *sql); int SQLexec(sqlite3 *db, const std::string& table, const char *sql, int (*callback)(void*,int,char**,char**), diff --git a/C/plugins/storage/sqlitelb/common/include/connection_manager.h b/C/plugins/storage/sqlitelb/common/include/connection_manager.h index bba0ee42b9..214581285d 100644 --- a/C/plugins/storage/sqlitelb/common/include/connection_manager.h +++ b/C/plugins/storage/sqlitelb/common/include/connection_manager.h @@ -41,9 +41,10 @@ class ConnectionManager { { m_persist = persist; m_filename = filename; - } - bool persist() { return m_persist; }; - std::string filename() { return m_filename; }; + } + bool persist() { return m_persist; }; + std::string filename() { return m_filename; }; + void setPurgeBlockSize(unsigned long purgeBlockSize); protected: ConnectionManager(); @@ -62,6 +63,7 @@ class ConnectionManager { long m_vacuumInterval; bool m_persist; std::string m_filename; + unsigned long m_purgeBlockSize; }; #endif diff --git a/C/plugins/storage/sqlitelb/common/readings.cpp b/C/plugins/storage/sqlitelb/common/readings.cpp index fb6cbad35a..78290b9700 100644 --- a/C/plugins/storage/sqlitelb/common/readings.cpp +++ b/C/plugins/storage/sqlitelb/common/readings.cpp @@ -2163,7 +2163,7 @@ unsigned int Connection::purgeReadingsByRows(unsigned long rows, return 0; } - deletePoint = minId + 10000; + deletePoint = minId + m_purgeBlockSize; if (maxId - deletePoint < rows || deletePoint > maxId) deletePoint = maxId - rows; @@ -2210,7 +2210,7 @@ unsigned int Connection::purgeReadingsByRows(unsigned long rows, unsentPurged += rowsAffected; } } - std::this_thread::yield(); // Give other threads a chane to run + std::this_thread::yield(); // Give other threads a chance to run } while (rowcount > rows); if (rowsAvailableToPurge) diff --git a/C/plugins/storage/sqlitelb/plugin.cpp b/C/plugins/storage/sqlitelb/plugin.cpp index 87dfe8f9b6..03b20f648d 100644 --- a/C/plugins/storage/sqlitelb/plugin.cpp +++ b/C/plugins/storage/sqlitelb/plugin.cpp @@ -49,7 +49,16 @@ const char *default_config = QUOTE({ "default" : "6", "displayName" : "Vacuum Interval", "order" : "2" - } + }, + "purgeBlockSize" : { + "description" : "The number of rows to purge in each delete statement", + "type" : "integer", + "default" : "10000", + "displayName" : "Purge Block Size", + "order" : "3", + "minimum" : "1000", + "maximum" : "100000" + } }); /** @@ -91,6 +100,11 @@ int poolSize = 5; { manager->setVacuumInterval(strtol(category->getValue("vacuumInterval").c_str(), NULL, 10)); } + if (category->itemExists("purgeBlockSize")) + { + unsigned long purgeBlockSize = 
strtoul(category->getValue("purgeBlockSize").c_str(), NULL, 10); + manager->setPurgeBlockSize(purgeBlockSize); + } return manager; } diff --git a/C/plugins/storage/sqlitememory/include/connection.h b/C/plugins/storage/sqlitememory/include/connection.h index 44fc47d3f4..ed90ff13c4 100644 --- a/C/plugins/storage/sqlitememory/include/connection.h +++ b/C/plugins/storage/sqlitememory/include/connection.h @@ -38,6 +38,10 @@ class Connection { bool vacuum(); bool loadDatabase(const std::string& filname); bool saveDatabase(const std::string& filname); + void setPurgeBlockSize(unsigned long purgeBlockSize) + { + m_purgeBlockSize = purgeBlockSize; + } private: int SQLexec(sqlite3 *db, const char *sql, int (*callback)(void*,int,char**,char**), @@ -60,5 +64,6 @@ class Connection { int i, std::string& newDate); void logSQL(const char *, const char *); + unsigned long m_purgeBlockSize; }; #endif diff --git a/C/plugins/storage/sqlitememory/include/connection_manager.h b/C/plugins/storage/sqlitememory/include/connection_manager.h index 24cbf04c04..5b73617b4e 100644 --- a/C/plugins/storage/sqlitememory/include/connection_manager.h +++ b/C/plugins/storage/sqlitememory/include/connection_manager.h @@ -39,6 +39,7 @@ class MemConnectionManager { } bool persist() { return m_persist; }; std::string filename() { return m_filename; }; + void setPurgeBlockSize(unsigned long purgeBlockSize); private: MemConnectionManager(); @@ -52,6 +53,7 @@ class MemConnectionManager { bool m_trace; bool m_persist; std::string m_filename; + unsigned long m_purgeBlockSize; }; #endif diff --git a/C/plugins/storage/sqlitememory/plugin.cpp b/C/plugins/storage/sqlitememory/plugin.cpp index f44b443148..68c805b09a 100644 --- a/C/plugins/storage/sqlitememory/plugin.cpp +++ b/C/plugins/storage/sqlitememory/plugin.cpp @@ -56,6 +56,15 @@ const char *default_config = QUOTE({ "default" : "false", "displayName" : "Persist Data", "order" : "2" + }, + "purgeBlockSize" : { + "description" : "The number of rows to purge in each delete statement", + "type" : "integer", + "default" : "10000", + "displayName" : "Purge Block Size", + "order" : "3", + "minimum" : "1000", + "maximum" : "100000" } }); @@ -118,6 +127,11 @@ int poolSize = 5; Connection *connection = manager->allocate(); connection->loadDatabase(manager->filename()); } + if (category->itemExists("purgeBlockSize")) + { + unsigned long purgeBlockSize = strtoul(category->getValue("purgeBlockSize").c_str(), NULL, 10); + manager->setPurgeBlockSize(purgeBlockSize); + } return manager; } /** diff --git a/C/services/north/data_load.cpp b/C/services/north/data_load.cpp index 8d721395ef..c0a119f255 100755 --- a/C/services/north/data_load.cpp +++ b/C/services/north/data_load.cpp @@ -550,8 +550,28 @@ void DataLoad::pipelineEnd(OUTPUT_HANDLE *outHandle, { DataLoad *load = (DataLoad *)outHandle; - if (readingSet->getCount() == 0) // Special case when all filtered out + std::vector* vecPtr = readingSet->getAllReadingsPtr(); + unsigned long lastReadingId = 0; + + for(auto rdngPtrItr = vecPtr->crbegin(); rdngPtrItr != vecPtr->crend(); rdngPtrItr++) + { + if((*rdngPtrItr)->hasId()) // only consider valid reading IDs + { + lastReadingId = (*rdngPtrItr)->getId(); + break; + } + } + + Logger::getLogger()->debug("DataLoad::pipelineEnd(): readingSet->getCount()=%d, lastReadingId=%d, " + "load->m_lastFetched=%d", + readingSet->getCount(), lastReadingId, load->m_lastFetched); + + // Special case when all readings are filtered out + // or new readings are appended by filter with id 0 + if ((readingSet->getCount() == 
0) || (lastReadingId == 0)) { + Logger::getLogger()->debug("DataLoad::pipelineEnd(): updating with load->updateLastSentId(%d)", + load->m_lastFetched); load->updateLastSentId(load->m_lastFetched); } diff --git a/C/services/north/data_send.cpp b/C/services/north/data_send.cpp index b6ad804052..9d0e148641 100755 --- a/C/services/north/data_send.cpp +++ b/C/services/north/data_send.cpp @@ -126,16 +126,32 @@ unsigned long DataSender::send(ReadingSet *readings) uint32_t to_send = readings->getCount(); uint32_t sent = m_plugin->send(readings->getAllReadings()); releasePause(); - unsigned long lastSent = readings->getReadingId(sent); + + // last few readings in the reading set may have 0 reading ID, + // if they have been generated by filters on north service itself + const std::vector& readingsVec = readings->getAllReadings(); + unsigned long lastSent = 0; + for(auto rdngPtrItr = readingsVec.crbegin(); rdngPtrItr != readingsVec.crend(); rdngPtrItr++) + { + if((*rdngPtrItr)->hasId()) // only consider readings with valid reading IDs + { + lastSent = (*rdngPtrItr)->getId(); + break; + } + } + + // unsigned long lastSent = readings->getReadingId(sent); if (m_perfMonitor) { m_perfMonitor->collect("Readings sent", sent); m_perfMonitor->collect("Percentage readings sent", (100 * sent) / to_send); } + Logger::getLogger()->debug("DataSender::send(): to_send=%d, sent=%d, lastSent=%d", to_send, sent, lastSent); + if (sent > 0) { - lastSent = readings->getLastId(); + // lastSent = readings->getLastId(); // Update asset tracker table/cache, if required vector *vec = readings->getAllReadingsPtr(); @@ -144,9 +160,8 @@ unsigned long DataSender::send(ReadingSet *readings) { Reading *reading = *it; - if (reading->getId() <= lastSent) + if (!reading->hasId() || reading->getId() <= lastSent) { - AssetTrackingTuple tuple(m_service->getName(), m_service->getPluginName(), reading->getAssetName(), "Egress"); if (!AssetTracker::getAssetTracker()->checkAssetTrackingCache(tuple)) { diff --git a/C/services/north/north.cpp b/C/services/north/north.cpp index d2ed9a792d..4f47a4dc42 100755 --- a/C/services/north/north.cpp +++ b/C/services/north/north.cpp @@ -475,6 +475,15 @@ void NorthService::start(string& coreAddress, unsigned short corePort) m_dataLoad->setBlockSize(newBlock); } } + if (m_configAdvanced.itemExists("assetTrackerInterval")) + { + unsigned long interval = strtoul( + m_configAdvanced.getValue("assetTrackerInterval").c_str(), + NULL, + 10); + if (m_assetTracker) + m_assetTracker->tune(interval); + } m_dataSender = new DataSender(northPlugin, m_dataLoad, this); m_dataSender->setPerfMonitor(m_perfMonitor); @@ -810,6 +819,15 @@ void NorthService::configChange(const string& categoryName, const string& catego m_dataLoad->setBlockSize(newBlock); } } + if (m_configAdvanced.itemExists("assetTrackerInterval")) + { + unsigned long interval = strtoul( + m_configAdvanced.getValue("assetTrackerInterval").c_str(), + NULL, + 10); + if (m_assetTracker) + m_assetTracker->tune(interval); + } if (m_configAdvanced.itemExists("perfmon")) { string perf = m_configAdvanced.getValue("perfmon"); @@ -921,6 +939,12 @@ void NorthService::addConfigDefaults(DefaultConfigCategory& defaultConfig) std::to_string(DEFAULT_BLOCK_SIZE), std::to_string(DEFAULT_BLOCK_SIZE)); defaultConfig.setItemDisplayName("blockSize", "Data block size"); + defaultConfig.addItem("assetTrackerInterval", + "Number of milliseconds between updates of the asset tracker information", + "integer", std::to_string(MIN_ASSET_TRACKER_UPDATE), + 
std::to_string(MIN_ASSET_TRACKER_UPDATE)); + defaultConfig.setItemDisplayName("assetTrackerInterval", + "Asset Tracker Update"); defaultConfig.addItem("perfmon", "Track and store performance counters", "boolean", "false", "false"); defaultConfig.setItemDisplayName("perfmon", "Performance Counters"); @@ -932,6 +956,7 @@ void NorthService::addConfigDefaults(DefaultConfigCategory& defaultConfig) * @param name Name of the variable to write * @param value Value to write to the variable * @param destination Where to write the value + * @return true if write was successfully sent to dispatcher, else false */ bool NorthService::write(const string& name, const string& value, const ControlDestination destination) { @@ -961,6 +986,7 @@ bool NorthService::write(const string& name, const string& value, const ControlD * @param value Value to write to the variable * @param destination Where to write the value * @param arg Argument used to determine destination + * @return true if write was successfully sent to dispatcher, else false */ bool NorthService::write(const string& name, const string& value, const ControlDestination destination, const string& arg) { @@ -1008,6 +1034,7 @@ bool NorthService::write(const string& name, const string& value, const ControlD * @param paramCount The number of parameters * @param parameters The parameters to the operation * @param destination Where to write the value + * @return -1 in case of error on operation destination, 1 if operation was successfully sent to dispatcher, else 0 */ int NorthService::operation(const string& name, int paramCount, char *names[], char *parameters[], const ControlDestination destination) { @@ -1039,8 +1066,7 @@ int NorthService::operation(const string& name, int paramCount, char *names[], payload += ","; } payload += " } } }"; - sendToDispatcher("/dispatch/operation", payload); - return -1; + return static_cast(sendToDispatcher("/dispatch/operation", payload)); } /** @@ -1051,6 +1077,7 @@ int NorthService::operation(const string& name, int paramCount, char *names[], * @param parameters The parameters to the operation * @param destination Where to write the value * @param arg Argument used to determine destination + * @return 1 if operation was successfully sent to dispatcher, else 0 */ int NorthService::operation(const string& name, int paramCount, char *names[], char *parameters[], const ControlDestination destination, const string& arg) { @@ -1099,8 +1126,7 @@ int NorthService::operation(const string& name, int paramCount, char *names[], c payload += ","; } payload += "} } }"; - sendToDispatcher("/dispatch/operation", payload); - return -1; + return static_cast(sendToDispatcher("/dispatch/operation", payload)); } /** diff --git a/C/services/south/include/defaults.h b/C/services/south/include/defaults.h index d6eefed30e..10f7d7cd78 100644 --- a/C/services/south/include/defaults.h +++ b/C/services/south/include/defaults.h @@ -25,6 +25,9 @@ static struct { "Enable flow control by reducing the poll rate", "boolean", "false" }, { "readingsPerSec", "Reading Rate", "Number of readings to generate per interval", "integer", "1" }, + { "assetTrackerInterval", "Asset Tracker Update", + "Number of milliseconds between updates of the asset tracker information", + "integer", "500" }, { NULL, NULL, NULL, NULL, NULL } }; #endif diff --git a/C/services/south/include/ingest.h b/C/services/south/include/ingest.h index 3b5dfbf157..0f629ab6f8 100644 --- a/C/services/south/include/ingest.h +++ b/C/services/south/include/ingest.h @@ -144,6 +144,8 @@ class Ingest :
public ServiceHandler { std::unordered_set statsDbEntriesCache; // confirmed stats table entries std::map statsPendingEntries; // pending stats table entries bool m_highLatency; // Flag to indicate we are exceeding latency request + bool m_10Latency; // Latency within 10% + time_t m_reportedLatencyTime;// Last time we reported high latency int m_failCnt; bool m_storageFailed; int m_storesFailed; diff --git a/C/services/south/ingest.cpp b/C/services/south/ingest.cpp index e1e876c096..993b90e542 100755 --- a/C/services/south/ingest.cpp +++ b/C/services/south/ingest.cpp @@ -459,7 +459,7 @@ unsigned int nFullQueues = 0; // Get the readings in the set for (auto & rdng : *vec) { - m_queue->push_back(rdng); + m_queue->emplace_back(rdng); } if (m_queue->size() >= m_queueSizeThreshold || m_running == false) { @@ -782,14 +782,28 @@ void Ingest::processQueue() m_performance->collect("readLatency", latency); if (latency > m_timeout && m_highLatency == false) { - m_logger->warn("Current send latency of %ldmS exceeds requested maximum latency of %dmS", latency, m_timeout); + m_logger->warn("Current send latency of %ldms exceeds requested maximum latency of %dms", latency, m_timeout); m_highLatency = true; + m_10Latency = false; + m_reportedLatencyTime = time(0); } else if (latency <= m_timeout / 1000 && m_highLatency) { m_logger->warn("Send latency now within requested limits"); m_highLatency = false; } + else if (m_highLatency && latency > m_timeout + (m_timeout / 10) && time(0) - m_reportedLatencyTime > 60) + { + // Report again every minute if we are outside the latency + // target by more than 10% + m_logger->warn("Current send latency of %ldms still significantly exceeds requested maximum latency of %dms", latency, m_timeout); + m_reportedLatencyTime = time(0); + } + else if (m_highLatency && latency < m_timeout + (m_timeout / 10) && m_10Latency == false) + { + m_logger->warn("Send latency of %ldms is now less than 10%% from target", latency); + m_10Latency = true; + } } } @@ -1041,27 +1055,26 @@ void Ingest::useFilteredData(OUTPUT_HANDLE *outHandle, if (ingest->m_data != readingSet->getAllReadingsPtr()) { - if (ingest->m_data && ingest->m_data->size()) + if (ingest->m_data) { - // Remove the readings in the vector - for(auto & rdng : *(ingest->m_data)) - delete rdng; - ingest->m_data->clear();// Remove the pointers still in the vector - - - // move reading vector to ingest - *(ingest->m_data) = readingSet->getAllReadings(); + // Remove the readings in the vector + for(auto & rdngPtr : *(ingest->m_data)) + delete rdngPtr; + + ingest->m_data->clear();// Remove any pointers still in the vector + delete ingest->m_data; + ingest->m_data = readingSet->moveAllReadings(); } else { - // move reading vector to ingest - ingest->m_data = readingSet->moveAllReadings(); + // move reading vector to ingest + ingest->m_data = readingSet->moveAllReadings(); } } else { - Logger::getLogger()->info("%s:%d: INPUT READINGSET MODIFIED BY FILTER: ingest->m_data=%p, readingSet->getAllReadingsPtr()=%p", - __FUNCTION__, __LINE__, ingest->m_data, readingSet->getAllReadingsPtr()); + Logger::getLogger()->info("%s:%d: Input readingSet modified by filter: ingest->m_data=%p, readingSet->getAllReadingsPtr()=%p", + __FUNCTION__, __LINE__, ingest->m_data, readingSet->getAllReadingsPtr()); } readingSet->clear(); diff --git a/C/services/south/south.cpp b/C/services/south/south.cpp index c105e46b7f..530b40ec0d 100644 --- a/C/services/south/south.cpp +++ b/C/services/south/south.cpp @@ -423,6 +423,13 @@ void SouthService::start(string&
coreAddress, unsigned short corePort) } m_assetTracker = new AssetTracker(m_mgtClient, m_name); + if (m_configAdvanced.itemExists("assetTrackerInterval")) + { + string interval = m_configAdvanced.getValue("assetTrackerInterval"); + unsigned long i = strtoul(interval.c_str(), NULL, 10); + if (m_assetTracker) + m_assetTracker->tune(i); + } { // Instantiate the Ingest class @@ -1024,6 +1031,13 @@ void SouthService::processConfigChange(const string& categoryName, const string& m_throttle = false; } } + if (m_configAdvanced.itemExists("assetTrackerInterval")) + { + string interval = m_configAdvanced.getValue("assetTrackerInterval"); + unsigned long i = strtoul(interval.c_str(), NULL, 10); + if (m_assetTracker) + m_assetTracker->tune(i); + } } // Update the Security category diff --git a/C/services/storage/configuration.cpp b/C/services/storage/configuration.cpp index a3253ff684..527ac349e2 100644 --- a/C/services/storage/configuration.cpp +++ b/C/services/storage/configuration.cpp @@ -77,6 +77,16 @@ static const char *defaultConfiguration = QUOTE({ "displayName" : "Log Level", "options" : [ "error", "warning", "info", "debug" ], "order" : "7" + }, + "timeout" : { + "value" : "60", + "default" : "60", + "description" : "Server request timeout, expressed in seconds", + "type" : "integer", + "displayName" : "Timeout", + "order" : "8", + "minimum" : "5", + "maximum" : "3600" + } }); @@ -95,6 +105,10 @@ StorageConfiguration::StorageConfiguration() document = new Document(); readCache(); checkCache(); + if (hasValue("logLevel")) + { + logger->setMinLevel(getValue("logLevel")); + } } /** @@ -312,7 +326,52 @@ DefaultConfigCategory *StorageConfiguration::getDefaultCategory() void StorageConfiguration::checkCache() { bool forceUpdate = false; +bool writeCacheRequired = false; + /* + * If the cached version of the configuration that has been read in + * does not contain an item in the default configuration, then copy + * that item from the default configuration. + * + * This allows new items to be added to the configuration and populated + * in the cache on first restart. + */ + Document *newdoc = new Document(); + newdoc->Parse(defaultConfiguration); + if (newdoc->HasParseError()) + { + logger->error("Default configuration failed to parse.
%s at %d", + GetParseError_En(newdoc->GetParseError()), + newdoc->GetErrorOffset()); + } + else + { + for (Value::ConstMemberIterator itr = newdoc->MemberBegin(); + itr != newdoc->MemberEnd(); ++itr) + { + const char *name = itr->name.GetString(); + Value &newval = (*newdoc)[name]; + if (!hasValue(name)) + { + logger->warn("Adding storage configuration item %s from defaults", name); + Document::AllocatorType& a = document->GetAllocator(); + Value copy(name, a); + copy.CopyFrom(newval, a); + Value n(name, a); + document->AddMember(n, copy, a); + writeCacheRequired = true; + } + } + } + delete newdoc; + + if (writeCacheRequired) + { + // We added a new member + writeCache(); + } + + // Upgrade step to add enumeration for plugin if (document->HasMember("plugin")) { Value& item = (*document)["plugin"]; @@ -327,8 +386,11 @@ bool forceUpdate = false; } } + // Cache is from before we used an enumeration for the plugin, force upgrade + // steps if (forceUpdate == false && document->HasMember("plugin")) { + logger->info("Adding database plugin enumerations"); Value& item = (*document)["plugin"]; if (item.HasMember("type")) { @@ -349,7 +411,7 @@ bool forceUpdate = false; } logger->info("Storage configuration cache is not up to date"); - Document *newdoc = new Document(); + newdoc = new Document(); newdoc->Parse(defaultConfiguration); if (newdoc->HasParseError()) { @@ -357,28 +419,31 @@ bool forceUpdate = false; GetParseError_En(document->GetParseError()), newdoc->GetErrorOffset()); } - for (Value::ConstMemberIterator itr = newdoc->MemberBegin(); - itr != newdoc->MemberEnd(); ++itr) + else { - const char *name = itr->name.GetString(); - Value &newval = (*newdoc)[name]; - if (hasValue(name)) + for (Value::ConstMemberIterator itr = newdoc->MemberBegin(); + itr != newdoc->MemberEnd(); ++itr) { - const char *val = getValue(name); - newval["value"].SetString(strdup(val), strlen(val)); - if (strcmp(name, "plugin") == 0) + const char *name = itr->name.GetString(); + Value &newval = (*newdoc)[name]; + if (hasValue(name)) { - newval["default"].SetString(strdup(val), strlen(val)); - logger->warn("Set default of %s to %s", name, val); - } - if (strcmp(name, "readingPlugin") == 0) - { - if (strlen(val) == 0) + const char *val = getValue(name); + newval["value"].SetString(strdup(val), strlen(val)); + if (strcmp(name, "plugin") == 0) + { + newval["default"].SetString(strdup(val), strlen(val)); + logger->warn("Set default of %s to %s", name, val); + } + if (strcmp(name, "readingPlugin") == 0) { - val = "Use main plugin"; + if (strlen(val) == 0) + { + val = "Use main plugin"; + } + newval["default"].SetString(strdup(val), strlen(val)); + logger->warn("Set default of %s to %s", name, val); } - newval["default"].SetString(strdup(val), strlen(val)); - logger->warn("Set default of %s to %s", name, val); } } } diff --git a/C/services/storage/include/storage_api.h b/C/services/storage/include/storage_api.h index 1b2d152ba4..e83521b0db 100644 --- a/C/services/storage/include/storage_api.h +++ b/C/services/storage/include/storage_api.h @@ -100,6 +100,13 @@ class StorageApi { void printList(); bool createSchema(const std::string& schema); + void setTimeout(long timeout) + { + if (m_server) + { + m_server->config.timeout_request = timeout; + } + }; public: std::atomic m_workers_count; diff --git a/C/services/storage/include/storage_service.h b/C/services/storage/include/storage_service.h index 6309208716..6e97e0bd10 100644 --- a/C/services/storage/include/storage_service.h +++ b/C/services/storage/include/storage_service.h
@@ -40,6 +40,10 @@ class StorageService : public ServiceHandler { string getPluginName(); string getPluginManagedStatus(); string getReadingPluginName(); + void setLogLevel(std::string level) + { + m_logLevel = level; + }; private: const string& m_name; bool loadPlugin(); @@ -50,5 +54,7 @@ class StorageService : public ServiceHandler { StoragePlugin *readingPlugin; bool m_shutdown; bool m_requestRestart; + std::string m_logLevel; + long m_timeout; }; #endif diff --git a/C/services/storage/pluginconfiguration.cpp b/C/services/storage/pluginconfiguration.cpp index 03e5517ee3..30772a89ad 100644 --- a/C/services/storage/pluginconfiguration.cpp +++ b/C/services/storage/pluginconfiguration.cpp @@ -19,7 +19,6 @@ #include #include - using namespace std; using namespace rapidjson; diff --git a/C/services/storage/storage.cpp b/C/services/storage/storage.cpp index 8999e21e41..68c67a0d8b 100644 --- a/C/services/storage/storage.cpp +++ b/C/services/storage/storage.cpp @@ -141,6 +141,7 @@ string logLevel = "warning"; } StorageService service(myName); + service.setLogLevel(logLevel); Logger::getLogger()->setMinLevel(logLevel); if (returnPlugin) { @@ -228,15 +229,26 @@ unsigned short servicePort; } if (config->hasValue("logLevel")) { - logger->setMinLevel(config->getValue("logLevel")); + m_logLevel = config->getValue("logLevel"); } else { - logger->setMinLevel("warning"); + m_logLevel = "warning"; + } + logger->setMinLevel(m_logLevel); + + if (config->hasValue("timeout")) + { + m_timeout = strtol(config->getValue("timeout"), NULL, 10); + } + else + { + m_timeout = 5; } api = new StorageApi(servicePort, threads); + api->setTimeout(m_timeout); } /** @@ -543,6 +555,21 @@ void StorageService::configChange(const string& categoryName, const string& cate if (!categoryName.compare(STORAGE_CATEGORY)) { config->updateCategory(category); + + if (m_logLevel.compare(config->getValue("logLevel"))) + { + m_logLevel = config->getValue("logLevel"); + logger->setMinLevel(m_logLevel); + } + if (config->hasValue("timeout")) + { + long timeout = strtol(config->getValue("timeout"), NULL, 10); + if (timeout != m_timeout) + { + api->setTimeout(timeout); + m_timeout = timeout; + } + } return; } if (!categoryName.compare(getPluginName())) diff --git a/C/services/storage/storage_api.cpp b/C/services/storage/storage_api.cpp index 146df5c82c..575e2129ce 100644 --- a/C/services/storage/storage_api.cpp +++ b/C/services/storage/storage_api.cpp @@ -393,6 +393,7 @@ StorageApi::StorageApi(const unsigned short port, const unsigned int threads) : m_server = new HttpServer(); m_server->config.port = port; m_server->config.thread_pool_size = threads; + m_server->config.timeout_request = 60; StorageApi::m_instance = this; } diff --git a/C/services/storage/storage_registry.cpp b/C/services/storage/storage_registry.cpp index f97798c2e4..8e0db3ab98 100644 --- a/C/services/storage/storage_registry.cpp +++ b/C/services/storage/storage_registry.cpp @@ -868,12 +868,20 @@ void StorageRegistry::processDelete(char *tableName, char *payload) { Document doc; + bool allRows = false; - doc.Parse(payload); - if (doc.HasParseError()) + if (! 
*payload) // Empty { - Logger::getLogger()->error("Unable to parse table delete payload for table %s, request is %s", tableName, payload); - return; + allRows = true; + } + else + { + doc.Parse(payload); + if (doc.HasParseError()) + { + Logger::getLogger()->error("Unable to parse table delete payload for table %s, request is %s", tableName, payload); + return; + } } lock_guard guard(m_tableRegistrationsMutex); @@ -889,7 +897,11 @@ StorageRegistry::processDelete(char *tableName, char *payload) { continue; } - if (tblreg->key.empty()) + if (allRows) + { + sendPayload(tblreg->url, payload); + } + else if (tblreg->key.empty()) { // No key to match, send all updates to table sendPayload(tblreg->url, payload); diff --git a/C/tasks/check_updates/CMakeLists.txt b/C/tasks/check_updates/CMakeLists.txt new file mode 100644 index 0000000000..0895d5f9b9 --- /dev/null +++ b/C/tasks/check_updates/CMakeLists.txt @@ -0,0 +1,40 @@ +cmake_minimum_required (VERSION 2.8.8) +project (check_updates) + +set(CMAKE_CXX_FLAGS_DEBUG "-O0 -ggdb") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Wextra -Wsign-conversion") +set(COMMON_LIB common-lib) +set(PLUGINS_COMMON_LIB plugins-common-lib) + +find_package(Threads REQUIRED) + +set(BOOST_COMPONENTS system thread) + +find_package(Boost 1.53.0 COMPONENTS ${BOOST_COMPONENTS} REQUIRED) +include_directories(SYSTEM ${Boost_INCLUDE_DIR}) + +include_directories(.) +include_directories(include) +include_directories(../../thirdparty/Simple-Web-Server) +include_directories(../../thirdparty/rapidjson/include) +include_directories(../../common/include) + +file(GLOB check_updates_src "*.cpp") + +link_directories(${PROJECT_BINARY_DIR}/../../lib) + +add_executable(${PROJECT_NAME} ${check_updates_src} ${common_src}) +target_link_libraries(${PROJECT_NAME} ${Boost_LIBRARIES}) +target_link_libraries(${PROJECT_NAME} ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${PROJECT_NAME} ${COMMON_LIB}) +target_link_libraries(${PROJECT_NAME} ${PLUGINS_COMMON_LIB}) + + +install(TARGETS check_updates RUNTIME DESTINATION fledge/tasks) + +if(MSYS) #TODO: Is MSYS true when MSVC is true? + target_link_libraries(check_updates ws2_32 wsock32) + if(OPENSSL_FOUND) + target_link_libraries(check_updates ws2_32 wsock32) + endif() +endif() diff --git a/C/tasks/check_updates/check_updates.cpp b/C/tasks/check_updates/check_updates.cpp new file mode 100644 index 0000000000..1c06c01aea --- /dev/null +++ b/C/tasks/check_updates/check_updates.cpp @@ -0,0 +1,197 @@ +/* + * Fledge Check Updates + * + * Copyright (c) 2024 Dianomic Systems + * + * Released under the Apache 2.0 Licence + * + * Author: Devki Nandan Ghildiyal + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +using namespace std; + +volatile std::sig_atomic_t signalReceived = 0; + +static void signalHandler(int signal) +{ + signalReceived = signal; +} + + +/** + * Constructor for CheckUpdates + */ +CheckUpdates::CheckUpdates(int argc, char** argv) : FledgeProcess(argc, argv) +{ + std::string paramName; + paramName = getName(); + m_logger = new Logger(paramName); + m_logger->info("CheckUpdates starting - parameters name :%s:", paramName.c_str() ); + m_mgtClient = this->getManagementClient(); + +} + +/** + * Destructor for CheckUpdates + */ +CheckUpdates::~CheckUpdates() +{ +} + +/** + * CheckUpdates run method, called by the base class to start the process and do the actual work. 
+ */ +void CheckUpdates::run() +{ + // We handle these signals, add more if needed + std::signal(SIGINT, signalHandler); + std::signal(SIGSTOP, signalHandler); + std::signal(SIGTERM, signalHandler); + + + if (!m_dryRun) + { + raiseAlerts(); + } + processEnd(); +} + +/** + * Execute the raiseAlerts, create an alert for all the packages for which an update is available + */ +void CheckUpdates::raiseAlerts() +{ + m_logger->debug("raiseAlerts running"); + try + { + int availableUpdates = getUpgradablePackageList().size(); + + if (availableUpdates > 0) + { + std::string key = "package_updates"; + std::string message = ""; + if (availableUpdates == 1) + message = "There is " + std::to_string(availableUpdates) + " update available to be installed"; + else + message = "There are " + std::to_string(availableUpdates) + " updates available to be installed"; + + std::string urgency = "normal"; + if (!m_mgtClient->raiseAlert(key,message,urgency)) + { + m_logger->error("Failed to raise an alert for key=%s,message=%s,urgency=%s", key.c_str(), message.c_str(), urgency.c_str()); + } + } + + } + catch (...) + { + try + { + std::exception_ptr p = std::current_exception(); + std::rethrow_exception(p); + } + catch(const std::exception& e) + { + m_logger->error("Failed to raise alert : %s", e.what()); + } + + } +} + +/** + * Logs process end message + */ + +void CheckUpdates::processEnd() +{ + m_logger->debug("raiseAlerts completed"); +} + +/** + * Fetch package manager name + */ + +std::string CheckUpdates::getPackageManager() +{ + std::string command = "command -v yum || command -v apt-get"; + std::string result = ""; + char buffer[128]; + + // Open pipe to file + FILE* pipe = popen(command.c_str(), "r"); + if (!pipe) + { + m_logger->error("getPackageManager: popen call failed : %s",strerror(errno)); + return ""; + } + // read till end of process: + while (!feof(pipe)) + { + if (fgets(buffer, 128, pipe) != NULL) + result += buffer; + } + + pclose(pipe); + + if (result.find("apt") != std::string::npos) + return "apt"; + if (result.find("yum") != std::string::npos) + return "yum"; + + m_logger->warn("Unsupported environment %s", result.c_str() ); + return ""; +} + +/** + * Fetch a list of all the package names for which an upgrade is available + */ +std::vector CheckUpdates::getUpgradablePackageList() +{ + std::string packageManager = getPackageManager(); + std::vector packageList; + if(!packageManager.empty()) + { + std::string command = "(sudo apt update && sudo apt list --upgradeable) 2>/dev/null | grep -v '^fledge-manage' | grep '^fledge' | tr -s ' ' | cut -d' ' -f-1,2 "; + if (packageManager.find("yum") != std::string::npos) + { + command = "(sudo yum check-update && sudo yum list updates) 2>/dev/null | grep -v '^fledge-manage' | grep '^fledge' | tr -s ' ' | cut -d' ' -f-1,2 "; + } + + FILE* pipe = popen(command.c_str(), "r"); + if (!pipe) + { + m_logger->error("getUpgradablePackageList: popen call failed : %s",strerror(errno)); + return packageList; + } + + char buffer[1024]; + while (!feof(pipe)) + { + if (fgets(buffer, sizeof(buffer), pipe) != NULL) + { + //strip out newline character + int len = strlen(buffer) - 1; + if (*buffer && buffer[len] == '\n') + buffer[len] = '\0'; + + packageList.emplace_back(buffer); + + } + } + + pclose(pipe); + } + + return packageList; +} diff --git a/C/tasks/check_updates/include/check_updates.h b/C/tasks/check_updates/include/check_updates.h new file mode 100644 index 0000000000..1a8f852e5a --- /dev/null +++ b/C/tasks/check_updates/include/check_updates.h @@ -0,0 +1,38 @@
+#ifndef _CHECK_UPDATES_H +#define _CHECK_UPDATES_H + +/* + * Fledge Check Updates + * + * Copyright (c) 2024 Dianomic Systems + * + * Released under the Apache 2.0 Licence + * + * Author: Devki Nandan Ghildiyal + */ + +#include + +#define LOG_NAME "check_updates" + +/** + * CheckUpdates class + */ + +class CheckUpdates : public FledgeProcess +{ + public: + CheckUpdates(int argc, char** argv); + ~CheckUpdates(); + void run(); + + private: + Logger *m_logger; + ManagementClient *m_mgtClient; + + void raiseAlerts(); + std::string getPackageManager(); + std::vector getUpgradablePackageList(); + void processEnd(); +}; +#endif diff --git a/C/tasks/check_updates/main.cpp b/C/tasks/check_updates/main.cpp new file mode 100644 index 0000000000..4e7c5fcab5 --- /dev/null +++ b/C/tasks/check_updates/main.cpp @@ -0,0 +1,43 @@ +/* + * Fledge Check Updates + * + * Copyright (c) 2024 Dianomic Systems + * + * Released under the Apache 2.0 Licence + * + * Author: Devki Nandan Ghildiyal + */ + +#include +#include + +using namespace std; + +int main(int argc, char** argv) +{ + Logger *logger = new Logger(LOG_NAME); + + try + { + CheckUpdates check(argc, argv); + + check.run(); + } + catch (...) + { + try + { + std::exception_ptr p = std::current_exception(); + std::rethrow_exception(p); + } + catch(const std::exception& e) + { + logger->error("An error occurred during the execution : %s", e.what()); + } + + exit(1); + } + + // Return success + exit(0); +} diff --git a/C/tasks/north/sending_process/sending.cpp b/C/tasks/north/sending_process/sending.cpp index e17effed82..cf61b6633b 100755 --- a/C/tasks/north/sending_process/sending.cpp +++ b/C/tasks/north/sending_process/sending.cpp @@ -903,7 +903,7 @@ ConfigCategory SendingProcess::fetchConfiguration(const std::string& defaultConf m_data_source_t = configuration.getValue("source"); } catch (...) 
{ - m_data_source_t = ""; + m_data_source_t = "readings"; } // Sets the m_memory_buffer_size = 1 in case of an invalid value diff --git a/CMakeLists.txt b/CMakeLists.txt index 8ab13abe33..4198f82ae3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -35,6 +35,7 @@ add_subdirectory(C/services/filter-plugin-interfaces/python/filter_ingest_pymodu add_subdirectory(C/services/north-plugin-interfaces/python) add_subdirectory(C/tasks/north) add_subdirectory(C/tasks/purge_system) +add_subdirectory(C/tasks/check_updates) add_subdirectory(C/tasks/statistics_history) add_subdirectory(C/plugins/utils) add_subdirectory(C/plugins/north/OMF) diff --git a/Makefile b/Makefile index 3ce471339c..2b27031369 100644 --- a/Makefile +++ b/Makefile @@ -61,6 +61,7 @@ CMAKE_SOUTH_BINARY := $(CMAKE_SERVICES_DIR)/south/fledge.services.sou CMAKE_NORTH_SERVICE_BINARY := $(CMAKE_SERVICES_DIR)/north/fledge.services.north CMAKE_NORTH_BINARY := $(CMAKE_TASKS_DIR)/north/sending_process/sending_process CMAKE_PURGE_SYSTEM_BINARY := $(CMAKE_TASKS_DIR)/purge_system/purge_system +CMAKE_CHECK_UPDATES_BINARY := $(CMAKE_TASKS_DIR)/check_updates/check_updates CMAKE_STATISTICS_BINARY := $(CMAKE_TASKS_DIR)/statistics_history/statistics_history CMAKE_PLUGINS_DIR := $(CURRENT_DIR)/$(CMAKE_BUILD_DIR)/C/plugins DEV_SERVICES_DIR := $(CURRENT_DIR)/services @@ -71,6 +72,7 @@ SYMLINK_SOUTH_BINARY := $(DEV_SERVICES_DIR)/fledge.services.south SYMLINK_NORTH_SERVICE_BINARY := $(DEV_SERVICES_DIR)/fledge.services.north SYMLINK_NORTH_BINARY := $(DEV_TASKS_DIR)/sending_process SYMLINK_PURGE_SYSTEM_BINARY := $(DEV_TASKS_DIR)/purge_system +SYMLINK_CHECK_UPDATES_BINARY := $(DEV_TASKS_DIR)/check_updates SYMLINK_STATISTICS_BINARY := $(DEV_TASKS_DIR)/statistics_history ASYNC_INGEST_PYMODULE := $(CURRENT_DIR)/python/async_ingest.so* FILTER_INGEST_PYMODULE := $(CURRENT_DIR)/python/filter_ingest.so* @@ -132,6 +134,7 @@ DISPATCHER_C_SCRIPT_SRC := scripts/services/dispatcher_c BUCKET_STORAGE_C_SCRIPT_SRC := scripts/services/bucket_storage_c PURGE_SCRIPT_SRC := scripts/tasks/purge PURGE_C_SCRIPT_SRC := scripts/tasks/purge_system +CHECK_UPDATES_SCRIPT_SRC := scripts/tasks/check_updates STATISTICS_SCRIPT_SRC := scripts/tasks/statistics BACKUP_SRC := scripts/tasks/backup RESTORE_SRC := scripts/tasks/restore @@ -168,7 +171,7 @@ PACKAGE_NAME=Fledge # generally prepare the development tree to allow for core to be run default : apply_version \ generate_selfcertificate \ - c_build $(SYMLINK_STORAGE_BINARY) $(SYMLINK_SOUTH_BINARY) $(SYMLINK_NORTH_SERVICE_BINARY) $(SYMLINK_NORTH_BINARY) $(SYMLINK_PURGE_SYSTEM_BINARY) $(SYMLINK_STATISTICS_BINARY) $(SYMLINK_PLUGINS_DIR) \ + c_build $(SYMLINK_STORAGE_BINARY) $(SYMLINK_SOUTH_BINARY) $(SYMLINK_NORTH_SERVICE_BINARY) $(SYMLINK_NORTH_BINARY) $(SYMLINK_PURGE_SYSTEM_BINARY) $(SYMLINK_CHECK_UPDATES_BINARY) $(SYMLINK_STATISTICS_BINARY) $(SYMLINK_PLUGINS_DIR) \ python_build python_requirements_user apply_version : @@ -291,6 +294,10 @@ $(SYMLINK_NORTH_BINARY) : $(DEV_TASKS_DIR) $(SYMLINK_PURGE_SYSTEM_BINARY) : $(DEV_TASKS_DIR) $(LN) $(CMAKE_PURGE_SYSTEM_BINARY) $(SYMLINK_PURGE_SYSTEM_BINARY) +# create symlink to check_updates binary +$(SYMLINK_CHECK_UPDATES_BINARY) : $(DEV_TASKS_DIR) + $(LN) $(CMAKE_CHECK_UPDATES_BINARY) $(SYMLINK_CHECK_UPDATES_BINARY) + # create symlink to purge_system binary $(SYMLINK_STATISTICS_BINARY) : $(DEV_TASKS_DIR) $(LN) $(CMAKE_STATISTICS_BINARY) $(SYMLINK_STATISTICS_BINARY) @@ -354,6 +361,7 @@ scripts_install : $(SCRIPTS_INSTALL_DIR) \ install_dispatcher_c_script \ install_bucket_storage_c_script \ 
install_purge_script \ + install_check_updates_script \ install_statistics_script \ install_storage_script \ install_backup_script \ @@ -425,6 +433,9 @@ install_purge_script : $(SCRIPT_TASKS_INSTALL_DIR) $(PURGE_SCRIPT_SRC) $(CP) $(PURGE_SCRIPT_SRC) $(SCRIPT_TASKS_INSTALL_DIR) $(CP) $(PURGE_C_SCRIPT_SRC) $(SCRIPT_TASKS_INSTALL_DIR) +install_check_updates_script : $(SCRIPT_TASKS_INSTALL_DIR) $(CHECK_UPDATES_SCRIPT_SRC) + $(CP) $(CHECK_UPDATES_SCRIPT_SRC) $(SCRIPT_TASKS_INSTALL_DIR) + install_statistics_script : $(SCRIPT_TASKS_INSTALL_DIR) $(STATISTICS_SCRIPT_SRC) $(CP) $(STATISTICS_SCRIPT_SRC) $(SCRIPT_TASKS_INSTALL_DIR) diff --git a/VERSION b/VERSION index ed5d342137..c612d5c498 100644 --- a/VERSION +++ b/VERSION @@ -1,2 +1,2 @@ -fledge_version=2.3.0 -fledge_schema=66 +fledge_version=2.4.0 +fledge_schema=70 diff --git a/docs/91_version_history.rst b/docs/91_version_history.rst index 327879bcd0..5e3a1caef4 100644 --- a/docs/91_version_history.rst +++ b/docs/91_version_history.rst @@ -14,7 +14,6 @@ check here - .. ============================================= @@ -25,6 +24,104 @@ Version History Fledge v2 ========== +v2.4.0 +------- + +Release Date: 2024-04-10 + +- **Fledge Core** + + - New Features: + + - A new feature has been added that allows for internal alerts to be raised. These are used to inform users of any issue internally that may require attention; they are not related to specific data that is flowing through the Fledge data pipelines. Examples of alerts that may be raised are that updates to the software are available, a service is repeatedly failing or an exceptional issue has occurred. + - A new task (update checker) has been added that will run periodically and raise an alert if there are software updates available. + - The internal service monitor has been updated to use the new alerts mechanism to alert a user if services are failing. + - A new storage configuration option has been added that allows the server request timeout value to be modified. + - The ability to tune the cache flushing frequency of the asset tracker has been added to the advanced configuration options of the south and north services. + - The reporting of south service send latency has been updated to give more detail regarding continued send latency issues. + - A new tuning parameter has been added to the purge process to control the number of readings purged within single blocks. This can be used to tune the interaction between the purge process and the ingestion of new data in highly loaded systems. + - A new list type has been added to the types supported in the configuration category. This allows for improved configuration interactions. + - Support has been added in the C++ configuration manager code to allow for the new list, key/value list and object list types in configuration categories. Also some convenience functions have been added for use by plugins that wish to traverse the lists. + - In the hierarchy map, forward-slash-separated string tokens in the meta-data are now parsed and used to construct an object hierarchy in the OPC UA Server's Address Space. + - The scheduler has been enhanced to provide the capability to order the startup of services when Fledge is started. + - A performance improvement, courtesy of a community member, for the JSON escaping code has been added. This improves performance of the PostgreSQL storage plugin and other areas of the system. + - A new section has been added to the documentation that describes how storage plugins are built.
+ - The plugin developers guide has been updated with information and examples of the new list handling facilities added to configuration items within Fledge. + - The tuning section of the documentation has been updated to include details of the service startup ordering enhancement. + - The plugin documentation has been updated to include cross referencing between plugins. A new See Also section will be included that will link the set to other plugins that might be useful or relate to the plugin that is being viewed. + - The plugin developers guide has been updated to add some additional guidance to the developer as to how to decide if features should be added to a plugin or not and also to document common issues that cause problems with plugins. + - Documentation that describes what firewall settings are needed to install Fledge has been added to the quick start guide. + + + - Bug Fix: + + - An issue that prevented configuration category items called messages has been resolved. + - An issue that could cause data to be repeated in a north service when using a pipeline in the north that adds new readings to the pipeline has been resolved. + - An issue that could cause the order of filters in a control pipeline API to be modified has been fixed. + - An issue that could result in services that are already installed being shown in the list of services available to be installed has been resolved. + - An issue that could cause some north plugins to fail following a restart when using the SQLite in-memory storage plugin has been fixed. + - An issue that could prevent a plugin being updated in some circumstances has been resolved. + - An issue requiring a restart before the change in log level for the storage service took effect has been resolved. + - An issue causing the database to potentially not initialize correctly when switching the readings plugin from SQLite to PostgreSQL has been resolved. + - An issue in the control pipeline API related to the type of one of the parameters of the pipeline has been resolved. This issue could manifest itself as an inability to edit a control pipeline. + - The return type of plugin_shutdown was incorrectly documented in the plugin developers guide for north plugins. This has now been resolved. + + +- **GUI** + + - New Features: + + - A new page has been added for managing additional services within an instance. + - Support for entering simple lists for configuration items has been added. + - Support has been added for manipulating key/value lists using the new list configuration type that is available. + - Navigation buttons have been added to the tabs in the south and north services to facilitate easier navigation between tabs. + - A preview of the new flow editor for the north side has been added. This may be enabled via the GUI settings page. + - The GUI now shows the internal alerts via an icon in the navigation bar at the top of the screen. + + + - Bug Fix: + + - An issue with creating an operation in a control script with no parameters in the GUI has been resolved. + - An issue with the Next button not being enabled when changing the name of a service in the service creation wizard has been resolved. + - An issue that could result in a filter not being added to a control pipeline when the user does not click on the save button has been addressed by adding a check before navigating off the page. + - An issue that could result in the JSON code editor being incorrectly displayed for non-JSON code has been resolved.
+ - An issue with the visibility of the last item on the side menu when scrolling in a small window has been resolved. + + +- **Services & Plugins** + + - New Features: + + - Improvements have been made to the buffering strategy of the OMF north plugin to reduce the overhead in creating outgoing OMF messages. + - The control pipelines mechanism has been enhanced to allow pipelines to change the name of the operation that is performed as well as the parameters. + - The documentation of the expression filter has been updated to document the restriction on asset and datapoint names. + + + - Bug Fix: + + - An issue with the dynamic reconfiguration of filters in control pipelines has been resolved. + - An issue that could cause the control dispatcher service to fail when changing the destination of a control pipeline has been resolved. + - An issue with the control dispatcher that prevents operations with no parameters from being correctly passed via control pipelines has been resolved. + - An issue in the control dispatcher that could cause a crash if a control pipeline completely removed the request has now been resolved. + - An issue that could cause an error to be logged when installing the control dispatcher has been resolved. The error did not prevent the dispatcher from executing. + - An issue when using the PostgreSQL storage plugin and data containing double quotes within JSON data has been resolved. + - An issue that could cause an error in the south plugin written in Python that supports control operations has been resolved. + - A memory consumption issue in the fledge-filter-asset when using the flatten option has been resolved. + - The fledge-filter-asset issue causing deadlock in pipelines with two instances has been resolved. + - An issue that limited the number of variables the fledge-south-s2opcua plugin could subscribe to has been resolved. + - An issue that could result in the sent count being incorrectly incremented when using the fledge-north-kafka (C based) plugin has been resolved. + - An issue that could cause excessive messages regarding connection loss and regain to be raised in the OMF north plugin has been resolved. + - An issue that caused the fledge-north-kafka (C based) plugin to fetch data when it was disabled has been resolved. + - If you set the User Authentication Policy to username, you must select a Security Policy other than None to communicate with the OPC UA Server. Allowing username authentication with None would mean that usernames and passwords would be passed from the plugin to the server as clear text, which is a serious security risk. This is explained in the `OPC UA Specification `_. In addition, OPC UA defines a Security Policy for a "UserIdentityToken". When configuring the fledge-south-s2opcua plugin, the Security Policy selected in your configuration must match a supported "UserIdentityToken" Security Policy. To help troubleshoot configuration problems, log messages for the endpoint search have been improved. The documentation includes a new section called "Username Authentication". + - If a datapoint or asset name contains a reserved mathematical symbol then the fledge-filter-expression plugin was previously unable to use this as a variable in an expression. A mechanism has been added to allow these names. + - The Notification service would create Rule and Delivery support objects even if the notification was disabled. When the notification was later enabled, the original objects would remain. This has been fixed.
+ - If the OMF North plugin gets an exception when POSTing data to the PI Web API, the plugin would declare the connection to PI broken when it wasn't. This would result in endless connection loss and reconnection messages. This has been fixed. The plugin will now ping the PI Web API every 60 seconds and will determine that connection has been lost only if this ping fails. The OMFHint LegacyType has been deprecated because a Container cannot be changed after it is created in the PI System. This means there is no way to process the LegacyType hint when readings are processed. If the LegacyType hint appears in any reading, a warning message will be written saying that this hint type has been deprecated. + - This fix applies when configuring OMF North to create an Asset Framework (AF) structure. The first time an AF Element holding an AF Attribute pointing to a PI Point (i.e. the Container) is created, it will appear in Asset Framework as a normal AF Element. If the path is then changed using an "AFLocation hint", a reference to the AF Element should appear in the hint's location. The original AF Element's location should remain unchanged. This feature was not working correctly but has been fixed. Before this fix, the hint's path would be created as expected but no reference to the original data location was created. + - The storage service with the SQLite in-memory plugin does consume large amounts of memory while running at higher data rates. Analysis has determined this is not caused by a memory leak but rather by legitimately storing large amounts of data in memory while operating. The reason for the high load on the storage service appears to be database purging but this is a subject for further study. + - An issue in the control pipeline documentation that stated that services could only be the source of control pipelines has been fixed to now show that they may be the source or the destination. + - It is not possible to change the numeric data type of OMF Container (which maps to a PI Point) after it has been created. This means it is not possible to enable or disable an integer OMFHint or change the numeric data type in the Fledge GUI after the Container has been created. It is possible to manually correct the problem if it is necessary. OMF North plugin documentation has been updated with the procedure. + + v2.3.0 ------- diff --git a/docs/OMF.rst b/docs/OMF.rst index 9ee26c68a9..3979bfbe1a 100644 --- a/docs/OMF.rst +++ b/docs/OMF.rst @@ -235,6 +235,7 @@ Formats & Types ~~~~~~~~~~~~~~~ The *Formats & Types* tab provides a means to specify the detail types that will be used and the way complex assets are mapped to OMF types to also be configured. +See the section :ref:`Numeric Data Types` for more information on configuring data types. +--------------+ | |OMF_Format| | @@ -464,6 +465,7 @@ Number Format Hints A number format hint tells the plugin what number format to use when inserting data into the PI Server. The following will cause all numeric data within the asset to be written using the format *float32*. +See the section :ref:`Numeric Data Types`. .. code-block:: console @@ -477,10 +479,11 @@ Integer Format Hints An integer format hint tells the plugin what integer format to use when inserting data into the PI Server. The following will cause all integer data within the asset to be written using the format *integer32*. +See the section :ref:`Numeric Data Types`. .. 
code-block:: console - "OMFHint" : { "number" : "integer32" } + "OMFHint" : { "integer" : "integer32" } The value of the *number* hint may be any numeric format that is supported by the PI Server. @@ -522,17 +525,6 @@ Specifies that a specific tag name should be used when storing data in the PI Se "OMFHint" : { "tagName" : "AC1246" } -Legacy Type Hint -~~~~~~~~~~~~~~~~ - -Use legacy style complex types for this reading rather that the newer linked data types. - -.. code-block:: console - - "OMFHint" : { "LegacyType" : "true" } - -The allows the older mechanism to be forced for a single asset. See :ref:`Linked_Types`. - Source Hint ~~~~~~~~~~~ @@ -658,6 +650,70 @@ the data point name of *OMFHint*. It can be added at any point in the processing of the data, however a specific plugin is available for adding the hints, the |OMFHint filter plugin|. +.. _Numeric Data Types: + +Numeric Data Types +------------------ + +Configuring Numeric Data Types +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +It is possible to configure the exact data types used to send data to the PI Server using OMF. +To configure the data types for all integers and numbers (that is, floating point values), you can use the *Formats & Types* tab in the Fledge GUI. +To influence the data types for specific assets or datapoints, you can create an OMFHint of type *number* or *integer*. + +You must create your data type configurations before starting your OMF North plugin instance. +After your plugin has run for the first time, +OMF messages sent by the plugin to the PI Server will cause AF Attributes and PI Points to be created using data types defined by your configuration. +The data types of the AF Attributes and PI Points will not change if you edit your OMF North plugin instance configuration. +For example, if you disable an *integer* OMFHint, +you will change the OMF messages sent to PI but the data in the messages will no longer match the AF Attributes and PI Points in your PI Server. + +Detecting the Data Type Mismatch Problem +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Editing your data type choices in OMF North will cause the following messages to appear in the System Log: + +.. code-block:: console + + WARNING: The OMF endpoint reported a conflict when sending containers: 1 messages + WARNING: Message 0: Error, A container with the supplied ID already exists, but does not match the supplied container., + +These errors will cause the plugin to retry sending container information a number of times determined the *Maximum Retry* count on the *Connection* tab in the Fledge GUI. +The default is 3. +The plugin will then send numeric data values to PI continuously. +Unfortunately, the PI Web API returns no HTTP error when this happens so no messages are logged. +In PI, you will see that timestamps are correct but all numeric values are zero. + +Recovering from the Data Type Mismatch Problem +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +As you experiment with configurations, you may discover that your original assumptions about your data types were not correct and need to be changed. +It is possible to repair your PI Server so that you do not need to discard your AF Database and start over. +This is the procedure: + +- Shut down your OMF North instance. +- Using PI System Explorer, locate the problematic PI Points. + These are points with a value of zero. + The PI Points are mapped to AF Attributes using the PI Point Data Reference. + For each AF Attribute, you can see the name of the PI Point in the Settings pane. 
+- Using PI System Management Tools (PI SMT), open the Point Builder tool (under Points) and locate the problematic PI Points. +- In the General tab in the Point Builder, locate the Extended Descriptor (*Exdesc*). + It will contain a long character string with several OMF tokens such as *OmfPropertyIndexer*, *OmfContainerId* and *OmfTypeId*. + Clear the *Excdesc* field completely and save your change. +- Start up your OMF North instance. + +Clearing the Extended Descriptor will cause OMF to "adopt" the PI Point. +OMF will update the Extended Descriptor with new values of the OMF tokens. +Watch the System Log during startup to see if any problems occur. + +Further Troubleshooting +~~~~~~~~~~~~~~~~~~~~~~~ + +If you are unable to locate your problematic PI Points using the PI System Explorer, or if there are simply too many of them, there are advanced techniques available to troubleshoot +and repair your system. +Contact Technical Support for assistance. + .. _Linked_Types: Linked Types @@ -684,10 +740,10 @@ These are the OMF versions the plugin will use to post data: | |- 2021 SP3| | | |- 2023 | | +-----------+----------+---------------------+ -| 1.1|- 2019 | | -| |- 2019 SP1| | +| 1.1| | | +-----------+----------+---------------------+ -| 1.0| |- 2020 | +| 1.0|- 2019 |- 2020 | +| |- 2019 SP1| | +-----------+----------+---------------------+ The AVEVA Data Hub (ADH) is cloud-deployed and is always at the latest version of OMF support which is 1.2. diff --git a/docs/conf.py b/docs/conf.py index de97a54cdc..42b9647e39 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -177,4 +177,4 @@ # Pass Plugin DOCBRANCH argument in Makefile ; by default develop # NOTE: During release time we need to replace DOCBRANCH with actual released version -subprocess.run(["make generated DOCBRANCH='2.3.0RC'"], shell=True, check=True) +subprocess.run(["make generated DOCBRANCH='2.4.0RC'"], shell=True, check=True) diff --git a/docs/control.rst b/docs/control.rst index f52c4ab997..4428720a99 100644 --- a/docs/control.rst +++ b/docs/control.rst @@ -625,7 +625,7 @@ The control pipelines are not defined against a particular end point as they are - Source - The request is either originating from a script or being sent to a script. * - Service - - Source + - Both - The request is either coming from a named service or going to a named service. Control pipelines are always executed in the control dispatcher service. When a request comes into the service it will look for a pipeline to pass that request through. This process will look at the source of the request and the destination of the request. If a pipeline that has source and destination endpoints that are an exact match for the source and destination of the control request then the control request will be processed through that pipeline. 
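The exact-match selection described in the paragraph above lends itself to a simple lookup. The sketch below is purely illustrative: the type names (PipelineEndpoint, ControlPipeline, findPipeline) are invented for the example and are not the control dispatcher's actual classes; it only outlines the matching behaviour the text describes.

.. code-block:: C

   #include <string>
   #include <vector>

   // Hypothetical endpoint description: a type (service, script, asset, ...)
   // plus an optional name for that endpoint.
   struct PipelineEndpoint {
       std::string type;
       std::string name;
       bool matches(const PipelineEndpoint& other) const
       {
           return type == other.type && name == other.name;
       }
   };

   struct ControlPipeline {
       PipelineEndpoint source;
       PipelineEndpoint destination;
   };

   // Return the first pipeline whose source and destination are an exact
   // match for the endpoints of the incoming control request, or nullptr.
   const ControlPipeline *findPipeline(const std::vector<ControlPipeline>& pipelines,
                                       const PipelineEndpoint& src,
                                       const PipelineEndpoint& dst)
   {
       for (const auto& pipeline : pipelines)
       {
           if (pipeline.source.matches(src) && pipeline.destination.matches(dst))
               return &pipeline;
       }
       return nullptr;
   }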
diff --git a/docs/images/alert.jpg b/docs/images/alert.jpg new file mode 100644 index 0000000000..24513befd0 Binary files /dev/null and b/docs/images/alert.jpg differ diff --git a/docs/images/storage_01.jpg b/docs/images/storage_01.jpg index 63e992bd3f..c53096888e 100644 Binary files a/docs/images/storage_01.jpg and b/docs/images/storage_01.jpg differ diff --git a/docs/images/storage_config.png b/docs/images/storage_config.png index f764829e2e..09c860c87f 100644 Binary files a/docs/images/storage_config.png and b/docs/images/storage_config.png differ diff --git a/docs/plugin_developers_guide/02_writing_plugins.rst b/docs/plugin_developers_guide/02_writing_plugins.rst index 2a309f8c1f..c70ccf5320 100644 --- a/docs/plugin_developers_guide/02_writing_plugins.rst +++ b/docs/plugin_developers_guide/02_writing_plugins.rst @@ -28,6 +28,17 @@ Writing and Using Plugins A plugin has a small set of external entry points that must exist in order for Fledge to load and execute that plugin. Currently plugins may be written in either Python or C/C++, the set of entry points is the same for both languages. The entry points detailed here will be presented for both languages, a more in depth discussion of writing plugins in C/C++ will then follow. +General Guidance +---------------- + +Before delving into the detail of how to write plugins, what entry points have to be provided and how to build and test them, a few notes of general guidance that all plugin developers should consider that will prevent the plugin writer difficulty. + + - The ethos of Fledge is to provide data pipelines that promote easy building of applications through re-use of small, focused processing components. Always try to make use of existing plugins when at all possible. When writing new plugins do not be tempted to make them too specific to a single application. This will mean it is more likely that at some point in the future you will have all the components in your toolbox that you need to create the next application without having to write new plugins. + + - Filters within Fledge are run within a single process which may be a south or north service, they do not run as separate executable. Therefore make sure that when you write a new plugin service that you do not make use of global variables. Global variables will be shared between all the plugins in a service and may clash with other plugins and will prevent the same plugin being used multiple times within a pipeline. + + - Do not make assumptions about how the data you are processing in your plugin will be used, or by how many upstream components it will be used. For example do not put anything in a south plugin or a filter plugin that assumes the data will be consumed by a particular north plugin or will only be consumed by one north plugin. An example of this might be a south plugin that adds OMF AF Location hints to the data it produces. Whilst this works well if the data is sent to OMF, it does not help if the data is sent to a different destination that also requires location information. Adding options for different destinations only compounds the problem, consider for example that the data might be sent to multiple destinations. A better approach would be to add generic location meta data to the data and have the hints filters for each of the destinations perform the destination specific work. 
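To illustrate the guidance about global variables, the following sketch keeps all per-instance state behind the handle returned from the initialisation entry point. It is a deliberately simplified outline: the entry point signatures shown here are not the full Fledge filter API, and the MyFilterState class is invented for the example.

.. code-block:: C

   #include <string>

   typedef void *PLUGIN_HANDLE;

   // All state for one instance of the plugin lives in this object; nothing
   // is held in globals, so several instances can coexist within one service.
   class MyFilterState {
   public:
       MyFilterState() : m_readingsSeen(0) {}
       long        m_readingsSeen;
       std::string m_lastAsset;
   };

   extern "C" PLUGIN_HANDLE plugin_init()   // simplified signature, for illustration only
   {
       return new MyFilterState();
   }

   extern "C" void plugin_shutdown(PLUGIN_HANDLE handle)
   {
       delete static_cast<MyFilterState *>(handle);
   }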
+ Common Fledge Plugin API ------------------------- @@ -200,7 +211,7 @@ Plugin Initialization The plugin initialization is called after the service that has loaded the plugin has collected the plugin information and resolved the configuration of the plugin but before any other calls will be made to the plugin. The initialization routine is called with the resolved configuration of the plugin, this includes values as opposed to the defaults that were returned in the *plugin_info* call. -This call is used by the plugin to do any initialization or state creation it needs to do. The call returns a handle which will be passed into each subsequent call of the plugin. The handle allows the plugin to have state information that is maintained and passed to it whilst allowing for multiple instances of the same plugin to be loaded by a service if desired. It is equivalent to a this or self pointer for the plugin, although the plugin is not defined as a class. +This call is used by the plugin to do any initialization or state creation it needs to do. The call returns a handle which will be passed into each subsequent call of the plugin. The handle allows the plugin to create state information that is maintained and passed to it whilst allowing for multiple instances of the same plugin to be loaded by a service if desired. It is equivalent to a this or self pointer for the plugin, although the plugin is not defined as a class. The handle is the only way in which the plugin should retain information between calls to a given entry point and also the only way information should be passed between entry points. In Python a simple example of a sensor that reads a GPIO pin for data, we might choose to use that configured GPIO pin as the handle we pass to other calls. @@ -332,7 +343,132 @@ The configuration items within a category are JSON object, the object key is the "default" : "5" } -We have used the properties *type* and *default* to define properties of the configuration item *MaxRetries*. These are not the only properties that a configuration item can have, the full set of properties are +We have used the properties *type* and *default* to define properties of the configuration item *MaxRetries*. These are not the only properties that a configuration item can have, the full set of item types and properties are shown below + +Types +~~~~~ + +The configuration items within a configuration category can each be defined as one of a set of types. The types currently supported by Fledge are + +.. list-table:: + :header-rows: 1 + + * - Type + - Description + * - integer + - An integer numeric value. The value may be positive or negative but may not contain any fractional part. The *minimum* and *maximum* properties may be used to control the limits of the values assigned to an integer. + * - float + - A floating point numeric item. The *minimum* and *maximum* properties may be used to control the limits of the values assigned to a floating point item. + * - string + - An alpha-numeric array of characters that may contain any printable characters. The *length* property can be used to constrain the maximum length of the string. + * - boolean + - A boolean value that can be assigned the values *true* or *false*. + * - IPv4 + - An IP version 4 address. + * - IPv6 + - An IP version 6 address. + * - X509 certificate + - An X509 certificate + * - password + - A string that is used as a password. There is no difference between this or a string type other than user interfaces do not show this in plain text. 
+ * - JSON + - A JSON document. The value is checked to ensure it is a valid JSON document. + * - URL + - A universal resource locator string. The API will check for correct URL formatting of the value. + * - enumeration + - The item can be assigned one of a fixed set of values. These values are defined in the *options* property of the item. + * - script + - A block of text that is executed as a script. The script type should be used for larger blocks of code to be executed. + * - code + - A block of text that is executed as Python code. This is used for small snippets of Python rather than when larger scripts. + * - northTask + - The name of a north task. The API will check that the value matches the name of an existing north task. + * - ACL + - An access control list. The value is the string name of an access control list that has been created within Fledge. + * - list + - A list of items, the items can be of type *string*, *integer*, *float*, *enumeration* or *object*. The type of the items within the list must all be the same, and this is defined via the *items* property of the list. A limit on the maximum number of entries allowed in the list can be enforced by use of the *listSize* property. + * - kvlist + - A key value pair list. The key is a string value always but the value of the item in the list may be of type *string*, *enumeration*, *float*, *integer* or *object*. The type of the values in the kvlist is defined by the *items* property of the configuration item. A limit on the maximum number of entries allowed in the list can be enforced by use of the *listSize* property. + * - object + - A complex configuration type with multiple elements that may be used within *list* and *kvlist* items only, it is not possible to have *object* type items outside of a list. Object type configuration items have a set of *properties* defined, each of which is itself a configuration item. + +Key/Value List +############## + +A key/value list is a way of storing tagged item pairs within a list. For example, to create a list of labels and expressions we can use a kvlist that stores the expressions as string values in the kvlist. + +.. code-block:: JSON + + "expressions" : { + "description" : "A set of expressions used to evaluate and label data", + "type" : "kvlist", + "items" : "string", + "default" : "{\"idle\" : \"speed == 0\"}", + "order" : "4", + "displayName" : "Labels" + } + +The key values must be unique within a kvlist, as the data is stored as a JSON object with the key becoming the property name and the value of the property the corresponding value for the key. + +Lists of Objects +################ + +Object type items may be used in lists and are a mechanism to allow for list of groups of configuration items. The object list type items must specify a property called *properties*. The value of this is a JSON object that contains a list of configuration items that are grouped into the object. + +An example use of an object list might allow for a map structure to be built for accessing a device like a PLC. The following shows the definitions of a key/value pair list where the value is an object. + +.. 
code-block:: JSON + + "map": { + "description": "A list of datapoints to read and PLC register definitions", + "type": "kvlist", + "items" : "object", + "default": "{\"speed\" : {\"register\" : \"10\", \"width\" : \"1\", \"type\" : \"integer\"}}", + "order" : "3", + "displayName" : "PLC Map", + "properties" : { + "register" : { + "description" : "The register number to read", + "displayName" : "Register", + "type" : "integer", + "default" : "0" + }, + "width" : { + "description" : "Number of registers to read", + "displayName" : "Width", + "type" : "integer", + "maximum" : "4", + "default" : "1" + }, + "type" : { + "description" : "The data type to read", + "displayName" : "Data Type", + "type" : "enumeration", + "options" : [ "integer","float", "boolean" ], + "default" : "integer" + } + } + } + +The *value* and *default* properties for a list of objects is returned as a JSON structure. An example of the above list with two elements in the list, voltage and current would be returned as follows: + +.. code-block:: JSON + + { + "voltage" : { + "register" : "10", + "width" : "2", + "type" : "integer" + }, + "current" : { + "register" : "14", + "width" : "4", + "type" : "float" + } + } + +Properties +~~~~~~~~~~ .. list-table:: :header-rows: 1 @@ -347,8 +483,12 @@ We have used the properties *type* and *default* to define properties of the con - A description of the configuration item used in the user interface to give more details of the item. Commonly used as a mouse over help prompt. * - displayName - The string to use in the user interface when presenting the configuration item. Generally a more user friendly form of the item name. Item names are referenced within the code. + * - items + - The type of the items in a list or kvlist configuration item. * - length - The maximum length of the string value of the item. + * - listSize + - The maximum number of entries allowed in a list or kvlist item. * - mandatory - A boolean flag to indicate that this item can not be left blank. * - maximum @@ -371,8 +511,17 @@ We have used the properties *type* and *default* to define properties of the con - An expression used to determine if the configuration item is valid. Used in the UI to gray out one value based on the value of others. * - value - The current value of the configuration item. This is not included when defining a set of default configuration in, for example, a plugin. + * - properties + - A set of items that are used in list and kvlist type items to create a list of groups of configuration items. + +Of the above properties of a configuration item *type*, *default* and *description* are mandatory, all others are optional. + +.. note:: + + It is strongly advised to include a *displayName* and an *order* in every item to improve the GUI rendering of configuration screens. If a configuration category is very large it is also recommended to use the *group* property to group together related items. These grouped items are displayed within separate tabs in the current Fledge GUI. -Of the above properties of a configuration item *type*, *default* and *description* are mandatory, all other may be omitted. +Management +~~~~~~~~~~ Configuration data is stored by the storage service and is maintained by the configuration in the core Fledge service. When code requires configuration it would create a configuration category with a set of items as a JSON document. It would then register that configuration category with the configuration manager. 
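As a concrete illustration of the previous paragraph, a plugin or service typically declares its default category as a JSON document held in a string and then hands that document to the configuration manager. The fragment below is only a sketch under that assumption: the item names are illustrative (the kvlist item mirrors the expressions example shown earlier) and the registration call itself is omitted, since it depends on the client code in question.

.. code-block:: C

   // A default configuration category declared as a JSON document.
   // maxRetries is an invented second item, included to show another type.
   static const char *default_config = R"({
       "expressions" : {
           "description" : "A set of expressions used to evaluate and label data",
           "type" : "kvlist",
           "items" : "string",
           "default" : "{\"idle\" : \"speed == 0\"}",
           "displayName" : "Labels",
           "order" : "2"
       },
       "maxRetries" : {
           "description" : "Maximum number of retries",
           "type" : "integer",
           "default" : "3",
           "displayName" : "Max Retries",
           "order" : "1"
       }
   })";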
The configuration manager is responsible for storing the data in the storage layer; as it does this it first checks to see if there is already a configuration category from a previous execution of the code. If one does exist then the two are merged; this merging process allows updates to the software to extend the configuration category whilst maintaining any changes in values made by the user. @@ -426,7 +575,7 @@ The configuration in *default_config* is assumed to have an enumeration item called *discovered*. Note the use of the *Manual* option to allow entry of devices that could not be discovered. -The *discover* method does the actually discovery and manipulates the JSON configuration to add the the *options* element of the configuration item. +The *discover* method does the actual discovery and manipulates the JSON configuration to add the *options* element of the configuration item. The code that connects to the device should then look at the *discovered* configuration item; if it finds it set to *Manual* then it will get an IP address from the *IP* configuration item. Otherwise it uses the information in the *discovered* item to connect; note that this need not just be an IP address, you can format the data in a way that is more user friendly and have the connection code extract what it needs or create a table in the *discover* method to allow for user meaningful strings to be mapped to network addresses. diff --git a/docs/plugin_developers_guide/04_north_plugins.rst b/docs/plugin_developers_guide/04_north_plugins.rst index 669659faa7..4fd607dc72 100644 --- a/docs/plugin_developers_guide/04_north_plugins.rst +++ b/docs/plugin_developers_guide/04_north_plugins.rst @@ -386,7 +386,7 @@ The *plugin_shutdown* entry point is called when the plugin is no longer require .. code-block:: C - uint32_t plugin_shutdown(PLUGIN_HANDLE handle) + void plugin_shutdown(PLUGIN_HANDLE handle) { myNorthPlugin *plugin = (myNorthPlugin *)handle; delete plugin; diff --git a/docs/plugin_developers_guide/08_storage.rst b/docs/plugin_developers_guide/08_storage.rst new file mode 100644 index 0000000000..185c3f8222 --- /dev/null +++ b/docs/plugin_developers_guide/08_storage.rst @@ -0,0 +1,1258 @@ +Storage Service And Plugins +=========================== + +The storage component provides a level of abstraction of the database layer used within Fledge. The storage abstraction is explicitly not a SQL layer, and the interface it offers to the clients of the storage layer (the device service, API and send process) is very deliberately not a SQL interface, to facilitate the replacement of the underlying storage with any no-SQL storage mechanism or even a simple file storage mechanism. Different plugins may be used for the structured and unstructured data that is stored by the storage layer. + +The three requirements that have resulted in the plugin architecture and separation of the database access into a microservice within Fledge are: + + - A desire to be able to support different storage mechanisms as the deployment and customer requirements dictate. E.g. SQL, no-SQL, in-memory, backing store (disk, SD card etc.) or simple file based mechanisms. + + - The ability to separate the storage from the south and north services of Fledge and to allow for distribution of Fledge across multiple physical hardware components. + + - To provide flexibility to allow components to be removed from a Fledge deployment, e.g. remove the buffering and have a simple forwarding router implementation of Fledge without storage.
+ +Use of JSON +----------- + +There are three distinct reasons that JSON is used within the storage layer, these are; + + - The REST API uses JSON to encode the payloads within each API entry point. This is the preferred payload type for all REST interfaces in Fledge. The option to use XML has been considered and rejected as the vast majority of REST interfaces now use JSON and not XML. JSON is generally more compact and easier to read than XML. + + - The interface between the generic storage layer and the plugin also passes requests and results as JSON. This is partly to make it compatible with the REST payloads and partly to give the plugin implementer flexibility and the ability to push functionality down to the plugin layer to be able to exploit storage system specific features for greatest efficiency. + + - Some of the structures that are persisted are themselves JSON encoded documents. The assumption is that in this case they will remain as JSON all the way to the storage system itself and be persisted as JSON rather than being translated. These JSON structures are transported within the JSON structure of a request (or response) payload and will be sent as objects within that payload although they are not interpreted as anything other than data to be stored by the storage layer. + + +Requirements +~~~~~~~~~~~~ + +The storage layer represents the interface to persist data for the Fledge appliance, all persisted data will be read or written via this storage layer. This includes: + + - Configuration data - this is a set of JSON documents indexed by a key. + + - Readings data - the readings coming from the device that have buffered for a period of time. + + - User & credential data - this is username, passwords and certificates related to the users of the Fledge API. + + - Audit trail data - this is a log of significant events during the lifetime of Fledge. + + - Metrics - various modules will hold performance metrics, such as readings in, readings out etc. These will be periodically written by those models as cumulative totals. These will be collected by the statistics gatherer and interval statistics of the values will be written to the persistent storage. + + - Task records - status and history of the tasks that have been scheduled within Fledge. + + - Flexible schemas - the storage layer should be written that the schema, assuming there is a schema based underlying storage mechanism, is not fixed by the storage layer itself, but by the implementation of the storage and the application (Fledge). In particular the set of tables and columns in those tables is not preconfigured in the storage layer component (assuming a schema based underlying data store). + +Implementation Language +~~~~~~~~~~~~~~~~~~~~~~~ + +The core of the Fledge platform has to date been written using Python, for the storage layer however a decision has been taken to implement this in C/C++. There are a number of factors that need to be taken into account as a result of this decision. + + - Library choices made for the Python implementation are no longer valid and a choice has to be made for C/C++. + + - Common code, such as the microservices management API can not be reused and a C/C++ implementation is required. + +The storage service differs from the other services within Fledge as it only supports plugins compiled to shared objects that have the prescribed C interface. The plugin's code itself may be in other languages, but it must compile to a C compatible shared object using the C calling conventions. 
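The requirement that a plugin resolve to C calling conventions is easiest to see from the loading side. The fragment below is a minimal sketch, not the storage service's actual loader: the shared object path and the return type of the resolved entry point are placeholders, included only to show why an extern "C" symbol is needed regardless of the language the plugin body is written in.

.. code-block:: C

   #include <dlfcn.h>
   #include <cstdio>

   int main()
   {
       // Load the plugin shared object; the path here is a placeholder.
       void *so = dlopen("./libexamplestorage.so", RTLD_LAZY);
       if (!so)
       {
           fprintf(stderr, "Failed to load plugin: %s\n", dlerror());
           return 1;
       }

       // Because the entry point is declared extern "C" in the plugin, the
       // symbol is not mangled and can be resolved by its plain name.
       typedef const char *(*entry_t)();
       entry_t info = (entry_t)dlsym(so, "plugin_info");
       if (info)
           printf("plugin_info returned: %s\n", info());

       dlclose(so);
       return 0;
   }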
+ +Language Choice Reasons +####################### + +Initially it was envisaged that the entire Fledge product would be written in Python, after the initial demo implementation issues were starting to surface regarding the validity of this choice for implementation of a product such as Fledge. These issues are; + + - Scalability - Python is essentially a single threaded language due to the Global Interpreter Lock (GIL) which only allows a single Python statement to be executing at any one time. + + - Portability - As we started working more with OSIsoft and with ARM it became clear that the option to port Fledge or some of its components to embedded hardware was going to become more of a requirement for us. In particular the ARM mbed platform is one that has been discussed. Python is not available on this platform or numerous other embedded platforms. + +If Python was not to be the language in which to implement in future then it was decided that the storage layer, as something that has yet to be started, might be best implemented in a different way. Since the design is based on micro-services with REST API’s between them, then it is possible to mix and match the implementation of different components amongst different languages. + +The storage layer is a separate micro-service and not directly linked to any Python code, linkage is only via a REST API. Therefore the storage layer can implement a threading model that best suits it and is not tied to the Python threading model in use in other microservices. + +The choice of C/C++ is based on what is commonly available on all the platforms on which we now envisage Fledge might need to run in the foreseeable future and on the experience available within the team. + +Library Choice +############## + +One of the key libraries that will need to be chosen for C/C++ is the JSON library since there is no native support for this in the language. There are numerous libraries that exist for this purpose, for example, rapidjson, Jansson and many more. Some investigation is required to find the most suitable. The factors to be considered in the choice of library are, in order of importance; + + - Functionality - clearly any library chosen must offer the feature we need. + + - Footprint - Footprint is a major concern for Fledge as we wish to run in constrained devices with the likelihood that in future the device we want to run on may become even smaller than we are considering today. + + - Thread safety - It is assumed that for reasons of scalability and the nature of a REST interface that multiple threads will be employed in the implementation, so hence thread safety is a major concern when choosing a library. + + - Performance - Any library chosen should be reasonably performant at the job it does in order to be considered. We need to avoid choosing libraries that are slow or bloated as part of our drive to run on highly constrained hardware. + +The choice of the JSON library is also something to be considered; since JSON objects are passed across the plugin interface, choosing a C++ library would limit both the microservice and the plugins to use C++. It may be preferable to use a C based library and thus have the flexibility to have a C or C++ implementation for either the service itself or for the plugin. + +Another key library choice, in order to support the REST interface, is an HTTP library capable of being used to support the REST interface development and able to support custom header fields and HTTPS. 
Once again these are numerous, libmicrohttpd, Simple-Web-Server, Proxygen. A choice must be made here also using the same criteria outlined above. + +Thread safety is likely to be important also as it is assumed the storage layer will be multi-threaded and almost certainly utilise asynchronous I/O operations. + +Classes of Data Stored +---------------------- + +There are two classes of data that Fledge needs to store: + + - Internally generated data + + - Data that emanates from sensors + +The first of these are essentially Fledges configuration, state and lookup data it needs to function. The pattern of access to this data is the classic create, retrieve, update and delete operations that are common to most databases. Access is random by nature and usually via some form of indexes and keys. + +The second class of data that is stored, and the one which is the primary function of Fledge to store, is the data that it receives from sensors. Here the pattern of access is very different; + + - New data is always appended to the stored data + + - No updates are supported on this data + + - Data is predominately read in sequential blocks (main use case) + + - Random access is rare and confined to display and analytics within the user interface or by clients of the public API + + - Deletion of data is done based solely on age and entries will not be removed other than in chronological order. + +Given the difference in the nature of the two classes of data and the possibility that this will result in different storage implementations for the two, the interface is split between these two classes of data. This allows; + + - Different plugins to be used for each type, perhaps a SQL database for the internal data storage and a specialised time series database or document store for the sensor readings. + + - A single plugin can choose to only implement a subset of the plugin API, e.g. the common data access methods or the readings methods. Or both. + + - Plugins can choose where and how they store the readings to optimize the implementation. E.g. a SQL data can store the JSON in a table or a series of tables if preferred. + + - The plugins are not forced to store the JSON data in a particular way. For example, a SQL database does not have to use JSON data types in a single column if it does not support them. + +These two classes of data are referred to in this documentation as “common data access” and “readings data”. + +Common Data Access Methods +-------------------------- + +Most of these types of data can be accessed by the classic create, update, retrieve and delete methods and consist of data in JSON format with an associated key and timestamp. In this case a simple create with a key and JSON value, an update with the same key and value, a retrieve with an optional key (which returns an array of JSON objects) and a delete with the key is all that is required. Configuration, metrics, task records, audit trail and user data all fall into this category. Readings however do not and have to be treated differently. + +Readings Data Access +-------------------- + +Readings work differently from other data, both in the way they are created, retrieved and removed. There is no update functionality required for readings currently, in particular there is no method to update readings data. + +The other difference with readings data from the other data that is managed by the storage layer is related to the volume and use of the data. 
Readings data is by far the largest volume of data that is managed by Fledge, and has a somewhat different lifecycle and use. The data streams in from external devices, lives within the storage layer for a period of time and is then removed. It may also be retrieved by other processes during the period of time in lives within the buffer. + +Another characteristic of the readings data is the ability to trigger processing based on the arrival of new data. This could be from a process that blocks, waiting for data to arrive or as an optimisation when a process wishes to process the new data as it arrives and not retrieve it explicitly from the storage layer. In this later case the storage data would still be buffered in the storage layer using the usual rules for storage and purging of that data. + +Reading Creation +~~~~~~~~~~~~~~~~ + +Readings come from the device component of Fledge and are a time series stream of JSON documents. They should be appended to the storage device with unique keys and a timestamp. The appending of readings can be considered as a queuing mechanism into the storage layer. + +Managing Blocked Retrievals +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Various components, most notably the sending process and north service, read blocks of readings from the storage layer. These components may request a notification when new readings are available, for example the sending process may request a new block of data when there are no more blocks available. This will be registered with the storage layer and the storage layer will notify the sending process that new data is available and that a subsequent call will return a new block of data. + +This is an advantage feature that may be omitted from the first version. It is intended to allow a process that is fetching and processing readings data to have an efficient way to know that new data is available to be processed. One scenario would be a sending process that has sent all of the readings that are available; it wishes to be informed when new readings are available to it for sending. Rather than poll the storage layer requesting new readings, it may request the storage layer to call it when a number of readings are available beyond the id that process last fetched. + +Bypassing Database Storage +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +One potential optimisation which the storage layer should be built to allow as a future optimization is to architect the storage layer such that a publish/subscribe mechanism could be used to allow the data that flows into the storage layer and be directed to both the storage plugin itself and also send it to other services such as the sending process. + +Reading Retrieval +~~~~~~~~~~~~~~~~~ + +Readings may be retrieved via one of two mechanism + + - By the sending process that will request readings within a time window + + - From the API layer for analysis within the edge device or an external entity that is retrieving the data via the Fledge user REST API. + +The sending process and north service may require large volumes of data to be sent, in order to reduce the memory footprint required and to improve reliability, the sending module will require the readings in controllable “chunks”, therefore it will request readings between two timestamps in blocks of x readings and then request each block sequentially. It is the responsibility of the sending process to ensure that it requests blocks of a reasonable size. 
Since the REST interface is by definition stateless the storage layer does not need to maintain any information about previous fetches of data. + +The API access to data will be similar, except it will have a limitation on the number of readings, it will request ordered readings between timestamps and ask for readings between the n-th and m-th reading. E.g. Return readings between 21:00 on 10th June 2017 and 21:00 on the 11th June limited to the 100th and 150th reading in that time. The API layer will enforce a maximum number of readings that can be returned in order to make sure result sets are small. + +Reading Removal +~~~~~~~~~~~~~~~ + +The reading removal is done via the purge process, this process will request readings before a given time to be removed from the storage device based on the timestamp of each reading. Introducing the storage layer and removing the pure SQL interface will alter the nature of the purge process and essentially move the logic of the purge process into the storage layer. + +Storage Plugin +-------------- + +One of the requirements that drives the desire to have a storage layer is to isolate the other services and users of the storage layer from the technology that provides that storage. The upper level of the storage service offers a consistent API to the client of the storage service and provides the common infrastructure to communicate with the other services within Fledge, whilst the lower layer provides the interface to the storage technology that will actually store the data. Since we have a desire to be able to switch between different storage layers this lower layer will use a plugin mechanism that will allow a common storage service to dynamically load one or more storage plugins. + +The ability to use multiple plugins within a single storage layer would allow a different plugin to be used for each class of data, see Classes of Data Stored. This would give the flexibility to store Fledges internal data in generic database whilst storing the readings data in something that was tailored specifically to time series or JSON data. There is no requirement to have multiple plugins in any specific deployment, however if the option is to be made available the code that is initially developed should be aware of this future requirement and be implemented appropriately. It is envisaged that the first version will have a single plugin for both classes of data. The incremental effort for supporting more than one plugin is virtually zero, hence the inclusion here. + +Entry Points +~~~~~~~~~~~~ + +The storage plugin exposes a number of entry points in a similar way to the Python plugins used for the translator interface and the device interface. In the C/C++ environment the mechanism is slightly different from that of Python. A plugin is a shared library that is included with the installation or may be installed later into a known location. The library is use by use the dlopen() C library function and each entry point is retrieved using the dlsym() call. + +The plugin interface is modeled as a set of C functions rather than as a C++ class in order to give the plugin writer the flexibility to implement the plugin in C or C++ as desired. + +.. list-table:: + :widths: 30 70 + :header-rows: 1 + + * - Entry Point + - Summary + * - plugin_info + - Return information about the plugin. + * - plugin_init + - Initialise the plugin. + * - plugin_common_insert + - Insert a row into a data set (table). + * - plugin_common_retrieve + - Retrieve a result set from a table. 
+ * - plugin_common_update + - Update data in a data set. + * - plugin_common_delete + - Delete data from a data set. + * - plugin_reading_append + - Append one or more readings or the readings table. + * - plugin_reading_fetch + - Retrieve a block of readings from the readings table. + * - plugin_reading_retrieve + - Generic retrieve to retrieve data from the readings table based on query parameters. + * - plugin_reading_purge + - Purge readings from the readings table. + * - plugin_release + - Release a result set previously returned by the plugin to the plugin, so that it may be freed. + * - plugin_last_error + - Return information on the last error that occurred within the plugin. + * - plugin_shutdown + - Called prior to the device service being shut down. + + +Plugin Error Handling +~~~~~~~~~~~~~~~~~~~~~ + +Errors that occur within the plugin must be propagated to the generic storage layer with sufficient information to allow the generic layer to report those errors and take appropriate remedial action. The interface to the plugin has been deliberately chosen not to use C++ classes or interfaces so that plugin implementers are not forced to implement plugins in C++. Therefore the error propagation mechanism can not be C++ exceptions and a much simpler, language agnostic approach must be taken. To that end errors will be indicated by the return status of each call into the interface and a specific plugin entry point will be used to retrieve more details on errors that occur. + +Plugin API Header File +~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: C + + #ifndef _PLUGIN_API + #define _PLUGIN_API + + typedef struct { + char *name; + char *version; + unsigned int options; + char *type; + char *interface; + char *config; + } PLUGIN_INFORMATION; + + typedef struct { + char *message; + char *entryPoint; + boolean retryable; + } PLUGIN_ERROR; + + typedef void * PLUGIN_HANDLE; + + /** + * Plugin options bitmask values + */ + #define SP_COMMON 0x0001 + #define SP_READINGS 0x0002 + + /** + * Plugin types + */ + #define PLUGIN_TYPE_STORAGE "storage" + + /** + * Readings purge flags + */ + #define PLUGIN_PURGE_UNSENT 0x0001 + + extern PLUGIN_INFORMATION *plugin_info(); + extern PLUGIN_HANDLE plugin_init(); + extern boolean plugin_common_insert(PLUGIN_HANDLE handle, char *table, JSON *data); + extern JSON *plugin_common_retrieve(PLUGIN_HANDLE handle, char *table, JSON *query); + extern boolean plugin_common_update(PLUGIN_HANDLE handle, char *table, JSON *data); + extern boolean plugin_common_delete(PLUGIN_HANDLE handle, char *table, JSON *condition); + extern boolean plugin_reading_append(PLUGIN_HANDLE handle, JSON *reading); + extern JSON *plugin_reading_fetch(PLUGIN_HANDLE handle, unsigned long id, unsigned int blksize); + extern JSON *plugin_reading_retrieve(PLUGIN_HANDLE handle, JSON *condition); + extern unsigned int plugin_reading_purge(PLUGIN_HANDLE handle, unsigned long age, unsigned int flags, unsigned long sent); + extern plugin_release(PLUGIN_HANDLE handle, JSON *results); + extern PLUGIN_ERROR *plugin_last_error(PLUGIN_HANDLE); + extern boolean plugin_shutdown(PLUGIN_HANDLE handle) + #endif + + +Plugin Support +~~~~~~~~~~~~~~ + +A storage plugin may support either or both of the two data access methods; common data access methods and readings access methods. The storage service can use the mechanism to have one plugin for the common data access methods, and hence a storage system for the general tables and configuration information. 
It then may load a second plugin in order to support the storage and retrieval of readings.
+
+Plugin Information
+~~~~~~~~~~~~~~~~~~
+
+The plugin information entry point, plugin_info(), allows the storage service to retrieve information from the plugin. This information comes back as a C structure (PLUGIN_INFORMATION). The PLUGIN_INFORMATION will include a number of fields with information that will be used by the storage service.
+
+.. list-table::
+    :header-rows: 1
+    :widths: 20 60 20
+
+    * - Property
+      - Description
+      - Example
+    * - name
+      - A printable name that can be used to identify the plugin.
+      - Postgres Plugin
+    * - version
+      - A version number of the plugin, again used for diagnostics and status reporting.
+      - 1.0.2
+    * - options
+      - A bitmask of options that describes the level of support offered by this plugin.
+        Currently two options are available; SP_COMMON and SP_READINGS. Each of these bits represents support for the set of common data access methods and the readings access method. See Plugin Support for details.
+      - SP_COMMON|SP_READINGS
+    * - type
+      - The type of the plugin; this is used to distinguish a storage API plugin from any other type of plugin in Fledge. This should always be the string “storage”.
+      - storage
+    * - interface
+      - The interface version that the plugin implements. Currently the version is 1.0.
+      - 1.0
+
+
+This is the first call that will be made to the plugin after it has been loaded; it is designed to give the loader enough information to know how to interact with the plugin and to allow it to confirm the plugin is of the correct type.
+
+Plugin Initialisation
+~~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: C
+
+    extern PLUGIN_HANDLE plugin_init();
+
+Called after the plugin has been loaded and the plugin information has been successfully retrieved. This will only be called once and should perform the initialisation necessary for the plugin to communicate with the underlying storage system.
+
+The plugin initialisation call returns a handle, of type void \*, which will be used in future calls to the plugin. This may be used to hold instance or state information that would be needed for any future calls. The handle should be used in preference to global variables within the plugin.
+
+If the initialisation fails the routine should raise an exception. After this exception is raised the plugin will not be used further.
+
+Plugin Common Insert
+~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: C
+
+    extern boolean plugin_common_insert(PLUGIN_HANDLE handle, char *table, JSON *data);
+
+Insert data that is represented by the JSON structure that is passed into the call to the specified table.
+
+The handle is the value returned by the call to plugin_init().
+
+The table is the name of the table, or data set, into which the data is to be inserted.
+
+The data is a JSON document with a number of property name/value pairs. For example, if the plugin is storing the data in a SQL database, the names are the column names in the equivalent SQL table and the values are the values to write to those columns. Plugins for non-SQL storage systems, such as document databases, may choose to store the data as it is represented in the JSON document or in a very different structure. Note that the values may be of different types, represented by their JSON type, and may be JSON objects themselves. The plugin should do whatever conversion is needed for the particular storage layer based on the JSON type.
+
+The return value of this call is a boolean that represents the success or failure of the insert.
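+
+As an illustration of how the storage service might drive this entry point, the sketch below inserts a single configuration style row. The header name plugin_api.h, the json_parse() helper and the table and column names are all assumptions made for the example; in practice the JSON document would be built with whichever JSON library is chosen for the service.
+
+.. code-block:: C
+
+    #include <stdio.h>
+
+    #include "plugin_api.h"   /* assumed name for the header shown earlier */
+
+    /*
+     * Hypothetical helper that builds the opaque JSON document type used by
+     * the plugin interface from a string.
+     */
+    extern JSON *json_parse(const char *document);
+
+    void insert_configuration_item(PLUGIN_HANDLE handle)
+    {
+        /*
+         * Each property name maps to a column in a SQL based plugin, so a
+         * relational plugin might render this as something like:
+         *   INSERT INTO configuration (key, description, value)
+         *          VALUES ('SENSORS', 'Sensor configuration', '{"poll" : 1000}');
+         * The table and column names are illustrative only.
+         */
+        JSON *data = json_parse(
+            "{ \"key\" : \"SENSORS\","
+            "  \"description\" : \"Sensor configuration\","
+            "  \"value\" : { \"poll\" : 1000 } }");
+
+        if (!plugin_common_insert(handle, (char *)"configuration", data))
+        {
+            /* On failure the plugin can be asked for details of the error */
+            PLUGIN_ERROR *err = plugin_last_error(handle);
+            fprintf(stderr, "Insert failed in %s: %s\n",
+                    err->entryPoint, err->message);
+        }
+    }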
+
+Plugin Common Retrieve
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: C
+
+    extern JSON *plugin_common_retrieve(PLUGIN_HANDLE handle, char *table, JSON *query);
+
+Retrieve a data set from a named table.
+
+The handle is the value returned by the call to plugin_init().
+
+The table is the name of the table, or data set, from which the data is to be retrieved.
+
+The query is a JSON document that encodes the predicates for the query, the where condition in the case of a SQL layer. See Encoding Query Predicates in JSON for details of how this JSON is encoded.
+
+The return value is the result set of the query encoded as a JSON structure. This encoding takes the form of an array of JSON objects, one per row in the result set. Each object represents a row encoded as name/value pair properties. In addition a count property is included that returns the number of rows in the result set.
+
+A query that returns two rows with columns named “c1”, “c2” and “c3” would be represented as
+
+.. code-block:: JSON
+
+    {
+      "count" : 2,
+      "rows" : [
+        {
+          "c1" : 1,
+          "c2" : 5,
+          "c3" : 9
+        },
+        {
+          "c1" : 8,
+          "c2" : 2,
+          "c3" : 15
+        }
+      ]
+    }
+
+The pointer returned to the caller must be released when the caller has finished with the result set. This is done by calling plugin_release() with the plugin handle and the pointer returned from this call.
+
+Plugin Common Update
+~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: C
+
+    extern boolean plugin_common_update(PLUGIN_HANDLE handle, char *table, JSON *data);
+
+
+Update the contents of a set of rows in the given table.
+
+The handle is the value returned by the call to plugin_init().
+
+The table is the name of the table, or data set, in which the data is to be updated.
+
+The data item is a JSON document that encodes both the values to set in the table and the condition used to select the data. The object contains two properties: a condition, the value of which is a JSON encoded where clause as defined in Encoding Query Predicates in JSON, and a values object. The values object is a set of name/value pairs where each name matches a column name within the table and the value defines the value to set for that column.
+
+The following JSON example
+
+.. code-block:: JSON
+
+    {
+      "condition" : {
+        "column" : "c1",
+        "condition" : "=",
+        "value" : 15
+      },
+      "values" : {
+        "c2" : 20,
+        "c3" : "Updated"
+      }
+    }
+
+
+would map to a SQL update statement
+
+.. code-block:: SQL
+
+    UPDATE {table} SET c2 = 20, c3 = "Updated" WHERE c1 = 15;
+
+Plugin Common Delete
+~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: C
+
+    extern boolean plugin_common_delete(PLUGIN_HANDLE handle, char *table, JSON *condition);
+
+
+Delete a set of rows from the given table.
+
+The handle is the value returned by the call to plugin_init().
+
+The table is the name of the table, or data set, from which the data is to be removed.
+
+The condition JSON element defines the condition clause which will select the rows of data to be removed. This condition object follows the same JSON encoding scheme defined in the section Encoding Query Predicates in JSON. A condition object containing
+
+.. code-block:: JSON
+
+    {
+      "column" : "c1",
+      "condition" : "=",
+      "value" : 15
+    }
+
+would delete all rows where the value of c1 is 15.
+
+Plugin Reading Append
+~~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: C
+
+    extern boolean plugin_reading_append(PLUGIN_HANDLE handle, JSON *reading);
+
+The handle is the value returned by the call to plugin_init().
+ +The reading JSON object is an array of one or more readings objects that should be appended to the readings storage device. + +The return status indicates if the readings have been successfully appended to the storage device or not. + +Plugin Reading Fetch +~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: C + + extern JSON *plugin_reading_fetch(PLUGIN_HANDLE handle, unsigned long id, unsigned int blksize); + +Fetch a block of readings, starting from a given id and return them as a JSON object. + +This call will be used by the sending process to retrieve readings that have been buffered and send them to the historian. The process of sending readings will read a set of consecutive readings from the database and send them as a block rather than send all readings in a single transaction with the historian. This allows the sending process to rate limit the send and also to provide improved error recovery in the case of transmission failure. + +The handle is the value returned by the call to plugin_init(). + +The id passed in is the id of the first record to return in the block. + +The blksize is the maximum number of records to return in the block. If there are no sufficient readings to return a complete block of readings then a smaller number of readings will be returned. If no reading can be returned then a NULL pointer is returned. This call will not block waiting for new readings. + +Plugin Reading Retrieve +~~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: C + + extern JSON *plugin_reading_retrieve(PLUGIN_HANDLE handle, JSON *condition); + +Return a set of readings as a JSON object based on a query to select those readings. + +The handle is the value returned by the call to plugin_init(). + +The condition is a JSON encoded query using the same mechanisms as defined in the section Encoding Query Predicates in JSON. In this case it is expected that the JSON condition would include not just selection criteria but also grouping and aggregation options. + +Plugin Reading Purge +~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: C + + extern unsigned int plugin_reading_purge(PLUGIN_HANDLE handle, unsigned long age, unsigned int flags, unsigned long sent); + +The removal of readings data based on the age of the data with an optional limit to prevent purging of data that has not been sent out of the Fledge device for external storage/processing. + +The handle is the value returned by the call to plugin_init(). + +The age defines the maximum age of data that is to be retained + +The flags define if the sent or unsent status of data should be considered or not. If the flags specify that unsent data should not be purged then the value of the sent parameter is used to determine what data has not been sent and readings with an id greater than the sent id will not be purged. + +Plugin Release +~~~~~~~~~~~~~~ + +.. code-block:: C + + extern boolean plugin_release(PLUGIN_HANDLE handle, JSON *json) + +This call is used by the storage service to release a result set or other JSON object that has been returned previously from the plugin to the storage service. JSON structures should only be released to the plugin when the storage service has finished with them as the plugin will most likely free the memory resources associated with the JSON structure. + +Plugin Error Retrieval +~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: C + + extern PLUGIN_ERROR *plugin_last_error(PLUGIN_HANDLE) + +Return more details on the last error that occurred within this instance of a plugin. 
The returned pointer points to a static area of memory that will be overwritten when the next error occurs within the plugin. There is no requirement for the caller to free any memory returned. + +Plugin Shutdown +~~~~~~~~~~~~~~~ + +.. code-block:: C + + extern boolean plugin_shutdown(PLUGIN_HANDLE handle) + +Shutdown the plugin, this is called with the plugin handle returned from plugin_init and is the last operation that will be performed on the plugin. It is designed to allow the plugin to complete any outstanding operations it may have, close connections to storage layers and generally release resources. + +Once this call has completed the plugin handle that was previously given out by the plugin should be considered to be invalid and any future calls using that handle should fail. + +Encoding Query Predicates in JSON +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +One particular issue with a storage layer API is how to encode the query predicates in a JSON structure that are as expression as the SQL predicates whilst not making the JSON document too complex whilst still maintaining the flexibility to be able to implement storage plugins that are not based on SQL databases. In traditional REST API’s the HTTP GET operation should be used to retrieve data, however the GET operation does not strictly support body content and therefore any modifiers or queries have to be encoded in the URL. Encoding complex query predicates in a URL quickly becomes an issue, therefore this API layer will not take this approach, it will allow simple predicates in the URL, but will use JSON documents and PUT operations to encode more complex predicates in the body of the PUT operation. + +The same JSON encoding will be used in the storage layer to the plugin interface for all retrieval operations. + +The predicates will be encoded in a JSON object that contains a where clause, other optional properties may be added to control aggregation, grouping and sorting of the selected data. + +The where object contains a column name, operation and value to match, it may also optionally contain an and property and an or property. The values of the and and or property, if they exist, are themselves where objects. + +As an example the following JSON object + +.. code-block:: JSON + + { + "where" : { + "column" : "c1", + "condition" : "=", + "value" : "mine", + "and" : { + "column" : "c2", + "condition" : "<", + "value" : 20 + } + } + } + +would result in a SQL where clause of the form + +.. code-block:: console + + WHERE c1 = “mine” AND c2 < 20 + +An example of a more complex example, using an and and an or condition, would be + +.. code-block:: JSON + + { + "where" : { + "column" : "id", + "condition" : "<", + "value" : "3", + "or" : { + "column" : "id", + "condition" : ">", + "value" : "7", + "and" : { + "column" : "description", + "condition" : "=", + "value" : "A test row" + } + } + } + } + +Which would yield a traditional SQL query of + +.. code-block:: console + + WHERE id < 3 OR id > 7 AND description = “A test row” + +.. note:: + + It is currently not possible to introduce bracketed conditions. + +Aggregation +########### + +In some cases adding aggregation of the results of a record selection is also required. Within the JSON this is represented using an optional aggregate object. + +.. code-block:: console + + "aggregate" : { + "operation" : "" + "column" : "" + } + +Valid operations for aggregations are; min, max, avg, sum and count. + +As an example the following JSON object + +.. 
code-block:: JSON + + { + "where" : { + "column" : "room", + "condition" : "=", + "value" : "kitchen" + }, + "aggregate" : { + "operation" : "avg", + "column" : "temperature" + } + } + +Multiple aggregates may be applied, in which case the aggregate property becomes an array of objects rather than a single object. + +.. code-block:: JSON + + { + "where" : { + "column" : "room", + "condition" : "=", + "value" : "kitchen" + }, + "aggregate" : [ + { + "operation" : "avg", + "column" : "temperature" + }, + { + "operation" : "min", + "column" : "temperature" + }, + { + "operation" : "max", + "column" : "temperature" + } + ] + } + +The result set JSON that is created for aggregates will have properties with names that are a concatenation of the column and operation. For example, the where clause defined above would result in a response similar to below. + +.. code-block:: JSON + + { + "count": 1, + "rows" : [ + { + "avg_temperature" : 21.8, + "min_temperature" : 18.4, + "max_temperature" : 22.6 + } + ] + } + +Alternatively an “alias” property may be added to aggregates to control the naming of the property in the JSON document that is produced. + +.. code-block:: JSON + + { + "where" : { + "column" : "room", + "condition" : "=", + "value" : "kitchen" + }, + "aggregate" : [ + { + "operation" : "avg", + "column" : "temperature", + "alias" : "Average" + }, + { + "operation" : "min", + "column" : "temperature", + "alias" : "Minimum" + }, + { + "operation" : "max", + "column" : "temperature", + "alias" : "Maximum" + } + ] + } + +Would result in the following output + +.. code-block:: JSON + + { + "count": 1, + "rows" : [ + { + "Average" : 21.8, + "Minimum" : 18.4, + "Maximum" : 22.6 + } + ] + } + +When the column that is being aggregated contains a JSON document rather than a simple value then the column property is replaced with a json property and the object defines the properties within the json document in the database field that will be used for aggregation. + +The following is an example of a payload that will query the readings data and return aggregations of the JSON property rate from within the column reading. The column reading is a JSON blob within the database. + +.. code-block:: JSON + + { + "where" : { + "column" : "asset_code", + "condition" : "=", + "value" : "MyAsset" + }, + "aggregate" : [ + { + "operation" : "min", + "json" : { + "column" : "reading", + "properties" : "rate" + }, + "alias" : "Minimum" + }, + { + "operation" : "max", + "json" : { + "column" : "reading", + "properties" : "rate" + }, + "alias" : "Maximum" + }, + { + "operation" : "avg", + "json" : { + "column" : "reading", + "properties" : "rate" + }, + "alias" : "Average" + } + ], + "group" : "asset_code" + } + +Grouping +######## + +Grouping of records can be achieved by adding a group property to the JSON document, the value of the group property is the column name to group on. + +.. code-block:: console + + "group" : "" + +Sorting +####### + +Where the output is required to be sorted a sort object may be added to the JSON document. This contains a column to sort on and a direction for the sort “asc” or “desc”. + +.. code-block:: console + + "sort" : { + "column" : "c1", + "direction" : "asc" + } + +It is also possible to apply multiple sort operations, in which case the sort property becomes an ordered array of objects rather than a single object + +.. code-block:: console + + "sort" : [ + { + "column" : "c1", + "direction" : "asc" + }, + { + "column" : "c3", + "direction" : "asc" + } + ] + +.. 
note:: + + The direction property is optional and if omitted will default to ascending order. + +Limit +##### + +A limit property can be included that will limit the number of rows returned to no more than the value of the limit property. + +.. code-block:: console + + "limit" : + + +Creating Time Series Data +######################### + +The timebucket mechanism in the storage layer allows data that includes a timestamp value to be extracted in timestamp order, grouped over a fixed period of time. + +The time bucket directive allows a timestamp column to be defined, the size of each time bucket, in seconds, an optional date format for the timestamp written in the results and an optional alias for the timestamp property that is written. + +.. code-block:: console + + "timebucket" : { + "timestamp" : "user_ts", + "size" : "5", + "format" : "DD-MM-YYYY HH24:MI:SS", + "alias" : "bucket" + } + +If no size element is present then the default time bucket size is 1 second. + +This produces a grouping of data results, therefore it is expected to be used in conjunction with aggregates to extract data results. The following example is the complete payload that would be used to extract assets from the readings interface + +.. code-block:: JSON + + { + "where" : { + "column" : "asset_code", + "condition" : "=", + "value" : "MyAsset" + }, + "aggregate" : [ + { + "operation" : "min", + "json" : { + "column" : "reading", + "properties" : "rate" + }, + "alias" : "Minimum" + }, + { + "operation" : "max", + "json" : { + "column" : "reading", + "properties" : "rate" + }, + "alias" : "Maximum" + }, + { + "operation" : "avg", + "json" : { + "column" : "reading", + "properties" : "rate" + }, + "alias" : "Average" + } + ], + "timebucket" : { + "timestamp" : "user_ts", + "size" : "30", + "format" : "DD-MM-YYYY HH24:MI:SS", + "alias" : "Time" + } + } + +In this case the payload would be sent in a PUT request to the URL /storage/reading/query and the returned values would contain the reading data for the asset called MyAsset which has a sensor value rate in the JSON payload it returns. The data would be aggregated in 30 second time buckets and the return values would be in the JSON format shown below. + +.. code-block:: JSON + + { + "count":2, + "Rows":[ + { + "Minimum" : 2, + "Maximum" : 96, + "Average" : 47.9523809523809, + "asset_code" : "MyAsset", + "Time" : "11-10-20177 15:10:50" + }, + { + "Minimum" : 1, + "Maximum" : 98, + "Average" : 53.7721518987342, + "asset_code" : "MyAsset", + "Time" : "11-10-20177 15:11:20" + } + ] + } + +Joining Tables +############## + +Joins can be created between tables using the join object. The JSON object contains a table name, a column to join on in the table of the query itself and an optional column in the joined table. It also allows a query to be added that may define a where condition to select columns in the joined table and a returns object to define which rows should be used from that table and how to name them. + +The following example joins the table called attributes to the table given in the URL of the request. It uses a column called parent_id in the attributes table to join to the column id in the table given in the request. If the column name in both tables is the same then there is no need to give the column field in the table object, the column name can be given in the on field instead. + +.. 
code-block:: JSON + + { + "join" : { + "table" : { + "name" : "attributes", + "column" : "parent_id" + }, + "on" : "id", + "query" : { + "where" : { + "column" : "name", + "condition" : "=", + "value" : "MyName" + + }, + "return" : [ + "parent_id", + { + "column" : "name", + "alias" : "attribute_name" + }, + { + "column" : "value", + "alias" : "attribute_value" + } + ] + } + } + } + +Assuming no additional where conditions or return constraints on the main table query, this would yields SQL of the form + +.. code-block:: SQL + + select t1.*, t2.parent_id, t2.name as "attribute_name", t2.value as "attribute_value" from parent t1, attributes t2 where t1.id = t2.parent_id and t2.name = "MyName"; + +Joins may be nested, allowing more than two tables to be joined. Assume again we have a parent table that contains items and an attributes table that contains attributes of those items. We wish to return the items that have an attribute called MyName and a colour. We need to join the attributes table twice to get the requests we require. The JSON payload would be as follows + +.. code-block:: JSON + + { + "join" : { + "table" : { + "name" : "attributes", + "column" : "parent_id" + }, + "on" : "id", + "query" : { + "where" : { + "column" : "name", + "condition" : "=", + "value" : "MyName" + + }, + "return" : [ + "parent_id", + { + "column" : "value", + "alias" : "my_name" + } + ] + "join" : { + "table" : { + "name" : "attributes", + "column" : "parent_id" + }, + "on" : "id", + "query" : { + "where" : { + "column" : "name", + "condition" : "=", + "value" : "colour" + + }, + "return" : [ + "parent_id", + { + "column" : "value", + "alias" : "colour" + } + ] + } + } + } + } + } + +And the resultant SQL query would be + +.. code-block:: SQL + + select t1.*, t2.parent_id, t2.value as "my_name", t3.value as "colour" from parent t1, attributes t2, attributes t3 where t1.id = t2.parent_id and t2.name = "MyName" and t1.id = t3.parent_id and t3.name = "colour"; + +JSON Predicate Schema +##################### + +The following is the JSON schema definition for the predicate encoding. + +.. 
code-block:: JSON + + { + "$schema": "http://json-schema.org/draft-04/schema#", + "definitions": {}, + "id": "http://example.com/example.json", + "properties": { + "group": { + "id": "/properties/group", + "type": "string" + }, + "sort": { + "id": "/properties/sort", + "properties": { + "column": { + "id": "/properties/sort/properties/column", + "type": "string" + }, + "direction": { + "id": "/properties/sort/properties/direction", + "type": "string" + } + }, + "type": "object" + }, + "aggregate": { + "id": "/properties/aggregate", + "properties": { + "column": { + "id": "/properties/aggregate/properties/column", + "type": "string" + }, + "operation": { + "id": "/properties/sort/properties/operation", + "type": "string" + } + }, + "type": "object" + }, + "properties": { + "limit": { + "id": "/properties/limit", + "type": "number" + } + "where": { + "id": "/properties/where", + "properties": { + "and": { + "id": "/properties/where/properties/and", + "properties": { + "column": { + "id": "/properties/where/properties/and/properties/column", + "type": "string" + }, + "condition": { + "id": "/properties/where/properties/and/properties/condition", + "type": "string" + }, + "value": { + "id": "/properties/where/properties/and/properties/value", + "type": "string" + } + }, + "type": "object" + }, + "column": { + "id": "/properties/where/properties/column", + "type": "string" + }, + "condition": { + "id": "/properties/where/properties/condition", + "type": "string" + }, + "or": { + "id": "/properties/where/properties/or", + "properties": { + "column": { + "id": "/properties/where/properties/or/properties/column", + "type": "string" + }, + "condition": { + "id": "/properties/where/properties/or/properties/condition", + "type": "string" + }, + "value": { + "id": "/properties/where/properties/or/properties/value", + "type": "string" + } + }, + "type": "object" + }, + "value": { + "id": "/properties/where/properties/value", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + } + +Controlling Returned Values +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The common retrieval API and the reading retrieval API can be controlled to return subsets of the data by defining the “columns” to be returned in an optional “return” object in the JSON payload of these entry points. + +Returning Limited Set of Columns +################################ + +An optional “returns” object may be followed by a JSON array that contains the names of the columns to return. + +.. code-block:: console + + "return" : [ "column1", "column2", "column3" ] + +The array may be simple strings that the columns to return or they may be JSON objects which give the column and and an alias for that column + +.. code-block:: console + + "return : [ "column1", { + "column" : "column2", + "alias" : "SecondColumn" + } + ] + + +Individual array items may also be mixed as in the example above. + +Formatting Columns +################## + +When a return object is specified it is also possible to format the returned data, this is particularly applicable to dates. Formatting is done by adding a format property to the column object to be returned. + +.. code-block:: console + + "return" : [ "key", "description", + { + "column" : "ts", + "format" : "DD Mon YYYY", + "alias" : "date" + } + ] + +The format string may be for dates or numeric values. The content of the string for dates is a template pattern consisting of a combination of the following. + +.. 
list-table:: + :widths: 20 80 + :header-rows: 1 + + * - Pattern + - Description + * - HH + - Hour of the day in 12 hour clock + * - HH24 + - Hour of the day in 24 hour clock + * - MI + - Minute value + * - SS + - Seconds value + * - MS + - Milliseconds value + * - US + - Microseconds value + * - SSSS + - Seconds since midnight + * - YYYY + - Year as 4 digits + * - YY + - Year as 2 digits + * - Month + - Full month name + * - Mon + - Month name abbreviated to 3 characters + * - MM + - Month number + * - Day + - Day of the week + * - Dy + - Abbreviated data of the week + * - DDD + - Day of the year + * - DD + - Day of the month + * - D + - Day of the week + * - W + - Week of the year + * - am + - am/pm meridian + + +Return JSON Document Content +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The returns mechanism may also be used to return the properties within a JSON document stored within the database. + +.. code-block:: JSON + + { + "return" : [ + "code", + { + "column" : "ts", + "alias" : "timestamp" + }, + { + "json" : { + "column" : "log", + "properties" : "reason" + }, + "alias" : "myJson" + } + ] + } + +In the example above a database column called json contains a JSON document with the property reason at the base level of the JSON document. The above statement extracts the JSON properties value and returns it in the result set using the property name myJSON. + +To access properties nested more deeply in the JSON document the properties property in the above example can also be an array of JSON property names for each level in the hierarchy. If the column contains a JSON document as below, + +.. code-block:: console + + { + "building" : { + "floor" : { + "room" : { + "number" : 432, + ... + }, + }, + } + } + +To access the room number a return fragment as shown below would be used. + +.. code-block:: JSON + + { + "return" : [ + { + "json" : { + "column" : "street", + "properties" : [ + "building", + "floor", + "room", + "number" + ] + }, + "alias" : "RoomNumber" + } + ] + } + diff --git a/docs/plugin_developers_guide/index.rst b/docs/plugin_developers_guide/index.rst index 9ece7c92df..c455755403 100644 --- a/docs/plugin_developers_guide/index.rst +++ b/docs/plugin_developers_guide/index.rst @@ -18,6 +18,7 @@ Plugin Developer Guide 05_storage_plugins 06_filter_plugins 08_notify_plugins.rst + 08_storage.rst 09_packaging.rst 10_testing 11_WSL2.rst diff --git a/docs/quick_start/index.rst b/docs/quick_start/index.rst index f5b34e472d..13210f62aa 100644 --- a/docs/quick_start/index.rst +++ b/docs/quick_start/index.rst @@ -14,4 +14,5 @@ Quick Start Guide ../OMF backup support + update uninstalling diff --git a/docs/quick_start/installing.rst b/docs/quick_start/installing.rst index ceaabc6128..4329b24d83 100644 --- a/docs/quick_start/installing.rst +++ b/docs/quick_start/installing.rst @@ -15,9 +15,9 @@ Installing Fledge ================== -Fledge is extremely lightweight and can run on inexpensive edge devices, sensors and actuator boards. For the purposes of this manual, we assume that all services are running on a Raspberry Pi running the Raspbian operating system. Be sure your system has plenty of storage available for data readings. +Fledge is extremely lightweight and can run on inexpensive edge devices, sensors and actuator boards. For the purposes of this manual, we assume that all services are running on a Raspberry Pi running the Bullseye operating system. Be sure your system has plenty of storage available for data readings. 
-If your system does not have Raspbian pre-installed, you can find instructions on downloading and installing it at https://www.raspberrypi.org/downloads/raspbian/. After installing Raspbian, ensure you have the latest updates by executing the following commands on your Fledge server:: +If your system does not have a supported version of the Raspberry Pi Operating System pre-installed, you can find instructions on downloading and installing it at https://www.raspberrypi.org/downloads/operating-systems/. After installing a supported operating system, ensure you have the latest updates by executing the following commands on your Fledge server:: sudo apt-get update sudo apt-get upgrade @@ -58,12 +58,12 @@ Once complete you can add the repository itself into the apt configuration file .. code-block:: console - deb http://archives.fledge-iot.org/latest/buster/armv7l/ / + deb http://archives.fledge-iot.org/latest/bullseye/armv7l/ / to the end of the file. .. note:: - Replace `buster` with `stretch` or `bullseye` based on the OS image used. + Replace `bullseye` with the name of the version of the Raspberry Operating System you have installed. - Users with an Intel or AMD system with Ubuntu 18.04 should run @@ -77,9 +77,6 @@ Once complete you can add the repository itself into the apt configuration file sudo add-apt-repository "deb http://archives.fledge-iot.org/latest/ubuntu2004/x86_64/ / " - .. note:: - We do not support the `aarch64` architecture with Ubuntu 20.04 yet. - - Users with an Arm system with Ubuntu 18.04, such as the Odroid board, should run .. code-block:: console @@ -114,21 +111,58 @@ You may also install multiple packages in a single command. To install the base Installing Fledge downloaded packages ###################################### -Assuming you have downloaded the packages from the download link given above. Use SSH to login to the system that will host Fledge services. For each Fledge package that you choose to install, type the following command:: +Assuming you have downloaded the packages from the download link given above. Use SSH to login to the system that will host Fledge services. For each Fledge package that you choose to install, type the following command + +.. code-block:: console - sudo apt -y install PackageName + sudo apt -y install -The key packages to install are the Fledge core and the Fledge User Interface:: +.. note:: - sudo DEBIAN_FRONTEND=noninteractive apt -y install ./fledge-1.8.0-armv7l.deb - sudo apt -y install ./fledge-gui-1.8.0.deb + The downloaded files are named using the package name and the current version of the software. Therefore these names will change over time as new versions are released. At the time of writing the version of the Fledge package is 2.3.0, therefore the package filename is fledge_2.3.0_x86_64.deb on the X86 64bit platform. As a result the filenames shown in the following examples may differ from the names of the files you have downloaded. + +The key packages to install are the Fledge core and the Fledge Graphical User Interface + +.. code-block:: console -You will need to install one of more South plugins to acquire data. You can either do this now or when you are adding the data source. For example, to install the plugin for the Sense HAT sensor board, type:: + sudo DEBIAN_FRONTEND=noninteractive apt -y install ./fledge_2.3.0_x86_64.deb + sudo apt -y install ./fledge-gui_2.3.0.deb + +You will need to install one of more South plugins to acquire data. You can either do this now or when you are adding the data source. 
For example, to install the plugin for the Sense HAT sensor board, type + +.. code-block:: console + + sudo apt -y install ./fledge-south-sensehat_2.3.0_armv7l.deb + +.. note:: - sudo apt -y install ./fledge-south-sensehat-1.8.0-armv7l.deb + In this case we are showing the name for a package on the Raspberry Pi platform. The sensehat plugin is not supported on all platforms as it requires Raspberry Pi specific hardware connections. You may also need to install one or more North plugins to transmit data. Support for OSIsoft PI and OCS are included with the Fledge core package, so you don't need to install anything more if you are sending data to only these systems. +Firewall Configuration +###################### + +If you are installing packages within a fire walled environment you will need to open a number of locations for outgoing connections. This will vary depending upon how you install the packages. + +If you are downloading or installing packages on the fire walled machine, that machine will need to access *archives.fledge-iot.org* to be able to pull the Fledge packages. This will use the standard HTTP port, port 80. + +It is also recommended that you allow the machine to access the source of packages for your Linux installation. This allows you to keep the machine updated with important patches and also for the installation of any Linux packages that are required by Fledge or the plugins that you load. + +As part of the installation of the Python components of Fledge a number of Python packages are installed using the *pip* utility. In order to allow this you need to open access to a set of locations that pip will pull packages from. The set of locations required is + + - python.org + + - pypi.org + + - pythonhosted.org + +In all cases the standard HTTPS port, 443, is used for communication and is the only port that needs to be opened. + +.. note:: + + If you download packages on a different machine and copy them to your machine behind the fire wall you must still open the access for pip to the Python package locations. + Checking package installation ############################# diff --git a/docs/quick_start/update.rst b/docs/quick_start/update.rst new file mode 100644 index 0000000000..67cdba90e7 --- /dev/null +++ b/docs/quick_start/update.rst @@ -0,0 +1,45 @@ +.. Images +.. |alert| image:: ../images/alert.jpg + +Package Updates +=============== + +Fledge will periodically check for updates to the various packages that are installed. If updates are available then this will be indicated by a status indicating on the bar at the top of the Fledge GUI. + ++---------+ +| |alert| | ++---------+ + +Clicking on the *bell* icon will display the current system alerts, including the details of the packages available to be updated. + +Installing Updates +------------------ + +Updates must either be installed manually from the command line or via the Fledge API. To update via the API a call to the */fledge/update* should be made using the PUT method. + +.. code-block:: console + + curl -X PUT http://localhost:8081/fledge/update + +If the Fledge instance has been configured to require authentication then a valid authentication token must be passed in the request header and that authentication token must by for a user with administration rights on the instance. + +.. code-block:: console + + curl -H "authorization: " -X PUT http://localhost:8081/fledge/update + +Manual updates can be down from the command line using the appropriate package manager for your Linux host. 
If using the *apt* package manager then the command would be + +.. code-block:: console + + apt install --only-upgrade 'fledge*' + +Or for the *yum* package manager + +.. code-block:: console + + yum upgrade 'fledge*' + +.. note:: + + These commands should be executed as the root user or using the sudo command. + diff --git a/docs/scripts/fledge_plugin_list b/docs/scripts/fledge_plugin_list index d06e0a3d85..0b9319882e 100755 --- a/docs/scripts/fledge_plugin_list +++ b/docs/scripts/fledge_plugin_list @@ -22,10 +22,12 @@ print("\n".join(fRepos)); fledge_wip_repos=$(curl -sX GET -H "$header" -H "Accept: application/vnd.github.mercy-preview+json" https://api.github.com/search/repositories?q=topic:wip+org:fledge-iot) fledge_poc_repos=$(curl -sX GET -H "$header" -H "Accept: application/vnd.github.mercy-preview+json" https://api.github.com/search/repositories?q=topic:poc+org:fledge-iot) fledge_internal_repos=$(curl -sX GET -H "$header" -H "Accept: application/vnd.github.mercy-preview+json" https://api.github.com/search/repositories?q=topic:internal+org:fledge-iot) +fledge_obsolete_repos=$(curl -sX GET -H "$header" -H "Accept: application/vnd.github.mercy-preview+json" https://api.github.com/search/repositories?q=topic:obsolete+org:fledge-iot) fledge_wip_repos_name=$(echo ${fledge_wip_repos} | python3 -c "$fetchTopicReposPyScript" | sort -f) fledge_poc_repos_name=$(echo ${fledge_poc_repos} | python3 -c "$fetchTopicReposPyScript" | sort -f) fledge_internal_repos_name=$(echo ${fledge_internal_repos} | python3 -c "$fetchTopicReposPyScript" | sort -f) -export EXCLUDE_FLEDGE_TOPIC_REPOSITORIES=$(echo ${fledge_wip_repos_name} ${fledge_poc_repos_name} ${fledge_internal_repos_name} | sort -f) +fledge_obsolete_repos_name=$(echo ${fledge_obsolete_repos} | python3 -c "$fetchTopicReposPyScript" | sort -f) +export EXCLUDE_FLEDGE_TOPIC_REPOSITORIES=$(echo ${fledge_wip_repos_name} ${fledge_poc_repos_name} ${fledge_internal_repos_name} ${fledge_obsolete_repos_name} | sort -f) echo "EXCLUDED FLEDGE TOPIC REPOS LIST: $EXCLUDE_FLEDGE_TOPIC_REPOSITORIES" fetchFledgeReposPyScript=' import os,json,sys;\ diff --git a/docs/scripts/plugin_and_service_documentation b/docs/scripts/plugin_and_service_documentation index 523122b066..344ffe5b1f 100644 --- a/docs/scripts/plugin_and_service_documentation +++ b/docs/scripts/plugin_and_service_documentation @@ -72,10 +72,12 @@ print("\n".join(fRepos)); fledge_wip_repos=$(curl -sX GET -H "$header" -H "Accept: application/vnd.github.mercy-preview+json" https://api.github.com/search/repositories?q=topic:wip+org:fledge-iot) fledge_poc_repos=$(curl -sX GET -H "$header" -H "Accept: application/vnd.github.mercy-preview+json" https://api.github.com/search/repositories?q=topic:poc+org:fledge-iot) fledge_internal_repos=$(curl -sX GET -H "$header" -H "Accept: application/vnd.github.mercy-preview+json" https://api.github.com/search/repositories?q=topic:internal+org:fledge-iot) +fledge_obsolete_repos=$(curl -sX GET -H "$header" -H "Accept: application/vnd.github.mercy-preview+json" https://api.github.com/search/repositories?q=topic:obsolete+org:fledge-iot) fledge_wip_repos_name=$(echo ${fledge_wip_repos} | python3 -c "$fetchTopicReposPyScript" | sort -f) fledge_poc_repos_name=$(echo ${fledge_poc_repos} | python3 -c "$fetchTopicReposPyScript" | sort -f) fledge_internal_repos_name=$(echo ${fledge_internal_repos} | python3 -c "$fetchTopicReposPyScript" | sort -f) -export EXCLUDE_FLEDGE_TOPIC_REPOSITORIES=$(echo ${fledge_wip_repos_name} ${fledge_poc_repos_name} ${fledge_internal_repos_name} | 
sort -f) +fledge_obsolete_repos_name=$(echo ${fledge_obsolete_repos} | python3 -c "$fetchTopicReposPyScript" | sort -f) +export EXCLUDE_FLEDGE_TOPIC_REPOSITORIES=$(echo ${fledge_wip_repos_name} ${fledge_poc_repos_name} ${fledge_internal_repos_name} ${fledge_obsolete_repos_name} | sort -f) echo "EXCLUDED FLEDGE TOPIC REPOS LIST: $EXCLUDE_FLEDGE_TOPIC_REPOSITORIES" fetchFledgeReposPyScript=' import os,json,sys;\ @@ -144,7 +146,7 @@ do echo '.. include:: ../../fledge-north-OMF.rst' > plugins/${name}/index.rst # Append OMF.rst to the end of the file rather than including it so that we may edit the links to prevent duplicates cat OMF.rst >> plugins/${name}/index.rst - sed -i -e 's/Naming_Scheme/Naming_Scheme_plugin/' -e 's/Linked_Types/Linked_Types_Plugin/' -e 's/Edge_Data_Store/Edge_Data_Store_OMF_Endpoint/' -e 's/_Connector_Relay/PI_Connector_Relay/' plugins/${name}/index.rst + sed -i -e 's/Naming_Scheme/Naming_Scheme_plugin/' -e 's/Linked_Types/Linked_Types_Plugin/' -e 's/Edge_Data_Store/Edge_Data_Store_OMF_Endpoint/' -e 's/_Connector_Relay/PI_Connector_Relay/' -e 's/.. _Numeric Data Types/.. _Numeric_Data_Types/' plugins/${name}/index.rst elif [[ $repo == "fledge-rule-DataAvailability" ]]; then name="fledge-rule-DataAvailability" echo " ${name}/index" >> $dest @@ -182,3 +184,62 @@ do dest=services/index.rst plugin_and_service_doc $repo $dest "services" done + +# Cross Referencing list of plugins +plugins_path=$(pwd)/plugins + +# HashMap used for storing keywords and repos +declare -A KEYWORDS +for dir in $plugins_path/* +do + dir_name=$(echo $dir | sed 's/^.*fledge-/fledge-/') + if [[ $dir_name == *fledge-* ]]; then + if [ -f $plugins_path/$dir_name/keywords ]; then + keywords=$(cat $plugins_path/$dir_name/keywords | sed -e "s/,/ /g") + for k in $keywords + do + KEYWORDS+=(["$k"]+="$dir_name ") + done + fi + fi +done + +function get_repos_list_by_keywords() { + DIR_NAME="$1" + REPOSITORIES_LIST="" + for i in "${!KEYWORDS[@]}" + do + repos_val=$(echo ${KEYWORDS[$i]} | grep -w "$DIR_NAME") + if [[ $repos_val != "" ]]; then + repos_result+=$(echo "$repos_val ") + fi + done + REPOSITORIES_LIST=$(echo $repos_result | sed -e "s/$DIR_NAME//g" | xargs -n 1 | sort -u | xargs) + echo "$REPOSITORIES_LIST" +} + +# See Also section added as per installed plugins directory path +for dir in $plugins_path/* +do + dir_name=$(echo $dir | sed 's/^.*fledge-/fledge-/') + if [[ $dir_name == *fledge-* ]]; then + if [ -f $plugins_path/$dir_name/keywords ]; then + result=$(get_repos_list_by_keywords "$dir_name") + echo "For $dir_name: $result" + if [[ -n "$result" ]]; then + cat >> $plugins_path/$dir_name/index.rst << EOFPLUGINS + +See Also +-------- +EOFPLUGINS + for r in $result + do + # Add link and description to the plugin + description=$(cat $(pwd)/fledge_plugins.rst | grep -A1 -w "plugins/$r/index.html" | grep -v "$r") + echo " \`$r <../$r/index.html>\`_ $description" >> $plugins_path/$dir_name/index.rst + echo -e "\n" >> $plugins_path/$dir_name/index.rst + done + fi + fi + fi +done diff --git a/docs/storage.rst b/docs/storage.rst index 121ab60d1c..762d81a822 100644 --- a/docs/storage.rst +++ b/docs/storage.rst @@ -1,6 +1,5 @@ .. Images .. |storage_01| image:: images/storage_01.jpg -.. |storage_02| image:: images/storage_02.jpg .. |storage_03| image:: images/storage_03.jpg .. |sqlite_01| image:: images/sqlite_storage_configuration.jpg .. |purge_01| image:: images/purge_01.jpg @@ -56,14 +55,10 @@ user interface to set the storage engine and its options. 
- Using the user interface to configure the storage, select the *Configuration* item in the left hand menu bar. - +--------------+ - | |storage_01| | - +--------------+ - - - In the category pull down menu select *Advanced*. + - In the category tree select *Advanced* and under that select *Storage*. +--------------+ - | |storage_02| | + | |storage_01| | +--------------+ - To change the storage plugin to use for both configuration and readings enter the name of the new plugin in the *Storage Plugin* entry field. If *Readings Plugin* is left empty then the storage plugin will also be used to store reading data. The default set of plugins installed with Fledge that can be used as *Storage Plugin* values are: @@ -80,15 +75,20 @@ user interface to set the storage engine and its options. - The *Management Port* and *Service Port* options allow fixed ports to be assigned to the storage service. These settings are for debugging purposes only and the values should be set to 0 in normal operation. -Note: Additional storage engines may be installed to extend the set -that is delivered with the standard Fledge installation. These will be -documented in the packages that provide the storage plugin. -Storage plugin configurations are not dynamic and Fledge *must* be -restarted after changing these values. Changing the plugin used to store -readings will *not* cause the data in the previous storage system to be -migrated to the new storage system and this data may be lost if it has -not been sent onward from Fledge. +.. note:: + + Additional storage engines may be installed to extend the set + that is delivered with the standard Fledge installation. These will be + documented in the packages that provide the storage plugin. + + Storage plugin configurations are not dynamic and Fledge *must* be + restarted after changing these values. Changing the plugin used to store + readings will *not* cause the data in the previous storage system to be + migrated to the new storage system and this data may be lost if it has + not been sent onward from Fledge. + + If selecting the Postgres storage engine then PostgreSQL must be installed and running, with a fledge user created, in order for Fledge to start successfully. SQLite Plugin Configuration --------------------------- diff --git a/docs/tuning_fledge.rst b/docs/tuning_fledge.rst index 0fc0483072..7a3ac21cee 100644 --- a/docs/tuning_fledge.rst +++ b/docs/tuning_fledge.rst @@ -47,6 +47,8 @@ The south services within Fledge each have a set of advanced configuration optio - *Reading Rate* - The rate at which polling occurs for this south service. This parameter only has effect if your south plugin is polled, asynchronous south services do not use this parameter. The units are defined by the setting of the *Reading Rate Per* item. + - *Asset Tracker Update* - This controls how frequently the asset tracker flushes the cache of asset tracking information to the storage layer. It is a value expressed in milliseconds. The asset tracker only writes updates; therefore, if you have a fixed set of assets flowing in a pipeline the asset tracker will only write any data the first time each asset is seen and will then perform no further writes. If you have variability in your assets or asset structure the asset tracker will be more active and it becomes more useful to tune this parameter. + - *Reading Rate Per* - This defines the units to be used in the *Reading Rate* value. It allows the selection of per *second*, *minute* or *hour*.
- *Poll Type* - This defines the mechanism used to control the poll requests that will be sent to the plugin. Three options are currently available: interval polling, fixed time polling and polling on demand. @@ -102,7 +104,9 @@ Performance counters are collected in the service and a report is written once p - The number of samples of the counter collected within the current minute -In the current release the performance counters can only be retrieved by director access to the configuration and statistics database, they are stored in the *monitors* table. Future releases will include tools for the retrieval and analysis of these performance counters. +In the current release the performance counters can only be retrieved by direct access to the configuration and statistics database, where they are stored in the *monitors* table, or via the REST API. Future releases will include tools for the retrieval and analysis of these performance counters. + +To access the performance counters via the REST API, use the entry point /fledge/monitors to retrieve all counters, or /fledge/monitor/{service name} to retrieve counters for a single service. When collection is enabled the following counters will be collected for the south service that is enabled. @@ -184,6 +188,8 @@ In a similar way to the south services, north services and tasks also have advan - *Data block size* - This defines the number of readings that will be sent to the north plugin for each call to the *plugin_send* entry point. This allows the performance of the north data pipeline to be adjusted, with larger block sizes increasing the performance, by reducing overhead, but at the cost of requiring more memory in the north service or task to buffer the data as it flows through the pipeline. Setting this value too high may cause issues for certain of the north plugins that have limitations on the number of messages they can handle within a single block. + - *Asset Tracker Update* - This controls how frequently the asset tracker flushes the cache of asset tracking information to the storage layer. It is a value expressed in milliseconds. The asset tracker only writes updates; therefore, if you have a fixed set of assets flowing in a pipeline the asset tracker will only write any data the first time each asset is seen and will then perform no further writes. If you have variability in your assets or asset structure the asset tracker will be more active and it becomes more useful to tune this parameter. + - *Performance Counters* - This option allows for collection of performance counters that can be used to help tune the north service. Performance Counters @@ -201,7 +207,133 @@ Performance counters are collected in the service and a report is written once p - The number of samples of the counter collected within the current minute -In the current release the performance counters can only be retrieved by director access to the configuration and statistics database, they are stored in the *monitors* table. Future releases will include tools for the retrieval and analysis of these performance counters. +In the current release the performance counters can only be retrieved by direct access to the configuration and statistics database, where they are stored in the *monitors* table, or via the REST API. Future releases will include tools for the retrieval and analysis of these performance counters.
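The *monitors* table mentioned above can be inspected directly when the default SQLite storage engine is in use. The following is a minimal sketch only: the database path (*$FLEDGE_DATA/fledge.db*) and the exact column layout of the *monitors* table are assumptions based on the description above, not something defined by this change.

.. code-block:: python

    # Sketch: read a few rows of performance counter data directly from the
    # configuration and statistics database. The database location and the
    # column layout of the 'monitors' table are assumptions; adjust them to
    # match your installation before relying on this.
    import os
    import sqlite3

    data_dir = os.getenv("FLEDGE_DATA", "/usr/local/fledge/data")
    conn = sqlite3.connect(os.path.join(data_dir, "fledge.db"))
    conn.row_factory = sqlite3.Row
    try:
        for row in conn.execute("SELECT * FROM monitors LIMIT 10"):
            print(dict(row))
    finally:
        conn.close()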
+ +To access the performance counters via the REST API use the entry point */fledge/monitors* to retrieve all counters, or */fledge/monitor/{service name}* to retrieve counters for a single service. + +.. code-block:: bash + + $ curl -s http://localhost:8081/fledge/monitors | jq + { + "monitors": [ + { + "monitor": "storedReadings", + "values": [ + { + "average": 102, + "maximum": 102, + "minimum": 102, + "samples": 20, + "timestamp": "2024-02-19 16:33:46.690", + "service": "si" + }, + { + "average": 102, + "maximum": 102, + "minimum": 102, + "samples": 20, + "timestamp": "2024-02-19 16:34:46.713", + "service": "si" + }, + { + "average": 102, + "maximum": 102, + "minimum": 102, + "samples": 20, + "timestamp": "2024-02-19 16:35:46.736", + "service": "si" + } + ] + }, + { + "monitor": "readLatency", + "values": [ + { + "average": 2055, + "maximum": 2064, + "minimum": 2055, + "samples": 20, + "timestamp": "2024-02-19 16:33:46.698", + "service": "si" + }, + { + "average": 2056, + "maximum": 2068, + "minimum": 2053, + "samples": 20, + "timestamp": "2024-02-19 16:34:46.719", + "service": "si" + }, + { + "average": 2058, + "maximum": 2079, + "minimum": 2056, + "samples": 20, + "timestamp": "2024-02-19 16:35:46.743", + "service": "si" + } + ] + }, + { + "monitor": "ingestCount", + "values": [ + { + "average": 34, + "maximum": 34, + "minimum": 34, + "samples": 60, + "timestamp": "2024-02-19 16:33:46.702", + "service": "si" + }, + { + "average": 34, + "maximum": 34, + "minimum": 34, + "samples": 60, + "timestamp": "2024-02-19 16:34:46.724", + "service": "si" + }, + { + "average": 34, + "maximum": 34, + "minimum": 34, + "samples": 60, + "timestamp": "2024-02-19 16:35:46.748", + "service": "si" + } + ] + }, + { + "monitor": "queueLength", + "values": [ + { + "average": 55, + "maximum": 100, + "minimum": 34, + "samples": 60, + "timestamp": "2024-02-19 16:33:46.706", + "service": "si" + }, + { + "average": 55, + "maximum": 100, + "minimum": 34, + "samples": 60, + "timestamp": "2024-02-19 16:34:46.729", + "service": "si" + }, + { + "average": 55, + "maximum": 100, + "minimum": 34, + "samples": 60, + "timestamp": "2024-02-19 16:35:46.753", + "service": "si" + } + ] + } + ] + } When collection is enabled the following counters will be collected for the south service that is enabled. @@ -268,6 +400,17 @@ The Fledge core contains a scheduler that is used for running periodic tasks, th Individual tasks have a setting that they may use to stop multiple instances of the same task running in parallel. This also helps protect the system from runaway tasks. +Startup Ordering +---------------- + +The Fledge scheduler also provides for ordering the startup sequence of the various services within a Fledge instance. This ensures that the support services are started before any south or north services are started, with the south services started before the north services. + +There is no ordering within the south or north services, with all south services being started in a single block and all north services started in a single block. + +The order in which a service is started is controlled by assigning a priority to the service. This priority is a numeric value and services are started based on this value. The lower the value the earlier in the sequence the service is started. + +Priorities are stored in the database table, scheduled_processes. There is currently no user interface to modify the priority of scheduled processes, but it may be changed by direct access to the database. 
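As a concrete illustration of the direct database access described above, the sketch below lowers the priority value of a single scheduled process so that it starts earlier. It is illustrative only: the *priority* column name, the *fledge.db* location and the process name shown are assumptions rather than details given by this change, and the new ordering only takes effect the next time Fledge is started.

.. code-block:: python

    # Sketch: change the startup priority of one scheduled process by direct
    # database access. The table name 'scheduled_processes' comes from the text
    # above; the 'priority' column name, database path and process name below
    # are assumptions to be checked against your schema.
    import os
    import sqlite3

    data_dir = os.getenv("FLEDGE_DATA", "/usr/local/fledge/data")
    conn = sqlite3.connect(os.path.join(data_dir, "fledge.db"))
    try:
        with conn:  # commits on success, rolls back on error
            conn.execute(
                "UPDATE scheduled_processes SET priority = ? WHERE name = ?",
                (50, "south_c"),  # hypothetical priority value and process name
            )
    finally:
        conn.close()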
Future versions of Fledge may add an interface to allow for the tuning of process startup priorities. + Storage ======= @@ -331,6 +474,8 @@ The storage plugins to use can be selected in the *Advanced* section of the *Con - **Log Level**: This controls the level at which the storage plugin will output logs. +- **Timeout**: Sets the timeout value in seconds for each request to the storage layer. This causes a timeout error to be returned to a client if a storage call takes longer than the specified value. + Changes will be saved once the *save* button is pressed. Fledge uses a mechanism whereby this data is not only saved in the configuration database, but also cached to a file called *storage.json* in the *etc* directory of the data directory. This is required such that Fledge can find the configuration database during the boot process. If the configuration becomes corrupt for some reason simply removing this file and restarting Fledge will cause the default configuration to be restored. The location of the Fledge data directory will depend upon how you installed Fledge and the environment variables used to run Fledge. - Installation from a package will usually put the data directory in */usr/local/fledge/data*. However this can be overridden by setting the *$FLEDGE_DATA* environment variable to point at a different location. @@ -368,6 +513,8 @@ The storage plugin configuration can be found in the *Advanced* section of the * - **Purge Exclusion**: This is not a performance setting, but allows a number of assets to be exempted from the purge process. This value is a comma separated list of asset names that will be excluded from the purge operation. +- **Vacuum Interval**: The interval between execution of vacuum operations on the database, expressed in hours. A vacuum operation is used to reclaim space occupied in the database by data that has been deleted. + sqlitelb Configuration ###################### @@ -389,6 +536,10 @@ The storage plugin configuration can be found in the *Advanced* section of the * Although the pool size denotes the number of parallel operations that can take place, database locking considerations may reduce the number of actual operations in progress at any point in time. +- **Vacuum Interval**: The interval between execution of vacuum operations on the database, expressed in hours. A vacuum operation is used to reclaim space occupied in the database by data that has been deleted. + +- **Purge Block Size**: The maximum number of rows that will be deleted within a single transaction when performing a purge operation on the readings data. Large block sizes are potentially the most efficient in terms of the time to complete the purge operation; however, this will increase database contention as a database lock is required that will cause any ingest operations to be stalled until the purge completes. By setting a lower block size the purge will take longer, but ingest operations can be interleaved with the purging of blocks. + postgres Configuration ###################### @@ -434,3 +585,6 @@ The storage plugin configuration can be found in the *Advanced* section of the * - **Persist Data**: Controls the persisting of the in-memory database on shutdown. If enabled the in-memory database will be persisted on shutdown of Fledge and reloaded when Fledge is next started. Selecting this option will slow down the shutdown and startup processing for Fledge. - **Persist File**: This defines the name of the file to which the in-memory database will be persisted.
+ + - **Purge Block Size**: The maximum number of rows that will be deleted within a single transactions when performing a purge operation on the readings data. Large block sizes are potential the most efficient in terms of the time to complete the purge operation, however this will increase database contention as a database lock is required that will cause any ingest operations to be stalled until the purge completes. By setting a lower block size the purge will take longer, nut ingest operations can be interleaved with the purging of blocks. + diff --git a/python/.coveragerc b/python/.coveragerc deleted file mode 100644 index 7ed5194bfd..0000000000 --- a/python/.coveragerc +++ /dev/null @@ -1,12 +0,0 @@ -# .coveragerc to control coverage.py -[run] -omit = - /*/tests/* - /*/venv/* - /*/.tox/* - __template__.py - -[report] - -[html] -directory = htmlcov \ No newline at end of file diff --git a/python/fledge/common/alert_manager.py b/python/fledge/common/alert_manager.py new file mode 100644 index 0000000000..ba63125c95 --- /dev/null +++ b/python/fledge/common/alert_manager.py @@ -0,0 +1,134 @@ +# -*- coding: utf-8 -*- + +# FLEDGE_BEGIN +# See: http://fledge-iot.readthedocs.io/ +# FLEDGE_END + +from fledge.common.logger import FLCoreLogger +from fledge.common.storage_client.payload_builder import PayloadBuilder + +__author__ = "Ashish Jabble" +__copyright__ = "Copyright (c) 2024 Dianomic Systems Inc." +__license__ = "Apache 2.0" +__version__ = "${VERSION}" + +_logger = FLCoreLogger().get_logger(__name__) + +class AlertManagerSingleton(object): + _shared_state = {} + + def __init__(self): + self.__dict__ = self._shared_state + + +class AlertManager(AlertManagerSingleton): + storage_client = None + alerts = [] + urgency = {"Critical": 1, "High": 2, "Normal": 3, "Low": 4} + + def __init__(self, storage_client=None): + AlertManagerSingleton.__init__(self) + if not storage_client: + from fledge.services.core import connect + self.storage_client = connect.get_storage_async() + else: + self.storage_client = storage_client + + async def get_all(self): + """ Get all alerts from storage """ + try: + q_payload = PayloadBuilder().SELECT("key", "message", "urgency", "ts").ALIAS( + "return", ("ts", 'timestamp')).FORMAT("return", ("ts", "YYYY-MM-DD HH24:MI:SS.MS")).payload() + storage_result = await self.storage_client.query_tbl_with_payload('alerts', q_payload) + result = [] + if 'rows' in storage_result: + for row in storage_result['rows']: + tmp = {"key": row['key'], + "message": row['message'], + "urgency": self._urgency_name_by_value(row['urgency']), + "timestamp": row['timestamp'] + } + result.append(tmp) + self.alerts = result + except Exception as ex: + raise Exception(ex) + else: + return self.alerts + + async def get_by_key(self, name): + """ Get an alert by key """ + key_found = [a for a in self.alerts if a['key'] == name] + if key_found: + return key_found[0] + try: + q_payload = PayloadBuilder().SELECT("key", "message", "urgency", "ts").ALIAS( + "return", ("ts", 'timestamp')).FORMAT("return", ("ts", "YYYY-MM-DD HH24:MI:SS.MS")).WHERE( + ["key", "=", name]).payload() + results = await self.storage_client.query_tbl_with_payload('alerts', q_payload) + alert = {} + if 'rows' in results: + if len(results['rows']) > 0: + row = results['rows'][0] + alert = {"key": row['key'], + "message": row['message'], + "urgency": self._urgency_name_by_value(row['urgency']), + "timestamp": row['timestamp'] + } + if not alert: + raise KeyError('{} alert not found.'.format(name)) + except KeyError as err: + msg = 
str(err.args[0]) + raise KeyError(msg) + else: + return alert + + async def add(self, params): + """ Add an alert """ + response = None + try: + payload = PayloadBuilder().INSERT(**params).payload() + insert_api_result = await self.storage_client.insert_into_tbl('alerts', payload) + if insert_api_result['response'] == 'inserted' and insert_api_result['rows_affected'] == 1: + response = {"alert": params} + self.alerts.append(params) + except Exception as ex: + raise Exception(ex) + else: + return response + + async def delete(self, key=None): + """ Delete an entry from storage """ + try: + payload = {} + message = "Nothing to delete." + key_exists = -1 + if key is not None: + key_exists = [index for index, item in enumerate(self.alerts) if item['key'] == key] + if key_exists: + payload = PayloadBuilder().WHERE(["key", "=", key]).payload() + else: + raise KeyError + result = await self.storage_client.delete_from_tbl("alerts", payload) + if 'rows_affected' in result: + if result['response'] == "deleted" and result['rows_affected']: + if key is None: + message = "Delete all alerts." + self.alerts = [] + else: + message = "{} alert is deleted.".format(key) + if key_exists: + del self.alerts[key_exists[0]] + except KeyError: + raise KeyError + except Exception as ex: + raise Exception(ex) + else: + return message + + def _urgency_name_by_value(self, value): + try: + name = list(self.urgency.keys())[list(self.urgency.values()).index(value)] + except: + name = "UNKNOWN" + return name + diff --git a/python/fledge/common/configuration_manager.py b/python/fledge/common/configuration_manager.py index 57d1885bca..ce2f22711a 100644 --- a/python/fledge/common/configuration_manager.py +++ b/python/fledge/common/configuration_manager.py @@ -35,9 +35,10 @@ # MAKE UPPER_CASE _valid_type_strings = sorted(['boolean', 'integer', 'float', 'string', 'IPv4', 'IPv6', 'X509 certificate', 'password', - 'JSON', 'URL', 'enumeration', 'script', 'code', 'northTask', 'ACL', 'bucket']) + 'JSON', 'URL', 'enumeration', 'script', 'code', 'northTask', 'ACL', 'bucket', + 'list', 'kvlist']) _optional_items = sorted(['readonly', 'order', 'length', 'maximum', 'minimum', 'rule', 'deprecated', 'displayName', - 'validity', 'mandatory', 'group']) + 'validity', 'mandatory', 'group', 'listSize']) RESERVED_CATG = ['South', 'North', 'General', 'Advanced', 'Utilities', 'rest_api', 'Security', 'service', 'SCHEDULER', 'SMNTR', 'PURGE_READ', 'Notifications'] @@ -268,7 +269,7 @@ async def _validate_category_val(self, category_name, category_val, set_value_va optional_item_entries = {'readonly': 0, 'order': 0, 'length': 0, 'maximum': 0, 'minimum': 0, 'deprecated': 0, 'displayName': 0, 'rule': 0, 'validity': 0, 'mandatory': 0, - 'group': 0} + 'group': 0, 'listSize': 0} expected_item_entries = {'description': 0, 'default': 0, 'type': 0} if require_entry_value: @@ -331,6 +332,140 @@ def get_entry_val(k): raise TypeError('For {} category, entry value must be a string for item name {} and ' 'entry name {}; got {}'.format(category_name, item_name, entry_name, type(entry_val))) + # Validate list type and mandatory items + elif 'type' in item_val and get_entry_val("type") in ('list', 'kvlist'): + if entry_name not in ('properties', 'options') and not isinstance(entry_val, str): + raise TypeError('For {} category, entry value must be a string for item name {} and ' + 'entry name {}; got {}'.format(category_name, item_name, entry_name, + type(entry_val))) + if 'items' not in item_val: + raise KeyError('For {} category, items KV pair must be required ' + 
'for item name {}.'.format(category_name, item_name)) + if entry_name == 'items': + if entry_val not in ("string", "float", "integer", "object", "enumeration"): + raise ValueError("For {} category, items value should either be in string, float, " + "integer, object or enumeration for item name {}".format( + category_name, item_name)) + if entry_val == 'object': + if 'properties' not in item_val: + raise KeyError('For {} category, properties KV pair must be required for item name {}' + ''.format(category_name, item_name)) + prop_val = get_entry_val('properties') + if not isinstance(prop_val, dict): + raise ValueError( + 'For {} category, properties must be JSON object for item name {}; got {}' + .format(category_name, item_name, type(prop_val))) + if not prop_val: + raise ValueError( + 'For {} category, properties JSON object cannot be empty for item name {}' + ''.format(category_name, item_name)) + for kp, vp in prop_val.items(): + if isinstance(vp, dict): + prop_keys = list(vp.keys()) + if not prop_keys: + raise ValueError('For {} category, {} properties cannot be empty for ' + 'item name {}'.format(category_name, kp, item_name)) + diff = {'description', 'default', 'type'} - set(prop_keys) + if diff: + raise ValueError('For {} category, {} properties must have type, description, ' + 'default keys for item name {}'.format(category_name, + kp, item_name)) + else: + raise TypeError('For {} category, Properties must be a JSON object for {} key ' + 'for item name {}'.format(category_name, kp, item_name)) + if entry_val == 'enumeration': + if 'options' not in item_val: + raise KeyError('For {} category, options required for item name {}'.format( + category_name, item_name)) + options = item_val['options'] + if type(options) is not list: + raise TypeError('For {} category, entry value must be a list for item name {} and ' + 'entry name {}; got {}'.format(category_name, item_name, + entry_name, type(options))) + if not options: + raise ValueError( + 'For {} category, options cannot be empty list for item_name {} and ' + 'entry_name {}'.format(category_name, item_name, entry_name)) + default_val = get_entry_val("default") + list_size = -1 + if 'listSize' in item_val: + list_size = item_val['listSize'] + if not isinstance(list_size, str): + raise TypeError('For {} category, listSize type must be a string for item name {}; ' + 'got {}'.format(category_name, item_name, type(list_size))) + if self._validate_type_value('listSize', list_size) is False: + raise ValueError('For {} category, listSize value must be an integer value ' + 'for item name {}'.format(category_name, item_name)) + list_size = int(item_val['listSize']) + msg = "array" if item_val['type'] == 'list' else "KV pair" + if entry_name == 'items' and entry_val not in ("object", "enumeration"): + try: + eval_default_val = ast.literal_eval(default_val) + if item_val['type'] == 'list': + if len(eval_default_val) > len(set(eval_default_val)): + raise ArithmeticError("For {} category, default value {} elements are not " + "unique for item name {}".format(category_name, msg, + item_name)) + else: + if isinstance(eval_default_val, dict) and eval_default_val: + nv = default_val.replace("{", "") + unique_list = [] + for pair in nv.split(','): + if pair: + k, v = pair.split(':') + ks = k.strip() + if ks not in unique_list: + unique_list.append(ks) + else: + raise ArithmeticError("For category {}, duplicate KV pair found " + "for item name {}".format( + category_name, item_name)) + else: + raise ArithmeticError("For {} category, KV pair invalid in 
default " + "value for item name {}".format( + category_name, item_name)) + if list_size >= 0: + if len(eval_default_val) > list_size: + raise ArithmeticError("For {} category, default value {} list size limit to " + "{} for item name {}".format(category_name, msg, + list_size, item_name)) + except ArithmeticError as err: + raise ValueError(err) + except: + raise TypeError("For {} category, default value should be passed {} list in string " + "format for item name {}".format(category_name, msg, item_name)) + type_check = str + if entry_val == 'integer': + type_check = int + elif entry_val == 'float': + type_check = float + type_mismatched_message = ("For {} category, all elements should be of same {} type " + "in default value for item name {}").format(category_name, + type_check, item_name) + if item_val['type'] == 'kvlist': + if not isinstance(eval_default_val, dict): + raise TypeError("For {} category, KV pair invalid in default value for item name {}" + "".format(category_name, item_name)) + for k, v in eval_default_val.items(): + try: + eval_s = v if entry_val == "string" else ast.literal_eval(v) + except: + raise ValueError(type_mismatched_message) + if not isinstance(eval_s, type_check): + raise ValueError(type_mismatched_message) + else: + for s in eval_default_val: + try: + eval_s = s if entry_val == "string" else ast.literal_eval(s) + except: + raise ValueError(type_mismatched_message) + if not isinstance(eval_s, type_check): + raise ValueError(type_mismatched_message) + d = {entry_name: entry_val} + expected_item_entries.update(d) + if entry_name in ('properties', 'options'): + d = {entry_name: entry_val} + expected_item_entries.update(d) else: if type(entry_val) is not str: raise TypeError('For {} category, entry value must be a string for item name {} and ' @@ -350,8 +485,8 @@ def get_entry_val(k): 'For {} category, A default value must be given for {}'.format(category_name, item_name)) elif entry_name == 'minimum' or entry_name == 'maximum': - if (self._validate_type_value('integer', entry_val) or self._validate_type_value('float', - entry_val)) is False: + if (self._validate_type_value('integer', entry_val) or + self._validate_type_value('float', entry_val)) is False: raise ValueError('For {} category, entry value must be an integer or float for item name ' '{}; got {}'.format(category_name, entry_name, type(entry_val))) elif entry_name in ('displayName', 'group', 'rule', 'validity'): @@ -359,7 +494,8 @@ def get_entry_val(k): raise ValueError('For {} category, entry value must be string for item name {}; got {}' .format(category_name, entry_name, type(entry_val))) else: - if self._validate_type_value('integer', entry_val) is False: + if (self._validate_type_value('integer', entry_val) or + self._validate_type_value('listSize', entry_val)) is False: raise ValueError('For {} category, entry value must be an integer for item name {}; got {}' .format(category_name, entry_name, type(entry_val))) @@ -1623,7 +1759,7 @@ def _str_to_ipaddress(item_val): if _type == 'boolean': return _str_to_bool(_value) - elif _type == 'integer': + elif _type in ('integer', 'listSize'): return _str_to_int(_value) elif _type == 'float': return _str_to_float(_value) @@ -1695,21 +1831,20 @@ def _validate_value_per_optional_attribute(self, item_name, storage_value_entry, def in_range(n, start, end): return start <= n <= end # start and end inclusive - config_item_type = storage_value_entry['type'] - if config_item_type == 'string': + def _validate_length(val): if 'length' in storage_value_entry: - if 
len(new_value_entry) > int(storage_value_entry['length']): + if len(val) > int(storage_value_entry['length']): raise TypeError('For config item {} you cannot set the new value, beyond the length {}'.format( item_name, storage_value_entry['length'])) - if config_item_type == 'integer' or config_item_type == 'float': + def _validate_min_max(_type, val): if 'minimum' in storage_value_entry and 'maximum' in storage_value_entry: - if config_item_type == 'integer': - _new_value = int(new_value_entry) + if _type == 'integer': + _new_value = int(val) _min_value = int(storage_value_entry['minimum']) _max_value = int(storage_value_entry['maximum']) else: - _new_value = float(new_value_entry) + _new_value = float(val) _min_value = float(storage_value_entry['minimum']) _max_value = float(storage_value_entry['maximum']) @@ -1717,22 +1852,104 @@ def in_range(n, start, end): raise TypeError('For config item {} you cannot set the new value, beyond the range ({},{})'.format( item_name, storage_value_entry['minimum'], storage_value_entry['maximum'])) elif 'minimum' in storage_value_entry: - if config_item_type == 'integer': - _new_value = int(new_value_entry) + if _type == 'integer': + _new_value = int(val) _min_value = int(storage_value_entry['minimum']) else: - _new_value = float(new_value_entry) + _new_value = float(val) _min_value = float(storage_value_entry['minimum']) if _new_value < _min_value: raise TypeError('For config item {} you cannot set the new value, below {}'.format(item_name, _min_value)) elif 'maximum' in storage_value_entry: - if config_item_type == 'integer': - _new_value = int(new_value_entry) + if _type == 'integer': + _new_value = int(val) _max_value = int(storage_value_entry['maximum']) else: - _new_value = float(new_value_entry) + _new_value = float(val) _max_value = float(storage_value_entry['maximum']) if _new_value > _max_value: raise TypeError('For config item {} you cannot set the new value, above {}'.format(item_name, - _max_value)) \ No newline at end of file + _max_value)) + + config_item_type = storage_value_entry['type'] + if config_item_type == 'string': + _validate_length(new_value_entry) + + if config_item_type == 'integer' or config_item_type == 'float': + _validate_min_max(config_item_type, new_value_entry) + + if config_item_type in ("list", "kvlist"): + if storage_value_entry['items'] not in ('object', 'enumeration'): + msg = "array" if config_item_type == 'list' else "KV pair" + try: + eval_new_val = ast.literal_eval(new_value_entry) + except: + raise TypeError("For config item {} value should be passed {} list in string format".format( + item_name, msg)) + + if config_item_type == 'list': + if len(eval_new_val) > len(set(eval_new_val)): + raise ValueError("For config item {} elements are not unique".format(item_name)) + else: + if isinstance(eval_new_val, dict) and eval_new_val: + nv = new_value_entry.replace("{", "") + unique_list = [] + for pair in nv.split(','): + if pair: + k, v = pair.split(':') + ks = k.strip() + if ks not in unique_list: + unique_list.append(ks) + else: + raise TypeError("For config item {} duplicate KV pair found".format(item_name)) + else: + raise TypeError("For config item {} KV pair invalid".format(item_name)) + if 'listSize' in storage_value_entry: + list_size = int(storage_value_entry['listSize']) + if list_size >= 0: + if len(eval_new_val) > list_size: + raise TypeError("For config item {} value {} list size limit to {}".format( + item_name, msg, list_size)) + + type_mismatched_message = "For config item {} all elements should be of 
same {} type".format( + item_name, storage_value_entry['items']) + type_check = str + if storage_value_entry['items'] == 'integer': + type_check = int + elif storage_value_entry['items'] == 'float': + type_check = float + + if config_item_type == 'kvlist': + if not isinstance(eval_new_val, dict): + raise TypeError("For config item {} KV pair invalid".format(item_name)) + for k, v in eval_new_val.items(): + try: + eval_s = v + if storage_value_entry['items'] in ("integer", "float"): + eval_s = ast.literal_eval(v) + _validate_min_max(storage_value_entry['items'], eval_s) + elif storage_value_entry['items'] == 'string': + _validate_length(eval_s) + except TypeError as err: + raise ValueError(err) + except: + raise ValueError(type_mismatched_message) + if not isinstance(eval_s, type_check): + raise ValueError(type_mismatched_message) + else: + for s in eval_new_val: + try: + eval_s = s + if storage_value_entry['items'] in ("integer", "float"): + eval_s = ast.literal_eval(s) + _validate_min_max(storage_value_entry['items'], eval_s) + elif storage_value_entry['items'] == 'string': + _validate_length(eval_s) + except TypeError as err: + raise ValueError(err) + except: + raise ValueError(type_mismatched_message) + if not isinstance(eval_s, type_check): + raise ValueError(type_mismatched_message) + diff --git a/python/fledge/common/microservice_management_client/microservice_management_client.py b/python/fledge/common/microservice_management_client/microservice_management_client.py index 247205fe5a..38cb9fbef1 100644 --- a/python/fledge/common/microservice_management_client/microservice_management_client.py +++ b/python/fledge/common/microservice_management_client/microservice_management_client.py @@ -390,3 +390,34 @@ def create_asset_tracker_event(self, asset_event): self._management_client_conn.close() response = json.loads(res) return response + + def get_alert_by_key(self, key): + url = "/fledge/alert/{}".format(key) + self._management_client_conn.request(method='GET', url=url) + r = self._management_client_conn.getresponse() + if r.status != 404: + if r.status in range(400, 500): + _logger.error("For URL: %s, Client error code: %d, Reason: %s", url, r.status, r.reason) + raise client_exceptions.MicroserviceManagementClientError(status=r.status, reason=r.reason) + if r.status in range(500, 600): + _logger.error("For URL: %s, Server error code: %d, Reason: %s", url, r.status, r.reason) + raise client_exceptions.MicroserviceManagementClientError(status=r.status, reason=r.reason) + res = r.read().decode() + self._management_client_conn.close() + response = json.loads(res) + return response + + def add_alert(self, params): + url = '/fledge/alert' + self._management_client_conn.request(method='POST', url=url, body=json.dumps(params)) + r = self._management_client_conn.getresponse() + if r.status in range(401, 500): + _logger.error("For URL: %s, Client error code: %d, Reason: %s", url, r.status, r.reason) + raise client_exceptions.MicroserviceManagementClientError(status=r.status, reason=r.reason) + if r.status in range(500, 600): + _logger.error("For URL: %s, Server error code: %d, Reason: %s", url, r.status, r.reason) + raise client_exceptions.MicroserviceManagementClientError(status=r.status, reason=r.reason) + res = r.read().decode() + self._management_client_conn.close() + response = json.loads(res) + return response \ No newline at end of file diff --git a/python/fledge/common/utils.py b/python/fledge/common/utils.py index 09bf56a1a8..f11cb409da 100644 --- a/python/fledge/common/utils.py +++ 
b/python/fledge/common/utils.py @@ -6,6 +6,7 @@ """Common utilities""" +import functools import datetime __author__ = "Amarendra K Sinha" @@ -132,3 +133,18 @@ def get_open_ssl_version(version_string=True): """ import ssl return ssl.OPENSSL_VERSION if version_string else ssl.OPENSSL_VERSION_INFO + + +def make_async(fn): + """ turns a sync function to async function using threads """ + from concurrent.futures import ThreadPoolExecutor + import asyncio + pool = ThreadPoolExecutor() + + @functools.wraps(fn) + def wrapper(*args, **kwargs): + future = pool.submit(fn, *args, **kwargs) + return asyncio.wrap_future(future) # make it awaitable + + return wrapper + diff --git a/python/fledge/services/common/microservice_management/routes.py b/python/fledge/services/common/microservice_management/routes.py index 9c21704bc4..6a27848b7c 100644 --- a/python/fledge/services/common/microservice_management/routes.py +++ b/python/fledge/services/common/microservice_management/routes.py @@ -73,6 +73,10 @@ def setup(app, obj, is_core=False): app.router.add_route('GET', '/fledge/ACL/{acl_name}', obj.get_control_acl) + # alerts + app.router.add_route('GET', '/fledge/alert/{key}', obj.get_alert) + app.router.add_route('POST', '/fledge/alert', obj.add_alert) + # Proxy API setup for a microservice proxy.setup(app) diff --git a/python/fledge/services/core/api/alerts.py b/python/fledge/services/core/api/alerts.py new file mode 100644 index 0000000000..9523e39e7b --- /dev/null +++ b/python/fledge/services/core/api/alerts.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- + +# FLEDGE_BEGIN +# See: http://fledge-iot.readthedocs.io/ +# FLEDGE_END + +import json +from aiohttp import web + +from fledge.common.logger import FLCoreLogger +from fledge.services.core import server + +__author__ = "Ashish Jabble" +__copyright__ = "Copyright (c) 2024, Dianomic Systems Inc." 
+__license__ = "Apache 2.0" +__version__ = "${VERSION}" + +_help = """ + ---------------------------------------------------------------- + | GET DELETE | /fledge/alert | + | DELETE | /fledge/alert/{key} | + ---------------------------------------------------------------- +""" +_LOGGER = FLCoreLogger().get_logger(__name__) + +def setup(app): + app.router.add_route('GET', '/fledge/alert', get_all) + app.router.add_route('DELETE', '/fledge/alert', delete) + app.router.add_route('DELETE', '/fledge/alert/{key}', delete) + + + +async def get_all(request: web.Request) -> web.Response: + """ GET list of alerts + + :Example: + curl -sX GET http://localhost:8081/fledge/alert + """ + try: + alerts = await server.Server._alert_manager.get_all() + except Exception as ex: + msg = str(ex) + _LOGGER.error(ex, "Failed to get alerts.") + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) + else: + return web.json_response({"alerts": alerts}) + +async def delete(request: web.Request) -> web.Response: + """ DELETE all alerts + + :Example: + curl -sX DELETE http://localhost:8081/fledge/alert + curl -sX DELETE http://localhost:8081/fledge/alert/{key} + """ + key = request.match_info.get('key', None) + try: + if key: + response = await server.Server._alert_manager.delete(key=key) + else: + response = await server.Server._alert_manager.delete() + except KeyError: + msg = '{} alert not found.'.format(key) + raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) + except Exception as ex: + msg = str(ex) + _LOGGER.error(ex, "Failed to delete alerts.") + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) + else: + return web.json_response({"message": response}) \ No newline at end of file diff --git a/python/fledge/services/core/api/common.py b/python/fledge/services/core/api/common.py index 44bc68ba12..6087d51592 100644 --- a/python/fledge/services/core/api/common.py +++ b/python/fledge/services/core/api/common.py @@ -93,7 +93,11 @@ def services_health_litmus_test(): return 'green' status_color = services_health_litmus_test() - safe_mode = True if server.Server.running_in_safe_mode else False + safe_mode = True + alert_count = 0 + if not server.Server.running_in_safe_mode: + safe_mode = False + alert_count = len(server.Server._alert_manager.alerts) version = get_version() return web.json_response({'uptime': int(since_started), 'dataRead': data_read, @@ -105,7 +109,8 @@ def services_health_litmus_test(): 'ipAddresses': ip_addresses, 'health': status_color, 'safeMode': safe_mode, - 'version': version + 'version': version, + 'alerts': alert_count }) diff --git a/python/fledge/services/core/api/control_service/acl_management.py b/python/fledge/services/core/api/control_service/acl_management.py index 8f3361098b..93252b4e20 100644 --- a/python/fledge/services/core/api/control_service/acl_management.py +++ b/python/fledge/services/core/api/control_service/acl_management.py @@ -17,13 +17,11 @@ from fledge.services.core import connect from fledge.services.core.api.control_service.exceptions import * - __author__ = "Ashish Jabble, Massimiliano Pinto" __copyright__ = "Copyright (c) 2021 Dianomic Systems Inc." 
__license__ = "Apache 2.0" __version__ = "${VERSION}" - _help = """ -------------------------------------------------------------- | GET POST | /fledge/ACL | @@ -88,31 +86,21 @@ async def add_acl(request: web.Request) -> web.Response: curl -H "authorization: $AUTH_TOKEN" -sX POST http://localhost:8081/fledge/ACL -d '{"name": "testACL", "service": [{"name": "IEC-104"}, {"type": "notification"}], "url": [{"url": "/fledge/south/operation", "acl": [{"type": "Northbound"}]}]}' + curl -H "authorization: $AUTH_TOKEN" -sX POST http://localhost:8081/fledge/ACL -d '{"name": "testACL-2", + "service": [{"name": "IEC-104"}], "url": []}' + curl -H "authorization: $AUTH_TOKEN" -sX POST http://localhost:8081/fledge/ACL -d '{"name": "testACL-3", + "service": [{"type": "Notification"}], "url": []}' + curl -H "authorization: $AUTH_TOKEN" -sX POST http://localhost:8081/fledge/ACL -d '{"name": "testACL-4", + "service": [{"name": "IEC-104"}, {"type": "notification"}], "url": [{"url": "/fledge/south/operation", + "acl": [{"type": "Northbound"}]}, {"url": "/fledge/south/write", + "acl": [{"type": "Northbound"}, {"type": "Southbound"}]}]}' """ try: data = await request.json() - name = data.get('name', None) - service = data.get('service', None) - url = data.get('url', None) - if name is None: - raise ValueError('ACL name is required.') - else: - if not isinstance(name, str): - raise TypeError('ACL name must be a string.') - name = name.strip() - if name == "": - raise ValueError('ACL name cannot be empty.') - if service is None: - raise ValueError('service parameter is required.') - if not isinstance(service, list): - raise TypeError('service must be a list.') - # check each item in list is an object of, either 'type'| or 'name'| value pair - if url is None: - raise ValueError('url parameter is required.') - if not isinstance(url, list): - raise TypeError('url must be a list.') - # check URLs list has objects with URL and a list of ACL where each acl item here is an object of - # 'type'| value pair + columns = await _check_params(data, action="POST") + name = columns['name'] + service = columns['service'] + url = columns['url'] result = {} storage = connect.get_storage_async() payload = PayloadBuilder().SELECT("name").WHERE(['name', '=', name]).payload() @@ -153,26 +141,23 @@ async def add_acl(request: web.Request) -> web.Response: @has_permission("admin") async def update_acl(request: web.Request) -> web.Response: """ Update an access control list - Only the service and URL parameters can be updated. + Only the service and URL parameters can be updated. 
:Example: - curl -H "authorization: $AUTH_TOKEN" -sX PUT http://localhost:8081/fledge/ACL/testACL -d '{"service": [{"name": "Sinusoid"}]}' - curl -H "authorization: $AUTH_TOKEN" -sX PUT http://localhost:8081/fledge/ACL/testACL -d '{"service": [], - "url": [{"url": "/fledge/south/operation", "acl": [{"type": "Southbound"}]}]}' + curl -H "authorization: $AUTH_TOKEN" -sX PUT http://localhost:8081/fledge/ACL/testACL + -d '{"service": [{"name": "Sinusoid"}]}' + curl -H "authorization: $AUTH_TOKEN" -sX PUT http://localhost:8081/fledge/ACL/testACL + -d '{"url": [{"url": "/fledge/south/write", "acl": []}]}' + curl -H "authorization: $AUTH_TOKEN" -sX PUT http://localhost:8081/fledge/ACL/testACL + -d '{"service": [{"type": "core"}], "url": [{"url": "/fledge/south/write", "acl": [{"type": "Northbound"}]}]}' """ try: name = request.match_info.get('acl_name', None) - + data = await request.json() service = data.get('service', None) url = data.get('url', None) - if service is None and url is None: - raise ValueError("Nothing to update for the given payload.") - - if service is not None and not isinstance(service, list): - raise TypeError('service must be a list.') - if url is not None and not isinstance(url, list): - raise TypeError('url must be a list.') + await _check_params(data, action="PUT") storage = connect.get_storage_async() payload = PayloadBuilder().SELECT("name", "service", "url").WHERE(['name', '=', name]).payload() result = await storage.query_tbl_with_payload('control_acl', payload) @@ -185,7 +170,7 @@ async def update_acl(request: web.Request) -> web.Response: set_values["service"] = json.dumps(service) if url is not None: set_values["url"] = json.dumps(url) - + update_query.SET(**set_values).WHERE(['name', '=', name]) update_result = await storage.update_tbl("control_acl", update_query.payload()) if 'response' in update_result: @@ -248,7 +233,7 @@ async def delete_acl(request: web.Request) -> web.Response: scripts = await acl_handler.get_all_entities_for_a_acl(name, "script") if services or scripts: message = "{} is associated with an entity. So cannot delete." 
\ - " Make sure to remove all the usages of this ACL.".format(name) + " Make sure to remove all the usages of this ACL.".format(name) _logger.warning(message) return web.HTTPConflict(reason=message, body=json.dumps({"message": message})) @@ -343,7 +328,7 @@ async def attach_acl_to_service(request: web.Request) -> web.Response: 'displayName': 'Service ACL', 'default': '' } - } + } # Create category content with ACL default set to '' await cf_mgr.create_category(category_name=security_cat_name, category_description=category_desc, category_value=category_value) @@ -367,7 +352,7 @@ async def attach_acl_to_service(request: web.Request) -> web.Response: # Call service security endpoint with attachACL = acl_name data = {'ACL': acl_name} await cf_mgr.update_configuration_item_bulk(security_cat_name, data) - + return web.json_response({"message": "ACL with name {} attached to {} service successfully.".format( acl_name, svc_name)}) @@ -408,10 +393,10 @@ async def detach_acl_from_service(request: web.Request) -> web.Response: , 'ACL': { - 'description': 'Service ACL for {}'.format(svc_name), - 'type': 'ACL', - 'displayName': 'Service ACL', - 'default': '' + 'description': 'Service ACL for {}'.format(svc_name), + 'type': 'ACL', + 'displayName': 'Service ACL', + 'default': '' } } # Call service security endpoint with detachACL = '' @@ -441,3 +426,88 @@ async def detach_acl_from_service(request: web.Request) -> web.Response: raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: return web.json_response({"message": message}) + +async def _check_params(data, action): + final = {} + name = data.get('name', None) + service = data.get('service', None) + url = data.get('url', None) + + if action == "PUT": + if service is None and url is None: + raise ValueError("Nothing to update for the given payload.") + + if action == "POST": + if name is None: + raise ValueError('ACL name is required.') + else: + if not isinstance(name, str): + raise TypeError('ACL name must be a string.') + name = name.strip() + if name == "": + raise ValueError('ACL name cannot be empty.') + final['name'] = name + if action == "POST": + if service is None: + raise ValueError('service parameter is required.') + if action == "POST" or (action == "PUT" and service is not None): + if not isinstance(service, list): + raise TypeError('service must be a list.') + if not service: + raise ValueError('service list cannot be empty.') + is_type_seen = False + is_name_seen = False + for s in service: + if not isinstance(s, dict): + raise TypeError("service elements must be an object.") + if not s: + raise ValueError('service object cannot be empty.') + if 'type' in list(s.keys()) and not is_type_seen: + if not isinstance(s['type'], str): + raise TypeError("Value must be a string for service type.") + s['type'] = s['type'].strip() + if s['type'] == "": + raise ValueError('Value cannot be empty for service type.') + is_type_seen = True + if 'name' in list(s.keys()) and not is_name_seen: + if not isinstance(s['name'], str): + raise TypeError("Value must be a string for service name.") + s['name'] = s['name'].strip() + if s['name'] == "": + raise ValueError('Value cannot be empty for service name.') + is_name_seen = True + if not is_type_seen and not is_name_seen: + raise ValueError('Either type or name Key-Value Pair is missing for service.') + final['service'] = service + + if action == "POST": + if url is None: + raise ValueError('url parameter is required.') + if action == "POST" or (action == "PUT" and url is not 
None): + if not isinstance(url, list): + raise TypeError('url must be a list.') + if url: + for u in url: + is_url_seen = False + if not isinstance(u, dict): + raise TypeError("url elements must be an object.") + if 'url' in u: + if not isinstance(u['url'], str): + raise TypeError("Value must be a string for url object.") + u['url'] = u['url'].strip() + if u['url'] == "": + raise ValueError('Value cannot be empty for url object.') + is_url_seen = True + if 'acl' in u: + if not isinstance(u['acl'], list): + raise TypeError("Value must be an array for acl object.") + if u['acl']: + for uacl in u['acl']: + if not isinstance(uacl, dict): + raise TypeError("acl elements must be an object.") + if not uacl: + raise ValueError('acl object cannot be empty.') + if not is_url_seen: + raise ValueError('url child Key-Value Pair is missing.') + final['url'] = url + return final diff --git a/python/fledge/services/core/api/control_service/entrypoint.py b/python/fledge/services/core/api/control_service/entrypoint.py index 13a0a216ee..0304bfe1f7 100644 --- a/python/fledge/services/core/api/control_service/entrypoint.py +++ b/python/fledge/services/core/api/control_service/entrypoint.py @@ -443,9 +443,9 @@ async def update_request(request: web.Request) -> web.Response: constant_dict = {key: data.get(key, ep_info["constants"][key]) for key in ep_info["constants"]} variables_dict = {key: data.get(key, ep_info["variables"][key]) for key in ep_info["variables"]} params = {**constant_dict, **variables_dict} - if not params: - raise ValueError("Nothing to update as given entrypoint do not have the parameters.") if ep_info['type'] == 'write': + if not params: + raise ValueError("Nothing to update as given entrypoint do not have the parameters.") url = "dispatch/write" dispatch_payload["write"] = params else: diff --git a/python/fledge/services/core/api/control_service/pipeline.py b/python/fledge/services/core/api/control_service/pipeline.py index ffd932aa26..2dcf06ee5f 100644 --- a/python/fledge/services/core/api/control_service/pipeline.py +++ b/python/fledge/services/core/api/control_service/pipeline.py @@ -218,13 +218,13 @@ async def update(request: web.Request) -> web.Response: columns = await _check_parameters(data, request) storage = connect.get_storage_async() if columns: - payload = PayloadBuilder().SET(**columns).WHERE(['cpid', '=', cpid]).payload() + payload = PayloadBuilder().SET(**columns).WHERE(['cpid', '=', pipeline['id']]).payload() await storage.update_tbl("control_pipelines", payload) filters = data.get('filters', None) if filters is not None: # Case: When filters payload is empty then remove all filters if not filters: - await _remove_filters(storage, pipeline['filters'], cpid, pipeline['name']) + await _remove_filters(storage, pipeline['filters'], pipeline['id'], pipeline['name']) else: go_ahead = await _check_filters(storage, filters) if filters else True if go_ahead: @@ -234,7 +234,7 @@ async def update(request: web.Request) -> web.Response: if result_filters['rows']: db_filters = [r['fname'].replace("ctrl_{}_".format(pipeline['name']), '' ) for r in result_filters['rows']] - await _update_filters(storage, cpid, pipeline['name'], filters, db_filters) + await _update_filters(storage, pipeline['id'], pipeline['name'], filters, db_filters) else: raise ValueError('Filters do not exist as per the given list {}'.format(filters)) except ValueError as err: @@ -268,7 +268,7 @@ async def delete(request: web.Request) -> web.Response: storage = connect.get_storage_async() pipeline = await 
_get_pipeline(cpid) # Remove filters if exists and also delete the entry from control_filter table - await _remove_filters(storage, pipeline['filters'], cpid, pipeline['name']) + await _remove_filters(storage, pipeline['filters'], pipeline['id'], pipeline['name']) # Delete entry from control_pipelines payload = PayloadBuilder().WHERE(['cpid', '=', pipeline['id']]).payload() await storage.delete_from_tbl("control_pipelines", payload) @@ -593,11 +593,9 @@ async def _update_filters(storage, cp_id, cp_name, cp_filters, db_filters=None): cf_mgr = ConfigurationManager(storage) new_filters = [] children = [] - - insert_filters = set(cp_filters) - set(db_filters) - update_filters = set(cp_filters) & set(db_filters) - delete_filters = set(db_filters) - set(cp_filters) - + insert_filters = list(filter(lambda x: x not in db_filters, cp_filters)) + update_filters = list(filter(lambda x: x in cp_filters, db_filters)) + delete_filters = list(filter(lambda x: x not in cp_filters, db_filters)) if insert_filters: for fid, fname in enumerate(insert_filters, start=1): # get plugin config of filter diff --git a/python/fledge/services/core/api/control_service/script_management.py b/python/fledge/services/core/api/control_service/script_management.py index dded07370e..1e6f8668db 100644 --- a/python/fledge/services/core/api/control_service/script_management.py +++ b/python/fledge/services/core/api/control_service/script_management.py @@ -522,7 +522,7 @@ def _validate_steps_and_convert_to_str(payload: list) -> str: so that any client can use from there itself. For example: GUI client has also prepared this list by their own to show down in the dropdown. - Therefore if any new/update type is introduced with the current scenario both sides needs to be changed + Therefore, if any new/update type is introduced with the current scenario both sides needs to be changed """ steps_supported_types = ["configure", "delay", "operation", "script", "write"] unique_order_items = [] @@ -539,6 +539,10 @@ def _validate_steps_and_convert_to_str(payload: list) -> str: raise ValueError('order key is missing for {} step.'.format(k)) else: if isinstance(v['order'], int): + if v['order'] < 1: + if v['order'] == 0: + raise ValueError('order cannot be zero.') + raise ValueError('order should be a positive number.') if v['order'] not in unique_order_items: unique_order_items.append(v['order']) else: diff --git a/python/fledge/services/core/api/filters.py b/python/fledge/services/core/api/filters.py index 44ef07c221..ded547fa54 100644 --- a/python/fledge/services/core/api/filters.py +++ b/python/fledge/services/core/api/filters.py @@ -40,7 +40,6 @@ async def create_filter(request: web.Request) -> web.Response: """ Create a new filter with a specific plugin - :Example: curl -X POST http://localhost:8081/fledge/filter -d '{"name": "North_Readings_to_PI_scale_stage_1Filter", "plugin": "scale"}' curl -X POST http://localhost:8081/fledge/filter -d '{"name": "North_Readings_to_PI_scale_stage_1Filter", "plugin": "scale", "filter_config": {"offset":"1","enable":"true"}}' @@ -78,7 +77,6 @@ async def create_filter(request: web.Request) -> web.Response: raise ValueError("This '{}' filter already exists".format(filter_name)) # Load C/Python filter plugin info - #loaded_plugin_info = apiutils.get_plugin_info(plugin_name, dir='filter') try: # Try fetching Python filter plugin_module_path = "{}/python/fledge/plugins/filter/{}".format(_FLEDGE_ROOT, plugin_name) @@ -154,21 +152,18 @@ async def add_filters_pipeline(request: web.Request) -> web.Response: Add 
filter names to "filter" item in {user_name} PUT /fledge/filter/{user_name}/pipeline - + 'pipeline' is the array of filter category names to set into 'filter' default/value properties :Example: set 'pipeline' for user 'NorthReadings_to_PI' - curl -X PUT http://localhost:8081/fledge/filter/NorthReadings_to_PI/pipeline -d - '{ - "pipeline": ["Scale10Filter", "Python_assetCodeFilter"], - }' + curl -X PUT http://localhost:8081/fledge/filter/NorthReadings_to_PI/pipeline -d '["Scale10Filter", "Python_assetCodeFilter"]' Configuration item 'filter' is added to {user_name} or updated with the pipeline list Returns the filter pipeline on success: - {"pipeline": ["Scale10Filter", "Python_assetCodeFilter"]} + {"pipeline": ["Scale10Filter", "Python_assetCodeFilter"]} Query string parameters: - append_filter=true|false Default false @@ -189,7 +184,7 @@ async def add_filters_pipeline(request: web.Request) -> web.Response: }' Delete pipeline: - curl -X PUT -d '{"pipeline": []}' http://localhost:8081/fledge/filter/NorthReadings_to_PI/pipeline + curl -X PUT -d '{"pipeline": []}' http://localhost:8081/fledge/filter/NorthReadings_to_PI/pipeline NOTE: the method also adds the filters category names under parent category {user_name} @@ -227,13 +222,19 @@ async def add_filters_pipeline(request: web.Request) -> web.Response: if category_info is None: raise ValueError("No such '{}' category found.".format(user_name)) + async def _get_filter(f_name): + payload = PayloadBuilder().WHERE(['name', '=', f_name]).payload() + f_result = await storage.query_tbl_with_payload("filters", payload) + if len(f_result["rows"]) == 0: + raise ValueError("No such '{}' filter found in filters table.".format(f_name)) + # Check and validate if all filters in the list exists in filters table for _filter in filter_list: - payload = PayloadBuilder().WHERE(['name', '=', _filter]).payload() - result = await storage.query_tbl_with_payload("filters", payload) - if len(result["rows"]) == 0: - raise ValueError("No such '{}' filter found in filters table.".format(_filter)) - + if isinstance(_filter, list): + for f in _filter: + await _get_filter(f) + else: + await _get_filter(_filter) config_item = "filter" if config_item in category_info: # Check if config_item key has already been added to the category config @@ -254,7 +255,8 @@ async def add_filters_pipeline(request: web.Request) -> web.Response: # Config update for filter pipeline and a change callback after category children creation await cf_mgr.set_category_item_value_entry(user_name, config_item, {'pipeline': new_list}) else: # No existing filters, hence create new item 'config_item' and add the "pipeline" array as a string - new_item = dict({config_item: {'description': 'Filter pipeline', 'type': 'JSON', 'default': {}, 'readonly':'true'}}) + new_item = dict( + {config_item: {'description': 'Filter pipeline', 'type': 'JSON', 'default': {}, 'readonly': 'true'}}) new_item[config_item]['default'] = json.dumps({'pipeline': filter_list}) await _add_child_filters(storage, cf_mgr, user_name, filter_list) await cf_mgr.create_category(category_name=user_name, category_value=new_item, keep_original_items=True) @@ -267,15 +269,26 @@ async def add_filters_pipeline(request: web.Request) -> web.Response: else: # Create Parent-child relation for standalone filter category with service/username # And that way we have the ability to remove the category when we delete the service - await cf_mgr.create_child_category(user_name, filter_list) + f_c = [] + f_c2 = [] + for _filter in filter_list: + if 
isinstance(_filter, list): + for f in _filter: + f_c.append(f) + else: + f_c2.append(_filter) + if f_c: + await cf_mgr.create_child_category(user_name, f_c) + if f_c2: + await cf_mgr.create_child_category(user_name, f_c2) return web.json_response( {'result': "Filter pipeline {} updated successfully".format(json.loads(result['value']))}) except ValueError as err: msg = str(err) - raise web.HTTPNotFound(reason=msg) + raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) except TypeError as err: msg = str(err) - raise web.HTTPBadRequest(reason=msg) + raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) except StorageServerError as e: msg = e.error _LOGGER.exception("Add filters pipeline, caught storage error: {}".format(msg)) @@ -500,7 +513,8 @@ def _delete_keys_from_dict(dict_del: Dict, lst_keys: List[str], deleted_values: try: if parent is not None: if dict_del['type'] == 'JSON': - i_val = json.loads(dict_del[k]) if isinstance(dict_del[k], str) else json.loads(json.dumps(dict_del[k])) + i_val = json.loads(dict_del[k]) if isinstance(dict_del[k], str) else json.loads( + json.dumps(dict_del[k])) else: i_val = dict_del[k] deleted_values.update({parent: i_val}) @@ -518,47 +532,79 @@ async def _delete_child_filters(storage: StorageClientAsync, cf_mgr: Configurati new_list: List[str], old_list: List[str] = []) -> None: # Difference between pipeline and value from storage lists and then delete relationship as per diff delete_children = _diff(new_list, old_list) - for child in delete_children: + async def _delete_relationship(cat_name): try: - filter_child_category_name = "{}_{}".format(user_name, child) + filter_child_category_name = "{}_{}".format(user_name, cat_name) await cf_mgr.delete_child_category(user_name, filter_child_category_name) await cf_mgr.delete_child_category("{} Filters".format(user_name), filter_child_category_name) except: pass - await _delete_configuration_category(storage, "{}_{}".format(user_name, child)) - payload = PayloadBuilder().WHERE(['name', '=', child]).AND_WHERE(['user', '=', user_name]).payload() + await _delete_configuration_category(storage, "{}_{}".format(user_name, cat_name)) + payload = PayloadBuilder().WHERE(['name', '=', cat_name]).AND_WHERE(['user', '=', user_name]).payload() await storage.delete_from_tbl("filter_users", payload) + for child in delete_children: + if isinstance(child, list): + for c in child: + await _delete_relationship(c) + else: + await _delete_relationship(child) async def _add_child_filters(storage: StorageClientAsync, cf_mgr: ConfigurationManager, user_name: str, filter_list: List[str], old_list: List[str] = []) -> None: # Create children categories. Since create_category() does not expect "value" key to be # present in the payload, we need to remove all "value" keys BUT need to add back these # "value" keys to the new configuration. 
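The pipeline payloads handled in this file may now mix plain filter names with nested lists of names, so each handler repeats the same isinstance() walk over the elements. A small sketch of that traversal pattern, using a hypothetical for_each_filter helper and dummy names (not part of the Fledge code):

    import asyncio

    # Apply an async operation to every individual filter name in a pipeline whose
    # elements may be either a single name or a nested list of names.
    async def for_each_filter(pipeline, op):
        for element in pipeline:
            if isinstance(element, list):
                for name in element:
                    await op(name)
            else:
                await op(element)

    async def main():
        async def show(name):
            print("validating filter", name)
        await for_each_filter(["Scale10Filter", ["BranchA", "BranchB"], "MetaFilter"], show)

    asyncio.run(main())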
- for filter_name in filter_list: - filter_config = await cf_mgr.get_category_all_items(category_name="{}_{}".format(user_name, filter_name)) + + async def _create_filter_category(filter_cat_name): + filter_config = await cf_mgr.get_category_all_items(category_name="{}_{}".format( + user_name, filter_cat_name)) # If "username_filter" category does not exist if filter_config is None: - filter_config = await cf_mgr.get_category_all_items(category_name=filter_name) + filter_config = await cf_mgr.get_category_all_items(category_name=filter_cat_name) - filter_desc = "Configuration of {} filter for user {}".format(filter_name, user_name) - new_filter_config, deleted_values = _delete_keys_from_dict(filter_config, ['value'], deleted_values={}, parent=None) - await cf_mgr.create_category(category_name="{}_{}".format(user_name, filter_name), + filter_desc = "Configuration of {} filter for user {}".format(filter_cat_name, user_name) + new_filter_config, deleted_values = _delete_keys_from_dict(filter_config, ['value'], + deleted_values={}, parent=None) + await cf_mgr.create_category(category_name="{}_{}".format(user_name, filter_cat_name), category_description=filter_desc, category_value=new_filter_config, keep_original_items=True) if deleted_values != {}: - await cf_mgr.update_configuration_item_bulk("{}_{}".format(user_name, filter_name), deleted_values) + await cf_mgr.update_configuration_item_bulk("{}_{}".format( + user_name, filter_cat_name), deleted_values) # Remove cat from cache - if filter_name in cf_mgr._cacheManager.cache: - cf_mgr._cacheManager.remove(filter_name) + if filter_cat_name in cf_mgr._cacheManager.cache: + cf_mgr._cacheManager.remove(filter_cat_name) + + # Create filter category + for _fn in filter_list: + if isinstance(_fn, list): + for f in _fn: + await _create_filter_category(f) + else: + await _create_filter_category(_fn) # Create children categories in category_children table - children = ["{}_{}".format(user_name, _filter) for _filter in filter_list] - await cf_mgr.create_child_category(category_name=user_name, children=children) + children = [] + for _filter in filter_list: + if isinstance(_filter, list): + for f in _filter: + child_cat_name = "{}_{}".format(user_name, f) + children.append(child_cat_name) + else: + child_cat_name = "{}_{}".format(user_name, _filter) + children.append(child_cat_name) + await cf_mgr.create_child_category(category_name=user_name, children=children) # Add entries to filter_users table new_added = _diff(old_list, filter_list) for filter_name in new_added: - payload = PayloadBuilder().INSERT(name=filter_name, user=user_name).payload() - await storage.insert_into_tbl("filter_users", payload) + payload = None + if isinstance(filter_name, list): + for f in filter_name: + payload = PayloadBuilder().INSERT(name=f, user=user_name).payload() + else: + payload = PayloadBuilder().INSERT(name=filter_name, user=user_name).payload() + if payload is not None: + await storage.insert_into_tbl("filter_users", payload) diff --git a/python/fledge/services/core/api/plugins/update.py b/python/fledge/services/core/api/plugins/update.py index 490c322466..2c67b7f112 100644 --- a/python/fledge/services/core/api/plugins/update.py +++ b/python/fledge/services/core/api/plugins/update.py @@ -129,7 +129,7 @@ async def update_package(request: web.Request) -> web.Response: if (plugin_name == p['plugin'] and not plugin_type == 'filter') or ( p['plugin'] in filters_used_by and plugin_type == 'filter'): sch_info = await _get_sch_id_and_enabled_by_name(p['service']) - if 
sch_info[0]['enabled'] == 't': + if sch_info and sch_info[0]['enabled'] == 't': status, reason = await server.Server.scheduler.disable_schedule(uuid.UUID(sch_info[0]['id'])) if status: _logger.warning("Disabling {} {} instance, as {} plugin is being updated...".format( @@ -271,7 +271,7 @@ async def update_plugin(request: web.Request) -> web.Response: if (name == p['plugin'] and not _type == 'filter') or ( p['plugin'] in filters_used_by and _type == 'filter'): sch_info = await _get_sch_id_and_enabled_by_name(p['service']) - if sch_info[0]['enabled'] == 't': + if sch_info and sch_info[0]['enabled'] == 't': status, reason = await server.Server.scheduler.disable_schedule(uuid.UUID(sch_info[0]['id'])) if status: _logger.warning("Disabling {} {} instance, as {} plugin is being updated...".format( diff --git a/python/fledge/services/core/api/scheduler.py b/python/fledge/services/core/api/scheduler.py index d3d985630a..f5bc729faf 100644 --- a/python/fledge/services/core/api/scheduler.py +++ b/python/fledge/services/core/api/scheduler.py @@ -431,6 +431,8 @@ async def enable_schedule_with_name(request): except (TypeError, ValueError): raise web.HTTPNotFound(reason="No Schedule with ID {}".format(sch_id)) + # Reset startup priority order + server.Server.scheduler.reset_process_script_priority() status, reason = await server.Server.scheduler.enable_schedule(uuid.UUID(sch_id)) schedule = { @@ -508,6 +510,8 @@ async def enable_schedule(request): except ValueError as ex: raise web.HTTPNotFound(reason="Invalid Schedule ID {}".format(schedule_id)) + # Reset startup priority order + server.Server.scheduler.reset_process_script_priority() status, reason = await server.Server.scheduler.enable_schedule(uuid.UUID(schedule_id)) schedule = { @@ -515,7 +519,6 @@ async def enable_schedule(request): 'status': status, 'message': reason } - return web.json_response(schedule) except (ValueError, ScheduleNotFoundError) as ex: raise web.HTTPNotFound(reason=str(ex)) diff --git a/python/fledge/services/core/api/service.py b/python/fledge/services/core/api/service.py index e15490d48d..a9aa89819b 100644 --- a/python/fledge/services/core/api/service.py +++ b/python/fledge/services/core/api/service.py @@ -176,6 +176,12 @@ async def delete_service(request): # Update deprecated timestamp in asset_tracker await update_deprecated_ts_in_asset_tracker(storage, svc) + + # Delete user alerts + try: + await server.Server._alert_manager.delete(svc) + except: + pass except Exception as ex: raise web.HTTPInternalServerError(reason=str(ex)) else: @@ -289,7 +295,7 @@ async def add_service(request): return web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) # Check If requested service is available for configured repository - services, log_path = await common.fetch_available_packages("service") + services, log_path = await common.fetch_available_packages() if name not in services: raise KeyError('{} service is not available for the given repository'.format(name)) @@ -413,7 +419,10 @@ async def add_service(request): count = await check_scheduled_processes(storage, process_name, script) if count == 0: # Now first create the scheduled process entry for the new service - payload = PayloadBuilder().INSERT(name=process_name, script=script).payload() + column_name = {"name": process_name, "script": script} + if service_type == 'management': + column_name["priority"] = 300 + payload = PayloadBuilder().INSERT(**column_name).payload() try: res = await storage.insert_into_tbl("scheduled_processes", payload) except StorageServerError as 
ex: @@ -486,6 +495,9 @@ async def add_service(request): # if "enabled" is supplied, it gets activated in save_schedule() via is_enabled flag schedule.enabled = False + # Reset startup priority order + server.Server.scheduler.reset_process_script_priority() + # Save schedule await server.Server.scheduler.save_schedule(schedule, is_enabled, dryrun=dryrun) schedule = await server.Server.scheduler.get_schedule_by_name(name) diff --git a/python/fledge/services/core/api/update.py b/python/fledge/services/core/api/update.py index e49e7cb75e..008c91af56 100644 --- a/python/fledge/services/core/api/update.py +++ b/python/fledge/services/core/api/update.py @@ -126,10 +126,10 @@ async def get_updates(request: web.Request) -> web.Response: curl -sX GET http://localhost:8081/fledge/update |jq """ update_cmd = "sudo apt update" - upgradable_pkgs_check_cmd = "apt list --upgradable | grep \^fledge" + upgradable_pkgs_check_cmd = "apt list --upgradable | grep \^fledge | grep -v \^fledge-manage" if utils.is_redhat_based(): update_cmd = "sudo yum check-update" - upgradable_pkgs_check_cmd = "yum list updates | grep \^fledge" + upgradable_pkgs_check_cmd = "yum list updates | grep \^fledge | grep -v \^fledge-manage" update_process = await asyncio.create_subprocess_shell(update_cmd, stdout=asyncio.subprocess.PIPE, diff --git a/python/fledge/services/core/proxy.py b/python/fledge/services/core/proxy.py index ae2e9e8f6b..6ea6308ac4 100644 --- a/python/fledge/services/core/proxy.py +++ b/python/fledge/services/core/proxy.py @@ -10,6 +10,7 @@ from aiohttp import web from fledge.common.logger import FLCoreLogger +from fledge.common.utils import make_async from fledge.services.core import server from fledge.services.core.service_registry.service_registry import ServiceRegistry from fledge.services.core.service_registry import exceptions as service_registry_exceptions @@ -163,6 +164,20 @@ async def _get_service_record_info_along_with_bearer_token(svc_name): return service[0], token +@make_async +def _post_multipart(url, headers, payload): + import requests + from requests_toolbelt.multipart.encoder import MultipartEncoder + from aiohttp.web_request import FileField + multipart_payload = {} + for k, v in payload.items(): + multipart_payload[k] = (v.filename, v.file.read(), 'text/plain') if isinstance(v, FileField) else v + m = MultipartEncoder(fields=multipart_payload) + headers['Content-Type'] = m.content_type + result = requests.post(url, data=m, headers=headers) + return result + + async def _call_microservice_service_api( request: web.Request, protocol: str, address: str, port: int, uri: str, token: str): # Custom Request header @@ -182,15 +197,7 @@ async def _call_microservice_service_api( elif request.method == 'POST': payload = await request.post() if 'multipart/form-data' in request.headers['Content-Type']: - import requests - from requests_toolbelt.multipart.encoder import MultipartEncoder - from aiohttp.web_request import FileField - multipart_payload = {} - for k, v in payload.items(): - multipart_payload[k] = (v.filename, v.file.read(), 'text/plain') if isinstance(v, FileField) else v - m = MultipartEncoder(fields=multipart_payload) - headers['Content-Type'] = m.content_type - r = requests.post(url, data=m, headers=headers) + r = await _post_multipart(url, headers, payload) response = (r.status_code, r.text) if r.status_code not in range(200, 209): _logger.error("POST Request Error: Http status code: {}, reason: {}, response: {}".format( diff --git a/python/fledge/services/core/routes.py 
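In the proxy change above, the blocking requests-based multipart POST is wrapped with make_async so it no longer stalls the aiohttp event loop. A rough, standalone illustration of what a decorator of that kind could look like, built on run_in_executor; this is an assumption for illustration only, not the actual fledge.common.utils implementation:

    import asyncio
    import functools
    import time

    # Run a blocking callable (e.g. requests.post) in the default thread pool so the
    # event loop keeps serving other requests while the upload is in flight.
    def make_async(func):
        @functools.wraps(func)
        async def wrapper(*args, **kwargs):
            loop = asyncio.get_running_loop()
            return await loop.run_in_executor(None, functools.partial(func, *args, **kwargs))
        return wrapper

    @make_async
    def slow_upload(url, payload):
        time.sleep(0.1)          # stand-in for a blocking requests.post(...)
        return {"url": url, "bytes": len(payload)}

    async def main():
        print(await slow_upload("http://example.invalid/api", b"abc"))

    asyncio.run(main())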
b/python/fledge/services/core/routes.py index 469d66af30..cd8806f17b 100644 --- a/python/fledge/services/core/routes.py +++ b/python/fledge/services/core/routes.py @@ -5,7 +5,7 @@ # FLEDGE_END from fledge.services.core import proxy -from fledge.services.core.api import asset_tracker, auth, backup_restore, browser, certificate_store, filters, health, notification, north, package_log, performance_monitor, python_packages, south, support, service, task, update +from fledge.services.core.api import alerts, asset_tracker, auth, backup_restore, browser, certificate_store, filters, health, notification, north, package_log, performance_monitor, python_packages, south, support, service, task, update from fledge.services.core.api import audit as api_audit from fledge.services.core.api import common as api_common from fledge.services.core.api import configuration as api_configuration @@ -267,6 +267,9 @@ def setup(app): # Performance Monitor performance_monitor.setup(app) + # Alerts + alerts.setup(app) + # enable cors support enable_cors(app) diff --git a/python/fledge/services/core/scheduler/scheduler.py b/python/fledge/services/core/scheduler/scheduler.py index 61a5d659ac..37dffe19f5 100644 --- a/python/fledge/services/core/scheduler/scheduler.py +++ b/python/fledge/services/core/scheduler/scheduler.py @@ -104,6 +104,8 @@ def __init__(self): """Maximum age of rows in the task table that have finished, in days""" _DELETE_TASKS_LIMIT = 500 """The maximum number of rows to delete in the tasks table in a single transaction""" + _DEFAULT_PROCESS_SCRIPT_PRIORITY = 999 + """Priority order for process scripts""" _HOUR_SECONDS = 3600 _DAY_SECONDS = 3600 * 24 @@ -234,7 +236,7 @@ async def _wait_for_task_completion(self, task_process: _TaskProcess) -> None: task_process.process.pid, exit_code, len(self._task_processes) - 1, - self._process_scripts[schedule.process_name]) + self._process_scripts[schedule.process_name][0]) schedule_execution = self._schedule_executions[schedule.id] del schedule_execution.task_processes[task_process.task_id] @@ -293,10 +295,22 @@ async def _start_task(self, schedule: _ScheduleRow, dryrun=False) -> None: Raises: EnvironmentError: If the process could not start """ + def _get_delay_in_sec(pname): + if pname == 'dispatcher_c': + val = 3 + elif pname == 'notification_c': + val = 5 + elif pname == 'south_c': + val = 7 + elif pname == 'north_C': + val = 9 + else: + val = 12 + return val # This check is necessary only if significant time can elapse between "await" and # the start of the awaited coroutine. 
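Reading the _start_task changes together: when a scheduled process carries a non-default priority, the scheduler appends a --delay flag whose value is chosen per wrapper script by _get_delay_in_sec. A compact standalone sketch of that composition; the delay table and the 999 default are taken from this diff, everything else is illustrative:

    DEFAULT_PRIORITY = 999
    DELAY_BY_SCRIPT = {"dispatcher_c": 3, "notification_c": 5, "south_c": 7, "north_C": 9}

    # Attach a startup delay to the launch arguments only for prioritised processes.
    def build_args(script, priority, base_args):
        args = list(base_args)
        if priority != DEFAULT_PRIORITY:
            delay = DELAY_BY_SCRIPT.get(script, 12)   # 12 s fallback, as in the diff
            args.append("--delay={}".format(delay))
        return args

    print(build_args("south_c", 100, ["./fledge.services.south", "--port=8081"]))
    # ['./fledge.services.south', '--port=8081', '--delay=7']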
- args = self._process_scripts[schedule.process_name] + args = self._process_scripts[schedule.process_name][0] # add core management host and port to process script args args_to_exec = args.copy() @@ -319,7 +333,12 @@ async def _start_task(self, schedule: _ScheduleRow, dryrun=False) -> None: startToken = ServiceRegistry.issueStartupToken(schedule.name) # Add startup token to args for services args_to_exec.append("--token={}".format(startToken)) - + + if self._process_scripts[schedule.process_name][1] != self._DEFAULT_PROCESS_SCRIPT_PRIORITY: + # With startup Delay + res = _get_delay_in_sec(self._process_scripts[schedule.process_name][0][0].split("/")[1]) + args_to_exec.append("--delay={}".format(res)) + args_to_exec.append("--name={}".format(schedule.name)) if dryrun: args_to_exec.append("--dryrun") @@ -674,18 +693,29 @@ async def _get_process_scripts(self): self._logger.debug('Database command: %s', "scheduled_processes") res = await self._storage_async.query_tbl("scheduled_processes") for row in res['rows']: - self._process_scripts[row.get('name')] = row.get('script') + self._process_scripts[row.get('name')] = (row.get('script'), row.get('priority')) except Exception: self._logger.exception('Query failed: %s', "scheduled_processes") raise async def _get_schedules(self): + def _get_schedule_by_priority(sch_list): + schedules_in_order = [] + for sch in sch_list: + sch['priority'] = self._DEFAULT_PROCESS_SCRIPT_PRIORITY + for name, priority in self._process_scripts.items(): + if name == sch['process_name']: + sch['priority'] = priority[1] + schedules_in_order.append(sch) + sort_sch = sorted(schedules_in_order, key=lambda k: ("priority" not in k, k.get("priority", None))) + self._logger.debug(sort_sch) + return sort_sch + # TODO: Get processes first, then add to Schedule try: self._logger.debug('Database command: %s', 'schedules') res = await self._storage_async.query_tbl("schedules") - - for row in res['rows']: + for row in _get_schedule_by_priority(res['rows']): interval_days, interval_dt = self.extract_day_time_from_interval(row.get('schedule_interval')) interval = datetime.timedelta(days=interval_days, hours=interval_dt.hour, minutes=interval_dt.minute, seconds=interval_dt.second) @@ -878,7 +908,7 @@ async def stop(self): schedule.process_name, task_id, task_process.process.pid, - self._process_scripts[schedule.process_name]) + self._process_scripts[schedule.process_name][0]) try: # We need to terminate the child processes because now all tasks are started vide a script and # this creates two unix processes. 
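The schedule loading above now orders rows by the priority attached to each process script, so core services such as the storage bucket, dispatcher and notification services are started ahead of south and north instances. A minimal sketch of that sort with made-up data (only the ordering matters here):

    DEFAULT_PRIORITY = 999
    process_scripts = {"dispatcher_c": ("services/dispatcher_c", 20),
                       "south_c": ("services/south_c", 100),
                       "backup": ("tasks/backup", DEFAULT_PRIORITY)}

    rows = [{"process_name": "south_c"}, {"process_name": "backup"}, {"process_name": "dispatcher_c"}]
    for row in rows:
        # Tag each schedule row with its process script's priority, defaulting to 999.
        row["priority"] = process_scripts.get(row["process_name"], ("", DEFAULT_PRIORITY))[1]
    rows.sort(key=lambda r: r["priority"])
    print([r["process_name"] for r in rows])   # ['dispatcher_c', 'south_c', 'backup']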
Scheduler can store pid of the parent shell script process only @@ -935,7 +965,7 @@ async def get_scheduled_processes(self) -> List[ScheduledProcess]: for (name, script) in self._process_scripts.items(): process = ScheduledProcess() process.name = name - process.script = script + process.script = script[0] processes.append(process) return processes @@ -1139,7 +1169,7 @@ async def save_schedule(self, schedule: Schedule, is_enabled_modified=None, dryr self._logger.debug('Database command: %s', select_payload) res = await self._storage_async.query_tbl_with_payload("scheduled_processes", select_payload) for row in res['rows']: - self._process_scripts[row.get('name')] = row.get('script') + self._process_scripts[row.get('name')] = (row.get('script'), row.get('priority')) except Exception: self._logger.exception('Select failed: %s', select_payload) @@ -1305,7 +1335,7 @@ async def disable_schedule(self, schedule_id: uuid.UUID, bypass_check=None, reco schedule.process_name, task_id, task_process.process.pid, - self._process_scripts[schedule.process_name]) + self._process_scripts[schedule.process_name][0]) # TODO: FOGL-356 track the last time TERM was sent to each task task_process.cancel_requested = time.time() task_future = task_process.future @@ -1588,7 +1618,7 @@ async def cancel_task(self, task_id: uuid.UUID) -> None: schedule.process_name, task_id, task_process.process.pid, - self._process_scripts[schedule.process_name]) + self._process_scripts[schedule.process_name][0]) try: # We need to terminate the child processes because now all tasks are started vide a script and @@ -1647,3 +1677,8 @@ async def audit_trail_entry(self, old_row, new_row): ) if old_row.time else '00:00:00' old_schedule["day"] = old_row.day if old_row.day else 0 await audit.information('SCHCH', {'schedule': new_row.toDict(), 'old_schedule': old_schedule}) + + def reset_process_script_priority(self): + for k,v in self._process_scripts.items(): + if isinstance(v, tuple): + self._process_scripts[k] = (v[0], self._DEFAULT_PROCESS_SCRIPT_PRIORITY) diff --git a/python/fledge/services/core/server.py b/python/fledge/services/core/server.py index ee4bd61c40..c1e636cc5d 100755 --- a/python/fledge/services/core/server.py +++ b/python/fledge/services/core/server.py @@ -23,6 +23,7 @@ import jwt from fledge.common import logger +from fledge.common.alert_manager import AlertManager from fledge.common.audit_logger import AuditLogger from fledge.common.configuration_manager import ConfigurationManager from fledge.common.storage_client.exceptions import * @@ -321,6 +322,9 @@ class Server: _asset_tracker = None """ Asset tracker """ + _alert_manager = None + """ Alert Manager """ + running_in_safe_mode = False """ Fledge running in Safe mode """ @@ -732,44 +736,38 @@ def _write_pid(cls, api_address, api_port): @classmethod def _reposition_streams_table(cls, loop): - _logger.info("'fledge.readings' is stored in memory and a restarted has occurred, " - "force reset of 'fledge.streams' last_objects") - - configuration = loop.run_until_complete(cls._storage_client_async.query_tbl('configuration')) - rows = configuration['rows'] - if len(rows) > 0: - streams_id = [] - # Identifies the sending process handling the readings table - for _item in rows: - try: - if _item['value']['source']['value'] is not None: - if _item['value']['source']['value'] == "readings": - # Sending process in C++ - try: - streams_id.append(_item['value']['streamId']['value']) - except KeyError: - # Sending process in Python - try: - 
streams_id.append(_item['value']['stream_id']['value']) - except KeyError: - pass - except KeyError: - pass - - # Reset identified rows of the streams table - if len(streams_id) >= 0: - for _stream_id in streams_id: - - # Checks if there is the row in the Stream table to avoid an error during the update - where = 'id={0}'.format(_stream_id) - streams = loop.run_until_complete(cls._readings_client_async.query_tbl('streams', where)) - rows = streams['rows'] + "force reset of last_object column in 'fledge.streams'") - if len(rows) > 0: - payload = payload_builder.PayloadBuilder().SET(last_object=0, ts='now()')\ - .WHERE(['id', '=', _stream_id]).payload() - loop.run_until_complete(cls._storage_client_async.update_tbl("streams", payload)) + def _reset_last_object_in_streams(_stream_id): + payload = payload_builder.PayloadBuilder().SET(last_object=0, ts='now()').WHERE( + ['id', '=', _stream_id]).payload() + loop.run_until_complete(cls._storage_client_async.update_tbl("streams", payload)) + try: + # Find the child categories of parent North + query_payload = payload_builder.PayloadBuilder().SELECT("child").WHERE(["parent", "=", "North"]).payload() + north_children = loop.run_until_complete(cls._storage_client_async.query_tbl_with_payload( + 'category_children', query_payload)) + rows = north_children['rows'] + if len(rows) > 0: + configuration = loop.run_until_complete(cls._storage_client_async.query_tbl('configuration')) + for nc in rows: + for cat in configuration['rows']: + if nc['child'] == cat['key']: + cat_value = cat['value'] + stream_id = cat_value['streamId']['value'] + # reset last_object in streams table as per streamId with following scenarios + # a) if source KV pair is present and having value 'readings' + # b) if source KV pair is not present + if 'source' in cat_value: + source_val = cat_value['source']['value'] + if source_val == 'readings': + _reset_last_object_in_streams(stream_id) + else: + _reset_last_object_in_streams(stream_id) + break + except Exception as ex: + _logger.error(ex, "last_object of 'fledge.streams' reset is failed.") @classmethod def _check_readings_table(cls, loop): @@ -817,6 +815,11 @@ async def _start_asset_tracker(cls): cls._asset_tracker = AssetTracker(cls._storage_client_async) await cls._asset_tracker.load_asset_records() + @classmethod + async def _get_alerts(cls): + cls._alert_manager = AlertManager(cls._storage_client_async) + await cls._alert_manager.get_all() + @classmethod def _start_core(cls, loop=None): if cls.running_in_safe_mode: @@ -927,6 +930,10 @@ def _start_core(cls, loop=None): if not cls.running_in_safe_mode: # Start asset tracker loop.run_until_complete(cls._start_asset_tracker()) + + # Start Alert Manager + loop.run_until_complete(cls._get_alerts()) + # If dispatcher installation: # a) not found then add it as a StartUp service # b) found then check the status of its schedule and take action @@ -1907,6 +1914,9 @@ async def is_dispatcher_running(cls, storage): if sch['process_name'] == 'dispatcher_c' and sch['enabled'] == 'f': _logger.info("Dispatcher service found but not in enabled state. 
" "Therefore, {} schedule name is enabled".format(sch['schedule_name'])) + # reset process_script priority for the service + cls.scheduler._process_scripts['dispatcher_c'] = ( + cls.scheduler._process_scripts['dispatcher_c'][0], 999) await cls.scheduler.enable_schedule(uuid.UUID(sch["id"])) return True elif sch['process_name'] == 'dispatcher_c' and sch['enabled'] == 't': @@ -1987,3 +1997,51 @@ async def get_control_acl(cls, request): request.is_core_mgt = True res = await acl_management.get_acl(request) return res + + @classmethod + async def get_alert(cls, request): + name = request.match_info.get('key', None) + try: + alert = await cls._alert_manager.get_by_key(name) + except KeyError as err: + msg = str(err.args[0]) + return web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) + except Exception as ex: + msg = str(ex) + _logger.error(ex, "Failed to get an alert.") + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) + else: + return web.json_response({"alert": alert}) + + @classmethod + async def add_alert(cls, request): + try: + data = await request.json() + key = data.get("key") + message = data.get("message") + urgency = data.get("urgency") + if any(elem is None for elem in [key, message, urgency]): + msg = 'key, message, urgency post params are required to raise an alert.' + return web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) + if not all(isinstance(i, str) for i in [key, message, urgency]): + msg = 'key, message, urgency KV pair must be passed as string.' + return web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) + urgency = urgency.lower().capitalize() + if urgency not in cls._alert_manager.urgency: + msg = 'Urgency value should be from list {}'.format(list(cls._alert_manager.urgency.keys())) + return web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) + key_exists = [a for a in cls._alert_manager.alerts if a['key'] == key] + if key_exists: + # Delete existing key + await cls._alert_manager.delete(key) + param = {"key": key, "message": message, "urgency": cls._alert_manager.urgency[urgency]} + response = await cls._alert_manager.add(param) + if response is None: + raise Exception + except Exception as ex: + msg = str(ex) + _logger.error(ex, "Failed to add an alert.") + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) + else: + response['alert']['urgency'] = cls._alert_manager._urgency_name_by_value(response['alert']['urgency']) + return web.json_response(response) diff --git a/python/fledge/services/core/service_registry/monitor.py b/python/fledge/services/core/service_registry/monitor.py index 503fd60529..ae9c4112a5 100644 --- a/python/fledge/services/core/service_registry/monitor.py +++ b/python/fledge/services/core/service_registry/monitor.py @@ -192,6 +192,32 @@ async def restart_service(self, service_record): schedule = await server.Server.scheduler.get_schedule_by_name(service_record._name) await server.Server.scheduler.queue_task(schedule.schedule_id) self.restarted_services.remove(service_record._id) + # Raise an alert during restart service + await self.raise_an_alert(server.Server, service_record._name) + + async def raise_an_alert(self, obj, svc_name): + async def _new_alert_entry(restart_count=1): + param = {"key": svc_name, "message": 'The Service {} restarted {} times'.format( + svc_name, restart_count), "urgency": "3"} + await obj._alert_manager.add(param) + + try: + alert = await obj._alert_manager.get_by_key(svc_name) + message = 
alert['message'].strip() + key = alert['key'] + if message.startswith('The Service {} restarted'.format(key)) and message.endswith("times"): + result = [int(s) for s in message.split() if s.isdigit()] + if result: + await obj._alert_manager.delete(key) + await _new_alert_entry(result[-1:][0] + 1) + else: + await _new_alert_entry() + else: + await _new_alert_entry() + except KeyError: + await _new_alert_entry() + except Exception as ex: + self._logger.error(ex, "Failed to raise an alert on restarting {} service.".format(svc_name)) async def start(self): await self._read_config() diff --git a/python/fledge/tasks/purge/purge.py b/python/fledge/tasks/purge/purge.py index 3183407786..6316301ecc 100644 --- a/python/fledge/tasks/purge/purge.py +++ b/python/fledge/tasks/purge/purge.py @@ -199,40 +199,42 @@ async def purge_data(self, config): self._logger.debug("purge_data - flag :{}: last_id :{}: count :{}: operation_type :{}:".format( flag, last_id, result["count"], operation_type)) + # Do the purge by rows first as it is cheaper than doing the purge by age and + # may result in less rows for purge by age to operate on. try: - if int(config['age']['value']) != 0: - result = await self._readings_storage_async.purge(age=config['age']['value'], sent_id=last_id, + if int(config['size']['value']) != 0: + result = await self._readings_storage_async.purge(size=config['size']['value'], sent_id=last_id, flag=flag) if result is not None: total_rows_removed = result['removed'] unsent_rows_removed = result['unsentPurged'] unsent_retained = result['unsentRetained'] - duration += result['duration'] - method = result['method'] + duration = result['duration'] + if method is None: + method = result['method'] + else: + method += " and " + method += result['method'] except ValueError: - self._logger.error("purge_data - Configuration item age {} should be integer!".format( - config['age']['value'])) + self._logger.error("purge_data - Configuration item size {} should be integer!".format( + config['size']['value'])) except StorageServerError: # skip logging as its already done in details for this operation in case of error # FIXME: check if ex.error jdoc has retryable True then retry the operation else move on pass try: - if int(config['size']['value']) != 0: - result = await self._readings_storage_async.purge(size=config['size']['value'], sent_id=last_id, + if int(config['age']['value']) != 0: + result = await self._readings_storage_async.purge(age=config['age']['value'], sent_id=last_id, flag=flag) if result is not None: total_rows_removed += result['removed'] unsent_rows_removed += result['unsentPurged'] unsent_retained = result['unsentRetained'] duration += result['duration'] - if method is None: - method = result['method'] - else: - method += " and " - method += result['method'] + method = result['method'] except ValueError: - self._logger.error("purge_data - Configuration item size {} should be integer!".format( - config['size']['value'])) + self._logger.error("purge_data - Configuration item age {} should be integer!".format( + config['age']['value'])) except StorageServerError: # skip logging as its already done in details for this operation in case of error # FIXME: check if ex.error jdoc has retryable True then retry the operation else move on diff --git a/python/requirements-test.txt b/python/requirements-test.txt index 0b679c3b9f..4e6ba3ddf8 100644 --- a/python/requirements-test.txt +++ b/python/requirements-test.txt @@ -3,7 +3,7 @@ pytest==3.6.4 pytest-allure-adaptor==1.7.10 pytest-asyncio==0.10.0 
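Back to the purge changes above: the size-based purge now runs before the age-based one because it is cheaper and can shrink the data the age purge has to scan, and since either pass may run on its own the summary fields are meant to be accumulated across both passes rather than overwritten. One way to express that accumulation, with made-up result dictionaries that follow the keys used above:

    # Combine the summaries of the two purge passes (size first, then age).
    def combine(results):
        total_removed = unsent_removed = duration = unsent_retained = 0
        method = None
        for res in results:
            if res is None:
                continue
            total_removed += res["removed"]
            unsent_removed += res["unsentPurged"]
            unsent_retained = res["unsentRetained"]
            duration += res["duration"]
            method = res["method"] if method is None else method + " and " + res["method"]
        return total_removed, unsent_removed, unsent_retained, duration, method

    size_res = {"removed": 120, "unsentPurged": 5, "unsentRetained": 10, "duration": 2, "method": "rows"}
    age_res = {"removed": 40, "unsentPurged": 0, "unsentRetained": 10, "duration": 1, "method": "age"}
    print(combine([size_res, age_res]))
    # (160, 5, 10, 3, 'rows and age')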
pytest-mock==1.10.3 -pytest-cov==2.5.1 +pytest-cov==2.9.0 pytest-aiohttp==0.3.0 # Common - REST interface diff --git a/python/requirements.txt b/python/requirements.txt index 75e8a090ce..54634351be 100644 --- a/python/requirements.txt +++ b/python/requirements.txt @@ -7,7 +7,7 @@ yarl==1.7.2 pyjwt==2.4.0 # only required for Public Proxy multipart payload -requests-toolbelt==0.9.1 +requests-toolbelt==1.0.0 # Transformation of data, Apply JqFilter # Install pyjq based on python version diff --git a/scripts/plugins/storage/postgres/downgrade/66.sql b/scripts/plugins/storage/postgres/downgrade/66.sql new file mode 100644 index 0000000000..ed10f65b6f --- /dev/null +++ b/scripts/plugins/storage/postgres/downgrade/66.sql @@ -0,0 +1,2 @@ +--Remove priority column from scheduled_processes table +ALTER TABLE fledge.scheduled_processes DROP COLUMN IF EXISTS priority; diff --git a/scripts/plugins/storage/postgres/downgrade/67.sql b/scripts/plugins/storage/postgres/downgrade/67.sql new file mode 100644 index 0000000000..cc80b23116 --- /dev/null +++ b/scripts/plugins/storage/postgres/downgrade/67.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS fledge.alerts; diff --git a/scripts/plugins/storage/postgres/downgrade/68.sql b/scripts/plugins/storage/postgres/downgrade/68.sql new file mode 100644 index 0000000000..78bc486212 --- /dev/null +++ b/scripts/plugins/storage/postgres/downgrade/68.sql @@ -0,0 +1,2 @@ +DELETE FROM fledge.schedules WHERE process_name = 'update checker'; +DELETE FROM fledge.scheduled_processes WHERE name = 'update checker'; diff --git a/scripts/plugins/storage/postgres/downgrade/69.sql b/scripts/plugins/storage/postgres/downgrade/69.sql new file mode 100644 index 0000000000..feab9f1d6e --- /dev/null +++ b/scripts/plugins/storage/postgres/downgrade/69.sql @@ -0,0 +1 @@ +DELETE FROM fledge.log_codes where code IN ('BUCAD', 'BUCCH', 'BUCDL'); diff --git a/scripts/plugins/storage/postgres/init.sql b/scripts/plugins/storage/postgres/init.sql index c8e1a32c68..7b858b7c74 100644 --- a/scripts/plugins/storage/postgres/init.sql +++ b/scripts/plugins/storage/postgres/init.sql @@ -699,6 +699,7 @@ CREATE INDEX fki_user_asset_permissions_fk2 CREATE TABLE fledge.scheduled_processes ( name character varying(255) NOT NULL, -- Name of the process script jsonb, -- Full path of the process + priority INTEGER NOT NULL DEFAULT 999, -- priority to run for STARTUP CONSTRAINT scheduled_processes_pkey PRIMARY KEY ( name ) ); @@ -947,6 +948,15 @@ CREATE TABLE fledge.monitors ( CREATE INDEX monitors_ix1 ON fledge.monitors(service, monitor); +-- Create alerts table + +CREATE TABLE fledge.alerts ( + key character varying(80) NOT NULL, -- Primary key + message character varying(255) NOT NULL, -- Alert Message + urgency smallint NOT NULL, -- 1 Critical - 2 High - 3 Normal - 4 Low + ts timestamp(6) with time zone NOT NULL DEFAULT now(), -- Timestamp, updated at every change + CONSTRAINT alerts_pkey PRIMARY KEY (key) ); + -- Grants to fledge schema GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA fledge TO PUBLIC; @@ -1025,7 +1035,8 @@ INSERT INTO fledge.log_codes ( code, description ) ( 'ACLAD', 'ACL Added' ),( 'ACLCH', 'ACL Changed' ),( 'ACLDL', 'ACL Deleted' ), ( 'CTSAD', 'Control Script Added' ),( 'CTSCH', 'Control Script Changed' ),('CTSDL', 'Control Script Deleted' ), ( 'CTPAD', 'Control Pipeline Added' ),( 'CTPCH', 'Control Pipeline Changed' ),('CTPDL', 'Control Pipeline Deleted' ), - ( 'CTEAD', 'Control Entrypoint Added' ),( 'CTECH', 'Control Entrypoint Changed' ),('CTEDL', 'Control Entrypoint Deleted' ) + 
( 'CTEAD', 'Control Entrypoint Added' ),( 'CTECH', 'Control Entrypoint Changed' ),('CTEDL', 'Control Entrypoint Deleted' ), + ( 'BUCAD', 'Bucket Added' ), ( 'BUCCH', 'Bucket Changed' ), ( 'BUCDL', 'Bucket Deleted' ) ; -- @@ -1055,6 +1066,7 @@ INSERT INTO fledge.scheduled_processes ( name, script ) VALUES ( 'purge_system', INSERT INTO fledge.scheduled_processes ( name, script ) VALUES ( 'stats collector', '["tasks/statistics"]' ); INSERT INTO fledge.scheduled_processes ( name, script ) VALUES ( 'FledgeUpdater', '["tasks/update"]' ); INSERT INTO fledge.scheduled_processes ( name, script ) VALUES ( 'certificate checker', '["tasks/check_certs"]' ); +INSERT INTO fledge.scheduled_processes ( name, script ) VALUES ( 'update checker', '["tasks/check_updates"]'); -- Storage Tasks -- @@ -1063,13 +1075,13 @@ INSERT INTO fledge.scheduled_processes (name, script) VALUES ('restore', '["task -- South, Notification, North Tasks -- -INSERT INTO fledge.scheduled_processes (name, script) VALUES ( 'south_c', '["services/south_c"]' ); -INSERT INTO fledge.scheduled_processes (name, script) VALUES ( 'notification_c', '["services/notification_c"]' ); -INSERT INTO fledge.scheduled_processes (name, script) VALUES ( 'north_c', '["tasks/north_c"]' ); -INSERT INTO fledge.scheduled_processes (name, script) VALUES ( 'north', '["tasks/north"]' ); -INSERT INTO fledge.scheduled_processes (name, script) VALUES ( 'north_C', '["services/north_C"]' ); -INSERT INTO fledge.scheduled_processes (name, script) VALUES ( 'dispatcher_c', '["services/dispatcher_c"]' ); -INSERT INTO fledge.scheduled_processes (name, script) VALUES ( 'bucket_storage_c', '["services/bucket_storage_c"]' ); +INSERT INTO fledge.scheduled_processes (name, script, priority) VALUES ( 'south_c', '["services/south_c"]', 100 ); +INSERT INTO fledge.scheduled_processes (name, script, priority) VALUES ( 'notification_c', '["services/notification_c"]', 30 ); +INSERT INTO fledge.scheduled_processes (name, script) VALUES ( 'north_c', '["tasks/north_c"]' ); +INSERT INTO fledge.scheduled_processes (name, script) VALUES ( 'north', '["tasks/north"]' ); +INSERT INTO fledge.scheduled_processes (name, script, priority) VALUES ( 'north_C', '["services/north_C"]', 200 ); +INSERT INTO fledge.scheduled_processes (name, script, priority) VALUES ( 'dispatcher_c', '["services/dispatcher_c"]', 20 ); +INSERT INTO fledge.scheduled_processes (name, script, priority) VALUES ( 'bucket_storage_c', '["services/bucket_storage_c"]', 10 ); -- Automation script tasks -- @@ -1129,6 +1141,18 @@ INSERT INTO fledge.schedules ( id, schedule_name, process_name, schedule_type, true -- enabled ); +-- Update checker +INSERT INTO fledge.schedules ( id, schedule_name, process_name, schedule_type, + schedule_time, schedule_interval, exclusive, enabled ) + VALUES ( '852cd8e4-3c29-440b-89ca-2c7691b0450d', -- id + 'update checker', -- schedule_name + 'update checker', -- process_name + 2, -- schedule_type (timed) + '00:05:00', -- schedule_time + '00:00:00', -- schedule_interval + true, -- exclusive + true -- enabled + ); -- Check for expired certificates INSERT INTO fledge.schedules ( id, schedule_name, process_name, schedule_type, diff --git a/scripts/plugins/storage/postgres/upgrade/67.sql b/scripts/plugins/storage/postgres/upgrade/67.sql new file mode 100644 index 0000000000..0ac6bffda5 --- /dev/null +++ b/scripts/plugins/storage/postgres/upgrade/67.sql @@ -0,0 +1,9 @@ +-- Add new column name 'priority' in scheduled_processes + +ALTER TABLE fledge.scheduled_processes ADD COLUMN priority INTEGER NOT NULL 
DEFAULT 999; +UPDATE fledge.scheduled_processes SET priority = '10' WHERE name = 'bucket_storage_c'; +UPDATE fledge.scheduled_processes SET priority = '20' WHERE name = 'dispatcher_c'; +UPDATE fledge.scheduled_processes SET priority = '30' WHERE name = 'notification_c'; +UPDATE fledge.scheduled_processes SET priority = '100' WHERE name = 'south_c'; +UPDATE fledge.scheduled_processes SET priority = '200' WHERE name = 'north_C'; +UPDATE fledge.scheduled_processes SET priority = '300' WHERE name = 'management'; diff --git a/scripts/plugins/storage/postgres/upgrade/68.sql b/scripts/plugins/storage/postgres/upgrade/68.sql new file mode 100644 index 0000000000..7f1e383b74 --- /dev/null +++ b/scripts/plugins/storage/postgres/upgrade/68.sql @@ -0,0 +1,8 @@ +-- Create alerts table + +CREATE TABLE IF NOT EXISTS fledge.alerts ( + key character varying(80) NOT NULL, -- Primary key + message character varying(255) NOT NULL, -- Alert Message + urgency smallint NOT NULL, -- 1 Critical - 2 High - 3 Normal - 4 Low + ts timestamp(6) with time zone NOT NULL DEFAULT now(), -- Timestamp, updated at every change + CONSTRAINT alerts_pkey PRIMARY KEY (key) ); diff --git a/scripts/plugins/storage/postgres/upgrade/69.sql b/scripts/plugins/storage/postgres/upgrade/69.sql new file mode 100644 index 0000000000..dd6b131266 --- /dev/null +++ b/scripts/plugins/storage/postgres/upgrade/69.sql @@ -0,0 +1,13 @@ +--- Insert update checker schedule and process entry + +INSERT INTO fledge.scheduled_processes ( name, script ) VALUES ( 'update checker', '["tasks/check_updates"]' ); +INSERT INTO fledge.schedules (id, schedule_name, process_name, schedule_type, schedule_time, schedule_interval, exclusive, enabled) +VALUES ('852cd8e4-3c29-440b-89ca-2c7691b0450d', -- id + 'update checker', -- schedule_name + 'update checker', -- process_name + 2, -- schedule_type (timed) + '00:05:00', -- schedule_time + '00:00:00', -- schedule_interval (every 24 hours) + 'true', -- exclusive + 'true' -- enabled + ); diff --git a/scripts/plugins/storage/postgres/upgrade/70.sql b/scripts/plugins/storage/postgres/upgrade/70.sql new file mode 100644 index 0000000000..fdec1bec8f --- /dev/null +++ b/scripts/plugins/storage/postgres/upgrade/70.sql @@ -0,0 +1,2 @@ +INSERT INTO fledge.log_codes ( code, description ) + VALUES ( 'BUCAD', 'Bucket Added' ), ( 'BUCCH', 'Bucket Changed' ), ( 'BUCDL', 'Bucket Deleted' ); diff --git a/scripts/plugins/storage/sqlite/downgrade/66.sql b/scripts/plugins/storage/sqlite/downgrade/66.sql new file mode 100644 index 0000000000..cc24dce7e8 --- /dev/null +++ b/scripts/plugins/storage/sqlite/downgrade/66.sql @@ -0,0 +1,23 @@ +-- From: http://www.sqlite.org/faq.html: +-- SQLite has limited ALTER TABLE support that you can use to change type of column. +-- If you want to change the type of any column you will have to recreate the table. 
+-- You can save existing data to a temporary table and then drop the old table +-- Now, create the new table, then copy the data back in from the temporary table + + +-- Remove priority column in fledge.scheduled_processes + +-- Rename existing table into a temp one +ALTER TABLE fledge.scheduled_processes RENAME TO scheduled_processes_old; + +-- Create new table +CREATE TABLE fledge.scheduled_processes ( + name character varying(255) NOT NULL, -- Name of the process + script JSON, -- Full path of the process + CONSTRAINT scheduled_processes_pkey PRIMARY KEY ( name ) ); + +-- Copy data +INSERT INTO fledge.scheduled_processes ( name, script) SELECT name, script FROM fledge.scheduled_processes_old; + +-- Remote old table +DROP TABLE IF EXISTS fledge.scheduled_processes_old; diff --git a/scripts/plugins/storage/sqlite/downgrade/67.sql b/scripts/plugins/storage/sqlite/downgrade/67.sql new file mode 100644 index 0000000000..cc80b23116 --- /dev/null +++ b/scripts/plugins/storage/sqlite/downgrade/67.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS fledge.alerts; diff --git a/scripts/plugins/storage/sqlite/downgrade/68.sql b/scripts/plugins/storage/sqlite/downgrade/68.sql new file mode 100644 index 0000000000..78bc486212 --- /dev/null +++ b/scripts/plugins/storage/sqlite/downgrade/68.sql @@ -0,0 +1,2 @@ +DELETE FROM fledge.schedules WHERE process_name = 'update checker'; +DELETE FROM fledge.scheduled_processes WHERE name = 'update checker'; diff --git a/scripts/plugins/storage/sqlite/downgrade/69.sql b/scripts/plugins/storage/sqlite/downgrade/69.sql new file mode 100644 index 0000000000..feab9f1d6e --- /dev/null +++ b/scripts/plugins/storage/sqlite/downgrade/69.sql @@ -0,0 +1 @@ +DELETE FROM fledge.log_codes where code IN ('BUCAD', 'BUCCH', 'BUCDL'); diff --git a/scripts/plugins/storage/sqlite/init.sql b/scripts/plugins/storage/sqlite/init.sql index ea55f9a235..f10f540b90 100644 --- a/scripts/plugins/storage/sqlite/init.sql +++ b/scripts/plugins/storage/sqlite/init.sql @@ -470,8 +470,9 @@ CREATE INDEX fki_user_asset_permissions_fk2 -- List of scheduled Processes CREATE TABLE fledge.scheduled_processes ( - name character varying(255) NOT NULL, -- Name of the process - script JSON, -- Full path of the process + name character varying(255) NOT NULL, -- Name of the process + script JSON, -- Full path of the process + priority INTEGER NOT NULL DEFAULT 999, -- priority to run for STARTUP CONSTRAINT scheduled_processes_pkey PRIMARY KEY ( name ) ); -- List of schedules @@ -709,6 +710,15 @@ CREATE TABLE fledge.monitors ( CREATE INDEX monitors_ix1 ON monitors(service, monitor); +-- Create alerts table + +CREATE TABLE fledge.alerts ( + key character varying(80) NOT NULL, -- Primary key + message character varying(255) NOT NULL, -- Alert Message + urgency SMALLINT NOT NULL, -- 1 Critical - 2 High - 3 Normal - 4 Low + ts DATETIME DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f+00:00', 'NOW')), -- Timestamp, updated at every change + CONSTRAINT alerts_pkey PRIMARY KEY (key) ); + ---------------------------------------------------------------------- -- Initialization phase - DML ---------------------------------------------------------------------- @@ -780,7 +790,8 @@ INSERT INTO fledge.log_codes ( code, description ) ( 'ACLAD', 'ACL Added' ),( 'ACLCH', 'ACL Changed' ),( 'ACLDL', 'ACL Deleted' ), ( 'CTSAD', 'Control Script Added' ),( 'CTSCH', 'Control Script Changed' ),('CTSDL', 'Control Script Deleted' ), ( 'CTPAD', 'Control Pipeline Added' ),( 'CTPCH', 'Control Pipeline Changed' ),('CTPDL', 'Control Pipeline Deleted' ), - ( 
'CTEAD', 'Control Entrypoint Added' ),( 'CTECH', 'Control Entrypoint Changed' ),('CTEDL', 'Control Entrypoint Deleted' ) + ( 'CTEAD', 'Control Entrypoint Added' ),( 'CTECH', 'Control Entrypoint Changed' ),('CTEDL', 'Control Entrypoint Deleted' ), + ( 'BUCAD', 'Bucket Added' ), ( 'BUCCH', 'Bucket Changed' ), ( 'BUCDL', 'Bucket Deleted' ) ; -- @@ -811,6 +822,7 @@ INSERT INTO fledge.scheduled_processes (name, script) VALUES ( 'purge_system', INSERT INTO fledge.scheduled_processes ( name, script ) VALUES ( 'stats collector', '["tasks/statistics"]' ); INSERT INTO fledge.scheduled_processes ( name, script ) VALUES ( 'FledgeUpdater', '["tasks/update"]' ); INSERT INTO fledge.scheduled_processes ( name, script ) VALUES ( 'certificate checker', '["tasks/check_certs"]' ); +INSERT INTO fledge.scheduled_processes ( name, script ) VALUES ( 'update checker', '["tasks/check_updates"]'); -- Storage Tasks -- @@ -819,13 +831,13 @@ INSERT INTO fledge.scheduled_processes (name, script) VALUES ('restore', '["task -- South, Notification, North Tasks -- -INSERT INTO fledge.scheduled_processes (name, script) VALUES ( 'south_c', '["services/south_c"]' ); -INSERT INTO fledge.scheduled_processes (name, script) VALUES ( 'notification_c', '["services/notification_c"]' ); -INSERT INTO fledge.scheduled_processes (name, script) VALUES ( 'north_c', '["tasks/north_c"]' ); -INSERT INTO fledge.scheduled_processes (name, script) VALUES ( 'north', '["tasks/north"]' ); -INSERT INTO fledge.scheduled_processes (name, script) VALUES ( 'north_C', '["services/north_C"]' ); -INSERT INTO fledge.scheduled_processes (name, script) VALUES ( 'dispatcher_c', '["services/dispatcher_c"]' ); -INSERT INTO fledge.scheduled_processes (name, script) VALUES ( 'bucket_storage_c', '["services/bucket_storage_c"]' ); +INSERT INTO fledge.scheduled_processes (name, script, priority) VALUES ( 'south_c', '["services/south_c"]', 100 ); +INSERT INTO fledge.scheduled_processes (name, script, priority) VALUES ( 'notification_c', '["services/notification_c"]', 30 ); +INSERT INTO fledge.scheduled_processes (name, script) VALUES ( 'north_c', '["tasks/north_c"]' ); +INSERT INTO fledge.scheduled_processes (name, script) VALUES ( 'north', '["tasks/north"]' ); +INSERT INTO fledge.scheduled_processes (name, script, priority) VALUES ( 'north_C', '["services/north_C"]', 200 ); +INSERT INTO fledge.scheduled_processes (name, script, priority) VALUES ( 'dispatcher_c', '["services/dispatcher_c"]', 20 ); +INSERT INTO fledge.scheduled_processes (name, script, priority) VALUES ( 'bucket_storage_c', '["services/bucket_storage_c"]', 10 ); -- Automation script tasks -- @@ -885,6 +897,20 @@ INSERT INTO fledge.schedules ( id, schedule_name, process_name, schedule_type, 't' -- enabled ); +-- Check Updates +INSERT INTO fledge.schedules ( id, schedule_name, process_name, schedule_type, + schedule_time, schedule_interval, exclusive, enabled ) + VALUES ( '852cd8e4-3c29-440b-89ca-2c7691b0450d', -- id + 'update checker', -- schedule_name + 'update checker', -- process_name + 2, -- schedule_type (timed) + '00:05:00', -- schedule_time + '00:00:00', -- schedule_interval + 't', -- exclusive + 't' -- enabled + ); + + -- Check for expired certificates INSERT INTO fledge.schedules ( id, schedule_name, process_name, schedule_type, schedule_time, schedule_interval, exclusive, enabled ) diff --git a/scripts/plugins/storage/sqlite/upgrade/67.sql b/scripts/plugins/storage/sqlite/upgrade/67.sql new file mode 100644 index 0000000000..ff3ac1949b --- /dev/null +++ 
b/scripts/plugins/storage/sqlite/upgrade/67.sql @@ -0,0 +1,9 @@ +-- Add new column name 'priority' in scheduled_processes + +ALTER TABLE fledge.scheduled_processes ADD COLUMN priority INTEGER NOT NULL DEFAULT 999; +UPDATE scheduled_processes SET priority = '10' WHERE name = 'bucket_storage_c'; +UPDATE scheduled_processes SET priority = '20' WHERE name = 'dispatcher_c'; +UPDATE scheduled_processes SET priority = '30' WHERE name = 'notification_c'; +UPDATE scheduled_processes SET priority = '100' WHERE name = 'south_c'; +UPDATE scheduled_processes SET priority = '200' WHERE name = 'north_C'; +UPDATE scheduled_processes SET priority = '300' WHERE name = 'management'; diff --git a/scripts/plugins/storage/sqlite/upgrade/68.sql b/scripts/plugins/storage/sqlite/upgrade/68.sql new file mode 100644 index 0000000000..f7f79a239b --- /dev/null +++ b/scripts/plugins/storage/sqlite/upgrade/68.sql @@ -0,0 +1,8 @@ +-- Create alerts table + +CREATE TABLE IF NOT EXISTS fledge.alerts ( + key character varying(80) NOT NULL, -- Primary key + message character varying(255) NOT NULL, -- Alert Message + urgency SMALLINT NOT NULL, -- 1 Critical - 2 High - 3 Normal - 4 Low + ts DATETIME DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f+00:00', 'NOW')), -- Timestamp, updated at every change + CONSTRAINT alerts_pkey PRIMARY KEY (key) ); diff --git a/scripts/plugins/storage/sqlite/upgrade/69.sql b/scripts/plugins/storage/sqlite/upgrade/69.sql new file mode 100644 index 0000000000..6a7d056fbd --- /dev/null +++ b/scripts/plugins/storage/sqlite/upgrade/69.sql @@ -0,0 +1,14 @@ +--- Insert check updates schedule and process entry + +INSERT INTO fledge.scheduled_processes ( name, script ) VALUES ( 'update checker', '["tasks/check_updates"]' ); +INSERT INTO fledge.schedules ( id, schedule_name, process_name, schedule_type, + schedule_time, schedule_interval, exclusive, enabled ) + VALUES ( '852cd8e4-3c29-440b-89ca-2c7691b0450d', -- id + 'update checker', -- schedule_name + 'update checker', -- process_name + 2, -- schedule_type (timed) + '00:05:00', -- schedule_time + '00:00:00', -- schedule_interval + 't', -- exclusive + 't' -- enabled + ); diff --git a/scripts/plugins/storage/sqlite/upgrade/70.sql b/scripts/plugins/storage/sqlite/upgrade/70.sql new file mode 100644 index 0000000000..fdec1bec8f --- /dev/null +++ b/scripts/plugins/storage/sqlite/upgrade/70.sql @@ -0,0 +1,2 @@ +INSERT INTO fledge.log_codes ( code, description ) + VALUES ( 'BUCAD', 'Bucket Added' ), ( 'BUCCH', 'Bucket Changed' ), ( 'BUCDL', 'Bucket Deleted' ); diff --git a/scripts/plugins/storage/sqlitelb/downgrade/66.sql b/scripts/plugins/storage/sqlitelb/downgrade/66.sql new file mode 100644 index 0000000000..cc24dce7e8 --- /dev/null +++ b/scripts/plugins/storage/sqlitelb/downgrade/66.sql @@ -0,0 +1,23 @@ +-- From: http://www.sqlite.org/faq.html: +-- SQLite has limited ALTER TABLE support that you can use to change type of column. +-- If you want to change the type of any column you will have to recreate the table. 
+-- You can save existing data to a temporary table and then drop the old table +-- Now, create the new table, then copy the data back in from the temporary table + + +-- Remove priority column in fledge.scheduled_processes + +-- Rename existing table into a temp one +ALTER TABLE fledge.scheduled_processes RENAME TO scheduled_processes_old; + +-- Create new table +CREATE TABLE fledge.scheduled_processes ( + name character varying(255) NOT NULL, -- Name of the process + script JSON, -- Full path of the process + CONSTRAINT scheduled_processes_pkey PRIMARY KEY ( name ) ); + +-- Copy data +INSERT INTO fledge.scheduled_processes ( name, script) SELECT name, script FROM fledge.scheduled_processes_old; + +-- Remote old table +DROP TABLE IF EXISTS fledge.scheduled_processes_old; diff --git a/scripts/plugins/storage/sqlitelb/downgrade/67.sql b/scripts/plugins/storage/sqlitelb/downgrade/67.sql new file mode 100644 index 0000000000..cc80b23116 --- /dev/null +++ b/scripts/plugins/storage/sqlitelb/downgrade/67.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS fledge.alerts; diff --git a/scripts/plugins/storage/sqlitelb/downgrade/68.sql b/scripts/plugins/storage/sqlitelb/downgrade/68.sql new file mode 100644 index 0000000000..78bc486212 --- /dev/null +++ b/scripts/plugins/storage/sqlitelb/downgrade/68.sql @@ -0,0 +1,2 @@ +DELETE FROM fledge.schedules WHERE process_name = 'update checker'; +DELETE FROM fledge.scheduled_processes WHERE name = 'update checker'; diff --git a/scripts/plugins/storage/sqlitelb/downgrade/69.sql b/scripts/plugins/storage/sqlitelb/downgrade/69.sql new file mode 100644 index 0000000000..feab9f1d6e --- /dev/null +++ b/scripts/plugins/storage/sqlitelb/downgrade/69.sql @@ -0,0 +1 @@ +DELETE FROM fledge.log_codes where code IN ('BUCAD', 'BUCCH', 'BUCDL'); diff --git a/scripts/plugins/storage/sqlitelb/init.sql b/scripts/plugins/storage/sqlitelb/init.sql index b7c2adce2c..ff92c281f2 100644 --- a/scripts/plugins/storage/sqlitelb/init.sql +++ b/scripts/plugins/storage/sqlitelb/init.sql @@ -470,9 +470,10 @@ CREATE INDEX fki_user_asset_permissions_fk2 -- List of scheduled Processes CREATE TABLE fledge.scheduled_processes ( - name character varying(255) NOT NULL, -- Name of the process - script JSON, -- Full path of the process - CONSTRAINT scheduled_processes_pkey PRIMARY KEY ( name ) ); + name character varying(255) NOT NULL, -- Name of the process + script JSON, -- Full path of the process + priority INTEGER NOT NULL DEFAULT 999, -- priority to run for STARTUP + CONSTRAINT scheduled_processes_pkey PRIMARY KEY ( name ) ); -- List of schedules CREATE TABLE fledge.schedules ( @@ -709,6 +710,15 @@ CREATE TABLE fledge.monitors ( CREATE INDEX monitors_ix1 ON monitors(service, monitor); +-- Create alerts table + +CREATE TABLE fledge.alerts ( + key character varying(80) NOT NULL, -- Primary key + message character varying(255) NOT NULL, -- Alert Message + urgency SMALLINT NOT NULL, -- 1 Critical - 2 High - 3 Normal - 4 Low + ts DATETIME DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f+00:00', 'NOW')), -- Timestamp, updated at every change + CONSTRAINT alerts_pkey PRIMARY KEY (key) ); + ---------------------------------------------------------------------- -- Initialization phase - DML ---------------------------------------------------------------------- @@ -780,7 +790,8 @@ INSERT INTO fledge.log_codes ( code, description ) ( 'ACLAD', 'ACL Added' ),( 'ACLCH', 'ACL Changed' ),( 'ACLDL', 'ACL Deleted' ), ( 'CTSAD', 'Control Script Added' ),( 'CTSCH', 'Control Script Changed' ),('CTSDL', 'Control Script Deleted' ), ( 
'CTPAD', 'Control Pipeline Added' ),( 'CTPCH', 'Control Pipeline Changed' ),('CTPDL', 'Control Pipeline Deleted' ), - ( 'CTEAD', 'Control Entrypoint Added' ),( 'CTECH', 'Control Entrypoint Changed' ),('CTEDL', 'Control Entrypoint Deleted' ) + ( 'CTEAD', 'Control Entrypoint Added' ),( 'CTECH', 'Control Entrypoint Changed' ),('CTEDL', 'Control Entrypoint Deleted' ), + ( 'BUCAD', 'Bucket Added' ), ( 'BUCCH', 'Bucket Changed' ), ( 'BUCDL', 'Bucket Deleted' ) ; -- @@ -811,6 +822,7 @@ INSERT INTO fledge.scheduled_processes (name, script) VALUES ( 'purge_system', INSERT INTO fledge.scheduled_processes ( name, script ) VALUES ( 'stats collector', '["tasks/statistics"]' ); INSERT INTO fledge.scheduled_processes ( name, script ) VALUES ( 'FledgeUpdater', '["tasks/update"]' ); INSERT INTO fledge.scheduled_processes ( name, script ) VALUES ( 'certificate checker', '["tasks/check_certs"]' ); +INSERT INTO fledge.scheduled_processes ( name, script ) VALUES ( 'update checker', '["tasks/check_updates"]'); -- Storage Tasks -- @@ -819,13 +831,13 @@ INSERT INTO fledge.scheduled_processes (name, script) VALUES ('restore', '["task -- South, Notification, North Tasks -- -INSERT INTO fledge.scheduled_processes (name, script) VALUES ( 'south_c', '["services/south_c"]' ); -INSERT INTO fledge.scheduled_processes (name, script) VALUES ( 'notification_c', '["services/notification_c"]' ); -INSERT INTO fledge.scheduled_processes (name, script) VALUES ( 'north_c', '["tasks/north_c"]' ); -INSERT INTO fledge.scheduled_processes (name, script) VALUES ( 'north', '["tasks/north"]' ); -INSERT INTO fledge.scheduled_processes (name, script) VALUES ( 'north_C', '["services/north_C"]' ); -INSERT INTO fledge.scheduled_processes (name, script) VALUES ( 'dispatcher_c', '["services/dispatcher_c"]' ); -INSERT INTO fledge.scheduled_processes (name, script) VALUES ( 'bucket_storage_c', '["services/bucket_storage_c"]' ); +INSERT INTO fledge.scheduled_processes (name, script, priority) VALUES ( 'south_c', '["services/south_c"]', 100 ); +INSERT INTO fledge.scheduled_processes (name, script, priority) VALUES ( 'notification_c', '["services/notification_c"]', 30 ); +INSERT INTO fledge.scheduled_processes (name, script) VALUES ( 'north_c', '["tasks/north_c"]' ); +INSERT INTO fledge.scheduled_processes (name, script) VALUES ( 'north', '["tasks/north"]' ); +INSERT INTO fledge.scheduled_processes (name, script, priority) VALUES ( 'north_C', '["services/north_C"]', 200 ); +INSERT INTO fledge.scheduled_processes (name, script, priority) VALUES ( 'dispatcher_c', '["services/dispatcher_c"]', 20 ); +INSERT INTO fledge.scheduled_processes (name, script, priority) VALUES ( 'bucket_storage_c', '["services/bucket_storage_c"]', 10 ); -- Automation script tasks -- @@ -885,6 +897,19 @@ INSERT INTO fledge.schedules ( id, schedule_name, process_name, schedule_type, 't' -- enabled ); +-- Update checker +INSERT INTO fledge.schedules ( id, schedule_name, process_name, schedule_type, + schedule_time, schedule_interval, exclusive, enabled ) + VALUES ( '852cd8e4-3c29-440b-89ca-2c7691b0450d', -- id + 'update checker', -- schedule_name + 'update checker', -- process_name + 2, -- schedule_type (timed) + '00:05:00', -- schedule_time + '00:00:00', -- schedule_interval + 't', -- exclusive + 't' -- enabled + ); + -- Check for expired certificates INSERT INTO fledge.schedules ( id, schedule_name, process_name, schedule_type, schedule_time, schedule_interval, exclusive, enabled ) diff --git a/scripts/plugins/storage/sqlitelb/upgrade/67.sql 
b/scripts/plugins/storage/sqlitelb/upgrade/67.sql new file mode 100644 index 0000000000..ff3ac1949b --- /dev/null +++ b/scripts/plugins/storage/sqlitelb/upgrade/67.sql @@ -0,0 +1,9 @@ +-- Add new column name 'priority' in scheduled_processes + +ALTER TABLE fledge.scheduled_processes ADD COLUMN priority INTEGER NOT NULL DEFAULT 999; +UPDATE scheduled_processes SET priority = '10' WHERE name = 'bucket_storage_c'; +UPDATE scheduled_processes SET priority = '20' WHERE name = 'dispatcher_c'; +UPDATE scheduled_processes SET priority = '30' WHERE name = 'notification_c'; +UPDATE scheduled_processes SET priority = '100' WHERE name = 'south_c'; +UPDATE scheduled_processes SET priority = '200' WHERE name = 'north_C'; +UPDATE scheduled_processes SET priority = '300' WHERE name = 'management'; diff --git a/scripts/plugins/storage/sqlitelb/upgrade/68.sql b/scripts/plugins/storage/sqlitelb/upgrade/68.sql new file mode 100644 index 0000000000..f7f79a239b --- /dev/null +++ b/scripts/plugins/storage/sqlitelb/upgrade/68.sql @@ -0,0 +1,8 @@ +-- Create alerts table + +CREATE TABLE IF NOT EXISTS fledge.alerts ( + key character varying(80) NOT NULL, -- Primary key + message character varying(255) NOT NULL, -- Alert Message + urgency SMALLINT NOT NULL, -- 1 Critical - 2 High - 3 Normal - 4 Low + ts DATETIME DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f+00:00', 'NOW')), -- Timestamp, updated at every change + CONSTRAINT alerts_pkey PRIMARY KEY (key) ); diff --git a/scripts/plugins/storage/sqlitelb/upgrade/69.sql b/scripts/plugins/storage/sqlitelb/upgrade/69.sql new file mode 100644 index 0000000000..cd17b27a83 --- /dev/null +++ b/scripts/plugins/storage/sqlitelb/upgrade/69.sql @@ -0,0 +1,14 @@ +--- Insert update checker schedule and process entry + +INSERT INTO fledge.scheduled_processes ( name, script ) VALUES ( 'update checker', '["tasks/check_updates"]' ); +INSERT INTO fledge.schedules ( id, schedule_name, process_name, schedule_type, + schedule_time, schedule_interval, exclusive, enabled ) + VALUES ( '852cd8e4-3c29-440b-89ca-2c7691b0450d', -- id + 'update checker', -- schedule_name + 'update checker', -- process_name + 2, -- schedule_type (timed) + '00:05:00', -- schedule_time + '00:00:00', -- schedule_interval + 't', -- exclusive + 't' -- enabled + ); diff --git a/scripts/plugins/storage/sqlitelb/upgrade/70.sql b/scripts/plugins/storage/sqlitelb/upgrade/70.sql new file mode 100644 index 0000000000..fdec1bec8f --- /dev/null +++ b/scripts/plugins/storage/sqlitelb/upgrade/70.sql @@ -0,0 +1,2 @@ +INSERT INTO fledge.log_codes ( code, description ) + VALUES ( 'BUCAD', 'Bucket Added' ), ( 'BUCCH', 'Bucket Changed' ), ( 'BUCDL', 'Bucket Deleted' ); diff --git a/scripts/services/dispatcher_c b/scripts/services/dispatcher_c index 9ec5b874e1..550016087e 100755 --- a/scripts/services/dispatcher_c +++ b/scripts/services/dispatcher_c @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # Run a Fledge Dispatcher service written in C/C++ if [ "${FLEDGE_ROOT}" = "" ]; then FLEDGE_ROOT=/usr/local/fledge @@ -9,6 +9,20 @@ if [ ! 
-d "${FLEDGE_ROOT}" ]; then exit 1 fi -cd "${FLEDGE_ROOT}/services" +# startup with delay +delay() { + for ARG in "$@"; + do + PARAM=$(echo $ARG | cut -f1 -d=) + if [ $PARAM = '--delay' ]; then + PARAM_LENGTH=${#PARAM} + VALUE="${ARG:$PARAM_LENGTH+1}" + sleep $VALUE + break + fi + done +} +cd "${FLEDGE_ROOT}/services" +delay "$@" ./fledge.services.dispatcher "$@" diff --git a/scripts/services/north_C b/scripts/services/north_C index 3cf728d425..d94d472e9f 100755 --- a/scripts/services/north_C +++ b/scripts/services/north_C @@ -1,5 +1,4 @@ -#!/bin/sh - +#!/bin/bash # Run a Fledge north service written in C/C++ if [ "${FLEDGE_ROOT}" = "" ]; then FLEDGE_ROOT=/usr/local/fledge @@ -44,6 +43,20 @@ if [ "$STRACE_NORTH" != "" ]; then done fi +# startup with delay +delay() { + for ARG in "$@"; + do + PARAM=$(echo $ARG | cut -f1 -d=) + if [ $PARAM = '--delay' ]; then + PARAM_LENGTH=${#PARAM} + VALUE="${ARG:$PARAM_LENGTH+1}" + sleep $VALUE + break + fi + done +} + cd "${FLEDGE_ROOT}/services" if [ "$runvalgrind" = "y" ]; then file=${HOME}/north.${name}.valgrind.out @@ -66,6 +79,7 @@ elif [ "$INTERPOSE_NORTH" != "" ]; then ./fledge.services.north "$@" unset LD_PRELOAD else - ./fledge.services.north "$@" + delay "$@" + ./fledge.services.north "$@" fi diff --git a/scripts/services/notification_c b/scripts/services/notification_c index 4481e37d2f..ae3023cb79 100755 --- a/scripts/services/notification_c +++ b/scripts/services/notification_c @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # Run a Fledge notification service written in C/C++ if [ "${FLEDGE_ROOT}" = "" ]; then FLEDGE_ROOT=/usr/local/fledge @@ -9,6 +9,20 @@ if [ ! -d "${FLEDGE_ROOT}" ]; then exit 1 fi -cd "${FLEDGE_ROOT}/services" +# startup with delay +delay() { + for ARG in "$@"; + do + PARAM=$(echo $ARG | cut -f1 -d=) + if [ $PARAM = '--delay' ]; then + PARAM_LENGTH=${#PARAM} + VALUE="${ARG:$PARAM_LENGTH+1}" + sleep $VALUE + break + fi + done +} +cd "${FLEDGE_ROOT}/services" +delay "$@" ./fledge.services.notification "$@" diff --git a/scripts/services/south_c b/scripts/services/south_c index 6f3d2c13e9..0e18e751f8 100755 --- a/scripts/services/south_c +++ b/scripts/services/south_c @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # Run a Fledge south service written in C/C++ if [ "${FLEDGE_ROOT}" = "" ]; then FLEDGE_ROOT=/usr/local/fledge @@ -9,6 +9,21 @@ if [ ! 
-d "${FLEDGE_ROOT}" ]; then exit 1 fi + +# startup with delay +delay() { + for ARG in "$@"; + do + PARAM=$(echo $ARG | cut -f1 -d=) + if [ $PARAM = '--delay' ]; then + PARAM_LENGTH=${#PARAM} + VALUE="${ARG:$PARAM_LENGTH+1}" + sleep $VALUE + break + fi + done +} + cd "${FLEDGE_ROOT}/services" runvalgrind=n @@ -33,6 +48,7 @@ if [ "$runvalgrind" = "y" ]; then rm -f "$file" valgrind --leak-check=full --trace-children=yes --log-file="$file" ./fledge.services.south "$@" else - ./fledge.services.south "$@" + delay "$@" + ./fledge.services.south "$@" fi diff --git a/scripts/storage b/scripts/storage index 1c2f8c955e..b2e229ee13 100755 --- a/scripts/storage +++ b/scripts/storage @@ -88,6 +88,15 @@ elif [[ "$1" == "purge" ]]; then else # Pass any other operation to the storage plugin source "$PLUGIN_SCRIPT" $1 $2 + # Also start the readings plugin if it is different to the configuration plugin + # The reason to do this is to create the schema in the readings database if required + if [[ "$PLUGIN_TO_USE" != "$READINGS_PLUGIN_TO_USE" ]]; then + READINGS_SCRIPT="$FLEDGE_ROOT/scripts/plugins/storage/$READINGS_PLUGIN_TO_USE.sh" + # Some readings plugins, notably sqlitememory, do not have a script + if [[ -x "$READINGS_SCRIPT" ]]; then + source "$READINGS_SCRIPT" $1 $2 + fi + fi fi # exit cannot be used because the script is sourced. diff --git a/scripts/tasks/check_updates b/scripts/tasks/check_updates new file mode 100755 index 0000000000..3623db9a31 --- /dev/null +++ b/scripts/tasks/check_updates @@ -0,0 +1,25 @@ +#!/bin/sh +# Run a Fledge task written in C + +# Bash Script to invoke Installed packages available upgrade checks binary and raise alerts + +if [ -z ${FLEDGE_ROOT+x} ]; then + # Set FLEDGE_ROOT as the default directory + FLEDGE_ROOT="/usr/local/fledge" + export FLEDGE_ROOT +fi + +# Check if the default directory exists +if [[ ! -d "${FLEDGE_ROOT}" ]]; then + echo "Fledge cannot be executed: ${FLEDGE_ROOT} is not a valid directory." + echo "Create the enviroment variable FLEDGE_ROOT before using check_updates." + echo "Specify the base directory for Fledge and set the variable with:" + echo "export FLEDGE_ROOT=" + exit 1 +fi + + +cd "${FLEDGE_ROOT}" + +./tasks/check_updates "$@" + diff --git a/tests/README.rst b/tests/README.rst index 47c9db148e..84892b8b22 100644 --- a/tests/README.rst +++ b/tests/README.rst @@ -16,6 +16,10 @@ pytest +.. |pytest-cov docs| raw:: html + + pytest-cov + .. _Unit: unit\\python\\ .. _System: system\\ .. _here: ..\\README.rst @@ -85,3 +89,29 @@ If you want to contribute towards adding a new tests in Fledge, make sure you fo - Test file name should begin with the word ``test_`` to enable pytest auto test discovery. - Make sure you are placing your test file in the correct test directory. For example, if you are writing a unit test, it should be located under ``$FLEDGE_ROOT/tests/unit/python/fledge/`` where component is the name of the component for which you are writing the unit tests. For more information of type of test, refer to the test categories. + +Code Coverage +------------- + +Python Tests +++++++++++++ + +Fledge uses pytest-cov Framework of pytest as the code coverage measuring tool for python tests, For more information on pytest-cov please refer to |pytest-cov docs|. + +To install pytest-cov Framework along with pytest Framework use the following command: +:: + python3 -m pip install pytest==3.6.4 pytest-cov==2.9.0 + +Running the python tests: + +- ``pytest --cov=. 
--cov-report xml:xml_filepath --cov-report html:html_directorypath`` - This will execute all the python test files in the given directory and sub-directories and generate the code coverage report in XML as well as the HTML format at the specified path in the command. +- ``pytest test_filename.py --cov=. --cov-report xml:xml_filepath --cov-report html:html_directorypath`` - This will execute all tests in the file named test_filename.py and generate the code coverage report in XML as well as the HTML format at the specified path in the command. +- ``pytest test_filename.py::TestClass --cov=. --cov-report xml:xml_filepath --cov-report html:html_directorypath`` - This will execute all test methods in a single class TestClass in file test_filename.py and generate the code coverage report in XML as well as the HTML format at the specified path in the command. +- ``pytest test_filename.py::TestClass::test_case --cov=. --cov-report xml:xml_filepath --cov-report html:html_directorypath`` - This will execute test method test_case in class TestClass in file test_filename.py and generate the code coverage report in XML as well as the HTML format at the specified path in the command. +- ``pytest -s -vv tests/unit/python/fledge/ --cov=. --cov-report=html --cov-config $FLEDGE_ROOT/tests/unit/python/.coveragerc`` - This will execute all the python tests and generate the code coverage report in the HTML format on the basis of settings in the configuration file. + + +C Tests ++++++++ + +TODO: FOGL-8497 Add documentation of Code Coverage of C Based tests diff --git a/tests/system/lab/check_env b/tests/system/lab/check_env index ee0b090fe7..126f172c7d 100755 --- a/tests/system/lab/check_env +++ b/tests/system/lab/check_env @@ -2,17 +2,18 @@ ID=$(cat /etc/os-release | grep -w ID | cut -f2 -d"=") -if [[ ${ID} == "raspbian" ]] +# debian for bullseye aarch64 +if [[ ${ID} == "raspbian" || ${ID} == "debian" ]] then echo else - echo "Please test with Raspbian OS."; exit 1; + echo "Please test with Raspberry Pi OS."; exit 1; fi VERSION_CODENAME=$(cat /etc/os-release | grep VERSION_CODENAME | cut -f2 -d"=") -if [[ ${VERSION_CODENAME} == "bullseye" || ${VERSION_CODENAME} == "buster" || ${VERSION_CODENAME} == "stretch" ]] +if [[ ${VERSION_CODENAME} == "bullseye" || ${VERSION_CODENAME} == "buster" ]] then echo "Running test on ${VERSION_CODENAME}" else - echo "This test is specific to RPi bullseye, buster and stretch only!"; exit 1; + echo "This test is specific to RPi bullseye & buster only!"; exit 1; fi diff --git a/tests/system/lab/install b/tests/system/lab/install index 1d5d418ea0..af25164f9a 100755 --- a/tests/system/lab/install +++ b/tests/system/lab/install @@ -29,7 +29,7 @@ fi VERSION_CODENAME=$(cat /etc/os-release | grep VERSION_CODENAME | cut -f2 -d"=") wget -q -O - http://archives.fledge-iot.org/KEY.gpg | sudo apt-key add - -echo "deb http://archives.fledge-iot.org/${BUILD_VERSION}/${VERSION_CODENAME}/armv7l/ /" | sudo tee -a /etc/apt/sources.list +echo "deb http://archives.fledge-iot.org/${BUILD_VERSION}/${VERSION_CODENAME}/$(arch)/ /" | sudo tee -a /etc/apt/sources.list sudo apt update time sudo -E apt install -yq fledge diff --git a/tests/system/memory_leak/config.sh b/tests/system/memory_leak/config.sh index 83e3d50d9c..bb4875c924 100644 --- a/tests/system/memory_leak/config.sh +++ b/tests/system/memory_leak/config.sh @@ -2,4 +2,9 @@ FLEDGE_URL="http://localhost:8081/fledge" TEST_RUN_TIME=3600 PI_IP="localhost" PI_USER="Administrator" -PI_PASSWORD="password" \ No newline at end of file 
+PI_PASSWORD="password" +READINGS_RATE="100" # It is the readings rate per second per service +PURGE_INTERVAL_SECONDS="180" +STORAGE='sqlite' # postgres, sqlite-in-memory, sqlitelb +READING_PLUGIN_DB='Use main plugin' +MEMORY_THRESHOLD=20 # If system memory falls below the specified memory threshold percentage, Fledge halts and generates a support bundle with a Valgrind report. \ No newline at end of file diff --git a/tests/system/memory_leak/scripts/log_analyzer b/tests/system/memory_leak/scripts/log_analyzer new file mode 100755 index 0000000000..aaa45575a0 --- /dev/null +++ b/tests/system/memory_leak/scripts/log_analyzer @@ -0,0 +1,48 @@ +#!/bin/bash + +log_directory="${1}" +error_tolerance=$(printf '%d' "${2}" 2>/dev/null) +leak_tolerance=$(printf '%d' "${3}" 2>/dev/null) + +for log_file in "$log_directory"/*.log; do + echo "Analyzing $log_file..." + + error_summary=$(grep -o "ERROR SUMMARY: [0-9]* errors" "$log_file" | tail -n 1 | cut -d ' ' -f 3) + leak_summary=$(sed -n '/LEAK SUMMARY:/,/ERROR SUMMARY:/p' "$log_file" | grep -E "definitely lost|indirectly lost|possibly lost|still reachable" | tail -n 4) + + if [ -n "$error_summary" ]; then + if [ "$error_summary" -gt "${error_tolerance}" ]; then + echo "Valgrind detected $error_summary error(s) in the log file: $log_file" + exit 1 + else + echo "Valgrind did not detected any errors in the log file: $log_file" + fi + else + echo "No error summary found in the log file." + fi + + if [ -n "$leak_summary" ]; then + echo "Valgrind detected memory leaks in the log file." + definitely_lost=$(echo "$leak_summary" | grep -o "definitely lost: [0-9,]* bytes" | awk '{print $3}' | tr -d ',') + indirectly_lost=$(echo "$leak_summary" | grep -o "indirectly lost: [0-9,]* bytes" | awk '{print $3}' | tr -d ',') + possibly_lost=$(echo "$leak_summary" | grep -o "possibly lost: [0-9,]* bytes" | awk '{print $3}' | tr -d ',') + still_reachable=$(echo "$leak_summary" | grep -o "still reachable: [0-9,]* bytes" | awk '{print $3}' | tr -d ',') + + echo "Definitely Lost: $definitely_lost" + echo "Indirectly Lost: $indirectly_lost" + echo "Possibly Lost: $possibly_lost" + echo "Still Reachable: $still_reachable" + + if [ "$definitely_lost" -gt "$leak_tolerance" ] || [ "$indirectly_lost" -gt "$leak_tolerance" ] || [ "$possibly_lost" -gt "$leak_tolerance" ] || [ "$still_reachable" -gt "$leak_tolerance" ]; then + echo "Memory leak is higher than the tolerable value: $log_file" + exit 1 + else + echo "Valgrind did not detect any errors in the log file: $log_file" + fi + + else + echo "No memory leaks detected by Valgrind in the log file." 
+ fi + + echo "==============================" +done diff --git a/tests/system/memory_leak/scripts/reset b/tests/system/memory_leak/scripts/reset index 2cc67c71f1..093d89459f 100755 --- a/tests/system/memory_leak/scripts/reset +++ b/tests/system/memory_leak/scripts/reset @@ -1,12 +1,36 @@ #!/usr/bin/env bash - -echo "Stopping Fledge" export FLEDGE_ROOT=$1 +echo "${FLEDGE_ROOT}" -cd ${1}/scripts/ && ./fledge stop +install_postgres() { + sudo apt install -y postgresql + sudo -u postgres createuser -d "$(whoami)" +} + +_config_reading_db () { + if [[ "postgres" == @($1|$2) ]] + then + install_postgres + fi + [[ -f $FLEDGE_ROOT/data/etc/storage.json ]] && echo $(jq -c --arg STORAGE_PLUGIN_VAL "${1}" '.plugin.value=$STORAGE_PLUGIN_VAL' $FLEDGE_ROOT/data/etc/storage.json) > $FLEDGE_ROOT/data/etc/storage.json || true + [[ -f $FLEDGE_ROOT/data/etc/storage.json ]] && echo $(jq -c --arg READING_PLUGIN_VAL "${2}" '.readingPlugin.value=$READING_PLUGIN_VAL' $FLEDGE_ROOT/data/etc/storage.json) > $FLEDGE_ROOT/data/etc/storage.json || true +} -echo 'resetting fledge' +# check for storage plugin +. ./config.sh + +if [[ ${STORAGE} == @(sqlite|postgres|sqlitelb) && ${READING_PLUGIN_DB} == @(Use main plugin|sqlitememory|sqlite|postgres|sqlitelb) ]] +then + _config_reading_db "${STORAGE}" "${READING_PLUGIN_DB}" +else + echo "Invalid Storage Configuration" + exit 1 +fi + +echo "Stopping Fledge" +cd ${1}/scripts/ && ./fledge stop +echo 'Resetting Fledge' echo -e "YES\nYES" | ./fledge reset || exit 1 echo echo "Starting Fledge" diff --git a/tests/system/memory_leak/scripts/setup b/tests/system/memory_leak/scripts/setup index f95fefbade..7f6b4d6a72 100755 --- a/tests/system/memory_leak/scripts/setup +++ b/tests/system/memory_leak/scripts/setup @@ -2,102 +2,132 @@ set -e +FLEDGE_PLUGINS_LIST=${1} BRANCH=${2:-develop} # here Branch means branch of fledge repository that is needed to be scanned through valgrind, default is develop COLLECT_FILES=${3} +PROJECT_ROOT=$(pwd) + +# Function to fetch OS information +fetch_os_info() { + OS_NAME=$(grep -o '^NAME=.*' /etc/os-release | cut -f2 -d\" | sed 's/"//g') + ID=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"') + UNAME=$(uname -m) + VERSION_ID=$(awk -F= '/^VERSION_ID=/{print $2}' /etc/os-release | tr -d '"') + echo "OS Name is ${OS_NAME}" + echo "VERSION ID is ${VERSION_ID}" + echo "ID is ${ID}" + echo "UNAME is ${UNAME}" +} + +clone_fledge(){ + # installing pre requisite package - git, for cloning fledge non package + sudo apt -y install git + + # cloning fledge + echo "Cloning Fledge branch $BRANCH" + git clone -b $BRANCH https://github.com/fledge-iot/fledge.git && cd fledge && chmod +x requirements.sh && sh -x requirements.sh ; + +} + +# Function to modify scripts for Valgrind +modify_scripts_for_valgrind() { + echo 'fledge root path is set to ${FLEDGE_ROOT}' + valgrind_conf=' --tool=memcheck --leak-check=full --show-leak-kinds=all' + + psouth_c=${FLEDGE_ROOT}/scripts/services/south_c + echo $psouth_c + sudo sed -i 's#/usr/local/fledge#'"$FLEDGE_ROOT"'#g' ${psouth_c} + if [[ "${COLLECT_FILES}" == "LOGS" ]]; then + sudo sed -i '/.\/fledge.services.south.*/s/^/valgrind --log-file=\/tmp\/south_valgrind.log '"$valgrind_conf"' /' ${psouth_c} + else + sudo sed -i '/.\/fledge.services.south.*/s/^/valgrind --xml=yes --xml-file=\/tmp\/south_valgrind_%p.xml --track-origins=yes '"$valgrind_conf"' /' ${psouth_c} + fi -OS_NAME=`(grep -o '^NAME=.*' /etc/os-release | cut -f2 -d\" | sed 's/"//g')` -ID=$(cat /etc/os-release | grep -w ID | cut -f2 -d"=" | tr -d '"') 
-UNAME=`uname -m` -VERSION_ID=$(cat /etc/os-release | grep -w VERSION_ID | cut -f2 -d"=" | tr -d '"') -echo "OS Name is "${OS_NAME} -echo "VERSION ID is "${VERSION_ID} -echo "ID is "${ID} -echo "UNAME is "${UNAME} + pnorth_C=${FLEDGE_ROOT}/scripts/services/north_C + echo $pnorth_C + sudo sed -i 's#/usr/local/fledge#'"$FLEDGE_ROOT"'#g' ${pnorth_C} + if [[ "${COLLECT_FILES}" == "LOGS" ]]; then + sudo sed -i '/.\/fledge.services.north.*/s/^/valgrind --log-file=\/tmp\/north_valgrind.log '"$valgrind_conf"' /' ${pnorth_C} + else + sudo sed -i '/.\/fledge.services.north.*/s/^/valgrind --xml=yes --xml-file=\/tmp\/north_valgrind_%p.xml --track-origins=yes '"$valgrind_conf"' /' ${pnorth_C} + fi -# installing pre requisite package - git, for cloning fledge non package -sudo apt -y install git + pstorage=${FLEDGE_ROOT}/scripts/services/storage + echo $pstorage + sudo sed -i 's#/usr/local/fledge#'"$FLEDGE_ROOT"'#g' ${pstorage} + if [[ "${COLLECT_FILES}" == "LOGS" ]]; then + sudo sed -i '/\${storageExec} \"\$@\"/s/^/valgrind --log-file=\/tmp\/storage_valgrind.log '"$valgrind_conf"' /' ${pstorage} + else + sudo sed -i '/\${storageExec} \"\$@\"/s/^/valgrind --xml=yes --xml-file=\/tmp\/storage_valgrind_%p.xml --track-origins=yes '"$valgrind_conf"' /' ${pstorage} + fi +} + +# Function to install C based plugin +install_c_plugin() { + local plugin="${1}" + echo "Installing C based plugin: ${plugin}" + sed -i 's|c++11 -O3|c++11 -O0 -ggdb|g' "${plugin}/CMakeLists.txt" + cd "${plugin}" && mkdir -p build && cd build && \ + cmake -DFLEDGE_INSTALL=${FLEDGE_ROOT} -DFLEDGE_ROOT=${FLEDGE_ROOT} .. && make && make install && cd "${PROJECT_ROOT}" + echo "Done installation of C Based Plugin" +} + +# Function to install Python based plugin +install_python_plugin() { + local plugin_dir="${1}" + # Install dependencies if requirements.txt exists + [[ -f ${plugin_dir}/requirements.txt ]] && python3 -m pip install -r "${plugin_dir}/requirements.txt" + # Copy plugin + echo 'Copying Plugin' + sudo cp -r "${plugin_dir}/python" "${FLEDGE_ROOT}/" + echo 'Copied.' 
+} + +# Function to install plugins +install_plugins() { + local plugin_dir="${1}" + echo "Installing Plugin: ${plugin_dir}" + + # Install dependencies if requirements.sh exists + [[ -f ${plugin_dir}/requirements.sh ]] && ${plugin_dir}/requirements.sh + + # Install plugin based on type + if [[ -f ${plugin_dir}/CMakeLists.txt ]]; then + install_c_plugin "${plugin_dir}" + else + install_python_plugin "${plugin_dir}" + fi +} + +# Main + +# Fetch OS information +fetch_os_info + +# Clone Fledge +cd "${PROJECT_ROOT}" +clone_fledge + +# Change CMakelists to build with debug options +echo 'Changing CMakelists' +sed -i 's|c++11 -O3|c++11 -O0 -ggdb|g' CMakeLists.txt && make -# cloning fledge -echo "Cloning Fledge branch $BRANCH" -git clone -b $BRANCH https://github.com/fledge-iot/fledge.git && cd fledge && chmod +x requirements.sh && sh -x requirements.sh ; -echo 'Changing CMakelists' -sed -i 's|c++11 -O3|c++11 -O0 -ggdb|g' CMakeLists.txt && make - -echo '----------------------------------' -echo -cat CMakeLists.txt -echo -echo '----------------------------------' -echo 'CMakeLists.txt changed' - -# exporting fledge path and changing directory to location where plugin repositories will be cloned and removed once the test is finished -export FLEDGE_ROOT=`pwd` && cd ..; - -# modifying script -echo 'fledge root path is set to ${FLEDGE_ROOT}' -valgrind_conf=' --tool=memcheck --leak-check=full --show-leak-kinds=all' - -psouth_c=${FLEDGE_ROOT}/scripts/services/south_c -echo $psouth_c -sudo sed -i 's#/usr/local/fledge#'"$FLEDGE_ROOT"'#g' ${psouth_c} -if [[ "${COLLECT_FILES}" == "LOGS" ]]; then - sudo sed -i '/.\/fledge.services.south.*/s/^/valgrind --log-file=\/tmp\/south_valgrind.log '"$valgrind_conf"' /' ${psouth_c} -else - sudo sed -i '/.\/fledge.services.south.*/s/^/valgrind --xml=yes --xml-file=\/tmp\/south_valgrind_%p.xml --track-origins=yes '"$valgrind_conf"' /' ${psouth_c} -fi - -pnorth_C=${FLEDGE_ROOT}/scripts/services/north_C -echo $pnorth_C -sudo sed -i 's#/usr/local/fledge#'"$FLEDGE_ROOT"'#g' ${pnorth_C} -if [[ "${COLLECT_FILES}" == "LOGS" ]]; then - sudo sed -i '/.\/fledge.services.north.*/s/^/valgrind --log-file=\/tmp\/north_valgrind.log '"$valgrind_conf"' /' ${pnorth_C} -else - sudo sed -i '/.\/fledge.services.north.*/s/^/valgrind --xml=yes --xml-file=\/tmp\/north_valgrind_%p.xml --track-origins=yes '"$valgrind_conf"' /' ${pnorth_C} -fi - -pstorage=${FLEDGE_ROOT}/scripts/services/storage -echo $pstorage -sudo sed -i 's#/usr/local/fledge#'"$FLEDGE_ROOT"'#g' ${pstorage} -if [[ "${COLLECT_FILES}" == "LOGS" ]]; then - sudo sed -i '/\${storageExec} \"\$@\"/s/^/valgrind --log-file=\/tmp\/storage_valgrind.log '"$valgrind_conf"' /' ${pstorage} -else - sudo sed -i '/\${storageExec} \"\$@\"/s/^/valgrind --xml=yes --xml-file=\/tmp\/storage_valgrind_%p.xml --track-origins=yes '"$valgrind_conf"' /' ${pstorage} -fi - -# cloning plugins based on parameters passed to the script, Currently only installing sinusoid - -IFS=' ' read -ra plugin_list <<< "${1}" -for i in "${plugin_list[@]}" -do - echo $i - git clone https://github.com/fledge-iot/${i}.git && cd ${i}; plugin_dir=`pwd` - - # Cheking requirements.sh file exists or not, to install plugins dependencies - if [[ -f ${plugin_dir}/requirements.sh ]] - then - ./${plugin_dir}/requirements.sh - fi +# Export fledge path and change directory to the location where plugin repositories will be cloned +export FLEDGE_ROOT=$(pwd) +cd "${PROJECT_ROOT}" - # checking CMakeLists.txt exists or not, to confirm whther it is a C based plugin or python based plugin - if [[ -f 
${plugin_dir}/CMakeLists.txt ]] - then - sed -i 's|c++11 -O3|c++11 -O0 -ggdb|g' ${plugin_dir}/CMakeLists.txt - # building C based plugin - echo 'Building C plugin' - mkdir -p build && cd build && cmake -DFLEDGE_INSTALL=${FLEDGE_ROOT} -DFLEDGE_ROOT=${FLEDGE_ROOT} .. && make && make install && cd .. - else - # Checking requirements.txt file exists or not, to install plugins dependencies (if any) - if [[ -f ${plugin_dir}/requirements.txt ]] - then - python3 -m pip install -r ${plugin_dir}/requirements.txt - fi - # Copying Plugin - echo 'Copying Plugin' - sudo cp -r $plugin_dir/python $FLEDGE_ROOT/ - echo 'Copied.' - fi - cd ../ +# Install Fledge Based Plugins +IFS=' ' read -ra fledge_plugin_list <<< "${FLEDGE_PLUGINS_LIST}" +for i in "${fledge_plugin_list[@]}"; do + echo "Plugin: ${i}" + # tar -xzf sources.tar.gz --wildcards "*/${i}" --strip-components=1 + git clone https://github.com/fledge-iot/${i}.git + install_plugins "${PROJECT_ROOT}/${i}" done -echo 'Current location - '; pwd; + +# Modify scripts for Valgrind +modify_scripts_for_valgrind + +echo "Current location - $(pwd)" echo 'End of setup' \ No newline at end of file diff --git a/tests/system/memory_leak/test_memcheck.sh b/tests/system/memory_leak/test_memcheck.sh index 5d9d5308a4..0096b35b6f 100755 --- a/tests/system/memory_leak/test_memcheck.sh +++ b/tests/system/memory_leak/test_memcheck.sh @@ -1,6 +1,27 @@ #!/bin/bash + +__author__="Mohit Singh Tomar" +__copyright__="Copyright (c) 2024 Dianomic Systems Inc." +__license__="Apache 2.0" +__version__="1.0.0" + +####################################################################################################################### +# Script Name: test_memcheck.sh +# Description: Tests for checking memory leaks in Fledge. +# Usage: ./test_memcheck.sh FLEDGE_TEST_BRANCH COLLECT_FILES [OPTIONS] +# +# Parameters: +# FLEDGE_TEST_BRANCH (str): Branch of Fledge Repository on which valgrind test will run. +# COLLECT_FILES (str): Type of report file needs to be collected from valgrind test, default is LOGS otherwise XML. +# +# Options: +# --use-filters: If passed, add filters to South Services. # -# Tests for checking meomory leaks. +# Example: +# ./test_memcheck.sh develop LOGS +# ./test_memcheck.sh develop LOGS --use-filters +# +######################################################################################################################### set -e source config.sh @@ -8,8 +29,13 @@ source config.sh export FLEDGE_ROOT=$(pwd)/fledge FLEDGE_TEST_BRANCH="$1" # here fledge_test_branch means branch of fledge repository that is needed to be scanned, default is develop - COLLECT_FILES="${2:-LOGS}" +USE_FILTER="False" +SCRIPT_DIR=$(dirname "$(readlink -f "$0")") + +if [ "$3" = "--use-filters" ]; then + USE_FILTER="True" +fi if [[ ${COLLECT_FILES} != @(LOGS|XML|) ]] then @@ -25,13 +51,32 @@ cleanup(){ # Setting up Fledge and installing its plugin setup(){ - ./scripts/setup "fledge-south-sinusoid fledge-south-random" "${FLEDGE_TEST_BRANCH}" "${COLLECT_FILES}" + ./scripts/setup "fledge-south-sinusoid fledge-south-random fledge-filter-asset fledge-filter-rename" "${FLEDGE_TEST_BRANCH}" "${COLLECT_FILES}" } reset_fledge(){ ./scripts/reset ${FLEDGE_ROOT} ; } +configure_purge(){ + # This function is for updating purge configuration and schedule of python based purge. 
+ echo -e "Updating Purge Configuration \n" + row_count="$(printf "%.0f" "$(echo "${READINGS_RATE} * 2 * ${PURGE_INTERVAL_SECONDS}"| bc)")" + curl -X PUT "$FLEDGE_URL/category/PURGE_READ" -d "{\"size\":\"${row_count}\"}" + echo + echo -e "Updated Purge Configuration \n" + echo -e "Updating Purge Schedule \n" + curl -X PUT "$FLEDGE_URL/schedule/cea17db8-6ccc-11e7-907b-a6006ad3dba0" -d \ + '{ + "name": "purge", + "type": 3, + "repeat": '"${PURGE_INTERVAL_SECONDS}"', + "exclusive": true, + "enabled": true + }' + echo -e "Updated Purge Schedule \n" +} + add_sinusoid(){ echo -e INFO: "Add South Sinusoid" curl -sX POST "$FLEDGE_URL/service" -d \ @@ -39,7 +84,7 @@ add_sinusoid(){ "name": "Sine", "type": "south", "plugin": "sinusoid", - "enabled": true, + "enabled": "false", "config": {} }' echo @@ -47,10 +92,33 @@ add_sinusoid(){ sleep 60 - curl -sX PUT "$FLEDGE_URL/category/SineAdvanced" -d '{ "readingsPerSec": "100"}' + curl -sX PUT "$FLEDGE_URL/category/SineAdvanced" -d '{ "readingsPerSec": "'${READINGS_RATE}'"}' echo } +add_asset_filter_to_sine(){ + echo 'Adding Asset Filter to Sinusoid Service' + curl -sX POST "$FLEDGE_URL/filter" -d \ + '{ + "name":"asset #1", + "plugin":"asset", + "filter_config":{ + "enable":"true", + "config":{ + "rules":[ + {"asset_name":"sinusoid","action":"rename","new_asset_name":"sinner"} + ] + } + } + }' + + curl -sX PUT "$FLEDGE_URL/filter/Sine/pipeline?allow_duplicates=true&append_filter=true" -d \ + '{ + "pipeline":["asset #1"], + "files":[] + }' +} + add_random(){ echo -e INFO: "Add South Random" curl -sX POST "$FLEDGE_URL/service" -d \ @@ -58,7 +126,7 @@ add_random(){ "name": "Random", "type": "south", "plugin": "Random", - "enabled": true, + "enabled": "false", "config": {} }' echo @@ -66,10 +134,39 @@ add_random(){ sleep 60 - curl -sX PUT "$FLEDGE_URL/category/RandomAdvanced" -d '{ "readingsPerSec": "100"}' + curl -sX PUT "$FLEDGE_URL/category/RandomAdvanced" -d '{ "readingsPerSec": "'${READINGS_RATE}'"}' echo } + +add_rename_filter_to_random(){ + echo -e "\nAdding Rename Filter to Random Service" + curl -sX POST "$FLEDGE_URL/filter" -d \ + '{ + "name":"rename #1", + "plugin":"rename", + "filter_config":{ + "find":"Random", + "replaceWith":"Randomizer", + "enable":"true" + } + }' + + curl -sX PUT "$FLEDGE_URL/filter/Random/pipeline?allow_duplicates=true&append_filter=true" -d \ + '{ + "pipeline":["rename #1"], + "files":[] + }' +} + +enable_services(){ + echo -e "\nEnable Services" + curl -sX PUT "$FLEDGE_URL/schedule/enable" -d '{"schedule_name":"Sine"}' + sleep 20 + curl -sX PUT "$FLEDGE_URL/schedule/enable" -d '{"schedule_name": "Random"}' + sleep 20 +} + setup_north_pi_egress () { # Add PI North as service echo 'Setting up North' @@ -110,20 +207,65 @@ setup_north_pi_egress () { echo 'North setup done' } -# This Function keep the fledge and its plugin running state for the "TEST_RUN_TIME" seconds then stop the fledge, So that data required for mem check be collected. -collect_data(){ - sleep ${TEST_RUN_TIME} - # TODO: remove set +e / set -e - # FOGL-6840 fledge stop returns exit code 1 +monitor_memory() { + local duration=$1 + local threshold=$2 + local interval=5 # Check memory every 5 seconds + + echo "Monitoring system memory for ${duration} seconds..." 
+ + # Calculate threshold memory value + local total_mem=$(free | awk '/^Mem:/{print $2}') + local threshold_mem=$((total_mem * threshold / 100)) + + local remaining=$duration + + while [ $remaining -gt 0 ]; do + # Check available memory + local avail_mem=$(free | awk '/^Mem:/{print $7}') + + if [ $avail_mem -lt $threshold_mem ]; then + echo "Available memory is below threshold. Stopping monitoring." + break + fi + + # Sleep for interval seconds + sleep $interval + remaining=$((remaining - interval)) + echo "${remaining} seconds remaining" + done +} + +collect_data() { + echo "Collecting Data and Generating reports" set +e - ${FLEDGE_ROOT}/scripts/fledge stop && echo $? + + echo "===================== COLLECTING SUPPORT BUNDLE / SYSLOG ============================" + mkdir -p reports/ && ls -lrth + BUNDLE=$(curl -sX POST "$FLEDGE_URL/support") + # Check if the bundle is created using jq + if jq -e 'has("bundle created")' <<< "$BUNDLE" > /dev/null; then + echo "Support Bundle Created" + # Use proper quoting for variable expansion + cp -r "$FLEDGE_ROOT/data/support/"* reports/ && \ + echo "Support bundle has been saved to path: $SCRIPT_DIR/reports" + else + echo "Failed to Create support bundle" + # Use proper quoting for variable expansion + cp /var/log/syslog reports/ && \ + echo "Syslog Saved to path: $SCRIPT_DIR/reports" + fi + echo "===================== COLLECTED SUPPORT BUNDLE / SYSLOG ============================" + # Use proper quoting for variable expansion + "${FLEDGE_ROOT}/scripts/fledge" stop && echo $? set -e } + generate_valgrind_logs(){ echo 'Creating reports directory'; mkdir -p reports/ ; ls -lrth - echo 'copying reports ' + echo 'copying reports' extension="xml" if [[ "${COLLECT_FILES}" == "LOGS" ]]; then extension="log"; fi cp -rf /tmp/*valgrind*.${extension} reports/. && echo 'copied' @@ -132,9 +274,16 @@ generate_valgrind_logs(){ cleanup setup reset_fledge +configure_purge add_sinusoid add_random +if [ "${USE_FILTER}" = "True" ]; then + add_asset_filter_to_sine + add_rename_filter_to_random +fi +enable_services setup_north_pi_egress +monitor_memory ${TEST_RUN_TIME} ${MEMORY_THRESHOLD} collect_data generate_valgrind_logs diff --git a/tests/system/python/api/test_alerts.py b/tests/system/python/api/test_alerts.py new file mode 100644 index 0000000000..ef0b19c4cc --- /dev/null +++ b/tests/system/python/api/test_alerts.py @@ -0,0 +1,144 @@ +import http.client +import json +import pytest + +__author__ = "Ashish Jabble" +__copyright__ = "Copyright (c) 2024 Dianomic Systems Inc." +__license__ = "Apache 2.0" +__version__ = "${VERSION}" + +""" User Alerts API tests """ + + +def verify_alert_in_ping(url, alert_count): + conn = http.client.HTTPConnection(url) + conn.request("GET", '/fledge/ping') + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert len(jdoc), "No Ping data found." 
+ assert jdoc['alerts'] == alert_count + + +def create_alert(url, payload): + svc_conn = http.client.HTTPConnection(url) + svc_conn.request("GET", '/fledge/service?type=Core') + resp = svc_conn.getresponse() + assert 200 == resp.status + resp = resp.read().decode() + svc_jdoc = json.loads(resp) + + svc_details = svc_jdoc["services"][0] + url = "{}:{}".format(svc_details['address'], svc_details['management_port']) + conn = http.client.HTTPConnection(url) + conn.request('POST', '/fledge/alert', body=json.dumps(payload)) + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert len(jdoc), "Failed to create alert!" + return jdoc + +class TestAlerts: + + def test_get_default_alerts(self, fledge_url, reset_and_start_fledge): + verify_alert_in_ping(fledge_url, alert_count=0) + + conn = http.client.HTTPConnection(fledge_url) + conn.request("GET", '/fledge/alert') + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert len(jdoc), "No alerts found." + assert 'alerts' in jdoc + assert jdoc['alerts'] == [] + + def test_no_delete_alert(self, fledge_url): + conn = http.client.HTTPConnection(fledge_url) + conn.request("DELETE", '/fledge/alert') + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert 'message' in jdoc + assert {"message": "Nothing to delete."} == jdoc + + def test_bad_delete_alert_by_key(self, fledge_url): + key = "blah" + conn = http.client.HTTPConnection(fledge_url) + conn.request("DELETE", '/fledge/alert/{}'.format(key)) + r = conn.getresponse() + assert 404 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert 'message' in jdoc + assert {"message": "{} alert not found.".format(key)} == jdoc + + @pytest.mark.parametrize("payload, count", [ + ({"key": "updates", "urgency": "normal", "message": "Fledge new version is available."}, 1), + ({"key": "updates", "urgency": "normal", "message": "Fledge new version is available."}, 1) + ]) + def test_create_alert(self, fledge_url, payload, count): + jdoc = create_alert(fledge_url, payload) + assert 'alert' in jdoc + alert_jdoc = jdoc['alert'] + payload['urgency'] = 'Normal' + assert payload == alert_jdoc + + verify_alert_in_ping(fledge_url, alert_count=count) + + def test_get_all_alerts(self, fledge_url): + verify_alert_in_ping(fledge_url, alert_count=1) + + conn = http.client.HTTPConnection(fledge_url) + conn.request("GET", '/fledge/alert') + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert len(jdoc), "No alerts found." + assert 'alerts' in jdoc + assert 1 == len(jdoc['alerts']) + alert_jdoc = jdoc['alerts'][0] + assert 'key' in alert_jdoc + assert 'updates' == alert_jdoc['key'] + assert 'message' in alert_jdoc + assert 'Fledge new version is available.' 
== alert_jdoc['message'] + assert 'urgency' in alert_jdoc + assert 'Normal' == alert_jdoc['urgency'] + assert 'timestamp' in alert_jdoc + + def test_delete_alert_by_key(self, fledge_url): + payload = {"key": "Sine", "message": "The service has restarted 4 times", "urgency": "critical"} + jdoc = create_alert(fledge_url, payload) + assert 'alert' in jdoc + alert_jdoc = jdoc['alert'] + payload['urgency'] = 'Critical' + assert payload == alert_jdoc + + verify_alert_in_ping(fledge_url, alert_count=2) + + conn = http.client.HTTPConnection(fledge_url) + conn.request("DELETE", '/fledge/alert/{}'.format(payload['key'])) + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert 'message' in jdoc + assert {'message': '{} alert is deleted.'.format(payload['key'])} == jdoc + + verify_alert_in_ping(fledge_url, alert_count=1) + + def test_delete_alert(self, fledge_url): + conn = http.client.HTTPConnection(fledge_url) + conn.request("DELETE", '/fledge/alert') + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert 'message' in jdoc + assert {'message': 'Delete all alerts.'} == jdoc + + verify_alert_in_ping(fledge_url, alert_count=0) diff --git a/tests/system/python/api/test_audit.py b/tests/system/python/api/test_audit.py index b8a4b1d328..265d47d01b 100644 --- a/tests/system/python/api/test_audit.py +++ b/tests/system/python/api/test_audit.py @@ -33,7 +33,8 @@ def test_get_log_codes(self, fledge_url, reset_and_start_fledge): 'ACLAD', 'ACLCH', 'ACLDL', 'CTSAD', 'CTSCH', 'CTSDL', 'CTPAD', 'CTPCH', 'CTPDL', - 'CTEAD', 'CTECH', 'CTEDL' + 'CTEAD', 'CTECH', 'CTEDL', + 'BUCAD', 'BUCCH', 'BUCDL' ] conn = http.client.HTTPConnection(fledge_url) conn.request("GET", '/fledge/audit/logcode') diff --git a/tests/system/python/api/test_common.py b/tests/system/python/api/test_common.py index c60ea04889..69c9e17850 100644 --- a/tests/system/python/api/test_common.py +++ b/tests/system/python/api/test_common.py @@ -85,6 +85,7 @@ def test_ping_default(self, reset_and_start_fledge, fledge_url): assert jdoc['authenticationOptional'] is True assert jdoc['safeMode'] is False assert re.match(SEMANTIC_VERSIONING_REGEX, jdoc['version']) is not None + assert jdoc['alerts'] == 0 def test_ping_when_auth_mandatory_allow_ping_true(self, fledge_url, wait_time, retries): conn = http.client.HTTPConnection(fledge_url) diff --git a/tests/system/python/api/test_endpoints_with_different_user_types.py b/tests/system/python/api/test_endpoints_with_different_user_types.py index a0180372f8..4ad36b8dea 100644 --- a/tests/system/python/api/test_endpoints_with_different_user_types.py +++ b/tests/system/python/api/test_endpoints_with_different_user_types.py @@ -222,7 +222,13 @@ def test_login(self, fledge_url, wait_time): ("POST", "/fledge/notification", 403), ("PUT", "/fledge/notification/N1", 403), ("DELETE", "/fledge/notification/N1", 403), ("GET", "/fledge/notification/N1/delivery", 404), ("POST", "/fledge/notification/N1/delivery", 403), ("GET", "/fledge/notification/N1/delivery/C1", 404), - ("DELETE", "/fledge/notification/N1/delivery/C1", 403) + ("DELETE", "/fledge/notification/N1/delivery/C1", 403), + # performance monitors + ("GET", "/fledge/monitors", 200), ("GET", "/fledge/monitors/SVC", 200), + ("GET", "/fledge/monitors/Svc/Counter", 200), ("DELETE", "/fledge/monitors", 403), + ("DELETE", "/fledge/monitors/SVC", 403), ("DELETE", "/fledge/monitors/Svc/Counter", 403), + # alerts + ("GET", "/fledge/alert", 200), ("DELETE", "/fledge/alert", 
403), ("DELETE", "/fledge/alert/blah", 403) ]) def test_endpoints(self, fledge_url, method, route_path, http_status_code, storage_plugin): conn = http.client.HTTPConnection(fledge_url) @@ -372,7 +378,13 @@ def test_login(self, fledge_url, wait_time): ("POST", "/fledge/notification", 403), ("PUT", "/fledge/notification/N1", 403), ("DELETE", "/fledge/notification/N1", 403), ("GET", "/fledge/notification/N1/delivery", 403), ("POST", "/fledge/notification/N1/delivery", 403), ("GET", "/fledge/notification/N1/delivery/C1", 403), - ("DELETE", "/fledge/notification/N1/delivery/C1", 403) + ("DELETE", "/fledge/notification/N1/delivery/C1", 403), + # performance monitors + ("GET", "/fledge/monitors", 403), ("GET", "/fledge/monitors/SVC", 403), + ("GET", "/fledge/monitors/Svc/Counter", 403), ("DELETE", "/fledge/monitors", 403), + ("DELETE", "/fledge/monitors/SVC", 403), ("DELETE", "/fledge/monitors/Svc/Counter", 403), + # alerts + ("GET", "/fledge/alert", 403), ("DELETE", "/fledge/alert", 403), ("DELETE", "/fledge/alert/blah", 403) ]) def test_endpoints(self, fledge_url, method, route_path, http_status_code, storage_plugin): conn = http.client.HTTPConnection(fledge_url) @@ -527,7 +539,13 @@ def test_login(self, fledge_url, wait_time): ("POST", "/fledge/notification", 404), ("PUT", "/fledge/notification/N1", 404), ("DELETE", "/fledge/notification/N1", 404), ("GET", "/fledge/notification/N1/delivery", 404), ("POST", "/fledge/notification/N1/delivery", 400), ("GET", "/fledge/notification/N1/delivery/C1", 404), - ("DELETE", "/fledge/notification/N1/delivery/C1", 404) + ("DELETE", "/fledge/notification/N1/delivery/C1", 404), + # performance monitors + ("GET", "/fledge/monitors", 200), ("GET", "/fledge/monitors/SVC", 200), + ("GET", "/fledge/monitors/Svc/Counter", 200), ("DELETE", "/fledge/monitors", 200), + ("DELETE", "/fledge/monitors/SVC", 200), ("DELETE", "/fledge/monitors/Svc/Counter", 200), + # alerts + ("GET", "/fledge/alert", 200), ("DELETE", "/fledge/alert", 200), ("DELETE", "/fledge/alert/blah", 404) ]) def test_endpoints(self, fledge_url, method, route_path, http_status_code, storage_plugin): conn = http.client.HTTPConnection(fledge_url) diff --git a/tests/system/python/api/test_service.py b/tests/system/python/api/test_service.py index 9eb38d1898..e6e463d476 100644 --- a/tests/system/python/api/test_service.py +++ b/tests/system/python/api/test_service.py @@ -234,7 +234,7 @@ def test_service_on_restart(self, fledge_url, wait_time): assert len(jdoc), "No data found" assert 'Fledge restart has been scheduled.' 
== jdoc['message'] - time.sleep(wait_time * 4) + time.sleep(wait_time * 7) jdoc = get_service(fledge_url, '/fledge/service') assert len(jdoc), "No data found" assert 4 == len(jdoc['services']) diff --git a/tests/system/python/packages/test_available_and_install_api.py b/tests/system/python/packages/test_available_and_install_api.py index 24ace33af1..ad3d34aac9 100644 --- a/tests/system/python/packages/test_available_and_install_api.py +++ b/tests/system/python/packages/test_available_and_install_api.py @@ -78,6 +78,7 @@ def test_ping(self, fledge_url): assert 'green' == jdoc['health'] assert jdoc['authenticationOptional'] is True assert jdoc['safeMode'] is False + assert jdoc['alerts'] == 0 def test_available_plugin_packages(self, fledge_url): conn = http.client.HTTPConnection(fledge_url) diff --git a/tests/unit/C/CMakeLists.txt b/tests/unit/C/CMakeLists.txt index 53ba41dd3a..f4c39c6461 100644 --- a/tests/unit/C/CMakeLists.txt +++ b/tests/unit/C/CMakeLists.txt @@ -95,6 +95,7 @@ set_target_properties(plugins-common-lib PROPERTIES SOVERSION 1) set(LIB_NAME OMF) file(GLOB OMF_LIB_SOURCES ../../../C/plugins/north/OMF/omf.cpp + ../../../C/plugins/north/OMF/omfbuffer.cpp ../../../C/plugins/north/OMF/omfhints.cpp ../../../C/plugins/north/OMF/OMFError.cpp ../../../C/plugins/north/OMF/linkdata.cpp) diff --git a/tests/unit/C/common/test_config_category.cpp b/tests/unit/C/common/test_config_category.cpp index 0bbce88f8a..5a36334001 100644 --- a/tests/unit/C/common/test_config_category.cpp +++ b/tests/unit/C/common/test_config_category.cpp @@ -341,6 +341,27 @@ const char *json_parse_error = "{\"description\": {" "\"default\": {\"first\" : \"Fledge\", \"second\" : \"json\" }," "\"description\": \"A JSON configuration parameter\"}}"; +const char *listConfig = "{ \"name\": {" + "\"type\": \"list\"," + "\"items\" : \"string\"," + "\"default\": \"[ \\\"Fledge\\\" ]\"," + "\"value\" : \"[ \\\"one\\\", \\\"two\\\" ]\"," + "\"description\": \"A simple list\"} }"; + +const char *kvlistConfig = "{ \"name\": {" + "\"type\": \"kvlist\"," + "\"items\" : \"string\"," + "\"default\": \"{ }\"," + "\"value\" : \"{ \\\"a\\\" : \\\"first\\\", \\\"b\\\" : \\\"second\\\" }\"," + "\"description\": \"A simple list\"} }"; + +const char *kvlistObjectConfig = "{ \"name\": {" + "\"type\": \"kvlist\"," + "\"items\" : \"object\"," + "\"default\": \"{ }\"," + "\"value\" : \"{ \\\"a\\\" : { \\\"one\\\" : \\\"first\\\"}, \\\"b\\\" : { \\\"two\\\" :\\\"second\\\" } }\"," + "\"description\": \"A simple list\"} }"; + TEST(CategoriesTest, Count) { ConfigCategories confCategories(categories); @@ -676,3 +697,36 @@ TEST(Categorytest, parseError) { EXPECT_THROW(ConfigCategory("parseTest", json_parse_error), ConfigMalformed*); } + +TEST(CategoryTest, listItem) +{ + ConfigCategory category("list", listConfig); + ASSERT_EQ(true, category.isList("name")); + ASSERT_EQ(0, category.getItemAttribute("name", ConfigCategory::ITEM_TYPE_ATTR).compare("string")); + std::vector v = category.getValueList("name"); + ASSERT_EQ(2, v.size()); + ASSERT_EQ(0, v[0].compare("one")); + ASSERT_EQ(0, v[1].compare("two")); +} + +TEST(CategoryTest, kvlistItem) +{ + ConfigCategory category("list", kvlistConfig); + ASSERT_EQ(true, category.isKVList("name")); + ASSERT_EQ(0, category.getItemAttribute("name", ConfigCategory::ITEM_TYPE_ATTR).compare("string")); + std::map v = category.getValueKVList("name"); + ASSERT_EQ(2, v.size()); + ASSERT_EQ(0, v["a"].compare("first")); + ASSERT_EQ(0, v["b"].compare("second")); +} + +TEST(CategoryTest, kvlistObjectItem) +{ + 
ConfigCategory category("list", kvlistObjectConfig); + ASSERT_EQ(true, category.isKVList("name")); + ASSERT_EQ(0, category.getItemAttribute("name", ConfigCategory::ITEM_TYPE_ATTR).compare("object")); + std::map v = category.getValueKVList("name"); + ASSERT_EQ(2, v.size()); + ASSERT_EQ(0, v["a"].compare("{\"one\":\"first\"}")); + ASSERT_EQ(0, v["b"].compare("{\"two\":\"second\"}")); +} diff --git a/tests/unit/C/common/test_json_utils.cpp b/tests/unit/C/common/test_json_utils.cpp index 96f2bffed1..6243fe9b1a 100644 --- a/tests/unit/C/common/test_json_utils.cpp +++ b/tests/unit/C/common/test_json_utils.cpp @@ -73,3 +73,21 @@ TEST(JsonToVectorString, JSONbad) ASSERT_EQ(result, false); } + +TEST(JsonStringUnescape, LeadingAndTrailingDoubleQuote) +{ + string json = R"("value")"; + ASSERT_EQ("value", JSONunescape(json)); +} + +TEST(JsonStringUnescape, UnescapedDoubleQuote) +{ + string json = R"({\"key\":\"value\"})"; + ASSERT_EQ(R"({"key":"value"})", JSONunescape(json)); +} + +TEST(JsonStringUnescape, TwoTimesUnescapedDoubleQuote) +{ + string json = R"({\\"key\\":\\"value\\"})"; + ASSERT_EQ(R"({\"key\":\"value\"})", JSONunescape(json)); +} diff --git a/tests/unit/C/plugins/common/test_omf_translation.cpp b/tests/unit/C/plugins/common/test_omf_translation.cpp index 90eecc6065..698e7a140b 100644 --- a/tests/unit/C/plugins/common/test_omf_translation.cpp +++ b/tests/unit/C/plugins/common/test_omf_translation.cpp @@ -5,6 +5,8 @@ #include #include #include +#include + /* * Fledge Readings to OMF translation unit tests * @@ -240,9 +242,10 @@ TEST(OMF_transation, TwoTranslationsCompareResult) // Build a ReadingSet from JSON ReadingSet readingSet(two_readings); - ostringstream jsonData; - jsonData << "["; + OMFBuffer payload; + payload.append("["); + bool sep = false; // Iterate over Readings via readingSet.getAllReadings() for (vector::const_iterator elem = readingSet.getAllReadings().begin(); elem != readingSet.getAllReadings().end(); @@ -251,13 +254,17 @@ TEST(OMF_transation, TwoTranslationsCompareResult) measurementId = to_string(TYPE_ID) + "measurement_luxometer"; // Add into JSON string the OMF transformed Reading data - jsonData << OMFData(**elem, measurementId).OMFdataVal() << (elem < (readingSet.getAllReadings().end() - 1 ) ? 
", " : ""); + if (OMFData(payload, **elem, measurementId, sep).hasData()) + sep = true; } - jsonData << "]"; + payload.append("]"); + const char *data = payload.coalesce(); + string json(data); + delete[] data; // Compare translation - ASSERT_EQ(0, jsonData.str().compare(two_translated_readings)); + ASSERT_EQ(0, json.compare(two_translated_readings)); } // Create ONE reading, convert it and run checks @@ -265,7 +272,6 @@ TEST(OMF_transation, OneReading) { string measurementId; - ostringstream jsonData; string strVal("printer"); DatapointValue value(strVal); // ONE reading @@ -277,17 +283,22 @@ TEST(OMF_transation, OneReading) measurementId = "dummy"; + OMFBuffer payload; // Create the OMF Json data - jsonData << "["; - jsonData << OMFData(lab, measurementId).OMFdataVal(); - jsonData << "]"; + payload.append("["); + OMFData(payload, lab, measurementId, false); + payload.append("]"); + + const char *data = payload.coalesce(); + string json(data); + delete[] data; // "values" key is in the output - ASSERT_NE(jsonData.str().find(string("\"values\" : { ")), 0); + ASSERT_NE(json.find(string("\"values\" : { ")), 0); // Parse JSON of translated data Document doc; - doc.Parse(jsonData.str().c_str()); + doc.Parse(json.c_str()); if (doc.HasParseError()) { ASSERT_FALSE(true); @@ -349,8 +360,8 @@ TEST(OMF_transation, AllReadingsWithUnsupportedTypes) // Build a ReadingSet from JSON ReadingSet readingSet(all_readings_with_unsupported_datapoints_types); - ostringstream jsonData; - jsonData << "["; + OMFBuffer payload; + payload.append("["); bool pendingSeparator = false; // Iterate over Readings via readingSet.getAllReadings() @@ -360,19 +371,19 @@ TEST(OMF_transation, AllReadingsWithUnsupportedTypes) { measurementId = "dummy"; - string rData = OMFData(**elem, measurementId).OMFdataVal(); - // Add into JSON string the OMF transformed Reading data - if (!rData.empty()) - { - jsonData << (pendingSeparator ? ", " : "") << rData; + if (OMFData(payload, **elem, measurementId, pendingSeparator).hasData()) pendingSeparator = true; - } + // Add into JSON string the OMF transformed Reading data } - jsonData << "]"; + payload.append("]"); + + const char *data = payload.coalesce(); + string json(data); + delete[] data; Document doc; - doc.Parse(jsonData.str().c_str()); + doc.Parse(json.c_str()); if (doc.HasParseError()) { ASSERT_FALSE(true); @@ -394,8 +405,8 @@ TEST(OMF_transation, ReadingsWithUnsupportedTypes) // Build a ReadingSet from JSON ReadingSet readingSet(readings_with_unsupported_datapoints_types); - ostringstream jsonData; - jsonData << "["; + OMFBuffer payload; + payload.append("["); bool pendingSeparator = false; // Iterate over Readings via readingSet.getAllReadings() @@ -405,21 +416,19 @@ TEST(OMF_transation, ReadingsWithUnsupportedTypes) { measurementId = "dummy"; - string rData = OMFData(**elem, measurementId).OMFdataVal(); - // Add into JSON string the OMF transformed Reading data - if (!rData.empty()) - { - jsonData << (pendingSeparator ? 
", " : "") << rData; + if (OMFData(payload, **elem, measurementId, pendingSeparator).hasData()) pendingSeparator = true; - } + // Add into JSON string the OMF transformed Reading data } - jsonData << "]"; + payload.append("]"); + const char *data = payload.coalesce(); Document doc; - doc.Parse(jsonData.str().c_str()); + doc.Parse(data); if (doc.HasParseError()) { + cout << data << "\n"; ASSERT_FALSE(true); } else @@ -429,6 +438,7 @@ TEST(OMF_transation, ReadingsWithUnsupportedTypes) // Array size is 1 ASSERT_EQ(doc.Size(), 2); } + delete[] data; } // Test the Asset Framework hierarchy fucntionlities diff --git a/tests/unit/C/plugins/common/test_omf_translation_piwebapi.cpp b/tests/unit/C/plugins/common/test_omf_translation_piwebapi.cpp index d72a1e7fa7..d5b467ea58 100644 --- a/tests/unit/C/plugins/common/test_omf_translation_piwebapi.cpp +++ b/tests/unit/C/plugins/common/test_omf_translation_piwebapi.cpp @@ -65,10 +65,11 @@ TEST(PIWEBAPI_OMF_transation, TwoTranslationsCompareResult) // Build a ReadingSet from JSON ReadingSet readingSet(pi_web_api_two_readings); - ostringstream jsonData; - jsonData << "["; + OMFBuffer payload; + payload.append('['); const OMF_ENDPOINT PI_SERVER_END_POINT = ENDPOINT_PIWEB_API; + bool sep = false; // Iterate over Readings via readingSet.getAllReadings() for (vector::const_iterator elem = readingSet.getAllReadings().begin(); @@ -76,13 +77,15 @@ TEST(PIWEBAPI_OMF_transation, TwoTranslationsCompareResult) ++elem) { // Add into JSON string the OMF transformed Reading data - jsonData << OMFData(**elem, CONTAINER_ID, PI_SERVER_END_POINT, AF_HIERARCHY_1LEVEL).OMFdataVal() << (elem < (readingSet.getAllReadings().end() - 1 ) ? ", " : ""); + sep = OMFData(payload, **elem, CONTAINER_ID, sep, PI_SERVER_END_POINT, AF_HIERARCHY_1LEVEL).hasData(); } - jsonData << "]"; + payload.append(']'); + const char *buf = payload.coalesce(); // Compare translation - ASSERT_EQ(jsonData.str(), pi_web_api_two_translated_readings); + ASSERT_STREQ(buf, pi_web_api_two_translated_readings); + delete[] buf; } diff --git a/tests/unit/python/.coveragerc b/tests/unit/python/.coveragerc new file mode 100644 index 0000000000..8b4adad8ff --- /dev/null +++ b/tests/unit/python/.coveragerc @@ -0,0 +1,16 @@ +[run] +omit = + # Ignore files + */__init__.py + */__template__.py + */setup.py + # Omit directory + */python/fledge/plugins/common/* + */python/fledge/plugins/filter/* + */python/fledge/plugins/north/* + */python/fledge/plugins/south/* + */python/fledge/plugins/notificationDelivery/* + */python/fledge/plugins/notificationRule/* + */python/fledge/services/south/* + */python/fledge/tasks/north/sending_process.py + tests/* diff --git a/tests/unit/python/.pytest.ini b/tests/unit/python/.pytest.ini new file mode 100644 index 0000000000..41b5713e8b --- /dev/null +++ b/tests/unit/python/.pytest.ini @@ -0,0 +1,3 @@ +[pytest] +minversion = 3.6.4 +norecursedirs=tests/unit/python/fledge/plugins/north tests/unit/python/fledge/services/south diff --git a/tests/unit/python/fledge/common/test_alert_manager.py b/tests/unit/python/fledge/common/test_alert_manager.py new file mode 100644 index 0000000000..ac7a7a26c5 --- /dev/null +++ b/tests/unit/python/fledge/common/test_alert_manager.py @@ -0,0 +1,186 @@ +import asyncio +import json +import sys + +from unittest.mock import MagicMock, patch +import pytest +from fledge.common.storage_client.storage_client import StorageClientAsync +from fledge.common.alert_manager import AlertManager + + +__author__ = "Ashish Jabble" +__copyright__ = "Copyright (c) 2024 Dianomic 
+__license__ = "Apache 2.0"
+__version__ = "${VERSION}"
+
+class TestAlertManager:
+    """ Alert Manager """
+    alert_manager = None
+
+    async def async_mock(self, ret_val):
+        return ret_val
+
+    def setup_method(self):
+        storage_client_mock = MagicMock(StorageClientAsync)
+        self.alert_manager = AlertManager(storage_client=storage_client_mock)
+        self.alert_manager.storage_client = storage_client_mock
+        #self.alert_manager.alerts = []
+
+    def teardown_method(self):
+        self.alert_manager.alerts = []
+        self.alert_manager = None
+
+    async def test_urgency(self):
+        urgencies = self.alert_manager.urgency
+        assert 4 == len(urgencies)
+        assert ['Critical', 'High', 'Normal', 'Low'] == list(urgencies.keys())
+
+    @pytest.mark.parametrize("urgency_index, urgency", [
+        ('1', 'UNKNOWN'),
+        ('High', 'UNKNOWN'),
+        (0, 'UNKNOWN'),
+        (1, 'Critical'),
+        (2, 'High'),
+        (3, 'Normal'),
+        (4, 'Low')
+    ])
+    async def test__urgency_name_by_value(self, urgency_index, urgency):
+        value = self.alert_manager._urgency_name_by_value(value=urgency_index)
+        assert urgency == value
+
+    @pytest.mark.parametrize("storage_result, response", [
+        ({"rows": [], 'count': 0}, []),
+        ({"rows": [{"key": "RW", "message": "The Service RW restarted 1 times", "urgency": 3,
+                    "timestamp": "2024-03-01 09:40:34.482"}], 'count': 1}, [{"key": "RW", "message":
+            "The Service RW restarted 1 times", "urgency": "Normal", "timestamp": "2024-03-01 09:40:34.482"}])
+    ])
+    async def test_get_all(self, storage_result, response):
+        rv = await self.async_mock(storage_result) if sys.version_info.major == 3 and sys.version_info.minor >= 8 \
+            else asyncio.ensure_future(self.async_mock(storage_result))
+        with patch.object(self.alert_manager.storage_client, 'query_tbl_with_payload', return_value=rv
+                          ) as patch_query_tbl:
+            result = await self.alert_manager.get_all()
+            assert response == result
+        args, _ = patch_query_tbl.call_args
+        assert 'alerts' == args[0]
+        assert {"return": ["key", "message", "urgency", {"column": "ts", "alias": "timestamp",
+                "format": "YYYY-MM-DD HH24:MI:SS.MS"}]} == json.loads(args[1])
+
+
+    async def test_bad_get_all(self):
+        storage_result = {"rows": [{}], 'count': 1}
+        rv = await self.async_mock(storage_result) if sys.version_info.major == 3 and sys.version_info.minor >= 8 \
+            else asyncio.ensure_future(self.async_mock(storage_result))
+        with patch.object(self.alert_manager.storage_client, 'query_tbl_with_payload', return_value=rv
+                          ) as patch_query_tbl:
+            with pytest.raises(Exception) as ex:
+                await self.alert_manager.get_all()
+            assert "'key'" == str(ex.value)
+        args, _ = patch_query_tbl.call_args
+        assert 'alerts' == args[0]
+        assert {"return": ["key", "message", "urgency", {"column": "ts", "alias": "timestamp",
+                "format": "YYYY-MM-DD HH24:MI:SS.MS"}]} == json.loads(args[1])
+
+    async def test_get_by_key_when_in_cache(self):
+        self.alert_manager.alerts = [{"key": "RW", "message": "The Service RW restarted 1 times", "urgency": 3,
+                                      "timestamp": "2024-03-01 09:40:34.482"}]
+        key = "RW"
+        result = await self.alert_manager.get_by_key(key)
+        assert self.alert_manager.alerts[0] == result
+
+    async def test_get_by_key_not_found(self):
+        key = "Sine"
+        storage_result = {"rows": [], 'count': 1}
+        rv = await self.async_mock(storage_result) if sys.version_info.major == 3 and sys.version_info.minor >= 8 \
+            else asyncio.ensure_future(self.async_mock(storage_result))
+        with patch.object(self.alert_manager.storage_client, 'query_tbl_with_payload', return_value=rv
+                          ) as patch_query_tbl:
+            with pytest.raises(Exception) as ex:
+                await self.alert_manager.get_by_key(key)
+            assert ex.type is KeyError
+            assert "'{} alert not found.'".format(key) == str(ex.value)
+        args, _ = patch_query_tbl.call_args
+        assert 'alerts' == args[0]
+        assert {"return": ["key", "message", "urgency", {"column": "ts", "alias": "timestamp",
+                "format": "YYYY-MM-DD HH24:MI:SS.MS"}],
+                "where": {"column": "key", "condition": "=", "value": key}} == json.loads(args[1])
+
+    async def test_get_by_key_when_not_in_cache(self):
+        key = 'update'
+        storage_result = {"rows": [{"key": "RW", "message": "The Service RW restarted 1 times", "urgency": 3,
+                                    "timestamp": "2024-03-01 09:40:34.482"}], 'count': 1}
+        rv = await self.async_mock(storage_result) if sys.version_info.major == 3 and sys.version_info.minor >= 8 \
+            else asyncio.ensure_future(self.async_mock(storage_result))
+        with patch.object(self.alert_manager.storage_client, 'query_tbl_with_payload', return_value=rv
+                          ) as patch_query_tbl:
+            result = await self.alert_manager.get_by_key(key)
+            storage_result['rows'][0]['urgency'] = 'Normal'
+            assert storage_result['rows'][0] == result
+        args, _ = patch_query_tbl.call_args
+        assert 'alerts' == args[0]
+        assert {"return": ["key", "message", "urgency", {"column": "ts", "alias": "timestamp",
+                "format": "YYYY-MM-DD HH24:MI:SS.MS"}],
+                "where": {"column": "key", "condition": "=", "value": key}} == json.loads(args[1])
+
+    async def test_add(self):
+        params = {"key": "update", 'message': 'New version available', 'urgency': 'High'}
+        storage_result = {'rows_affected': 1, "response": "inserted"}
+        rv = await self.async_mock(storage_result) if sys.version_info.major == 3 and sys.version_info.minor >= 8 \
+            else asyncio.ensure_future(self.async_mock(storage_result))
+        with patch.object(self.alert_manager.storage_client, 'insert_into_tbl', return_value=rv
+                          ) as insert_tbl_patch:
+            result = await self.alert_manager.add(params)
+            assert 'alert' in result
+            assert params == result['alert']
+        args, _ = insert_tbl_patch.call_args
+        assert 'alerts' == args[0]
+        assert params == json.loads(args[1])
+
+    async def test_bad_add(self):
+        params = {"key": "update", 'message': 'New version available', 'urgency': 'High'}
+        storage_result = {}
+        rv = await self.async_mock(storage_result) if sys.version_info.major == 3 and sys.version_info.minor >= 8 \
+            else asyncio.ensure_future(self.async_mock(storage_result))
+        with patch.object(self.alert_manager.storage_client, 'insert_into_tbl', return_value=rv
+                          ) as insert_tbl_patch:
+            with pytest.raises(Exception) as ex:
+                await self.alert_manager.add(params)
+            assert "'response'" == str(ex.value)
+        args, _ = insert_tbl_patch.call_args
+        assert 'alerts' == args[0]
+        assert params == json.loads(args[1])
+
+    async def test_delete(self):
+        storage_result = {'rows_affected': 1, "response": "deleted"}
+        rv = await self.async_mock(storage_result) if sys.version_info.major == 3 and sys.version_info.minor >= 8 \
+            else asyncio.ensure_future(self.async_mock(storage_result))
+        with patch.object(self.alert_manager.storage_client, 'delete_from_tbl', return_value=rv
+                          ) as delete_tbl_patch:
+            result = await self.alert_manager.delete()
+            assert 'alert' in result
+            assert "Delete all alerts." 
== result + args, _ = delete_tbl_patch.call_args + assert 'alerts' == args[0] + + async def test_delete_by_key(self): + key = "RW" + self.alert_manager.alerts = [{"key": key, "message": "The Service RW restarted 1 times", "urgency": 3, + "timestamp": "2024-03-01 09:40:34.482"}] + storage_result = {'rows_affected': 1, "response": "deleted"} + rv = await self.async_mock(storage_result) if sys.version_info.major == 3 and sys.version_info.minor >= 8 \ + else asyncio.ensure_future(self.async_mock(storage_result)) + with patch.object(self.alert_manager.storage_client, 'delete_from_tbl', return_value=rv + ) as delete_tbl_patch: + result = await self.alert_manager.delete(key) + assert 'alert' in result + assert "{} alert is deleted.".format(key) == result + args, _ = delete_tbl_patch.call_args + assert 'alerts' == args[0] + assert {"where": {"column": "key", "condition": "=", "value": key}} == json.loads(args[1]) + + async def test_bad_delete(self): + with pytest.raises(Exception) as ex: + await self.alert_manager.delete("Update") + assert ex.type is KeyError + assert "" == str(ex.value) + diff --git a/tests/unit/python/fledge/common/test_configuration_manager.py b/tests/unit/python/fledge/common/test_configuration_manager.py index 3587e48c00..8f64f018d2 100644 --- a/tests/unit/python/fledge/common/test_configuration_manager.py +++ b/tests/unit/python/fledge/common/test_configuration_manager.py @@ -34,15 +34,17 @@ def reset_singleton(self): ConfigurationManagerSingleton._shared_state = {} def test_supported_validate_type_strings(self): - expected_types = ['IPv4', 'IPv6', 'JSON', 'URL', 'X509 certificate', 'boolean', 'code', 'enumeration', 'float', 'integer', - 'northTask', 'password', 'script', 'string', 'ACL', 'bucket'] + expected_types = ['IPv4', 'IPv6', 'JSON', 'URL', 'X509 certificate', 'boolean', 'code', 'enumeration', + 'float', 'integer', 'northTask', 'password', 'script', 'string', 'ACL', 'bucket', + 'list', 'kvlist'] assert len(expected_types) == len(_valid_type_strings) assert sorted(expected_types) == _valid_type_strings def test_supported_optional_items(self): - assert 11 == len(_optional_items) - assert ['deprecated', 'displayName', 'group', 'length', 'mandatory', 'maximum', 'minimum', 'order', - 'readonly', 'rule', 'validity'] == _optional_items + expected_types = ['deprecated', 'displayName', 'group', 'length', 'mandatory', 'maximum', 'minimum', 'order', + 'readonly', 'rule', 'validity', 'listSize'] + assert len(expected_types) == len(_optional_items) + assert sorted(expected_types) == _optional_items def test_constructor_no_storage_client_defined_no_storage_client_passed( self, reset_singleton): @@ -577,7 +579,285 @@ async def test__validate_category_val_bucket_type_bad(self, config, exc_name, re set_value_val_from_default_val=False) assert excinfo.type is exc_name assert reason == str(excinfo.value) - + + @pytest.mark.parametrize("config, exc_name, reason", [ + ({ITEM_NAME: {"description": "test description", "type": "list", "default": "A"}}, KeyError, + "'For {} category, items KV pair must be required for item name {}.'".format(CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "test description", "type": "list", "default": "A", "items": []}}, TypeError, + "For {} category, entry value must be a string for item name {} and entry name items; " + "got ".format(CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "test description", "type": "list", "default": "A", "items": "str"}}, ValueError, + "For {} category, items value should either be in string, float, integer, object or 
enumeration for " + "item name {}".format(CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "test description", "type": "list", "default": "A", "items": "float"}}, TypeError, + "For {} category, default value should be passed array list in string format for item name {}".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "test", "type": "list", "default": "[\"AJ\"]", "items": "float"}}, ValueError, + "For {} category, all elements should be of same type in default value for item name {}".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "test", "type": "list", "default": "[\"13\", \"AJ\"]", "items": "integer"}}, + ValueError, "For {} category, all elements should be of same type in default " + "value for item name {}".format(CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "test", "type": "list", "default": "[\"13\", \"1.04\"]", "items": "integer"}}, + ValueError, "For {} category, all elements should be of same type in default " + "value for item name {}".format(CAT_NAME, ITEM_NAME)), + ({"include": {"description": "multiple", "type": "list", "default": "[\"135\", \"1111\"]", "items": "integer", + "value": "1"}, + ITEM_NAME: {"description": "test", "type": "list", "default": "[\"13\", \"1\"]", "items": "float"}}, + ValueError, "For {} category, all elements should be of same type in default " + "value for item name {}".format(CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "test", "type": "list", "default": "[]", "items": "float", "listSize": 1}}, + TypeError, "For {} category, listSize type must be a string for item name {}; got ".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "test", "type": "list", "default": "[]", "items": "float", "listSize": ""}}, + ValueError, "For {} category, listSize value must be an integer value for item name {}".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "test", "type": "list", "default": "[\"10.12\", \"0.9\"]", "items": "float", + "listSize": "1"}}, ValueError, "For {} category, default value array list size limit to 1 for " + "item name {}".format(CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "test", "type": "list", "default": "[\"1\"]", "items": "integer", + "listSize": "0"}}, ValueError, "For {} category, default value array list size limit to 0 " + "for item name {}".format(CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "test", "type": "list", "default": "[\"6e7777\", \"1.79e+308\", \"1.0\", \"0.9\"]", + "items": "float", "listSize": "3"}}, ValueError, + "For {} category, default value array list size limit to 3 for item name {}".format(CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "test", "type": "list", "default": "[\"1\", \"2\", \"1\"]", "items": "integer", + "listSize": "3"}}, ValueError, "For {} category, default value array elements are not unique " + "for item name {}".format(CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "test", "type": "list", "default": "[\"a\", \"b\", \"ab\", \"a\"]", + "items": "string"}}, ValueError, "For {} category, default value array elements are not unique " + "for item name {}".format(CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "test", "type": "list", "default": "{\"key\": \"1.0\"}", "items": "object", + "property": {}}}, KeyError, "'For {} category, properties KV pair must be required for item name " + "{}'".format(CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "test", "type": "list", "default": "{\"key\": \"1.0\"}", "items": "object", + "properties": 1}}, ValueError, + "For {} category, 
properties must be JSON object for item name {}; got ".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "test", "type": "list", "default": "{\"key\": \"1.0\"}", "items": "object", + "properties": ""}}, ValueError, + "For {} category, properties must be JSON object for item name {}; got ".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "test", "type": "list", "default": "{\"key\": \"1.0\"}", "items": "object", + "properties": {}}}, ValueError, + "For {} category, properties JSON object cannot be empty for item name {}".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "test", "type": "list", "default": "[\"integer\"]", + "items": "enumeration"}}, KeyError, + "'For {} category, options required for item name {}'".format(CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "test", "type": "list", "default": "[\"integer\"]", + "items": "enumeration", "options": 1}}, TypeError, + "For {} category, entry value must be a list for item name {} and entry name items; got ".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "test", "type": "list", "default": "[\"integer\"]", + "items": "enumeration", "options": []}}, ValueError, + "For {} category, options cannot be empty list for item_name {} and entry_name items".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "test", "type": "list", "default": "[\"integer\"]", + "items": "enumeration", "options": ["integer"], "listSize": 1}}, TypeError, + "For {} category, listSize type must be a string for item name {}; got ".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "test", "type": "list", "default": "[\"integer\"]", + "items": "enumeration", "options": ["integer"], "listSize": "blah"}}, ValueError, + "For {} category, listSize value must be an integer value for item name {}".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", "default": "A"}}, KeyError, + "'For {} category, items KV pair must be required for item name {}.'".format(CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", "default": "A", "items": []}}, TypeError, + "For {} category, entry value must be a string for item name {} and entry name items; " + "got ".format(CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", "default": "A", "items": "str"}}, ValueError, + "For {} category, items value should either be in string, float, integer, object or enumeration for " + "item name {}".format(CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", "default": "A", "items": "string"}}, TypeError, + "For {} category, default value should be passed KV pair list in string format for item name {}".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", "default": "{\"key\"}", "items": "string"}}, + TypeError, "For {} category, KV pair invalid in default value for item name {}".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", "default": "{\"key\": \"1\"}", "items": "float"}}, + ValueError, "For {} category, all elements should be of same type in default value for " + "item name {}".format(CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", "default": "{\"key\": \"AJ\"}", + "items": "integer"}}, ValueError, + "For {} category, all elements should be of same type in default value for item name {}".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": 
"expression", "type": "kvlist", "default": "{\"key1\": \"13\", \"key2\": \"1.04\"}" + , "items": "integer"}}, ValueError, "For {} category, all elements should be of same type in " + "default value for item name {}".format(CAT_NAME, ITEM_NAME)), + ({"include": {"description": "expression", "type": "kvlist", + "default": "{\"key1\": \"135\", \"key2\": \"1111\"}", "items": "integer", "value": "1"}, + ITEM_NAME: {"description": "expression", "type": "kvlist", + "default": "{\"key1\": \"135\", \"key2\": \"1111\"}", "items": "float"}}, ValueError, + "For {} category, all elements should be of same type in default value for item name " + "{}".format(CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", "default": "[]", "items": "float", "listSize": 1}}, + TypeError, "For {} category, listSize type must be a string for item name {}; got ".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", "default": "[]", "items": "float", + "listSize": "blah"}}, ValueError, "For {} category, listSize value must be an integer value for " + "item name {}".format(CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", "default": "[\"1\"]", "items": "float", + "listSize": "1"}}, TypeError, "For {} category, KV pair invalid in default value for item name " + "{}".format(CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", "default": "{\"1\"}", "items": "float", + "listSize": "1"}}, TypeError, "For {} category, KV pair invalid in default value for item name " + "{}".format(CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", "default": "{\"key\": {} }", "items": "float", + "listSize": "1"}}, ValueError, "For {} category, all elements should be of same " + "type in default value for item name {}".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", + "default": "{\"key\": \"1.0\", \"key2\": \"val2\"}", "items": "float", "listSize": "1"}}, + ValueError, "For {} category, default value KV pair list size limit to 1 for item name {}".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", + "default": "{\"key\": \"1.0\", \"key\": \"val2\"}", "items": "float", "listSize": "2"}}, + ValueError, "For category {}, duplicate KV pair found for item name {}".format(CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", + "default": "{\"key\": \"1.0\", \"key1\": \"val2\"}", "items": "float", "listSize": "2"}}, + ValueError, "For {} category, all elements should be of same type in default value for " + "item name {}".format(CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", + "default": "{\"key\": \"1.0\", \"key1\": \"val2\", \"key3\": \"val2\"}", "items": "float", + "listSize": "2"}}, ValueError, "For {} category, default value KV pair list size limit to 2 for" + " item name {}".format(CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", "default": "{\"key\": \"1.0\"}", "items": "float", + "listSize": "0"}}, ValueError, "For {} category, default value KV pair list size limit to 0 " + "for item name {}".format(CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", "default": "{\"key\": \"1.0\"}", "items": "object" + }}, KeyError, "'For {} category, properties KV pair must be required for item name {}'".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: 
{"description": "expression", "type": "kvlist", "default": "{\"key\": \"1.0\"}", "items": "object", + "property": {}}}, KeyError, "'For {} category, properties KV pair must be required for item name " + "{}'".format(CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", "default": "{\"key\": \"1.0\"}", "items": "object", + "properties": 1}}, ValueError, + "For {} category, properties must be JSON object for item name {}; got ".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", "default": "{\"key\": \"1.0\"}", "items": "object", + "properties": ""}}, ValueError, + "For {} category, properties must be JSON object for item name {}; got ".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", "default": "{\"key\": \"1.0\"}", "items": "object", + "properties": {}}}, ValueError, + "For {} category, properties JSON object cannot be empty for item name {}".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", "default": "{\"key\": \"1.0\"}", "items": "object", + "properties": {"width": 1}}}, TypeError, + "For {} category, Properties must be a JSON object for width key for item name {}".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", "default": "{\"width\": \"12\"}", "items": + "object", "properties": {"width": {}}}}, ValueError, + "For {} category, width properties cannot be empty for item name {}".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", "default": "{\"width\": \"12\"}", "items": + "object","properties": {"width": {"type": ""}}}}, ValueError, + "For {} category, width properties must have type, description, default keys for item name {}".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", "default": "{\"width\": \"12\"}", "items": + "object", "properties": {"width": {"description": ""}}}}, ValueError, + "For {} category, width properties must have type, description, default keys for item name {}".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", "default": "{\"width\": \"12\"}", "items": + "object", "properties": {"width": {"default": ""}}}}, ValueError, + "For {} category, width properties must have type, description, default keys for item name {}".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", "default": "{\"width\": \"12\"}", "items": + "object", "properties": {"width": {"type": "", "description": ""}}}}, ValueError, + "For {} category, width properties must have type, description, default keys for item name {}".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", "default": "{\"width\": \"12\"}", "items": + "object", "properties": {"width": {"type": "", "default": ""}}}}, ValueError, + "For {} category, width properties must have type, description, default keys for item name {}".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", "default": "{\"width\": \"12\"}", "items": + "object", "properties": {"width": {"description": "", "default": ""}}}}, ValueError, + "For {} category, width properties must have type, description, default keys for item name {}".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", "default": "{\"key1\": \"integer\"}", + "items": "enumeration"}}, 
KeyError, + "'For {} category, options required for item name {}'".format(CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", "default": "{\"key1\": \"integer\"}", + "items": "enumeration", "options": 1}}, TypeError, + "For {} category, entry value must be a list for item name {} and entry name items; got ".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", "default": "{\"key1\": \"integer\"}", + "items": "enumeration", "options": []}}, ValueError, + "For {} category, options cannot be empty list for item_name {} and entry_name items".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", "default": "{\"key1\": \"integer\"}", + "items": "enumeration", "options": ["integer"], "listSize": 1}}, TypeError, + "For {} category, listSize type must be a string for item name {}; got ".format( + CAT_NAME, ITEM_NAME)), + ({ITEM_NAME: {"description": "expression", "type": "kvlist", "default": "{\"key1\": \"integer\"}", + "items": "enumeration", "options": ["integer"], "listSize": "blah"}}, ValueError, + "For {} category, listSize value must be an integer value for item name {}".format( + CAT_NAME, ITEM_NAME)) + ]) + async def test__validate_category_val_list_type_bad(self, config, exc_name, reason): + storage_client_mock = MagicMock(spec=StorageClientAsync) + c_mgr = ConfigurationManager(storage_client_mock) + with pytest.raises(Exception) as excinfo: + await c_mgr._validate_category_val(category_name=CAT_NAME, category_val=config, + set_value_val_from_default_val=False) + assert excinfo.type is exc_name + assert reason == str(excinfo.value) + + @pytest.mark.parametrize("config", [ + {"include": {"description": "A list of variables to include", "type": "list", "items": "string", + "default": "[]"}}, + {"include": {"description": "A list of variables to include", "type": "list", "items": "string", + "default": "[\"first\", \"second\"]"}}, + {"include": {"description": "A list of variables to include", "type": "list", "items": "integer", + "default": "[\"1\", \"0\"]"}}, + {"include": {"description": "A list of variables to include", "type": "list", "items": "float", + "default": "[\"0.5\", \"123.57\"]"}}, + {"include": {"description": "A list of variables to include", "type": "list", "items": "float", + "default": "[\".5\", \"1.79e+308\"]", "listSize": "2"}}, + {"include": {"description": "A list of variables to include", "type": "list", "items": "string", + "default": "[\"var1\", \"var2\"]", "listSize": "2"}}, + {"include": {"description": "A list of variables to include", "type": "list", "items": "string", + "default": "[]", "listSize": "1"}}, + {"include": {"description": "A list of variables to include", "type": "list", "items": "integer", + "default": "[\"10\", \"100\", \"200\", \"300\"]", "listSize": "4"}}, + {"include": {"description": "A list of variables to include", "type": "list", "items": "object", + "default": "[{\"datapoint\": \"voltage\"}]", + "properties": {"datapoint": {"description": "The datapoint name to create", "displayName": + "Datapoint", "type": "string", "default": ""}}}}, + {"include": {"description": "A simple list", "type": "list", "default": "[\"integer\", \"float\"]", + "items": "enumeration", "options": ["integer", "float"]}}, + {"include": {"description": "A list of expressions and values", "type": "kvlist", "items": "string", + "default": "{}", "order": "1", "displayName": "labels"}}, + {"include": {"description": "A list of expressions and values", 
"type": "kvlist", "items": "string", + "default": "{\"key\": \"value\"}", "order": "1", "displayName": "labels"}}, + {"include": {"description": "A list of expressions and values", "type": "kvlist", "items": "integer", + "default": "{\"key\": \"13\"}", "order": "1", "displayName": "labels"}}, + {"include": {"description": "A list of expressions and values", "type": "kvlist", "items": "float", + "default": "{\"key\": \"13.13\"}", "order": "1", "displayName": "labels"}}, + {"include": {"description": "A list of expressions and values", "type": "kvlist", "items": "string", + "default": "{\"key\": \"value\"}", "order": "1", "displayName": "labels", "listSize": "1"}}, + {"include": {"description": "A list of expressions and values", "type": "kvlist", "items": "integer", + "default": "{\"key\": \"13\"}", "order": "1", "displayName": "labels", "listSize": "1"}}, + {"include": {"description": "A list of expressions and values", "type": "kvlist", "items": "float", + "default": "{\"key\": \"13.13\"}", "order": "1", "displayName": "labels", "listSize": "1"}}, + {"include": {"description": "A list of expressions and values", "type": "kvlist", "items": "float", + "default": "{}", "order": "1", "displayName": "labels", "listSize": "3"}}, + {"include": {"description": "A list of expressions and values", "type": "kvlist", "items": "object", + "default": "{\"register\": {\"width\": \"2\"}}", "order": "1", "displayName": "labels", + "properties": {"width": {"description": "Number of registers to read", "displayName": "Width", + "type": "integer", "maximum": "4", "default": "1"}}}}, + {"include": {"description": "A list of expressions and values ", "type": "kvlist", "default": + "{\"key1\": \"integer\", \"key2\": \"float\"}", "items": "enumeration", "options": ["integer", "float"]}} + ]) + async def test__validate_category_val_list_type_good(self, config): + storage_client_mock = MagicMock(spec=StorageClientAsync) + c_mgr = ConfigurationManager(storage_client_mock) + res = await c_mgr._validate_category_val(category_name=CAT_NAME, category_val=config, + set_value_val_from_default_val=True) + assert config['include']['default'] == res['include']['default'] + assert config['include']['default'] == res['include']['value'] + @pytest.mark.parametrize("_type, value, from_default_val", [ ("integer", " ", False), ("string", "", False), @@ -586,13 +866,21 @@ async def test__validate_category_val_bucket_type_bad(self, config, exc_name, re ("JSON", " ", False), ("bucket", "", False), ("bucket", " ", False), + ("list", "", False), + ("list", " ", False), + ("kvlist", "", False), + ("kvlist", " ", False), ("integer", " ", True), ("string", "", True), ("string", " ", True), ("JSON", "", True), ("JSON", " ", True), ("bucket", "", True), - ("bucket", " ", True) + ("bucket", " ", True), + ("list", "", True), + ("list", " ", True), + ("kvlist", "", True), + ("kvlist", " ", True) ]) async def test__validate_category_val_with_optional_mandatory(self, _type, value, from_default_val): storage_client_mock = MagicMock(spec=StorageClientAsync) @@ -601,6 +889,9 @@ async def test__validate_category_val_with_optional_mandatory(self, _type, value "mandatory": "true"}} if _type == "bucket": test_config[ITEM_NAME]['properties'] = {"key": "foo"} + elif _type in ("list", "kvlist"): + test_config[ITEM_NAME]['items'] = "string" + with pytest.raises(Exception) as excinfo: await c_mgr._validate_category_val(category_name=CAT_NAME, category_val=test_config, set_value_val_from_default_val=from_default_val) @@ -3197,7 +3488,9 @@ async def 
test__clean(self, item_type, item_val, result): ("URL", "coaps://host:6683", True), ("password", "not implemented", None), ("X509 certificate", "not implemented", None), - ("northTask", "valid_north_task", True) + ("northTask", "valid_north_task", True), + ("listSize", "5", True), + ("listSize", "0", True) ]) async def test__validate_type_value(self, item_type, item_val, result): storage_client_mock = MagicMock(spec=StorageClientAsync) @@ -3229,7 +3522,9 @@ async def test__validate_type_value(self, item_type, item_val, result): ("JSON", None), ("URL", "blah"), ("URL", "example.com"), - ("URL", "123:80") + ("URL", "123:80"), + ("listSize", "Blah"), + ("listSize", "None") # TODO: can not use urlopen hence we may want to check # result.netloc with some regex, but limited # ("URL", "http://somevalue.a"), @@ -3570,34 +3865,176 @@ async def async_mock(return_value): assert 1 == log_exc.call_count log_exc.assert_called_once_with('Unable to set optional %s entry based on category_name %s and item_name %s and value_item_entry %s', optional_key_name, 'catname', 'itemname', new_value_entry) - @pytest.mark.parametrize("new_value_entry, storage_value_entry, exc_msg", [ + @pytest.mark.parametrize("new_value_entry, storage_value_entry, exc_msg, exc_type", [ ("Fledge", {'default': 'FOG', 'length': '3', 'displayName': 'Length Test', 'value': 'fog', 'type': 'string', - 'description': 'Test value '}, 'beyond the length 3'), + 'description': 'Test value '}, + 'For config item {} you cannot set the new value, beyond the length 3', TypeError), ("0", {'order': '4', 'default': '10', 'minimum': '10', 'maximum': '19', 'displayName': 'RangeMin Test', - 'value': '15', 'type': 'integer', 'description': 'Test value'}, 'beyond the range (10,19)'), + 'value': '15', 'type': 'integer', 'description': 'Test value'}, + 'For config item {} you cannot set the new value, beyond the range (10,19)', TypeError), ("20", {'order': '4', 'default': '10', 'minimum': '10', 'maximum': '19', 'displayName': 'RangeMax Test', - 'value': '19', 'type': 'integer', 'description': 'Test value'}, 'beyond the range (10,19)'), + 'value': '19', 'type': 'integer', 'description': 'Test value'}, + 'For config item {} you cannot set the new value, beyond the range (10,19)', TypeError), ("1", {'order': '5', 'default': '2', 'minimum': '2', 'displayName': 'MIN', 'value': '10', 'type': 'integer', - 'description': 'Test value '}, 'below 2'), + 'description': 'Test value '}, 'For config item {} you cannot set the new value, below 2', TypeError), ("11", {'default': '10', 'maximum': '10', 'displayName': 'MAX', 'value': '10', 'type': 'integer', - 'description': 'Test value'}, 'above 10'), + 'description': 'Test value'}, 'For config item {} you cannot set the new value, above 10', TypeError), ("19.0", {'default': '19.3', 'minimum': '19.1', 'maximum': '19.5', 'displayName': 'RangeMin Test', - 'value': '19.1', 'type': 'float', 'description': 'Test val'}, 'beyond the range (19.1,19.5)'), + 'value': '19.1', 'type': 'float', 'description': 'Test val'}, + 'For config item {} you cannot set the new value, beyond the range (19.1,19.5)', TypeError), ("19.6", {'default': '19.4', 'minimum': '19.1', 'maximum': '19.5', 'displayName': 'RangeMax Test', - 'value': '19.5', 'type': 'float', 'description': 'Test val'}, 'beyond the range (19.1,19.5)'), + 'value': '19.5', 'type': 'float', 'description': 'Test val'}, + 'For config item {} you cannot set the new value, beyond the range (19.1,19.5)', TypeError), ("20", {'order': '8', 'default': '10.1', 'maximum': '19.8', 'displayName': 
'MAX Test', 'value': '10.1', - 'type': 'float', 'description': 'Test value'}, 'above 19.8'), + 'type': 'float', 'description': 'Test value'}, + 'For config item {} you cannot set the new value, above 19.8', TypeError), ("0.7", {'order': '9', 'default': '0.9', 'minimum': '0.8', 'displayName': 'MIN Test', 'value': '0.9', - 'type': 'float', 'description': 'Test value'}, 'below 0.8') + 'type': 'float', 'description': 'Test value'}, + 'For config item {} you cannot set the new value, below 0.8', TypeError), + ("", {'description': 'Simple list', 'type': 'list', 'default': '[\"1\", \"1\"]', 'order': '2', + 'items': 'integer', 'listSize': '2', 'value': '[\"1\", \"2\"]'}, + "For config item {} value should be passed array list in string format", TypeError), + ("[\"5\", \"7\", \"9\"]", {'description': 'Simple list', 'type': 'list', 'default': '[\"1\", \"3\"]', + 'order': '2', 'items': 'integer', 'listSize': '2', 'value': '[\"5\", \"7\"]'}, + "For config item {} value array list size limit to 2", TypeError), + ("", {'description': 'Simple list', 'type': 'list', 'default': '[\"foo\"]', 'order': '2', + 'items': 'string', 'listSize': '1', 'value': '[\"bar\"]'}, + "For config item {} value should be passed array list in string format", TypeError), + ("", {'description': 'Simple list', 'type': 'list', 'default': '[\"foo\"]', 'order': '2', + 'items': 'string', 'listSize': '1', 'value': '[\"bar\"]'}, + "For config item {} value should be passed array list in string format", TypeError), + ("[\"foo\", \"bar\"]", {'description': 'Simple list', 'type': 'list', 'default': '[\"foo\"]', 'order': '2', + 'items': 'string', 'listSize': '1', 'value': '[\"bar\"]'}, + "For config item {} value array list size limit to 1", TypeError), + ("[\"1.4\", \".03\", \"50.67\", \"13.13\"]", + {'description': 'Simple list', 'type': 'list', 'default': '[\"1.4\", \".03\", \"50.67\"]', 'order': '2', + 'items': 'float', 'listSize': '3', 'value': '[\"1.4\", \".03\", \"50.67\"]'}, + "For config item {} value array list size limit to 3", TypeError), + ("[\"10\", \"10\"]", {'description': 'Simple list', 'type': 'list', 'default': '[\"1\", \"2\"]', 'order': '2', + 'items': 'integer', 'value': '[\"3\", \"4\"]'}, "For config item {} elements are not unique", ValueError), + ("[\"foo\", \"foo\"]", {'description': 'Simple list', 'type': 'list', 'default': '[\"a\", \"c\"]', 'order': '2', + 'items': 'string', 'value': '[\"abc\", \"def\"]'}, + "For config item {} elements are not unique", ValueError), + ("[\".002\", \".002\"]", {'description': 'Simple list', 'type': 'list', 'default': '[\"1.2\", \"1.4\"]', + 'order': '2', 'items': 'float', 'value': '[\"5.67\", \"12.0\"]'}, + "For config item {} elements are not unique", ValueError), + ("[\"10\", \"foo\"]", {'description': 'Simple list', 'type': 'list', 'default': '[\"1\", \"2\"]', 'order': '2', + 'items': 'integer', 'value': '[\"3\", \"4\"]'}, + "For config item {} all elements should be of same integer type", ValueError), + ("[\"foo\", 1]", {'description': 'Simple list', 'type': 'list', 'default': '[\"a\", \"c\"]', 'order': '2', + 'items': 'string', 'value': '[\"abc\", \"def\"]'}, + "For config item {} all elements should be of same string type", ValueError), + ("[\"1\", \"2\"]", {'description': 'Simple list', 'type': 'list', 'default': '[\"1.2\", \"1.4\"]', + 'order': '2', 'items': 'float', 'value': '[\"5.67\", \"12.0\"]'}, + "For config item {} all elements should be of same float type", ValueError), + ("[\"100\", \"2\"]", {'description': 'Simple list', 'type': 'list', 'default': '[\"34\", 
\"48\"]', 'order': '2', + 'items': 'integer', 'listSize': '2', 'value': '[\"34\", \"48\"]', 'minimum': '20'}, + "For config item {} you cannot set the new value, below 20", ValueError), + ("[\"50\", \"49\", \"51\"]", {'description': 'Simple list', 'type': 'list', 'default': '[\"34\", \"48\"]', + 'order': '2', 'items': 'integer', 'listSize': '3', + 'value': '[\"34\", \"48\"]', 'maximum': '50'}, + "For config item {} you cannot set the new value, above 50", ValueError), + ("[\"50\", \"49\", \"46\"]", {'description': 'Simple list', 'type': 'list', 'default': + '[\"50\", \"48\", \"49\"]', 'order': '2', 'items': 'integer', 'listSize': '3', + 'value': '[\"47\", \"48\", \"49\"]', 'maximum': '50', 'minimum': '47'}, + "For config item {} you cannot set the new value, beyond the range (47,50)", ValueError), + ("[\"50\", \"49\", \"51\"]", {'description': 'Simple list', 'type': 'list', 'default': + '[\"50\", \"48\", \"49\"]', 'order': '2', 'items': 'integer', 'listSize': '3', + 'value': '[\"47\", \"48\", \"49\"]', 'maximum': '50', 'minimum': '47'}, + "For config item {} you cannot set the new value, beyond the range (47,50)", ValueError), + ("[\"foo\", \"bars\"]", {'description': 'Simple list', 'type': 'list', 'default': '[\"a1\", \"c1\"]', + 'order': '2', 'items': 'string', 'value': '[\"ab\", \"de\"]', 'listSize': '2', + 'length': '3'}, + "For config item {} you cannot set the new value, beyond the length 3", ValueError), + ("[\"2.6\", \"1.002\"]", {'description': 'Simple list', 'type': 'list', 'default': '[\"5.2\", \"2.5\"]', + 'order': '2', 'items': 'float', 'value': '[\"5.67\", \"2.5\"]', 'minimum': '2.5', + 'listSize': '2'}, "For config item {} you cannot set the new value, below 2.5", + ValueError), + ("[\"2.6\", \"1.002\"]", {'description': 'Simple list', 'type': 'list', 'default': '[\"2.2\", \"2.5\"]', + 'order': '2', 'items': 'float', 'value': '[\"1.67\", \"2.5\"]', 'maximum': '2.5', + 'listSize': '2'}, "For config item {} you cannot set the new value, above 2.5", + ValueError), + ("[\"2.6\"]", {'description': 'Simple list', 'type': 'list', 'default': '[\"2.2\"]', 'order': '2', + 'items': 'float', 'value': '[\"2.5\"]', 'listSize': '1', 'minimum': '2', 'maximum': '2.5'}, + "For config item {} you cannot set the new value, beyond the range (2,2.5)", ValueError), + ("[\"1.999\"]", {'description': 'Simple list', 'type': 'list', 'default': '[\"2.2\"]', 'order': '2', + 'items': 'float', 'value': '[\"2.5\"]', 'listSize': '1', 'minimum': '2', 'maximum': '2.5'}, + "For config item {} you cannot set the new value, beyond the range (2,2.5)", ValueError), + ("", {'description': 'expression', 'type': 'kvlist', 'default': '{\"key\": \"val\"}', 'order': '2', + 'items': 'integer', 'listSize': '1', 'value': '{\"key\": \"val\"}'}, + "For config item {} value should be passed KV pair list in string format", TypeError), + ("{\"key\": \"1\", \"key2\": \"2\"}", + {'description': 'expression', 'type': 'kvlist', 'default': '{\"key\": \"1\"}', 'order': '2', + 'items': 'integer', 'listSize': '1', 'value': '{\"key\": \"2\"}'}, + "For config item {} value KV pair list size limit to 1", TypeError), + ("", {'description': 'expression', 'type': 'kvlist', 'default': '{\"key\": \"val\"}', 'order': '2', + 'items': 'string', 'listSize': '1', 'value': '{\"key\": \"val\"}'}, + "For config item {} value should be passed KV pair list in string format", TypeError), + ("", {'description': 'expression', 'type': 'kvlist', 'default': '{\"key\": \"val\"}', 'order': '2', + 'items': 'string', 'listSize': '1', 'value': '[\"bar\"]'}, + "For 
config item {} value should be passed KV pair list in string format", TypeError), + ("{\"key\": \"val\", \"key2\": \"val2\"}", + {'description': 'expression', 'type': 'kvlist', 'default': '{\"key\": \"val\"}', 'order': '2', + 'items': 'string', 'listSize': '1', 'value': '{\"key\": \"val\"}'}, + "For config item {} value KV pair list size limit to 1", TypeError), + ("{\"key\": \"1.2\", \"key2\": \"0.9\", \"key3\": \"444.12\"}", + {'description': 'expression', 'type': 'kvlist', 'default': '{\"key\": \"1.2\", \"key2\": \"0.9\"}', + 'order': '2', 'items': 'float', 'listSize': '2', 'value': '{\"key\": \"1.2\", \"key2\": \"0.9\"}'}, + "For config item {} value KV pair list size limit to 2", TypeError), + ("{\"key\": \"1.2\", \"key\": \"1.23\"}", {'description': 'Simple list', 'type': 'kvlist', 'default': '{\"key\": \"11.12\"}', + 'order': '2', 'items': 'float', 'value': '{\"key\": \"1.4\"}'}, + "For config item {} duplicate KV pair found", TypeError), + ("{\"key\": \"val\"}", {'description': 'expression', 'type': 'kvlist', 'default': '{\"key\": \"1\"}', + 'items': 'integer', 'value': '{\"key\": \"13\"}'}, + "For config item {} all elements should be of same integer type", ValueError), + ("{\"key\": 1}", {'description': 'expression', 'type': 'kvlist', 'default': '{\"a\": \"c\"}', 'order': '2', + 'items': 'string', 'value': '{\"abc\", \"def\"}'}, + "For config item {} all elements should be of same string type", ValueError), + ("{\"key\": \"2\"}", {'description': 'expression', 'type': 'kvlist', 'default': '{\"key\": \"1.4\"}', + 'order': '2', 'items': 'float', 'value': '{\"key\": \"12.0\"}'}, + "For config item {} all elements should be of same float type", ValueError), + ("{\"key\": \"2\"}", {'description': 'expression', 'type': 'kvlist', 'default': '{\"key\": \"48\"}', + 'items': 'integer', 'listSize': '1', 'value': '{\"key\": \"48\"}', 'minimum': '20'}, + "For config item {} you cannot set the new value, below 20", ValueError), + ("{\"key\": \"100\"}", {'description': 'expression', 'type': 'kvlist', 'default': '{\"key\": \"48\"}', + 'items': 'integer', 'listSize': '1', 'value': '{\"key\": \"48\"}', 'maximum': '50'}, + "For config item {} you cannot set the new value, above 50", ValueError), + ("{\"key\": \"46\"}", {'description': 'expression', 'type': 'kvlist', 'default': '{\"key\": \"50\"}', + 'items': 'integer', 'listSize': '1', 'value': '{\"key\": \"48\"}', 'maximum': '50', + 'minimum': '47'}, "For config item {} you cannot set the new value, beyond the " + "range (47,50)", ValueError), + ("{\"key\": \"100\"}", {'description': 'expression', 'type': 'kvlist', 'default': '{\"key\": \"48\"}', + 'items': 'integer', 'listSize': '1', 'value': '{\"key\": \"48\"}', 'maximum': '50', + 'minimum': '47'}, + "For config item {} you cannot set the new value, beyond the range (47,50)", ValueError), + ("{\"foo\": \"bars\"}", {'description': 'expression', 'type': 'kvlist', 'default': '{\"a1\": \"c1\"}', + 'items': 'string', 'value': '[\"ab\", \"de\"]', 'listSize': '1', 'length': '3'}, + "For config item {} you cannot set the new value, beyond the length 3", ValueError), + ("{\"key\": \"1.002\", \"key2\": \"2.6\"}", {'description': 'expression', 'type': 'kvlist', + 'default': '{\"key\", \"2.5\"}', 'items': 'float', + 'value': '{\"key\", \"2.5\"}', 'minimum': '2.5', 'listSize': '2'}, + "For config item {} you cannot set the new value, below 2.5", ValueError), + ("{\"key\": \"2.6\"}", {'description': 'expression', 'type': 'kvlist', 'default': '{\"key\": \"2.5\"}', + 'items': 'float', 'value': '{\"key\": 
\"2.5\"}', 'maximum': '2.5', + 'listSize': '1'}, "For config item {} you cannot set the new value, above 2.5", + ValueError), + ("{\"key\": \"2.6\"}", {'description': 'expression', 'type': 'kvlist', 'default': '{\"key\": \"2.2\"}', + 'items': 'float', 'value': '{\"key\": \"2.5\"}', 'listSize': '1', 'minimum': '2', + 'maximum': '2.5'}, + "For config item {} you cannot set the new value, beyond the range (2,2.5)", ValueError), + ("{\"key\": \"1.999\"}", {'description': 'expression', 'type': 'kvlist', 'default': '{\"key\": \"2.2\"}', + 'items': 'float', 'value': '{\"key\": \"2.5\"}', 'listSize': '1', 'minimum': '2', + 'maximum': '2.5'}, + "For config item {} you cannot set the new value, beyond the range (2,2.5)", ValueError) ]) - def test_bad__validate_value_per_optional_attribute(self, new_value_entry, storage_value_entry, exc_msg): - message = "For config item {} you cannot set the new value, {}".format(ITEM_NAME, exc_msg) + def test_bad__validate_value_per_optional_attribute(self, new_value_entry, storage_value_entry, exc_msg, exc_type): storage_client_mock = MagicMock(spec=StorageClientAsync) c_mgr = ConfigurationManager(storage_client_mock) - with pytest.raises(Exception) as excinfo: + with pytest.raises(Exception) as exc_info: c_mgr._validate_value_per_optional_attribute(ITEM_NAME, storage_value_entry, new_value_entry) - assert excinfo.type is TypeError - assert message == str(excinfo.value) + assert exc_info.type is exc_type + msg = exc_msg.format(ITEM_NAME) + assert msg == str(exc_info.value) @pytest.mark.parametrize("new_value_entry, storage_value_entry", [ ("Fledge", {'default': 'FOG', 'length': '7', 'displayName': 'Length Test', 'value': 'fledge', @@ -3615,7 +4052,117 @@ def test_bad__validate_value_per_optional_attribute(self, new_value_entry, stora ("19", {'order': '4', 'default': '10', 'minimum': '10', 'maximum': '19', 'displayName': 'RangeMax Test', 'value': '15', 'type': 'integer', 'description': 'Test value'}), ("15", {'order': '4', 'default': '10', 'minimum': '10', 'maximum': '19', 'displayName': 'Range Test', - 'value': '15', 'type': 'integer', 'description': 'Test value'}) + 'value': '15', 'type': 'integer', 'description': 'Test value'}), + ("[]", {'description': 'Simple list', 'type': 'list', 'default': '[\"1\", \"2\"]', 'order': '2', + 'items': 'integer', 'value': '[\"3\", \"4\"]'}), + ("[\"10\", \"20\"]", {'description': 'Simple list', 'type': 'list', 'default': '[\"1\", \"2\"]', 'order': '2', + 'items': 'integer', 'value': '[\"3\", \"4\"]'}), + ("[\"foo\", \"bar\"]", {'description': 'Simple list', 'type': 'list', 'default': '[\"a\", \"c\"]', 'order': '2', + 'items': 'string', 'value': '[\"abc\", \"def\"]'}), + ("[\".002\", \"1.002\"]", {'description': 'Simple list', 'type': 'list', 'default': '[\"1.2\", \"1.4\"]', + 'order': '2', 'items': 'float', 'value': '[\"5.67\", \"12.0\"]'}), + ("[\"10\", \"20\", \"30\"]", {'description': 'Simple list', 'type': 'list', 'default': '[\"1\", \"2\"]', + 'order': '2', 'items': 'integer', 'listSize': "3", 'value': '[\"3\", \"4\"]'}), + ("[\"new string\"]", {'description': 'Simple list', 'type': 'list', 'default': '[\"a\", \"c\"]', 'order': '2', + 'items': 'string', 'listSize': "1", 'value': '[\"abc\", \"def\"]'}), + ("[\"6.523e-07\"]", {'description': 'Simple list', 'type': 'list', 'default': '[\"1.2\", \"1.4\"]', + 'order': '2', 'items': 'float', 'listSize': "1", 'value': '[\"5.67\", \"12.0\"]'}), + ("[]", {'description': 'Simple list', 'type': 'list', 'default': '[\"1\", \"2\"]', + 'order': '2', 'items': 'integer', 'listSize': "0", 
'value': '[\"3\", \"4\"]'}), + ("[]", {'description': 'Simple list', 'type': 'list', 'default': '[\"a\", \"c\"]', 'order': '2', + 'items': 'string', 'listSize': "0", 'value': '[\"abc\", \"def\"]'}), + ("[]", {'description': 'Simple list', 'type': 'list', 'default': '[\"1.2\", \"1.4\"]', + 'order': '2', 'items': 'float', 'listSize': "0", 'value': '[\"5.67\", \"12.0\"]'}), + ("[]", {'description': 'Simple list', 'type': 'list', 'default': '[\"a\", \"c\"]', 'order': '2', + 'items': 'string', 'listSize': "1", 'value': '[\"abc\", \"def\"]'}), + ("[\"100\", \"20\"]", {'description': 'SL', 'type': 'list', 'default': '[\"34\", \"48\"]', 'order': '2', + 'items': 'integer', 'listSize': '2', 'value': '[\"34\", \"48\"]', 'minimum': '20'}), + ("[\"50\", \"49\", \"0\"]", {'description': 'Simple list', 'type': 'list', 'default': '[\"34\", \"48\"]', + 'order': '2', 'items': 'integer', 'listSize': '3', + 'value': '[\"34\", \"48\"]', 'maximum': '50'}), + ("[\"50\", \"49\", \"47\"]", {'description': 'Simple list', 'type': 'list', 'default': + '[\"50\", \"48\", \"49\"]', 'order': '2', 'items': 'integer', 'listSize': '3', + 'value': '[\"47\", \"48\", \"49\"]', 'maximum': '50', 'minimum': '47'}), + ("[\"50\", \"49\", \"48\"]", {'description': 'Simple list', 'type': 'list', 'default': + '[\"50\", \"48\", \"49\"]', 'order': '2', 'items': 'integer', 'listSize': '3', + 'value': '[\"47\", \"48\", \"49\"]', 'maximum': '50', 'minimum': '47'}), + ("[\"foo\", \"bar\"]", {'description': 'Simple list', 'type': 'list', 'default': '[\"a1\", \"c1\"]', + 'order': '2', 'items': 'string', 'value': '[\"ab\", \"de\"]', 'listSize': '2', + 'length': '3'}), + ("[\"2.6\", \"13.002\"]", {'description': 'Simple list', 'type': 'list', 'default': '[\"5.2\", \"2.5\"]', + 'order': '2', 'items': 'float', 'value': '[\"5.67\", \"2.5\"]', 'minimum': '2.5', + 'listSize': '2'}), + ("[\"2.4\", \"1.002\"]", {'description': 'Simple list', 'type': 'list', 'default': '[\"2.2\", \"2.5\"]', + 'order': '2', 'items': 'float', 'value': '[\"1.67\", \"2.5\"]', 'maximum': '2.5', + 'listSize': '2'}), + ("[\"2.0\"]", {'description': 'Simple list', 'type': 'list', 'default': '[\"2.2\"]', 'order': '2', + 'items': 'float', 'value': '[\"2.5\"]', 'listSize': '1', 'minimum': '2', 'maximum': '2.5'}), + ("[\"2.5\"]", {'description': 'Simple list', 'type': 'list', 'default': '[\"2.2\"]', 'order': '2', + 'items': 'float', 'value': '[\"2.5\"]', 'listSize': '1', 'minimum': '2', 'maximum': '2.5'}), + ("{\"key\": \"bar\"}", + {'description': 'A list of expressions and values', 'type': 'kvlist', 'default': '{\"key\": \"c\"}', + 'order': '2', + 'items': 'string', 'value': '{\"key\": \"def\"}'}), + ("{\"key\": \"1.002\"}", + {'description': 'A list of expressions and values', 'type': 'kvlist', 'default': '{\"key\": \"1.4\"}', + 'order': '2', 'items': 'float', 'value': '{\"key\": \"12.0\"}'}), + ("{\"key\": \"10\", \"key1\": \"20\", \"key2\": \"30\"}", + {'description': 'A list of expressions and values', 'type': 'kvlist', + 'default': '{\"key\": \"10\", \"key1\": \"20\", \"key2\": \"30\"}', + 'order': '2', 'items': 'integer', 'listSize': "3", + 'value': '{\"key\": \"1\", \"key1\": \"2\", \"key2\": \"3\"}'}), + ("{\"key\": \"new string\"}", + {'description': 'A list of expressions and values', 'type': 'kvlist', 'default': '{\"key\": \"c\"}', + 'order': '2', + 'items': 'string', 'listSize': "1", 'value': '{\"key\": \"def\"}'}), + ("{\"key\": \"6.523e-07\"}", + {'description': 'A list of expressions and values', 'type': 'kvlist', 'default': '{\"key\": \"1.4\"}', + 'order': '2', 
'items': 'float', 'listSize': "1", 'value': '{\"key\": \"12.0\"}'}), + ("{}", {'description': 'A list of expressions and values', 'type': 'kvlist', 'default': '{\"1\": \"2\"}', + 'order': '2', 'items': 'integer', 'listSize': "0", 'value': '{\"3\": \"4\"}'}), + ("{}", {'description': 'A list of expressions and values', 'type': 'kvlist', 'default': '{\"a\": \"c\"}', + 'order': '2', + 'items': 'string', 'listSize': "0", 'value': '{\"abc\": \"def\"}'}), + ("{}", {'description': 'A list of expressions and values', 'type': 'kvlist', 'default': '{\"key\": \"1.4\"}', + 'order': '2', 'items': 'float', 'listSize': "0", 'value': '{\"key\": \"12.0\"}'}), + ("{}", {'description': 'A list of expressions and values', 'type': 'kvlist', 'default': '{\"1\": \"2\"}', + 'order': '2', 'items': 'integer', 'listSize': "1", 'value': '{\"3\": \"4\"}'}), + ("{\"key\": \"100\", \"key2\": \"20\"}", + {'description': 'SL', 'type': 'kvlist', 'default': '{\"key\": \"100\", \"key2\": \"48\"}', 'order': '2', + 'items': 'integer', 'listSize': '2', 'value': '{\"key\": \"34\", \"key2\": \"20\"}', 'minimum': '20'}), + ("{\"key\": \"50\", \"key2\": \"0\", \"key3\": \"49\"}", + {'description': 'A list of expressions and values', 'type': 'kvlist', + 'default': '{\"key\": \"47\", \"key2\": \"48\", \"key3\": \"49\"}', + 'order': '2', 'items': 'integer', 'listSize': '3', + 'value': '{\"key\": \"47\", \"key2\": \"48\", \"key3\": \"49\"}', 'maximum': '50'}), + ("{\"key\": \"50\", \"key2\": \"48\", \"key3\": \"49\"}", + {'description': 'A list of expressions and values', 'type': 'kvlist', 'default': + '{\"key\": \"50\", \"key2\": \"48\", \"key3\": \"49\"}', 'order': '2', 'items': 'integer', 'listSize': '3', + 'value': '{\"key\": \"47\", \"key2\": \"48\", \"key3\": \"49\"}', 'maximum': '50', 'minimum': '47'}), + ("{\"key\": \"50\", \"key2\": \"48\", \"key3\": \"49\"}", + {'description': 'A list of expressions and values', 'type': 'kvlist', 'default': + '{\"key\": \"47\", \"key2\": \"48\", \"key3\": \"49\"}', 'order': '2', 'items': 'integer', 'listSize': '3', + 'value': '{\"key\": \"47\", \"key2\": \"48\", \"key3\": \"49\"}', 'maximum': '50', 'minimum': '47'}), + ("{\"key\": \"foo\", \"key2\": \"bar\"}", {'description': 'A list of expressions and values', 'type': 'kvlist', + 'default': '{\"key\": \"a1\", \"key2\": \"c1\"}', + 'order': '2', 'items': 'string', + 'value': '{\"key\": \"ab\", \"key2\": \"de\"}', 'listSize': '2', + 'length': '3'}), + ("{\"key\": \"2.6\", \"key2\": \"13.002\"}", + {'description': 'A list of expressions and values', 'type': 'kvlist', + 'default': '{\"key\": \"5.2\", \"key2\": \"2.5\"}', + 'order': '2', 'items': 'float', 'value': '{\"key\": \"5.67\", \"key2\": \"2.5\"}', 'minimum': '2.5', + 'listSize': '2'}), + ("{\"key\": \"2.4\", \"key2\": \"1.002\"}", + {'description': 'A list of expressions and values', 'type': 'kvlist', + 'default': '{\"key\": \"2.2\", \"key2\": \"2.5\"}', 'order': '2', 'items': 'float', + 'value': '{\"key\": \"1.67\", \"key2\": \"2.5\"}', 'maximum': '2.5', 'listSize': '2'}), + ("{\"key\": \"2.0\"}", {'description': 'A list of expressions and values', 'type': 'kvlist', + 'default': '{\"key\": \"2.2\"}', 'order': '2', 'items': 'float', 'value': '{\"2.5\"}', + 'listSize': '1', 'minimum': '2', 'maximum': '2.5'}), + ("{\"key\": \"2.5\"}", {'description': 'A list of expressions and values', 'type': 'kvlist', + 'default': '{\"key\": \"2.2\"}', 'order': '2', 'items': 'float', 'value': '{\"2.5\"}', + 'listSize': '1', 'minimum': '2', 'maximum': '2.5'}) ]) def 
test_good__validate_value_per_optional_attribute(self, new_value_entry, storage_value_entry): storage_client_mock = MagicMock(spec=StorageClientAsync) diff --git a/tests/unit/python/fledge/services/core/api/control_service/test_acl_management.py b/tests/unit/python/fledge/services/core/api/control_service/test_acl_management.py index 0953357de7..793ac54cd3 100644 --- a/tests/unit/python/fledge/services/core/api/control_service/test_acl_management.py +++ b/tests/unit/python/fledge/services/core/api/control_service/test_acl_management.py @@ -97,8 +97,38 @@ async def test_good_get_acl_by_name(self, client): ({"name": ""}, "ACL name cannot be empty."), ({"name": "test"}, "service parameter is required."), ({"name": "test", "service": 1}, "service must be a list."), - ({"name": "test", "service": []}, "url parameter is required."), - ({"name": "test", "service": [], "url": 1}, "url must be a list.") + ({"name": "test", "service": []}, "service list cannot be empty."), + ({"name": "test", "service": [1]}, "service elements must be an object."), + ({"name": "test", "service": ["1"]}, "service elements must be an object."), + ({"name": "test", "service": ["1", {}]}, "service elements must be an object."), + ({"name": "test", "service": [{}]}, "service object cannot be empty."), + ({"name": "test", "service": [{"foo": "bar"}]}, "Either type or name Key-Value Pair is missing for service."), + ({"name": "test", "service": [{"type": 1}]}, "Value must be a string for service type."), + ({"name": "test", "service": [{"type": ""}]}, "Value cannot be empty for service type."), + ({"name": "test", "service": [{"name": 1}]}, "Value must be a string for service name."), + ({"name": "test", "service": [{"name": ""}]}, "Value cannot be empty for service name."), + ({"name": "test", "service": [{"type": "T1"}]}, "url parameter is required."), + ({"name": "test", "service": [{"type": "T1"}], "url": 1}, "url must be a list."), + ({"name": "test", "service": [{"type": "T1"}], "url": [{}]}, "url child Key-Value Pair is missing."), + ({"name": "test", "service": [{"type": "T1"}], "url": [{"url": []}]}, "Value must be a string for url object."), + ({"name": "test", "service": [{"type": "T1"}], "url": [{"url": ""}]}, "Value cannot be empty for url object."), + ({"name": "test", "service": [{"type": "T1"}], "url": [{"acl": ""}]}, "Value must be an array for acl object."), + ({"name": "test", "service": [{"type": "T1"}], "url": [{"acl": [1]}]}, "acl elements must be an object."), + ({"name": "test", "service": [{"type": "T1"}], "url": [{"acl": [{}]}]}, "acl object cannot be empty."), + ({"name": "test", "service": [{"type": "T1"}], "url": [{"acl": [{"type": "Core"}]}]}, + "url child Key-Value Pair is missing."), + ({"name": "test", "service": [{"type": "T1"}], "url": [{"url": "URI/write", "acl": ""}]}, + "Value must be an array for acl object."), + ({"name": "test", "service": [{"type": "T1"}], "url": [{"url": "URI/write", "acl": []}, 1]}, + "url elements must be an object."), + ({"name": "test", "service": [{"name": "S1"}], "url": [{"url": "URI/write", "acl": []}, {"acl": []}]}, + "url child Key-Value Pair is missing."), + ({"name": "test", "service": [{"name": "S1"}], "url": [{"url": "URI/write", "acl": []}, {"acl": ""}]}, + "Value must be an array for acl object."), + ({"name": "test", "service": [{"name": "S1"}], "url": [{"url": "URI/write", "acl": []}, {"acl": [1]}]}, + "acl elements must be an object."), + ({"name": "test", "service": [{"name": "S1"}], "url": [{"url": "URI/write", "acl": []}, {"acl": [{}]}]}, + 
"acl object cannot be empty.") ]) async def test_bad_add_acl(self, client, payload, message): resp = await client.post('/fledge/ACL', data=json.dumps(payload)) @@ -110,7 +140,7 @@ async def test_bad_add_acl(self, client, payload, message): async def test_duplicate_add_acl(self, client): acl_name = "testACL" - request_payload = {"name": acl_name, "service": [], "url": []} + request_payload = {"name": acl_name, "service": [{'name': 'Fledge Storage'}], "url": []} result = {'count': 1, 'rows': [ {'name': acl_name, 'service': [{'name': 'Fledge Storage'}, {'type': 'Southbound'}], 'url': [{'url': '/fledge/south/operation', 'acl': [{'type': 'Southbound'}]}]}]} @@ -132,7 +162,7 @@ async def test_duplicate_add_acl(self, client): async def test_good_add_acl(self, client): acl_name = "testACL" - request_payload = {"name": acl_name, "service": [], "url": []} + request_payload = {"name": acl_name, "service": [{"type": "Notification"}], "url": []} result = {"count": 0, "rows": []} insert_result = {"response": "inserted", "rows_affected": 1} acl_query_payload = {"return": ["name"], "where": {"column": "name", "condition": "=", "value": acl_name}} @@ -155,11 +185,11 @@ async def test_good_add_acl(self, client): assert 200 == resp.status result = await resp.text() json_response = json.loads(result) - assert {'name': acl_name, 'service': [], 'url': []} == json_response + assert {'name': acl_name, 'service': [{"type": "Notification"}], 'url': []} == json_response audit_info_patch.assert_called_once_with('ACLAD', request_payload) args, _ = insert_tbl_patch.call_args_list[0] assert 'control_acl' == args[0] - assert {'name': acl_name, 'service': '[]', 'url': '[]'} == json.loads(args[1]) + assert {'name': acl_name, 'service': '[{"type": "Notification"}]', 'url': '[]'} == json.loads(args[1]) args, _ = query_tbl_patch.call_args_list[0] assert 'control_acl' == args[0] assert acl_query_payload == json.loads(args[1]) @@ -167,7 +197,46 @@ async def test_good_add_acl(self, client): @pytest.mark.parametrize("payload, message", [ ({}, "Nothing to update for the given payload."), ({"service": 1}, "service must be a list."), - ({"url": 1}, "url must be a list.") + ({"url": 1}, "url must be a list."), + ({"service": []}, "service list cannot be empty."), + ({"service": [1]}, "service elements must be an object."), + ({"service": ["1"]}, "service elements must be an object."), + ({"service": ["1", {}]}, "service elements must be an object."), + ({"service": [{}]}, "service object cannot be empty."), + ({"service": [{"foo": "bar"}]}, "Either type or name Key-Value Pair is missing for service."), + ({"service": [{"type": 1}]}, "Value must be a string for service type."), + ({"service": [{"type": ""}]}, "Value cannot be empty for service type."), + ({"service": [{"name": 1}]}, "Value must be a string for service name."), + ({"service": [{"name": ""}]}, "Value cannot be empty for service name."), + ({"url": 1}, "url must be a list."), + ({"url": [{}]}, "url child Key-Value Pair is missing."), + ({"url": [{"url": []}]}, "Value must be a string for url object."), + ({"url": [{"url": ""}]}, "Value cannot be empty for url object."), + ({"url": [{"acl": ""}]}, "Value must be an array for acl object."), + ({"url": [{"acl": [1]}]}, "acl elements must be an object."), + ({"url": [{"acl": [{}]}]}, "acl object cannot be empty."), + ({"url": [{"acl": [{"type": "Core"}]}]}, "url child Key-Value Pair is missing."), + ({"url": [{"url": "URI/write", "acl": ""}]}, "Value must be an array for acl object."), + ({"url": [{"url": "URI/write", "acl": 
[]}, 1]}, "url elements must be an object."), + ({"url": [{"url": "URI/write", "acl": []}, {"acl": []}]}, "url child Key-Value Pair is missing."), + ({"url": [{"url": "URI/write", "acl": []}, {"acl": ""}]}, "Value must be an array for acl object."), + ({"url": [{"url": "URI/write", "acl": []}, {"acl": [1]}]}, "acl elements must be an object."), + ({"url": [{"url": "URI/write", "acl": []}, {"acl": [{}]}]}, "acl object cannot be empty."), + ({"service": [{"foo": "bar"}], "url": []}, "Either type or name Key-Value Pair is missing for service."), + ({"url": [], "service": []}, "service list cannot be empty."), + ({"url": [], "service": [{}]}, "service object cannot be empty."), + ({"url": [], "service": [{"type": 1}]}, "Value must be a string for service type."), + ({"url": [], "service": [{"type": ""}]}, "Value cannot be empty for service type."), + ({"url": [], "service": [{"name": 1}]}, "Value must be a string for service name."), + ({"url": [], "service": [{"name": ""}]}, "Value cannot be empty for service name."), + ({"service": [{"name": "myService"}], "url": 1}, "url must be a list."), + ({"service": [{}], "url": 1}, "service object cannot be empty."), + ({"service": [{"name": "SVC"}], "url": [{"url": "URI/write", "acl": ""}]}, + "Value must be an array for acl object."), + ({"service": [{"name": "SVC"}], "url": [{"url": "", "acl": ""}]}, "Value cannot be empty for url object."), + ({"service": [{"name": "SVC"}], "url": [{"blah": "", "acl": []}]}, "url child Key-Value Pair is missing."), + ({"service": [{"name": "SVC"}], "url": [{"url": "URI/write", "acl": []}, {"acl": [{}]}]}, + "acl object cannot be empty.") ]) async def test_bad_update_acl(self, client, payload, message): acl_name = "testACL" @@ -180,7 +249,7 @@ async def test_bad_update_acl(self, client, payload, message): async def test_update_acl_not_found(self, client): acl_name = "testACL" - req_payload = {"service": []} + req_payload = {"service": [{"type": "Notification"}]} result = {"count": 0, "rows": []} value = await mock_coro(result) if sys.version_info >= (3, 8) else asyncio.ensure_future(mock_coro(result)) query_payload = {"return": ["name", "service", "url"], "where": { @@ -200,11 +269,11 @@ async def test_update_acl_not_found(self, client): assert query_payload == json.loads(args[1]) @pytest.mark.parametrize("payload", [ - {"service": []}, - {"service": [{"service": [{"name": "Sinusoid"}, {"type": "Southbound"}]}]}, - {"service": [], "url": []}, - {"service": [], "url": [{"url": "/fledge/south/operation", "acl": [{"type": "Southbound"}]}]}, - {"service": [{"service": [{"name": "Sinusoid"}, {"type": "Southbound"}]}], + {"service": [{"name": "Sinusoid"}, {"type": "Southbound"}]}, + {"service": [{"name": "Sinusoid"}], "url": []}, + {"service": [{"type": "Southbound"}], "url": [{"url": "/fledge/south/operation", + "acl": [{"type": "Southbound"}]}]}, + {"service": [{"name": "Sinusoid"}, {"type": "Southbound"}], "url": [{"url": "/fledge/south/operation", "acl": [{"type": "Southbound"}]}]} ]) async def test_update_acl(self, client, payload): diff --git a/tests/unit/python/fledge/services/core/api/test_alerts.py b/tests/unit/python/fledge/services/core/api/test_alerts.py new file mode 100644 index 0000000000..c02d8e7903 --- /dev/null +++ b/tests/unit/python/fledge/services/core/api/test_alerts.py @@ -0,0 +1,85 @@ +import asyncio +import json +import sys +from unittest.mock import MagicMock, patch +import pytest +from aiohttp import web + +from fledge.common.storage_client.storage_client import StorageClientAsync +from 
diff --git a/tests/unit/python/fledge/services/core/api/test_alerts.py b/tests/unit/python/fledge/services/core/api/test_alerts.py
new file mode 100644
index 0000000000..c02d8e7903
--- /dev/null
+++ b/tests/unit/python/fledge/services/core/api/test_alerts.py
@@ -0,0 +1,85 @@
+import asyncio
+import json
+import sys
+from unittest.mock import MagicMock, patch
+import pytest
+from aiohttp import web
+
+from fledge.common.storage_client.storage_client import StorageClientAsync
+from fledge.common.alert_manager import AlertManager
+from fledge.services.core import connect, routes, server
+from fledge.services.core.api import alerts
+
+
+__author__ = "Ashish Jabble"
+__copyright__ = "Copyright (c) 2024 Dianomic Systems Inc."
+__license__ = "Apache 2.0"
+__version__ = "${VERSION}"
+
+class TestAlerts:
+    """ Alerts API """
+
+    @pytest.fixture
+    def client(self, loop, test_client):
+        app = web.Application(loop=loop)
+        routes.setup(app)
+        return loop.run_until_complete(test_client(app))
+
+    async def async_mock(self, return_value):
+        return return_value
+
+    def setup_method(self):
+        storage_client_mock = MagicMock(StorageClientAsync)
+        server.Server._alert_manager = AlertManager(storage_client_mock)
+
+    def teardown_method(self):
+        server.Server._alert_manager = None
+
+    async def test_get_all(self, client):
+        rv = await self.async_mock([]) if sys.version_info.major == 3 and sys.version_info.minor >= 8 \
+            else asyncio.ensure_future(self.async_mock([]))
+
+        with patch.object(server.Server._alert_manager, 'get_all', return_value=rv):
+            resp = await client.get('/fledge/alert')
+            assert 200 == resp.status
+            json_response = json.loads(await resp.text())
+            assert 'alerts' in json_response
+            assert [] == json_response['alerts']
+
+    async def test_bad_get_all(self, client):
+        with patch.object(server.Server._alert_manager, 'get_all', side_effect=Exception):
+            with patch.object(alerts._LOGGER, 'error') as patch_logger:
+                resp = await client.get('/fledge/alert')
+                assert 500 == resp.status
+                assert '' == resp.reason
+                json_response = json.loads(await resp.text())
+                assert 'message' in json_response
+                assert '' == json_response['message']
+            assert 1 == patch_logger.call_count
+
+    async def test_delete(self, client):
+        rv = await self.async_mock("Nothing to delete.") \
+            if sys.version_info.major == 3 and sys.version_info.minor >= 8 else (
+            asyncio.ensure_future(self.async_mock("Nothing to delete.")))
+        with patch.object(server.Server._alert_manager, 'delete', return_value=rv):
+            resp = await client.delete('/fledge/alert')
+            assert 200 == resp.status
+            json_response = json.loads(await resp.text())
+            assert 'message' in json_response
+            assert "Nothing to delete." == json_response['message']
+
+    @pytest.mark.parametrize("url, msg, exception, status_code, log_count", [
+        ('/fledge/alert', '', Exception, 500, 1),
+        ('/fledge/alert/blah', 'blah alert not found.', KeyError, 404, 0)
+    ])
+    async def test_bad_delete(self, client, url, msg, exception, status_code, log_count):
+        with patch.object(server.Server._alert_manager, 'delete', side_effect=exception):
+            with patch.object(alerts._LOGGER, 'error') as patch_logger:
+                resp = await client.delete(url)
+                assert status_code == resp.status
+                assert msg == resp.reason
+                json_response = json.loads(await resp.text())
+                assert 'message' in json_response
+                assert msg == json_response['message']
+            assert log_count == patch_logger.call_count
+
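These alert tests, like the purge tests later in this patch, repeat one version guard: on Python 3.8+ the stubbed return value is the plain awaited result, while on older interpreters it is wrapped in a Future. The reason is the AsyncMock behaviour noted further down ("Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function"). A condensed, illustrative sketch follows; _Manager and call_with_stub are stand-in names, not part of the patch.

    import asyncio
    import sys
    from unittest.mock import patch


    class _Manager:
        """Stand-in for the patched object (e.g. AlertManager); illustrative only."""
        async def get_all(self):
            raise NotImplementedError   # never reached; the test patches this method


    async def async_mock(value):
        return value


    async def call_with_stub(obj, value):
        # Python 3.8+: patch.object() sees an async method and installs an AsyncMock,
        # which itself wraps return_value in a coroutine, so hand it the plain value.
        # Older interpreters install a MagicMock, so return_value must already be awaitable.
        rv = await async_mock(value) if sys.version_info >= (3, 8) \
            else asyncio.ensure_future(async_mock(value))
        with patch.object(obj, 'get_all', return_value=rv):
            return await obj.get_all()


    # asyncio.run(call_with_stub(_Manager(), []))  ->  []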
diff --git a/tests/unit/python/fledge/services/core/api/test_common_ping.py b/tests/unit/python/fledge/services/core/api/test_common_ping.py
index f957e8839e..b0dafea97c 100644
--- a/tests/unit/python/fledge/services/core/api/test_common_ping.py
+++ b/tests/unit/python/fledge/services/core/api/test_common_ping.py
@@ -27,9 +27,9 @@ import aiohttp
 from aiohttp import web
-from fledge.services.core import routes
-from fledge.services.core import connect
+from fledge.services.core import connect, routes, server as core_server
 from fledge.services.core.api.common import _logger
+from fledge.common.alert_manager import AlertManager
 from fledge.common.web import middleware
 from fledge.common.storage_client.storage_client import StorageClientAsync
 from fledge.common.configuration_manager import ConfigurationManager
@@ -85,13 +85,14 @@ async def mock_coro(*args, **kwargs):
     host_name, ip_addresses = get_machine_detail
     attrs = {"query_tbl_with_payload.return_value": await mock_coro()}
     mock_storage_client_async = MagicMock(spec=StorageClientAsync, **attrs)
+    core_server.Server._alert_manager = AlertManager(mock_storage_client_async)
+    core_server.Server._alert_manager.alerts = []
     with patch.object(middleware._logger, 'debug') as logger_info:
         with patch.object(connect, 'get_storage_async', return_value=mock_storage_client_async):
             with patch.object(mock_storage_client_async, 'query_tbl_with_payload', return_value=_rv) as query_patch:
                 app = web.Application(loop=loop, middlewares=[middleware.optional_auth_middleware])
                 # fill route table
                 routes.setup(app)
-
                 server = await aiohttp_server(app)
                 await server.start_server(loop=loop)
@@ -115,6 +116,7 @@ async def mock_coro(*args, **kwargs):
     assert content_dict['health'] == "green"
     assert content_dict['safeMode'] is False
     assert re.match(SEMANTIC_VERSIONING_REGEX, content_dict['version']) is not None
+    assert content_dict['alerts'] == 0
     query_patch.assert_called_once_with('statistics', payload)
     log_params = 'Received %s request for %s', 'GET', '/fledge/ping'
     logger_info.assert_called_once_with(*log_params)
@@ -172,6 +174,7 @@ async def mock_coro(*args, **kwargs):
     assert content_dict['health'] == "green"
     assert content_dict['safeMode'] is False
     assert re.search(SEMANTIC_VERSIONING_REGEX, content_dict['version']) is not None
+    assert content_dict['alerts'] == 0
     query_patch.assert_called_once_with('statistics', payload)
     log_params = 'Received %s request for %s', 'GET', '/fledge/ping'
     logger_info.assert_called_once_with(*log_params)
@@ -236,6 +239,7 @@ async def mock_get_category_item():
     assert content_dict['health'] == "green"
     assert content_dict['safeMode'] is False
     assert re.match(SEMANTIC_VERSIONING_REGEX, content_dict['version']) is not None
+    assert content_dict['alerts'] == 0
     mock_get_cat.assert_called_once_with('rest_api', 'allowPing')
     query_patch.assert_called_once_with('statistics', payload)
     log_params = 'Received %s request for %s', 'GET', '/fledge/ping'
@@ -361,6 +365,7 @@ def mock_coro(*args, **kwargs):
     assert content_dict['health'] == "green"
     assert content_dict['safeMode'] is False
     assert re.match(SEMANTIC_VERSIONING_REGEX, content_dict['version']) is not None
+    assert content_dict['alerts'] == 0
     query_patch.assert_called_once_with('statistics', payload)
     logger_info.assert_called_once_with('Received %s request for %s', 'GET', '/fledge/ping')
@@ -426,6 +431,7 @@ def mock_coro(*args, **kwargs):
     assert content_dict['ipAddresses'] == ip_addresses
     assert content_dict['health'] == "green"
     assert re.match(SEMANTIC_VERSIONING_REGEX, content_dict['version']) is not None
+    assert content_dict['alerts'] == 0
     query_patch.assert_called_once_with('statistics', payload)
     logger_info.assert_called_once_with('Received %s request for %s', 'GET', '/fledge/ping')
@@ -503,6 +509,7 @@ async def mock_get_category_item():
     assert content_dict['health'] == "green"
     assert content_dict['safeMode'] is False
     assert re.match(SEMANTIC_VERSIONING_REGEX, content_dict['version']) is not None
+    assert content_dict['alerts'] == 0
     mock_get_cat.assert_called_once_with('rest_api', 'allowPing')
     query_patch.assert_called_once_with('statistics', payload)
     logger_info.assert_called_once_with('Received %s request for %s', 'GET', '/fledge/ping')
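Every updated ping test first primes core_server.Server._alert_manager.alerts with an empty list and then asserts content_dict['alerts'] == 0, which implies the ping response carries a count of currently held alerts. The ping handler (presumably in fledge/services/core/api/common.py, which these tests import _logger from) is not shown in this patch; the snippet below is only a sketch of that relationship, and build_ping_payload is a made-up name.

    def build_ping_payload(base: dict, alert_manager) -> dict:
        # 'alerts' mirrors how many alerts the manager currently holds;
        # the fixtures above prime an empty list, hence the expected value of 0.
        payload = dict(base)
        payload['alerts'] = len(alert_manager.alerts) if alert_manager else 0
        return payload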
diff --git a/tests/unit/python/fledge/services/core/api/test_service.py b/tests/unit/python/fledge/services/core/api/test_service.py
index 767954568b..feaf4fa4b3 100644
--- a/tests/unit/python/fledge/services/core/api/test_service.py
+++ b/tests/unit/python/fledge/services/core/api/test_service.py
@@ -972,7 +972,7 @@ async def async_mock(return_value):
             assert 'install' == actual['action']
             assert -1 == actual['status']
             assert '' == actual['log_file_uri']
-            patch_fetch_available_package.assert_called_once_with('service')
+            patch_fetch_available_package.assert_called_once_with()
             args, kwargs = query_tbl_patch.call_args_list[0]
             assert 'packages' == args[0]
             actual = json.loads(args[1])
@@ -1019,7 +1019,7 @@ async def async_mock(return_value):
                 resp = await client.post('/fledge/service?action=install', data=json.dumps(param))
                 assert 404 == resp.status
                 assert "'{} service is not available for the given repository'".format(pkg_name) == resp.reason
-                patch_fetch_available_package.assert_called_once_with('service')
+                patch_fetch_available_package.assert_called_once_with()
                 args, kwargs = query_tbl_patch.call_args_list[0]
                 assert 'packages' == args[0]
                 assert payload == json.loads(args[1])
@@ -1161,7 +1161,7 @@ async def q_result(*arg):
                 args, kwargs = insert_table_patch.call_args
                 assert 'scheduled_processes' == args[0]
                 p = json.loads(args[1])
-                assert {'name': 'management', 'script': '["services/management"]'} == p
+                assert {'name': 'management', 'priority': 300, 'script': '["services/management"]'} == p
             patch_get_cat_info.assert_called_once_with(category_name=data['name'])

     async def test_dupe_management_service_schedule(self, client):
@@ -1221,7 +1221,7 @@ def q_result(*arg):
                 args, kwargs = insert_table_patch.call_args
                 assert 'scheduled_processes' == args[0]
                 p = json.loads(args[1])
-                assert {'name': 'management', 'script': '["services/management"]'} == p
+                assert {'name': 'management', 'priority': 300, 'script': '["services/management"]'} == p
             patch_get_cat_info.assert_called_once_with(category_name=data['name'])

     @pytest.mark.parametrize("param", [
diff --git a/tests/unit/python/fledge/tasks/purge/test_purge.py b/tests/unit/python/fledge/tasks/purge/test_purge.py
index d211667e0d..679b04b41f 100644
--- a/tests/unit/python/fledge/tasks/purge/test_purge.py
+++ b/tests/unit/python/fledge/tasks/purge/test_purge.py
@@ -118,10 +118,8 @@ async def store_purge(self, **kwargs):
     }

     @pytest.mark.parametrize("conf, expected_return, expected_calls", [
-        (config["purgeAgeSize"], (2, 4), {'sent_id': 0, 'size': '20', 'flag': 'purge'}),
         (config["purgeAge"], (1, 2), {'sent_id': 0, 'age': '72', 'flag': 'purge'}),
         (config["purgeSize"], (1, 2), {'sent_id': 0, 'size': '100', 'flag': 'purge'}),
-        (config["retainAgeSize"], (2, 4), {'sent_id': 0, 'size': '20', 'flag': 'retainall'}),
         (config["retainAge"], (1, 2), {'sent_id': 0, 'age': '72', 'flag': 'retainall'}),
         (config["retainSize"], (1, 2), {'sent_id': 0, 'size': '100', 'flag': 'retainall'}),
         (config["retainSizeAny"], (1, 2), {'sent_id': 0, 'size': '100', 'flag': 'retainany'})
@@ -158,13 +156,59 @@ async def test_purge_data(self, conf, expected_return, expected_calls):
                                       side_effect=self.store_purge) as mock_storage_purge:
                         with patch.object(audit, 'information', return_value=_rv2) as audit_info:
                             # Test the positive case when all if conditions in purge_data pass
-                            t_expected_return = await p.purge_data(conf)
                             assert expected_return == await p.purge_data(conf)
                             assert audit_info.called
                             _, kwargs = mock_storage_purge.call_args
                             assert kwargs == expected_calls
             assert patch_storage.called
-            assert 4 == patch_storage.call_count
+            assert 2 == patch_storage.call_count
+            args, _ = patch_storage.call_args
+            assert 'streams' == args[0]
+            assert payload == json.loads(args[1])
+
+    @pytest.mark.parametrize("conf, expected_return, expected_calls", [
+        (config["purgeAgeSize"], (2, 4), [{'sent_id': 0, 'size': '20', 'flag': 'purge'},
+                                          {'sent_id': 0, 'age': '72', 'flag': 'purge'}]),
+        (config["retainAgeSize"], (2, 4), [{'sent_id': 0, 'size': '20', 'flag': 'retainall'},
+                                           {'sent_id': 0, 'age': '72', 'flag': 'retainall'}])
+    ])
+    async def test_data_with_age_and_size(self, conf, expected_return, expected_calls):
+        mock_storage_client_async = MagicMock(spec=StorageClientAsync)
+        mock_audit_logger = AuditLogger(mock_storage_client_async)
+        mock_stream_result = q_result('streams')
+        payload = {"aggregate": {"operation": "min", "column": "last_object"}}
+        # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function.
+        if sys.version_info.major == 3 and sys.version_info.minor >= 8:
+            _rv1 = await mock_stream_result
+            _rv2 = await mock_value("")
+        else:
+            _rv1 = asyncio.ensure_future(mock_stream_result)
+            _rv2 = asyncio.ensure_future(mock_value(""))
+
+        with patch.object(FledgeProcess, '__init__'):
+            with patch.object(mock_audit_logger, "__init__", return_value=None):
+                p = Purge()
+                p._logger = FLCoreLogger
+                p._logger.info = MagicMock()
+                p._logger.error = MagicMock()
+                p._logger.debug = MagicMock()
+                p._storage_async = MagicMock(spec=StorageClientAsync)
+                p._readings_storage_async = MagicMock(spec=ReadingsStorageClientAsync)
+                audit = p._audit
+                with patch.object(p._storage_async, "query_tbl_with_payload", return_value=_rv1
+                                  ) as patch_storage:
+                    with patch.object(p._readings_storage_async, 'purge',
+                                      side_effect=self.store_purge) as mock_storage_purge:
+                        with patch.object(audit, 'information', return_value=_rv2) as audit_info:
+                            assert expected_return == await p.purge_data(conf)
+                            assert audit_info.called
+                            assert 2 == mock_storage_purge.call_count
+                            args, kwargs = mock_storage_purge.call_args_list[0]
+                            assert expected_calls[0] == kwargs
+                            args, kwargs = mock_storage_purge.call_args_list[1]
+                            assert expected_calls[1] == kwargs
+            assert patch_storage.called
+            assert 2 == patch_storage.call_count
             args, _ = patch_storage.call_args
             assert 'streams' == args[0]
             assert payload == json.loads(args[1])
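The new test_data_with_age_and_size expects two purge calls per run, size first and then age, and a combined return of (2, 4), while the single-criterion cases above it now expect only one purge call and two storage queries. The Purge task's purge_data is not shown in this patch; the sketch below only restates the two-pass behaviour the test encodes, with an assumed result shape for the storage purge call.

    # Not the Fledge implementation: a minimal sketch of the behaviour the test asserts.
    async def purge_by_age_and_size(readings_storage, sent_id, size, age, flag):
        """Issue one purge per configured constraint and combine the reported counts."""
        total_removed = total_unsent = 0
        # Size first, then age: the same order as expected_calls in the test above.
        for kwargs in ({'sent_id': sent_id, 'size': size, 'flag': flag},
                       {'sent_id': sent_id, 'age': age, 'flag': flag}):
            result = await readings_storage.purge(**kwargs)
            # Result keys are assumed for illustration; the test only checks the summed tuple.
            total_removed += result['removed']
            total_unsent += result['unsentPurged']
        return total_removed, total_unsent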