diff --git a/C/common/config_category.cpp b/C/common/config_category.cpp index 0ff76b10e7..3b14e60885 100644 --- a/C/common/config_category.cpp +++ b/C/common/config_category.cpp @@ -464,6 +464,10 @@ string ConfigCategory::getItemAttribute(const string& itemName, return m_items[i]->m_mandatory; case FILE_ATTR: return m_items[i]->m_file; + case VALIDITY_ATTR: + return m_items[i]->m_validity; + case GROUP_ATTR: + return m_items[i]->m_group; default: throw new ConfigItemAttributeNotFound(); } @@ -514,6 +518,12 @@ bool ConfigCategory::setItemAttribute(const string& itemName, case LENGTH_ATTR: m_items[i]->m_length = value; return true; + case VALIDITY_ATTR: + m_items[i]->m_validity = value; + return true; + case GROUP_ATTR: + m_items[i]->m_group = value; + return true; default: return false; } @@ -946,7 +956,7 @@ ConfigCategory::CategoryItem::CategoryItem(const string& name, m_order = ""; } - if (item.HasMember("length")) + if (item.HasMember("length")) { m_length = item["length"].GetString(); } @@ -1030,6 +1040,23 @@ ConfigCategory::CategoryItem::CategoryItem(const string& name, m_displayName = ""; } + if (item.HasMember("validity")) + { + m_validity = item["validity"].GetString(); + } + else + { + m_validity = ""; + } + if (item.HasMember("group")) + { + m_group = item["group"].GetString(); + } + else + { + m_group = ""; + } + if (item.HasMember("options")) { const Value& options = item["options"]; @@ -1310,12 +1337,14 @@ ConfigCategory::CategoryItem::CategoryItem(const CategoryItem& rhs) } m_file = rhs.m_file; m_itemType = rhs.m_itemType; + m_validity = rhs.m_validity; + m_group = rhs.m_group; } /** * Create a JSON representation of the configuration item * - * @param full false is the deafult, true evaluates all the members of the CategoryItem + * @param full false is the default, true evaluates all the members of the CategoryItem * */ string ConfigCategory::CategoryItem::toJSON(const bool full) const @@ -1365,7 +1394,7 @@ ostringstream convert; convert << ", \"order\" : \"" << m_order << "\""; } - if (!m_length.empty()) + if (!m_length.empty()) { convert << ", \"length\" : \"" << m_length << "\""; } @@ -1385,10 +1414,20 @@ ostringstream convert; convert << ", \"readonly\" : \"" << m_readonly << "\""; } - if (!m_mandatory.empty()) - { - convert << ", \"mandatory\" : \"" << m_mandatory << "\""; - } + if (!m_mandatory.empty()) + { + convert << ", \"mandatory\" : \"" << m_mandatory << "\""; + } + + if (!m_validity.empty()) + { + convert << ", \"validity\" : \"" << JSONescape(m_validity) << "\""; + } + + if (!m_group.empty()) + { + convert << ", \"group\" : \"" << m_group << "\""; + } if (!m_file.empty()) { @@ -1432,7 +1471,7 @@ ostringstream convert; convert << ", \"displayName\" : \"" << m_displayName << "\""; } - if (!m_length.empty()) + if (!m_length.empty()) { convert << ", \"length\" : \"" << m_length << "\""; } @@ -1452,10 +1491,20 @@ ostringstream convert; convert << ", \"readonly\" : \"" << m_readonly << "\""; } - if (!m_mandatory.empty()) - { - convert << ", \"mandatory\" : \"" << m_mandatory << "\""; - } + if (!m_mandatory.empty()) + { + convert << ", \"mandatory\" : \"" << m_mandatory << "\""; + } + + if (!m_validity.empty()) + { + convert << ", \"validity\" : \"" << JSONescape(m_validity) << "\""; + } + + if (!m_group.empty()) + { + convert << ", \"group\" : \"" << m_group << "\""; + } if (!m_file.empty()) { diff --git a/C/common/include/asset_tracking.h b/C/common/include/asset_tracking.h index b6e2ace2d5..fd27a1f2c5 100644 --- a/C/common/include/asset_tracking.h +++ 
b/C/common/include/asset_tracking.h @@ -61,6 +61,9 @@ class AssetTrackingTuple { {} std::string& getAssetName() { return m_assetName; }; + std::string getPluginName() { return m_pluginName;} + std::string getEventName() { return m_eventName;} + std::string getServiceName() { return m_serviceName;} bool isDeprecated() { return m_deprecated; }; void unDeprecate() { m_deprecated = false; }; diff --git a/C/common/include/config_category.h b/C/common/include/config_category.h index 76803e0f77..24aee86af1 100644 --- a/C/common/include/config_category.h +++ b/C/common/include/config_category.h @@ -124,7 +124,9 @@ class ConfigCategory { FILE_ATTR, MINIMUM_ATTR, MAXIMUM_ATTR, - LENGTH_ATTR}; + LENGTH_ATTR, + VALIDITY_ATTR, + GROUP_ATTR}; std::string getItemAttribute(const std::string& itemName, ItemAttribute itemAttribute) const; @@ -166,6 +168,8 @@ class ConfigCategory { m_options; std::string m_file; ItemType m_itemType; + std::string m_validity; + std::string m_group; }; std::vector m_items; std::string m_name; diff --git a/C/common/include/storage_asset_tracking.h b/C/common/include/storage_asset_tracking.h index 32c7d2d8fe..1fe2dc4695 100644 --- a/C/common/include/storage_asset_tracking.h +++ b/C/common/include/storage_asset_tracking.h @@ -83,18 +83,10 @@ namespace std class ManagementClient; -typedef std::unordered_multiset, StorageAssetTrackingTuplePtrEqual> StorageAssetCacheSet; +typedef std::unordered_map, std::hash, StorageAssetTrackingTuplePtrEqual> StorageAssetCacheMap; -typedef std::unordered_multiset, StorageAssetTrackingTuplePtrEqual>::iterator StorageAssetCacheSetItr; +typedef std::unordered_map, std::hash, StorageAssetTrackingTuplePtrEqual>::iterator StorageAssetCacheMapItr; -struct StorageAssetCacheSetItrCmp{ - - bool operator ()(StorageAssetCacheSetItr x, StorageAssetCacheSetItr y) - { - return x != y; - } - -}; /** * The StorageAssetTracker class provides the asset tracking functionality. 
* There are methods to populate asset tracking cache from asset_tracker DB table, @@ -112,7 +104,8 @@ class StorageAssetTracker { bool getFledgeConfigInfo(); static StorageAssetTracker *getStorageAssetTracker(); static void releaseStorageAssetTracker(); - int compareDatapoints(const std::string& dp1, const std::string& dp2); + void updateCache(std::set dpSet, StorageAssetTrackingTuple* ptr); + bool getDeprecated(StorageAssetTrackingTuple* ptr); private: static StorageAssetTracker *instance; @@ -120,8 +113,9 @@ class StorageAssetTracker { std::string m_fledgeService; std::string m_service; std::string m_event; + std::set getDataPointsSet(std::string strDatapoints); - StorageAssetCacheSet storageAssetTrackerTuplesCache; + StorageAssetCacheMap storageAssetTrackerTuplesCache; }; #endif diff --git a/C/common/include/storage_client.h b/C/common/include/storage_client.h index ea10cb72be..4df4b8a701 100644 --- a/C/common/include/storage_client.h +++ b/C/common/include/storage_client.h @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -48,20 +49,30 @@ class StorageClient { ResultSet *queryTable(const std::string& tablename, const Query& query); ReadingSet *queryTableToReadings(const std::string& tableName, const Query& query); int insertTable(const std::string& schema, const std::string& tableName, const InsertValues& values); - int updateTable(const std::string& schema, const std::string& tableName, const InsertValues& values, const Where& where); - int updateTable(const std::string& schema, const std::string& tableName, const JSONProperties& json, const Where& where); - int updateTable(const std::string& schema, const std::string& tableName, const InsertValues& values, const JSONProperties& json, const Where& where); - int updateTable(const std::string& schema, const std::string& tableName, const ExpressionValues& values, const Where& where); - int updateTable(const std::string& schema, const std::string& tableName, std::vector>& updates); - int updateTable(const std::string& schema, const std::string& tableName, const InsertValues& values, const ExpressionValues& expressoins, const Where& where); + int updateTable(const std::string& schema, const std::string& tableName, const InsertValues& values, + const Where& where, const UpdateModifier *modifier = NULL); + int updateTable(const std::string& schema, const std::string& tableName, const JSONProperties& json, + const Where& where, const UpdateModifier *modifier = NULL); + int updateTable(const std::string& schema, const std::string& tableName, const InsertValues& values, + const JSONProperties& json, const Where& where, const UpdateModifier *modifier = NULL); + int updateTable(const std::string& schema, const std::string& tableName, const ExpressionValues& values, + const Where& where, const UpdateModifier *modifier = NULL); + int updateTable(const std::string& schema, const std::string& tableName, + std::vector>& updates, const UpdateModifier *modifier = NULL); + int updateTable(const std::string& schema, const std::string& tableName, const InsertValues& values, + const ExpressionValues& expressoins, const Where& where, const UpdateModifier *modifier = NULL); int deleteTable(const std::string& schema, const std::string& tableName, const Query& query); int insertTable(const std::string& tableName, const InsertValues& values); - int updateTable(const std::string& tableName, const InsertValues& values, const Where& where); - int updateTable(const std::string& tableName, const JSONProperties& json, const Where& 
where); - int updateTable(const std::string& tableName, const InsertValues& values, const JSONProperties& json, const Where& where); - int updateTable(const std::string& tableName, const ExpressionValues& values, const Where& where); - int updateTable(const std::string& tableName, std::vector>& updates); - int updateTable(const std::string& tableName, const InsertValues& values, const ExpressionValues& expressoins, const Where& where); + int updateTable(const std::string& tableName, const InsertValues& values, const Where& where, const UpdateModifier *modifier = NULL); + int updateTable(const std::string& tableName, const JSONProperties& json, const Where& where, const UpdateModifier *modifier = NULL); + int updateTable(const std::string& tableName, const InsertValues& values, const JSONProperties& json, + const Where& where, const UpdateModifier *modifier = NULL); + int updateTable(const std::string& tableName, const ExpressionValues& values, const Where& where, + const UpdateModifier *modifier = NULL); + int updateTable(const std::string& tableName, std::vector>& updates, + const UpdateModifier *modifier = NULL); + int updateTable(const std::string& tableName, const InsertValues& values, const ExpressionValues& expressions, + const Where& where, const UpdateModifier *modifier = NULL); int deleteTable(const std::string& tableName, const Query& query); bool readingAppend(Reading& reading); bool readingAppend(const std::vector & readings); diff --git a/C/common/include/update_modifier.h b/C/common/include/update_modifier.h new file mode 100644 index 0000000000..6d3745921c --- /dev/null +++ b/C/common/include/update_modifier.h @@ -0,0 +1,32 @@ +#ifndef _UPDATE_MODIFIER_H +#define _UPDATE_MODIFIER_H +/* + * Fledge storage client. + * + * Copyright (c) 2022 Dianonic Systems + * + * Released under the Apache 2.0 Licence + * + * Author: Mark Riddoch + */ +#include + + +/** + * Update modifier + */ +class UpdateModifier { + public: + UpdateModifier(const std::string& modifier) : + m_modifier(modifier) + { + }; + ~UpdateModifier(); + const std::string toJSON() const { return m_modifier; }; + private: + UpdateModifier(const UpdateModifier&); + UpdateModifier& operator=(UpdateModifier const&); + const std::string m_modifier; +}; +#endif + diff --git a/C/common/storage_asset_tracking.cpp b/C/common/storage_asset_tracking.cpp index 5d85379541..3ba3ad0238 100644 --- a/C/common/storage_asset_tracking.cpp +++ b/C/common/storage_asset_tracking.cpp @@ -62,36 +62,15 @@ void StorageAssetTracker::populateStorageAssetTrackingCache() try { std::vector& vec = m_mgtClient->getStorageAssetTrackingTuples(m_service); + for (StorageAssetTrackingTuple* & rec : vec) { - auto it = storageAssetTrackerTuplesCache.find(rec); - if (it == storageAssetTrackerTuplesCache.end()) - { - // tuple not found in cache , so add it - storageAssetTrackerTuplesCache.insert(rec); - } - else + set setOfDPs = getDataPointsSet(rec->m_datapoints); + if (setOfDPs.size() == 0) { - // tuple present and count value < count of reading, update cache - if ((*it)->m_maxCount < rec->m_maxCount) - { - storageAssetTrackerTuplesCache.erase(it); - storageAssetTrackerTuplesCache.insert(rec); - } - else if ((*it)->m_maxCount == rec->m_maxCount) - { - // case where counts are same but datapoints are different - // "a", "b", "c" and "a", "b", "foo" - // keep both the records - if ((*it)->m_datapoints.compare(rec->m_datapoints)) - { - storageAssetTrackerTuplesCache.insert(rec); - } - } + Logger::getLogger()->warn("%s:%d Datapoints unavailable for service %s ", 
__FUNCTION__, __LINE__, m_service.c_str()); } - - Logger::getLogger()->debug("%s:%d Added storage asset tracker tuple to cache: '%s'", __FUNCTION__, __LINE__, - rec->assetToString().c_str()); + storageAssetTrackerTuplesCache[rec] = setOfDPs; } delete (&vec); } @@ -101,131 +80,9 @@ void StorageAssetTracker::populateStorageAssetTrackingCache() return; } - return; } -/** - * Find whether the Storage asset tracking tuple exists in the cache or not - * - * Return the pointer to the tuple - * - * @param tuple StorageAssetTrackingTuple Type - * @return A pointer to StorageAssetTrackingTuple in cache or null - */ - - -StorageAssetTrackingTuple* StorageAssetTracker::findStorageAssetTrackingCache(StorageAssetTrackingTuple& tuple) -{ - StorageAssetTrackingTuple *ptr = &tuple; - std::unordered_multiset::const_iterator it = storageAssetTrackerTuplesCache.find(ptr); - - if (it == storageAssetTrackerTuplesCache.end()) - { - Logger::getLogger()->debug("%s:%d :tuple not found in cache ", __FUNCTION__, __LINE__); - return NULL; - } - else - { - auto rangeItr = storageAssetTrackerTuplesCache.equal_range(ptr); - - unsigned int max = 0; - std::set maxItr; - for(auto r = rangeItr.first; r != rangeItr.second; ++r) - { - // case where maxcount in cache greater than tuple arg, simply return that itr to cache - if ((*r)->m_maxCount > ptr->m_maxCount) - { - return (*r); - } - if ((*r)->m_maxCount > max) - { - max = (*r)->m_maxCount; - - Logger::getLogger()->debug("%s:%d, max value = %d", __FILE__, __LINE__, max); - } - } - - for(auto r = rangeItr.first; r != rangeItr.second; ++r) - { - //prepare set of iterators with maxiumum value - if ((*r)->m_maxCount == max) - { - maxItr.insert(r); - } - } - - - for(auto r = rangeItr.first; r != rangeItr.second; ++r) - { - - // tuple present and its value > count of max in cache, update in cache, remove rest - if ( ptr->m_maxCount > max) - { - Logger::getLogger()->debug("%s:%d tuple present and count value < count of reading, update cache, erased dp%s ", __FUNCTION__, __LINE__, (*it)->m_datapoints.c_str()); - - storageAssetTrackerTuplesCache.erase(rangeItr.first, rangeItr.second); - return NULL; - } - - else if (ptr->m_maxCount == max) - { - // case where counts are same but datapoints are different - // "a", "b", "c" and "a", "b", "foo" - // keep both the records incoming and max already present in cache - // delete rest - - // for all the records which have less count than maxItr and incoming, delete - if (maxItr.find(r) == maxItr.end()) - { - storageAssetTrackerTuplesCache.erase(r); - } - else - { - - // incoming has same count as maximum, check their dps - - if (compareDatapoints(ptr->m_datapoints,(*r)->m_datapoints)) - { - // dps different however count same , need to update in cache - return NULL; - } - else - { - // dps same and count also same , dont update - return *r; - } - } - } - - } - - // dont need updation , return pointer to tuple in cache - return *it; - } -} - -/** - * Add storage asset tracking tuple via microservice management API and in cache - * - * @param tuple New tuple to add in DB and in cache - */ -void StorageAssetTracker::addStorageAssetTrackingTuple(StorageAssetTrackingTuple& tuple) -{ - std::unordered_multiset::const_iterator it = storageAssetTrackerTuplesCache.find(&tuple); - - bool rv = m_mgtClient->addStorageAssetTrackingTuple(tuple.m_serviceName, tuple.m_pluginName, tuple.m_assetName, tuple.m_eventName, false, tuple.m_datapoints, tuple.m_maxCount); - - if (rv) // insert into cache only if DB operation succeeded - { - StorageAssetTrackingTuple 
*ptr = new StorageAssetTrackingTuple(tuple); - storageAssetTrackerTuplesCache.insert(ptr); - Logger::getLogger()->info("%s:%d: Added tuple to cache: %s, insert in db successful ", __FUNCTION__, __LINE__, tuple.assetToString().c_str()); - } - else - Logger::getLogger()->error("%s:%d: Failed to insert storage asset tracking tuple into DB: '%s'", __FUNCTION__, __LINE__, tuple.assetToString().c_str()); -} - /** * Return Plugin Information in the Fledge configuration * @@ -294,28 +151,153 @@ bool StorageAssetTracker::getFledgeConfigInfo() return false; } - /** - * Compare the Datapoints in StorageAssetTracker, they can be '"' enclosed + * Updates datapoints present in the arg dpSet in the cache * - * @return int result of comparison of datapoints strings , 0 when equal + * @param dpSet set of datapoints string values to be updated in cache + * @param ptr StorageAssetTrackingTuple* , as key in cache (map) + * Retval void */ -int StorageAssetTracker::compareDatapoints(const std::string& dp1, const std::string& dp2) + +void StorageAssetTracker::updateCache(std::set dpSet, StorageAssetTrackingTuple* ptr) { - std::string temp1, temp2; - for( int i = 0; i < dp1.size() ; ++i) + if(ptr == nullptr) { - if (dp1[i] != '"') - temp1.push_back(dp1[i]); + Logger::getLogger()->error("%s:%d: StorageAssetTrackingTuple should not be NULL pointer", __FUNCTION__, __LINE__); + return; } - for( int i = 0; i < dp2.size() ; ++i) + unsigned int sizeOfInputSet = dpSet.size(); + StorageAssetCacheMapItr it = storageAssetTrackerTuplesCache.find(ptr); + + // search for the record in cache , if not present, simply update cache and return + if (it == storageAssetTrackerTuplesCache.end()) { - if (dp2[i] != '"') - temp2.push_back(dp2[i]); + Logger::getLogger()->debug("%s:%d :tuple not found in cache ", __FUNCTION__, __LINE__); + storageAssetTrackerTuplesCache[ptr] = dpSet; + + std::string strDatapoints; + unsigned int count = 0; + for (auto itr : dpSet) + { + strDatapoints.append(itr); + strDatapoints.append(","); + count++; + } + if (strDatapoints[strDatapoints.size()-1] == ',') + strDatapoints.pop_back(); + + bool rv = m_mgtClient->addStorageAssetTrackingTuple(ptr->getServiceName(), ptr->getPluginName(), ptr->getAssetName(), ptr->getEventName(), false, strDatapoints, count); + if (rv) + { + storageAssetTrackerTuplesCache[ptr] = dpSet; + } + else + Logger::getLogger()->error("%s:%d: Failed to insert storage asset tracking tuple into DB: '%s'", __FUNCTION__, __LINE__, (ptr->getAssetName()).c_str()); + + return; } + else + { + // record is found in cache , compare the datapoints of the argument ptr to that present in the cache + // update the cache with datapoints present in argument record but absent in cache + // + std::set &cacheRecord = it->second; + unsigned int sizeOfCacheRecord = cacheRecord.size(); + + // store all the datapoints to be updated in string strDatapoints which is sent to management_client + std::string strDatapoints; + unsigned int count = 0; + for (auto itr : cacheRecord) + { + strDatapoints.append(itr); + strDatapoints.append(","); + count++; + } + + // check which datapoints are not present in cache record, and need to be updated + // in cache and db, store them in string strDatapoints, in comma-separated format + for(auto itr: dpSet) + { + if (cacheRecord.find(itr) == cacheRecord.end()) + { + strDatapoints.append(itr); + strDatapoints.append(","); + count++; + } + } - return temp1.compare(temp2); + // remove the last comma + if (strDatapoints[strDatapoints.size()-1] == ',') + strDatapoints.pop_back(); + + 
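// At this point strDatapoints holds the datapoints already cached for this tuple plus
// any datapoints from dpSet that were not yet cached, and count is the size of that
// union; if count has not grown beyond the cached record there is nothing new to persist.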
if (count <= sizeOfCacheRecord) + { + // No need to update as count of cache record is not getting increased + return; + } + + // Update the DB + bool rv = m_mgtClient->addStorageAssetTrackingTuple(ptr->getServiceName(), ptr->getPluginName(), ptr->getAssetName(), ptr->getEventName(), false, strDatapoints, count); + if(rv) + { + // if update of DB successful , then update the CacheRecord + for(auto itr: dpSet) + { + if (cacheRecord.find(itr) == cacheRecord.end()) + { + cacheRecord.insert(itr); + } + } + } + else + { + // Log error if Update DB unsuccessful + Logger::getLogger()->error("%s:%d: Failed to insert storage asset tracking tuple into DB: '%s'", __FUNCTION__, __LINE__, (ptr->getAssetName()).c_str()); + + } + } +} + +//This function takes a string of datapoints in comma-separated format and returns +//set of string datapoint values +std::set StorageAssetTracker::getDataPointsSet(std::string strDatapoints) +{ + + std::set tokens; + stringstream st(strDatapoints); + std::string temp; + + while(getline(st, temp, ',')) + { + tokens.insert(temp); + } + + return tokens; } +/** This function takes a StorageAssetTrackingTuple pointer and searches for + * it in cache, if found then returns its Deprecated status + * + * @param ptr StorageAssetTrackingTuple* , as key in cache (map) + * Retval bool Deprecation status + */ + + +bool StorageAssetTracker::getDeprecated(StorageAssetTrackingTuple* ptr) +{ + StorageAssetCacheMapItr it = storageAssetTrackerTuplesCache.find(ptr); + + if (it == storageAssetTrackerTuplesCache.end()) + { + Logger::getLogger()->debug("%s:%d :tuple not found in cache ", __FUNCTION__, __LINE__); + return false; + } + else + { + return (it->first)->isDeprecated(); + } + + return false; +} diff --git a/C/common/storage_client.cpp b/C/common/storage_client.cpp index 811f1770aa..28732a4fe0 100755 --- a/C/common/storage_client.cpp +++ b/C/common/storage_client.cpp @@ -583,11 +583,12 @@ int StorageClient::insertTable(const string& schema, const string& tableName, co * @param tableName The name of the table into which data will be added * @param values The values to insert into the table * @param where The conditions to match the updated rows + * @param modifier Optional storage modifier * @return int The number of rows updated */ -int StorageClient::updateTable(const string& tableName, const InsertValues& values, const Where& where) +int StorageClient::updateTable(const string& tableName, const InsertValues& values, const Where& where, const UpdateModifier *modifier) { - return updateTable(DEFAULT_SCHEMA, tableName, values, where); + return updateTable(DEFAULT_SCHEMA, tableName, values, where, modifier); } /** @@ -597,9 +598,10 @@ int StorageClient::updateTable(const string& tableName, const InsertValues& valu * @param tableName The name of the table into which data will be added * @param values The values to insert into the table * @param where The conditions to match the updated rows + * @param modifier Optional storage modifier * @return int The number of rows updated */ -int StorageClient::updateTable(const string& schema, const string& tableName, const InsertValues& values, const Where& where) +int StorageClient::updateTable(const string& schema, const string& tableName, const InsertValues& values, const Where& where, const UpdateModifier *modifier) { static HttpClient *httpClient = this->getHttpClient(); // to initialize m_seqnum_map[thread_id] for this thread try { @@ -614,8 +616,12 @@ int StorageClient::updateTable(const string& schema, const string& tableName, co 
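The UpdateModifier introduced in update_modifier.h is threaded through the updateTable() overloads; when one is supplied, the payload built in the hunks below gains a "modifiers" array inside each element of "updates". A minimal caller-side sketch, assuming a hypothetical "allowzero" modifier string and illustrative table, column and key names:

#include <storage_client.h>
#include <update_modifier.h>

void exampleUpdate(StorageClient& storage)
{
	InsertValues values;
	values.push_back(InsertValue("state", "running"));	// illustrative column and value
	Where where("id", Equals, "1");				// illustrative key condition
	UpdateModifier modifier("allowzero");			// hypothetical modifier string
	// Sends: { "updates" : [ { "modifiers" : [ "allowzero" ], "where" : {...}, "values" : {...} } ] }
	storage.updateTable("example_table", values, where, &modifier);
}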
ostringstream convert; - convert << "{ \"updates\" : [ "; - convert << "{ \"where\" : "; + convert << "{ \"updates\" : [ {"; + if (modifier) + { + convert << "\"modifiers\" : [ \"" << modifier->toJSON() << "\" ], "; + } + convert << "\"where\" : "; convert << where.toJSON(); convert << ", \"values\" : "; convert << values.toJSON(); @@ -662,11 +668,12 @@ int StorageClient::updateTable(const string& schema, const string& tableName, co * @param tableName The name of the table into which data will be added * @param values The expressions to update into the table * @param where The conditions to match the updated rows + * @param modifier Optional update modifier * @return int The number of rows updated */ -int StorageClient::updateTable(const string& tableName, const ExpressionValues& values, const Where& where) +int StorageClient::updateTable(const string& tableName, const ExpressionValues& values, const Where& where, const UpdateModifier *modifier) { - return updateTable(DEFAULT_SCHEMA, tableName, values, where); + return updateTable(DEFAULT_SCHEMA, tableName, values, where, modifier); } /** @@ -676,9 +683,10 @@ int StorageClient::updateTable(const string& tableName, const ExpressionValues& * @param tableName The name of the table into which data will be added * @param values The expressions to update into the table * @param where The conditions to match the updated rows + * @param modifier Optional update modifier * @return int The number of rows updated */ -int StorageClient::updateTable(const string& schema, const string& tableName, const ExpressionValues& values, const Where& where) +int StorageClient::updateTable(const string& schema, const string& tableName, const ExpressionValues& values, const Where& where, const UpdateModifier *modifier) { static HttpClient *httpClient = this->getHttpClient(); // to initialize m_seqnum_map[thread_id] for this thread try { @@ -693,8 +701,12 @@ int StorageClient::updateTable(const string& schema, const string& tableName, co ostringstream convert; - convert << "{ \"updates\" : [ "; - convert << "{ \"where\" : "; + convert << "{ \"updates\" : [ {"; + if (modifier) + { + convert << "\"modifiers\" : [ \"" << modifier->toJSON() << "\" ], "; + } + convert << "\"where\" : "; convert << where.toJSON(); convert << ", \"expressions\" : "; convert << values.toJSON(); @@ -740,11 +752,12 @@ int StorageClient::updateTable(const string& schema, const string& tableName, co * * @param tableName The name of the table into which data will be added * @param updates The expressions and condition pairs to update in the table + * @param modifier Optional update modifier * @return int The number of rows updated */ -int StorageClient::updateTable(const string& tableName, vector>& updates) +int StorageClient::updateTable(const string& tableName, vector>& updates, const UpdateModifier *modifier) { - return updateTable(DEFAULT_SCHEMA, tableName, updates); + return updateTable(DEFAULT_SCHEMA, tableName, updates, modifier); } /** @@ -753,9 +766,10 @@ int StorageClient::updateTable(const string& tableName, vector>& updates) +int StorageClient::updateTable(const string& schema, const string& tableName, vector>& updates, const UpdateModifier *modifier) { static HttpClient *httpClient = this->getHttpClient(); // to initialize m_seqnum_map[thread_id] for this thread try { @@ -777,7 +791,12 @@ int StorageClient::updateTable(const string& schema, const string& tableName, ve { convert << ", "; } - convert << "{ \"where\" : "; + convert << "{ "; + if (modifier) + { + convert << 
"\"modifiers\" : [ \"" << modifier->toJSON() << "\" ], "; + } + convert << "\"where\" : "; convert << it->second->toJSON(); convert << ", \"expressions\" : "; convert << it->first->toJSON(); @@ -827,11 +846,12 @@ int StorageClient::updateTable(const string& schema, const string& tableName, ve * @param values The values to insert into the table * @param expressions The expression to update inthe table * @param where The conditions to match the updated rows + * @param modifier Optional update modifier * @return int The number of rows updated */ -int StorageClient::updateTable(const string& tableName, const InsertValues& values, const ExpressionValues& expressions, const Where& where) +int StorageClient::updateTable(const string& tableName, const InsertValues& values, const ExpressionValues& expressions, const Where& where, const UpdateModifier *modifier) { - return updateTable(DEFAULT_SCHEMA, tableName, values, expressions, where); + return updateTable(DEFAULT_SCHEMA, tableName, values, expressions, where, modifier); } /** @@ -842,15 +862,20 @@ int StorageClient::updateTable(const string& tableName, const InsertValues& valu * @param values The values to insert into the table * @param expressions The expression to update inthe table * @param where The conditions to match the updated rows + * @param modifier Optional update modifier * @return int The number of rows updated */ -int StorageClient::updateTable(const string& schema, const string& tableName, const InsertValues& values, const ExpressionValues& expressions, const Where& where) +int StorageClient::updateTable(const string& schema, const string& tableName, const InsertValues& values, const ExpressionValues& expressions, const Where& where, const UpdateModifier *modifier) { try { ostringstream convert; - convert << "{ \"updates\" : [ "; - convert << "{ \"where\" : "; + convert << "{ \"updates\" : [ { "; + if (modifier) + { + convert << "\"modifiers\" : [ \"" << modifier->toJSON() << "\" ], "; + } + convert << "\"where\" : "; convert << where.toJSON(); convert << ", \"values\" : "; convert << values.toJSON(); @@ -899,11 +924,12 @@ int StorageClient::updateTable(const string& schema, const string& tableName, co * @param tableName The name of the table into which data will be added * @param json The values to insert into the table * @param where The conditions to match the updated rows + * @param modifier Optional update modifier * @return int The number of rows updated */ -int StorageClient::updateTable(const string& tableName, const JSONProperties& values, const Where& where) +int StorageClient::updateTable(const string& tableName, const JSONProperties& values, const Where& where, const UpdateModifier *modifier) { - return updateTable(DEFAULT_SCHEMA, tableName, values, where); + return updateTable(DEFAULT_SCHEMA, tableName, values, where, modifier); } /** @@ -915,13 +941,17 @@ int StorageClient::updateTable(const string& tableName, const JSONProperties& va * @param where The conditions to match the updated rows * @return int The number of rows updated */ -int StorageClient::updateTable(const string& schema, const string& tableName, const JSONProperties& values, const Where& where) +int StorageClient::updateTable(const string& schema, const string& tableName, const JSONProperties& values, const Where& where, const UpdateModifier *modifier) { try { ostringstream convert; - convert << "{ \"updates\" : [ "; - convert << "{ \"where\" : "; + convert << "{ \"updates\" : [ {"; + if (modifier) + { + convert << "\"modifiers\" : [ \"" << 
modifier->toJSON() << "\" ]"; + } + convert << "\"where\" : "; convert << where.toJSON(); convert << ", "; convert << values.toJSON(); @@ -969,11 +999,12 @@ int StorageClient::updateTable(const string& schema, const string& tableName, co * @param values The values to insert into the table * @param jsonProp The JSON Properties to update * @param where The conditions to match the updated rows + * @param modifier Optional update modifier * @return int The number of rows updated */ -int StorageClient::updateTable(const string& tableName, const InsertValues& values, const JSONProperties& jsonProp, const Where& where) +int StorageClient::updateTable(const string& tableName, const InsertValues& values, const JSONProperties& jsonProp, const Where& where, const UpdateModifier *modifier) { - return updateTable(DEFAULT_SCHEMA, tableName, values, jsonProp, where); + return updateTable(DEFAULT_SCHEMA, tableName, values, jsonProp, where, modifier); } /** @@ -984,15 +1015,20 @@ int StorageClient::updateTable(const string& tableName, const InsertValues& valu * @param values The values to insert into the table * @param jsonProp The JSON Properties to update * @param where The conditions to match the updated rows + * @param modifier Optional update modifier * @return int The number of rows updated */ -int StorageClient::updateTable(const string& schema, const string& tableName, const InsertValues& values, const JSONProperties& jsonProp, const Where& where) +int StorageClient::updateTable(const string& schema, const string& tableName, const InsertValues& values, const JSONProperties& jsonProp, const Where& where, const UpdateModifier *modifier) { try { ostringstream convert; - convert << "{ \"updates\" : [ "; - convert << "{ \"where\" : "; + convert << "{ \"updates\" : [ {"; + if (modifier) + { + convert << "\"modifiers\" : [ \"" << modifier->toJSON() << "\", "; + } + convert << "\"where\" : "; convert << where.toJSON(); convert << ", \"values\" : "; convert << values.toJSON(); diff --git a/C/plugins/north/OMF/include/OMFHint.h b/C/plugins/north/OMF/include/OMFHint.h index 85e7f598d3..a78fa7997b 100644 --- a/C/plugins/north/OMF/include/OMFHint.h +++ b/C/plugins/north/OMF/include/OMFHint.h @@ -79,6 +79,67 @@ class OMFAFLocationHint : public OMFHint ~OMFAFLocationHint() {}; }; +/** + * A Legacy type hint, tells the OMF plugin to send complex types for this asset + */ +class OMFLegacyTypeHint : public OMFHint +{ +public: + OMFLegacyTypeHint(const std::string& name) { m_hint = name; }; + ~OMFLegacyTypeHint() {}; +}; + +/** + * A Source hint, defines the data source for the asset or datapoint + */ +class OMFSourceHint : public OMFHint +{ +public: + OMFSourceHint(const std::string& name) { m_hint = name; }; + ~OMFSourceHint() {}; +}; + +/** + * A unit of measurement hint, defines the unit of measurement for a datapoint + */ +class OMFUOMHint : public OMFHint +{ +public: + OMFUOMHint(const std::string& name) { m_hint = name; }; + ~OMFUOMHint() {}; +}; + + +/** + * A minimum hint, defines the minimum value for a property + */ +class OMFMinimumHint : public OMFHint +{ +public: + OMFMinimumHint(const std::string& name) { m_hint = name; }; + ~OMFMinimumHint() {}; +}; + + +/** + * A maximum hint, defines the maximum value for a property + */ +class OMFMaximumHint : public OMFHint +{ +public: + OMFMaximumHint(const std::string& name) { m_hint = name; }; + ~OMFMaximumHint() {}; +}; + +/** + * A interpolation hint, defines the interpolation value for a property + */ +class OMFInterpolationHint : public OMFHint +{ +public: 
+ OMFInterpolationHint(const std::string& name) { m_hint = name; }; + ~OMFInterpolationHint() {}; +}; /** * A set of hints for a reading * diff --git a/C/plugins/north/OMF/include/basetypes.h b/C/plugins/north/OMF/include/basetypes.h new file mode 100644 index 0000000000..df6e33ee6a --- /dev/null +++ b/C/plugins/north/OMF/include/basetypes.h @@ -0,0 +1,171 @@ +#ifndef _BASETYPES_H +#define _BASETYPES_H +/* + * Fledge OSIsoft OMF interface to PI Server. + * + * Copyright (c) 2022 Dianomic Systems + * + * Released under the Apache 2.0 Licence + * + * Author: Mark Riddoch + */ +#include + +static const char *baseOMFTypes = QUOTE( + [ + { + "id":"Double64", + "type":"object", + "classification":"dynamic", + "properties":{ + "Double64":{ + "type":["number", "null"], + "format":"float64" + }, + "Time":{ + "type":"string", + "format":"date-time", + "isindex":true + } + } + }, + { + "id":"Double32", + "type":"object", + "classification":"dynamic", + "properties":{ + "Double32":{ + "type":["number", "null"], + "format":"float32" + }, + "Time":{ + "type":"string", + "format":"date-time", + "isindex":true + } + } + }, + { + "id":"Integer16", + "type":"object", + "classification":"dynamic", + "properties":{ + "Integer16":{ + "type":["integer","null"], + "format":"int16", + }, + "Time":{ + "type":"string", + "format":"date-time", + "isindex":true + } + } + }, + { + "id":"Integer32", + "type":"object", + "classification":"dynamic", + "properties":{ + "Integer32":{ + "type":["integer","null"], + "format":"int32", + }, + "Time":{ + "type":"string", + "format":"date-time", + "isindex":true + } + } + }, + { + "id":"Integer64", + "type":"object", + "classification":"dynamic", + "properties":{ + "Integer64":{ + "type":["integer","null"], + "format":"int64", + }, + "Time":{ + "type":"string", + "format":"date-time", + "isindex":true + } + } + }, + { + "id":"UInteger16", + "type":"object", + "classification":"dynamic", + "properties":{ + "UInteger16":{ + "type":["integer","null"], + "format":"uint16", + }, + "Time":{ + "type":"string", + "format":"date-time", + "isindex":true + } + } + }, + { + "id":"UInteger32", + "type":"object", + "classification":"dynamic", + "properties":{ + "UInteger32":{ + "type":["integer","null"], + "format":"uint32", + }, + "Time":{ + "type":"string", + "format":"date-time", + "isindex":true + } + } + }, + { + "id":"UInteger64", + "type":"object", + "classification":"dynamic", + "properties":{ + "UInteger64":{ + "type":["integer","null"], + "format":"uint64", + }, + "Time":{ + "type":"string", + "format":"date-time", + "isindex":true + } + } + }, + { + "id":"String", + "type":"object", + "classification":"dynamic", + "properties":{ + "String":{ + "type":["string","null"] + }, + "Time":{ + "type":"string", + "format":"date-time", + "isindex":true + } + } + }, + { + "id":"FledgeAsset", + "type":"object", + "classification":"static", + "properties":{ + "AssetId": {"type": "string", "isindex": true }, + "Name" : { "type": "string", "isname": true } + } + + } + ]); + +#endif diff --git a/C/plugins/common/include/ocs.h b/C/plugins/north/OMF/include/ocs.h similarity index 100% rename from C/plugins/common/include/ocs.h rename to C/plugins/north/OMF/include/ocs.h diff --git a/C/plugins/north/OMF/include/omf.h b/C/plugins/north/OMF/include/omf.h index 8872ec0fd7..a7995fde90 100644 --- a/C/plugins/north/OMF/include/omf.h +++ b/C/plugins/north/OMF/include/omf.h @@ -19,6 +19,13 @@ #define OMF_HINT "OMFHint" +// The following will force the OMF version for EDs endpoints +// Remove or comment out 
the line below to prevent the forcing +// of the version +#define EDS_OMF_VERSION "1.0" +#define CR_OMF_VERSION "1.0" + + #define TYPE_ID_DEFAULT 1 #define FAKE_ASSET_KEY "_default_start_id_" #define OMF_TYPE_STRING "string" @@ -101,6 +108,20 @@ class OMF // Destructor ~OMF(); + void setOMFVersion(std::string& omfversion) + { + m_OMFVersion = omfversion; + if (omfversion.compare("1.0") == 0 + || omfversion.compare("1.1") == 0) + { + m_linkedProperties = false; + } + else + { + m_linkedProperties = true; + } + }; + /** * Send data to PI Server passing a vector of readings. * @@ -204,6 +225,8 @@ class OMF bool getConnected() const { return m_connected; }; void setConnected(const bool connectionStatus) { m_connected = connectionStatus; }; + void setLegacyMode(bool legacy) { m_legacy = legacy; }; + static std::string ApplyPIServerNamingRulesObj(const std::string &objName, bool *changed); static std::string ApplyPIServerNamingRulesPath(const std::string &objName, bool *changed); static std::string ApplyPIServerNamingRulesInvalidChars(const std::string &objName, bool *changed); @@ -230,7 +253,7 @@ class OMF const std::string createStaticData(const Reading& reading); // Create data Link message, with 'Data', for current row - std::string createLinkData(const Reading& reading, std::string& AFHierarchyLevel, std::string& prefix, std::string& objectPrefix, OMFHints *hints); + std::string createLinkData(const Reading& reading, std::string& AFHierarchyLevel, std::string& prefix, std::string& objectPrefix, OMFHints *hints, bool legacy); /** * Creata data for readings data content, with 'Data', for one row @@ -321,6 +344,12 @@ class OMF bool HandleAFMapNames(Document& JSon); bool HandleAFMapMetedata(Document& JSon); + // Start of support for using linked containers + bool sendBaseTypes(); + // End of support for using linked containers + // + string createAFLinks(Reading &reading, OMFHints *hints); + private: // Use for the evaluation of the OMFDataTypes.typesShort union t_typeCount { @@ -435,12 +464,47 @@ class OMF std::vector> *m_staticData; + /** + * The version of OMF we are talking + */ + std::string m_OMFVersion; + /** + * Support sending properties via links + */ + bool m_linkedProperties; + + /** + * The container for this asset and data point has been sent in + * this session. + */ + std::map + m_containerSent; + + /** + * The data message for this asset and data point has been sent in + * this session. + */ + std::map + m_assetSent; + + /** + * The link for this asset and data point has been sent in + * this session. + */ + std::map + m_linkSent; + + /** + * Force the data to be sent using the legacy, complex OMF types + */ + bool m_legacy; }; /** * The OMFData class. - * A reading is formatted with OMF specifications + * A reading is formatted with OMF specifications using the original + * type creation scheme implemented by the OMF plugin */ class OMFData { diff --git a/C/plugins/north/OMF/include/omflinkeddata.h b/C/plugins/north/OMF/include/omflinkeddata.h new file mode 100644 index 0000000000..c2e174a962 --- /dev/null +++ b/C/plugins/north/OMF/include/omflinkeddata.h @@ -0,0 +1,104 @@ +#ifndef OMFLINKEDDATA_H +#define OMFLINKEDDATA_H +/* + * Fledge OSIsoft OMF interface to PI Server. + * + * Copyright (c) 2022 Dianomic Systems + * + * Released under the Apache 2.0 Licence + * + * Author: Mark Riddoch + */ + +#include +#include +#include + +/** + * The OMFLinkedData class. 
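The setOMFVersion() method added above is what selects between the two encodings: OMF 1.0 and 1.1 retain the original complex-type messages, while any later version enables the linked-container mechanism implemented by OMFLinkedData below. A minimal sketch of driving it, where only the OMF methods are from this change and the surrounding names are assumptions:

// omf is an OMF instance constructed elsewhere in the north plugin (assumption)
std::string omfversion = "1.2";	// e.g. the version negotiated with a PI Web API endpoint
omf.setOMFVersion(omfversion);	// "1.0" or "1.1" keep complex types, anything else enables linked containers
omf.setLegacyMode(false);	// user configuration can still force the legacy complex types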
+ * A reading is formatted with OMF specifications using the linked + * type creation scheme supported for OMF Version 1.2 onwards. + * + * This is based on the new mechanism discussed at AVEVA World 2022 and + * the mechanism is detailed in the Google Doc, + * https://docs.google.com/document/d/1w0e7VRqX7xzc0lEBLq-sYhgaHE0ABasOa6EC9dJMrMs/edit + * + * The principle is to use links to containers in OMF with each container being a single + * data point in the asset. There are no specific types for the assets, they share a set + * of base types via these links. This should allow for readings that have different sets + * of datapoints for each asset. + * + * It is also a goal of this mechanism to move away from the need to persist state data + * between invocations and make the process more robust. + */ +class OMFLinkedData +{ + public: + OMFLinkedData( std::map *containerSent, + std::map *assetSent, + std::map *linkSent, + const OMF_ENDPOINT PIServerEndpoint = ENDPOINT_CR) : + m_containerSent(containerSent), + m_assetSent(assetSent), + m_linkSent(linkSent), + m_endpoint(PIServerEndpoint), + m_doubleFormat("float64"), + m_integerFormat("int64") + {}; + std::string processReading(const Reading& reading, + const std::string& DefaultAFLocation = std::string(), + OMFHints *hints = NULL); + bool flushContainers(HttpSender& sender, const std::string& path, std::vector >& header); + void setFormats(const std::string& doubleFormat, const std::string& integerFormat) + { + m_doubleFormat = doubleFormat; + m_integerFormat = integerFormat; + }; + private: + std::string sendContainer(std::string& link, Datapoint *dp, const std::string& format, OMFHints * hints); + bool isTypeSupported(DatapointValue& dataPoint) + { + switch (dataPoint.getType()) + { + case DatapointValue::DatapointTag::T_FLOAT: + case DatapointValue::DatapointTag::T_INTEGER: + case DatapointValue::DatapointTag::T_STRING: + return true; + default: + return false; + } + }; + + private: + /** + * The container for this asset and data point has been sent in + * this session. + */ + std::map *m_containerSent; + + /** + * The data message for this asset and data point has been sent in + * this session. + */ + std::map *m_assetSent; + + /** + * The link for this asset and data point has been sent in + * this session. + */ + std::map *m_linkSent; + + /** + * The endpoint to which we are sending data + */ + OMF_ENDPOINT m_endpoint; + + + /** + * The set of containers to flush + */ + std::string m_containers; + std::string m_doubleFormat; + std::string m_integerFormat; +}; +#endif diff --git a/C/plugins/north/OMF/linkdata.cpp b/C/plugins/north/OMF/linkdata.cpp new file mode 100644 index 0000000000..f3f8d0d9ec --- /dev/null +++ b/C/plugins/north/OMF/linkdata.cpp @@ -0,0 +1,386 @@ +/* + * Fledge OSIsoft OMF interface to PI Server. 
+ * + * Copyright (c) 2022 Dianomic Systems + * + * Released under the Apache 2.0 Licence + * + * Author: Mark Riddoch + */ + +#include +#include +#include +#include +#include +#include +#include +#include "string_utils.h" +#include + +#include +#include + +#include + +using namespace std; +/** + * OMFLinkedData constructor, generates the OMF message containing the data + * + * @param reading Reading for which the OMF message must be generated + * @param AFHierarchyPrefix Unused at the current stage + * @param hints OMF hints for the specific reading for changing the behaviour of the operation + * + */ +string OMFLinkedData::processReading(const Reading& reading, const string& AFHierarchyPrefix, OMFHints *hints) +{ + string outData; + bool changed; + + + string assetName = reading.getAssetName(); + // Apply any TagName hints to modify the containerid + if (hints) + { + const std::vector omfHints = hints->getHints(); + for (auto it = omfHints.cbegin(); it != omfHints.cend(); it++) + { + if (typeid(**it) == typeid(OMFTagNameHint)) + { + assetName = (*it)->getHint(); + Logger::getLogger()->info("Using OMF TagName hint: %s", assetName.c_str()); + } + if (typeid(**it) == typeid(OMFTagHint)) + { + assetName = (*it)->getHint(); + Logger::getLogger()->info("Using OMF Tag hint: %s", assetName.c_str()); + } + } + } + + + // Get reading data + const vector data = reading.getReadingData(); + unsigned long skipDatapoints = 0; + + Logger::getLogger()->info("Processing %s with new OMF method", assetName.c_str()); + + bool needDelim = false; + if (m_assetSent->find(assetName) == m_assetSent->end()) + { + // Send the data message to create the asset instance + outData.append("{ \"typeid\":\"FledgeAsset\", \"values\":[ { \"AssetId\":\""); + outData.append(assetName + "\",\"Name\":\""); + outData.append(assetName + "\""); + outData.append("} ] }"); + needDelim = true; + m_assetSent->insert(pair(assetName, true)); + } + + /** + * This loop creates the data values for each of the datapoints in the + * reading. + */ + for (vector::const_iterator it = data.begin(); it != data.end(); ++it) + { + string dpName = (*it)->getName(); + if (dpName.compare(OMF_HINT) == 0) + { + // Don't send the OMF Hint to the PI Server + continue; + } + if (!isTypeSupported((*it)->getData())) + { + skipDatapoints++;; + continue; + } + else + { + if (needDelim) + { + outData.append(","); + } + else + { + needDelim = true; + } + string format; + if (hints) + { + const vector omfHints = hints->getHints(dpName); + for (auto hit = omfHints.cbegin(); hit != omfHints.cend(); hit++) + { + if (typeid(**hit) == typeid(OMFNumberHint)) + { + format = (*hit)->getHint(); + break; + } + if (typeid(**hit) == typeid(OMFIntegerHint)) + { + format = (*hit)->getHint(); + break; + } + + } + } + + // Create the link for the asset if not already created + string link = assetName + "." 
+ dpName; + string baseType; + auto container = m_containerSent->find(link); + if (container == m_containerSent->end()) + { + baseType = sendContainer(link, *it, format, hints); + m_containerSent->insert(pair(link, baseType)); + } + else + { + baseType = container->second; + } + if (baseType.empty()) + { + // Type is not supported, skip the datapoint + continue; + } + if (m_linkSent->find(link) == m_linkSent->end()) + { + outData.append("{ \"typeid\":\"__Link\","); + outData.append("\"values\":[ { \"source\" : {"); + outData.append("\"typeid\": \"FledgeAsset\","); + outData.append("\"index\":\"" + assetName); + outData.append("\" }, \"target\" : {"); + outData.append("\"containerid\" : \""); + outData.append(link); + outData.append("\" } } ] },"); + + m_linkSent->insert(pair(link, true)); + } + + // Convert reading data into the OMF JSON string + outData.append("{\"containerid\": \"" + link); + outData.append("\", \"values\": [{"); + + // Base type we are using for this data point + outData.append("\"" + baseType + "\": "); + // Add datapoint Value + outData.append((*it)->getData().toString()); + outData.append(", "); + // Append Z to getAssetDateTime(FMT_STANDARD) + outData.append("\"Time\": \"" + reading.getAssetDateUserTime(Reading::FMT_STANDARD) + "Z" + "\""); + outData.append("} ] }"); + } + } + Logger::getLogger()->debug("Created data messasges %s", outData.c_str()); + return outData; +} + +/** + * Send the container message for the linked datapoint + * + * @param linkName The name to use for the container + * @param dp The datapoint to process + * @param format The format to use based on a hint, this may be empty + * @param hints Hints related to this asset + * @return The base type linked in the container + */ +string OMFLinkedData::sendContainer(string& linkName, Datapoint *dp, const string& format, OMFHints * hints) +{ + string baseType; + switch (dp->getData().getType()) + { + case DatapointValue::T_STRING: + baseType = "String"; + break; + case DatapointValue::T_INTEGER: + { + string intFormat; + if (!format.empty()) + intFormat = format; + else + intFormat = m_integerFormat; + if (intFormat.compare("int64") == 0) + baseType = "Integer64"; + else if (intFormat.compare("int32") == 0) + baseType = "Integer32"; + else if (intFormat.compare("int16") == 0) + baseType = "Integer16"; + else if (intFormat.compare("uint64") == 0) + baseType = "UInteger64"; + else if (intFormat.compare("uint32") == 0) + baseType = "UInteger32"; + else if (intFormat.compare("uint16") == 0) + baseType = "UInteger16"; + break; + } + case DatapointValue::T_FLOAT: + { + string doubleFormat; + if (!format.empty()) + doubleFormat = format; + else + doubleFormat = m_doubleFormat; + if (doubleFormat.compare("float64") == 0) + baseType = "Double64"; + else if (doubleFormat.compare("float32") == 0) + baseType = "Double32"; + break; + } + default: + Logger::getLogger()->error("Unsupported type %s", dp->getData().getTypeStr()); + // Not supported + return baseType; + } + + string dataSource = "Fledge"; + string uom, minimum, maximum, interpolation; + bool propertyOverrides = false; + + + if (hints) + { + const vector omfHints = hints->getHints(); + for (auto it = omfHints.cbegin(); it != omfHints.end(); it++) + { + if (typeid(**it) == typeid(OMFSourceHint)) + { + dataSource = (*it)->getHint(); + } + } + + const vector dpHints = hints->getHints(dp->getName()); + for (auto it = dpHints.cbegin(); it != dpHints.end(); it++) + { + if (typeid(**it) == typeid(OMFSourceHint)) + { + dataSource = (*it)->getHint(); + } + if 
(typeid(**it) == typeid(OMFUOMHint)) + { + uom = (*it)->getHint(); + propertyOverrides = true; + } + if (typeid(**it) == typeid(OMFMinimumHint)) + { + minimum = (*it)->getHint(); + propertyOverrides = true; + } + if (typeid(**it) == typeid(OMFMaximumHint)) + { + maximum = (*it)->getHint(); + propertyOverrides = true; + } + if (typeid(**it) == typeid(OMFInterpolationHint)) + { + interpolation = (*it)->getHint(); + propertyOverrides = true; + } + } + } + + string container = "{ \"id\" : \"" + linkName; + container += "\", \"typeid\" : \""; + container += baseType; + container += "\", \"name\" : \""; + container += dp->getName(); + container += "\", \"datasource\" : \"" + dataSource + "\""; + + if (propertyOverrides) + { + container += ", \"propertyoverrides\" : { \""; + container += baseType; + container += "\" : {"; + string delim = ""; + if (!uom.empty()) + { + delim = ","; + container += "\"uom\" : \""; + container += uom; + container += "\""; + } + if (!minimum.empty()) + { + container += delim; + delim = ","; + container += "\"minimum\" : "; + container += minimum; + } + if (!maximum.empty()) + { + container += delim; + delim = ","; + container += "\"maximum\" : "; + container += maximum; + } + if (!interpolation.empty()) + { + container += delim; + delim = ","; + container += "\"interpolation\" : \""; + container += interpolation; + container += "\""; + } + container += "} }"; + } + container += "}"; + + Logger::getLogger()->debug("Built container: %s", container.c_str()); + + if (! m_containers.empty()) + m_containers += ","; + m_containers.append(container); + + return baseType; +} + +/** + * Flush the container definitions that have been built up + * + * @return true if the containers where succesfully flushed + */ +bool OMFLinkedData::flushContainers(HttpSender& sender, const string& path, vector >& header) +{ + if (m_containers.empty()) + return true; // Nothing to flush + string payload = "[" + m_containers + "]"; + m_containers = ""; + + Logger::getLogger()->debug("Flush container information: %s", payload.c_str()); + + // Write to OMF endpoint + try + { + int res = sender.sendRequest("POST", + path, + header, + payload); + if ( ! 
(res >= 200 && res <= 299) ) + { + Logger::getLogger()->error("Sending containers, HTTP code %d - %s %s", + res, + sender.getHostPort().c_str(), + path.c_str()); + return false; + } + } + // Exception raised for HTTP 400 Bad Request + catch (const BadRequest& e) + { + + Logger::getLogger()->warn("Sending containers, not blocking issue: %s - %s %s", + e.what(), + sender.getHostPort().c_str(), + path.c_str()); + + return false; + } + catch (const std::exception& e) + { + + Logger::getLogger()->error("Sending containers, %s - %s %s", + e.what(), + sender.getHostPort().c_str(), + path.c_str()); + return false; + } + return true; +} diff --git a/C/plugins/common/ocs.cpp b/C/plugins/north/OMF/ocs.cpp similarity index 100% rename from C/plugins/common/ocs.cpp rename to C/plugins/north/OMF/ocs.cpp diff --git a/C/plugins/north/OMF/omf.cpp b/C/plugins/north/OMF/omf.cpp index c29ef5d86d..80036d92af 100644 --- a/C/plugins/north/OMF/omf.cpp +++ b/C/plugins/north/OMF/omf.cpp @@ -30,6 +30,9 @@ #include #include +#include +#include + using namespace std; using namespace rapidjson; @@ -227,11 +230,13 @@ OMF::OMF(HttpSender& sender, m_path(path), m_typeId(id), m_producerToken(token), - m_sender(sender) + m_sender(sender), + m_legacy(false) { m_lastError = false; m_changeTypeId = false; m_OMFDataTypes = NULL; + m_OMFVersion = "1.0"; } /** @@ -543,7 +548,8 @@ bool OMF::sendDataTypes(const Reading& row, OMFHints *hints) , AFHierarchyLevel.c_str() ); // Create data for Static Data message - string typeLinkData = OMF::createLinkData(row, AFHierarchyLevel, prefix, objectPrefix, hints); + string typeLinkData = OMF::createLinkData(row, AFHierarchyLevel, prefix, objectPrefix, hints, true); + string payload = "[" + typeLinkData + "]"; // Build an HTTPS POST with 'resLinkData' headers // and 'typeLinkData' JSON payload @@ -553,7 +559,7 @@ bool OMF::sendDataTypes(const Reading& row, OMFHints *hints) res = m_sender.sendRequest("POST", m_path, resLinkData, - typeLinkData); + payload); if (!(res >= 200 && res <= 299)) { Logger::getLogger()->error("Sending JSON dataType message 'Data' (lynk) - error: HTTP code |%d| - %s %s", @@ -1031,9 +1037,19 @@ uint32_t OMF::sendToServer(const vector& readings, gettimeofday(&start, NULL); #endif - // Create a superset of all found datapoints for each assetName - // the superset[assetName] is then passed to routines which handle - // creation of OMF data types + if (m_linkedProperties) + { + if (!sendBaseTypes()) + { + Logger::getLogger()->error("Unable to send base types, linked assets will not be sent"); + m_linkedProperties = false; + } + } + + // Create a superset of all the datapoints for each assetName + // the superset[assetName] is then passed to routines which handles + // creation of OMF data types. This is used for the initial type + // handling of complex data types. 
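// Illustrative note (names are examples only): when the linked-container path is taken,
// OMFLinkedData::sendContainer() registers one container per datapoint, e.g. for a float
// datapoint "temperature" on asset "pump1":
//   { "id":"pump1.temperature", "typeid":"Double64", "name":"temperature", "datasource":"Fledge" }
// and flushContainers() posts the collected definitions in a single "Container" message
// before the data itself is sent below.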
OMF::setMapObjectTypes(readings, m_SuperSetDataPoints); #if INSTRUMENT @@ -1067,6 +1083,12 @@ uint32_t OMF::sendToServer(const vector& readings, string OMFHintAFHierarchyTmp; string OMFHintAFHierarchy; + bool legacyType = m_legacy; + + // Create the class that deals with the linked data generation + OMFLinkedData linkedData(&m_containerSent, &m_assetSent, &m_linkSent, m_PIServerEndpoint); + linkedData.setFormats(getFormatType(OMF_TYPE_FLOAT), getFormatType(OMF_TYPE_INTEGER)); + bool pendingSeparator = false; ostringstream jsonData; jsonData << "["; @@ -1112,6 +1134,15 @@ uint32_t OMF::sendToServer(const vector& readings, ,OMFHintAFHierarchy.c_str() ); } } + for (auto it = omfHints.cbegin(); it != omfHints.cend(); it++) + { + if (typeid(**it) == typeid(OMFLegacyTypeHint)) + { + Logger::getLogger()->info("Using OMF Legacy Type hint: %s", (*it)->getHint().c_str()); + legacyType = true; + break; + } + } } // Applies the PI-Server naming rules to the AssetName @@ -1166,118 +1197,157 @@ uint32_t OMF::sendToServer(const vector& readings, } } - if (! usingTagHint) + if (! AFHierarchySent) { - /* - * Check the OMFHints, if there are any, to see if we have a - * type name that should be used for this asset. - * We will still create the type, but the name will be fixed - * as the value of this hint. - */ - bool usingTypeNameHint = false; - if (hints) + setAFHierarchy(); + } + + string outData; + // Use old style complex types if the user has forced it via configuration, + // we are running against an EDS endpoint or Connector Relay or we have types defined for this + // asset already + if (legacyType || m_PIServerEndpoint == ENDPOINT_EDS || + m_PIServerEndpoint == ENDPOINT_CR || + m_OMFDataTypes->find(keyComplete) != m_OMFDataTypes->end()) + { + // Legacy type support + if (! usingTagHint) { - const vector omfHints = hints->getHints(); - for (auto it = omfHints.cbegin(); it != omfHints.cend(); it++) + /* + * Check the OMFHints, if there are any, to see if we have a + * type name that should be used for this asset. + * We will still create the type, but the name will be fixed + * as the value of this hint. + */ + bool usingTypeNameHint = false; + if (hints) { - if (typeid(**it) == typeid(OMFTypeNameHint)) + const vector omfHints = hints->getHints(); + for (auto it = omfHints.cbegin(); it != omfHints.cend(); it++) { - Logger::getLogger()->info("Using OMF TypeName hint: %s", (*it)->getHint().c_str()); - keyComplete.append("_" + (*it)->getHint()); - usingTypeNameHint = true; - break; + if (typeid(**it) == typeid(OMFTypeNameHint)) + { + Logger::getLogger()->info("Using OMF TypeName hint: %s", (*it)->getHint().c_str()); + keyComplete.append("_" + (*it)->getHint()); + usingTypeNameHint = true; + break; + } } } - } - - if (! AFHierarchySent) - { - setAFHierarchy(); - } - auto it = m_SuperSetDataPoints.find(m_assetName); - if (it == m_SuperSetDataPoints.end()) { - // The asset has only unsupported properties, so it is ignored - continue; - } - sendDataTypes = (m_lastError == false && skipSentDataTypes == true) ? 
- // Send if not already sent - !OMF::getCreatedTypes(keyComplete, *reading, hints) : - // Always send types - true; + auto it = m_SuperSetDataPoints.find(m_assetName); + if (it == m_SuperSetDataPoints.end()) { + // The asset has only unsupported properties, so it is ignored + continue; + } - Reading* datatypeStructure = NULL; - if (sendDataTypes && !usingTypeNameHint) - { - // Increment type-id of assetName in in memory cache - OMF::incrementAssetTypeIdOnly(keyComplete); - // Remove data and keep type-id - OMF::clearCreatedTypes(keyComplete); + sendDataTypes = (m_lastError == false && skipSentDataTypes == true) ? + // Send if not already sent + !OMF::getCreatedTypes(keyComplete, *reading, hints) : + // Always send types + true; - // Get the supersetDataPoints for current assetName - auto it = m_SuperSetDataPoints.find(m_assetName); - if (it != m_SuperSetDataPoints.end()) + Reading* datatypeStructure = NULL; + if (sendDataTypes && !usingTypeNameHint) { - datatypeStructure = (*it).second; + // Increment type-id of assetName in in memory cache + OMF::incrementAssetTypeIdOnly(keyComplete); + // Remove data and keep type-id + OMF::clearCreatedTypes(keyComplete); + + // Get the supersetDataPoints for current assetName + auto it = m_SuperSetDataPoints.find(m_assetName); + if (it != m_SuperSetDataPoints.end()) + { + datatypeStructure = (*it).second; + } } - } - if (m_sendFullStructure) { + if (m_sendFullStructure) { + + // The AF hierarchy is created/recreated if an OMF type message is sent + // it sends the hierarchy once + if (sendDataTypes and ! AFHierarchySent) + { + if (!handleAFHierarchy()) + { + m_lastError = true; + return 0; + } - // The AF hierarchy is created/recreated if an OMF type message is sent - // it sends the hierarchy once - if (sendDataTypes and ! AFHierarchySent) + AFHierarchySent = true; + } + } + + if (usingTypeNameHint) { - if (!handleAFHierarchy()) + if (sendDataTypes && !OMF::handleDataTypes(keyComplete, + *reading, skipSentDataTypes, hints)) { + // Failure m_lastError = true; return 0; } + } + else + { + // Check first we have supersetDataPoints for the current reading + if ((sendDataTypes && datatypeStructure == NULL) || + // Handle the data types of the current reading + (sendDataTypes && + // Send data type + !OMF::handleDataTypes(keyComplete, *datatypeStructure, skipSentDataTypes, hints) && + // Data type not sent: + (!m_changeTypeId || + // Increment type-id and re-send data types + !OMF::handleTypeErrors(keyComplete, *datatypeStructure, hints)))) + { + // Remove all assets supersetDataPoints + OMF::unsetMapObjectTypes(m_SuperSetDataPoints); - AFHierarchySent = true; + // Failure + m_lastError = true; + return 0; + } } + + // Create the key for dataTypes sending once + typeId = OMF::getAssetTypeId(m_assetName); } - if (usingTypeNameHint) + measurementId = generateMeasurementId(m_assetName); + + outData = OMFData(*reading, measurementId, m_PIServerEndpoint, AFHierarchyPrefix, hints ).OMFdataVal(); + } + else + { + // We do this before the send so we know if it was sent for the first time + // in the processReading call + auto asset_sent = m_assetSent.find(m_assetName); + // Send data for this reading using the new mechanism + outData = linkedData.processReading(*reading, AFHierarchyPrefix, hints); + if (asset_sent == m_assetSent.end()) { - if (sendDataTypes && !OMF::handleDataTypes(keyComplete, - *reading, skipSentDataTypes, hints)) + // If the hierarchy has not already been sent then send it + if (! 
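The restructuring above boils down to one routing decision per reading. A condensed sketch of that decision, using the same names as the patch:

// Complex (legacy) types are kept for EDS and Connector Relay endpoints,
// when the Legacy option or a LegacyType hint is set, or when this asset
// already has a complex type registered; otherwise the linked-data path is used.
bool useComplexTypes = legacyType
		|| m_PIServerEndpoint == ENDPOINT_EDS
		|| m_PIServerEndpoint == ENDPOINT_CR
		|| m_OMFDataTypes->find(keyComplete) != m_OMFDataTypes->end();

std::string measurementId = generateMeasurementId(m_assetName);
std::string outData = useComplexTypes
		? OMFData(*reading, measurementId, m_PIServerEndpoint, AFHierarchyPrefix, hints).OMFdataVal()
		: linkedData.processReading(*reading, AFHierarchyPrefix, hints);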
AFHierarchySent) { - // Failure - m_lastError = true; - return 0; + if (!handleAFHierarchy()) + { + m_lastError = true; + return 0; + } + AFHierarchySent = true; } - } - else - { - // Check first we have supersetDataPoints for the current reading - if ((sendDataTypes && datatypeStructure == NULL) || - // Handle the data types of the current reading - (sendDataTypes && - // Send data type - !OMF::handleDataTypes(keyComplete, *datatypeStructure, skipSentDataTypes, hints) && - // Data type not sent: - (!m_changeTypeId || - // Increment type-id and re-send data types - !OMF::handleTypeErrors(keyComplete, *datatypeStructure, hints)))) - { - // Remove all assets supersetDataPoints - OMF::unsetMapObjectTypes(m_SuperSetDataPoints); - // Failure - m_lastError = true; - return 0; + string af = createAFLinks(*reading, hints); + if (! af.empty()) + { + outData.append(","); + outData.append(af); } } - - // Create the key for dataTypes sending once - typeId = OMF::getAssetTypeId(m_assetName); } - - measurementId = generateMeasurementId(m_assetName); - - string outData = OMFData(*reading, measurementId, m_PIServerEndpoint, AFHierarchyPrefix, hints ).OMFdataVal(); if (!outData.empty()) { jsonData << (pendingSeparator ? ", " : "") << outData; @@ -1311,6 +1381,9 @@ uint32_t OMF::sendToServer(const vector& readings, gettimeofday(&t3, NULL); #endif + vector> containerHeader = OMF::createMessageHeader("Container"); + linkedData.flushContainers(m_sender, m_path, containerHeader); + /** * Types messages sent, now transform each reading to OMF format. * @@ -1706,7 +1779,7 @@ const vector> OMF::createMessageHeader(const std::string& t res.push_back(pair("messagetype", type)); res.push_back(pair("producertoken", m_producerToken)); - res.push_back(pair("omfversion", "1.0")); + res.push_back(pair("omfversion", m_OMFVersion)); res.push_back(pair("messageformat", "JSON")); res.push_back(pair("action", action)); @@ -1884,6 +1957,7 @@ const std::string OMF::createTypeData(const Reading& reading, OMFHints *hints) } } + /** * Creates the Container message for data type definition * @@ -2115,9 +2189,13 @@ const std::string OMF::createStaticData(const Reading& reading) * Note: type is 'Data' * * @param reading A reading data + * @param AFHierarchyLevel The AF eleemnt we are placing the reading in + * @param AFHierarchyPrefix The prefix we use for thr AF Eleement + * @param objectPrefix The object prefix we are using for this asset + * @param legacy We are using legacy, complex types for this reading * @return Type JSON message as string */ -std::string OMF::createLinkData(const Reading& reading, std::string& AFHierarchyLevel, std::string& AFHierarchyPrefix, std::string& objectPrefix, OMFHints *hints) +std::string OMF::createLinkData(const Reading& reading, std::string& AFHierarchyLevel, std::string& AFHierarchyPrefix, std::string& objectPrefix, OMFHints *hints, bool legacy) { string targetTypeId; @@ -2129,7 +2207,7 @@ std::string OMF::createLinkData(const Reading& reading, std::string& AFHierarch long typeId = getAssetTypeId(assetName); - string lData = "[{\"typeid\": \"__Link\", \"values\": ["; + string lData = "{\"typeid\": \"__Link\", \"values\": ["; // Handles the structure for the Connector Relay // not supported by PI Web API @@ -2160,7 +2238,7 @@ std::string OMF::createLinkData(const Reading& reading, std::string& AFHierarch // Add asset_name lData.append(assetName); - lData.append("\"}},"); + lData.append("\"}}"); } else if (m_PIServerEndpoint == ENDPOINT_PIWEB_API) { @@ -2172,60 +2250,73 @@ std::string 
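The data payload itself is assembled with a pending-separator flag so that readings which produce no output do not leave stray commas in the JSON array. A minimal sketch of that pattern, with perReadingJson standing in for the per-reading strings produced above:

std::ostringstream jsonData;
jsonData << "[";
bool pendingSeparator = false;
for (const std::string& outData : perReadingJson)
{
	if (outData.empty())
		continue;                     // nothing to send for this reading
	jsonData << (pendingSeparator ? ", " : "") << outData;
	pendingSeparator = true;
}
jsonData << "]";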
OMF::createLinkData(const Reading& reading, std::string& AFHierarch StringReplace(tmpStr, "_placeholder_src_type_", AFHierarchyPrefix + "_" + AFHierarchyLevel + "_typeid"); StringReplace(tmpStr, "_placeholder_src_idx_", AFHierarchyPrefix + "_" + AFHierarchyLevel ); - StringReplace(tmpStr, "_placeholder_tgt_type_", targetTypeId); - StringReplace(tmpStr, "_placeholder_tgt_idx_", "A_" + objectPrefix + "_" + assetName + - generateSuffixType(assetName, typeId) ); + + if (legacy) + { + StringReplace(tmpStr, "_placeholder_tgt_type_", targetTypeId); + StringReplace(tmpStr, "_placeholder_tgt_idx_", "A_" + objectPrefix + "_" + assetName + + generateSuffixType(assetName, typeId) ); + } + else + { + StringReplace(tmpStr, "_placeholder_tgt_type_", "FledgeAsset"); + StringReplace(tmpStr, "_placeholder_tgt_idx_", assetName); + } lData.append(tmpStr); - lData.append(","); } - lData.append("{\"source\": {\"typeid\": \""); - // Add type_id + '_' + asset_name + '__typename_sensor' - OMF::setAssetTypeTag(assetName, - "typename_sensor", - lData); + if (legacy) + { + lData.append(",{\"source\": {\"typeid\": \""); - lData.append("\", \"index\": \""); + // Add type_id + '_' + asset_name + '__typename_sensor' + OMF::setAssetTypeTag(assetName, + "typename_sensor", + lData); - if (m_PIServerEndpoint == ENDPOINT_CR) - { - // Add asset_name - lData.append(assetName); - } - else if (m_PIServerEndpoint == ENDPOINT_OCS || - m_PIServerEndpoint == ENDPOINT_ADH || - m_PIServerEndpoint == ENDPOINT_EDS) - { - // Add asset_name - lData.append(assetName); - } - else if (m_PIServerEndpoint == ENDPOINT_PIWEB_API) - { - lData.append("A_" + objectPrefix + "_" + assetName + generateSuffixType(assetName, typeId) ); - } + lData.append("\", \"index\": \""); - measurementId = generateMeasurementId(assetName); + if (m_PIServerEndpoint == ENDPOINT_CR) + { + // Add asset_name + lData.append(assetName); + } + else if (m_PIServerEndpoint == ENDPOINT_OCS || + m_PIServerEndpoint == ENDPOINT_ADH || + m_PIServerEndpoint == ENDPOINT_EDS) + { + // Add asset_name + lData.append(assetName); + } + else if (m_PIServerEndpoint == ENDPOINT_PIWEB_API) + { + lData.append("A_" + objectPrefix + "_" + assetName + generateSuffixType(assetName, typeId) ); + } + + measurementId = generateMeasurementId(assetName); - // Apply any TagName hints to modify the containerid - if (hints) - { - const std::vector omfHints = hints->getHints(); - for (auto it = omfHints.cbegin(); it != omfHints.cend(); it++) + // Apply any TagName hints to modify the containerid + if (hints) { - if (typeid(**it) == typeid(OMFTagNameHint)) + const std::vector omfHints = hints->getHints(); + for (auto it = omfHints.cbegin(); it != omfHints.cend(); it++) { - measurementId = (*it)->getHint(); - Logger::getLogger()->info("Using OMF TagName hint: %s", measurementId.c_str()); - break; + if (typeid(**it) == typeid(OMFTagNameHint)) + { + measurementId = (*it)->getHint(); + Logger::getLogger()->info("Using OMF TagName hint: %s", measurementId.c_str()); + break; + } } } - } - lData.append("\"}, \"target\": {\"containerid\": \"" + measurementId); + lData.append("\"}, \"target\": {\"containerid\": \"" + measurementId); - lData.append("\"}}]}]"); + lData.append("\"}}"); + } + lData.append("]}"); // Return JSON string return lData; @@ -4525,3 +4616,121 @@ std::string OMF::ApplyPIServerNamingRulesPath(const std::string &objName, bool * return (nameFixed); } + +/** + * Send the base types that we use to define all the data point values + * + * @return true If the data types were sent correctly. 
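For orientation, the non-legacy branch of createLinkData() now links the asset to a generic FledgeAsset element instead of a per-type element. The values below are purely illustrative; the source fields come from a template string that is not visible in this hunk:

// Hypothetical shape of a single __Link entry produced for the linked-data path.
const char *afLink = R"({"typeid": "__Link", "values": [
	{"source": {"typeid": "prefix_level_typeid", "index": "prefix_level"},
	 "target": {"typeid": "FledgeAsset", "index": "pump1"}}
]})";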
Otherwise false. + */ +bool OMF::sendBaseTypes() +{ + vector> resType = OMF::createMessageHeader("Type"); + + // Build an HTTPS POST with 'resType' headers + // and 'typeData' JSON payload + // Then get HTTPS POST ret code and return 0 to client on error + try + { + int res = m_sender.sendRequest("POST", + m_path, + resType, + baseOMFTypes); + if ( ! (res >= 200 && res <= 299) ) + { + Logger::getLogger()->error("Sending base data types message 'Type', HTTP code %d - %s %s", + res, + m_sender.getHostPort().c_str(), + m_path.c_str()); + return false; + } + } + // Exception raised for HTTP 400 Bad Request + catch (const BadRequest& e) + { + if (OMF::isDataTypeError(e.what())) + { + // Data type error: force type-id change + m_changeTypeId = true; + } + string errorMsg = errorMessageHandler(e.what()); + + Logger::getLogger()->warn("Sending dataType message 'Type', not blocking issue: %s %s - %s %s", + (m_changeTypeId ? "Data Type " : "" ), + errorMsg.c_str(), + m_sender.getHostPort().c_str(), + m_path.c_str()); + + return false; + } + catch (const std::exception& e) + { + string errorMsg = errorMessageHandler(e.what()); + + Logger::getLogger()->error("Sending dataType message 'Type', %s - %s %s", + errorMsg.c_str(), + m_sender.getHostPort().c_str(), + m_path.c_str()); + m_connected = false; + return false; + } + Logger::getLogger()->info("Base types successfully sent"); + return true; +} + +/** + * Create the messages to link the asset into the right place in the AF structure + * + * @param reading The reading being sent + * @param hints OMF Hints for this reading + */ +string OMF::createAFLinks(Reading& reading, OMFHints *hints) +{ +string AFDataMessage; + + if (m_sendFullStructure) + { + string assetName = m_assetName; + string AFHierarchyLevel; + string prefix; + string objectPrefix; + + auto rule = m_AssetNamePrefix.find(assetName); + if (rule != m_AssetNamePrefix.end()) + { + auto itemArray = rule->second; + objectPrefix = ""; + + for (auto &item : itemArray) + { + string AFHierarchy; + string prefix; + + AFHierarchy = std::get<0>(item); + generateAFHierarchyPrefixLevel(AFHierarchy, prefix, AFHierarchyLevel); + + prefix = std::get<1>(item); + + if (objectPrefix.empty()) + { + objectPrefix = prefix; + } + + Logger::getLogger()->debug("%s - assetName :%s: AFHierarchy :%s: prefix :%s: objectPrefix :%s: AFHierarchyLevel :%s: ", __FUNCTION__ + ,assetName.c_str() + , AFHierarchy.c_str() + , prefix.c_str() + , objectPrefix.c_str() + , AFHierarchyLevel.c_str() ); + + // Create data for Static Data message + AFDataMessage = OMF::createLinkData(reading, AFHierarchyLevel, prefix, objectPrefix, hints, false); + + } + } + else + { + Logger::getLogger()->error("AF hierarchy is not defined for the asset Name |%s|", assetName.c_str()); + } + } + return AFDataMessage; +} diff --git a/C/plugins/north/OMF/omfhints.cpp b/C/plugins/north/OMF/omfhints.cpp index d1500a3933..38846115e0 100644 --- a/C/plugins/north/OMF/omfhints.cpp +++ b/C/plugins/north/OMF/omfhints.cpp @@ -142,6 +142,14 @@ OMFHints::OMFHints(const string& hints) { m_hints.push_back(new OMFAFLocationHint(itr->value.GetString())); } + else if (strcmp(name, "LegacyType") == 0) + { + m_hints.push_back(new OMFLegacyTypeHint(itr->value.GetString())); + } + else if (strcmp(name, "source") == 0) + { + m_hints.push_back(new OMFSourceHint(itr->value.GetString())); + } else if (strcmp(name, "datapoint") == 0) { const Value &child = itr->value; @@ -176,6 +184,41 @@ OMFHints::OMFHints(const string& hints) { hints.push_back(new
OMFTagHint(dpitr->value.GetString())); } + else if (strcmp(name, "uom") == 0) + { + hints.push_back(new OMFUOMHint(dpitr->value.GetString())); + } + else if (strcmp(name, "source") == 0) + { + hints.push_back(new OMFSourceHint(dpitr->value.GetString())); + } + else if (strcmp(name, "minimum") == 0) + { + hints.push_back(new OMFMinimumHint(dpitr->value.GetString())); + } + else if (strcmp(name, "maximum") == 0) + { + hints.push_back(new OMFMaximumHint(dpitr->value.GetString())); + } + else if (strcmp(name, "interpolation") == 0) + { + string interpolation = dpitr->value.GetString(); + if (interpolation.compare("continuous") + && interpolation.compare("discrete") + && interpolation.compare("stepwisecontinuousleading") + && interpolation.compare("stepwisecontinuousfollowing")) + { + Logger::getLogger()->warn("Invalid value for interpolation hint for %s, only continuous, discrete, stepwisecontinuousleading, and stepwisecontinuousfollowing are supported", dpname.c_str()); + } + else + { + hints.push_back(new OMFInterpolationHint(interpolation)); + } + } + else if (strcmp(name, "name")) // Ignore the name + { + Logger::getLogger()->warn("Invalid OMF hint '%s'", name); + } } m_datapointHints.insert(std::pair>(dpname, hints)); } @@ -211,6 +254,41 @@ OMFHints::OMFHints(const string& hints) { hints.push_back(new OMFTagHint(dpitr->value.GetString())); } + else if (strcmp(name, "uom") == 0) + { + hints.push_back(new OMFUOMHint(dpitr->value.GetString())); + } + else if (strcmp(name, "source") == 0) + { + hints.push_back(new OMFSourceHint(dpitr->value.GetString())); + } + else if (strcmp(name, "minimum") == 0) + { + hints.push_back(new OMFMinimumHint(dpitr->value.GetString())); + } + else if (strcmp(name, "maximum") == 0) + { + hints.push_back(new OMFMaximumHint(dpitr->value.GetString())); + } + else if (strcmp(name, "interpolation") == 0) + { + string interpolation = dpitr->value.GetString(); + if (interpolation.compare("continuous") + && interpolation.compare("discrete") + && interpolation.compare("stepwisecontinuousleading") + && interpolation.compare("stepwisecontinuousfollowing")) + { + Logger::getLogger()->warn("Invalid value for interpolation hint for %s, only continuous, discrete, stepwisecontinuousleading, and stepwisecontinuousfollowing are supported", dpname.c_str()); + } + else + { + hints.push_back(new OMFInterpolationHint(interpolation)); + } + } + else if (strcmp(name, "name")) // Ignore the name + { + Logger::getLogger()->warn("Invalid OMF hint '%s'", name); + } } m_datapointHints.insert(std::pair>(dpname, hints)); } diff --git a/C/plugins/north/OMF/plugin.cpp b/C/plugins/north/OMF/plugin.cpp index fd79b5ad27..30bc0bb99b 100755 --- a/C/plugins/north/OMF/plugin.cpp +++ b/C/plugins/north/OMF/plugin.cpp @@ -88,7 +88,8 @@ using namespace SimpleWeb; #define ENDPOINT_URL_PI_WEB_API "https://HOST_PLACEHOLDER:PORT_PLACEHOLDER/piwebapi/omf" #define ENDPOINT_URL_CR "https://HOST_PLACEHOLDER:PORT_PLACEHOLDER/ingress/messages" #define ENDPOINT_URL_OCS "https://dat-b.osisoft.com:PORT_PLACEHOLDER/api/v1/tenants/TENANT_ID_PLACEHOLDER/Namespaces/NAMESPACE_ID_PLACEHOLDER/omf" -#define ENDPOINT_URL_ADH "https://uswe.datahub.connect.aveva.com:PORT_PLACEHOLDER/api/v1/Tenants/TENANT_ID_PLACEHOLDER/Namespaces/NAMESPACE_ID_PLACEHOLDER/omf" +#define ENDPOINT_URL_ADH "https://REGION_PLACEHOLDER.datahub.connect.aveva.com:PORT_PLACEHOLDER/api/v1/Tenants/TENANT_ID_PLACEHOLDER/Namespaces/NAMESPACE_ID_PLACEHOLDER/omf" + #define ENDPOINT_URL_EDS 
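The datapoint-level parsing added above is easiest to see with a concrete hint document. The example below is hypothetical and only uses member names handled in this constructor; how the hint is attached to a reading is outside this hunk:

const char *hintJson = R"({
	"LegacyType": "true",
	"source": "raw",
	"datapoint": [
		{"name": "temperature", "uom": "degC", "minimum": "0",
		 "maximum": "150", "interpolation": "discrete"}
	]
})";
OMFHints hints(hintJson);   // unrecognised members now draw the "Invalid OMF hint" warning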
"http://localhost:PORT_PLACEHOLDER/api/v1/tenants/default/namespaces/default/omf" static bool s_connected = true; // if true, access to PI Web API is working @@ -130,6 +131,20 @@ enum OMF_ENDPOINT_PORT { } \ ) +/* + * Note that the properties "group" is used to group related items, these will appear in different tabs, + * using the group name, in the GUI. + * + * This GUI functionality has yet to be implemented. + * + * Current groups used are + * "Authentication" Items relating to authentication with the endpoint + * "Connection" Connection tuning items + * "Formats & Types" Controls for the way formats and tyoes are defined + * "Asset Framework" Asset framework configuration items + * "Cloud" Things related to OCS or ADH only + * "Advanced" Adds to the Advanced tab that already exists + */ const char *PLUGIN_DEFAULT_CONFIG_INFO = QUOTE( { "plugin": { @@ -146,11 +161,20 @@ const char *PLUGIN_DEFAULT_CONFIG_INFO = QUOTE( "order": "1", "displayName": "Endpoint" }, + "ADHRegions": { + "description": "AVEVA Data Hub region", + "type": "enumeration", + "options":["US-West", "EU-West", "Australia"], + "default": "US-West", + "order": "2", + "displayName": "ADH Region", + "validity" : "PIServerEndpoint == \"AVEVA Data Hub\"" + }, "SendFullStructure": { "description": "It sends the minimum OMF structural messages to load data into Data Archive if disabled", "type": "boolean", "default": "true", - "order": "2", + "order": "3", "displayName": "Send full structure", "validity" : "PIServerEndpoint == \"PI Web API\"" }, @@ -159,14 +183,14 @@ const char *PLUGIN_DEFAULT_CONFIG_INFO = QUOTE( "type": "enumeration", "options":["Concise", "Use Type Suffix", "Use Attribute Hash", "Backward compatibility"], "default": "Concise", - "order": "3", + "order": "4", "displayName": "Naming Scheme" }, "ServerHostname": { "description": "Hostname of the server running the endpoint either PI Web API or Connector Relay", "type": "string", "default": "localhost", - "order": "4", + "order": "5", "displayName": "Server hostname", "validity" : "PIServerEndpoint != \"Edge Data Store\" && PIServerEndpoint != \"OSIsoft Cloud Services\" && PIServerEndpoint != \"AVEVA Data Hub\"" }, @@ -174,7 +198,7 @@ const char *PLUGIN_DEFAULT_CONFIG_INFO = QUOTE( "description": "Port on which the endpoint either PI Web API or Connector Relay or Edge Data Store is listening, 0 will use the default one", "type": "integer", "default": "0", - "order": "5", + "order": "6", "displayName": "Server port, 0=use the default", "validity" : "PIServerEndpoint != \"OSIsoft Cloud Services\" && PIServerEndpoint != \"AVEVA Data Hub\"" }, @@ -182,8 +206,9 @@ const char *PLUGIN_DEFAULT_CONFIG_INFO = QUOTE( "description": "The producer token that represents this Fledge stream", "type": "string", "default": "omf_north_0001", - "order": "6", + "order": "7", "displayName": "Producer Token", + "group" : "Authentication", "validity" : "PIServerEndpoint == \"Connector Relay\"" }, "source": { @@ -191,71 +216,81 @@ const char *PLUGIN_DEFAULT_CONFIG_INFO = QUOTE( "type": "enumeration", "options":["readings", "statistics"], "default": "readings", - "order": "7", + "order": "8", "displayName": "Data Source" }, "StaticData": { "description": "Static data to include in each sensor reading sent to the PI Server.", "type": "string", "default": "Location: Palo Alto, Company: Dianomic", - "order": "8", + "order": "9", "displayName": "Static Data" }, "OMFRetrySleepTime": { "description": "Seconds between each retry for the communication with the OMF PI Connector Relay, NOTE : the 
time is doubled at each attempt.", "type": "integer", "default": "1", - "order": "9", + "order": "10", + "group": "Connection", "displayName": "Sleep Time Retry" }, "OMFMaxRetry": { "description": "Max number of retries for the communication with the OMF PI Connector Relay", "type": "integer", "default": "3", - "order": "10", + "order": "11", + "group": "Connection", "displayName": "Maximum Retry" }, "OMFHttpTimeout": { "description": "Timeout in seconds for the HTTP operations with the OMF PI Connector Relay", "type": "integer", "default": "10", - "order": "11", + "order": "12", + "group": "Connection", "displayName": "HTTP Timeout" }, "formatInteger": { "description": "OMF format property to apply to the type Integer", - "type": "string", + "type": "enumeration", "default": "int64", - "order": "12", + "options": ["int64", "int32", "int16", "uint64", "uint32", "uint16"], + "order": "13", + "group": "Formats & Types", "displayName": "Integer Format" }, "formatNumber": { "description": "OMF format property to apply to the type Number", - "type": "string", + "type": "enumeration", "default": "float64", - "order": "13", + "options": ["float64", "float32"], + "order": "14", + "group": "Formats & Types", "displayName": "Number Format" }, "compression": { "description": "Compress readings data before sending to PI server", "type": "boolean", "default": "true", - "order": "14", + "order": "15", + "group": "Connection", "displayName": "Compression" }, "DefaultAFLocation": { "description": "Defines the default location in the Asset Framework hierarchy in which the assets will be created, each level is separated by /, PI Web API only.", "type": "string", "default": "/fledge/data_piwebapi/default", - "order": "15", + "order": "16", "displayName": "Default Asset Framework Location", + "group" : "Asset Framework", "validity" : "PIServerEndpoint == \"PI Web API\"" }, "AFMap": { "description": "Defines a set of rules to address where assets should be placed in the AF hierarchy.", "type": "JSON", "default": AF_HIERARCHY_RULES, - "order": "16", + "order": "17", + "group" : "Asset Framework", "displayName": "Asset Framework hierarchy rules", "validity" : "PIServerEndpoint == \"PI Web API\"" @@ -265,14 +300,14 @@ const char *PLUGIN_DEFAULT_CONFIG_INFO = QUOTE( "description": "These errors are considered not blocking in the communication with the PI Server, the sending operation will proceed with the next block of data if one of these is encountered", "type": "JSON", "default": NOT_BLOCKING_ERRORS_DEFAULT, - "order": "17" , + "order": "18" , "readonly": "true" }, "streamId": { "description": "Identifies the specific stream to handle and the related information, among them the ID of the last object streamed.", "type": "integer", "default": "0", - "order": "18" , + "order": "19" , "readonly": "true" }, "PIWebAPIAuthenticationMethod": { @@ -280,7 +315,8 @@ const char *PLUGIN_DEFAULT_CONFIG_INFO = QUOTE( "type": "enumeration", "options":["anonymous", "basic", "kerberos"], "default": "anonymous", - "order": "19", + "order": "20", + "group": "Authentication", "displayName": "PI Web API Authentication Method", "validity" : "PIServerEndpoint == \"PI Web API\"" }, @@ -288,7 +324,8 @@ const char *PLUGIN_DEFAULT_CONFIG_INFO = QUOTE( "description": "User id of PI Web API to be used with the basic access authentication.", "type": "string", "default": "user_id", - "order": "20", + "order": "21", + "group": "Authentication", "displayName": "PI Web API User Id", "validity" : "PIServerEndpoint == \"PI Web API\" && 
PIWebAPIAuthenticationMethod == \"basic\"" }, @@ -296,7 +333,8 @@ const char *PLUGIN_DEFAULT_CONFIG_INFO = QUOTE( "description": "Password of the user of PI Web API to be used with the basic access authentication.", "type": "password", "default": "password", - "order": "21" , + "order": "22" , + "group": "Authentication", "displayName": "PI Web API Password", "validity" : "PIServerEndpoint == \"PI Web API\" && PIWebAPIAuthenticationMethod == \"basic\"" }, @@ -304,7 +342,8 @@ const char *PLUGIN_DEFAULT_CONFIG_INFO = QUOTE( "description": "Keytab file name used for Kerberos authentication in PI Web API.", "type": "string", "default": "piwebapi_kerberos_https.keytab", - "order": "22" , + "order": "23" , + "group": "Authentication", "displayName": "PI Web API Kerberos keytab file", "validity" : "PIServerEndpoint == \"PI Web API\" && PIWebAPIAuthenticationMethod == \"kerberos\"" }, @@ -312,7 +351,8 @@ const char *PLUGIN_DEFAULT_CONFIG_INFO = QUOTE( "description" : "Specifies the namespace where the information are stored and it is used for the interaction with AVEVA Data Hub or OCS", "type" : "string", "default": "name_space", - "order": "23", + "order": "24", + "group" : "Cloud", "displayName" : "Namespace", "validity" : "PIServerEndpoint == \"OSIsoft Cloud Services\" || PIServerEndpoint == \"AVEVA Data Hub\"" }, @@ -320,7 +360,8 @@ const char *PLUGIN_DEFAULT_CONFIG_INFO = QUOTE( "description" : "Tenant id associated to the specific AVEVA Data Hub or OCS account", "type" : "string", "default": "ocs_tenant_id", - "order": "24", + "order": "25", + "group" : "Cloud", "displayName" : "Tenant ID", "validity" : "PIServerEndpoint == \"OSIsoft Cloud Services\" || PIServerEndpoint == \"AVEVA Data Hub\"" }, @@ -328,7 +369,8 @@ const char *PLUGIN_DEFAULT_CONFIG_INFO = QUOTE( "description" : "Client id associated to the specific account, it is used to authenticate when using the AVEVA Data Hub or OCS", "type" : "string", "default": "ocs_client_id", - "order": "25", + "order": "26", + "group" : "Cloud", "displayName" : "Client ID", "validity" : "PIServerEndpoint == \"OSIsoft Cloud Services\" || PIServerEndpoint == \"AVEVA Data Hub\"" }, @@ -336,7 +378,8 @@ const char *PLUGIN_DEFAULT_CONFIG_INFO = QUOTE( "description" : "Client secret associated to the specific account, it is used to authenticate with AVEVA Data Hub or OCS", "type" : "password", "default": "ocs_client_secret", - "order": "26", + "order": "27", + "group" : "Cloud", "displayName" : "Client Secret", "validity" : "PIServerEndpoint == \"OSIsoft Cloud Services\" || PIServerEndpoint == \"AVEVA Data Hub\"" }, @@ -344,8 +387,16 @@ const char *PLUGIN_DEFAULT_CONFIG_INFO = QUOTE( "description": "These errors are considered not blocking in the communication with the PI Web API, the sending operation will proceed with the next block of data if one of these is encountered", "type": "JSON", "default": NOT_BLOCKING_ERRORS_DEFAULT_PI_WEB_API, - "order": "27" , + "order": "28" , "readonly": "true" + }, + "Legacy": { + "description": "Force all data to be sent using complex OMF types", + "type": "boolean", + "default": "false", + "order": "29", + "group": "Formats & Types", + "displayName": "Complex Types" } } ); @@ -405,6 +456,8 @@ typedef struct // Per asset DataTypes std::map assetsDataTypes; + string omfversion; + bool legacy; } CONNECTOR_INFO; unsigned long calcTypeShort (const string& dataTypes); @@ -417,7 +470,7 @@ void AuthKerberosSetup (string& keytabFile, string& keytabFi string OCSRetrieveAuthToken (CONNECTOR_INFO* connInfo); int PIWebAPIGetVersion 
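Taken together, the new group and validity properties let related items share a tab and be shown only when they apply. A hypothetical entry combining both, in the same style as the items above:

"MyTuningOption": {
	"description": "Illustrative item showing the new group and validity properties",
	"type": "integer",
	"default": "10",
	"order": "30",
	"group": "Connection",
	"displayName": "My Tuning Option",
	"validity": "PIServerEndpoint == \"PI Web API\""
}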
(CONNECTOR_INFO* connInfo, std::string &version, bool logMessage = true); double GetElapsedTime (struct timeval *startTime); -bool IsPIWebAPIConnected (CONNECTOR_INFO* connInfo); +bool IsPIWebAPIConnected (CONNECTOR_INFO* connInfo, std::string& version); /** @@ -470,6 +523,7 @@ PLUGIN_HANDLE plugin_init(ConfigCategory* configData) // PIServerEndpoint handling string PIServerEndpoint = configData->getValue("PIServerEndpoint"); + string ADHRegions = configData->getValue("ADHRegions"); string ServerHostname = configData->getValue("ServerHostname"); string ServerPort = configData->getValue("ServerPort"); string url; @@ -494,7 +548,13 @@ PLUGIN_HANDLE plugin_init(ConfigCategory* configData) { Logger::getLogger()->debug("End point manually selected - AVEVA Data Hub"); connInfo->PIServerEndpoint = ENDPOINT_ADH; - url = ENDPOINT_URL_ADH; + url = ENDPOINT_URL_ADH; + std::string region = "uswe"; + if(ADHRegions.compare("EU-West") == 0) + region = "euno"; + else if(ADHRegions.compare("Australia") == 0) + region = "auea"; + StringReplace(url, "REGION_PLACEHOLDER", region); endpointPort = ENDPOINT_PORT_ADH; } else if(PIServerEndpoint.compare("OSIsoft Cloud Services") == 0) @@ -683,6 +743,13 @@ PLUGIN_HANDLE plugin_init(ConfigCategory* configData) } + // Fetch legacy OMF type option + string legacy = configData->getValue("Legacy"); + if (legacy == "True" || legacy == "true" || legacy == "TRUE") + connInfo->legacy = true; + else + connInfo->legacy = false; + #if VERBOSE_LOG // Log plugin configuration Logger::getLogger()->info("%s plugin configured: URL=%s, " @@ -733,7 +800,7 @@ void plugin_start(const PLUGIN_HANDLE handle, PLUGIN_NAME, storedData.c_str()); } - else if(JSONData.HasMember(TYPE_ID_KEY) && + else if (JSONData.HasMember(TYPE_ID_KEY) && (JSONData[TYPE_ID_KEY].IsString() || JSONData[TYPE_ID_KEY].IsNumber())) { @@ -802,20 +869,51 @@ uint32_t plugin_send(const PLUGIN_HANDLE handle, gettimeofday(&startTime, NULL); #endif CONNECTOR_INFO* connInfo = (CONNECTOR_INFO *)handle; + string version; // Check if the endpoint is PI Web API and if the PI Web API server is available - if (!IsPIWebAPIConnected(connInfo)) + if (!IsPIWebAPIConnected(connInfo, version)) { + Logger::getLogger()->fatal("OMF Endpoint is not available"); return 0; } + // FIXME - The above call is not working. Investigate why? FOGL-7293 + // Above call does not always populate version + if (version.empty()) + { + PIWebAPIGetVersion(connInfo, version, false); + } + + Logger::getLogger()->info("Version is '%s'", version.c_str()); + + // Until we know better assume OMF 1.2 as this is the base base point + // to give us the flexible type support we need + connInfo->omfversion = "1.2"; + if (version.find("2019") != std::string::npos) + { + connInfo->omfversion = "1.0"; + } + else if (version.find("2020") != std::string::npos) + { + connInfo->omfversion = "1.1"; + } + else if (version.find("2021") != std::string::npos) + { + connInfo->omfversion = "1.2"; + } + Logger::getLogger()->info("Using OMF Version '%s'", connInfo->omfversion.c_str()); /** - * Select the proper library in relation to the need, - * LibcurlHttps is needed to integrate Kerberos as the SimpleHttp does not support it - * the Libcurl integration implements only HTTPS not HTTP at the current stage + * Select the transport library based on the authentication method and transport encryption + * requirements. + * + * LibcurlHttps is used to integrate Kerberos as the SimpleHttp does not support it + * the Libcurl integration implements only HTTPS not HTTP currently. 
We use SimpleHttp or + * SimpleHttps, as appropriate for the URL given, if not using Kerberos + * * * The handler is allocated using "Hostname : port", connect_timeout and request_timeout. - * Default is no timeout at all + * Default is no timeout */ if (connInfo->PIWebAPIAuthMethod.compare("k") == 0) { @@ -830,18 +928,18 @@ uint32_t plugin_send(const PLUGIN_HANDLE handle, if (connInfo->protocol.compare("http") == 0) { connInfo->sender = new SimpleHttp(connInfo->hostAndPort, - connInfo->timeout, - connInfo->timeout, - connInfo->retrySleepTime, - connInfo->maxRetry); + connInfo->timeout, + connInfo->timeout, + connInfo->retrySleepTime, + connInfo->maxRetry); } else { connInfo->sender = new SimpleHttps(connInfo->hostAndPort, - connInfo->timeout, - connInfo->timeout, - connInfo->retrySleepTime, - connInfo->maxRetry); + connInfo->timeout, + connInfo->timeout, + connInfo->retrySleepTime, + connInfo->maxRetry); } } @@ -862,7 +960,7 @@ uint32_t plugin_send(const PLUGIN_HANDLE handle, connInfo->sender->setOCSToken (connInfo->OCSToken); } - // Allocate the PI Server data protocol + // Allocate the OMF class that implements the PI Server data protocol connInfo->omf = new OMF(*connInfo->sender, connInfo->path, connInfo->assetsDataTypes, @@ -876,6 +974,20 @@ uint32_t plugin_send(const PLUGIN_HANDLE handle, connInfo->omf->setPIServerEndpoint(connInfo->PIServerEndpoint); connInfo->omf->setDefaultAFLocation(connInfo->DefaultAFLocation); connInfo->omf->setAFMap(connInfo->AFMap); +#ifdef EDS_OMF_VERSION + if (connInfo->PIServerEndpoint == ENDPOINT_EDS) + { + connInfo->omfversion = EDS_OMF_VERSION; + } +#endif + + // Version for Connector Relay is 1.0 only. + if (connInfo->PIServerEndpoint == ENDPOINT_CR) + { + connInfo->omfversion = CR_OMF_VERSION; + } + + connInfo->omf->setOMFVersion(connInfo->omfversion); // Generates the prefix to have unique asset_id across different levels of hierarchies string AFHierarchyLevel; @@ -892,7 +1004,15 @@ uint32_t plugin_send(const PLUGIN_HANDLE handle, connInfo->omf->setStaticData(&connInfo->staticData); connInfo->omf->setNotBlockingErrors(connInfo->notBlockingErrors); - // Send data + if (connInfo->omfversion == "1.1" || connInfo->omfversion == "1.0") { + Logger::getLogger()->info("Setting LegacyType to be true for OMF Version '%s'. This will force use old style complex types. 
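The year-to-OMF-version mapping applied above can be read as a small helper; the year strings are those tested in this patch and anything unrecognised keeps the 1.2 assumption:

#include <string>

// Hedged sketch only: map the PI Web API product version onto the OMF
// version the endpoint is expected to accept.
static std::string omfVersionForPIWebAPI(const std::string& piwebVersion)
{
	if (piwebVersion.find("2019") != std::string::npos)
		return "1.0";
	if (piwebVersion.find("2020") != std::string::npos)
		return "1.1";
	return "1.2";	// 2021 and later, and the default when the version is unknown
}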
", connInfo->omfversion.c_str()); + connInfo->omf->setLegacyMode(true); + } + else + { + connInfo->omf->setLegacyMode(connInfo->legacy); + } + // Send the readings data to the PI Server uint32_t ret = connInfo->omf->sendToServer(readings, connInfo->compression); @@ -1418,7 +1538,6 @@ int PIWebAPIGetVersion(CONNECTOR_INFO* connInfo, std::string &version, bool logM _PIWebAPI->setAuthBasicCredentials(connInfo->PIWebAPICredentials); int httpCode = _PIWebAPI->GetVersion(connInfo->hostAndPort, version, logMessage); - delete _PIWebAPI; return httpCode; @@ -1574,9 +1693,10 @@ double GetElapsedTime(struct timeval *startTime) * Check if the PI Web API server is available by reading the product version * * @param connInfo The CONNECTOR_INFO data structure + * @param version Returned version string * @return Connection status */ -bool IsPIWebAPIConnected(CONNECTOR_INFO* connInfo) +bool IsPIWebAPIConnected(CONNECTOR_INFO* connInfo, std::string& version) { static std::chrono::steady_clock::time_point nextCheck; @@ -1586,7 +1706,6 @@ bool IsPIWebAPIConnected(CONNECTOR_INFO* connInfo) if (now >= nextCheck) { - std::string version; int httpCode = PIWebAPIGetVersion(connInfo, version, false); if (httpCode >= 500) { diff --git a/C/plugins/storage/postgres/connection.cpp b/C/plugins/storage/postgres/connection.cpp index 0c8e335ad2..baa6656e93 100644 --- a/C/plugins/storage/postgres/connection.cpp +++ b/C/plugins/storage/postgres/connection.cpp @@ -350,6 +350,14 @@ Connection::Connection() connectErrorTime = time(0); } } + + logSQL("Set", "session time zone 'UTC' "); + PGresult *res = PQexec(dbConnection, " set session time zone 'UTC' "); + if (PQresultStatus(res) != PGRES_COMMAND_OK) + { + Logger::getLogger()->error("set session time zone failed: %s", PQerrorMessage(dbConnection)); + } + PQclear(res); } /** @@ -967,6 +975,7 @@ SQLBuffer sql; int row = 0; ostringstream convert; + bool allowZero = false; std::size_t arr = payload.find("updates"); bool changeReqd = (arr == std::string::npos || arr > 8); @@ -1278,6 +1287,21 @@ SQLBuffer sql; return false; } } + if (iter->HasMember("modifier") && (*iter)["modifier"].IsArray()) + { + const Value& modifier = (*iter)["modifier"]; + for (Value::ConstValueIterator modifiers = modifier.Begin(); modifiers != modifier.End(); ++modifiers) + { + if (modifiers->IsString()) + { + string mod = modifiers->GetString(); + if (mod.compare("allowzero") == 0) + { + allowZero = true; + } + } + } + } sql.append(';'); } } @@ -1288,7 +1312,8 @@ SQLBuffer sql; delete[] query; if (PQresultStatus(res) == PGRES_COMMAND_OK) { - if (atoi(PQcmdTuples(res)) == 0) + int rowsUpdated = atoi(PQcmdTuples(res)); + if (rowsUpdated == 0 && allowZero == false) { raiseError("update", "No rows where updated"); return -1; diff --git a/C/plugins/storage/postgres/plugin.cpp b/C/plugins/storage/postgres/plugin.cpp index 996e29eae1..06212cfae3 100644 --- a/C/plugins/storage/postgres/plugin.cpp +++ b/C/plugins/storage/postgres/plugin.cpp @@ -28,6 +28,8 @@ using namespace rapidjson; #define DEFAULT_SCHEMA "fledge" +#define OR_DEFAULT_SCHEMA(x) ((x) ? (x) : DEFAULT_SCHEMA) + /** * The Postgres plugin interface */ @@ -85,9 +87,8 @@ int plugin_common_insert(PLUGIN_HANDLE handle, char *schema, char *table, char * ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); -if (!schema) schema = DEFAULT_SCHEMA; - int result = connection->insert(std::string(schema) + "." 
+ std::string(table), std::string(data)); + int result = connection->insert(std::string(OR_DEFAULT_SCHEMA(schema)) + "." + std::string(table), std::string(data)); manager->release(connection); return result; } @@ -101,9 +102,7 @@ ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); std::string results; -if (!schema) schema = DEFAULT_SCHEMA; - - bool rval = connection->retrieve(std::string(schema) + "." + std::string(table), std::string(query), results); + bool rval = connection->retrieve(std::string(OR_DEFAULT_SCHEMA(schema)) + "." + std::string(table), std::string(query), results); manager->release(connection); if (rval) { @@ -119,9 +118,8 @@ int plugin_common_update(PLUGIN_HANDLE handle, char *schema, char *table, char * { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); -if (!schema) schema = DEFAULT_SCHEMA; - int result = connection->update(std::string(schema) + "." + std::string(table), std::string(data)); + int result = connection->update(std::string(OR_DEFAULT_SCHEMA(schema)) + "." + std::string(table), std::string(data)); manager->release(connection); return result; } @@ -134,9 +132,7 @@ int plugin_common_delete(PLUGIN_HANDLE handle, char *schema , char *table, char ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); -if (!schema) schema = DEFAULT_SCHEMA; - - int result = connection->deleteRows(std::string(schema) + "." + std::string(table), std::string(condition)); + int result = connection->deleteRows(std::string(OR_DEFAULT_SCHEMA(schema)) + "." + std::string(table), std::string(condition)); manager->release(connection); return result; } @@ -348,8 +344,6 @@ int plugin_schema_update(PLUGIN_HANDLE handle, ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); - if (!schema) schema = DEFAULT_SCHEMA; - // create_schema handles both create and update schema // schema value gets parsed from the payload int result = connection->create_schema(std::string(payload)); diff --git a/C/plugins/storage/sqlite/common/connection.cpp b/C/plugins/storage/sqlite/common/connection.cpp index 33f2283a97..21f9b61de5 100644 --- a/C/plugins/storage/sqlite/common/connection.cpp +++ b/C/plugins/storage/sqlite/common/connection.cpp @@ -1384,6 +1384,7 @@ int Connection::update(const string& schema, const string& table, const string& Document document; SQLBuffer sql; vector asset_codes; +bool allowZero = false; int row = 0; ostringstream convert; @@ -1681,6 +1682,21 @@ vector asset_codes; col++; } } + if (iter->HasMember("modifier") && (*iter)["modifier"].IsArray()) + { + const Value& modifier = (*iter)["modifier"]; + for (Value::ConstValueIterator modifiers = modifier.Begin(); modifiers != modifier.End(); ++modifiers) + { + if (modifiers->IsString()) + { + string mod = modifiers->GetString(); + if (mod.compare("allowzero") == 0) + { + allowZero = true; + } + } + } + } if (col == 0) { raiseError("update", @@ -1757,7 +1773,7 @@ vector asset_codes; int return_value=0; - if (update == 0) + if (update == 0 && allowZero == false) { char buf[100]; snprintf(buf, sizeof(buf), diff --git a/C/plugins/storage/sqlite/common/include/readings_catalogue.h b/C/plugins/storage/sqlite/common/include/readings_catalogue.h index 9894e048fe..0bddc55863 100644 --- a/C/plugins/storage/sqlite/common/include/readings_catalogue.h +++ b/C/plugins/storage/sqlite/common/include/readings_catalogue.h @@ -134,6 +134,7 @@ class 
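The OR_DEFAULT_SCHEMA macro simply guards against a null schema pointer; an equivalent hedged helper makes the intent explicit:

static inline std::string schemaOrDefault(const char *schema)
{
	// Fall back to the default "fledge" schema rather than dereferencing a null pointer
	return std::string(schema ? schema : DEFAULT_SCHEMA);
}
// e.g. connection->insert(schemaOrDefault(schema) + "." + table, data);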
ReadingsCatalogue { bool latestDbUpdate(sqlite3 *dbHandle, int newDbId); void preallocateNewDbsRange(int dbIdStart, int dbIdEnd); + bool getEmptyReadingTableReference(tyReadingReference& emptyTableReference); tyReadingReference getReadingReference(Connection *connection, const char *asset_code); bool attachDbsToAllConnections(); std::string sqlConstructMultiDb(std::string &sqlCmdBase, std::vector &assetCodes, bool considerExclusion=false); diff --git a/C/plugins/storage/sqlite/common/readings_catalogue.cpp b/C/plugins/storage/sqlite/common/readings_catalogue.cpp index 02b67f3956..61952824d7 100644 --- a/C/plugins/storage/sqlite/common/readings_catalogue.cpp +++ b/C/plugins/storage/sqlite/common/readings_catalogue.cpp @@ -1835,6 +1835,7 @@ ReadingsCatalogue::tyReadingReference ReadingsCatalogue::getReadingReference(Co AttachDbSync *attachSync = AttachDbSync::getInstance(); attachSync->lock(); + ReadingsCatalogue::tyReadingReference emptyTableReference = {-1, -1}; auto item = m_AssetReadingCatalogue.find(asset_code); if (item != m_AssetReadingCatalogue.end()) @@ -1874,11 +1875,15 @@ ReadingsCatalogue::tyReadingReference ReadingsCatalogue::getReadingReference(Co startReadingsId = 1; - success = createNewDB(dbHandle, dbId, startReadingsId, NEW_DB_ATTACH_REQUEST); - if (success) + if (!getEmptyReadingTableReference(emptyTableReference)) { - Logger::getLogger()->debug("getReadingReference - allocate a new db - create new dbs - dbId :%d: startReadingsIdOnDB :%d:", dbId, startReadingsId); + success = createNewDB(dbHandle, dbId, startReadingsId, NEW_DB_ATTACH_REQUEST); + if (success) + { + Logger::getLogger()->debug("getReadingReference - allocate a new db - create new dbs - dbId :%d: startReadingsIdOnDB :%d:", dbId, startReadingsId); + } } + } m_dbIdLast = dbIdEnd; m_dbIdCurrent++; @@ -1895,9 +1900,17 @@ ReadingsCatalogue::tyReadingReference ReadingsCatalogue::getReadingReference(Co { // Associate the asset to the reading_id { - ref.tableId = getMaxReadingsId(m_dbIdCurrent) + 1; - ref.dbId = m_dbIdCurrent; - + if (emptyTableReference.tableId > 0) + { + ref.tableId = emptyTableReference.tableId; + ref.dbId = emptyTableReference.dbId; + } + else + { + ref.tableId = getMaxReadingsId(m_dbIdCurrent) + 1; + ref.dbId = m_dbIdCurrent; + } + auto newItem = make_pair(ref.tableId, ref.dbId); auto newMapValue = make_pair(asset_code, newItem); m_AssetReadingCatalogue.insert(newMapValue); @@ -1907,11 +1920,20 @@ ReadingsCatalogue::tyReadingReference ReadingsCatalogue::getReadingReference(Co // Allocate the table in the reading catalogue { - sql_cmd = + if (emptyTableReference.tableId > 0) + { + + sql_cmd = " UPDATE " READINGS_DB ".asset_reading_catalogue SET asset_code ='" + string(asset_code) + "'" + + " WHERE db_id = " + to_string(emptyTableReference.dbId) + " AND table_id = " + to_string(emptyTableReference.tableId) + ";"; + } + else + { + sql_cmd = "INSERT INTO " READINGS_DB ".asset_reading_catalogue (table_id, db_id, asset_code) VALUES (" + to_string(ref.tableId) + "," + to_string(ref.dbId) + "," + "\"" + asset_code + "\")"; + } rc = SQLExec(dbHandle, sql_cmd.c_str()); if (rc != SQLITE_OK) @@ -1933,6 +1955,55 @@ ReadingsCatalogue::tyReadingReference ReadingsCatalogue::getReadingReference(Co } +/** + * Get Empty Reading Table + * + * @param emptyTableReference An empty reading table reference to be used for the given asset_code + * @return True of success, false on any error + * + */ +bool ReadingsCatalogue::getEmptyReadingTableReference(tyReadingReference &emptyTableReference) +{ + bool 
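The same "modifier" handling appears in both the Postgres and SQLite update paths. A trimmed, hypothetical payload fragment is shown below; the usual condition and values members of an update entry are omitted because they are unchanged by this patch:

{
	"updates": [
		{
			"modifier": ["allowzero"]
		}
	]
}

With "allowzero" present, an UPDATE that matches no rows is reported as success instead of raising the "No rows where updated" error.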
isEmptyTableAvailable = false; + sqlite3 *dbHandle; + string sql_cmd; + sqlite3_stmt *stmt; + + // Disable functionality temporarily to avoid regression + return false; + + ConnectionManager *manager = ConnectionManager::getInstance(); + Connection *connection = manager->allocate(); + dbHandle = connection->getDbHandle(); + + for (auto &item : m_AssetReadingCatalogue) + { + int tableId = item.second.first; + int dbId = item.second.second; + sql_cmd = "SELECT COUNT(*) FROM (SELECT 0 FROM readings_" + to_string(dbId) + ".readings_" + to_string(dbId) + "_" + to_string(tableId) + " LIMIT 1)"; + + if (sqlite3_prepare_v2(dbHandle, sql_cmd.c_str(), -1, &stmt, NULL) != SQLITE_OK) + { + raiseError("getEmptyReadingTableReference", sqlite3_errmsg(dbHandle)); + return false; + } + + if (SQLStep(stmt) == SQLITE_ROW) + { + if (sqlite3_column_int(stmt, 0) == 0) + { + isEmptyTableAvailable = true; + emptyTableReference.dbId = dbId; + emptyTableReference.tableId = tableId; + } + } + sqlite3_finalize(stmt); + } + + manager->release(connection); + return isEmptyTableAvailable; +} + /** * Retrieve the maximum readings id for the provided database id * diff --git a/C/plugins/storage/sqlitelb/common/connection.cpp b/C/plugins/storage/sqlitelb/common/connection.cpp index 4f7874e530..b5848f16cc 100644 --- a/C/plugins/storage/sqlitelb/common/connection.cpp +++ b/C/plugins/storage/sqlitelb/common/connection.cpp @@ -1309,6 +1309,7 @@ int Connection::update(const string& schema, // Default template parameter uses UTF8 and MemoryPoolAllocator. Document document; SQLBuffer sql; +bool allowZero = false; int row = 0; ostringstream convert; @@ -1606,6 +1607,21 @@ SQLBuffer sql; col++; } } + if (iter->HasMember("modifier") && (*iter)["modifier"].IsArray()) + { + const Value& modifier = (*iter)["modifier"]; + for (Value::ConstValueIterator modifiers = modifier.Begin(); modifiers != modifier.End(); ++modifiers) + { + if (modifiers->IsString()) + { + string mod = modifiers->GetString(); + if (mod.compare("allowzero") == 0) + { + allowZero = true; + } + } + } + } if (col == 0) { raiseError("update", @@ -1682,7 +1698,7 @@ SQLBuffer sql; int return_value=0; - if (update == 0) + if (update == 0 && allowZero == false) { char buf[100]; snprintf(buf, sizeof(buf), diff --git a/C/services/north/north.cpp b/C/services/north/north.cpp index e953db9395..1645c3c52b 100755 --- a/C/services/north/north.cpp +++ b/C/services/north/north.cpp @@ -224,9 +224,9 @@ pid_t pid; close(1); close(2); // redirect fd's 0,1,2 to /dev/null - (void)open("/dev/null", O_RDWR); // stdin - (void)dup(0); // stdout GCC bug 66425 produces warning - (void)dup(0); // stderr GCC bug 66425 produces warning + open("/dev/null", O_RDWR); // stdin + if (dup(0) == -1) {} // stdout GCC bug 66425 produces warning + if (dup(0) == -1) {} // stderr GCC bug 66425 produces warning return 0; } diff --git a/C/services/south/include/ingest.h b/C/services/south/include/ingest.h index 160849624d..79f969c928 100644 --- a/C/services/south/include/ingest.h +++ b/C/services/south/include/ingest.h @@ -24,6 +24,7 @@ #include #include #include +#include #define SERVICE_NAME "Fledge South" @@ -77,6 +78,9 @@ class Ingest : public ServiceHandler { const std::string& event); void unDeprecateStorageAssetTrackingRecord(StorageAssetTrackingTuple* currentTuple, const std::string& assetName, const std::string&, const unsigned int&); + void setStatistics(const std::string& option); + + std::string getStringFromSet(const std::set &dpSet); private: @@ -127,6 +131,8 @@ class Ingest : public 
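The probe used above to find a reusable table is worth spelling out: the inner SELECT returns at most one row, so the outer COUNT(*) is zero only when the per-asset readings table is completely empty. A sketch with example database and table ids:

// dbId and tableId are illustrative; the real values come from the catalogue map.
int dbId = 1, tableId = 2;
std::string sql_cmd = "SELECT COUNT(*) FROM (SELECT 0 FROM readings_" + std::to_string(dbId) +
		".readings_" + std::to_string(dbId) + "_" + std::to_string(tableId) + " LIMIT 1)";
// COUNT(*) == 0  ->  the table can be re-assigned to a new asset code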
ServiceHandler { bool m_storageFailed; int m_storesFailed; int m_statsUpdateFails; + enum { STATS_BOTH, STATS_ASSET, STATS_SERVICE } + m_statisticsOption; }; #endif diff --git a/C/services/south/ingest.cpp b/C/services/south/ingest.cpp index 978f5a593d..956e41fc1b 100755 --- a/C/services/south/ingest.cpp +++ b/C/services/south/ingest.cpp @@ -173,36 +173,40 @@ void Ingest::updateStats() } if (it->second) + { + if (m_statisticsOption == STATS_BOTH || m_statisticsOption == STATS_ASSET) { - // Prepare fledge.statistics update - key = it->first; - for (auto & c: key) c = toupper(c); + // Prepare fledge.statistics update + key = it->first; + for (auto & c: key) c = toupper(c); - // Prepare "WHERE key = name - Where *wPluginStat = new Where("key", conditionStat, key); + // Prepare "WHERE key = name + Where *wPluginStat = new Where("key", conditionStat, key); - // Prepare value = value + inc - ExpressionValues *updateValue = new ExpressionValues; - updateValue->push_back(Expression("value", "+", (int) it->second)); + // Prepare value = value + inc + ExpressionValues *updateValue = new ExpressionValues; + updateValue->push_back(Expression("value", "+", (int) it->second)); - statsUpdates.emplace_back(updateValue, wPluginStat); + statsUpdates.emplace_back(updateValue, wPluginStat); + } readings += it->second; } } - if(readings) + if (readings) { Where *wPluginStat = new Where("key", conditionStat, "READINGS"); ExpressionValues *updateValue = new ExpressionValues; updateValue->push_back(Expression("value", "+", (int) readings)); statsUpdates.emplace_back(updateValue, wPluginStat); - } - if(readings) - { - Where *wPluginStat = new Where("key", conditionStat, m_serviceName + INGEST_SUFFIX); - ExpressionValues *updateValue = new ExpressionValues; - updateValue->push_back(Expression("value", "+", (int) readings)); - statsUpdates.emplace_back(updateValue, wPluginStat); + + if (m_statisticsOption == STATS_BOTH || m_statisticsOption == STATS_SERVICE) + { + Where *wPluginStat = new Where("key", conditionStat, m_serviceName + INGEST_SUFFIX); + ExpressionValues *updateValue = new ExpressionValues; + updateValue->push_back(Expression("value", "+", (int) readings)); + statsUpdates.emplace_back(updateValue, wPluginStat); + } } if (m_discardedReadings) { @@ -275,7 +279,8 @@ Ingest::Ingest(StorageClient& storage, m_mgtClient(mgmtClient), m_failCnt(0), m_storageFailed(false), - m_storesFailed(0) + m_storesFailed(0), + m_statisticsOption(STATS_BOTH) { m_shutdown = false; m_running = true; @@ -543,7 +548,6 @@ void Ingest::processQueue() string assetName = reading->getAssetName(); const std::vector dpVec = reading->getReadingData(); std::string temp; - std::set tempSet; // first sort the individual datapoints // e.g. 
dp2, dp3, dp1 push them in a set,to make them @@ -557,19 +561,17 @@ void Ingest::processQueue() temp.clear(); - // make a string from sorted datapoints in a reading - int i = 0; - for (auto setItr: tempSet) - { - if ( i> 0) temp.append(","); - temp.append(setItr); - ++i; - } - // Push them in a set so as to avoid duplication of datapoints // a reading of d1, d2, d3 and another d2,d3,d1 , second will be discarded - - assetDatapointMap[assetName].insert(temp); + // + for (auto dp: tempSet) + { + set &s= assetDatapointMap[assetName]; + if (s.find(dp) == s.end()) + { + s.insert(dp); + } + } if (lastAsset.compare(assetName)) { @@ -606,30 +608,15 @@ void Ingest::processQueue() for (auto itr : assetDatapointMap) { std::set &s = itr.second; - - for (auto dp : s) + unsigned int count = s.size(); + StorageAssetTrackingTuple storageTuple(m_serviceName,m_pluginName, itr.first, "store", false, "",count); + StorageAssetTrackingTuple *ptr = &storageTuple; + satracker->updateCache(s, ptr); + bool deprecated = satracker->getDeprecated(ptr); + if (deprecated == true) { - unsigned int c = count(dp.begin(), dp.end(), ','); - StorageAssetTrackingTuple storageTuple(m_serviceName, - m_pluginName, - itr.first, - "store", false, dp, c+1); - - - StorageAssetTrackingTuple* rv = satracker->findStorageAssetTrackingCache(storageTuple); - if (rv == NULL) - { - // Record not found in cache , please update cache - Logger::getLogger()->debug("%s:%d record not found in cache ", __FUNCTION__, __LINE__); - satracker->addStorageAssetTrackingTuple(storageTuple); - } - else - { - //record found undeprecate the record - Logger::getLogger()->debug("%s:%d Record found in cache , undeprecate it", __FUNCTION__,__LINE__); - unDeprecateStorageAssetTrackingRecord(rv, itr.first, dp, c+1); - } - } + unDeprecateStorageAssetTrackingRecord(ptr, itr.first, getStringFromSet(s), count); + } } delete q; @@ -772,9 +759,7 @@ void Ingest::processQueue() string lastAsset; int *lastStat = NULL; - std::map > assetDatapointMap; - for (vector::iterator it = m_data->begin(); it != m_data->end(); ++it) { Reading *reading = *it; @@ -785,37 +770,34 @@ void Ingest::processQueue() // first sort the individual datapoints // e.g. 
dp2, dp3, dp1 push them in a set,to make them // dp1,dp2,dp3 - for ( auto dp : dpVec) { - temp.clear(); + temp.clear(); temp.append(dp->getName()); tempSet.insert(temp); } + temp.clear(); - // make a string from sorted datapoints in a reading - int i = 0; - for (auto setItr: tempSet) - { - if ( i> 0) temp.append(","); - temp.append(setItr); - ++i; - } // Push them in a set so as to avoid duplication of datapoints // a reading of d1, d2, d3 and another d2,d3,d1 , second will be discarded - - assetDatapointMap[assetName].insert(temp); + // + for (auto dp: tempSet) + { + set &s= assetDatapointMap[assetName]; + if (s.find(dp) == s.end()) + { + s.insert(dp); + } + } if (lastAsset.compare(assetName)) { - AssetTrackingTuple tuple(m_serviceName, m_pluginName, assetName, "Ingest"); - // Check Asset record exists AssetTrackingTuple* res = tracker->findAssetTrackingCache(tuple); if (res == NULL) @@ -843,39 +825,19 @@ void Ingest::processQueue() } - - for (auto itr : assetDatapointMap) - { - for (auto dp : itr.second) + for (auto itr : assetDatapointMap) + { + std::set &s = itr.second; + unsigned int count = s.size(); + StorageAssetTrackingTuple storageTuple(m_serviceName,m_pluginName, itr.first, "store", false, "",count); + StorageAssetTrackingTuple *ptr = &storageTuple; + satracker->updateCache(s, ptr); + bool deprecated = satracker->getDeprecated(ptr); + if (deprecated == true) { - unsigned int c= count(dp.begin(), dp.end(), ','); - - StorageAssetTrackingTuple storageTuple(m_serviceName, - m_pluginName, - itr.first, - "store", false, dp, c+1); - - Logger::getLogger()->debug("%s Dp string dp = %s", __FUNCTION__, dp.c_str()); - - StorageAssetTrackingTuple* rv = satracker->findStorageAssetTrackingCache(storageTuple); - - - if (rv == NULL) - { - // Record not found in cache , please update cache - Logger::getLogger()->debug("%s:%d record not found in cache add it", __FUNCTION__, __LINE__); - satracker->addStorageAssetTrackingTuple(storageTuple); - } - else - { - //record found undeprecate the record - Logger::getLogger()->debug("%s:%d No need for updation , undeprecate it", __FUNCTION__,__LINE__); - - unDeprecateStorageAssetTrackingRecord(rv, itr.first, dp, c+1); - } + unDeprecateStorageAssetTrackingRecord(ptr, itr.first, getStringFromSet(s), count); } - } - + } { unique_lock lck(m_statsMutex); for (auto &it : statsEntriesCurrQueue) @@ -1201,12 +1163,12 @@ void Ingest::unDeprecateStorageAssetTrackingRecord(StorageAssetTrackingTuple* cu datapoints, count); - vector tokens; + vector tokens; stringstream dpStringStream(datapoints); string temp; while(getline(dpStringStream, temp, ',')) { - tokens.push_back(temp); + tokens.push_back(temp); } ostringstream convert; @@ -1214,7 +1176,7 @@ void Ingest::unDeprecateStorageAssetTrackingRecord(StorageAssetTrackingTuple* cu convert << "\"datapoints\":["; for (unsigned int i = 0; i < tokens.size() ; ++i) { - convert << "\"" << tokens[i].c_str() << "\"" ; + convert << "\"" << tokens[i].c_str() << "\"" ; if (i < tokens.size()-1){ convert << ","; } @@ -1275,4 +1237,36 @@ void Ingest::unDeprecateStorageAssetTrackingRecord(StorageAssetTrackingTuple* cu delete updatedTuple; } +/** + * Set the statistics option. The statistics collection regime may be one of + * "per asset", "per service" or "per asset & service". 
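Both ingest paths now share the same bookkeeping: datapoint names are pushed into a std::set, which sorts and de-duplicates them, so readings carrying {d1,d2,d3} and {d2,d3,d1} contribute a single identical entry and the tuple count is simply the set size. A minimal sketch, assuming getReadingData() yields Datapoint pointers as elsewhere in this file:

std::map<std::string, std::set<std::string>> assetDatapointMap;
for (Datapoint *dp : reading->getReadingData())
	assetDatapointMap[reading->getAssetName()].insert(dp->getName());

const std::set<std::string>& s = assetDatapointMap["pump1"];   // hypothetical asset
unsigned int count = s.size();                                 // datapoint count for the tracking tuple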
+ * + * @param option The desired statistics collection regime + */ +void Ingest::setStatistics(const string& option) +{ + unique_lock lck(m_statsMutex); + if (option.compare("per asset") == 0) + m_statisticsOption = STATS_ASSET; + else if (option.compare("per service") == 0) + m_statisticsOption = STATS_SERVICE; + else + m_statisticsOption = STATS_BOTH; +} +/* + * Returns comma-separated string from set of datapoints + */ +std::string Ingest::getStringFromSet(const std::set &dpSet) +{ + std::string s; + for (auto itr: dpSet) + { + s.append(itr); + s.append(","); + } + // remove the last comma + if (s[s.size() -1] == ',') + s.pop_back(); + return s; +} diff --git a/C/services/south/south.cpp b/C/services/south/south.cpp index f03c7d4140..811b457575 100755 --- a/C/services/south/south.cpp +++ b/C/services/south/south.cpp @@ -142,8 +142,8 @@ pid_t pid; close(2); // redirect fd's 0,1,2 to /dev/null (void)open("/dev/null", O_RDWR); // stdin - (void)dup(0); // stdout GCC bug 66425 produces warning - (void)dup(0); // stderr GCC bug 66425 produces warning + if (dup(0) == -1) {} // stdout Workaround for GCC bug 66425 produces warning + if (dup(0) == -1) {} // stderr WOrkaround for GCC bug 66425 produces warning return 0; } @@ -391,6 +391,11 @@ void SouthService::start(string& coreAddress, unsigned short corePort) Ingest ingest(storage, timeout, threshold, m_name, pluginName, m_mgtClient); m_ingest = &ingest; + if (m_configAdvanced.itemExists("statistics")) + { + m_ingest->setStatistics(m_configAdvanced.getValue("statistics")); + } + try { m_readingsPerSec = 1; if (m_configAdvanced.itemExists("readingsPerSec")) @@ -830,6 +835,10 @@ void SouthService::configChange(const string& categoryName, const string& catego if (categoryName.compare(m_name+"Advanced") == 0) { m_configAdvanced = ConfigCategory(m_name+"Advanced", category); + if (m_configAdvanced.itemExists("statistics")) + { + m_ingest->setStatistics(m_configAdvanced.getValue("statistics")); + } if (! 
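A quick usage note: the strings accepted by setStatistics() are exactly the three values offered by the new "statistics" configuration item; anything else falls back to keeping both sets of counters.

m_ingest->setStatistics("per asset");            // asset counters only
m_ingest->setStatistics("per service");          // per-service counter only
m_ingest->setStatistics("per asset & service");  // both, the default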
southPlugin->isAsync()) { try { @@ -977,6 +986,12 @@ void SouthService::addConfigDefaults(DefaultConfigCategory& defaultConfig) defaultConfig.addItem("logLevel", "Minimum logging level reported", "warning", "warning", logLevels); defaultConfig.setItemDisplayName("logLevel", "Minimum Log Level"); + + /* Add the set of logging levels to the service */ + vector statistics = { "per asset", "per service", "per asset & service" }; + defaultConfig.addItem("statistics", "Collect statistics either for every asset ingested, for the service in total or both", + "per asset & service", "per asset & service", statistics); + defaultConfig.setItemDisplayName("statistics", "Statistics Collection"); } /** diff --git a/C/services/storage/storage.cpp b/C/services/storage/storage.cpp index 2adac6bcbb..4f3439b937 100644 --- a/C/services/storage/storage.cpp +++ b/C/services/storage/storage.cpp @@ -92,6 +92,7 @@ string coreAddress = "localhost"; bool daemonMode = true; string myName = SERVICE_NAME; bool returnPlugin = false; +bool returnReadingsPlugin = false; string logLevel = "warning"; for (int i = 1; i < argc; i++) @@ -116,24 +117,38 @@ string logLevel = "warning"; { returnPlugin = true; } + else if (!strncmp(argv[i], "--readingsplugin", 8)) + { + returnReadingsPlugin = true; + } else if (!strncmp(argv[i], "--logLevel=", 11)) { logLevel = &argv[i][11]; } } - if (returnPlugin == false && daemonMode && makeDaemon() == -1) + if (returnPlugin == false && returnReadingsPlugin == false && daemonMode && makeDaemon() == -1) { // Failed to run in daemon mode cout << "Failed to run as deamon - proceeding in interactive mode." << endl; } + if (returnPlugin && returnReadingsPlugin) + { + cout << "You can not specify --plugin and --readingsplugin together"; + exit(1); + } + StorageService *service = new StorageService(myName); Logger::getLogger()->setMinLevel(logLevel); if (returnPlugin) { cout << service->getPluginName() << " " << service->getPluginManagedStatus() << endl; } + else if (returnReadingsPlugin) + { + cout << service->getReadingPluginName() << " " << service->getPluginManagedStatus() << endl; + } else { service->start(coreAddress, corePort); @@ -174,8 +189,8 @@ pid_t pid; close(2); // redirect fd's 0,1,2 to /dev/null (void)open("/dev/null", O_RDWR); // stdin - (void)dup(0); // stdout GCC bug 66425 produces warning - (void)dup(0); // stderr GCC bug 66425 produces warning + if (dup(0) == -1) {} // stdout Workaround GCC bug 66425 produces warning + if (dup(0) == -1) {} // stderr Workaround GCC bug 66425 produces warning return 0; } @@ -522,3 +537,16 @@ string StorageService::getPluginManagedStatus() { return string(config->getValue("managedStatus")); } + +/** + * Return the name of the configured reading plugin + */ +string StorageService::getReadingPluginName() +{ + string rval = config->getValue("readingPlugin"); + if (rval.empty()) + { + rval = config->getValue("plugin"); + } + return rval; +} diff --git a/Makefile b/Makefile index 98821a2bdb..c1c80d666b 100644 --- a/Makefile +++ b/Makefile @@ -14,16 +14,16 @@ $(if $(PLATFORM_RH), $(info Platform is $(PLATFORM_RH) $(OS_VERSION))) ifneq ("$(PLATFORM_RH)","") ifeq ("$(OS_VERSION_PREFIX)", "7") # CentOS we need rh-python36 and devtoolset-7 - PIP_INSTALL_REQUIREMENTS := source scl_source enable rh-python36 && pip3 install -Ir + PIP_INSTALL_REQUIREMENTS := source scl_source enable rh-python36 && python3 -m pip install -Ir PYTHON_BUILD_PACKAGE = source scl_source enable rh-python36 && python3 setup.py build -b ../$(PYTHON_BUILD_DIR) CMAKE := source scl_source 
enable rh-python36 && source scl_source enable devtoolset-7 && cmake else - PIP_INSTALL_REQUIREMENTS := pip3 install -Ir + PIP_INSTALL_REQUIREMENTS := python3 -m pip install -Ir PYTHON_BUILD_PACKAGE = python3 setup.py build -b ../$(PYTHON_BUILD_DIR) CMAKE := cmake endif else - PIP_INSTALL_REQUIREMENTS := pip3 install -Ir + PIP_INSTALL_REQUIREMENTS := python3 -m pip install -Ir PYTHON_BUILD_PACKAGE = python3 setup.py build -b ../$(PYTHON_BUILD_DIR) CMAKE := cmake endif diff --git a/VERSION index d247bccda7..496130b3aa 100644 --- a/VERSION +++ b/VERSION @@ -1,2 +1,2 @@ -fledge_version=2.0.1 -fledge_schema=58 +fledge_version=2.1.0 +fledge_schema=59 diff --git a/docs/91_version_history.rst index ea564db3df..14c0d9d4f4 100644 --- a/docs/91_version_history.rst +++ b/docs/91_version_history.rst @@ -25,6 +25,61 @@ Version History Fledge v2 ========== +v2.1.0 +------- + +Release Date: 2022-12-26 + +- **Fledge Core** + + - New Features: + + - North plugins that are run as a task rather than a service would be run by the Python sending task rather than the C++ sending task. This resulted in filter pipelines not being applied to the task. This has now been resolved. + - A new mechanism has been introduced that allows configuration items within a category to have a group associated with them. This allows items that relate to a particular mechanism to be recognised as related by clients of the API and allows display decisions to be taken based on these groups. + - The asset browser APIs have been enhanced to allow for a window of data in the past to be returned. In conjunction, a new timespan entry point has been added to allow the oldest and newest date for which an asset exists within the reading buffer to be returned. + - An option has been added to the advanced configuration of south services that allows the statistics that are generated by the south service to be tailored. Statistics may be kept for the service as a whole, for each asset ingested by the service, or both. This setting relates to a given service and may be different in different south services. Full details are available in the tuning guide within the documentation. + - Two new types of user are now available in Fledge; users that can view the configuration only and users that can view the data only. + + + - Bug Fix: + + - The reset and purge scripts have been improved such that if the reading plugin is different from the storage plugin the data will be removed from the appropriate plugins. + - A problem that prevented items from being disabled in the user interface when they were not valid for the current configuration has been resolved. + - An issue that would sometimes cause the error `Not all updates in a transaction succeeded` to be logged when updating the user's access token has been resolved. + - An issue that could cause properties of configuration items to be lost or incorrectly updated has been resolved. + + +- **GUI** + + - New Features: + + - The graphical user interface for viewing the configuration of the south and north services and tasks has now been updated to display the configuration items in multiple tabs. + - The user interface now supports two types of view-only users; those that can view the configuration and those that can view the data only. + + + - Bug Fix: + + - An issue that could leave two menu items selected in the menu pane of the user interface has been resolved. 
+ - The tab view of tabular data in the user interface has been updated to show the date as well as the time related to readings. + + +- **Services & Plugins** + + - New Features: + + - A new north plugin, fledge-north-opcuaclient, has been created to send data, acting as an OPC UA client, to an OPC UA Server. + - The asset filter has been updated to support the ability to map datapoint names for an asset. + - The OMF north plugin now supports all ADH regions. + - The OMF north plugin has been updated to allow support for OMF 1.2 features. This allows for better control of types within OMF, resulting in the OMF plugin now dealing more cleanly with assets with different datapoints in different readings. Any assets that are already being sent to an OMF endpoint will continue to use the previous type mechanism. A number of new OMF hints are also supported. + - The S2OPCUA south plugin has been updated to allow the timestamp for readings to be taken from the OPC UA server itself rather than the time that it was received by Fledge. + + + - Bug Fix: + + - An issue with building of the DNP3 plugin on the Raspberry Pi platform has been resolved. + - The S2OPCUA south plugin has been updated to resolve an issue with duplicate browse names causing data from two OPC UA variables to be stored in the same Fledge datapoint. The plugin has also been updated to give more options for how the assets are structured. The options of a single asset for all datapoints and an asset per OPC UA object have been added. It is also possible to use the OPC UA object name as the prefix for asset names in the case of a single variable per asset as well as the current option of a fixed prefix for the browse name of the variable. + + v2.0.1 ------- @@ -55,6 +110,7 @@ Release Date: 2022-10-20 - An issue that prevented a change to the units used for reading rate, e.g. per second, per minute or per hour, not being actioned until a service was restarted has now been fixed. If the rate was also changed then this change would be actioned. - It was possible to set a reading rate of 0 readings, this would cause the south service to fail. It is now not possible to set a rate of 0. + - **Services & Plugins** - New Features: diff --git a/docs/Makefile b/docs/Makefile index 861c49bff7..d59a779d04 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -23,11 +23,11 @@ generated: fledge_plugins.rst plugin_and_services_configuration fledge_plugins.rst: @echo Building page with table of plugins - @bash scripts/fledge_plugin_list fledge_plugins.rst + @bash scripts/fledge_plugin_list fledge_plugins.rst $(DOCBRANCH) plugin_and_services_configuration: @echo Building plugin and service configuration appendices - @bash scripts/plugin_and_service_documentation + @bash scripts/plugin_and_service_documentation $(DOCBRANCH) clean: @echo Clean Doc build artifacts diff --git a/docs/OMF.rst index 9a49ebc333..657f9c256f 100644 --- a/docs/OMF.rst +++ b/docs/OMF.rst @@ -43,7 +43,7 @@ Select PI Web API from the Endpoint options. - **Asset Framework Hierarchies Rules:** A set of rules that allow specific readings to be placed elsewhere in the Asset Framework. These rules can be based on the name of the asset itself or some metadata associated with the asset. See `Asset Framework Hierarchy Rules`_. - PI Web API authentication - **PI Web API Authentication Method:** The authentication method to be used: anonymous, basic or kerberos. 
- Anonymous equates to no authentication, basic authentication requires a user name and password, and Kerberos allows integration with your single signon environment. + Anonymous equates to no authentication, basic authentication requires a user name and password, and Kerberos allows integration with your Single Sign-On environment. - **PI Web API User Id:** For Basic authentication, the user name to authenticate with the PI Web API. - **PI Web API Password:** For Basic authentication, the password of the user we are using to authenticate. - **PI Web API Kerberos keytab file:** The Kerberos keytab file used to authenticate. @@ -56,6 +56,7 @@ Select PI Web API from the Endpoint options. - **Number Format:** Used to match Fledge data types to the data type configured in PI. The default is float64 but may be set to any OMF datatype that supports floating point values. - **Compression:** Compress the readings data before sending them to the PI Web API OMF endpoint. This setting is not related to data compression in the PI Data Archive. + - **Complex Types:** Used to force the plugin to send OMF data types as complex types rather than the newer linked types. Linked types are the default way to send data and allow assets to have different sets of data points in different readings. See :ref:`Linked_Types`. Edge Data Store OMF Endpoint ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -97,7 +98,7 @@ The second screen will request the following information: | |omf_plugin_adh_config| | +-------------------------+ -Select AVEVA Data Hubfrom the Endpoint options. +Select AVEVA Data Hub from the Endpoint options. - Basic Information - **Endpoint:** This is the type of OMF endpoint. In this case, choose AVEVA Data Hub. @@ -464,6 +465,10 @@ that adds this hint to ensure this is the case. "OMFHint" : { "type" : "pump" } +.. note:: + + This hint only has meaning when using the complex type legacy mode with this plugin. + Tag Name Hint ~~~~~~~~~~~~~ @@ -473,6 +478,27 @@ Specifies that a specific tag name should be used when storing data in the PI Se "OMFHint" : { "tagName" : "AC1246" } +Legacy Type Hint +~~~~~~~~~~~~~~~~ + +Use legacy style complex types for this reading rather than the newer linked data types. + +.. code-block:: console + + "OMFHint" : { "LegacyType" : "true" } + +This allows the older mechanism to be forced for a single asset. See :ref:`Linked_Types`. + +Source Hint +~~~~~~~~~~~ + +The default data source that is associated with tags in the PI Server is Fledge, however this can be overridden using the data source hint. This hint may be applied to the entire asset or to specific datapoints within the asset. + +.. code-block:: console + + "OMFHint" : { "source" : "Fledge23" } + + Datapoint Specific Hint ~~~~~~~~~~~~~~~~~~~~~~~ @@ -488,6 +514,22 @@ to apply. The above hint applies to the datapoint *voltage* in the asset and applies a *number format* hint to that datapoint. +If more than one datapoint within a reading is required to have OMF hints +attached to them, this may be done by using an array as a child of the +datapoint item. + +.. code-block:: console + + "OMFHint" : { "datapoint" : [ + { "name" : "voltage", "number" : "float32", "uom" : "volt" }, + { "name" : "current", "number" : "uint32", "uom" : "milliampere" } + ] + } + +The example above attaches a number format hint to both the voltage and current +datapoints. It also assigns a unit of measure of milliampere to the current datapoint +and a unit of measure of volt to the voltage datapoint. 
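Because OMF hints are carried as JSON strings, the multi-datapoint hint above is easy to generate programmatically. A small Python sketch, illustrative only, that serialises the documented structure (the datapoint names and hint values are taken from the example above):

.. code-block:: python

    # Build the documented multi-datapoint OMFHint as a JSON string.
    import json

    omf_hint = {
        "OMFHint": {
            "datapoint": [
                {"name": "voltage", "number": "float32", "uom": "volt"},
                {"name": "current", "number": "uint32", "uom": "milliampere"},
            ]
        }
    }
    print(json.dumps(omf_hint))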
+ Asset Framework Location Hint ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -517,6 +559,45 @@ Note the following when defining an *AFLocation* hint: - If you move a Container, OMF North will not recreate it. If you then edit the AF Location hint, the Container will appear in the new location. +Unit Of Measure Hint +~~~~~~~~~~~~~~~~~~~~ + +A unit of measure, or uom, hint is used to associate one of the units of +measurement defined within your PI Server with a particular data point +within an asset. + +.. code-block:: console + + "OMFHint" : { "datapoint" : { "name" : "height", "uom" : "meter" } } + +Minimum Hint +~~~~~~~~~~~~ + +A minimum hint is used to associate a minimum value in the PI Point created for a data point. + +.. code-block:: console + + "OMFHint" : { "datapoint" : { "name" : "height", "minimum" : "0" } } + +Maximum Hint +~~~~~~~~~~~~ + +A maximum hint is used to associate a maximum value in the PI Point created for a data point. + +.. code-block:: console + + "OMFHint" : { "datapoint" : { "name" : "height", "maximum" : "100000" } } + +Interpolation +~~~~~~~~~~~~~ + +The interpolation hint sets the interpolation value used within the PI Server. The interpolation values supported are continuous, discrete, stepwisecontinuousleading, and stepwisecontinuousfollowing. + +.. code-block:: console + + "OMFHint" : { "datapoint" : { "name" : "height", "interpolation" : "continuous" } } + + Adding OMF Hints ~~~~~~~~~~~~~~~~ @@ -524,3 +605,13 @@ An OMF Hint is implemented as a string data point on a reading with the data point name of *OMFHint*. It can be added at any point in the processing of the data, however a specific plugin is available for adding the hints, the |OMFHint filter plugin|. + +.. _Linked_Types: + +Linked Types +------------ + +Versions of this plugin prior to 2.1.0 created a complex type within OMF for each asset that included all of the data points within that asset. This suffered from a limitation in that readings had to contain values for all of the data points of an asset in order to be accepted by the OMF end point. Following the introduction of OMF version 1.2 it was possible to use the linking features of OMF to avoid the need to create complex types for an asset and instead create empty assets and link the data points to this shell asset. This allows readings to only contain a subset of datapoints and still be successfully sent to the PI Server, or other end points. + +As of version 2.1.0 this linking approach is used for all new assets that are created; if assets exist within the PI Server from versions of the plugin prior to 2.1.0 then the older, complex types will be used. It is possible to force the plugin to use complex types for all assets, both old and new, using the *Complex Types* configuration option. It is also possible to force a particular asset to use the complex type mechanism using an OMFHint. 
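As noted under *Adding OMF Hints*, a hint is simply a string datapoint named *OMFHint* carried on the reading. A hedged Python sketch of attaching one to a reading; the reading layout shown is a simplified illustration, not the exact internal Fledge representation:

.. code-block:: python

    # Attach an OMFHint as a string datapoint named "OMFHint" on a reading.
    import json

    def add_omf_hint(reading: dict, hint: dict) -> dict:
        reading["readings"]["OMFHint"] = json.dumps(hint)
        return reading

    reading = {"asset": "pump", "readings": {"height": 12.7}}
    hint = {"datapoint": {"name": "height", "uom": "meter", "minimum": "0"}}
    print(add_omf_hint(reading, hint))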
+ diff --git a/docs/building_fledge/06_testing.rst b/docs/building_fledge/06_testing.rst index 72de797bf3..9c40cd7e71 100644 --- a/docs/building_fledge/06_testing.rst +++ b/docs/building_fledge/06_testing.rst @@ -192,7 +192,7 @@ Note: This following instructions assume you have downloaded and installed the C $ cd fledge-south-coap $ sudo cp -r python/fledge/plugins/south/coap /usr/local/fledge/python/fledge/plugins/south/ $ sudo cp python/requirements-coap.txt /usr/local/fledge/python/ - $ sudo pip3 install -r /usr/local/fledge/python/requirements-coap.txt + $ sudo python3 -m pip install -r /usr/local/fledge/python/requirements-coap.txt $ sudo chown -R root:root /usr/local/fledge/python/fledge/plugins/south/coap $ curl -sX POST http://localhost:8081/fledge/service -d '{"name": "CoAP", "type": "south", "plugin": "coap", "enabled": true}' diff --git a/docs/conf.py b/docs/conf.py index 2c5df5d56a..635789cc32 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -175,4 +175,6 @@ ], } -subprocess.run(["make generated"], shell=True, check=True) +# Pass Plugin DOCBRANCH argument in Makefile ; by default develop +# NOTE: During release time we need to replace DOCBRANCH with actual released version +subprocess.run(["make generated DOCBRANCH='2.1.0RC'"], shell=True, check=True) diff --git a/docs/images/south_advanced.jpg b/docs/images/south_advanced.jpg index 0695d1bde9..3a0df711b8 100644 Binary files a/docs/images/south_advanced.jpg and b/docs/images/south_advanced.jpg differ diff --git a/docs/images/stats_options.jpg b/docs/images/stats_options.jpg new file mode 100644 index 0000000000..a302fd73d5 Binary files /dev/null and b/docs/images/stats_options.jpg differ diff --git a/docs/rest_api_guide/04_RESTuser.rst b/docs/rest_api_guide/04_RESTuser.rst index 7aea4d4e37..924e5f5aeb 100644 --- a/docs/rest_api_guide/04_RESTuser.rst +++ b/docs/rest_api_guide/04_RESTuser.rst @@ -47,20 +47,28 @@ GET all assets An array of JSON objects, one per asset. -+--------------+--------+----------------------------------------------------+------------------------+ -| Name | Type | Description | Example | -+==============+========+====================================================+========================+ -| [].assetCode | string | The code of the asset | fogbench/accelerometer | -+--------------+--------+----------------------------------------------------+------------------------+ -| [].count | number | The number of recorded readings for the asset code | 22359 | -+--------------+--------+----------------------------------------------------+------------------------+ - +.. list-table:: + :widths: 20 20 50 30 + :header-rows: 1 + + * - Name + - Type + - Description + - Example + * - assetCode + - string + - The code of the asset + - fogbench/accelerometer + * - count + - number + - The number of recorded readings for the asset code + - 180 **Example** .. code-block:: console - $ curl -s http://localhost:8081/fledge/asset + $ curl -sX GET http://localhost:8081/fledge/asset [ { "count": 18, "assetCode": "fogbench/accelerometer" }, { "count": 18, "assetCode": "fogbench/gyroscope" }, { "count": 18, "assetCode": "fogbench/humidity" }, @@ -87,27 +95,44 @@ GET asset readings **Request Parameters** -- **limit** - set the limit of the number of readings to return. If not specified, the defaults is 20 readings. + - **limit** - set the limit of the number of readings to return. If not specified, the defaults is 20 readings. + + - **skip** - the number of assets to skip. 
This is used in conjunction with limit and allows the caller to not just get the last N readings, but to get a set of readings from the past. + + - **seconds** - this is essentially an alternative form of limit, but here the limit is expressed in seconds rather than a number of readings. It will return the readings for the last N seconds. Note that this can not be used in conjunction with the *limit* and *skip* or with *hours* and *minutes* request parameters. + + - **minutes** - this is essentially an alternative form of limit, but here the limit is expressed in minutes rather than a number of readings. It will return the readings for the last N minutes. Note that this can not be used in conjunction with the *limit* and *skip* or with *seconds* and *hours* request parameters. + + - **hours** - this is essentially an alternative form of limit, but here the limit is expressed in hours rather than a number of readings. It will return the readings for the last N hours. Note that this can not be used in conjunction with the *limit* and *skip* or with *seconds* and *minutes* request parameters. + - **previous** - This is used in conjunction with the *hours*, *minutes* or *seconds* request parameter and allows the caller to get not just the most recent readings but historical readings. The value of *previous* is defined in hours, minutes or seconds dependent upon the parameter it is used with and defines how long ago the data that is returned should end. If the caller passes a set of parameters *seconds=30&previous=120* the call will return 30 seconds worth of data and the newest data returned will be 120 seconds old. **Response Payload** An array of JSON objects with the readings data for a series of readings sorted in reverse chronological order. -+--------------+-------------+---------------------------------------------------+-----------------------------------+ -| Name | Type | Description | Example | -+==============+=============+===================================================+===================================+ -| [].timestamp | timestamp | The time at which the reading was received. | 2018-04-16 14:33:18.215 | -+--------------+-------------+---------------------------------------------------+-----------------------------------+ -| [].reading | JSON object | The JSON reading object received from the sensor. | {"reading": {"x":0, "y":0, "z":1} | -+--------------+-------------+---------------------------------------------------+-----------------------------------+ - +.. list-table:: + :widths: 20 20 50 30 + :header-rows: 1 + + * - Name + - Type + - Description + - Example + * - timestamp + - timestamp + - The time at which the reading was received + - 2018-04-16 14:33:18.215 + * - reading + - JSON object + - The JSON reading object received from the sensor + - {"reading": {"x":0, "y":0, "z":1} **Example** .. 
code-block:: console - $ curl -s http://localhost:8081/fledge/asset/fogbench%2Faccelerometer + $ curl -sX GET http://localhost:8081/fledge/asset/fogbench%2Faccelerometer [ { "reading": { "x": 0, "y": -2, "z": 0 }, "timestamp": "2018-04-19 14:20:59.692" }, { "reading": { "x": 0, "y": 0, "z": -1 }, "timestamp": "2018-04-19 14:20:54.643" }, { "reading": { "x": -1, "y": 2, "z": 1 }, "timestamp": "2018-04-19 14:20:49.899" }, @@ -127,7 +152,8 @@ An array of JSON objects with the readings data for a series of readings sorted { "reading": { "x": 0, "y": -2, "z": 1 }, "timestamp": "2018-04-19 14:06:05.864" }, { "reading": { "x": -1, "y": -2, "z": 0 }, "timestamp": "2018-04-19 13:45:15.881" } ] $ - $ curl -s http://localhost:8081/fledge/asset/fogbench%2Faccelerometer?limit=5 + + $ curl -sX GET http://localhost:8081/fledge/asset/fogbench%2Faccelerometer?limit=5 [ { "reading": { "x": 0, "y": -2, "z": 0 }, "timestamp": "2018-04-19 14:20:59.692" }, { "reading": { "x": 0, "y": 0, "z": -1 }, "timestamp": "2018-04-19 14:20:54.643" }, { "reading": { "x": -1, "y": 2, "z": 1 }, "timestamp": "2018-04-19 14:20:49.899" }, @@ -135,6 +161,20 @@ An array of JSON objects with the readings data for a series of readings sorted { "reading": { "x": -1, "y": -2, "z": -2 }, "timestamp": "2018-04-19 14:20:42.746" } ] $ +Using *seconds* and *previous* to obtain historical data. + +.. code-block:: console + + $ curl -sX GET http://localhost:8081/fledge/asset/sinusoid?seconds=5\&previous=60|jq + [ + { "reading": { "sinusoid": 1 }, "timestamp": "2022-11-09 09:37:51.930688" }, + { "reading": { "sinusoid": 0.994521895 }, "timestamp": "2022-11-09 09:37:50.930887" }, + { "reading": { "sinusoid": 0.978147601 }, "timestamp": "2022-11-09 09:37:49.933698" }, + { "reading": { "sinusoid": 0.951056516 }, "timestamp": "2022-11-09 09:37:48.930644" }, + { "reading": { "sinusoid": 0.913545458 }, "timestamp": "2022-11-09 09:37:47.930950" } + ] + +The above call returned 5 seconds of data from the current time minus 65 seconds to the current time minus 5 seconds. GET asset reading ~~~~~~~~~~~~~~~~~ @@ -150,27 +190,45 @@ GET asset reading **Request Parameters** -- **limit** - set the limit of the number of readings to return. If not specified, the defaults is 20 single readings. + - **limit** - set the limit of the number of readings to return. If not specified, the defaults is 20 single readings. + + - **skip** - the number of assets to skip. This is used in conjunction with limit and allows the caller to not just get the last N readings, but to get a set of readings from the past. + + - **seconds** - this is essentially an alternative form of limit, but here the limit is expressed in seconds rather than a number of readings. It will return the readings for the last N seconds. Note that this can not be used in conjunction with the *limit* and *skip* or with *hours* and *minutes* request parameters. + + - **minutes** - this is essentially an alternative form of limit, but here the limit is expressed in minutes rather than a number of readings. It will return the readings for the last N minutes. Note that this can not be used in conjunction with the *limit* and *skip* or with *seconds* and *hours* request parameters. + + - **hours** - this is essentially an alternative form of limit, but here the limit is expressed in hours rather than a number of readings. It will return the readings for the last N hours. Note that this can not be used in conjunction with the *limit* and *skip* or with *seconds* and *minutes* request parameters. 
+ + - **previous** - This is used in conjunction with the *hours*, *minutes* or *seconds* request parameter and allows the caller to get not just the most recent readings but historical readings. The value of *previous* is defined in hours, minutes or seconds dependent upon the parameter it is used with and defines how long ago the data that is returned should end. If the caller passes a set of parameters *seconds=30&previous=120* the call will return 30 seconds worth of data and the newest data returned will be 120 seconds old. **Response Payload** An array of JSON objects with a series of readings sorted in reverse chronological order. -+-----------+-------------+---------------------------------------------+-------------------------+ -| Name | Type | Description | Example | -+===========+=============+=============================================+=========================+ -| timestamp | timestamp | The time at which the reading was received. | 2018-04-16 14:33:18.215 | -+-----------+-------------+---------------------------------------------+-------------------------+ -| {reading} | JSON object | The value of the specified reading. | "temperature": 20 | -+-----------+-------------+---------------------------------------------+-------------------------+ - +.. list-table:: + :widths: 20 20 50 30 + :header-rows: 1 + + * - Name + - Type + - Description + - Example + * - timestamp + - timestamp + - The time at which the reading was received + - 2018-04-16 14:33:18.215 + * - {reading} + - JSON object + - The value of the specified reading + - {"temperature": 20} **Example** .. code-block:: console - $ curl -s http://localhost:8081/fledge/asset/fogbench%2Fhumidity/temperature + $ curl -sX GET http://localhost:8081/fledge/asset/fogbench%2Fhumidity/temperature [ { "temperature": 20, "timestamp": "2018-04-19 14:20:59.692" }, { "temperature": 33, "timestamp": "2018-04-19 14:20:54.643" }, { "temperature": 35, "timestamp": "2018-04-19 14:20:49.899" }, @@ -190,7 +248,8 @@ An array of JSON objects with a series of readings sorted in reverse chronologic { "temperature": 46, "timestamp": "2018-04-19 14:06:05.864" }, { "temperature": 10, "timestamp": "2018-04-19 13:45:15.881" } ] $ - $ curl -s http://localhost:8081/fledge/asset/fogbench%2Faccelerometer?limit=5 + + $ curl -sX GET http://localhost:8081/fledge/asset/fogbench%2Faccelerometer?limit=5 [ { "temperature": 20, "timestamp": "2018-04-19 14:20:59.692" }, { "temperature": 33, "timestamp": "2018-04-19 14:20:54.643" }, { "temperature": 35, "timestamp": "2018-04-19 14:20:49.899" }, @@ -213,31 +272,125 @@ GET asset reading summary **Response Payload** -An array of JSON objects with a series of readings sorted in reverse chronological order. 
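The window parameters described above (limit and skip, or seconds, minutes and hours combined with previous) can be exercised from any HTTP client. A hedged Python sketch using the third-party requests library against a local Fledge instance; the base URL and asset code are assumptions for the example:

.. code-block:: python

    # Query the asset browser window parameters against a local Fledge instance.
    import requests

    BASE = "http://localhost:8081/fledge"
    asset = "sinusoid"  # illustrative asset code

    # Last 5 readings
    print(requests.get(f"{BASE}/asset/{asset}", params={"limit": 5}).json())

    # Skip the newest 10 readings and return the 5 before them
    print(requests.get(f"{BASE}/asset/{asset}", params={"limit": 5, "skip": 10}).json())

    # 30 seconds worth of data whose newest reading is 120 seconds old
    print(requests.get(f"{BASE}/asset/{asset}", params={"seconds": 30, "previous": 120}).json())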
- -+-------------------+--------+--------------------------------------------+---------+ -| Name | Type | Description | Example | -+===================+========+============================================+=========+ -| {reading}.average | number | The average value of the set of |br| | 27 | -| | | sensor values selected in the query string | | -+-------------------+--------+--------------------------------------------+---------+ -| {reading}.min | number | The minimum value of the set of |br| | 0 | -| | | sensor values selected in the query string | | -+-------------------+--------+--------------------------------------------+---------+ -| {reading}.max | number | The maximum value of the set of |br| | 47 | -| | | sensor values selected in the query string | | -+-------------------+--------+--------------------------------------------+---------+ - +A JSON object of a reading by asset code. + +.. list-table:: + :widths: 20 20 50 30 + :header-rows: 1 + + * - Name + - Type + - Description + - Example + * - {reading}.max + - number + - The maximum value of the set of sensor values selected in the query string + - 47 + * - {reading}.min + - number + - The minimum value of the set of sensor values selected in the query string + - 0 + * - {reading}.average + - number + - The average value of the set of sensor values selected in the query string + - 27 **Example** .. code-block:: console - $ curl -s http://localhost:8081/fledge/asset/fogbench%2Fhumidity/temperature/summary + $ curl -sX GET http://localhost:8081/fledge/asset/fogbench%2Fhumidity/temperature/summary { "temperature": { "max": 47, "min": 0, "average": 27 } } $ +GET all asset reading timespan +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +``GET /fledge/asset/timespan`` - Return newest and oldest timestamp of each asset for which we hold readings in the buffer. + + +**Response Payload** + +An array of JSON objects with newest and oldest timestamps of the readings held for each asset. + +.. list-table:: + :widths: 20 20 50 30 + :header-rows: 1 + + * - Name + - Type + - Description + - Example + * - asset_code + - string + - The asset code for which the timestamps refer + - sinusoid + * - oldest + - string + - The oldest timestamp held in the buffer for this asset + - "2022-11-08 17:07:02.623258" + * - newest + - string + - The newest timestamp held in the buffer for this asset + - "2022-11-09 14:52:50.069432" + +**Example** + +.. code-block:: console + + $ curl -sX GET http://localhost:8081/fledge/asset/timespan + [ + { + "oldest": "2022-11-08 17:07:02.623258", + "newest": "2022-11-09 14:52:50.069432", + "asset_code": "sinusoid" + } + ] + +GET asset reading timespan +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +``GET /fledge/asset/{code}/timespan`` - Return newest and oldest timestamp for which we hold readings in the buffer. + + +**Path Parameters** + +- **code** - the asset code to retrieve. + + +**Response Payload** + +A JSON object with the newest and oldest timestamps for the asset held in the storage buffer. + +.. list-table:: + :widths: 20 20 50 30 + :header-rows: 1 + + * - Name + - Type + - Description + - Example + * - oldest + - string + - The oldest timestamp held in the buffer for this asset + - "2022-11-08 17:07:02.623258" + * - newest + - string + - The newest timestamp held in the buffer for this asset + - "2022-11-09 14:52:50.069432" + +**Example** + +.. 
code-block:: console + + $ curl -sX GET http://localhost:8081/fledge/asset/sinusoid/timespan|jq + { + "oldest": "2022-11-08 17:07:02.623258", + "newest": "2022-11-09 14:59:14.069207" + } + + GET timed average asset reading ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -249,37 +402,54 @@ GET timed average asset reading - **code** - the asset code to retrieve. - **reading** - the sensor from the assets JSON formatted reading. - **Request Parameters** -- **limit** - set the limit of the number of readings to return. If not specified, the defaults is 20 single readings. + - **limit** - set the limit of the number of readings to return. If not specified, the defaults is 20 readings. + + - **skip** - the number of assets to skip. This is used in conjunction with limit and allows the caller to not just get the last N readings, but to get a set of readings from the past. + + - **seconds** - this is essentially an alternative form of limit, but here the limit is expressed in seconds rather than a number of readings. It will return the readings for the last N seconds. Note that this can not be used in conjunction with the *limit* and *skip* or with *hours* and *minutes* request parameters. + + - **minutes** - this is essentially an alternative form of limit, but here the limit is expressed in minutes rather than a number of readings. It will return the readings for the last N minutes. Note that this can not be used in conjunction with the *limit* and *skip* or with *seconds* and *hours* request parameters. + - **hours** - this is essentially an alternative form of limit, but here the limit is expressed in hours rather than a number of readings. It will return the readings for the last N hours. Note that this can not be used in conjunction with the *limit* and *skip* or with *seconds* and *minutes* request parameters. + + - **previous** - This is used in conjunction with the *hours*, *minutes* or *seconds* request parameter and allows the caller to get not just the most recent readings but historical readings. The value of *previous* is defined in hours, minutes or seconds dependent upon the parameter it is used with and defines how long ago the data that is returned should end. If the caller passes a set of parameters *seconds=30&previous=120* the call will return 30 seconds worth of data and the newest data returned will be 120 seconds old. **Response Payload** An array of JSON objects with a series of readings sorted in reverse chronological order. -+-----------+-----------+--------------------------------------------+---------------------+ -| Name | Type | Description | Example | -+===========+===========+============================================+=====================+ -| timestamp | timestamp | The time the reading represents. | 2018-04-16 14:33:18 | -+-----------+-----------+--------------------------------------------+---------------------+ -| average | number | The average value of the set of |br| | 27 | -| | | sensor values selected in the query string | | -+-----------+-----------+--------------------------------------------+---------------------+ -| min | number | The minimum value of the set of |br| | 0 | -| | | sensor values selected in the query string | | -+-----------+-----------+--------------------------------------------+---------------------+ -| max | number | The maximum value of the set of |br| | 47 | -| | | sensor values selected in the query string | | -+-----------+-----------+--------------------------------------------+---------------------+ - +.. 
list-table:: + :widths: 20 20 50 30 + :header-rows: 1 + + * - Name + - Type + - Description + - Example + * - timestamp + - timestamp + - The time the reading represents + - 2018-04-16 14:33:18 + * - max + - number + - The maximum value of the set of sensor values selected in the query string + - 47 + * - min + - number + - The minimum value of the set of sensor values selected in the query string + - 0 + * - average + - number + - The average value of the set of sensor values selected in the query string + - 27 **Example** .. code-block:: console - $ curl -s http://localhost:8081/fledge/asset/fogbench%2Fhumidity/temperature/series + $ curl -sX GET http://localhost:8081/fledge/asset/fogbench%2Fhumidity/temperature/series [ { "timestamp": "2018-04-19 14:20:59", "max": 20, "min": 20, "average": 20 }, { "timestamp": "2018-04-19 14:20:54", "max": 33, "min": 33, "average": 33 }, { "timestamp": "2018-04-19 14:20:49", "max": 35, "min": 35, "average": 35 }, @@ -290,15 +460,26 @@ An array of JSON objects with a series of readings sorted in reverse chronologic { "timestamp": "2018-04-19 14:06:05", "max": 46, "min": 5, "average": 27.8 }, { "timestamp": "2018-04-19 13:45:15", "max": 10, "min": 10, "average": 10 } ] $ - $ curl -s http://localhost:8081/fledge/asset/fogbench%2Fhumidity/temperature/series + + $ curl -sX GET http://localhost:8081/fledge/asset/fogbench%2Fhumidity/temperature/series?limit=5 [ { "timestamp": "2018-04-19 14:20:59", "max": 20, "min": 20, "average": 20 }, { "timestamp": "2018-04-19 14:20:54", "max": 33, "min": 33, "average": 33 }, { "timestamp": "2018-04-19 14:20:49", "max": 35, "min": 35, "average": 35 }, { "timestamp": "2018-04-19 14:20:47", "max": 0, "min": 0, "average": 0 }, { "timestamp": "2018-04-19 14:20:42", "max": 37, "min": 37, "average": 37 } ] +Using *seconds* and *previous* to obtain historical data. +.. code-block:: console - - - + $ curl http://localhost:8081/fledge/asset/fogbench%2Fhumidity/temperature/series?seconds=5\&previous=60|jq + [ + { "timestamp": "2022-11-09 09:37:51.930688", "max": 20, "min": 20, "average": 20 }, + { "timestamp": "2022-11-09 09:37:50.930887", "max": 33, "min": 33, "average": 33 }, + { "timestamp": "2022-11-09 09:37:49.933698", "max": 0, "min": 0, "average": 0 }, + { "timestamp": "2022-11-09 09:37:48.930644", "max": 5, "min": 1, "average": 4 }, + { "timestamp": "2022-11-09 09:37:47.930950", "max": 0, "min": 37, "average": 37 } + ] + $ + +The above call returned 5 seconds of data from the current time minus 65 seconds to the current time minus 5 seconds. diff --git a/docs/scripts/fledge_plugin_list b/docs/scripts/fledge_plugin_list index d773d942ce..b90cabc1ff 100755 --- a/docs/scripts/fledge_plugin_list +++ b/docs/scripts/fledge_plugin_list @@ -1,48 +1,15 @@ #!/usr/bin/env bash -function table { - list="$1" - title=$2 - tableType=$3 - echo ".. 
list-table:: ${title}" >> "$output" - echo " :widths: 20 50" >> "$output" - echo " :header-rows: 1" >> "$output" - echo "" >> "$output" - echo " * - Name" >> "$output" - echo " - Description" >> "$output" - for repo in ${list} - do - product=$(echo "$repo}" | sed -e 's/-.*//') - org="fledge-iot" - if [[ ${product} == "foglamp" ]]; then org="dianomic"; fi - type=$(echo "${repo}" | sed -e 's/fledge-//' -e 's/foglamp-//' -e 's/-.*//') - name=$(echo "${repo}" | sed -e 's/fledge-//' -e 's/foglamp-//' -e "s/${type}-//") - echo "${repo}", "${name}", "${type}" >>/tmp/repos - if [[ ${type} = "$tableType" ]]; then - description=$(echo "$fledgeRepos" | python3 -c 'import json,sys;repos=json.load(sys.stdin);fRepo = [r for r in repos if r["name"] == "'"${repo}"'" ];print(fRepo[0]["description"])') - if [[ "${description}" = "None" ]]; then - description="A ${name} ${type} plugin" - fi - echo " * - ${name}" >> "$output" - echo " - ${description}" >> "$output" - fi - done - if [[ ${tableType} = "north" ]]; then - echo " * - OMF" >> "$output" - echo " - Send data to OSIsoft PI Server, Edge Data Store or OSIsoft Cloud Services" >> "$output" - fi - echo "" >> "$output" -} - -if [[ "${GITHUB_ACCESS_TOKEN}" == "" ]]; then - echo "You must have set a GitHub access token environment variable; export GITHUB_ACCESS_TOKEN=YOUR_ACCESS_TOKEN" +if [[ "${USERNAME}" == "" ]] || [[ "${GITHUB_ACCESS_TOKEN}" == "" ]]; then + echo "You must have set a GitHub username & access token environment variable. Like export USERNAME=YOUR_USERNAME; export GITHUB_ACCESS_TOKEN=YOUR_ACCESS_TOKEN"; exit 1 fi output=$1 rm -f $output +DOCBRANCH=$2 header="Authorization: token ${GITHUB_ACCESS_TOKEN}" -# Get total number of respository pages in fledge org. +# Get total number of repository pages in fledge org. fledgeRepoPagesCount=$(curl -sI https://api.github.com/orgs/fledge-iot/repos | grep -oP '\d+(?=>; rel="last")') fledgeRepos=$(curl -H "$header" -sX GET https://api.github.com/orgs/fledge-iot/repos\?page=[1-$fledgeRepoPagesCount]) fledgeRepos="$(echo $fledgeRepos | sed 's/\] \[/,/g')" @@ -58,9 +25,8 @@ fledge_internal_repos=$(curl -sX GET -H "$header" -H "Accept: application/vnd.gi fledge_wip_repos_name=$(echo ${fledge_wip_repos} | python3 -c "$fetchTopicReposPyScript" | sort -f) fledge_poc_repos_name=$(echo ${fledge_poc_repos} | python3 -c "$fetchTopicReposPyScript" | sort -f) fledge_internal_repos_name=$(echo ${fledge_internal_repos} | python3 -c "$fetchTopicReposPyScript" | sort -f) -EXCLUDE_FLEDGE_TOPIC_REPOSITORIES=$(echo ${fledge_wip_repos_name} ${fledge_poc_repos_name} ${fledge_internal_repos_name} | sort -f) -echo "EXCLUDED FLEDGE TOPIC REPOS..."$EXCLUDE_FLEDGE_TOPIC_REPOSITORIES -export EXCLUDE_FLEDGE_TOPIC_REPOSITORIES=$EXCLUDE_FLEDGE_TOPIC_REPOSITORIES +export EXCLUDE_FLEDGE_TOPIC_REPOSITORIES=$(echo ${fledge_wip_repos_name} ${fledge_poc_repos_name} ${fledge_internal_repos_name} | sort -f) +echo "EXCLUDED FLEDGE TOPIC REPOS LIST: $EXCLUDE_FLEDGE_TOPIC_REPOSITORIES" fetchFledgeReposPyScript=' import os,json,sys;\ repos=json.load(sys.stdin);\ @@ -71,6 +37,39 @@ print("\n".join(fRepos)); ' REPOSITORIES=$(echo ${fledgeRepos} | python3 -c "$fetchFledgeReposPyScript" | sort -f) echo "REPOS LIST: "${REPOSITORIES} + +function table { + list="$1" + title=$2 + tableType=$3 + echo ".. 
list-table:: ${title}" >> "$output" + echo " :widths: 20 50" >> "$output" + echo " :header-rows: 1" >> "$output" + echo "" >> "$output" + echo " * - Name" >> "$output" + echo " - Description" >> "$output" + for repo in ${list} + do + product=$(echo "$repo}" | sed -e 's/-.*//') + type=$(echo "${repo}" | sed -e 's/fledge-//' -e 's/-.*//') + name=$(echo "${repo}" | sed -e 's/fledge-//' -e "s/${type}-//") + if [[ ${type} = "$tableType" ]]; then + is_branch_exists=$(git ls-remote -h https://${USERNAME}:${GITHUB_ACCESS_TOKEN}@github.com/fledge-iot/${repo}.git ${DOCBRANCH} | grep -c "refs/heads/${DOCBRANCH}") + if [[ ${is_branch_exists} -gt 0 ]]; then + description=$(echo "$fledgeRepos" | python3 -c 'import json,sys;repos=json.load(sys.stdin);fRepo = [r for r in repos if r["name"] == "'"${repo}"'" ];print(fRepo[0]["description"])') + if [[ "${description}" = "None" ]]; then description="A ${name} ${type} plugin"; fi + echo " * - ${name}" >> "$output" + echo " - ${description}" >> "$output" + fi + fi + done + if [[ ${tableType} = "north" ]]; then + echo " * - OMF" >> "$output" + echo " - Send data to OSIsoft PI Server, Edge Data Store or OSIsoft Cloud Services" >> "$output" + fi + echo "" >> "$output" +} + cat >> $output << EOF1 Fledge Plugins ============== diff --git a/docs/scripts/plugin_and_service_documentation b/docs/scripts/plugin_and_service_documentation index 35a6ce1dad..89e11cdc2a 100644 --- a/docs/scripts/plugin_and_service_documentation +++ b/docs/scripts/plugin_and_service_documentation @@ -1,76 +1,14 @@ #!/bin/bash -# The best default branch to use develop rather than master. As new repository do not have a branch called master any more and having main branch only. -if [[ "${DOCBRANCH}" == "" ]]; then - # NOTE: During release time we need to replace develop and nightly build with actual released version - export DOCBRANCH=2.0.1RC - export ARCHIVE_BUILD=nightly -else - # only for dev purpose - if [[ "${DOCBRANCH}" == FOGL-* ]]; then - export DOCBRANCH=${DOCBRANCH} - export ARCHIVE_BUILD="fixes\/${DOCBRANCH}" - elif [[ "${DOCBRANCH}" == *RC ]]; then - export DOCBRANCH=${DOCBRANCH} - export ARCHIVE_BUILD=$(echo ${DOCBRANCH} | sed -e 's/RC//g') - else - export DOCBRANCH=${DOCBRANCH} - export ARCHIVE_BUILD=nightly - fi -fi - -echo "Default ${DOCBRANCH} branch used for plugin documentation and ${ARCHIVE_BUILD} build used for source code documentation" -# Tweaks required in plugin developer guide to enable source code documentation -sed -i 's/ARCHIVE_BUILD_NAME/'"${ARCHIVE_BUILD}"'/g' plugin_developers_guide/00_source_code_doc.rst - -function plugin_and_service_doc { - repo_name=$1 - dest=$2 - dir_type=$3 - product=$(echo ${repo_name} | sed -e 's/-.*//') - org=fledge-iot - type=$(echo ${repo_name} | sed -e 's/fledge-//' -e 's/-.*//') - name=$(echo ${repo_name} | sed -e 's/fledge-//' -e "s/${type}-//") - mkdir -p /tmp/doc.$$ - cd /tmp/doc.$$ - git clone -b ${DOCBRANCH} --single-branch https://${USERNAME}:${GITHUB_ACCESS_TOKEN}@github.com/${org}/${repo_name}.git - - if [[ ${type} != "service" ]]; then - # cloned directory replaced with installed directory name which is defined in Package file for each repo - installed_plugin_dir_name=$(cat ${repo_name}/Package | grep plugin_install_dirname= | sed -e "s/plugin_install_dirname=//g") - if [[ ${installed_plugin_dir_name} == "\${plugin_name}" ]]; then - installed_plugin_dir_name=$(cat ${repo_name}/Package | grep plugin_name= | sed -e "s/plugin_name=//g") - fi - old_plugin_name=$(echo ${repo_name} | cut -d '-' -f3-) - new_plugin_name=$(echo 
${repo_name/$old_plugin_name/$installed_plugin_dir_name}) - if [[ ${repo_name} != ${new_plugin_name} ]]; then - mv ${repo_name} ${new_plugin_name} - fi - repo_name=${new_plugin_name} - else - repo_name=fledge-${type}-${name} - fi - cd - - if [ -d /tmp/doc.$$/${repo_name}/docs ]; then - rm -rf ${dir_type}/${repo_name} - mkdir -p ${dir_type}/${repo_name} - cp -r /tmp/doc.$$/${repo_name}/docs/. ${dir_type}/${repo_name} - if [ -f ${dir_type}/${repo_name}/index.rst ]; then - echo " ${repo_name}/index" >> $dest - else - echo "*** WARNING: index.rst file is missing for ${repo_name}." - fi - else - echo "*** WARNING: ${repo_name} docs directory is missing." - fi - rm -rf /tmp/doc.$$ -} - -# Always create a fresh set of Plugin documentation -if [ -d plugins ]; then - rm -rf plugins +if [[ "${USERNAME}" == "" ]] || [[ "${GITHUB_ACCESS_TOKEN}" == "" ]]; then + echo "You must have set a GitHub username & access token environment variable. Like export USERNAME=YOUR_USERNAME; export GITHUB_ACCESS_TOKEN=YOUR_ACCESS_TOKEN"; + exit 1 fi +# Always create a fresh set of Plugin & Service documentation +if [ -d plugins ]; then rm -rf plugins; fi mkdir plugins +if [ -d services ]; then rm -rf services; fi +mkdir services cat > plugins/south.rst << EOFSOUTH ******************** @@ -115,18 +53,15 @@ Fledge Notification Delivery Plugins EOFNOTIFY -if [[ "${USERNAME}" == "" ]]; then - echo "You must have set a GitHub username environment variable; export USERNAME=YOUR_USERNAME" - exit 1 -fi - -if [[ "${GITHUB_ACCESS_TOKEN}" == "" ]]; then - echo "You must have set a GitHub access token environment variable; export GITHUB_ACCESS_TOKEN=YOUR_ACCESS_TOKEN" - exit 1 -fi - +DOCBRANCH="$1" +# TODO: source code documentation always point to nightly irrespective of the DOCBRANCH +# In future may point to specific branch/release version +ARCHIVE_BUILD="nightly" +echo "Default ${DOCBRANCH} branch used for plugin documentation and ${ARCHIVE_BUILD} build used for source code documentation" +# Tweaks required in plugin developer guide to enable source code documentation +sed -i 's/ARCHIVE_BUILD_NAME/'"${ARCHIVE_BUILD}"'/g' plugin_developers_guide/00_source_code_doc.rst header="Authorization: token ${GITHUB_ACCESS_TOKEN}" -# Get total number of respository pages in fledge org. +# Get total number of repository pages in fledge org. 
fledgeRepoPagesCount=$(curl -sI https://api.github.com/orgs/fledge-iot/repos | grep -oP '\d+(?=>; rel="last")') fledgeRepos=$(curl -H "$header" -sX GET https://api.github.com/orgs/fledge-iot/repos\?page=[1-$fledgeRepoPagesCount]) fledgeRepos="$(echo $fledgeRepos | sed 's/\] \[/,/g')" @@ -142,9 +77,8 @@ fledge_internal_repos=$(curl -sX GET -H "$header" -H "Accept: application/vnd.gi fledge_wip_repos_name=$(echo ${fledge_wip_repos} | python3 -c "$fetchTopicReposPyScript" | sort -f) fledge_poc_repos_name=$(echo ${fledge_poc_repos} | python3 -c "$fetchTopicReposPyScript" | sort -f) fledge_internal_repos_name=$(echo ${fledge_internal_repos} | python3 -c "$fetchTopicReposPyScript" | sort -f) -EXCLUDE_FLEDGE_TOPIC_REPOSITORIES=$(echo ${fledge_wip_repos_name} ${fledge_poc_repos_name} ${fledge_internal_repos_name} | sort -f) -echo "EXCLUDED FLEDGE TOPIC REPOS..."$EXCLUDE_FLEDGE_TOPIC_REPOSITORIES -export EXCLUDE_FLEDGE_TOPIC_REPOSITORIES=$EXCLUDE_FLEDGE_TOPIC_REPOSITORIES +export EXCLUDE_FLEDGE_TOPIC_REPOSITORIES=$(echo ${fledge_wip_repos_name} ${fledge_poc_repos_name} ${fledge_internal_repos_name} | sort -f) +echo "EXCLUDED FLEDGE TOPIC REPOS LIST: $EXCLUDE_FLEDGE_TOPIC_REPOSITORIES" fetchFledgeReposPyScript=' import os,json,sys;\ repos=json.load(sys.stdin);\ @@ -155,9 +89,52 @@ print("\n".join(fRepos)); ' REPOSITORIES=$(echo ${fledgeRepos} | python3 -c "$fetchFledgeReposPyScript" | sort -f) echo "REPOS LIST: "${REPOSITORIES} + +function plugin_and_service_doc { + repo_name=$1 + dest=$2 + dir_type=$3 + type=$(echo ${repo_name} | sed -e 's/fledge-//' -e 's/-.*//') + name=$(echo ${repo_name} | sed -e 's/fledge-//' -e "s/${type}-//") + mkdir -p /tmp/doc.$$ + cd /tmp/doc.$$ + git clone -b ${DOCBRANCH} --single-branch https://${USERNAME}:${GITHUB_ACCESS_TOKEN}@github.com/fledge-iot/${repo_name}.git + + if [[ ${type} != "service" ]]; then + # cloned directory replaced with installed directory name which is defined in Package file for each repo + installed_plugin_dir_name=$(cat ${repo_name}/Package | grep plugin_install_dirname= | sed -e "s/plugin_install_dirname=//g") + if [[ ${installed_plugin_dir_name} == "\${plugin_name}" ]]; then + installed_plugin_dir_name=$(cat ${repo_name}/Package | grep plugin_name= | sed -e "s/plugin_name=//g") + fi + old_plugin_name=$(echo ${repo_name} | cut -d '-' -f3-) + new_plugin_name=$(echo ${repo_name/$old_plugin_name/$installed_plugin_dir_name}) + if [[ ${repo_name} != ${new_plugin_name} ]]; then + mv ${repo_name} ${new_plugin_name} + fi + repo_name=${new_plugin_name} + else + repo_name=fledge-${type}-${name} + fi + cd - + if [ -d /tmp/doc.$$/${repo_name}/docs ]; then + rm -rf ${dir_type}/${repo_name} + mkdir -p ${dir_type}/${repo_name} + cp -r /tmp/doc.$$/${repo_name}/docs/. ${dir_type}/${repo_name} + if [ -f ${dir_type}/${repo_name}/index.rst ]; then + echo " ${repo_name}/index" >> $dest + else + echo "*** WARNING: index.rst file is missing for ${repo_name}." + fi + else + echo "*** WARNING: ${repo_name} docs directory is missing." + fi + rm -rf /tmp/doc.$$ +} + + for repo in ${REPOSITORIES} do - type=$(echo $repo | sed -e 's/fledge-//' -e 's/foglamp-//' -e 's/-.*//') + type=$(echo $repo | sed -e 's/fledge-//' -e 's/-.*//') if [ "$type" = "south" -o "$type" = "north" -o $type = "filter" -o $type = "rule" -o $type = "notify" ]; then dest=plugins/${type}.rst plugin_and_service_doc $repo $dest "plugins" @@ -171,18 +148,12 @@ echo '.. include:: ../../fledge-north-OMF.rst' > plugins/fledge-north-OMF/index. 
# Append OMF.rst to the end of the file rather than including it so that we may # edit the links to prevent duplicates cat OMF.rst >> plugins/fledge-north-OMF/index.rst -sed -i -e 's/Naming_Scheme/Naming_Scheme_plugin/' plugins/fledge-north-OMF/index.rst +sed -i -e 's/Naming_Scheme/Naming_Scheme_plugin/' -e 's/Linked_Types/Linked_Types_Plugin/' plugins/fledge-north-OMF/index.rst # Create the Threshold rule documentation mkdir plugins/fledge-rule-Threshold ln -s $(pwd)/fledge-rule-Threshold/images plugins/fledge-rule-Threshold/images echo '.. include:: ../../fledge-rule-Threshold/index.rst' > plugins/fledge-rule-Threshold/index.rst -# Always create a fresh set of Service documentation -if [ -d services ]; then - rm -rf services -fi -mkdir services - cat > services/index.rst << EOFSERVICES ******************* Additional Services @@ -198,7 +169,7 @@ SERVICE_REPOS=$(echo "${REPOSITORIES}" | grep -o "[a-zA-Z0-9\-]*-service-[a-zA-Z echo "SERVICE REPOS LIST: "${SERVICE_REPOS} for repo in ${SERVICE_REPOS} do - type=$(echo $repo | sed -e 's/fledge-//' -e 's/foglamp-//' -e 's/-.*//') + type=$(echo $repo | sed -e 's/fledge-//' -e 's/-.*//') dest=services/index.rst plugin_and_service_doc $repo $dest "services" done diff --git a/docs/tuning_fledge.rst b/docs/tuning_fledge.rst index 3ab256b272..4c29d1d5db 100644 --- a/docs/tuning_fledge.rst +++ b/docs/tuning_fledge.rst @@ -1,5 +1,6 @@ .. Images .. |south_advanced| image:: images/south_advanced.jpg +.. |stats_options| image:: images/stats_options.jpg .. |north_advanced| image:: images/north_advanced.jpg .. |service_monitor| image:: images/service_monitor.jpg .. |scheduler_advanced| image:: images/scheduler_advanced.jpg @@ -49,6 +50,18 @@ The south services within Fledge each have a set of advanced configuration optio - *Minimum Log Level* - This configuration option can be used to set the logs that will be seen for this service. It defines the level of logging that is send to the syslog and may be set to *error*, *warning*, *info* or *debug*. Logs of the level selected and higher will be sent to the syslog. You may access the contents of these logs by selecting the log icon in the bottom left of this screen. + - *Statistics Collection* - This configuration option can be used to control how detailed the statistics collected by the south service are. There are three options that may be selected + + +-----------------+ + | |stats_options| | + +-----------------+ + + The default, *per asset & per service* setting will collect one statistic per asset ingested and an overall statistic for the entire service. The *per service* option just collects the overall service ingest statistics and the *per asset* option just collects the statistics for each asset and not for the entire service. + +.. note:: + + The *Statistics Collection* setting will not remove any existing statistics, these will remain and remain to be represented in the statistics history. This only impacts new values that are collected. It is recommended that this be set before a service is started for the first time if the desire it to have no statistics values recorded for either assets or the service. 
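The *Statistics Collection* item lives in the service's advanced configuration category, so it can also be changed through the configuration API. A hedged Python sketch, assuming the standard category item update endpoint, a local Fledge instance and a south service named Sine:

.. code-block:: python

    # Change the Statistics Collection regime of a south service named "Sine".
    # South services keep their advanced items in a "<service name>Advanced" category.
    import requests

    BASE = "http://localhost:8081/fledge"
    service = "Sine"  # illustrative service name

    resp = requests.put(
        f"{BASE}/category/{service}Advanced/statistics",
        json={"value": "per service"},  # or "per asset", "per asset & service"
    )
    print(resp.status_code, resp.json())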
+ Tuning Buffer Usage ------------------- diff --git a/python/fledge/common/configuration_manager.py b/python/fledge/common/configuration_manager.py index d518d063ad..c05bba4465 100644 --- a/python/fledge/common/configuration_manager.py +++ b/python/fledge/common/configuration_manager.py @@ -39,7 +39,7 @@ _valid_type_strings = sorted(['boolean', 'integer', 'float', 'string', 'IPv4', 'IPv6', 'X509 certificate', 'password', 'JSON', 'URL', 'enumeration', 'script', 'code', 'northTask', 'ACL']) _optional_items = sorted(['readonly', 'order', 'length', 'maximum', 'minimum', 'rule', 'deprecated', 'displayName', - 'validity', 'mandatory']) + 'validity', 'mandatory', 'group']) RESERVED_CATG = ['South', 'North', 'General', 'Advanced', 'Utilities', 'rest_api', 'Security', 'service', 'SCHEDULER', 'SMNTR', 'PURGE_READ', 'Notifications'] @@ -226,11 +226,11 @@ async def _merge_category_vals(self, category_val_new, category_val_storage, kee for item_name_new, item_val_new in category_val_new_copy.items(): item_val_storage = category_val_storage_copy.get(item_name_new) if item_val_storage is not None: - for o_attr in item_val_storage.keys(): - # Merge optional attributes - if o_attr in _optional_items: - item_val_new[o_attr] = item_val_storage.get(o_attr) - item_val_new['value'] = item_val_storage.get('value') + if item_val_new['type'] == item_val_storage.get('type'): + item_val_new['value'] = item_val_storage.get('value') + else: + if 'value' not in item_val_new: + item_val_new['value'] = item_val_new['default'] category_val_storage_copy.pop(item_name_new) if "deprecated" in item_val_new and item_val_new['deprecated'] == 'true': audit = AuditLogger(self._storage) @@ -263,7 +263,8 @@ async def _validate_category_val(self, category_name, category_val, set_value_va .format(category_name, item_name, type(item_val))) optional_item_entries = {'readonly': 0, 'order': 0, 'length': 0, 'maximum': 0, 'minimum': 0, - 'deprecated': 0, 'displayName': 0, 'rule': 0, 'validity': 0, 'mandatory': 0} + 'deprecated': 0, 'displayName': 0, 'rule': 0, 'validity': 0, 'mandatory': 0, + 'group': 0} expected_item_entries = {'description': 0, 'default': 0, 'type': 0} if require_entry_value: @@ -327,7 +328,7 @@ def get_entry_val(k): entry_val)) is False: raise ValueError('For {} category, entry value must be an integer or float for item name ' '{}; got {}'.format(category_name, entry_name, type(entry_val))) - elif entry_name == 'rule' or entry_name == 'displayName' or entry_name == 'validity': + elif entry_name in ('displayName', 'group', 'rule', 'validity'): if not isinstance(entry_val, str): raise ValueError('For {} category, entry value must be string for item name {}; got {}' .format(category_name, entry_name, type(entry_val))) @@ -975,7 +976,7 @@ async def set_optional_value_entry(self, category_name, item_name, optional_entr new_value_entry)) is False: raise ValueError('For {} category, entry value must be an integer or float for optional item ' '{}; got {}'.format(category_name, optional_entry_name, type(new_value_entry))) - elif optional_entry_name == 'rule' or optional_entry_name == 'displayName' or optional_entry_name == 'validity': + elif optional_entry_name in ('displayName', 'group', 'rule', 'validity'): if not isinstance(new_value_entry, str): raise ValueError('For {} category, entry value must be string for optional item {}; got {}' .format(category_name, optional_entry_name, type(new_value_entry))) diff --git a/python/fledge/common/storage_client/payload_builder.py 
b/python/fledge/common/storage_client/payload_builder.py index 0aa74cae69..6fb1f025df 100644 --- a/python/fledge/common/storage_client/payload_builder.py +++ b/python/fledge/common/storage_client/payload_builder.py @@ -337,6 +337,17 @@ def DISTINCT(cls, cols): cls.query_payload["return"] = cols return cls + @classmethod + def MODIFIER(cls, arg): + if arg is None: + return cls + if not isinstance(arg, list): + return cls + if len(arg) == 0: + return cls + cls.query_payload["modifier"] = arg + return cls + @classmethod def UPDATE_TABLE(cls, tbl_name): return cls.FROM(tbl_name) diff --git a/python/fledge/common/web/middleware.py b/python/fledge/common/web/middleware.py index 89f4082828..24e5570c80 100644 --- a/python/fledge/common/web/middleware.py +++ b/python/fledge/common/web/middleware.py @@ -68,7 +68,7 @@ async def middleware(request): token = request.headers.get('authorization') except: token = request.headers.get('Authorization', None) - + if token: try: # validate the token and get user id @@ -82,6 +82,8 @@ async def middleware(request): request.token = token # set if user is admin request.user_is_admin = True if int(request.user["role_id"]) == 1 else False + # validate request path + await validate_requests(request) except(User.InvalidToken, User.TokenExpired) as e: raise web.HTTPUnauthorized(reason=e) except (jwt.DecodeError, jwt.ExpiredSignatureError) as e: @@ -154,3 +156,39 @@ def handle_api_exception(ex, _class=None, if_trace=0): return web.Response(status=500, body=json.dumps({'error': err_msg}).encode('utf-8'), content_type='application/json') + + +async def validate_requests(request): + """ + a) With "view" based user role id=3 only + - read access operations (GET calls) + - change profile (PUT call) + - logout (PUT call) + b) With "data-view" based user role id=4 only + - ping (GET call) + - browser asset read operation (GET call) + - service (GET call) + - statistics, statistics history, statistics rate (GET call) + - user profile (GET call) + - user roles (GET call) + - change profile (PUT call) + - logout (PUT call) + """ + user_id = request.user['id'] + if int(request.user["role_id"]) == 3 and request.method != 'GET': + supported_endpoints = ['/fledge/user', '/fledge/user/{}/password'.format(user_id), '/logout'] + if not str(request.rel_url).endswith(tuple(supported_endpoints)): + raise web.HTTPForbidden + elif int(request.user["role_id"]) == 4: + if request.method == 'GET': + supported_endpoints = ['/fledge/asset', '/fledge/ping', '/fledge/statistics', + '/fledge/user?id={}'.format(user_id), '/fledge/user/role'] + if not (str(request.rel_url).startswith(tuple(supported_endpoints) + ) or str(request.rel_url).endswith('/fledge/service')): + raise web.HTTPForbidden + elif request.method == 'PUT': + supported_endpoints = ['/fledge/user', '/fledge/user/{}/password'.format(user_id), '/logout'] + if not str(request.rel_url).endswith(tuple(supported_endpoints)): + raise web.HTTPForbidden + else: + raise web.HTTPForbidden diff --git a/python/fledge/services/core/api/asset_tracker.py b/python/fledge/services/core/api/asset_tracker.py index f4d9903b88..5f1828c2f0 100644 --- a/python/fledge/services/core/api/asset_tracker.py +++ b/python/fledge/services/core/api/asset_tracker.py @@ -104,9 +104,15 @@ async def deprecate_asset_track_entry(request: web.Request) -> web.Response: if response[0]['deprecated_ts'] == "": # Update deprecated_ts column entry current_time = common_utils.local_timestamp() + if event_name in ('Ingest', 'store'): + audit_event_name = "Ingest & store" + 
and_where_val = ['event', 'in', ["Ingest", "store"]] + else: + audit_event_name = event_name + and_where_val = ['event', '=', event_name] update_payload = PayloadBuilder().SET(deprecated_ts=current_time).WHERE( ['service', '=', svc_name]).AND_WHERE(['asset', '=', asset_name]).AND_WHERE( - ['event', '=', event_name]).AND_WHERE(['deprecated_ts', 'isnull']).payload() + and_where_val).AND_WHERE(['deprecated_ts', 'isnull']).payload() update_result = await storage_client.update_tbl("asset_tracker", update_payload) if 'response' in update_result: response = update_result['response'] @@ -115,22 +121,22 @@ async def deprecate_asset_track_entry(request: web.Request) -> web.Response: svc_name, asset_name, event_name)) try: audit = AuditLogger(storage_client) - audit_details = {'asset': asset_name, 'service': svc_name, 'event' : event_name} + audit_details = {'asset': asset_name, 'service': svc_name, 'event': audit_event_name} await audit.information('ASTDP', audit_details) except: - _logger.warning("Failed to log the audit entry for {} deprecation".format(asset_name)) + _logger.warning("Failed to log the audit entry for {} deprecation.".format(asset_name)) pass else: raise StorageServerError else: - raise KeyError('Asset record already deprecated.') + raise KeyError('{} asset record already deprecated.'.format(asset_name)) else: raise ValueError('No record found in asset tracker for given service: {} asset: {} event: {}'.format( svc_name, asset_name, event_name)) else: raise StorageServerError except StorageServerError as err: - msg = str(err) + msg = err.error raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": "Storage error: {}".format(msg)})) except KeyError as err: msg = str(err) @@ -142,8 +148,9 @@ async def deprecate_asset_track_entry(request: web.Request) -> web.Response: msg = str(ex) raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) else: - _logger.info("Asset '{}' has been deprecated".format(asset_name)) - return web.json_response({'success': "Asset record entry has been deprecated."}) + msg = "For {} event, {} asset record entry has been deprecated.".format(event_name, asset_name) + _logger.info(msg) + return web.json_response({'success': msg}) async def get_datapoint_usage(request: web.Request) -> web.Response: diff --git a/python/fledge/services/core/api/browser.py b/python/fledge/services/core/api/browser.py index 0102f4d4fe..56f49ffa0d 100644 --- a/python/fledge/services/core/api/browser.py +++ b/python/fledge/services/core/api/browser.py @@ -65,9 +65,11 @@ def setup(app): """ Add the routes for the API endpoints supported by the data browser """ app.router.add_route('GET', '/fledge/asset', asset_counts) + app.router.add_route('GET', '/fledge/asset/timespan', asset_timespan) app.router.add_route('GET', '/fledge/asset/{asset_code}', asset) app.router.add_route('GET', '/fledge/asset/{asset_code}/latest', asset_latest) app.router.add_route('GET', '/fledge/asset/{asset_code}/summary', asset_all_readings_summary) + app.router.add_route('GET', '/fledge/asset/{asset_code}/timespan', asset_reading_timespan) app.router.add_route('GET', '/fledge/asset/{asset_code}/{reading}', asset_reading) app.router.add_route('GET', '/fledge/asset/{asset_code}/{reading}/summary', asset_summary) app.router.add_route('GET', '/fledge/asset/{asset_code}/{reading}/series', asset_averages) @@ -75,7 +77,7 @@ def setup(app): app.router.add_route('GET', '/fledge/asset/{asset_code}/{reading}/bucket/{bucket_size}', asset_readings_with_bucket_size) 
app.router.add_route('GET', '/fledge/structure/asset', asset_structure) - # The developer Purge by Asset naem API entry points + # The developer Purge by Asset name API entry points app.router.add_route('DELETE', '/fledge/asset', asset_purge_all) app.router.add_route('DELETE', '/fledge/asset/{asset_code}', asset_purge) @@ -174,13 +176,20 @@ async def asset(request): curl -sX GET "http://localhost:8081/fledge/asset/fogbench_humidity?limit=1&skip=1&order=asc curl -sX GET "http://localhost:8081/fledge/asset/fogbench_humidity?limit=1&skip=1&order=desc curl -sX GET http://localhost:8081/fledge/asset/fogbench_humidity?seconds=60 + curl -sX GET http://localhost:8081/fledge/asset/fogbench_humidity?seconds=60&previous=600 """ asset_code = request.match_info.get('asset_code', '') _select = PayloadBuilder().SELECT(("reading", "user_ts")).ALIAS("return", ("user_ts", "timestamp")).chain_payload() _where = PayloadBuilder(_select).WHERE(["asset_code", "=", asset_code]).chain_payload() - if 'seconds' in request.query or 'minutes' in request.query or 'hours' in request.query: + if 'previous' in request.query and ( + 'seconds' in request.query or 'minutes' in request.query or 'hours' in request.query): + _and_where = where_window(request, _where) + elif 'seconds' in request.query or 'minutes' in request.query or 'hours' in request.query: _and_where = where_clause(request, _where) + elif 'previous' in request.query: + msg = "the parameter previous can only be given if one of seconds, minutes or hours is also given" + raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) else: # Add the order by and limit, offset clause _and_where = prepare_limit_skip_payload(request, _where) @@ -277,8 +286,14 @@ async def asset_reading(request): _select = PayloadBuilder().SELECT(("user_ts", ["reading", reading])) \ .ALIAS("return", ("user_ts", "timestamp"), ("reading", reading)).chain_payload() _where = PayloadBuilder(_select).WHERE(["asset_code", "=", asset_code]).chain_payload() - if 'seconds' in request.query or 'minutes' in request.query or 'hours' in request.query: + if 'previous' in request.query and ( + 'seconds' in request.query or 'minutes' in request.query or 'hours' in request.query): + _and_where = where_window(request, _where) + elif 'seconds' in request.query or 'minutes' in request.query or 'hours' in request.query: _and_where = where_clause(request, _where) + elif 'previous' in request.query: + msg = "the parameter previous can only be given if one of seconds, minutes or hours is also given" + raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) else: # Add the order by and limit, offset clause _and_where = prepare_limit_skip_payload(request, _where) @@ -324,19 +339,26 @@ async def asset_all_readings_summary(request): asset_code = request.match_info.get('asset_code', '') # TODO: Use only the latest asset read to determine the data points to use. 
This # avoids reading every single reading into memory and creating a very big result set See FOGL-2635 - payload = PayloadBuilder().SELECT("reading").WHERE(["asset_code", "=", asset_code]).LIMIT(1).ORDER_BY(["user_ts", "desc"]).payload() + payload = PayloadBuilder().SELECT("reading").WHERE( + ["asset_code", "=", asset_code]).LIMIT(1).ORDER_BY(["user_ts", "desc"]).payload() _readings = connect.get_readings_async() results = await _readings.query(payload) if not results['rows']: - raise web.HTTPNotFound(reason="{} asset_code not found".format(asset_code)) + raise KeyError("{} asset_code not found".format(asset_code)) # TODO: FOGL-1768 when support available from storage layer then avoid multiple calls # Find keys in readings reading_keys = list(results['rows'][-1]['reading'].keys()) rows = [] _where = PayloadBuilder().WHERE(["asset_code", "=", asset_code]).chain_payload() - if 'seconds' in request.query or 'minutes' in request.query or 'hours' in request.query: + if 'previous' in request.query and ( + 'seconds' in request.query or 'minutes' in request.query or 'hours' in request.query): + _and_where = where_window(request, _where) + elif 'seconds' in request.query or 'minutes' in request.query or 'hours' in request.query: _and_where = where_clause(request, _where) + elif 'previous' in request.query: + msg = "the parameter previous can only be given if one of seconds, minutes or hours is also given" + raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) else: # Add limit, offset clause _and_where = prepare_limit_skip_payload(request, _where) @@ -501,8 +523,14 @@ async def asset_averages(request): ('reading', 'avg', 'average')).chain_payload() _where = PayloadBuilder(_aggregate).WHERE(["asset_code", "=", asset_code]).chain_payload() - if 'seconds' in request.query or 'minutes' in request.query or 'hours' in request.query: + if 'previous' in request.query and ( + 'seconds' in request.query or 'minutes' in request.query or 'hours' in request.query): + _and_where = where_window(request, _where) + elif 'seconds' in request.query or 'minutes' in request.query or 'hours' in request.query: _and_where = where_clause(request, _where) + elif 'previous' in request.query: + msg = "the parameter previous can only be given if one of seconds, minutes or hours is also given" + raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) else: # Add LIMIT, OFFSET _and_where = prepare_limit_skip_payload(request, _where) @@ -554,6 +582,33 @@ def where_clause(request, where): return payload +def where_window(request, where): + val = 0 + previous = 0 + try: + if 'seconds' in request.query and request.query['seconds'] != '': + val = int(request.query['seconds']) + previous = int(request.query['previous']) + elif 'minutes' in request.query and request.query['minutes'] != '': + val = int(request.query['minutes']) * 60 + previous = int(request.query['previous']) * 60 + elif 'hours' in request.query and request.query['hours'] != '': + val = int(request.query['hours']) * 60 * 60 + previous = int(request.query['previous']) * 60 * 60 + + if val < 0: + raise ValueError + except ValueError: + raise web.HTTPBadRequest(reason="Time must be a positive integer") + + # if no time units then NO AND_WHERE condition applied + if val == 0: + return where + + payload = PayloadBuilder(where).AND_WHERE(['user_ts', 'newer', val + previous]).chain_payload() + return PayloadBuilder(payload).AND_WHERE(['user_ts', 'older', previous]).chain_payload() + + async def asset_datapoints_with_bucket_size(request: 
web.Request) -> web.Response: """ Retrieve datapoints for an asset. @@ -591,22 +646,22 @@ async def asset_datapoints_with_bucket_size(request: web.Request) -> web.Respons raise ValueError('length must be a positive integer') length_found = True # No user start parameter: decrease default start by the user provided length - if start_found == False: + if start_found is False: start = ts - length use_microseconds = False # Check subsecond request in start start_micros = "{:.6f}".format(start).split('.')[1] - if start_found == True and start_micros != '000000': + if start_found is True and start_micros != '000000': use_microseconds = True else: # No decimal part, check subsecond request in length start_micros = "{:.6f}".format(length).split('.')[1] - if length_found == True and start_micros != '000000': + if length_found is True and start_micros != '000000': use_microseconds = True # Build UTC datetime start/stop from start timestamp with/without microseconds - if use_microseconds == False: + if use_microseconds is False: start_date = datetime.datetime.fromtimestamp(start, datetime.timezone.utc).strftime("%Y-%m-%d %H:%M:%S") stop_date = datetime.datetime.fromtimestamp(start + length, datetime.timezone.utc).strftime("%Y-%m-%d %H:%M:%S") else: @@ -682,7 +737,7 @@ async def asset_readings_with_bucket_size(request: web.Request) -> web.Response: if length < 0: raise ValueError('length must be a positive integer') # No user start parameter: decrease default start by the user provided length - if start_found == False: + if start_found is False: start = ts - length # Build datetime from timestamp @@ -746,10 +801,9 @@ async def asset_structure(request): } } """ - payload = PayloadBuilder().GROUP_BY("asset_code").payload() - results = {} try: + payload = PayloadBuilder().ORDER_BY(["asset_code"]).payload() _readings = connect.get_readings_async() results = await _readings.query(payload) rows = results['rows'] @@ -783,11 +837,13 @@ async def asset_structure(request): # The following two routines are not really browsing data but this is probably a logical # place to put them as they share the same URL stem + + async def asset_purge_all(request): """ Purge all the assets for which we have recorded readings Returns: - json result with details of assets putge + json result with details of assets purge :Example: curl -sX DELETE http://localhost:8081/fledge/asset @@ -810,7 +866,7 @@ async def asset_purge_all(request): { "start_time": start_time, "end_time": end_time, - "rowsRemoved": results['purged'] }) + "rowsRemoved": results['purged']}) except KeyError: msg = results['message'] raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) @@ -850,7 +906,7 @@ async def asset_purge(request): "start_time": start_time, "end_time": end_time, "rowsRemoved": results['purged'], - "asset" : asset_code }) + "asset": asset_code}) except KeyError: msg = results['message'] raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) @@ -860,3 +916,60 @@ async def asset_purge(request): else: return web.json_response(results) + +async def asset_timespan(request): + """ + Return the timespan of the buffered readings for each asset. 
The returned data includes the timestamp + of the oldest and newest reading for each asset that we hold in the buffer + + :Example: + curl -sX GET http://localhost:8081/fledge/asset/timespan + """ + try: + payload = PayloadBuilder().AGGREGATE(["min", "user_ts"], ["max", "user_ts"]).GROUP_BY("asset_code") \ + .ALIAS('aggregate', ('user_ts', 'min', 'oldest'), ('user_ts', 'max', 'newest')).payload() + # Call storage service + _readings = connect.get_readings_async() + results = await _readings.query(payload) + response = results['rows'] + except (KeyError, IndexError) as err: + msg = str(err) + raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) + except (TypeError, ValueError) as err: + msg = str(err) + raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) + except Exception as exc: + msg = str(exc) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) + else: + return web.json_response(response) + + +async def asset_reading_timespan(request): + """ + Return the timespan of the buffered readings for the given asset . The returned data includes the timestamp + of the oldest and newest reading for the asset that we hold in the buffer + + :Example: + curl -sX GET http://localhost:8081/fledge/asset/sinusoid/timespan + """ + try: + asset_code = request.match_info.get('asset_code', '') + payload = PayloadBuilder().WHERE( + ["asset_code", "=", asset_code]).AGGREGATE(["min", "user_ts"], ["max", "user_ts"]).ALIAS( + 'aggregate', ('user_ts', 'min', 'oldest'), ('user_ts', 'max', 'newest')).payload() + # Call storage service + _readings = connect.get_readings_async() + results = await _readings.query(payload) + response = results['rows'][0] + except (KeyError, IndexError) as err: + msg = str(err) + raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg})) + except (TypeError, ValueError) as err: + msg = str(err) + raise web.HTTPBadRequest(reason=msg, body=json.dumps({"message": msg})) + except Exception as exc: + msg = str(exc) + raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg})) + else: + return web.json_response(response) diff --git a/python/fledge/services/core/api/task.py b/python/fledge/services/core/api/task.py index 819518d736..18860e549e 100644 --- a/python/fledge/services/core/api/task.py +++ b/python/fledge/services/core/api/task.py @@ -159,11 +159,8 @@ async def add_task(request): plugin_module_path = "{}/python/fledge/plugins/{}/{}".format(_FLEDGE_ROOT, task_type, plugin) plugin_info = common.load_and_fetch_python_plugin_info(plugin_module_path, plugin, task_type) plugin_config = plugin_info['config'] - script = '["tasks/north"]' - process_name = 'north' except FileNotFoundError as ex: # Checking for C-type plugins - script = '["tasks/north_c"]' plugin_info = apiutils.get_plugin_info(plugin, dir=task_type) if not plugin_info: msg = "Plugin {} does not appear to be a valid plugin".format(plugin) @@ -181,10 +178,10 @@ async def add_task(request): _logger.error(msg) return web.HTTPBadRequest(reason=msg) plugin_config = plugin_info['config'] - process_name = 'north_c' if not plugin_config: _logger.exception("Plugin %s import problem from path %s. 
%s", plugin, plugin_module_path, str(ex)) - raise web.HTTPNotFound(reason='Plugin "{}" import problem from path "{}"'.format(plugin, plugin_module_path)) + raise web.HTTPNotFound(reason='Plugin "{}" import problem from path "{}"'.format(plugin, + plugin_module_path)) except TypeError as ex: raise web.HTTPBadRequest(reason=str(ex)) except Exception as ex: @@ -208,7 +205,6 @@ async def add_task(request): _logger.exception(msg) raise web.HTTPBadRequest(reason=msg) - # Check whether category name already exists category_info = await config_mgr.get_category_all_items(category_name=name) if category_info is not None: @@ -219,6 +215,9 @@ if count != 0: raise web.HTTPBadRequest(reason='A north instance with this name already exists') + # Always run with C based sending process task + process_name = 'north_c' + script = '["tasks/north_c"]' # Check that the process name is not already registered count = await check_scheduled_processes(storage, process_name) if count == 0: # Create the scheduled process entry for the new task diff --git a/python/fledge/services/core/user_model.py b/python/fledge/services/core/user_model.py index 4f13750377..289526766c 100644 --- a/python/fledge/services/core/user_model.py +++ b/python/fledge/services/core/user_model.py @@ -259,7 +259,9 @@ async def get(cls, uid=None, username=None): async def refresh_token_expiry(cls, token): storage_client = connect.get_storage_async() exp = datetime.now() + timedelta(seconds=JWT_EXP_DELTA_SECONDS) - payload = PayloadBuilder().SET(token_expiration=str(exp)).WHERE(['token', '=', token]).payload() + """ MODIFIER with allowzero is passed in payload so that storage returns rows_affected 0 in any case """ + payload = PayloadBuilder().SET(token_expiration=str(exp)).WHERE(['token', '=', token] + ).MODIFIER(["allowzero"]).payload() await storage_client.update_tbl("user_logins", payload) @classmethod diff --git a/python/requirements.txt b/python/requirements.txt index d40351dbd9..47cd3e821b 100644 --- a/python/requirements.txt +++ b/python/requirements.txt @@ -1,7 +1,8 @@ # Common - REST interface aiohttp==3.8.1 aiohttp_cors==0.7.0 -cchardet==2.1.4 +cchardet==2.1.4;python_version<"3.9" +cchardet==2.1.7;python_version>="3.9" yarl==1.7.2 pyjwt==1.6.4 diff --git a/requirements.sh b/requirements.sh index 5286a4a564..743889acfa 100755 --- a/requirements.sh +++ b/requirements.sh @@ -158,7 +158,8 @@ if [[ $YUM_PLATFORM = true ]]; then yum install -y rh-postgresql13 yum install -y rh-postgresql13-postgresql-devel else - yum install -y python36 + yum install -y python3 + yum install -y python3-devel yum install -y postgresql yum install -y postgresql-devel fi diff --git a/scripts/common/get_engine_management.sh b/scripts/common/get_engine_management.sh index a67c9820c7..a724270101 100755 --- a/scripts/common/get_engine_management.sh +++ b/scripts/common/get_engine_management.sh @@ -25,7 +25,13 @@ get_engine_management() { storage_info=( $($FLEDGE_ROOT/scripts/services/storage --plugin) ) if [ "${storage_info[0]}" != "$1" ]; then - echo "" + # Not the storage plugin, maybe being used for readings + storage_info=( $($FLEDGE_ROOT/scripts/services/storage --readingplugin) ) + if [ "${storage_info[0]}" != "$1" ]; then + echo "" + else + echo "${storage_info[1]}" + fi else echo "${storage_info[1]}" fi diff --git a/scripts/common/get_readings_plugin.sh b/scripts/common/get_readings_plugin.sh new file mode 100755 index 0000000000..49b98ee030 --- /dev/null +++ b/scripts/common/get_readings_plugin.sh @@ -0,0 +1,32 @@ +#!/bin/bash +
+##-------------------------------------------------------------------- +## Copyright (c) 2022 OSIsoft, LLC +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +##-------------------------------------------------------------------- + +__author__="Massimiliano Pinto" +__version__="1.0" + +# Get the readings database plugin from the Storage microservice cache file +get_readings_plugin() { +if [ "${FLEDGE_ROOT}" ]; then + $FLEDGE_ROOT/scripts/services/storage --readingsPlugin | cut -d' ' -f1 +elif [ -x scripts/services/storage ]; then + scripts/services/storage --readingsPlugin | cut -d' ' -f1 +else + logger "Unable to find Fledge storage script." + exit 1 +fi +} diff --git a/scripts/fledge b/scripts/fledge index e3f3a875cd..74af95db0f 100755 --- a/scripts/fledge +++ b/scripts/fledge @@ -587,17 +587,17 @@ get_fledge_version() { ## Get Fledge rest API URL ## get_rest_api_url() { - pid_file=${FLEDGE_DATA}/var/run/fledge.core.pid export PYTHONPATH=${FLEDGE_ROOT} - if [[ -f ${pid_file} ]]; then - REST_API_URL=`cat ${pid_file} | python3 -m scripts.common.json_parse get_rest_api_url_from_pid` + # Check whether pid_file exists and its contents are not empty + if [[ -s ${pid_file} ]]; then + REST_API_URL=$(cat ${pid_file} | python3 -m scripts.common.json_parse get_rest_api_url_from_pid) fi # Sets a default value if it is not possible to determine the proper value using the pid file if [ !
"${REST_API_URL}" ]; then - export REST_API_URL=http://localhost:8081 + export REST_API_URL="http://localhost:8081" fi } diff --git a/scripts/plugins/storage/postgres/downgrade/58.sql b/scripts/plugins/storage/postgres/downgrade/58.sql new file mode 100644 index 0000000000..49af36659b --- /dev/null +++ b/scripts/plugins/storage/postgres/downgrade/58.sql @@ -0,0 +1,4 @@ +-- Delete roles +DELETE FROM fledge.roles WHERE name IN ('view','data-view'); +-- Reset auto increment +ALTER SEQUENCE fledge.roles_id_seq RESTART WITH 3 \ No newline at end of file diff --git a/scripts/plugins/storage/postgres/init.sql b/scripts/plugins/storage/postgres/init.sql index 5c4c8160b4..3098557080 100644 --- a/scripts/plugins/storage/postgres/init.sql +++ b/scripts/plugins/storage/postgres/init.sql @@ -848,7 +848,9 @@ GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA fledge TO PUBLIC; DELETE FROM fledge.roles; INSERT INTO fledge.roles ( name, description ) VALUES ('admin', 'All CRUD privileges'), - ('user', 'All CRUD operations and self profile management'); + ('user', 'All CRUD operations and self profile management'), + ('view', 'Only to view the configuration'), + ('data-view', 'Only read the data in buffer'); -- Users diff --git a/scripts/plugins/storage/postgres/upgrade/59.sql b/scripts/plugins/storage/postgres/upgrade/59.sql new file mode 100644 index 0000000000..d475631b7f --- /dev/null +++ b/scripts/plugins/storage/postgres/upgrade/59.sql @@ -0,0 +1,4 @@ +-- Roles +INSERT INTO fledge.roles ( name, description ) + VALUES ('view', 'Only to view the configuration'), + ('data-view', 'Only read the data in buffer'); \ No newline at end of file diff --git a/scripts/plugins/storage/sqlite/downgrade/58.sql b/scripts/plugins/storage/sqlite/downgrade/58.sql new file mode 100644 index 0000000000..5d4996ef28 --- /dev/null +++ b/scripts/plugins/storage/sqlite/downgrade/58.sql @@ -0,0 +1,5 @@ +-- Delete roles +DELETE FROM fledge.roles WHERE name IN ('view','data-view'); +-- Reset auto increment +-- You cannot use ALTER TABLE for that. The autoincrement counter is stored in a separate table named "sqlite_sequence". 
You can modify the value there +UPDATE sqlite_sequence SET seq=1 WHERE name="roles"; diff --git a/scripts/plugins/storage/sqlite/init.sql b/scripts/plugins/storage/sqlite/init.sql index 8e36d612b9..105912711f 100644 --- a/scripts/plugins/storage/sqlite/init.sql +++ b/scripts/plugins/storage/sqlite/init.sql @@ -638,7 +638,9 @@ CREATE TABLE fledge.acl_usage ( DELETE FROM fledge.roles; INSERT INTO fledge.roles ( name, description ) VALUES ('admin', 'All CRUD privileges'), - ('user', 'All CRUD operations and self profile management'); + ('user', 'All CRUD operations and self profile management'), + ('view', 'Only to view the configuration'), + ('data-view', 'Only read the data in buffer'); -- Users DELETE FROM fledge.users; diff --git a/scripts/plugins/storage/sqlite/upgrade/59.sql b/scripts/plugins/storage/sqlite/upgrade/59.sql new file mode 100644 index 0000000000..d475631b7f --- /dev/null +++ b/scripts/plugins/storage/sqlite/upgrade/59.sql @@ -0,0 +1,4 @@ +-- Roles +INSERT INTO fledge.roles ( name, description ) + VALUES ('view', 'Only to view the configuration'), + ('data-view', 'Only read the data in buffer'); \ No newline at end of file diff --git a/scripts/plugins/storage/sqlitelb.sh b/scripts/plugins/storage/sqlitelb.sh index f7f8a05308..16189cfb9e 100755 --- a/scripts/plugins/storage/sqlitelb.sh +++ b/scripts/plugins/storage/sqlitelb.sh @@ -45,7 +45,7 @@ if [ "${DEFAULT_SQLITE_DB_FILE_READINGS_FLAG}" ]; then export DEFAULT_SQLITE_DB_FILE_READINGS="${DEFAULT_SQLITE_DB_FILE_READINGS_BASE}.db" fi -USAGE="Usage: `basename ${0}` {start|stop|status|init|reset|help}" +USAGE="Usage: `basename ${0}` {start|stop|status|init|reset|purge|help}" # Check FLEDGE_ROOT if [ -z ${FLEDGE_ROOT+x} ]; then @@ -189,8 +189,8 @@ sqlite_start() { FOUND_SCHEMAS=`${SQLITE_SQL} ${DEFAULT_SQLITE_DB_FILE_READINGS} "ATTACH DATABASE '${DEFAULT_SQLITE_DB_FILE_READINGS}' AS 'readings'; SELECT name FROM sqlite_master WHERE type='table'"` if [ ! "${FOUND_SCHEMAS}" ]; then - # Create the readings database - sqlite_create_db_readings "$1" "immediate" + # Reset if not found. + sqlite_reset_db_readings "$1" "immediate" fi fi @@ -244,20 +244,15 @@ sqlite_reset_db_fledge() { sqlite_log "info" "Building the metadata for the Fledge Plugin '${PLUGIN}' ..." "logonly" "pretty" fi - if [[ -f $DEFAULT_SQLITE_DB_FILE && $2 != "immediate" ]]; then - # Remove service schema files as per name - schema=$(${SQLITE_SQL} ${DEFAULT_SQLITE_DB_FILE} 'select name from service_schema;') + # 0- Remove service schema files + schema=`sqlite3 ${DEFAULT_SQLITE_DB_FILE} 'select name from service_schema;'` for f in $schema; do - echo "Removing $f service schema..." - echo "'${FLEDGE_DATA}/${f}.db'" - rm -f ${FLEDGE_DATA}/${f}.db* - echo "Removal of $f service schema Done!" - if [ -d "${FLEDGE_DATA}/buckets" ]; then + rm ${FLEDGE_DATA}/${f}.db* + if [ -d "${FLEDGE_DATA}/buckets" ]; then echo "Removed user data from ${FLEDGE_DATA}/buckets" rm -rf ${FLEDGE_DATA}/buckets - fi + fi done - fi # 1- Drop all databases in DEFAULT_SQLITE_DB_FILE if [ -f "${DEFAULT_SQLITE_DB_FILE}" ]; then @@ -347,7 +342,9 @@ sqlite_status() { if [[ "$1" == "noisy" ]]; then sqlite_log "info" "SQLite 3 database '${DEFAULT_SQLITE_DB_FILE}' ready." "all" "pretty" else - sqlite_log "info" "SQLite 3 database '${DEFAULT_SQLITE_DB_FILE}' ready." "logonly" "pretty" + if [[ "$1" != "skip" ]]; then + sqlite_log "info" "SQLite 3 database '${DEFAULT_SQLITE_DB_FILE}' ready." 
"logonly" "pretty" + fi fi echo "0" else @@ -372,7 +369,9 @@ sqlite_status_readings() { if [[ "$1" == "noisy" ]]; then sqlite_log "info" "SQLite 3 database '${DEFAULT_SQLITE_DB_FILE_READINGS}' ready." "all" "pretty" else - sqlite_log "info" "SQLite 3 database '${DEFAULT_SQLITE_DB_FILE_READINGS}' ready." "logonly" "pretty" + if [[ "$1" != "skip" ]]; then + sqlite_log "info" "SQLite 3 database '${DEFAULT_SQLITE_DB_FILE_READINGS}' ready." "logonly" "pretty" + fi fi echo "0" else @@ -415,7 +414,7 @@ sqlite_schema_update() { ret_code=$? SET_VERSION_MSG="Fledge DB version not found in fledge.'${VERSION_TABLE}', setting version [${NEW_VERSION}]" - if [[ "$1" == "noisy" ]]; then + if [[ "$2" == "noisy" ]]; then sqlite_log "info" "${SET_VERSION_MSG}" "all" "pretty" else sqlite_log "info" "${SET_VERSION_MSG}" "logonly" "pretty" @@ -437,7 +436,9 @@ sqlite_schema_update() { fi else # Just log up-to-date - sqlite_log "info" "Fledge DB schema is up to date to version [${CURR_VER}]" "logonly" "pretty" + if [[ "$2" != "skip" ]]; then + sqlite_log "info" "Fledge DB schema is up to date to version [${CURR_VER}]" "logonly" "pretty" + fi return 0 fi fi @@ -544,23 +545,14 @@ if [[ ! -d ${FLEDGE_DATA} ]]; then fi # Extract plugin -engine_management=`get_engine_management $PLUGIN` +engine_management="false" # Settings if the database is managed by Fledge case "$engine_management" in "true") - MANAGED=true - - # Check if sqlitei3 is present in the expected path - # We don't need to manage SQLite3 db - # This will be removed in next commits - SQLITE_SQL="$FLEDGE_ROOT/plugins/storage/sqlite/bin/psql" - if ! [[ -x "${SQLITE_SQL}" ]]; then - sqlite_log "err" "SQLite program not found: the database server cannot be managed." "all" "pretty" - exit 1 - fi - - print_output="noisy" + # SQLite does not support managed storage. Ignore this option + print_output="silent" + MANAGED=false ;; "false") diff --git a/scripts/plugins/storage/sqlitelb/downgrade/58.sql b/scripts/plugins/storage/sqlitelb/downgrade/58.sql new file mode 100644 index 0000000000..5d4996ef28 --- /dev/null +++ b/scripts/plugins/storage/sqlitelb/downgrade/58.sql @@ -0,0 +1,5 @@ +-- Delete roles +DELETE FROM fledge.roles WHERE name IN ('view','data-view'); +-- Reset auto increment +-- You cannot use ALTER TABLE for that. The autoincrement counter is stored in a separate table named "sqlite_sequence". You can modify the value there +UPDATE sqlite_sequence SET seq=1 WHERE name="roles"; diff --git a/scripts/plugins/storage/sqlitelb/init.sql b/scripts/plugins/storage/sqlitelb/init.sql index 9721c1a438..8da2ee2443 100644 --- a/scripts/plugins/storage/sqlitelb/init.sql +++ b/scripts/plugins/storage/sqlitelb/init.sql @@ -1,5 +1,5 @@ ---------------------------------------------------------------------- --- Copyright (c) 2021 OSIsoft, LLC +-- Copyright (c) 2022 OSIsoft, LLC -- -- Licensed under the Apache License, Version 2.0 (the "License"); -- you may not use this file except in compliance with the License. 
@@ -519,7 +519,7 @@ CREATE INDEX tasks_ix1 CREATE TABLE fledge.omf_created_objects ( configuration_key character varying(255) NOT NULL, -- FK to fledge.configuration type_id integer NOT NULL, -- Identifies the specific PI Server type - asset_code character varying(50) NOT NULL, + asset_code character varying(255) NOT NULL, CONSTRAINT omf_created_objects_pkey PRIMARY KEY (configuration_key,type_id, asset_code), CONSTRAINT omf_created_objects_fk1 FOREIGN KEY (configuration_key) REFERENCES configuration (key) MATCH SIMPLE @@ -580,6 +580,22 @@ CREATE TABLE fledge.plugin_data ( data JSON NOT NULL DEFAULT '{}', CONSTRAINT plugin_data_pkey PRIMARY KEY (key) ); +-- Create packages table +CREATE TABLE fledge.packages ( + id uuid NOT NULL, -- PK + name character varying(255) NOT NULL, -- Package name + action character varying(10) NOT NULL, -- APT actions: + -- list + -- install + -- purge + -- update + status INTEGER NOT NULL, -- exit code + -- -1 - in-progress + -- 0 - success + -- Non-Zero - failed + log_file_uri character varying(255) NOT NULL, -- Package Log file relative path + CONSTRAINT packages_pkey PRIMARY KEY ( id ) ); + -- Create filters table CREATE TABLE fledge.filters ( name character varying(255) NOT NULL, @@ -622,7 +638,9 @@ CREATE TABLE fledge.acl_usage ( DELETE FROM fledge.roles; INSERT INTO fledge.roles ( name, description ) VALUES ('admin', 'All CRUD privileges'), - ('user', 'All CRUD operations and self profile management'); + ('user', 'All CRUD operations and self profile management'), + ('view', 'Only to view the configuration'), + ('data-view', 'Only read the data in buffer'); -- Users DELETE FROM fledge.users; diff --git a/scripts/plugins/storage/sqlitelb/upgrade/59.sql b/scripts/plugins/storage/sqlitelb/upgrade/59.sql new file mode 100644 index 0000000000..d475631b7f --- /dev/null +++ b/scripts/plugins/storage/sqlitelb/upgrade/59.sql @@ -0,0 +1,4 @@ +-- Roles +INSERT INTO fledge.roles ( name, description ) + VALUES ('view', 'Only to view the configuration'), + ('data-view', 'Only read the data in buffer'); \ No newline at end of file diff --git a/scripts/services/storage b/scripts/services/storage index de290c4774..dcea1179e9 100755 --- a/scripts/services/storage +++ b/scripts/services/storage @@ -41,6 +41,9 @@ else if [[ "$1" != "--plugin" ]]; then write_log "" "scripts.services.storage" "info" "Fledge storage microservice found in FLEDGE_ROOT location: ${FLEDGE_ROOT}" "logonly" "" fi + if [[ "$1" != "--readingsPlugin" ]]; then + write_log "" "scripts.services.storage" "info" "Fledge storage microservice found in FLEDGE_ROOT location: ${FLEDGE_ROOT}" "logonly" "" + fi fi fi @@ -56,6 +59,18 @@ if [[ "$1" != "--plugin" ]]; then ${pluginScriptPath}/${storagePlugin}.sh init ${FLEDGE_SCHEMA} ${managedEngine} fi +if [[ "$1" != "--readingsPlugin" ]]; then + # Get db schema + FLEDGE_VERSION_FILE="${FLEDGE_ROOT}/VERSION" + FLEDGE_SCHEMA=`cat ${FLEDGE_VERSION_FILE} | tr -d ' ' | grep -i "FLEDGE_SCHEMA=" | sed -e 's/\(.*\)=\(.*\)/\2/g'` + # Get storage engine + res=(`${storageExec} --readingsPlugin`) + storagePlugin=${res[0]} + managedEngine=${res[1]} + # Call plugin check: this will create database if not set yet + ${pluginScriptPath}/${storagePlugin}.sh init ${FLEDGE_SCHEMA} ${managedEngine} +fi + # Run storage service ${storageExec} "$@" exit 0 diff --git a/scripts/storage b/scripts/storage index b7908cc791..1c2f8c955e 100755 --- a/scripts/storage +++ b/scripts/storage @@ -26,6 +26,7 @@ # Include common code source "${FLEDGE_ROOT}/scripts/common/get_storage_plugin.sh" +source 
"${FLEDGE_ROOT}/scripts/common/get_readings_plugin.sh" PLUGIN_TO_USE="" @@ -40,6 +41,7 @@ storage_log() { PLUGIN_TO_USE=`get_storage_plugin` +READINGS_PLUGIN_TO_USE=`get_readings_plugin` if [[ "${#PLUGIN_TO_USE}" -eq 0 ]]; then storage_log "err" "Missing plugin from Fledge storage service" "all" "pretty" fi @@ -54,8 +56,39 @@ if [[ ! -x "$PLUGIN_SCRIPT" ]]; then fi -# Pass action in $1 and FLEDGE_VERSION in $2 -source "$PLUGIN_SCRIPT" $1 $2 +# The reset must be executed on both the storage and readings plugins, if the +# readings are stored in a different plugin. On the readings plugin this becomes +# a purge operation. +# +# The purge action is only executed via the readings plugin if defined, or +# the main storage plugin is not defined. + +if [[ "$1" == "reset" ]] ; then + # Pass action in $1 and FLEDGE_VERSION in $2 + source "$PLUGIN_SCRIPT" $1 $2 + + if [[ "$PLUGIN_TO_USE" != "$READINGS_PLUGIN_TO_USE" ]]; then + READINGS_SCRIPT="$FLEDGE_ROOT/scripts/plugins/storage/$READINGS_PLUGIN_TO_USE.sh" + if [[ -x "$READINGS_SCRIPT" ]]; then + source "$READINGS_SCRIPT" purge $2 + fi + fi +elif [[ "$1" == "purge" ]]; then + # Pass action in $1 and FLEDGE_VERSION in $2 + + if [[ "$PLUGIN_TO_USE" != "$READINGS_PLUGIN_TO_USE" ]]; then + READINGS_SCRIPT="$FLEDGE_ROOT/scripts/plugins/storage/$READINGS_PLUGIN_TO_USE.sh" + # Soem readings plugins, notably sqlitememory, do not have a script + if [[ -x "$READINGS_SCRIPT" ]]; then + source "$READINGS_SCRIPT" $1 $2 + fi + else + source "$PLUGIN_SCRIPT" $1 $2 + fi +else + # Pass any other operation to the storage plugin + source "$PLUGIN_SCRIPT" $1 $2 +fi # exit cannot be used because the script is sourced. #exit $? diff --git a/tests/README.rst b/tests/README.rst index 10f504d816..47c9db148e 100644 --- a/tests/README.rst +++ b/tests/README.rst @@ -43,7 +43,7 @@ You can test Fledge from your development environment or after installing Fledge To install the dependencies required to run python tests, run the following command from FLEDGE_ROOT :: - pip3 install -r python/requirements-test.txt --user + python3 -m pip install -r python/requirements-test.txt --user sudo apt install jq libxslt-dev diff --git a/tests/system/lab/test b/tests/system/lab/test index 39e53af2d9..137630d483 100755 --- a/tests/system/lab/test +++ b/tests/system/lab/test @@ -134,6 +134,9 @@ setup_north_pi_egress () { }, "DefaultAFLocation": { "value": "/PIlabSinelvl1/PIlabSinelvl2/PIlabSinelvl3" + }, + "Legacy": { + "value": "false" } } }' @@ -174,6 +177,9 @@ setup_north_pi_egress () { }, "DefaultAFLocation": { "value": "/PIlabSinelvl1/PIlabSinelvl2/PIlabSinelvl3" + }, + "Legacy": { + "value": "false" } } }' diff --git a/tests/system/python/README.rst b/tests/system/python/README.rst index aa60fcda7b..ee39edbb67 100644 --- a/tests/system/python/README.rst +++ b/tests/system/python/README.rst @@ -56,7 +56,7 @@ Test Prerequisites Install the following prerequisites to run a System test :: - pip3 install pytest + python3 -m pip install pytest Also, Fledge must have: diff --git a/tests/system/python/api/test_authentication.py b/tests/system/python/api/test_authentication.py index 338d8effb1..3eedb9d5bd 100644 --- a/tests/system/python/api/test_authentication.py +++ b/tests/system/python/api/test_authentication.py @@ -46,7 +46,7 @@ def change_to_auth_mandatory(reset_and_start_fledge, fledge_url, wait_time): class TestAuthenticationAPI: def test_login_username_regular_user(self, change_to_auth_mandatory, fledge_url, wait_time): - time.sleep(wait_time * 2) + time.sleep(wait_time * 3) conn = 
http.client.HTTPConnection(fledge_url) conn.request("POST", "/fledge/login", json.dumps({"username": "user", "password": "fledge"})) r = conn.getresponse() @@ -112,8 +112,10 @@ def test_get_roles(self, fledge_url): r = r.read().decode() jdoc = json.loads(r) assert {'roles': [{'description': 'All CRUD privileges', 'id': 1, 'name': 'admin'}, - {'description': 'All CRUD operations and self profile management', - 'id': 2, 'name': 'user'}]} == jdoc + {'description': 'All CRUD operations and self profile management', 'id': 2, 'name': 'user'}, + {'id': 3, 'name': 'view', 'description': 'Only to view the configuration'}, + {'id': 4, 'name': 'data-view', 'description': 'Only read the data in buffer'} + ]} == jdoc @pytest.mark.parametrize(("form_data", "expected_values"), [ ({"username": "any1", "password": "User@123", "real_name": "AJ", "description": "Nerd user"}, @@ -124,7 +126,17 @@ def test_get_roles(self, fledge_url): 'description': ''}, 'message': 'admin1 user has been created successfully'}), ({"username": "bogus", "password": "Fl3dG$", "role_id": 2}, {'user': {'userName': 'bogus', 'userId': 5, 'roleId': 2, 'accessMethod': 'any', 'realName': '', - 'description': ''}, 'message': 'bogus user has been created successfully'}) + 'description': ''}, 'message': 'bogus user has been created successfully'}), + ({"username": "view", "password": "V!3w@1", "role_id": 3, "real_name": "View", + "description": "Only to view the configuration"}, + {'user': { + 'userName': 'view', 'userId': 6, 'roleId': 3, 'accessMethod': 'any', 'realName': 'View', + 'description': 'Only to view the configuration'}, 'message': 'view user has been created successfully'}), + ({"username": "dataView", "password": "DV!3w@1", "role_id": 4, "real_name": "DataView", + "description": "Only read the data in buffer"}, + {'user': { + 'userName': 'dataview', 'userId': 7, 'roleId': 4, 'accessMethod': 'any', 'realName': 'DataView', + 'description': 'Only read the data in buffer'}, 'message': 'dataview user has been created successfully'}) ]) def test_create_user(self, fledge_url, form_data, expected_values): conn = http.client.HTTPConnection(fledge_url) diff --git a/tests/system/python/api/test_configuration.py b/tests/system/python/api/test_configuration.py index 1d7b797ef6..8662e31911 100644 --- a/tests/system/python/api/test_configuration.py +++ b/tests/system/python/api/test_configuration.py @@ -167,7 +167,8 @@ def test_create_category(self, fledge_url): 'item2': {'type': 'integer', 'description': 'An Integer check', 'default': '2'}, 'item3': {'type': 'password', 'description': 'A password check', 'default': 'Fledge'}, 'item4': {'type': 'string', 'description': 'A string check', 'default': 'fledge'}, - 'item5': {'type': 'script', 'description': 'A script check', 'default': ''} + 'item5': {'type': 'script', 'description': 'A script check', 'default': ''}, + 'item6': {'type': 'string', 'description': 'A string check', 'default': 'test', 'group': 'Advanced'} } payload.update({'value': conf}) conn = http.client.HTTPConnection(fledge_url) @@ -179,12 +180,15 @@ def test_create_category(self, fledge_url): assert cat_name == jdoc['key'] assert "a publisher" == jdoc['description'] assert "Publisher" == jdoc['displayName'] - expected_value = {'item1': {'type': 'boolean', 'default': 'false', 'value': 'false', 'description': 'A Boolean check'}, - 'item2': {'type': 'integer', 'description': 'An Integer check', 'default': '2', 'value': '2'}, - 'item3': {'type': 'password', 'description': 'A password check', 'default': 'Fledge', 'value': "****"}, - 
'item4': {'type': 'string', 'description': 'A string check', 'default': 'fledge', 'value': 'fledge'}, - 'item5': {'type': 'script', 'description': 'A script check', 'default': '', 'value': ''} - } + expected_value = { + 'item1': {'type': 'boolean', 'default': 'false', 'value': 'false', 'description': 'A Boolean check'}, + 'item2': {'type': 'integer', 'description': 'An Integer check', 'default': '2', 'value': '2'}, + 'item3': {'type': 'password', 'description': 'A password check', 'default': 'Fledge', 'value': "****"}, + 'item4': {'type': 'string', 'description': 'A string check', 'default': 'fledge', 'value': 'fledge'}, + 'item5': {'type': 'script', 'description': 'A script check', 'default': '', 'value': ''}, + 'item6': {'type': 'string', 'description': 'A string check', 'default': 'test', 'value': 'test', + 'group': 'Advanced'} + } assert Counter(expected_value) == Counter(jdoc['value']) def test_get_category_item(self, fledge_url): @@ -199,32 +203,71 @@ def test_get_category_item(self, fledge_url): assert 'A Boolean check' == jdoc['description'] assert 'false' == jdoc['value'] + # Get optional attribute + encoded_url = '/fledge/category/{}/item6'.format(quote(cat_name)) + conn.request("GET", encoded_url) + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert 'string' == jdoc['type'] + assert 'Advanced' == jdoc['group'] + def test_set_configuration_item(self, fledge_url): + new_value = "true" conn = http.client.HTTPConnection(fledge_url) encoded_url = '/fledge/category/{}/item1'.format(quote(cat_name)) - conn.request("PUT", encoded_url, body=json.dumps({"value": "true"})) + conn.request("PUT", encoded_url, body=json.dumps({"value": new_value})) r = conn.getresponse() assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) assert 'boolean' == jdoc['type'] + assert new_value == jdoc['value'] + assert 'false' == jdoc['default'] + # Verify new value in GET endpoint conn.request("GET", encoded_url) r = conn.getresponse() assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) assert 'boolean' == jdoc['type'] - assert 'true' == jdoc['value'] + assert new_value == jdoc['value'] assert 'false' == jdoc['default'] + # set optional attribute + new_val = "Security" + encoded_url = '/fledge/category/{}/item6'.format(quote(cat_name)) + conn.request("PUT", encoded_url, body=json.dumps({"group": new_val})) + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert 'test' == jdoc['default'] + assert 'test' == jdoc['value'] + assert new_val == jdoc['group'] + + # Verify new value in GET endpoint + conn.request("GET", encoded_url) + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert 'test' == jdoc['default'] + assert 'test' == jdoc['value'] + assert new_val == jdoc['group'] + def test_update_configuration_item_bulk(self, fledge_url): - expected = {'item1': {'default': 'false', 'value': 'false', 'description': 'A Boolean check', 'type': 'boolean'}, - 'item2': {'default': '2', 'value': '1', 'description': 'An Integer check', 'type': 'integer'}, - 'item3': {'default': 'Fledge', 'value': '****', 'description': 'A password check', 'type': 'password'}, - 'item4': {'default': 'fledge', 'value': 'new', 'description': 'A string check', 'type': 'string'}, - 'item5': {'type': 'script', 'description': 'A script check', 'default': '', 'value': ''} - } + expected = { + 'item1': {'type': 'boolean', 'default': 'false', 'value': 'false', 'description': 
'A Boolean check'}, + 'item2': {'type': 'integer', 'description': 'An Integer check', 'default': '2', 'value': '1'}, + 'item3': {'type': 'password', 'description': 'A password check', 'default': 'Fledge', 'value': "****"}, + 'item4': {'type': 'string', 'description': 'A string check', 'default': 'fledge', 'value': 'new'}, + 'item5': {'type': 'script', 'description': 'A script check', 'default': '', 'value': ''}, + 'item6': {'type': 'string', 'description': 'A string check', 'default': 'test', 'value': 'test', + 'group': 'Security'} + } conn = http.client.HTTPConnection(fledge_url) encoded_url = '/fledge/category/{}'.format(quote(cat_name)) conn.request("PUT", encoded_url, body=json.dumps({"item1": "false", "item2": "1", "item4": "new"})) @@ -328,11 +371,15 @@ def test_upload_script(self, fledge_url): upload_script = 'curl -s -F "script=@{}" {} | jq --raw-output ".value,.file,.default,.description,.type"'.format(script_path, url) exit_code = os.system(upload_script) assert 0 == exit_code - expected = {'item4': {'value': 'fledge', 'default': 'fledge', 'type': 'string', 'description': 'A string check'}, - 'item5': {'default': '', 'value': 'import logging\nfrom logging.handlers import SysLogHandler\n\n\ndef notify35(message):\n logger = logging.getLogger(__name__)\n logger.setLevel(level=logging.INFO)\n handler = SysLogHandler(address=\'/dev/log\')\n logger.addHandler(handler)\n\n logger.info("notify35 called with {}".format(message))\n print("Notification alert: " + str(message))\n', 'file': script_file_path, 'type': 'script', 'description': 'A script check'}, - 'item3': {'value': '****', 'default': 'Fledge', 'type': 'password', 'description': 'A password check'}, - 'item1': {'value': 'false', 'default': 'false', 'type': 'boolean', 'description': 'A Boolean check'}, - 'item2': {'value': '1', 'default': '2', 'type': 'integer', 'description': 'An Integer check'}} + expected = { + 'item1': {'type': 'boolean', 'default': 'false', 'value': 'false', 'description': 'A Boolean check'}, + 'item2': {'type': 'integer', 'description': 'An Integer check', 'default': '2', 'value': '1'}, + 'item3': {'type': 'password', 'description': 'A password check', 'default': 'Fledge', 'value': "****"}, + 'item4': {'type': 'string', 'description': 'A string check', 'default': 'fledge', 'value': 'fledge'}, + 'item5': {'default': '', 'value': 'import logging\nfrom logging.handlers import SysLogHandler\n\n\ndef notify35(message):\n logger = logging.getLogger(__name__)\n logger.setLevel(level=logging.INFO)\n handler = SysLogHandler(address=\'/dev/log\')\n logger.addHandler(handler)\n\n logger.info("notify35 called with {}".format(message))\n print("Notification alert: " + str(message))\n', 'file': script_file_path, 'type': 'script', 'description': 'A script check'}, + 'item6': {'type': 'string', 'description': 'A string check', 'default': 'test', 'value': 'test', + 'group': 'Security'} + } conn = http.client.HTTPConnection(fledge_url) conn.request("GET", encoded_url) r = conn.getresponse() diff --git a/tests/system/python/api/test_endpoints_with_different_user_types.py b/tests/system/python/api/test_endpoints_with_different_user_types.py new file mode 100644 index 0000000000..084c6fe79d --- /dev/null +++ b/tests/system/python/api/test_endpoints_with_different_user_types.py @@ -0,0 +1,373 @@ +# -*- coding: utf-8 -*- + +# FLEDGE_BEGIN +# See: http://fledge-iot.readthedocs.io/ +# FLEDGE_END + +""" Test REST API endpoints with different user types """ + +import http.client +import json +import time +import pytest + + 
+__author__ = "Ashish Jabble" +__copyright__ = "Copyright (c) 2022 Dianomic Systems" +__license__ = "Apache 2.0" +__version__ = "${VERSION}" + +TOKEN = None +VIEW_USERNAME = "view" +VIEW_PWD = "V!3w@1" +DATA_VIEW_USERNAME = "dataview" +DATA_VIEW_PWD = "DV!3w$" + + +@pytest.fixture +def change_to_auth_mandatory(fledge_url, wait_time): + # Wait for fledge server to start + time.sleep(wait_time) + conn = http.client.HTTPConnection(fledge_url) + conn.request("PUT", '/fledge/category/rest_api', json.dumps({"authentication": "mandatory"})) + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert "mandatory" == jdoc['authentication']['value'] + + conn.request("PUT", '/fledge/restart', json.dumps({})) + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert "Fledge restart has been scheduled." == jdoc['message'] + + +def test_setup(reset_and_start_fledge, change_to_auth_mandatory, fledge_url, wait_time): + time.sleep(wait_time * 3) + conn = http.client.HTTPConnection(fledge_url) + # Admin login + conn.request("POST", "/fledge/login", json.dumps({"username": "admin", "password": "fledge"})) + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert "Logged in successfully." == jdoc['message'] + assert "token" in jdoc + assert jdoc['admin'] + admin_token = jdoc["token"] + # Create view user + view_payload = {"username": VIEW_USERNAME, "password": VIEW_PWD, "role_id": 3, "real_name": "View", + "description": "Only to view the configuration"} + conn.request("POST", "/fledge/admin/user", body=json.dumps(view_payload), headers={"authorization": admin_token}) + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert "{} user has been created successfully".format(VIEW_USERNAME) == jdoc["message"] + + # Create Data view user + data_view_payload = {"username": DATA_VIEW_USERNAME, "password": DATA_VIEW_PWD, "role_id": 4, + "real_name": "DataView", "description": "Only read the data in buffer"} + conn.request("POST", "/fledge/admin/user", body=json.dumps(data_view_payload), + headers={"authorization": admin_token}) + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert "{} user has been created successfully".format(DATA_VIEW_USERNAME) == jdoc["message"] + + +class TestAPIEndpointsWithViewUserType: + def test_login(self, fledge_url, wait_time): + time.sleep(wait_time * 2) + conn = http.client.HTTPConnection(fledge_url) + conn.request("POST", "/fledge/login", json.dumps({"username": VIEW_USERNAME, "password": VIEW_PWD})) + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert "Logged in successfully." 
== jdoc['message'] + assert "token" in jdoc + assert not jdoc['admin'] + global TOKEN + TOKEN = jdoc["token"] + + @pytest.mark.parametrize(("method", "route_path", "http_status_code"), [ + # common + ("GET", "/fledge/ping", 200), ("PUT", "/fledge/shutdown", 403), ("PUT", "/fledge/restart", 403), + # health + ("GET", "/fledge/health/storage", 200), ("GET", "/fledge/health/logging", 200), + # user & roles + ("GET", "/fledge/user", 200), ("PUT", "/fledge/user", 500), ("PUT", "/fledge/user/1/password", 403), + ("PUT", "/fledge/user/3/password", 500), ("GET", "/fledge/user/role", 200), + # auth + ("POST", "/fledge/login", 403), ("PUT", "/fledge/31/logout", 401), + ("GET", "/fledge/auth/ott", 200), + # admin + ("POST", "/fledge/admin/user", 403), ("DELETE", "/fledge/admin/3/delete", 403), ("PUT", "/fledge/admin/3", 403), + ("PUT", "/fledge/admin/3/enable", 403), ("PUT", "/fledge/admin/3/reset", 403), + # category + ("GET", "/fledge/category", 200), ("POST", "/fledge/category", 403), ("GET", "/fledge/category/General", 200), + ("PUT", "/fledge/category/General", 403), ("DELETE", "/fledge/category/General", 403), + ("POST", "/fledge/category/General/children", 403), ("GET", "/fledge/category/General/children", 200), + ("DELETE", "/fledge/category/General/children/Advanced", 403), + ("DELETE", "/fledge/category/General/parent", 403), + ("GET", "/fledge/category/rest_api/allowPing", 200), ("PUT", "/fledge/category/rest_api/allowPing", 403), + ("DELETE", "/fledge/category/rest_api/allowPing/value", 403), + ("POST", "/fledge/category/rest_api/allowPing/upload", 403), + # schedule processes & schedules + ("GET", "/fledge/schedule/process", 200), ("POST", "/fledge/schedule/process", 403), + ("GET", "/fledge/schedule/process/purge", 200), + ("GET", "/fledge/schedule", 200), ("POST", "/fledge/schedule", 403), ("GET", "/fledge/schedule/type", 200), + ("GET", "/fledge/schedule/2176eb68-7303-11e7-8cf7-a6006ad3dba0", 200), + ("PUT", "/fledge/schedule/2176eb68-7303-11e7-8cf7-a6006ad3dba0/enable", 403), + ("PUT", "/fledge/schedule/2176eb68-7303-11e7-8cf7-a6006ad3dba0/disable", 403), + ("PUT", "/fledge/schedule/enable", 403), ("PUT", "/fledge/schedule/disable", 403), + ("POST", "/fledge/schedule/start/2176eb68-7303-11e7-8cf7-a6006ad3dba0", 403), + ("PUT", "/fledge/schedule/2176eb68-7303-11e7-8cf7-a6006ad3dba0", 403), + ("DELETE", "/fledge/schedule/2176eb68-7303-11e7-8cf7-a6006ad3dba0", 403), + # tasks + ("GET", "/fledge/task", 200), ("GET", "/fledge/task/state", 200), ("GET", "/fledge/task/latest", 200), + ("GET", "/fledge/task/123", 404), ("PUT", "/fledge/task/123/cancel", 403), + ("POST", "/fledge/scheduled/task", 403), ("DELETE", "/fledge/scheduled/task/blah", 403), + # service + ("POST", "/fledge/service", 403), ("GET", "/fledge/service", 200), ("DELETE", "/fledge/service/blah", 403), + # ("GET", "/fledge/service/available", 200), -- checked manually and commented out only to avoid apt-update + ("GET", "/fledge/service/installed", 200), + ("PUT", "/fledge/service/Southbound/blah/update", 403), ("POST", "/fledge/service/blah/otp", 403), + # south & north + ("GET", "/fledge/south", 200), ("GET", "/fledge/north", 200), + # asset browse + ("GET", "/fledge/asset", 200), ("GET", "/fledge/asset/sinusoid", 200), + ("GET", "/fledge/asset/sinusoid/latest", 200), + ("GET", "/fledge/asset/sinusoid/summary", 404), ("GET", "/fledge/asset/sinusoid/sinusoid", 200), + ("GET", "/fledge/asset/sinusoid/sinusoid/summary", 404), ("GET", "/fledge/asset/sinusoid/sinusoid/series", 200), + ("GET", "/fledge/asset/sinusoid/bucket/1", 
200), ("GET", "/fledge/asset/sinusoid/sinusoid/bucket/1", 200), + ("GET", "/fledge/structure/asset", 200), ("DELETE", "/fledge/asset", 403), + ("DELETE", "/fledge/asset/sinusoid", 403), + # asset tracker + ("GET", "/fledge/track", 200), ("GET", "/fledge/track/storage/assets", 200), + ("PUT", "/fledge/track/service/foo/asset/bar/event/Ingest", 403), + # statistics + ("GET", "/fledge/statistics", 200), ("GET", "/fledge/statistics/history", 200), + ("GET", "/fledge/statistics/rate?periods=1&statistics=FOO", 200), + # audit trail + ("POST", "/fledge/audit", 403), ("GET", "/fledge/audit", 200), ("GET", "/fledge/audit/logcode", 200), + ("GET", "/fledge/audit/severity", 200), + # backup & restore + ("GET", "/fledge/backup", 200), ("POST", "/fledge/backup", 403), ("POST", "/fledge/backup/upload", 403), + ("GET", "/fledge/backup/status", 200), ("GET", "/fledge/backup/123", 404), + ("DELETE", "/fledge/backup/123", 403), ("GET", "/fledge/backup/123/download", 404), + ("PUT", "/fledge/backup/123/restore", 403), + # package update + # ("GET", "/fledge/update", 200), -- checked manually and commented out only to avoid apt-update run + ("PUT", "/fledge/update", 403), + # certs store + ("GET", "/fledge/certificate", 200), ("POST", "/fledge/certificate", 403), + ("DELETE", "/fledge/certificate/user", 403), + # support bundle + ("GET", "/fledge/support", 200), ("GET", "/fledge/support/foo", 400), ("POST", "/fledge/support", 403), + # syslogs & package logs + ("GET", "/fledge/syslog", 200), ("GET", "/fledge/package/log", 200), ("GET", "/fledge/package/log/foo", 400), + ("GET", "/fledge/package/install/status", 404), + # plugins + ("GET", "/fledge/plugins/installed", 200), + # ("GET", "/fledge/plugins/available", 200), -- checked manually and commented out only to avoid apt-update + ("POST", "/fledge/plugins", 403), ("PUT", "/fledge/plugins/south/sinusoid/update", 403), + ("DELETE", "/fledge/plugins/south/sinusoid", 403), ("GET", "/fledge/service/foo/persist", 404), + ("GET", "/fledge/service/foo/plugin/omf/data", 404), ("POST", "/fledge/service/foo/plugin/omf/data", 403), + ("DELETE", "/fledge/service/foo/plugin/omf/data", 403), + # filters + ("POST", "/fledge/filter", 403), ("PUT", "/fledge/filter/foo/pipeline", 403), + ("GET", "/fledge/filter/foo/pipeline", 404), ("GET", "/fledge/filter/bar", 404), ("GET", "/fledge/filter", 200), + ("DELETE", "/fledge/filter/foo/pipeline", 403), ("DELETE", "/fledge/filter/bar", 403), + # snapshots + ("GET", "/fledge/snapshot/plugins", 403), ("POST", "/fledge/snapshot/plugins", 403), + ("PUT", "/fledge/snapshot/plugins/1", 403), ("DELETE", "/fledge/snapshot/plugins/1", 403), + ("GET", "/fledge/snapshot/category", 403), ("POST", "/fledge/snapshot/category", 403), + ("PUT", "/fledge/snapshot/category/1", 403), ("DELETE", "/fledge/snapshot/category/1", 403), + ("GET", "/fledge/snapshot/schedule", 403), ("POST", "/fledge/snapshot/schedule", 403), + ("PUT", "/fledge/snapshot/schedule/1", 403), ("DELETE", "/fledge/snapshot/schedule/1", 403), + # repository + ("POST", "/fledge/repository", 403), + # ACL + ("POST", "/fledge/ACL", 403), ("GET", "/fledge/ACL", 200), ("GET", "/fledge/ACL/foo", 404), + ("PUT", "/fledge/ACL/foo", 403), ("DELETE", "/fledge/ACL/foo", 403), ("PUT", "/fledge/service/foo/ACL", 403), + ("DELETE", "/fledge/service/foo/ACL", 403), + # python packages + ("GET", "/fledge/python/packages", 200), ("POST", "/fledge/python/package", 403), + # notification + ("GET", "/fledge/notification", 200), ("GET", "/fledge/notification/plugin", 404), + ("GET", 
"/fledge/notification/type", 200), ("GET", "/fledge/notification/N1", 400), + ("POST", "/fledge/notification", 403), ("PUT", "/fledge/notification/N1", 403), + ("DELETE", "/fledge/notification/N1", 403), ("GET", "/fledge/notification/N1/delivery", 404), + ("POST", "/fledge/notification/N1/delivery", 403), ("GET", "/fledge/notification/N1/delivery/C1", 404), + ("DELETE", "/fledge/notification/N1/delivery/C1", 403) + ]) + def test_endpoints(self, fledge_url, method, route_path, http_status_code, storage_plugin): + # FIXME: Once below JIRA is resolved + if storage_plugin == 'postgres': + if route_path == '/fledge/statistics/rate?periods=1&statistics=FOO': + pytest.skip('Due to FOGL-7097') + conn = http.client.HTTPConnection(fledge_url) + conn.request(method, route_path, headers={"authorization": TOKEN}) + r = conn.getresponse() + assert http_status_code == r.status + r.read().decode() + + def test_logout_me(self, fledge_url): + conn = http.client.HTTPConnection(fledge_url) + conn.request("PUT", '/fledge/logout', headers={"authorization": TOKEN}) + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert jdoc['logout'] + + +class TestAPIEndpointsWithDataViewUserType: + def test_login(self, fledge_url, wait_time): + time.sleep(wait_time * 2) + conn = http.client.HTTPConnection(fledge_url) + conn.request("POST", "/fledge/login", json.dumps({"username": DATA_VIEW_USERNAME, "password": DATA_VIEW_PWD})) + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert "Logged in successfully." == jdoc['message'] + assert "token" in jdoc + assert not jdoc['admin'] + global TOKEN + TOKEN = jdoc["token"] + + @pytest.mark.parametrize(("method", "route_path", "http_status_code"), [ + # common + ("GET", "/fledge/ping", 200), ("PUT", "/fledge/shutdown", 403), ("PUT", "/fledge/restart", 403), + # health + ("GET", "/fledge/health/storage", 403), ("GET", "/fledge/health/logging", 403), + # user & roles + ("GET", "/fledge/user", 403), ("PUT", "/fledge/user", 500), ("PUT", "/fledge/user/1/password", 403), + ("PUT", "/fledge/user/4/password", 500), ("GET", "/fledge/user/role", 200), + # auth + ("POST", "/fledge/login", 403), ("PUT", "/fledge/31/logout", 401), + ("GET", "/fledge/auth/ott", 403), + # admin + ("POST", "/fledge/admin/user", 403), ("DELETE", "/fledge/admin/3/delete", 403), ("PUT", "/fledge/admin/3", 403), + ("PUT", "/fledge/admin/3/enable", 403), ("PUT", "/fledge/admin/3/reset", 403), + # category + ("GET", "/fledge/category", 403), ("POST", "/fledge/category", 403), ("GET", "/fledge/category/General", 403), + ("PUT", "/fledge/category/General", 403), ("DELETE", "/fledge/category/General", 403), + ("POST", "/fledge/category/General/children", 403), ("GET", "/fledge/category/General/children", 403), + ("DELETE", "/fledge/category/General/children/Advanced", 403), + ("DELETE", "/fledge/category/General/parent", 403), + ("GET", "/fledge/category/rest_api/allowPing", 403), ("PUT", "/fledge/category/rest_api/allowPing", 403), + ("DELETE", "/fledge/category/rest_api/allowPing/value", 403), + ("POST", "/fledge/category/rest_api/allowPing/upload", 403), + # schedule processes & schedules + ("GET", "/fledge/schedule/process", 403), ("POST", "/fledge/schedule/process", 403), + ("GET", "/fledge/schedule/process/purge", 403), + ("GET", "/fledge/schedule", 403), ("POST", "/fledge/schedule", 403), ("GET", "/fledge/schedule/type", 403), + ("GET", "/fledge/schedule/2176eb68-7303-11e7-8cf7-a6006ad3dba0", 403), + ("PUT", 
"/fledge/schedule/2176eb68-7303-11e7-8cf7-a6006ad3dba0/enable", 403), + ("PUT", "/fledge/schedule/2176eb68-7303-11e7-8cf7-a6006ad3dba0/disable", 403), + ("PUT", "/fledge/schedule/enable", 403), ("PUT", "/fledge/schedule/disable", 403), + ("POST", "/fledge/schedule/start/2176eb68-7303-11e7-8cf7-a6006ad3dba0", 403), + ("PUT", "/fledge/schedule/2176eb68-7303-11e7-8cf7-a6006ad3dba0", 403), + ("DELETE", "/fledge/schedule/2176eb68-7303-11e7-8cf7-a6006ad3dba0", 403), + # tasks + ("GET", "/fledge/task", 403), ("GET", "/fledge/task/state", 403), ("GET", "/fledge/task/latest", 403), + ("GET", "/fledge/task/123", 403), ("PUT", "/fledge/task/123/cancel", 403), + ("POST", "/fledge/scheduled/task", 403), ("DELETE", "/fledge/scheduled/task/blah", 403), + # service + ("POST", "/fledge/service", 403), ("GET", "/fledge/service", 200), ("DELETE", "/fledge/service/blah", 403), + ("GET", "/fledge/service/available", 403), ("GET", "/fledge/service/installed", 403), + ("PUT", "/fledge/service/Southbound/blah/update", 403), ("POST", "/fledge/service/blah/otp", 403), + # south & north + ("GET", "/fledge/south", 403), ("GET", "/fledge/north", 403), + # asset browse + ("GET", "/fledge/asset", 200), ("GET", "/fledge/asset/sinusoid", 200), + ("GET", "/fledge/asset/sinusoid/latest", 200), + ("GET", "/fledge/asset/sinusoid/summary", 404), ("GET", "/fledge/asset/sinusoid/sinusoid", 200), + ("GET", "/fledge/asset/sinusoid/sinusoid/summary", 404), ("GET", "/fledge/asset/sinusoid/sinusoid/series", 200), + ("GET", "/fledge/asset/sinusoid/bucket/1", 200), ("GET", "/fledge/asset/sinusoid/sinusoid/bucket/1", 200), + ("GET", "/fledge/structure/asset", 403), ("DELETE", "/fledge/asset", 403), + ("DELETE", "/fledge/asset/sinusoid", 403), + # asset tracker + ("GET", "/fledge/track", 403), ("GET", "/fledge/track/storage/assets", 403), + ("PUT", "/fledge/track/service/foo/asset/bar/event/Ingest", 403), + # statistics + ("GET", "/fledge/statistics", 200), ("GET", "/fledge/statistics/history", 200), + ("GET", "/fledge/statistics/rate?periods=1&statistics=FOO", 200), + # audit trail + ("POST", "/fledge/audit", 403), ("GET", "/fledge/audit", 403), ("GET", "/fledge/audit/logcode", 403), + ("GET", "/fledge/audit/severity", 403), + # backup & restore + ("GET", "/fledge/backup", 403), ("POST", "/fledge/backup", 403), ("POST", "/fledge/backup/upload", 403), + ("GET", "/fledge/backup/status", 403), ("GET", "/fledge/backup/123", 403), + ("DELETE", "/fledge/backup/123", 403), ("GET", "/fledge/backup/123/download", 403), + ("PUT", "/fledge/backup/123/restore", 403), + # package update + # ("GET", "/fledge/update", 200), -- checked manually and commented out only to avoid apt-update + ("PUT", "/fledge/update", 403), + # certs store + ("GET", "/fledge/certificate", 403), ("POST", "/fledge/certificate", 403), + ("DELETE", "/fledge/certificate/user", 403), + # support bundle + ("GET", "/fledge/support", 403), ("GET", "/fledge/support/foo", 403), ("POST", "/fledge/support", 403), + # syslogs & package logs + ("GET", "/fledge/syslog", 403), ("GET", "/fledge/package/log", 403), ("GET", "/fledge/package/log/foo", 403), + ("GET", "/fledge/package/install/status", 403), + # plugins + ("GET", "/fledge/plugins/installed", 403), ("GET", "/fledge/plugins/available", 403), + ("POST", "/fledge/plugins", 403), ("PUT", "/fledge/plugins/south/sinusoid/update", 403), + ("DELETE", "/fledge/plugins/south/sinusoid", 403), ("GET", "/fledge/service/foo/persist", 403), + ("GET", "/fledge/service/foo/plugin/omf/data", 403), ("POST", "/fledge/service/foo/plugin/omf/data", 
403), + ("DELETE", "/fledge/service/foo/plugin/omf/data", 403), + # filters + ("POST", "/fledge/filter", 403), ("PUT", "/fledge/filter/foo/pipeline", 403), + ("GET", "/fledge/filter/foo/pipeline", 403), ("GET", "/fledge/filter/bar", 403), ("GET", "/fledge/filter", 403), + ("DELETE", "/fledge/filter/foo/pipeline", 403), ("DELETE", "/fledge/filter/bar", 403), + # snapshots + ("GET", "/fledge/snapshot/plugins", 403), ("POST", "/fledge/snapshot/plugins", 403), + ("PUT", "/fledge/snapshot/plugins/1", 403), ("DELETE", "/fledge/snapshot/plugins/1", 403), + ("GET", "/fledge/snapshot/category", 403), ("POST", "/fledge/snapshot/category", 403), + ("PUT", "/fledge/snapshot/category/1", 403), ("DELETE", "/fledge/snapshot/category/1", 403), + ("GET", "/fledge/snapshot/schedule", 403), ("POST", "/fledge/snapshot/schedule", 403), + ("PUT", "/fledge/snapshot/schedule/1", 403), ("DELETE", "/fledge/snapshot/schedule/1", 403), + # repository + ("POST", "/fledge/repository", 403), + # ACL + ("POST", "/fledge/ACL", 403), ("GET", "/fledge/ACL", 403), ("GET", "/fledge/ACL/foo", 403), + ("PUT", "/fledge/ACL/foo", 403), ("DELETE", "/fledge/ACL/foo", 403), ("PUT", "/fledge/service/foo/ACL", 403), + ("DELETE", "/fledge/service/foo/ACL", 403), + # python packages + ("GET", "/fledge/python/packages", 403), ("POST", "/fledge/python/package", 403), + # notification + ("GET", "/fledge/notification", 403), ("GET", "/fledge/notification/plugin", 403), + ("GET", "/fledge/notification/type", 403), ("GET", "/fledge/notification/N1", 403), + ("POST", "/fledge/notification", 403), ("PUT", "/fledge/notification/N1", 403), + ("DELETE", "/fledge/notification/N1", 403), ("GET", "/fledge/notification/N1/delivery", 403), + ("POST", "/fledge/notification/N1/delivery", 403), ("GET", "/fledge/notification/N1/delivery/C1", 403), + ("DELETE", "/fledge/notification/N1/delivery/C1", 403) + ]) + def test_endpoints(self, fledge_url, method, route_path, http_status_code, storage_plugin): + # FIXME: Once below JIRA is resolved + if storage_plugin == 'postgres': + if route_path == '/fledge/statistics/rate?periods=1&statistics=FOO': + pytest.skip('Due to FOGL-7097') + conn = http.client.HTTPConnection(fledge_url) + conn.request(method, route_path, headers={"authorization": TOKEN}) + r = conn.getresponse() + assert http_status_code == r.status + r.read().decode() + + def test_logout_me(self, fledge_url): + conn = http.client.HTTPConnection(fledge_url) + conn.request("PUT", '/fledge/logout', headers={"authorization": TOKEN}) + r = conn.getresponse() + assert 200 == r.status + r = r.read().decode() + jdoc = json.loads(r) + assert jdoc['logout'] diff --git a/tests/system/python/conftest.py b/tests/system/python/conftest.py index 19a0d4f94c..eca0c587ef 100644 --- a/tests/system/python/conftest.py +++ b/tests/system/python/conftest.py @@ -222,7 +222,8 @@ def clone_make_install(): @pytest.fixture def start_north_pi_v2(): def _start_north_pi_server_c(fledge_url, pi_host, pi_port, pi_token, north_plugin="OMF", - taskname="NorthReadingsToPI", start_task=True, naming_scheme="Backward compatibility"): + taskname="NorthReadingsToPI", start_task=True, naming_scheme="Backward compatibility", + pi_use_legacy="true"): """Start north task""" _enabled = "true" if start_task else "false" @@ -239,7 +240,8 @@ def _start_north_pi_server_c(fledge_url, pi_host, pi_port, pi_token, north_plugi "producerToken": {"value": pi_token}, "ServerHostname": {"value": pi_host}, "ServerPort": {"value": str(pi_port)}, - "NamingScheme": {"value": naming_scheme} + "NamingScheme": 
{"value": naming_scheme}, + "Legacy": {"value": pi_use_legacy} } } conn.request("POST", '/fledge/scheduled/task', json.dumps(data)) @@ -257,7 +259,8 @@ def _start_north_task_omf_web_api(fledge_url, pi_host, pi_port, pi_db="Dianomic" pi_user=None, pi_pwd=None, north_plugin="OMF", taskname="NorthReadingsToPI_WebAPI", start_task=True, naming_scheme="Backward compatibility", - default_af_location="fledge/room1/machine1"): + default_af_location="fledge/room1/machine1", + pi_use_legacy="true"): """Start north task""" _enabled = True if start_task else False @@ -278,7 +281,8 @@ def _start_north_task_omf_web_api(fledge_url, pi_host, pi_port, pi_db="Dianomic" "ServerPort": {"value": str(pi_port)}, "compression": {"value": "true"}, "DefaultAFLocation": {"value": default_af_location}, - "NamingScheme": {"value": naming_scheme} + "NamingScheme": {"value": naming_scheme}, + "Legacy": {"value": pi_use_legacy} } } @@ -297,7 +301,8 @@ def _start_north_omf_as_a_service(fledge_url, pi_host, pi_port, pi_db="Dianomic" pi_user=None, pi_pwd=None, north_plugin="OMF", service_name="NorthReadingsToPI_WebAPI", start=True, naming_scheme="Backward compatibility", - default_af_location="fledge/room1/machine1"): + default_af_location="fledge/room1/machine1", + pi_use_legacy="true"): """Start north service""" _enabled = True if start else False @@ -314,7 +319,8 @@ def _start_north_omf_as_a_service(fledge_url, pi_host, pi_port, pi_db="Dianomic" "ServerPort": {"value": str(pi_port)}, "compression": {"value": "true"}, "DefaultAFLocation": {"value": default_af_location}, - "NamingScheme": {"value": naming_scheme} + "NamingScheme": {"value": naming_scheme}, + "Legacy": {"value": pi_use_legacy} } } @@ -429,98 +435,57 @@ def read_data_from_pi_web_api(): def _read_data_from_pi_web_api(host, admin, password, pi_database, af_hierarchy_list, asset, sensor): """ This method reads data from pi web api """ - # List of pi databases - dbs = None - # PI logical grouping of attributes and child elements - elements = None - # List of elements - url_elements_list = None - # Element's recorded data url - url_recorded_data = None - # Resources in the PI Web API are addressed by WebID, parameter used for deletion of element - web_id = None - # List of elements - url_elements_data_list = None - username_password = "{}:{}".format(admin, password) username_password_b64 = base64.b64encode(username_password.encode('ascii')).decode("ascii") headers = {'Authorization': 'Basic %s' % username_password_b64} try: - conn = http.client.HTTPSConnection(host, context=ssl._create_unverified_context()) - conn.request("GET", '/piwebapi/assetservers', headers=headers) + ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2) + ctx.options |= ssl.PROTOCOL_TLSv1_1 + # With ssl.CERT_NONE as verify_mode, validation errors such as untrusted or expired cert + # are ignored and do not abort the TLS/SSL handshake. + ctx.verify_mode = ssl.CERT_NONE + conn = http.client.HTTPSConnection(host, context=ctx) + conn.request("GET", '/piwebapi/dataservers', headers=headers) res = conn.getresponse() r = json.loads(res.read().decode()) - dbs = r["Items"][0]["Links"]["Databases"] - - if dbs is not None: - conn.request("GET", dbs, headers=headers) - res = conn.getresponse() - r = json.loads(res.read().decode()) - for el in r["Items"]: - if el["Name"] == pi_database: - url_elements_list = el["Links"]["Elements"] - - # This block is for iteration when we have multi-level hierarchy. 
- # For example, if we have DefaultAFLocation as "fledge/room1/machine1" then - # it will recursively find elements of "fledge" and then "room1". - # And next block is for finding element of "machine1". - - af_level_count = 0 - for level in af_hierarchy_list[:-1]: - if url_elements_list is not None: - conn.request("GET", url_elements_list, headers=headers) - res = conn.getresponse() - r = json.loads(res.read().decode()) - for el in r["Items"]: - if el["Name"] == af_hierarchy_list[af_level_count]: - url_elements_list = el["Links"]["Elements"] - if af_level_count == 0: - web_id_root = el["WebId"] - af_level_count = af_level_count + 1 - - if url_elements_list is not None: - conn.request("GET", url_elements_list, headers=headers) + points= r["Items"][0]['Links']["Points"] + + if points is not None: + conn.request("GET", points, headers=headers) res = conn.getresponse() - r = json.loads(res.read().decode()) - items = r["Items"] - for el in items: - if el["Name"] == af_hierarchy_list[-1]: - url_elements_data_list = el["Links"]["Elements"] - - if url_elements_data_list is not None: - conn.request("GET", url_elements_data_list, headers=headers) - res = conn.getresponse() - r = json.loads(res.read().decode()) - items = r["Items"] - for el2 in items: - if el2["Name"] == asset: - url_recorded_data = el2["Links"]["RecordedData"] - web_id = el2["WebId"] - - _data_pi = {} - if url_recorded_data is not None: - conn.request("GET", url_recorded_data, headers=headers) - res = conn.getresponse() - r = json.loads(res.read().decode()) - _items = r["Items"] - for el in _items: - _recoded_value_list = [] - for _head in sensor: - # This checks if the recorded datapoint is present in the items that we retrieve from the PI server. - if _head in el["Name"]: - elx = el["Items"] - for _el in elx: - _recoded_value_list.append(_el["Value"]) - _data_pi[_head] = _recoded_value_list - - # Delete recorded elements - conn.request("DELETE", '/piwebapi/elements/{}'.format(web_id_root), headers=headers) - res = conn.getresponse() - res.read() - - return _data_pi - except (KeyError, IndexError, Exception): + r=json.loads(res.read().decode()) + data = r["Items"] + if data is not None: + value = None + if sensor == '': + search_string = asset + else: + search_string = "{}.{}".format(asset, sensor) + for el in data: + if search_string in el["Name"]: + value_url = el["Links"]["Value"] + if value_url is not None: + conn.request("GET", value_url, headers=headers) + res = conn.getresponse() + r = json.loads(res.read().decode()) + value = r["Value"] + if not value: + print("Could not find the latest reading of asset ->{}. sensor->{}".format(asset, + sensor)) + return value + else: + print("The latest value of asset->{}.sensor->{} is {}".format(asset, sensor, value)) + return(value) + else: + print("Data inside points not found.") + return None + else: + print("Could not find the points.") + return None + + except (KeyError, IndexError, Exception) as ex: + print("Failed to read data due to {}".format(ex)) return None return _read_data_from_pi_web_api @@ -672,7 +637,9 @@ def pytest_addoption(parser): help="PI Server user login password") parser.addoption("--pi-token", action="store", default="omf_north_0001", help="OMF Producer Token") - + parser.addoption("--pi-use-legacy", action="store", default="true", + help="Set false to override the default plugin behaviour i.e. 
for OMF version >=1.2.x to send linked data types.") + # OCS Config parser.addoption("--ocs-tenant", action="store", default="ocs_tenant_id", help="Tenant id of OCS") @@ -862,6 +829,11 @@ def pi_token(request): return request.config.getoption("--pi-token") +@pytest.fixture +def pi_use_legacy(request): + return request.config.getoption("--pi-use-legacy") + + @pytest.fixture def ocs_tenant(request): return request.config.getoption("--ocs-tenant") diff --git a/tests/system/python/iprpc/README.rst b/tests/system/python/iprpc/README.rst index 4de6571e46..bee9a917ba 100644 --- a/tests/system/python/iprpc/README.rst +++ b/tests/system/python/iprpc/README.rst @@ -79,7 +79,7 @@ To install the dependencies required to run python tests, run the following two :: cd $FLEDGE_ROOT/tests/system/python/iprpc - pip3 install -r requirements-iprpc-test.txt --user + python3 -m pip install -r requirements-iprpc-test.txt --user Test Execution diff --git a/tests/system/python/packages/test_authentication.py b/tests/system/python/packages/test_authentication.py index 1c029478cf..a20d77e0ed 100644 --- a/tests/system/python/packages/test_authentication.py +++ b/tests/system/python/packages/test_authentication.py @@ -567,8 +567,10 @@ def test_get_roles_with_password_token(self, fledge_url): r = r.read().decode() jdoc = json.loads(r) assert {'roles': [{'description': 'All CRUD privileges', 'id': 1, 'name': 'admin'}, - {'description': 'All CRUD operations and self profile management', - 'id': 2, 'name': 'user'}]} == jdoc + {'description': 'All CRUD operations and self profile management', 'id': 2, 'name': 'user'}, + {'id': 3, 'name': 'view', 'description': 'Only to view the configuration'}, + {'id': 4, 'name': 'data-view', 'description': 'Only read the data in buffer'} + ]} == jdoc def test_get_roles_with_certificate_token(self, fledge_url): conn = http.client.HTTPConnection(fledge_url) @@ -578,8 +580,10 @@ def test_get_roles_with_certificate_token(self, fledge_url): r = r.read().decode() jdoc = json.loads(r) assert {'roles': [{'description': 'All CRUD privileges', 'id': 1, 'name': 'admin'}, - {'description': 'All CRUD operations and self profile management', - 'id': 2, 'name': 'user'}]} == jdoc + {'description': 'All CRUD operations and self profile management', 'id': 2, 'name': 'user'}, + {'id': 3, 'name': 'view', 'description': 'Only to view the configuration'}, + {'id': 4, 'name': 'data-view', 'description': 'Only read the data in buffer'} + ]} == jdoc @pytest.mark.parametrize(("form_data", "expected_values"), [ ({"username": "any1", "password": "User@123", "real_name": "AJ", "description": "Nerd user"}, @@ -759,8 +763,7 @@ def test_admin_actions_forbidden_for_regular_user_with_pwd_token(self, fledge_ur _token = jdoc["token"] # Create User - conn.request("POST", "/fledge/admin/user", body=json.dumps({"username": "other", - "password": "User@123"}), + conn.request("POST", "/fledge/admin/user", body=json.dumps({"username": "other", "password": "User@123"}), headers={"authorization": _token}) r = conn.getresponse() assert 403 == r.status @@ -797,8 +800,7 @@ def test_admin_actions_forbidden_for_regular_user_with_cert_token(self, fledge_u _token = jdoc["token"] # Create User - conn.request("POST", "/fledge/admin/user", body=json.dumps({"username": "other", - "password": "User@123"}), + conn.request("POST", "/fledge/admin/user", body=json.dumps({"username": "other", "password": "User@123"}), headers={"authorization": _token}) r = conn.getresponse() assert 403 == r.status @@ -962,8 +964,10 @@ def test_get_roles(self, 
fledge_url): r = r.read().decode() jdoc = json.loads(r) assert {'roles': [{'description': 'All CRUD privileges', 'id': 1, 'name': 'admin'}, - {'description': 'All CRUD operations and self profile management', - 'id': 2, 'name': 'user'}]} == jdoc + {'description': 'All CRUD operations and self profile management', 'id': 2, 'name': 'user'}, + {'id': 3, 'name': 'view', 'description': 'Only to view the configuration'}, + {'id': 4, 'name': 'data-view', 'description': 'Only read the data in buffer'} + ]} == jdoc @pytest.mark.parametrize(("form_data", "expected_values"), [ ({"username": "any1", "password": "User@123", "real_name": "AJ", "description": "Nerd user"}, @@ -1080,8 +1084,7 @@ def test_admin_actions_forbidden_for_regular_user(self, fledge_url): _token = jdoc["token"] # Create User - conn.request("POST", "/fledge/admin/user", body=json.dumps({"username": "other", - "password": "User@123"}), + conn.request("POST", "/fledge/admin/user", body=json.dumps({"username": "other", "password": "User@123"}), headers={"authorization": _token}) r = conn.getresponse() assert 403 == r.status @@ -1269,8 +1272,10 @@ def test_get_roles(self, fledge_url): r = r.read().decode() jdoc = json.loads(r) assert {'roles': [{'description': 'All CRUD privileges', 'id': 1, 'name': 'admin'}, - {'description': 'All CRUD operations and self profile management', - 'id': 2, 'name': 'user'}]} == jdoc + {'description': 'All CRUD operations and self profile management', 'id': 2, 'name': 'user'}, + {'id': 3, 'name': 'view', 'description': 'Only to view the configuration'}, + {'id': 4, 'name': 'data-view', 'description': 'Only read the data in buffer'} + ]} == jdoc @pytest.mark.parametrize(("form_data", "expected_values"), [ ({"username": "any1", "password": "User@123", "real_name": "AJ", "description": "Nerd user"}, @@ -1351,8 +1356,7 @@ def test_admin_actions_forbidden_for_regular_user(self, fledge_url): _token = jdoc["token"] # Create User - conn.request("POST", "/fledge/admin/user", body=json.dumps({"username": "other", - "password": "User@123"}), + conn.request("POST", "/fledge/admin/user", body=json.dumps({"username": "other", "password": "User@123"}), headers={"authorization": _token}) r = conn.getresponse() assert 403 == r.status @@ -1673,8 +1677,10 @@ def test_get_roles_with_password_token(self): r = r.read().decode() jdoc = json.loads(r) assert {'roles': [{'description': 'All CRUD privileges', 'id': 1, 'name': 'admin'}, - {'description': 'All CRUD operations and self profile management', - 'id': 2, 'name': 'user'}]} == jdoc + {'description': 'All CRUD operations and self profile management', 'id': 2, 'name': 'user'}, + {'id': 3, 'name': 'view', 'description': 'Only to view the configuration'}, + {'id': 4, 'name': 'data-view', 'description': 'Only read the data in buffer'} + ]} == jdoc def test_get_roles_with_certificate_token(self): conn = http.client.HTTPSConnection("localhost", 1995, context=context) @@ -1684,8 +1690,10 @@ def test_get_roles_with_certificate_token(self): r = r.read().decode() jdoc = json.loads(r) assert {'roles': [{'description': 'All CRUD privileges', 'id': 1, 'name': 'admin'}, - {'description': 'All CRUD operations and self profile management', - 'id': 2, 'name': 'user'}]} == jdoc + {'description': 'All CRUD operations and self profile management', 'id': 2, 'name': 'user'}, + {'id': 3, 'name': 'view', 'description': 'Only to view the configuration'}, + {'id': 4, 'name': 'data-view', 'description': 'Only read the data in buffer'} + ]} == jdoc @pytest.mark.parametrize(("form_data", 
"expected_values"), [ ({"username": "any1", "password": "User@123", "real_name": "AJ", "description": "Nerd user"}, @@ -1865,8 +1873,7 @@ def test_admin_actions_forbidden_for_regular_user_with_pwd_token(self): _token = jdoc["token"] # Create User - conn.request("POST", "/fledge/admin/user", body=json.dumps({"username": "other", - "password": "User@123"}), + conn.request("POST", "/fledge/admin/user", body=json.dumps({"username": "other", "password": "User@123"}), headers={"authorization": _token}) r = conn.getresponse() assert 403 == r.status @@ -1903,8 +1910,7 @@ def test_admin_actions_forbidden_for_regular_user_with_cert_token(self): _token = jdoc["token"] # Create User - conn.request("POST", "/fledge/admin/user", body=json.dumps({"username": "other", - "password": "User@123"}), + conn.request("POST", "/fledge/admin/user", body=json.dumps({"username": "other", "password": "User@123"}), headers={"authorization": _token}) r = conn.getresponse() assert 403 == r.status @@ -2072,8 +2078,10 @@ def test_get_roles(self): r = r.read().decode() jdoc = json.loads(r) assert {'roles': [{'description': 'All CRUD privileges', 'id': 1, 'name': 'admin'}, - {'description': 'All CRUD operations and self profile management', - 'id': 2, 'name': 'user'}]} == jdoc + {'description': 'All CRUD operations and self profile management', 'id': 2, 'name': 'user'}, + {'id': 3, 'name': 'view', 'description': 'Only to view the configuration'}, + {'id': 4, 'name': 'data-view', 'description': 'Only read the data in buffer'} + ]} == jdoc @pytest.mark.parametrize(("form_data", "expected_values"), [ ({"username": "any1", "password": "User@123", "real_name": "AJ", "description": "Nerd user"}, @@ -2190,8 +2198,7 @@ def test_admin_actions_forbidden_for_regular_user(self): _token = jdoc["token"] # Create User - conn.request("POST", "/fledge/admin/user", body=json.dumps({"username": "other", - "password": "User@123"}), + conn.request("POST", "/fledge/admin/user", body=json.dumps({"username": "other", "password": "User@123"}), headers={"authorization": _token}) r = conn.getresponse() assert 403 == r.status @@ -2386,8 +2393,10 @@ def test_get_roles(self): r = r.read().decode() jdoc = json.loads(r) assert {'roles': [{'description': 'All CRUD privileges', 'id': 1, 'name': 'admin'}, - {'description': 'All CRUD operations and self profile management', - 'id': 2, 'name': 'user'}]} == jdoc + {'description': 'All CRUD operations and self profile management', 'id': 2, 'name': 'user'}, + {'id': 3, 'name': 'view', 'description': 'Only to view the configuration'}, + {'id': 4, 'name': 'data-view', 'description': 'Only read the data in buffer'} + ]} == jdoc @pytest.mark.parametrize(("form_data", "expected_values"), [ ({"username": "any1", "password": "User@123", "real_name": "AJ", "description": "Nerd user"}, @@ -2468,8 +2477,7 @@ def test_admin_actions_forbidden_for_regular_user(self): _token = jdoc["token"] # Create User - conn.request("POST", "/fledge/admin/user", body=json.dumps({"username": "other", - "password": "User@123"}), + conn.request("POST", "/fledge/admin/user", body=json.dumps({"username": "other", "password": "User@123"}), headers={"authorization": _token}) r = conn.getresponse() assert 403 == r.status diff --git a/tests/system/python/packages/test_multiple_assets.py b/tests/system/python/packages/test_multiple_assets.py index f6a966baa9..70e6528422 100644 --- a/tests/system/python/packages/test_multiple_assets.py +++ b/tests/system/python/packages/test_multiple_assets.py @@ -94,103 +94,6 @@ def 
start_north(start_north_omf_as_a_service, fledge_url, yield start_north -@pytest.fixture -def read_data_from_pi_web_api(): - def _read_data_from_pi_web_api(host, admin, password, pi_database, af_hierarchy_list, asset, sensor): - """ This method reads data from pi web api """ - - # List of pi databases - dbs = None - # PI logical grouping of attributes and child elements - elements = None - # List of elements - url_elements_list = None - # Element's recorded data url - url_recorded_data = None - # Resources in the PI Web API are addressed by WebID, parameter used for deletion of element - web_id = None - # List of elements - url_elements_data_list = None - - username_password = "{}:{}".format(admin, password) - username_password_b64 = base64.b64encode(username_password.encode('ascii')).decode("ascii") - headers = {'Authorization': 'Basic %s' % username_password_b64} - - try: - conn = http.client.HTTPSConnection(host, context=ssl._create_unverified_context()) - conn.request("GET", '/piwebapi/assetservers', headers=headers) - res = conn.getresponse() - r = json.loads(res.read().decode()) - dbs = r["Items"][0]["Links"]["Databases"] - - if dbs is not None: - conn.request("GET", dbs, headers=headers) - res = conn.getresponse() - r = json.loads(res.read().decode()) - for el in r["Items"]: - if el["Name"] == pi_database: - url_elements_list = el["Links"]["Elements"] - - # This block is for iteration when we have multi-level hierarchy. - # For example, if we have DefaultAFLocation as "fledge/room1/machine1" then - # it will recursively find elements of "fledge" and then "room1". - # And next block is for finding element of "machine1". - - af_level_count = 0 - for level in af_hierarchy_list[:-1]: - if url_elements_list is not None: - conn.request("GET", url_elements_list, headers=headers) - res = conn.getresponse() - r = json.loads(res.read().decode()) - for el in r["Items"]: - if el["Name"] == af_hierarchy_list[af_level_count]: - url_elements_list = el["Links"]["Elements"] - if af_level_count == 0: - web_id_root = el["WebId"] - af_level_count = af_level_count + 1 - - if url_elements_list is not None: - conn.request("GET", url_elements_list, headers=headers) - res = conn.getresponse() - r = json.loads(res.read().decode()) - items = r["Items"] - for el in items: - if el["Name"] == af_hierarchy_list[-1]: - url_elements_data_list = el["Links"]["Elements"] - - if url_elements_data_list is not None: - conn.request("GET", url_elements_data_list, headers=headers) - res = conn.getresponse() - r = json.loads(res.read().decode()) - items = r["Items"] - for el2 in items: - if el2["Name"] == asset: - url_recorded_data = el2["Links"]["RecordedData"] - web_id = el2["WebId"] - - _data_pi = {} - if url_recorded_data is not None: - conn.request("GET", url_recorded_data, headers=headers) - res = conn.getresponse() - r = json.loads(res.read().decode()) - _items = r["Items"] - for el in _items: - _recoded_value_list = [] - for _head in sensor: - # This checks if the recorded datapoint is present in the items that we retrieve from the PI server. 
- if _head in el["Name"]: - elx = el["Items"] - for _el in elx: - _recoded_value_list.append(_el["Value"]) - _data_pi[_head] = _recoded_value_list - - return _data_pi - except (KeyError, IndexError, Exception): - return None - - return _read_data_from_pi_web_api - - def add_benchmark(fledge_url, name, count): data = { "name": name, @@ -272,13 +175,13 @@ def _verify_egress(read_data_from_pi_web_api, pi_host, pi_admin, pi_passwd, pi_d data_from_pi = None asset_name = "random-" + str(s) + str(a) print(asset_name) - recorded_datapoint = "{}measurement_{}".format(type_id, asset_name) + recorded_datapoint = "{}".format(asset_name) # Name of asset in the PI server - pi_asset_name = "{}-type{}".format(asset_name, type_id) + pi_asset_name = "{}".format(asset_name) while (data_from_pi is None or data_from_pi == []) and retry_count < retries: data_from_pi = read_data_from_pi_web_api(pi_host, pi_admin, pi_passwd, pi_db, af_hierarchy_level_list, - pi_asset_name, {recorded_datapoint}) + pi_asset_name, '') if data_from_pi is None: retry_count += 1 time.sleep(wait_time) diff --git a/tests/system/python/packages/test_omf_naming_scheme.py b/tests/system/python/packages/test_omf_naming_scheme.py index 517149d47c..2a46ff6a11 100644 --- a/tests/system/python/packages/test_omf_naming_scheme.py +++ b/tests/system/python/packages/test_omf_naming_scheme.py @@ -155,14 +155,14 @@ def _verify_egress(read_data_from_pi_web_api, pi_host, pi_admin, pi_passwd, pi_d while (data_from_pi is None or data_from_pi == []) and retry_count < retries: data_from_pi = read_data_from_pi_web_api(pi_host, pi_admin, pi_passwd, pi_db, af_hierarchy_level_list, - pi_asset_name, {recorded_datapoint}) + pi_asset_name, '') retry_count += 1 time.sleep(wait_time * 2) if data_from_pi is None or retry_count == retries: assert False, "Failed to read data from PI" - assert data_from_pi[recorded_datapoint][-1] == DATAPOINT_VALUE + assert int(data_from_pi) == DATAPOINT_VALUE class TestOMFNamingScheme: @@ -246,7 +246,7 @@ def test_omf_with_type_suffix_naming(self, reset_fledge, start_south, start_nort type_id = 1 recorded_datapoint = "{}".format(south_asset_name) # Name of asset in the PI server - pi_asset_name = "{}-type{}".format(south_asset_name, type_id) + pi_asset_name = "{}".format(south_asset_name) _verify_egress(read_data_from_pi_web_api, pi_host, pi_admin, pi_passwd, pi_db, wait_time, retries, recorded_datapoint, pi_asset_name) @@ -286,7 +286,7 @@ def test_omf_with_attribute_hash_naming(self, reset_fledge, start_south, start_n if not skip_verify_north_interface: type_id = 1 - recorded_datapoint = "{}measurement_{}".format(type_id, south_asset_name) + recorded_datapoint = "{}".format(south_asset_name) # Name of asset in the PI server pi_asset_name = "{}".format(south_asset_name) _verify_egress(read_data_from_pi_web_api, pi_host, pi_admin, pi_passwd, pi_db, wait_time, retries, @@ -328,8 +328,8 @@ def test_omf_with_backward_compatibility_naming(self, reset_fledge, start_south, if not skip_verify_north_interface: type_id = 1 - recorded_datapoint = "{}measurement_{}".format(type_id, south_asset_name) + recorded_datapoint = "{}".format(south_asset_name) # Name of asset in the PI server - pi_asset_name = "{}-type{}".format(south_asset_name, type_id) + pi_asset_name = "{}".format(south_asset_name) _verify_egress(read_data_from_pi_web_api, pi_host, pi_admin, pi_passwd, pi_db, wait_time, retries, recorded_datapoint, pi_asset_name) diff --git a/tests/system/python/packages/test_omf_north_service.py b/tests/system/python/packages/test_omf_north_service.py 
index 34445f3e82..77d4a24fe1 100644 --- a/tests/system/python/packages/test_omf_north_service.py +++ b/tests/system/python/packages/test_omf_north_service.py @@ -164,13 +164,13 @@ def _verify_egress(read_data_from_pi_web_api, pi_host, pi_admin, pi_passwd, pi_d af_hierarchy_level_list = AF_HIERARCHY_LEVEL.split("/") type_id = 1 - recorded_datapoint = "{}measurement_{}".format(type_id, asset_name) + recorded_datapoint = asset_name # Name of asset in the PI server - pi_asset_name = "{}-type{}".format(asset_name, type_id) + pi_asset_name = asset_name while (data_from_pi is None or data_from_pi == []) and retry_count < retries: data_from_pi = read_data_from_pi_web_api(pi_host, pi_admin, pi_passwd, pi_db, af_hierarchy_level_list, - pi_asset_name, {recorded_datapoint}) + pi_asset_name, '') retry_count += 1 time.sleep(wait_time * 2) diff --git a/tests/system/python/packages/test_pi_webapi.py b/tests/system/python/packages/test_pi_webapi.py index 39e213138d..b2be96c9e1 100644 --- a/tests/system/python/packages/test_pi_webapi.py +++ b/tests/system/python/packages/test_pi_webapi.py @@ -106,20 +106,20 @@ def _verify_egress(read_data_from_pi_web_api, pi_host, pi_admin, pi_passwd, pi_d af_hierarchy_level_list = AF_HIERARCHY_LEVEL.split("/") type_id = 1 - recorded_datapoint = "{}measurement_{}".format(type_id, asset_name) + recorded_datapoint = asset_name # Name of asset in the PI server - PI_ASSET_NAME = "{}-type{}".format(asset_name, type_id) + PI_ASSET_NAME = asset_name while (data_from_pi is None or data_from_pi == []) and retry_count < retries: data_from_pi = read_data_from_pi_web_api(pi_host, pi_admin, pi_passwd, pi_db, af_hierarchy_level_list, - PI_ASSET_NAME, {recorded_datapoint}) + ASSET, '') retry_count += 1 time.sleep(wait_time * 2) if data_from_pi is None or retry_count == retries: assert False, "Failed to read data from PI" - assert data_from_pi[recorded_datapoint][-1] == DATAPOINT_VALUE + assert int(data_from_pi) == DATAPOINT_VALUE @pytest.fixture diff --git a/tests/system/python/pair/test_c_north_service_pair.py b/tests/system/python/pair/test_c_north_service_pair.py index e596c568dc..8f086d4f8f 100644 --- a/tests/system/python/pair/test_c_north_service_pair.py +++ b/tests/system/python/pair/test_c_north_service_pair.py @@ -257,13 +257,13 @@ def _verify_egress(read_data_from_pi_web_api, pi_host, pi_admin, pi_passwd, pi_d af_hierarchy_level_list = AF_HIERARCHY_LEVEL.split("/") type_id = 1 - recorded_datapoint = "{}measurement_{}".format(type_id, asset_name) + recorded_datapoint = "{}".format(asset_name) # Name of asset in the PI server - pi_asset_name = "{}-type{}".format(asset_name, type_id) + pi_asset_name = "{}".format(asset_name) while (data_from_pi is None or data_from_pi == []) and retry_count < retries: data_from_pi = read_data_from_pi_web_api(pi_host, pi_admin, pi_passwd, pi_db, af_hierarchy_level_list, - pi_asset_name, {recorded_datapoint}) + pi_asset_name, '') retry_count += 1 time.sleep(wait_time * 2) diff --git a/tests/system/python/pair/test_pyton_north_service_pair.py b/tests/system/python/pair/test_pyton_north_service_pair.py index fbd4ab3d90..d2a4884a4b 100644 --- a/tests/system/python/pair/test_pyton_north_service_pair.py +++ b/tests/system/python/pair/test_pyton_north_service_pair.py @@ -258,13 +258,13 @@ def _verify_egress(read_data_from_pi_web_api, pi_host, pi_admin, pi_passwd, pi_d af_hierarchy_level_list = AF_HIERARCHY_LEVEL.split("/") type_id = 1 - recorded_datapoint = "{}measurement_{}".format(type_id, asset_name) + recorded_datapoint = "{}".format(asset_name) # Name 
of asset in the PI server - pi_asset_name = "{}-type{}".format(asset_name, type_id) + pi_asset_name = "{}".format(asset_name) while (data_from_pi is None or data_from_pi == []) and retry_count < retries: data_from_pi = read_data_from_pi_web_api(pi_host, pi_admin, pi_passwd, pi_db, af_hierarchy_level_list, - pi_asset_name, {recorded_datapoint}) + pi_asset_name, '') retry_count += 1 time.sleep(wait_time * 2) diff --git a/tests/system/python/scripts/install_python_plugin b/tests/system/python/scripts/install_python_plugin index b220d92f50..2d26435806 100755 --- a/tests/system/python/scripts/install_python_plugin +++ b/tests/system/python/scripts/install_python_plugin @@ -47,7 +47,7 @@ copy_file_and_requirement () { cp -r /tmp/${REPO_NAME}/python/fledge/plugins/${PLUGIN_TYPE}/${PLUGIN_NAME} $FLEDGE_ROOT/python/fledge/plugins/${PLUGIN_TYPE}/ fi req_file=$(find /tmp/${REPO_NAME} -name requirement*.txt) - [ ! -z "${req_file}" ] && pip3 install --user -Ir ${req_file} ${USE_PIP_CACHE} || echo "No such external dependency needed for ${PLUGIN_NAME} plugin." + [ ! -z "${req_file}" ] && python3 -m pip install --user -Ir ${req_file} ${USE_PIP_CACHE} || echo "No such external dependency needed for ${PLUGIN_NAME} plugin." } clean diff --git a/tests/unit/C/CMakeLists.txt b/tests/unit/C/CMakeLists.txt index 99482eac35..4460dd7a2c 100644 --- a/tests/unit/C/CMakeLists.txt +++ b/tests/unit/C/CMakeLists.txt @@ -95,7 +95,8 @@ set_target_properties(plugins-common-lib PROPERTIES SOVERSION 1) set(LIB_NAME OMF) file(GLOB OMF_LIB_SOURCES ../../../C/plugins/north/OMF/omf.cpp - ../../../C/plugins/north/OMF/omfhints.cpp) + ../../../C/plugins/north/OMF/omfhints.cpp + ../../../C/plugins/north/OMF/linkdata.cpp) add_library(${LIB_NAME} SHARED ${OMF_LIB_SOURCES}) target_link_libraries(${LIB_NAME} diff --git a/tests/unit/C/plugins/storage/common/CMakeLists.txt b/tests/unit/C/plugins/storage/common/CMakeLists.txt index 1a1e62d70c..e752201cb6 100644 --- a/tests/unit/C/plugins/storage/common/CMakeLists.txt +++ b/tests/unit/C/plugins/storage/common/CMakeLists.txt @@ -23,6 +23,10 @@ set(common_sources "../../../../../../C/common/string_utils.cpp") # Link runTests with what we want to test and the GTest and pthread library add_executable(RunTests ${test_sources} ${common_sources} tests.cpp) + +#setting BOOST_COMPONENTS to use pthread library only +set(BOOST_COMPONENTS thread) +find_package(Boost 1.53.0 COMPONENTS ${BOOST_COMPONENTS} REQUIRED) target_link_libraries(RunTests ${GTEST_LIBRARIES} pthread ${COMMONLIB}) setup_target_for_coverage_gcovr_html( diff --git a/tests/unit/C/plugins/storage/postgres/CMakeLists.txt b/tests/unit/C/plugins/storage/postgres/CMakeLists.txt index 3bcde16789..fdb34f1896 100644 --- a/tests/unit/C/plugins/storage/postgres/CMakeLists.txt +++ b/tests/unit/C/plugins/storage/postgres/CMakeLists.txt @@ -86,6 +86,9 @@ target_link_libraries(${PROJECT_NAME} ${STORAGE_COMMON_LIB}) target_link_libraries(${PROJECT_NAME} ${PG_LIB}) target_link_libraries(${PROJECT_NAME} ${LIBCURL_LIB}) +#setting BOOST_COMPONENTS to use pthread library only +set(BOOST_COMPONENTS thread) +find_package(Boost 1.53.0 COMPONENTS ${BOOST_COMPONENTS} REQUIRED) target_link_libraries(${PROJECT_NAME} ${GTEST_LIBRARIES} pthread) # Add Python 3.x library if(${CMAKE_VERSION} VERSION_LESS "3.12.0") diff --git a/tests/unit/C/plugins/storage/sqlite/CMakeLists.txt b/tests/unit/C/plugins/storage/sqlite/CMakeLists.txt index b793408c84..2197d96779 100644 --- a/tests/unit/C/plugins/storage/sqlite/CMakeLists.txt +++ 
b/tests/unit/C/plugins/storage/sqlite/CMakeLists.txt @@ -78,6 +78,9 @@ target_link_libraries(${PROJECT_NAME} ${PLUGIN_SQLITE}) target_link_libraries(${PROJECT_NAME} ${STORAGE_COMMON_LIB}) target_link_libraries(${PROJECT_NAME} ${LIBCURL_LIB}) +#setting BOOST_COMPONENTS to use pthread library only +set(BOOST_COMPONENTS thread) +find_package(Boost 1.53.0 COMPONENTS ${BOOST_COMPONENTS} REQUIRED) target_link_libraries(${PROJECT_NAME} ${GTEST_LIBRARIES} pthread) # Add Python 3.x library if(${CMAKE_VERSION} VERSION_LESS "3.12.0") diff --git a/tests/unit/C/plugins/storage/sqlitelb/CMakeLists.txt b/tests/unit/C/plugins/storage/sqlitelb/CMakeLists.txt index 1b6eb2fb2c..1039fffc78 100644 --- a/tests/unit/C/plugins/storage/sqlitelb/CMakeLists.txt +++ b/tests/unit/C/plugins/storage/sqlitelb/CMakeLists.txt @@ -83,6 +83,9 @@ target_link_libraries(${PROJECT_NAME} ${PLUGIN_SQLITELB}) target_link_libraries(${PROJECT_NAME} ${STORAGE_COMMON_LIB}) target_link_libraries(${PROJECT_NAME} ${LIBCURL_LIB}) +#setting BOOST_COMPONENTS to use pthread library only +set(BOOST_COMPONENTS thread) +find_package(Boost 1.53.0 COMPONENTS ${BOOST_COMPONENTS} REQUIRED) target_link_libraries(${PROJECT_NAME} ${GTEST_LIBRARIES} pthread) # Add Python 3.x library if(${CMAKE_VERSION} VERSION_LESS "3.12.0") diff --git a/tests/unit/C/plugins/storage/sqlitememory/CMakeLists.txt b/tests/unit/C/plugins/storage/sqlitememory/CMakeLists.txt index b0e85de31e..5739768c83 100644 --- a/tests/unit/C/plugins/storage/sqlitememory/CMakeLists.txt +++ b/tests/unit/C/plugins/storage/sqlitememory/CMakeLists.txt @@ -84,6 +84,9 @@ target_link_libraries(${PROJECT_NAME} ${PLUGIN_SQLITEMEMORY}) target_link_libraries(${PROJECT_NAME} ${STORAGE_COMMON_LIB}) target_link_libraries(${PROJECT_NAME} ${LIBCURL_LIB}) +#setting BOOST_COMPONENTS to use pthread library only +set(BOOST_COMPONENTS thread) +find_package(Boost 1.53.0 COMPONENTS ${BOOST_COMPONENTS} REQUIRED) target_link_libraries(${PROJECT_NAME} ${GTEST_LIBRARIES} pthread) # Add Python 3.x library diff --git a/tests/unit/C/services/storage/postgres/CMakeLists.txt b/tests/unit/C/services/storage/postgres/CMakeLists.txt index c94b65091a..2a218ee9cb 100644 --- a/tests/unit/C/services/storage/postgres/CMakeLists.txt +++ b/tests/unit/C/services/storage/postgres/CMakeLists.txt @@ -26,6 +26,9 @@ file(GLOB utils_sources "../../../../../../C/common/json_utils.cpp") # Link runTests with what we want to test and the GTest and pthread library add_executable(RunTests ${test_sources} ${logger_sources} ${config_sources} ${utils_sources} tests.cpp) +#setting BOOST_COMPONENTS to use pthread library only +set(BOOST_COMPONENTS thread) +find_package(Boost 1.53.0 COMPONENTS ${BOOST_COMPONENTS} REQUIRED) target_link_libraries(RunTests ${GTEST_LIBRARIES} pthread) setup_target_for_coverage_gcovr_html( diff --git a/tests/unit/C/services/storage/postgres/plugins/common/CMakeLists.txt b/tests/unit/C/services/storage/postgres/plugins/common/CMakeLists.txt index 299ea45828..cd2447a805 100644 --- a/tests/unit/C/services/storage/postgres/plugins/common/CMakeLists.txt +++ b/tests/unit/C/services/storage/postgres/plugins/common/CMakeLists.txt @@ -23,6 +23,9 @@ set(common_sources "../../../../../../../../C/common/string_utils.cpp") # Link runTests with what we want to test and the GTest and pthread library add_executable(RunTests ${test_sources} ${common_sources} tests.cpp) +#setting BOOST_COMPONENTS to use pthread library only +set(BOOST_COMPONENTS thread) +find_package(Boost 1.53.0 COMPONENTS ${BOOST_COMPONENTS} REQUIRED) 
target_link_libraries(RunTests ${GTEST_LIBRARIES} pthread) setup_target_for_coverage_gcovr_html( diff --git a/tests/unit/python/fledge/common/storage_client/data/payload_modifier_set_where.json b/tests/unit/python/fledge/common/storage_client/data/payload_modifier_set_where.json new file mode 100644 index 0000000000..b2cc594ce7 --- /dev/null +++ b/tests/unit/python/fledge/common/storage_client/data/payload_modifier_set_where.json @@ -0,0 +1,11 @@ +{ + "values": { + "value": "token_expiration" + }, + "where": { + "column": "token", + "condition": "=", + "value": "TOKEN" + }, + "modifier": ["allowzero"] +} \ No newline at end of file diff --git a/tests/unit/python/fledge/common/storage_client/test_payload_builder.py b/tests/unit/python/fledge/common/storage_client/test_payload_builder.py index b1c01498ce..49a831f682 100644 --- a/tests/unit/python/fledge/common/storage_client/test_payload_builder.py +++ b/tests/unit/python/fledge/common/storage_client/test_payload_builder.py @@ -516,6 +516,14 @@ def test_update_set_where_payload(self, input_set, input_where, input_table, exp res = PayloadBuilder().SET(value=input_set).WHERE(input_where).UPDATE_TABLE(input_table).payload() assert expected == json.loads(res) + @pytest.mark.parametrize("input_set, input_where, input_modifier, expected", [ + ("token_expiration", ["token", "=", "TOKEN"], ["allowzero"], + _payload("data/payload_modifier_set_where.json")), + ]) + def test_modifier_with_set_where_payload(self, input_set, input_where, input_modifier, expected): + res = PayloadBuilder().SET(value=input_set).WHERE(input_where).MODIFIER(input_modifier).payload() + assert expected == json.loads(res) + @pytest.allure.feature("unit") @pytest.allure.story("payload_builder") diff --git a/tests/unit/python/fledge/common/test_configuration_manager.py b/tests/unit/python/fledge/common/test_configuration_manager.py index 3bdab1d8bb..c26546d62f 100644 --- a/tests/unit/python/fledge/common/test_configuration_manager.py +++ b/tests/unit/python/fledge/common/test_configuration_manager.py @@ -40,9 +40,9 @@ def test_supported_validate_type_strings(self): assert sorted(expected_types) == _valid_type_strings def test_supported_optional_items(self): - assert 10 == len(_optional_items) - assert ['deprecated', 'displayName', 'length', 'mandatory', 'maximum', 'minimum', 'order', 'readonly', - 'rule', 'validity'] == _optional_items + assert 11 == len(_optional_items) + assert ['deprecated', 'displayName', 'group', 'length', 'mandatory', 'maximum', 'minimum', 'order', + 'readonly', 'rule', 'validity'] == _optional_items def test_constructor_no_storage_client_defined_no_storage_client_passed( self, reset_singleton): @@ -852,6 +852,225 @@ async def test__merge_category_vals_no_mutual_items_include_original(self, reset assert test_config_storage is not c_return_value assert test_config_new is not test_config_storage + p1 = {ITEM_NAME: {"description": "test description val", "type": "string", "default": "test default val", + "value": "test value val", "readonly": "true"}} + p2 = {ITEM_NAME: {"description": "test description val", "type": "string", "default": "test default val", + "value": "test value val", "readonly": "false", "order": 3}} + p3 = {ITEM_NAME: {"description": "test description val", "type": "string", "default": "test default val", + "value": "test value val", "readonly": "true", "order": 3, "length": 80}} + p4 = {ITEM_NAME: {"description": "test description val", "type": "integer", "default": "1", "minimum": 0, + "maximum": 5}} + p5 = {"test_item_name_storage": 
{"description": "", "type": "integer", "default": "10", "value": "100", + "minimum": 10, "maximum": 100, "order": 1, "displayName": "test"}} + p6 = {"test_item_name_storage": {"description": "", "type": "integer", 'default': "3", "value": "100", + "rule": "value < 200", "order": 1}} + + @pytest.mark.parametrize("idx, new_config, keep_original_items", [ + (1, p1, False), (2, p2, False), (3, p3, False), (4, p4, False), (5, p5, False), (6, p6, False), + (1, p1, True), (2, p2, True), (3, p3, True), (4, p4, True), (5, p5, True), (6, p6, True), + ]) + async def test__merge_category_vals_with_optional_attributes(self, reset_singleton, idx, new_config, + keep_original_items): + def verify_data_ignore_original_items(): + assert len(actual_result) == 1 + actual = list(actual_result.values())[0] + if idx == 1: + assert ITEM_NAME in actual_result + assert 'test_item_name_storage' not in actual_result + assert len(actual) == 5 + assert actual['description'] == "test description val" + assert actual['type'] == "string" + assert actual['default'] == "test default val" + assert actual['value'] == "test value val" + assert actual["readonly"] == "true" + elif idx == 2: + assert ITEM_NAME in actual_result + assert 'test_item_name_storage' not in actual_result + assert len(actual) == 6 + assert actual['description'] == "test description val" + assert actual['type'] == "string" + assert actual['default'] == "test default val" + assert actual['value'] == "test value val" + assert actual["readonly"] == "false" + assert actual['order'] == 3 + elif idx == 3: + assert ITEM_NAME in actual_result + assert 'test_item_name_storage' not in actual_result + assert len(actual) == 7 + assert actual['description'] == "test description val" + assert actual['type'] == "string" + assert actual['default'] == "test default val" + assert actual['value'] == "test value val" + assert actual["readonly"] == "true" + assert actual['order'] == 3 + assert actual['length'] == 80 + elif idx == 4: + assert ITEM_NAME in actual_result + assert 'test_item_name_storage' not in actual_result + assert len(actual) == 6 + assert actual['description'] == "test description val" + assert actual['type'] == "integer" + assert actual['default'] == "1" + assert actual['value'] == "1" + assert actual['minimum'] == 0 + assert actual['maximum'] == 5 + elif idx == 5: + assert ITEM_NAME not in actual_result + assert 'test_item_name_storage' in actual_result + assert len(actual) == 8 + assert actual['description'] == "" + assert actual['type'] == "integer" + assert actual['default'] == "10" + assert actual['value'] == "100" + assert actual["minimum"] == 10 + assert actual['maximum'] == 100 + assert actual['order'] == 1 + assert actual['displayName'] == "test" + elif idx == 6: + assert ITEM_NAME not in actual_result + assert 'test_item_name_storage' in actual_result + assert len(actual) == 6 + assert actual['description'] == "" + assert actual['type'] == "integer" + assert actual['default'] == "3" + assert actual['value'] == "100" + assert actual["order"] == 1 + assert actual['rule'] == "value < 200" + + def verify_data_include_original_items(): + assert len(actual_result) == 2 + item_name1 = ITEM_NAME + item_name2 = 'test_item_name_storage' + assert item_name1 in actual_result + assert item_name2 in actual_result + actual_item1 = actual_result[item_name1] + actual_item2 = actual_result[item_name2] + if idx == 1: + assert len(actual_item1) == 5 + assert actual_item1['description'] == "test description val" + assert actual_item1['type'] == "string" + assert 
actual_item1['default'] == "test default val" + assert actual_item1['value'] == "test value val" + assert actual_item1["readonly"] == "true" + assert len(actual_item2) == 7 + assert actual_item2['description'] == "" + assert actual_item2['type'] == "integer" + assert actual_item2['default'] == "10" + assert actual_item2['value'] == "100" + assert actual_item2["minimum"] == 20 + assert actual_item2["maximum"] == 200 + assert actual_item2["order"] == 1 + elif idx == 2: + assert len(actual_item1) == 6 + assert actual_item1['description'] == "test description val" + assert actual_item1['type'] == "string" + assert actual_item1['default'] == "test default val" + assert actual_item1['value'] == "test value val" + assert actual_item1["readonly"] == "false" + assert actual_item1["order"] == 3 + assert len(actual_item2) == 7 + assert actual_item2['description'] == "" + assert actual_item2['type'] == "integer" + assert actual_item2['default'] == "10" + assert actual_item2['value'] == "100" + assert actual_item2["minimum"] == 20 + assert actual_item2["maximum"] == 200 + assert actual_item2["order"] == 1 + elif idx == 3: + assert len(actual_item1) == 7 + assert actual_item1['description'] == "test description val" + assert actual_item1['type'] == "string" + assert actual_item1['default'] == "test default val" + assert actual_item1['value'] == "test value val" + assert actual_item1["readonly"] == "true" + assert actual_item1["order"] == 3 + assert actual_item1['length'] == 80 + assert len(actual_item2) == 7 + assert actual_item2['description'] == "" + assert actual_item2['type'] == "integer" + assert actual_item2['default'] == "10" + assert actual_item2['value'] == "100" + assert actual_item2["minimum"] == 20 + assert actual_item2["maximum"] == 200 + assert actual_item2["order"] == 1 + elif idx == 4: + assert len(actual_item1) == 6 + assert actual_item1['description'] == "test description val" + assert actual_item1['type'] == "integer" + assert actual_item1['default'] == "1" + assert actual_item1['value'] == "1" + assert actual_item1["minimum"] == 0 + assert actual_item1['maximum'] == 5 + assert len(actual_item2) == 7 + assert actual_item2['description'] == "" + assert actual_item2['type'] == "integer" + assert actual_item2['default'] == "10" + assert actual_item2['value'] == "100" + assert actual_item2["minimum"] == 20 + assert actual_item2["maximum"] == 200 + assert actual_item2["order"] == 1 + elif idx == 5: + assert len(actual_item1) == 6 + assert actual_item1['description'] == "test description val" + assert actual_item1['type'] == "string" + assert actual_item1['default'] == "test default val" + assert actual_item1['value'] == "test value val" + assert actual_item1["readonly"] == "false" + assert actual_item1["order"] == 2 + assert len(actual_item2) == 8 + assert actual_item2['description'] == "" + assert actual_item2['type'] == "integer" + assert actual_item2['default'] == "10" + assert actual_item2['value'] == "100" + assert actual_item2["minimum"] == 10 + assert actual_item2["maximum"] == 100 + assert actual_item2["order"] == 1 + assert actual_item2["displayName"] == "test" + elif idx == 6: + assert len(actual_item1) == 6 + assert actual_item1['description'] == "test description val" + assert actual_item1['type'] == "string" + assert actual_item1['default'] == "test default val" + assert actual_item1['value'] == "test value val" + assert actual_item1["readonly"] == "false" + assert actual_item1["order"] == 2 + assert len(actual_item2) == 6 + assert actual_item2['description'] == "" + assert 
actual_item2['type'] == "integer" + assert actual_item2['default'] == "3" + assert actual_item2['value'] == "100" + assert actual_item2["order"] == 1 + assert actual_item2["rule"] == "value < 200" + + storage_client_mock = MagicMock(spec=StorageClientAsync) + c_mgr = ConfigurationManager(storage_client_mock) + storage_config = { + "test_item_name_storage": { + "description": "", + "type": "integer", + "default": "10", + "value": "100", + "minimum": 20, + "maximum": 200, + "order": 1 + }, + ITEM_NAME: { + "description": "test description val", + "type": "string", + "default": "test default val", + "value": "test value val", + "readonly": "false", + "order": 2 + } + } + actual_result = await c_mgr._merge_category_vals( + new_config, storage_config, keep_original_items=keep_original_items, category_name=CAT_NAME) + assert isinstance(actual_result, dict) + if keep_original_items: + getattr(verify_data_include_original_items, "__call__")() + else: + getattr(verify_data_ignore_original_items, "__call__")() + @pytest.mark.parametrize("payload, message", [ ((2, 'catvalue', 'catdesc'), "category_name must be a string"), (('catname', 'catvalue', 3), "category_description must be a string") @@ -3231,16 +3450,28 @@ async def async_mock(return_value): (float, 'maximum', '11.2', 'Maximum value should be greater than equal to Minimum value'), (int, 'minimum', '30', 'Minimum value should be less than equal to Maximum value'), (float, 'minimum', '50.0', 'Minimum value should be less than equal to Maximum value'), - (None, 'readonly', '1', "For catname category, entry value must be boolean for optional item name readonly; got "), - (None, 'deprecated', '1', "For catname category, entry value must be boolean for optional item name deprecated; got "), + (None, 'readonly', '1', + "For catname category, entry value must be boolean for optional item name readonly; got "), + (None, 'deprecated', '1', + "For catname category, entry value must be boolean for optional item name deprecated; got "), (None, 'rule', 2, "For catname category, entry value must be string for optional item rule; got "), - (None, 'displayName', 123, "For catname category, entry value must be string for optional item displayName; got "), - (None, 'length', '1a', "For catname category, entry value must be an integer for optional item length; got "), - (None, 'maximum', 'blah', "For catname category, entry value must be an integer or float for optional item maximum; got "), - (None, 'validity', 12, "For catname category, entry value must be string for optional item validity; got "), - (None, 'mandatory', '1', "For catname category, entry value must be boolean for optional item name mandatory; got "), + (None, 'displayName', 123, + "For catname category, entry value must be string for optional item displayName; got "), + (None, 'length', '1a', + "For catname category, entry value must be an integer for optional item length; got "), + (None, 'maximum', 'blah', + "For catname category, entry value must be an integer or float for optional item maximum; got "), + (None, 'validity', 12, + "For catname category, entry value must be string for optional item validity; got "), + (None, 'mandatory', '1', + "For catname category, entry value must be boolean for optional item name mandatory; got "), + (None, 'group', 5, + "For catname category, entry value must be string for optional item group; got "), + (None, 'group', True, + "For catname category, entry value must be string for optional item group; got ") ]) - async def 
test_set_optional_value_entry_bad_update(self, reset_singleton, _type, optional_key_name, new_value_entry, exc_msg): + async def test_set_optional_value_entry_bad_update(self, reset_singleton, _type, optional_key_name, + new_value_entry, exc_msg): async def async_mock(return_value): return return_value @@ -3258,7 +3489,7 @@ async def async_mock(return_value): storage_value_entry = {'length': '255', 'displayName': category_name, 'rule': 'value * 3 == 6', 'deprecated': 'false', 'readonly': 'true', 'type': 'string', 'order': '4', 'description': 'Test Optional', 'minimum': minimum, 'value': '13', 'maximum': maximum, - 'default': '13', 'validity': 'field X is set', 'mandatory': 'false'} + 'default': '13', 'validity': 'field X is set', 'mandatory': 'false', 'group': 'Security'} # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. if sys.version_info.major == 3 and sys.version_info.minor >= 8: diff --git a/tests/unit/python/fledge/services/core/api/test_asset_tracker_api.py b/tests/unit/python/fledge/services/core/api/test_asset_tracker_api.py index ed295552f7..9914b115cf 100644 --- a/tests/unit/python/fledge/services/core/api/test_asset_tracker_api.py +++ b/tests/unit/python/fledge/services/core/api/test_asset_tracker_api.py @@ -12,10 +12,10 @@ import pytest import sys -from fledge.services.core import routes -from fledge.services.core import connect +from fledge.common.audit_logger import AuditLogger from fledge.common.storage_client.storage_client import StorageClientAsync -from fledge.services.core.api import asset_tracker +from fledge.services.core import routes, connect +from fledge.services.core.api.asset_tracker import _logger, common_utils __author__ = "Ashish Jabble" @@ -24,6 +24,10 @@ __version__ = "${VERSION}" +async def mock_coro(return_value): + return return_value + + @pytest.allure.feature("unit") @pytest.allure.story("api", "asset-tracker") class TestAssetTracker: @@ -42,19 +46,16 @@ async def async_mock(): storage_client_mock = MagicMock(StorageClientAsync) rows = [{'asset': 'AirIntake', 'event': 'Ingest', 'fledge': 'Booth1', 'service': 'PT100_In1', - 'plugin': 'PT100', "timestamp": "2018-08-13 15:39:48.796263", "deprecatedTimestamp": "", 'data':'{}' - }, + 'plugin': 'PT100', "timestamp": "2018-08-13 15:39:48.796263", "deprecatedTimestamp": "", 'data': '{}'}, {'asset': 'AirIntake', 'event': 'Egress', 'fledge': 'Booth1', 'service': 'Display', - 'plugin': 'ShopFloorDisplay', "timestamp": "2018-08-13 16:00:00.134563", "deprecatedTimestamp": "", 'data':'{}' - } - ] + 'plugin': 'ShopFloorDisplay', "timestamp": "2018-08-13 16:00:00.134563", "deprecatedTimestamp": "", + 'data': '{}'}] payload = {'where': {'condition': '=', 'value': 1, 'column': '1'}, 'return': ['asset', 'event', 'service', 'fledge', 'plugin', {'alias': 'timestamp', 'column': 'ts', 'format': 'YYYY-MM-DD HH24:MI:SS.MS'}, {'alias': 'deprecatedTimestamp', 'column': 'deprecated_ts'}, 'data' ] } - # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. 
         _rv = await async_mock() if sys.version_info.major == 3 and sys.version_info.minor >= 8 \
             else asyncio.ensure_future(async_mock())
@@ -78,3 +79,118 @@ async def async_mock():
     ])
     async def test_get_asset_track_with_params(self, client, request_params, payload, loop):
         pass
+
+    async def test_bad_deprecate_entry(self, client):
+        result = {"message": "failed"}
+        _rv = await mock_coro(result) if sys.version_info >= (3, 8) else asyncio.ensure_future(mock_coro(result))
+        storage_client_mock = MagicMock(StorageClientAsync)
+        with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
+            with patch.object(storage_client_mock, 'query_tbl_with_payload', return_value=_rv):
+                resp = await client.put('/fledge/track/service/XXX/asset/XXX/event/XXXX')
+                assert 500 == resp.status
+
+    async def test_deprecate_entry_not_found(self, client):
+        result = {"count": 0, "rows": []}
+        _rv = await mock_coro(result) if sys.version_info >= (3, 8) else asyncio.ensure_future(mock_coro(result))
+        storage_client_mock = MagicMock(StorageClientAsync)
+        asset = "blah"
+        service = "Test"
+        event = "Ingest"
+        message = "No record found in asset tracker for given service: {} asset: {} event: {}".format(
+            service, asset, event)
+        query_payload = {"return": ["deprecated_ts"],
+                         "where": {"column": "service", "condition": "=", "value": service,
+                                   "and": {"column": "asset", "condition": "=", "value": asset,
+                                           "and": {"column": "event", "condition": "=", "value": event}}}}
+        with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
+            with patch.object(storage_client_mock, 'query_tbl_with_payload', return_value=_rv) as patch_query_tbl:
+                resp = await client.put('/fledge/track/service/{}/asset/{}/event/{}'.format(service, asset, event))
+                assert 404 == resp.status
+                assert message == resp.reason
+                result = await resp.text()
+                json_response = json.loads(result)
+                assert {"message": message} == json_response
+                args, _ = patch_query_tbl.call_args
+                assert 'asset_tracker' == args[0]
+                assert query_payload == json.loads(args[1])
+
+    async def test_already_deprecated_entry(self, client):
+        result = {'count': 1, 'rows': [{'deprecated_ts': '2022-11-18 06:11:13.657'}]}
+        _rv = await mock_coro(result) if sys.version_info >= (3, 8) else asyncio.ensure_future(mock_coro(result))
+        storage_client_mock = MagicMock(StorageClientAsync)
+        asset = "Airtake"
+        service = "Sparkplug"
+        event = "Ingest"
+        message = "'{} asset record already deprecated.'".format(asset)
+        query_payload = {"return": ["deprecated_ts"],
+                         "where": {"column": "service", "condition": "=", "value": service,
+                                   "and": {"column": "asset", "condition": "=", "value": asset,
+                                           "and": {"column": "event", "condition": "=", "value": event}}}}
+        with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
+            with patch.object(storage_client_mock, 'query_tbl_with_payload', return_value=_rv) as patch_query_tbl:
+                resp = await client.put('/fledge/track/service/{}/asset/{}/event/{}'.format(service, asset, event))
+                assert 400 == resp.status
+                assert message == resp.reason
+                result = await resp.text()
+                json_response = json.loads(result)
+                assert {"message": message} == json_response
+                args, _ = patch_query_tbl.call_args
+                assert 'asset_tracker' == args[0]
+                assert query_payload == json.loads(args[1])
+
+    @pytest.mark.parametrize("event, operator, event_list, audit_event", [
+        ("Ingest", "in", ["Ingest", "store"], "Ingest & store"),
+        ("store", "in", ["Ingest", "store"], "Ingest & store"),
+        ("Filter", "=", "Filter", "Filter"),
+ ("Egress", "=", "Egress", "Egress") + ]) + async def test_deprecate_entry(self, client, event, operator, event_list, audit_event): + asset = "Airtake" + service = "Sparkplug" + ts = "2022-11-18 14:27:25.396383+05:30" + query_payload = {"return": ["deprecated_ts"], + "where": {"column": "service", "condition": "=", "value": service, + "and": {"column": "asset", "condition": "=", "value": asset, + "and": {"column": "event", "condition": "=", "value": event}}}} + update_payload = {"values": {"deprecated_ts": ts}, + "where": {"column": "service", "condition": "=", "value": service, + "and": {"column": "asset", "condition": "=", "value": asset, + "and": {"column": "event", "condition": operator, + "value": event_list, + "and": {"column": "deprecated_ts", "condition": "isnull"}}}}} + query_result = {'count': 1, 'rows': [{'deprecated_ts': ''}]} + update_result = {"response": "updated", "rows_affected": 1} + message = "For {} event, {} asset record entry has been deprecated.".format(event, asset) + if sys.version_info >= (3, 8): + _rv = await mock_coro(query_result) + _rv2 = await mock_coro(update_result) + _rv3 = await mock_coro(None) + else: + _rv = asyncio.ensure_future(mock_coro(query_result)) + _rv2 = asyncio.ensure_future(mock_coro(update_result)) + _rv3 = asyncio.ensure_future(mock_coro(None)) + + storage_client_mock = MagicMock(StorageClientAsync) + with patch.object(connect, 'get_storage_async', return_value=storage_client_mock): + with patch.object(storage_client_mock, 'query_tbl_with_payload', return_value=_rv) as patch_query_tbl: + with patch.object(common_utils, 'local_timestamp', return_value=ts): + with patch.object(storage_client_mock, 'update_tbl', return_value=_rv2) as patch_update_tbl: + with patch.object(AuditLogger, '__init__', return_value=None): + with patch.object(AuditLogger, 'information', return_value=_rv3) as patch_audit: + with patch.object(_logger, "info") as log_info: + resp = await client.put('/fledge/track/service/{}/asset/{}/event/{}'.format( + service, asset, event)) + assert 200 == resp.status + result = await resp.text() + json_response = json.loads(result) + assert {'success': message} == json_response + assert 1 == log_info.call_count + log_info.assert_called_once_with(message) + patch_audit.assert_called_once_with( + 'ASTDP', {'asset': asset, 'event': audit_event, 'service': service}) + args, _ = patch_update_tbl.call_args + assert 'asset_tracker' == args[0] + assert update_payload == json.loads(args[1]) + args1, _ = patch_query_tbl.call_args + assert 'asset_tracker' == args1[0] + assert query_payload == json.loads(args1[1]) diff --git a/tests/unit/python/fledge/services/core/api/test_browser_assets.py b/tests/unit/python/fledge/services/core/api/test_browser_assets.py index cc456e3b3f..93c0518233 100644 --- a/tests/unit/python/fledge/services/core/api/test_browser_assets.py +++ b/tests/unit/python/fledge/services/core/api/test_browser_assets.py @@ -92,7 +92,7 @@ def client(self, app, loop, test_client): return loop.run_until_complete(test_client(app)) def test_routes_count(self, app): - assert 12 == len(app.router.resources()) + assert 14 == len(app.router.resources()) def test_routes_info(self, app): for index, route in enumerate(app.router.routes()): @@ -103,41 +103,51 @@ def test_routes_info(self, app): assert "/fledge/asset" == res_info["path"] assert str(route.handler).startswith("