diff --git a/src/analytics/db_handler.cc b/src/analytics/db_handler.cc
index 85167fd7212..87813febf6d 100644
--- a/src/analytics/db_handler.cc
+++ b/src/analytics/db_handler.cc
@@ -178,20 +178,22 @@ bool DbHandler::CreateTables() {
     GenDb::NewColVec& columns = col_list->columns_;
     columns.reserve(4);
 
+    uint64_t current_tm = UTCTimestampUsec();
+
     GenDb::NewCol *col(new GenDb::NewCol(
-        g_viz_constants.SYSTEM_OBJECT_START_TIME, UTCTimestampUsec(), 0));
+        g_viz_constants.SYSTEM_OBJECT_START_TIME, current_tm, 0));
     columns.push_back(col);
 
     GenDb::NewCol *flow_col(new GenDb::NewCol(
-        g_viz_constants.SYSTEM_OBJECT_FLOW_START_TIME, UTCTimestampUsec(), 0));
+        g_viz_constants.SYSTEM_OBJECT_FLOW_START_TIME, current_tm, 0));
     columns.push_back(flow_col);
 
     GenDb::NewCol *msg_col(new GenDb::NewCol(
-        g_viz_constants.SYSTEM_OBJECT_MSG_START_TIME, UTCTimestampUsec(), 0));
+        g_viz_constants.SYSTEM_OBJECT_MSG_START_TIME, current_tm, 0));
     columns.push_back(msg_col);
 
     GenDb::NewCol *stat_col(new GenDb::NewCol(
-        g_viz_constants.SYSTEM_OBJECT_STAT_START_TIME, UTCTimestampUsec(), 0));
+        g_viz_constants.SYSTEM_OBJECT_STAT_START_TIME, current_tm, 0));
     columns.push_back(stat_col);
 
     if (!dbif_->Db_AddColumnSync(col_list)) {
diff --git a/src/opserver/analytics_database.sandesh b/src/opserver/analytics_database.sandesh
index 2e0e3af9450..0f046215100 100644
--- a/src/opserver/analytics_database.sandesh
+++ b/src/opserver/analytics_database.sandesh
@@ -22,6 +22,7 @@ struct DatabasePurgeStats {
     3: optional u64 rows_deleted
     4: optional u64 duration
     5: string purge_status
+    6: list<string> purge_status_details
 }
 
 struct DatabasePurgeInfo {
diff --git a/src/opserver/analytics_db.py b/src/opserver/analytics_db.py
index fb72590ba18..09ee2eb3ed7 100644
--- a/src/opserver/analytics_db.py
+++ b/src/opserver/analytics_db.py
@@ -155,21 +155,26 @@ def get_analytics_db_purge_status(self, redis_list):
 
     def db_purge(self, purge_cutoff, purge_id):
         total_rows_deleted = 0 # total number of rows deleted
+        purge_error_details = []
         if (self._pool == None):
             self.connect_db()
             if not self._pool:
                 self._logger.error('Connection to AnalyticsDb has Timed out')
-                return -1
+                purge_error_details.append('Connection to AnalyticsDb has Timed out')
+                return (-1, purge_error_details)
         sysm = self._get_sysm()
         if (sysm == None):
             self._logger.error('Failed to connect SystemManager')
-            return -1
+            purge_error_details.append('Failed to connect SystemManager')
+            return (-1, purge_error_details)
         try:
             table_list = sysm.get_keyspace_column_families(COLLECTOR_KEYSPACE)
         except Exception as e:
             self._logger.error("Exception: Purge_id %s Failed to get "
                 "Analytics Column families %s" % (purge_id, e))
-            return -1
+            purge_error_details.append("Exception: Failed to get "
+                "Analytics Column families %s" % (e))
+            return (-1, purge_error_details)
 
         # delete entries from message table
         msg_table = COLLECTOR_GLOBAL_TABLE
@@ -180,7 +185,9 @@ def db_purge(self, purge_cutoff, purge_id):
         except Exception as e:
             self._logger.error("purge_id %s Failure in fetching "
                 "message table columnfamily %s" % e)
-            return -1
+            purge_error_details.append("Failure in fetching "
+                "message table columnfamily %s" % e)
+            return (-1, purge_error_details)
 
         for table in table_list:
             # purge from index tables
@@ -207,7 +214,9 @@ def db_purge(self, purge_cutoff, purge_id):
             except Exception as e:
                 self._logger.error("purge_id %s Failure in fetching "
                     "the columnfamily %s" % e)
-                return -1
+                purge_error_details.append("Failure in fetching "
+                    "the columnfamily %s" % e)
+                return (-1, purge_error_details)
             b = cf.batch()
             try:
                 # get all columns only in case of one message index table
@@ -251,11 +260,15 @@ def db_purge(self, purge_cutoff, purge_id):
                     except Exception as e:
                         self._logger.error("Exception: Purge_id %s message table "
                             "doesnot have uuid %s" % (purge_id, e))
+                        purge_error_details.append("Exception: Message table "
+                            "doesnot have uuid %s" % (e))
 
             except Exception as e:
                 self._logger.error("Exception: Purge_id:%s table:%s "
                     "error: %s" % (purge_id, table, e))
+                purge_error_details.append("Exception: Table:%s "
+                    "error: %s" % (table, e))
                 continue
 
             self._logger.info("Purge_id %s deleted %d rows from table: %s"
                 % (purge_id, per_table_deleted, table))
@@ -267,7 +280,7 @@ def db_purge(self, purge_cutoff, purge_id):
 
         self._logger.info("Purge_id %s total rows deleted: %s"
             % (purge_id, total_rows_deleted))
-        return total_rows_deleted
+        return (total_rows_deleted, purge_error_details)
     # end purge_data
 
     def get_dbusage_info(self, rest_api_port):
diff --git a/src/opserver/opserver.py b/src/opserver/opserver.py
index fb68922d133..bea789efdb8 100644
--- a/src/opserver/opserver.py
+++ b/src/opserver/opserver.py
@@ -1849,7 +1849,8 @@ def db_purge_operation(self, purge_cutoff, purge_id):
         self._analytics_db.number_of_purge_requests += 1
         purge_info.number_of_purge_requests = \
             self._analytics_db.number_of_purge_requests
-        total_rows_deleted = self._analytics_db.db_purge(purge_cutoff, purge_id)
+        total_rows_deleted, purge_stat.purge_status_details = \
+            self._analytics_db.db_purge(purge_cutoff, purge_id)
         self._analytics_db.delete_db_purge_status()
        if (total_rows_deleted > 0):
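
The hunks above change db_purge() from returning a bare int (row count, or -1
on failure) to returning a (rows_deleted, error_details) tuple, with the
details published through the new purge_status_details list field. Below is a
minimal sketch of that contract for reviewers; FakeAnalyticsDb and
FakePurgeStats are simplified, hypothetical stand-ins, not the real contrail
classes or the generated sandesh struct.

    class FakeAnalyticsDb(object):
        """Stand-in for AnalyticsDb; models only the new return contract."""

        def __init__(self, connected=True):
            self._connected = connected

        def db_purge(self, purge_cutoff, purge_id):
            purge_error_details = []  # one message appended per failure
            if not self._connected:
                purge_error_details.append('Connection to AnalyticsDb has Timed out')
                return (-1, purge_error_details)
            return (42, purge_error_details)  # pretend 42 rows were purged

    class FakePurgeStats(object):
        """Stand-in for the generated DatabasePurgeStats sandesh struct."""

        def __init__(self):
            self.rows_deleted = None
            self.purge_status_details = []  # mirrors new field 6

    stats = FakePurgeStats()
    # Unpack the tuple the same way opserver.py's db_purge_operation() now does.
    rows, stats.purge_status_details = \
        FakeAnalyticsDb(connected=False).db_purge(purge_cutoff=0, purge_id='p1')
    stats.rows_deleted = rows
    print(rows, stats.purge_status_details)
    # prints -1 and ['Connection to AnalyticsDb has Timed out']

Returning the details alongside the count (rather than keeping -1 as the only
failure signal) lets the purge status UVE report why a purge failed, not just
that it did.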