# (File-viewer metadata removed from source capture: 465 lines, 23 KiB, Python.)
# ============================================================================
# Gateway Timer Script - Create Database Indexes, Tables, and Set Sync Status ON
# Delay: 30000 ms
#
# RUNTIME: Ignition Gateway (Python 2.7/Jython)
# NOTE: 'system' module is provided by Ignition runtime - linter warnings are expected
# DEPLOYMENT: Place in Gateway -> Scripting -> Timer Scripts
# TRIGGER: On Gateway startup with 30 second delay
#
# PURPOSE: This script performs comprehensive database initialization tasks:
#   1. Creates required database tables (scanner_reads, item_data,
#      induction_data, tipper_status, lane_data, jam_area)
#   2. Adds all necessary columns to these tables
#   3. Creates optimized indexes for alarm queries (6 indexes)
#   4. Sets synchronization status tag to indicate completion
#   5. Resets Item bData_Ready bits to False
#
# FIX(review): removed a dangling "def onStartup():" line that had no indented
# body (a paste artifact) - it was a SyntaxError, and Gateway timer scripts
# execute their code at module top level rather than via a named entry point.
# ============================================================================
import time
from datetime import datetime

# Initialize comprehensive logging system.
# Note: Logging levels are configured in Ignition Gateway settings.
# Use logger.debug() for detailed diagnostics, logger.info() for important events.
# 'system' is injected by the Ignition Gateway runtime (not an importable module).
logger = system.util.getLogger("TPA8_Startup_Script")

# Record the script start time so the completion summary can report duration.
script_start_time = time.time()
current_timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
logger.info("TPA8 STARTUP SCRIPT: Initialization started at %s" % current_timestamp)
# ----------------------------------------------------------------------------
# Helper predicates for INFORMATION_SCHEMA existence checks.
# Parameterized (prepared) queries are used instead of string-built SQL;
# the inputs here are internal constants, but the prepared form is the safer
# idiom and avoids any quoting problems.
# ----------------------------------------------------------------------------
def _count_positive(query, args, datasource):
    # Run a COUNT(*) prepared query; True when the count is present and > 0.
    result = system.db.runScalarPrepQuery(query, args, datasource)
    return int(result) > 0 if result is not None else False


def _table_exists(schema, table, datasource):
    # True when `table` is present in `schema` on `datasource`.
    return _count_positive(
        "SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES "
        "WHERE TABLE_SCHEMA = ? AND TABLE_NAME = ?",
        [schema, table], datasource)


def _column_exists(schema, table, column, datasource):
    # True when column `column` is present on `schema`.`table`.
    return _count_positive(
        "SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS "
        "WHERE TABLE_SCHEMA = ? AND TABLE_NAME = ? AND COLUMN_NAME = ?",
        [schema, table, column], datasource)


def _index_exists(schema, table, index, datasource):
    # True when index `index` exists on `schema`.`table`.
    return _count_positive(
        "SELECT COUNT(*) FROM INFORMATION_SCHEMA.STATISTICS "
        "WHERE TABLE_SCHEMA = ? AND TABLE_NAME = ? AND INDEX_NAME = ?",
        [schema, table, index], datasource)


try:
    # ========================================================================
    # PHASE 1: SYSTEM MODULE AVAILABILITY CHECK
    # ========================================================================
    # Touching system.db raises NameError outside the Ignition Gateway
    # runtime; that case is handled by the dedicated NameError handler below.
    test = system.db

    DS = "MariaDB"        # Ignition datasource name used for every query below
    DB_NAME = "ignition"  # Schema name for INFORMATION_SCHEMA lookups
    logger.info("PHASE 1: System module verification - SUCCESS (Database: %s)" % DS)

    # ========================================================================
    # PHASE 2: DATABASE CONNECTION VERIFICATION WITH RETRY
    # ========================================================================
    max_retries = 12   # 12 attempts * 5 seconds = 60 seconds max
    retry_delay = 5    # seconds between attempts
    connection_successful = False

    logger.info("PHASE 2: Waiting for database to become available (up to 60 seconds)...")

    for attempt in range(1, max_retries + 1):
        try:
            # Cheapest possible round-trip to prove the datasource is up.
            system.db.runScalarQuery("SELECT 1", DS)
            connection_successful = True
            logger.info("PHASE 2: Database connection established after %d attempt(s)" % attempt)
            break
        except Exception as conn_test_error:
            if attempt < max_retries:
                time.sleep(retry_delay)
            else:
                logger.error("PHASE 2: Database connection failed after %d attempts - %s" % (max_retries, str(conn_test_error)))

    if not connection_successful:
        # Deliberately continue: CREATE ... IF NOT EXISTS statements below are
        # idempotent and individual failures are logged per-step.
        logger.warning("PHASE 2: Proceeding with caution - connection may be unstable")

    # ========================================================================
    # PHASE 3: TABLE AND COLUMN CREATION
    # ========================================================================
    phase3_start_time = time.time()
    logger.info("PHASE 3: Starting table and column creation...")

    # Define table structures: table_name -> {column_name: column_definition}.
    # NOTE(review): these definitions must be kept in sync with the CREATE
    # TABLE DDL in table_queries below; this dict drives the ALTER TABLE
    # back-fill for pre-existing tables.
    table_structures = {
        "scanner_reads": {
            "id": "INT AUTO_INCREMENT PRIMARY KEY",
            "t_stamp": "DATETIME DEFAULT CURRENT_TIMESTAMP",
            "sScanner_Name": "VARCHAR(50) NOT NULL",
            "diScanner_bad_reads": "TINYINT(1) DEFAULT 0",
            "diScanner_comm_fault": "TINYINT(1) DEFAULT 0",
            "diScanner_good_reads": "TINYINT(1) DEFAULT 0",
            "diScanner_multi_items": "TINYINT(1) DEFAULT 0",
            "diScanner_multi_reads": "TINYINT(1) DEFAULT 0",
            "diScanner_no_data": "TINYINT(1) DEFAULT 0",
            "diScanner_no_reads": "TINYINT(1) DEFAULT 0"
        },
        "item_data": {
            "id": "INT AUTO_INCREMENT PRIMARY KEY",
            "t_stamp": "DATETIME DEFAULT CURRENT_TIMESTAMP",
            "adiSort_Code_0": "INT DEFAULT 0",
            "adiSort_Code_1": "INT DEFAULT 0",
            "adiSort_Code_2": "INT DEFAULT 0",
            "adiSort_Code_3": "INT DEFAULT 0",
            "asRequested_Dest_ID_0": "VARCHAR(20)",
            "asRequested_Dest_ID_1": "VARCHAR(20)",
            "asRequested_Dest_ID_2": "VARCHAR(20)",
            "asRequested_Dest_ID_3": "VARCHAR(20)",
            "bData_Ready": "TINYINT(1) DEFAULT 0",
            "diPLC_RecordNumber": "INT",
            "iItem_Carrier_Count": "INT",
            "iPackage_Length": "INT",
            "sActual_Dest_ID": "VARCHAR(20)",
            "sBarcode": "VARCHAR(50)",
            "sLocation_ID": "VARCHAR(20)"
        },
        "induction_data": {
            "id": "INT AUTO_INCREMENT PRIMARY KEY",
            "t_stamp": "DATETIME DEFAULT CURRENT_TIMESTAMP",
            "sInduction_Name": "VARCHAR(50) NOT NULL",
            "diTotal_Single_Carrier": "INT DEFAULT 0",
            "diTotal_Double_Carrier": "INT DEFAULT 0"
        },
        "tipper_status": {
            "id": "INT AUTO_INCREMENT PRIMARY KEY",
            "t_stamp": "DATETIME DEFAULT CURRENT_TIMESTAMP",
            "Name": "VARCHAR(50) NOT NULL",
            "Tipper_Faulted": "TINYINT(1) DEFAULT 0",
            "Dumping": "TINYINT(1) DEFAULT 0"
        },
        "lane_data": {
            "id": "INT AUTO_INCREMENT PRIMARY KEY",
            "t_stamp": "DATETIME DEFAULT CURRENT_TIMESTAMP",
            "Name": "VARCHAR(64) NOT NULL",
            "Full": "INT UNSIGNED DEFAULT 0",
            "Half_Full": "INT UNSIGNED DEFAULT 0",
            "Jam": "INT UNSIGNED DEFAULT 0",
            "No_Container": "INT UNSIGNED DEFAULT 0",
            "Block_Operation": "INT UNSIGNED DEFAULT 0",
            "Faulted": "INT UNSIGNED DEFAULT 0",
            "Disabled": "INT UNSIGNED DEFAULT 0"
        },
        # jam_area intentionally has no surrogate `id` column; rows are keyed
        # by timestamp only (see the t_stamp index in the DDL below).
        "jam_area": {
            "t_stamp": "DATETIME NOT NULL",
            "Name": "VARCHAR(255)",
            "Fluid_Inbound_Merge_1": "TINYINT(1) DEFAULT 0",
            "Fluid_Inbound_Merge_2": "TINYINT(1) DEFAULT 0",
            "Fluid_Inbound_Merge_3": "TINYINT(1) DEFAULT 0",
            "Fluid_Inbound_Merge_4": "TINYINT(1) DEFAULT 0",
            "Fluid_Inbound_Merge_5": "TINYINT(1) DEFAULT 0",
            "Fluid_Inbound_Merge_6": "TINYINT(1) DEFAULT 0",
            "Fluid_Inbound_Merge_7": "TINYINT(1) DEFAULT 0",
            "Non_Con_Merge": "TINYINT(1) DEFAULT 0",
            "NCP": "TINYINT(1) DEFAULT 0",
            "Bulk_Inbound_MCM04": "TINYINT(1) DEFAULT 0",
            "Fluid_Outbound_MCM04": "TINYINT(1) DEFAULT 0",
            "Problem_Solve_MCM04": "TINYINT(1) DEFAULT 0",
            "Bulk_Inbound_MCM05": "TINYINT(1) DEFAULT 0",
            "Fluid_Outbound_MCM05": "TINYINT(1) DEFAULT 0",
            "Problem_Solve_MCM05": "TINYINT(1) DEFAULT 0",
            "NCS1": "TINYINT(1) DEFAULT 0",
            "NCS2": "TINYINT(1) DEFAULT 0",
            "BYB": "TINYINT(1) DEFAULT 0",
            "BYA": "TINYINT(1) DEFAULT 0",
            "BYC": "TINYINT(1) DEFAULT 0",
            "BYD": "TINYINT(1) DEFAULT 0",
            "Chutes_MCM04": "TINYINT(1) DEFAULT 0",
            "Chutes_MCM05": "TINYINT(1) DEFAULT 0"
        }
    }

    # ========================================================================
    # STEP 3.1: CREATE TABLES
    # ========================================================================
    table_queries = [
        ("scanner_reads", """CREATE TABLE IF NOT EXISTS scanner_reads (
            id INT AUTO_INCREMENT PRIMARY KEY,
            t_stamp DATETIME DEFAULT CURRENT_TIMESTAMP,
            sScanner_Name VARCHAR(50) NOT NULL,
            diScanner_bad_reads TINYINT(1) DEFAULT 0,
            diScanner_comm_fault TINYINT(1) DEFAULT 0,
            diScanner_good_reads TINYINT(1) DEFAULT 0,
            diScanner_multi_items TINYINT(1) DEFAULT 0,
            diScanner_multi_reads TINYINT(1) DEFAULT 0,
            diScanner_no_data TINYINT(1) DEFAULT 0,
            diScanner_no_reads TINYINT(1) DEFAULT 0
        )"""),
        ("item_data", """CREATE TABLE IF NOT EXISTS item_data (
            id INT AUTO_INCREMENT PRIMARY KEY,
            t_stamp DATETIME DEFAULT CURRENT_TIMESTAMP,
            adiSort_Code_0 INT DEFAULT 0,
            adiSort_Code_1 INT DEFAULT 0,
            adiSort_Code_2 INT DEFAULT 0,
            adiSort_Code_3 INT DEFAULT 0,
            asRequested_Dest_ID_0 VARCHAR(20),
            asRequested_Dest_ID_1 VARCHAR(20),
            asRequested_Dest_ID_2 VARCHAR(20),
            asRequested_Dest_ID_3 VARCHAR(20),
            bData_Ready TINYINT(1) DEFAULT 0,
            diPLC_RecordNumber INT,
            iItem_Carrier_Count INT,
            iPackage_Length INT,
            sActual_Dest_ID VARCHAR(20),
            sBarcode VARCHAR(50),
            sLocation_ID VARCHAR(20)
        )"""),
        ("induction_data", """CREATE TABLE IF NOT EXISTS induction_data (
            id INT AUTO_INCREMENT PRIMARY KEY,
            t_stamp DATETIME DEFAULT CURRENT_TIMESTAMP,
            sInduction_Name VARCHAR(50) NOT NULL,
            diTotal_Single_Carrier INT DEFAULT 0,
            diTotal_Double_Carrier INT DEFAULT 0
        )"""),
        ("tipper_status", """CREATE TABLE IF NOT EXISTS tipper_status (
            id INT AUTO_INCREMENT PRIMARY KEY,
            t_stamp DATETIME DEFAULT CURRENT_TIMESTAMP,
            Name VARCHAR(50) NOT NULL,
            Tipper_Faulted TINYINT(1) DEFAULT 0,
            Dumping TINYINT(1) DEFAULT 0
        )"""),
        ("lane_data", """CREATE TABLE IF NOT EXISTS lane_data (
            id INT AUTO_INCREMENT PRIMARY KEY,
            t_stamp DATETIME DEFAULT CURRENT_TIMESTAMP,
            Name VARCHAR(64) NOT NULL,
            Full INT UNSIGNED DEFAULT 0,
            Half_Full INT UNSIGNED DEFAULT 0,
            Jam INT UNSIGNED DEFAULT 0,
            No_Container INT UNSIGNED DEFAULT 0,
            Block_Operation INT UNSIGNED DEFAULT 0,
            Faulted INT UNSIGNED DEFAULT 0,
            Disabled INT UNSIGNED DEFAULT 0
        )"""),
        ("jam_area", """CREATE TABLE IF NOT EXISTS jam_area (
            t_stamp DATETIME NOT NULL,
            Name VARCHAR(255),
            Fluid_Inbound_Merge_1 TINYINT(1) DEFAULT 0,
            Fluid_Inbound_Merge_2 TINYINT(1) DEFAULT 0,
            Fluid_Inbound_Merge_3 TINYINT(1) DEFAULT 0,
            Fluid_Inbound_Merge_4 TINYINT(1) DEFAULT 0,
            Fluid_Inbound_Merge_5 TINYINT(1) DEFAULT 0,
            Fluid_Inbound_Merge_6 TINYINT(1) DEFAULT 0,
            Fluid_Inbound_Merge_7 TINYINT(1) DEFAULT 0,
            Non_Con_Merge TINYINT(1) DEFAULT 0,
            NCP TINYINT(1) DEFAULT 0,
            Bulk_Inbound_MCM04 TINYINT(1) DEFAULT 0,
            Fluid_Outbound_MCM04 TINYINT(1) DEFAULT 0,
            Problem_Solve_MCM04 TINYINT(1) DEFAULT 0,
            Bulk_Inbound_MCM05 TINYINT(1) DEFAULT 0,
            Fluid_Outbound_MCM05 TINYINT(1) DEFAULT 0,
            Problem_Solve_MCM05 TINYINT(1) DEFAULT 0,
            NCS1 TINYINT(1) DEFAULT 0,
            NCS2 TINYINT(1) DEFAULT 0,
            BYB TINYINT(1) DEFAULT 0,
            BYA TINYINT(1) DEFAULT 0,
            BYC TINYINT(1) DEFAULT 0,
            BYD TINYINT(1) DEFAULT 0,
            Chutes_MCM04 TINYINT(1) DEFAULT 0,
            Chutes_MCM05 TINYINT(1) DEFAULT 0,
            INDEX idx_t_stamp (t_stamp)
        ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci""")
    ]

    tables_created_count = 0
    tables_existed_count = 0
    tables_failed_count = 0

    logger.info("STEP 3.1: Checking/creating %d tables..." % len(table_queries))

    for table_name, query in table_queries:
        try:
            if _table_exists(DB_NAME, table_name, DS):
                tables_existed_count += 1
                logger.info("STEP 3.1: Table '%s' already exists (OK)" % table_name)
            else:
                step_start_time = time.time()
                system.db.runUpdateQuery(query, DS)
                step_duration = time.time() - step_start_time
                tables_created_count += 1
                logger.info("STEP 3.1: Table '%s' created successfully (%.3f seconds)" % (table_name, step_duration))
        except Exception as e:
            tables_failed_count += 1
            logger.error("STEP 3.1: FAILED to create table '%s' - %s" % (table_name, str(e)))

    logger.info("STEP 3.1: Completed - Created=%d, Existed=%d, Failed=%d" % (tables_created_count, tables_existed_count, tables_failed_count))

    # ========================================================================
    # STEP 3.2: ADD COLUMNS TO TABLES
    # ========================================================================
    # Back-fills any columns missing from pre-existing tables so the schema
    # matches table_structures even after the definitions grow over time.
    total_columns_added_count = 0
    total_columns_existed_count = 0
    total_columns_failed_count = 0

    logger.info("STEP 3.2: Checking/adding columns to tables...")

    for table_name, columns in table_structures.items():
        logger.info("STEP 3.2: Checking table '%s'..." % table_name)
        table_columns_added = 0
        table_columns_existed = 0
        table_columns_failed = 0

        for column_name, column_def in columns.items():
            try:
                if _column_exists(DB_NAME, table_name, column_name, DS):
                    table_columns_existed += 1
                    total_columns_existed_count += 1
                else:
                    # Column doesn't exist, add it.
                    # PRIMARY KEY cannot appear in an ADD COLUMN definition
                    # (the table may already have one), so strip it here.
                    clean_def = column_def.replace("AUTO_INCREMENT PRIMARY KEY", "AUTO_INCREMENT").replace("PRIMARY KEY", "")
                    alter_query = "ALTER TABLE `%s` ADD COLUMN `%s` %s" % (table_name, column_name, clean_def)
                    system.db.runUpdateQuery(alter_query, DS)
                    table_columns_added += 1
                    total_columns_added_count += 1
                    logger.info("STEP 3.2: Added column '%s.%s'" % (table_name, column_name))
            except Exception as e:
                error_msg = str(e)
                # A "duplicate column" / "already exists" error means another
                # writer beat us to it; count it as existed rather than failed
                # so the summary counters reflect reality.
                if "exist" in error_msg.lower() or "duplicate" in error_msg.lower():
                    table_columns_existed += 1
                    total_columns_existed_count += 1
                else:
                    table_columns_failed += 1
                    total_columns_failed_count += 1
                    logger.error("STEP 3.2: Failed to add column '%s.%s' - %s" % (table_name, column_name, error_msg))

        logger.info("STEP 3.2: Table '%s' - Added=%d, Existed=%d, Failed=%d" % (table_name, table_columns_added, table_columns_existed, table_columns_failed))

    logger.info("STEP 3.2: Completed - Added=%d, Existed=%d, Failed=%d" % (total_columns_added_count, total_columns_existed_count, total_columns_failed_count))

    phase3_duration = time.time() - phase3_start_time
    logger.info("PHASE 3: Completed in %.3f seconds" % phase3_duration)

    # ========================================================================
    # PHASE 4: CREATE ALARM EVENT INDEXES
    # ========================================================================
    phase4_start_time = time.time()

    # (index_name, table_name, indexed column list)
    # NOTE(review): idx_alarm_events_active and idx_alarm_events_type_time_id
    # have identical column lists; one of them is likely redundant - confirm
    # against the queries that reference them before dropping either.
    index_queries = [
        ("idx_alarm_events_active", "alarm_events", "(eventtype, eventtime DESC, id)"),
        ("idx_alarm_events_type_time_id", "alarm_events", "(eventtype, eventtime DESC, id)"),
        ("idx_alarm_events_grouping", "alarm_events", "(eventtype, source, displaypath, priority, eventtime DESC, id)"),
        ("idx_alarm_events_clear", "alarm_events", "(eventtype, eventtime, eventid)"),
        ("idx_alarm_event_data_lookup", "alarm_event_data", "(id, propname)"),
        ("idx_alarm_events_eventid", "alarm_events", "(eventid, eventtype)")
    ]

    logger.info("PHASE 4: Checking/creating %d alarm event indexes..." % len(index_queries))

    success_count = 0
    error_count = 0
    existed_count = 0

    for index_name, table_name, columns in index_queries:
        try:
            if _index_exists(DB_NAME, table_name, index_name, DS):
                existed_count += 1
                success_count += 1
            else:
                # IF NOT EXISTS (MariaDB 10.1.4+) guards against a race with
                # another node creating the same index between check and create.
                query = "CREATE INDEX IF NOT EXISTS %s ON %s%s" % (index_name, table_name, columns)
                system.db.runUpdateQuery(query, DS)
                success_count += 1
        except Exception as e:
            error_count += 1
            logger.error("PHASE 4: Failed to create index '%s' - %s" % (index_name, str(e)))

    phase4_duration = time.time() - phase4_start_time
    logger.info("PHASE 4: Completed - Created=%d, Existed=%d, Failed=%d (%.3f seconds)" % (success_count - existed_count, existed_count, error_count, phase4_duration))

    # ========================================================================
    # PHASE 5: INDEX CREATION SUMMARY
    # ========================================================================
    total_success = success_count
    total_error = error_count
    total_existed = existed_count
    total_created = total_success - total_existed

    logger.info("PHASE 5: Index Summary - Alarm: Created=%d/Existed=%d/Failed=%d, Total: Created=%d/Existed=%d/Failed=%d" %
                (success_count - existed_count, existed_count, error_count,
                 total_created, total_existed, total_error))

    # ========================================================================
    # PHASE 6: SET SYNCHRONIZATION STATUS TAG (ALWAYS SET REGARDLESS OF ERRORS)
    # ========================================================================
    # NOTE(review): fixed 30 s grace period before flagging sync complete -
    # presumably allows dependent transaction groups to settle; confirm
    # whether it is still required.
    time.sleep(30)
    try:
        tag_path = "[MTN6_SCADA_TAG_PROVIDER]System/SMC/Statistics/bDataSynchronized"
        logger.info("PHASE 6: Setting synchronization status tag (regardless of errors)...")
        system.tag.writeBlocking([tag_path], [True])
        logger.info("PHASE 6: Synchronization tag set to ON - Database initialization complete")
    except Exception as tag_error:
        logger.error("PHASE 6: Failed to set synchronization tag - %s" % str(tag_error))

    # ========================================================================
    # PHASE 7: RESET ITEM bData_Ready BITS TO FALSE (0 to 100)
    # ========================================================================
    phase7_start_time = time.time()
    logger.info("PHASE 7: Resetting Item bData_Ready bits to False...")

    try:
        # Generate tag paths for Item_000 through Item_100 (inclusive) and
        # clear them in a single batched write.
        tag_paths = [
            "[MTN6_SCADA_TAG_PROVIDER]System/SMC/Statistics/Item/Item_%03d/bData_Ready" % i
            for i in range(101)
        ]
        tag_values = [False] * len(tag_paths)

        logger.info("PHASE 7: Writing False to %d bData_Ready tags (Item_000 to Item_100)..." % len(tag_paths))
        system.tag.writeBlocking(tag_paths, tag_values)

        phase7_duration = time.time() - phase7_start_time
        logger.info("PHASE 7: Successfully reset all bData_Ready bits to False (%.3f seconds)" % phase7_duration)
    except Exception as e:
        logger.error("PHASE 7: ERROR resetting bData_Ready bits - %s" % str(e))

    # ========================================================================
    # SCRIPT COMPLETION SUMMARY
    # ========================================================================
    script_duration = time.time() - script_start_time
    completion_timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    if total_error == 0:
        logger.info("SCRIPT COMPLETE: SUCCESS - Finished at %s, Duration: %.3f seconds, Indexes: %d created, %d existed" %
                    (completion_timestamp, script_duration, total_created, total_existed))
    else:
        logger.warning("SCRIPT COMPLETE: COMPLETED WITH ERRORS - Finished at %s, Duration: %.3f seconds, %d failures" %
                       (completion_timestamp, script_duration, total_error))

except NameError as e:
    # Raised by the Phase 1 probe when the Ignition-provided 'system' module
    # is absent, i.e. the script is running outside the Gateway.
    error_type = type(e).__name__
    logger.error("=" * 80)
    logger.error("CRITICAL ERROR: SYSTEM MODULE NOT AVAILABLE")
    logger.error("=" * 80)
    logger.error("Error type: %s" % error_type)
    logger.error("Error message: %s" % str(e))
    logger.error("")
    logger.error("This script requires Ignition Gateway runtime environment")
    logger.error("The 'system' module is provided by Ignition and is not available")
    logger.error("Please ensure this script is running in Ignition Gateway context")
    logger.error("=" * 80)

except Exception as e:
    # Catch-all boundary handler: log full context, then re-raise so the
    # failure is visible to the Gateway's own error reporting.
    error_type = type(e).__name__
    script_duration = time.time() - script_start_time if 'script_start_time' in locals() else 0

    logger.error("=" * 80)
    logger.error("CRITICAL ERROR: UNEXPECTED EXCEPTION")
    logger.error("=" * 80)
    logger.error("Error type: %s" % error_type)
    logger.error("Error message: %s" % str(e))
    logger.error("Script execution time before error: %.3f seconds" % script_duration)
    logger.error("")
    logger.error("An unexpected error occurred during script execution")
    logger.error("Please review the error details above and check:")
    logger.error("  - Database connectivity")
    logger.error("  - Database permissions")
    logger.error("  - SQL syntax validity")
    logger.error("  - Ignition Gateway logs for additional context")
    logger.error("=" * 80)

    # Re-raise the exception to ensure it's visible in Ignition logs
    raise