PLC_Generation/batch_workflow_processor.py
2025-09-02 11:13:29 +04:00

499 lines
19 KiB
Python

#!/usr/bin/env python3
"""
Batch PLC Generation Workflow Processor
=======================================
Processes all available Excel files in PLC Data Generator/data directory and:
1. Generates timestamped L5X files for each project
2. Sets up compilation for each generated L5X file
3. Provides detailed logging and status tracking
Expected Excel file naming patterns:
- Current: {PROJECT}_{MCM}.xlsx/xlsm (e.g., MTN6_MCM06.xlsm -> PROJECT=MTN6, MCM=MCM06)
- Legacy: IO Assignment_{PROJECT}_{MCM}_COMPLETE.xlsx/xlsm (backward compatibility)
"""
import os
import sys
import argparse
import subprocess
import re
from pathlib import Path
from datetime import datetime
from typing import List, Dict, Tuple, Optional
import glob
def get_project_root() -> Path:
    """Return the absolute, resolved directory that contains this script."""
    root = Path(__file__).parent
    return root.resolve()
def find_excel_files(data_dir: Path) -> List[Tuple[Path, str, str]]:
    """Find all Excel files matching the expected naming patterns.

    Supported patterns (both case-insensitive):
      - Legacy:  IO Assignment_{PROJECT}_{MCM}_COMPLETE.xlsx/xlsm
      - Current: {PROJECT}_{MCM}.xlsx/xlsm  (e.g. MTN6_MCM06.xlsm)

    Args:
        data_dir: Path to the PLC Data Generator/data directory

    Returns:
        List of (excel_file_path, project_prefix, mcm_name) tuples, sorted
        by (project_prefix, mcm_name) for a deterministic processing order.
        Empty list when data_dir does not exist.
    """
    # Pattern 1: IO Assignment_PROJECT_MCM_COMPLETE.xlsx/xlsm (legacy format).
    # Anchored with ^ for consistency with pattern2 (re.match anchors at the
    # start anyway, so this is purely cosmetic).
    pattern1 = re.compile(r'^IO Assignment_([A-Z0-9]+)_([A-Z0-9]+)_COMPLETE\.(xlsx|xlsm)$', re.IGNORECASE)
    # Pattern 2: PROJECT_MCM.xlsx/xlsm (current format)
    pattern2 = re.compile(r'^([A-Z0-9]+)_(MCM\d+)\.(xlsx|xlsm)$', re.IGNORECASE)
    excel_files = []
    if not data_dir.exists():
        print(f"Error: Data directory not found: {data_dir}")
        return []
    # Check both .xlsx and .xlsm files
    for extension in ("*.xlsx", "*.xlsm"):
        for excel_file in data_dir.glob(extension):
            filename = excel_file.name
            # Legacy format takes precedence; fall back to the current format.
            match = pattern1.match(filename) or pattern2.match(filename)
            if match:
                excel_files.append((excel_file, match.group(1).upper(), match.group(2).upper()))
            else:
                # BUG FIX: the original message printed the literal text
                # "(unknown)" instead of interpolating the offending filename.
                print(f" ⚠️ Skipping unrecognized file pattern: {filename}")
    # Sort by project and MCM name for consistent processing order
    excel_files.sort(key=lambda x: (x[1], x[2]))
    return excel_files
def check_project_configuration(project_root: Path, project_prefix: str) -> bool:
    """Check whether the per-project configuration files are present.

    Args:
        project_root: Root directory of the project
        project_prefix: Project prefix (e.g., MTN6, SAT9)

    Returns:
        True only when both {prefix}_generator_config.json and
        {prefix}_zones.json exist directly under project_root.
    """
    required = (
        project_root / f"{project_prefix}_generator_config.json",
        project_root / f"{project_prefix}_zones.json",
    )
    return all(config.exists() for config in required)
def run_complete_workflow(excel_file: Path, project_prefix: str, mcm_name: str,
                          project_root: Path, verbose: bool = False) -> Dict:
    """Run the complete workflow for a single Excel file.

    Spawns ``Routines Generator/complete_workflow.py`` as a subprocess with
    compilation disabled (batch mode) and classifies the outcome.

    Args:
        excel_file: Path to the Excel file
        project_prefix: Project prefix (e.g., MTN6)
        mcm_name: MCM name (e.g., MCM06)
        project_root: Root directory of the project
        verbose: Whether to enable verbose logging

    Returns:
        Dictionary with processing results. ``status`` is one of
        'success', 'failed', 'error', 'timeout', or 'exception';
        ``l5x_generated`` reports whether an L5X file was actually produced.
    """
    project_name = f"{project_prefix}_{mcm_name}"
    # Path to the complete workflow script
    complete_workflow_script = project_root / "Routines Generator" / "complete_workflow.py"
    if not complete_workflow_script.exists():
        return {
            'status': 'error',
            'project_name': project_name,
            'excel_file': str(excel_file),
            'error': f"Complete workflow script not found: {complete_workflow_script}"
        }
    try:
        # Assemble the subprocess command line.
        command = [
            sys.executable,
            str(complete_workflow_script),
            "--project", project_prefix,
            "--project-name", project_name,
            "--excel-file", str(excel_file),
            "--no-compilation",  # Skip compilation by default in batch mode
        ]
        if verbose:
            command.append("--verbose")
        print(f"Running workflow for {project_name}...")
        print(f" Excel file: {excel_file.name}")
        print(f" Command: {' '.join(command)}")
        # Run the complete workflow
        completed = subprocess.run(
            command,
            cwd=project_root,
            capture_output=True,
            text=True,
            timeout=1800  # 30 minute timeout
        )
        # The marker line in stdout proves an L5X was actually generated —
        # this can be true even when the script exits non-zero.
        l5x_generated = "OK: Generated project:" in completed.stdout
        outcome = {
            'project_name': project_name,
            'excel_file': str(excel_file),
            'stdout': completed.stdout,
            'stderr': completed.stderr,
            'l5x_generated': l5x_generated  # Track actual L5X generation
        }
        if completed.returncode == 0:
            print(f" ✅ SUCCESS: {project_name}")
            outcome['status'] = 'success'
        else:
            print(f" ❌ FAILED: {project_name} (exit code: {completed.returncode})")
            # A non-zero exit still counts as success if the L5X was produced.
            outcome['status'] = 'success' if l5x_generated else 'failed'
            outcome['exit_code'] = completed.returncode
        return outcome
    except subprocess.TimeoutExpired:
        print(f" ⏰ TIMEOUT: {project_name} (exceeded 30 minutes)")
        return {
            'status': 'timeout',
            'project_name': project_name,
            'excel_file': str(excel_file),
            'error': 'Process timed out after 30 minutes'
        }
    except Exception as exc:
        print(f" 💥 EXCEPTION: {project_name} - {exc}")
        return {
            'status': 'exception',
            'project_name': project_name,
            'excel_file': str(excel_file),
            'error': str(exc)
        }
def setup_batch_compilation(project_root: Path, results: List[Dict], verbose: bool = False) -> List[Dict]:
    """Setup compilation for all successfully generated L5X files.

    For every result with status 'success', locates the newest
    ``{project}_*.L5X`` file under ``generated_projects/{project}/`` and
    invokes the compilation manager on it.

    Args:
        project_root: Root directory of the project
        results: List of workflow results
        verbose: Whether to enable verbose logging

    Returns:
        List of compilation setup result dicts (empty when there is nothing
        to do or the compilation manager script is missing).
    """
    successes = [entry for entry in results if entry['status'] == 'success']
    if not successes:
        print("No successful L5X generations found - skipping compilation setup")
        return []
    print(f"\nSetting up compilation for {len(successes)} successful projects...")
    # Path to compilation manager
    l5x2acd_dir = project_root / "L5X2ACD Compiler"
    compilation_manager_script = l5x2acd_dir / "compilation_manager.py"
    if not compilation_manager_script.exists():
        print(f"Error: Compilation manager not found: {compilation_manager_script}")
        return []
    generated_projects_dir = project_root / "generated_projects"
    compilation_results = []
    for entry in successes:
        project_name = entry['project_name']
        try:
            # Look for L5X files in root-level generated_projects folder
            project_folder = generated_projects_dir / project_name
            if project_folder.exists():
                l5x_candidates = list(project_folder.glob(f"{project_name}_*.L5X"))
            else:
                l5x_candidates = []
                print(f" ❌ Project folder not found: {project_folder}")
            if not l5x_candidates:
                print(f" ❌ No L5X files found for {project_name}")
                compilation_results.append({
                    'status': 'error',
                    'project_name': project_name,
                    'error': 'No L5X files found'
                })
                continue
            # The most recently modified L5X is the one to compile.
            latest_l5x = max(l5x_candidates, key=lambda f: f.stat().st_mtime)
            print(f" 📁 Setting up compilation for {project_name}")
            print(f" L5X file: {latest_l5x.name}")
            # Build compilation manager command
            command = [
                sys.executable,
                str(compilation_manager_script),
                "--project", project_name,
                "--l5x-file", str(latest_l5x)
            ]
            # Run compilation setup
            proc = subprocess.run(
                command,
                cwd=l5x2acd_dir,
                capture_output=True,
                text=True,
                timeout=120  # 2 minute timeout for setup
            )
            if proc.returncode == 0:
                print(f" ✅ Compilation setup complete for {project_name}")
                compilation_results.append({
                    'status': 'success',
                    'project_name': project_name,
                    'l5x_file': str(latest_l5x),
                    'batch_file': f"compile_{project_name}.bat"
                })
            else:
                print(f" ❌ Compilation setup failed for {project_name}")
                if verbose:
                    print(f" stdout: {proc.stdout}")
                    print(f" stderr: {proc.stderr}")
                compilation_results.append({
                    'status': 'failed',
                    'project_name': project_name,
                    'l5x_file': str(latest_l5x),
                    'error': proc.stderr or proc.stdout
                })
        except Exception as exc:
            print(f" 💥 Exception setting up compilation for {project_name}: {exc}")
            compilation_results.append({
                'status': 'exception',
                'project_name': project_name,
                'error': str(exc)
            })
    return compilation_results
def print_batch_summary(excel_files: List[Tuple], results: List[Dict],
                        compilation_results: List[Dict]) -> None:
    """Print a comprehensive summary of batch processing results.

    Args:
        excel_files: The (path, project_prefix, mcm_name) tuples processed.
        results: Per-project result dicts from run_complete_workflow().
        compilation_results: Per-project dicts from setup_batch_compilation();
            may be empty when compilation setup was skipped.
    """
    print(f"\n{'='*80}")
    print(f"BATCH PROCESSING SUMMARY")
    print(f"{'='*80}")
    # Overall statistics
    total_files = len(excel_files)
    successful_workflows = len([r for r in results if r['status'] == 'success'])
    failed_workflows = len([r for r in results if r['status'] == 'failed'])
    timeout_workflows = len([r for r in results if r['status'] == 'timeout'])
    exception_workflows = len([r for r in results if r['status'] == 'exception'])
    print(f"Excel files found: {total_files}")
    print(f"Successful L5X generations: {successful_workflows}")
    print(f"Failed workflows: {failed_workflows}")
    if timeout_workflows > 0:
        print(f"Timed out workflows: {timeout_workflows}")
    if exception_workflows > 0:
        print(f"Exception workflows: {exception_workflows}")
    if compilation_results:
        successful_compilations = len([r for r in compilation_results if r['status'] == 'success'])
        failed_compilations = len([r for r in compilation_results if r['status'] != 'success'])
        print(f"Successful compilation setups: {successful_compilations}")
        if failed_compilations > 0:
            print(f"Failed compilation setups: {failed_compilations}")
    # Successful projects
    if successful_workflows > 0:
        print(f"\n📁 SUCCESSFUL PROJECTS ({successful_workflows}):")
        for result in results:
            if result['status'] == 'success':
                print(f"{result['project_name']}")
                # Find corresponding compilation result
                comp_result = next(
                    (cr for cr in compilation_results if cr['project_name'] == result['project_name']),
                    None
                )
                if comp_result and comp_result['status'] == 'success':
                    print(f" 🔧 Compilation ready: {comp_result['batch_file']}")
    # Failed projects
    failed_results = [r for r in results if r['status'] != 'success']
    if failed_results:
        print(f"\n❌ FAILED PROJECTS ({len(failed_results)}):")
        for result in failed_results:
            print(f"{result['project_name']} ({result['status']})")
            if 'error' in result:
                print(f" Error: {result['error']}")
    # Next steps
    print(f"\n🚀 NEXT STEPS:")
    if successful_workflows > 0:
        print(f"1. Review generated L5X files in: IO Tree Configuration Generator/generated_projects/")
        if compilation_results:
            batch_files = [cr['batch_file'] for cr in compilation_results if cr['status'] == 'success']
            if batch_files:
                print(f"2. Run compilation batch files in: L5X2ACD Compiler/")
                # BUG FIX: the example previously interpolated results[0]'s
                # project name, which may belong to a failed project with no
                # batch file; reference an actually generated batch file.
                print(f" Example: {batch_files[0]}")
    if failed_workflows > 0:
        print(f"3. Review failed projects and fix any configuration issues")
        print(f"4. Re-run specific projects manually using complete_workflow.py")
    print(f"{'='*80}")
def main():
    """Main entry point for batch processing.

    Returns:
        Exit code: 0 when at least one L5X file was generated (or on
        --dry-run), 1 when no files could be processed or no L5X resulted.
    """
    parser = argparse.ArgumentParser(
        description="Batch PLC Generation Workflow Processor",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
# Process all available Excel files
python batch_workflow_processor.py
# Process with verbose logging
python batch_workflow_processor.py --verbose
# Process only specific project prefixes
python batch_workflow_processor.py --projects MTN6 SAT9
# Also setup compilation (by default, only generates L5X files)
python batch_workflow_processor.py --setup-compilation
"""
    )
    parser.add_argument('--verbose', action='store_true',
                        help='Enable verbose logging for all workflows')
    parser.add_argument('--projects', nargs='*',
                        help='Process only specific project prefixes (e.g., MTN6 SAT9)')
    parser.add_argument('--setup-compilation', action='store_true',
                        help='Also setup compilation (by default, only generates L5X files)')
    parser.add_argument('--dry-run', action='store_true',
                        help='Show what would be processed without running workflows')
    args = parser.parse_args()

    # Resolve project paths.
    project_root = get_project_root()
    data_dir = project_root / "PLC Data Generator" / "data"
    print("Batch PLC Generation Workflow Processor")
    print("=" * 50)
    print(f"Project root: {project_root}")
    print(f"Data directory: {data_dir}")

    # Discover candidate Excel files.
    discovered = find_excel_files(data_dir)
    if not discovered:
        print("No Excel files found matching the expected pattern!")
        print("Expected pattern: IO Assignment_{PROJECT}_{MCM}_COMPLETE.xlsx/xlsm")
        print(f"In directory: {data_dir}")
        return 1

    # Optional filtering by project prefix.
    if args.projects:
        wanted = [p.upper() for p in args.projects]
        discovered = [(f, p, m) for f, p, m in discovered if p in wanted]
        print(f"Filtered to projects: {', '.join(wanted)}")
    print(f"\nFound {len(discovered)} Excel files to process:")

    # Keep only projects whose configuration files are present.
    ready = []
    for excel_file, project_prefix, mcm_name in discovered:
        project_name = f"{project_prefix}_{mcm_name}"
        config_ok = check_project_configuration(project_root, project_prefix)
        status = "" if config_ok else ""
        print(f" {status} {project_name} - {excel_file.name}")
        if config_ok:
            ready.append((excel_file, project_prefix, mcm_name))
        else:
            print(f" Missing config files: {project_prefix}_generator_config.json or {project_prefix}_zones.json")
    if not ready:
        print("\nNo valid files to process (missing configuration files)")
        return 1
    skipped = len(discovered) - len(ready)
    if skipped:
        print(f"\nProceeding with {len(ready)} valid files (skipping {skipped} due to missing configs)")
    if args.dry_run:
        print(f"\nDRY RUN - would process {len(ready)} files")
        return 0

    # Run the workflow for every valid file.
    print(f"\nStarting batch processing of {len(ready)} files...")
    print(f"Timestamp: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    outcomes = []
    total = len(ready)
    for index, (excel_file, project_prefix, mcm_name) in enumerate(ready, 1):
        print(f"\n[{index}/{total}] Processing {project_prefix}_{mcm_name}")
        outcomes.append(run_complete_workflow(excel_file, project_prefix, mcm_name, project_root, args.verbose))

    # Optionally set up compilation for the successful projects.
    if args.setup_compilation:
        compilation_results = setup_batch_compilation(project_root, outcomes, args.verbose)
    else:
        compilation_results = []
        print(f"\n📁 L5X generation complete. Use compile_all_projects.bat to compile generated files.")

    # Print comprehensive summary
    print_batch_summary(ready, outcomes, compilation_results)

    # Exit code depends on whether L5X files were actually generated.
    generated = [r.get('project_name', 'unknown') for r in outcomes if r.get('l5x_generated', False)]
    not_generated = [r.get('project_name', 'unknown') for r in outcomes if not r.get('l5x_generated', False)]
    if not not_generated:
        return 0  # All L5X files generated successfully
    if generated:
        # Partial success: report both lists but still exit 0.
        print("\n⚠️ Some L5X generations failed but others succeeded:")
        print(f" ✅ Generated: {', '.join(generated)}")
        print(f" ❌ Failed: {', '.join(not_generated)}")
        return 0
    return 1  # No L5X files generated at all
# Script entry point: propagate main()'s return value as the process exit code.
if __name__ == '__main__':
    sys.exit(main())