PLC_Generation/Routines Generator/complete_workflow.py

#!/usr/bin/env python3
"""
Complete PLC Generation Workflow
Runs the entire pipeline from raw Excel to compiled ACD:
1. PLC Data Generator (raw Excel → DESC_IP_MERGED.xlsx)
2. Routines Generator (DESC_IP_MERGED.xlsx → L5X files)
3. IO Tree Generator (L5X files → Complete L5X + compilation batch script)
4. Legacy compilation check (batch script creation now handled in Step 3)
"""
from __future__ import annotations

import sys
import argparse
import subprocess
from pathlib import Path
import io
import contextlib
import re
import shutil
import json
from datetime import datetime
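# Typical invocations (file and project names here are illustrative; the flags
# match the argparse definitions in main() below):
#   python complete_workflow.py --project MTN6 --excel-file MTN6_MCM02_COMPLETE.xlsx
#   python complete_workflow.py --project-name SAT9_MCM01 --excel-file SAT9_MCM01_COMPLETE.xlsx --safety-only
#   python complete_workflow.py --list-projects
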
def get_project_paths():
    """Get standardized paths for all project components."""
    project_root = Path(__file__).parent.parent.resolve()
    return {
        'project_root': project_root,
        'data_generator': project_root / "PLC Data Generator",
        'routines_generator': project_root / "Routines Generator",
        'l5x2acd_compiler': project_root / "L5X2ACD Compiler",
        'io_tree_generator': project_root / "IO Tree Configuration Generator",
    }

def resolve_project_config_files(project_name: str, project_root: Path) -> tuple[Path, Path]:
    """Resolve configuration file paths based on project name.

    Args:
        project_name: Project name (e.g., 'MTN6_MCM02', 'SAT9_MCM01', 'CNO8_MCM01')
        project_root: Root directory of the project

    Returns:
        tuple: (generator_config_path, zones_config_path)

    Raises:
        ValueError: If the project name does not match the expected format
        FileNotFoundError: If required config files don't exist
    """
    # Extract the project prefix (MTN6, SAT9, CNO8, etc.)
    project_match = re.match(r'^([A-Z0-9]+)_', project_name.upper())
    if not project_match:
        raise ValueError(f"Invalid project name format: {project_name}. Expected format: PREFIX_MCMXX")
    project_prefix = project_match.group(1)

    # Build config file paths
    generator_config = project_root / f"{project_prefix}_generator_config.json"
    zones_config = project_root / f"{project_prefix}_zones.json"

    # Validate that both files exist
    if not generator_config.exists():
        raise FileNotFoundError(f"Generator config not found: {generator_config}")
    if not zones_config.exists():
        raise FileNotFoundError(f"Zones config not found: {zones_config}")
    return generator_config, zones_config

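# Example (hypothetical paths; assumes the MTN6 config pair exists at the project root):
#   gen_cfg, zones_cfg = resolve_project_config_files('MTN6_MCM02', Path('/repo'))
#   # gen_cfg   -> /repo/MTN6_generator_config.json
#   # zones_cfg -> /repo/MTN6_zones.json
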
def get_available_projects(project_root: Path) -> list[str]:
    """Get list of available project prefixes based on config files.

    Args:
        project_root: Root directory of the project

    Returns:
        Sorted list of available project prefixes (e.g., ['CNO8', 'MTN6', 'SAT9'])
    """
    projects = set()
    # Look for *_generator_config.json files
    for config_file in project_root.glob("*_generator_config.json"):
        match = re.match(r'^([A-Z0-9]+)_generator_config\.json$', config_file.name)
        if match:
            prefix = match.group(1)
            # Only count the project if the corresponding zones file also exists
            zones_file = project_root / f"{prefix}_zones.json"
            if zones_file.exists():
                projects.add(prefix)
    return sorted(projects)

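# Example (hypothetical root containing the MTN6 and SAT9 config pairs):
#   get_available_projects(Path('/repo'))  # -> ['MTN6', 'SAT9']
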
def resolve_boilerplate_directory(project_prefix: str, io_tree_dir: Path) -> str:
    """Resolve the boilerplate directory based on project prefix.

    Args:
        project_prefix: Project prefix (e.g., 'MTN6', 'SAT9', 'CNO8')
        io_tree_dir: IO Tree Configuration Generator directory

    Returns:
        Boilerplate directory name (e.g., 'MTN6_boilerplate'), or the default
        'boilerplate' directory when no project-specific one exists
    """
    boilerplate_dir = f"{project_prefix}_boilerplate"
    boilerplate_path = io_tree_dir / boilerplate_dir
    if boilerplate_path.exists():
        return boilerplate_dir
    # Fall back to the default boilerplate directory
    print(f"Warning: Project-specific boilerplate directory not found: {boilerplate_dir}")
    print(" Using default 'boilerplate' directory")
    return "boilerplate"

def _append_log(log_path: Path | None, header: str, body: str) -> None:
    """Append a titled section to the workflow log file (best-effort, never raises)."""
    if not log_path:
        return
    try:
        log_path.parent.mkdir(parents=True, exist_ok=True)
        with open(log_path, 'a', encoding='utf-8') as f:
            f.write(f"\n=== {header} ===\n")
            if body:
                f.write(body)
                if not body.endswith("\n"):
                    f.write("\n")
    except Exception:
        # Logging must never break the workflow itself
        pass

def run_plc_data_generator(raw_excel_file: Path, paths: dict, verbose: bool = False, log_file: Path | None = None) -> bool:
    """Run the PLC Data Generator to create DESC_IP_MERGED.xlsx."""
    data_gen_dir = paths['data_generator']
    data_gen_script = data_gen_dir / "main.py"
    if not data_gen_script.exists():
        print(f"ERROR: PLC Data Generator not found at {data_gen_script}")
        return False
    if not raw_excel_file.exists():
        print(f"ERROR: Raw Excel file not found at {raw_excel_file}")
        return False
    try:
        # Run the PLC Data Generator with the Excel file path as argument
        result = subprocess.run([
            sys.executable,
            str(data_gen_script),
            str(raw_excel_file.resolve()),  # Pass the Excel file path as argument
        ], cwd=data_gen_dir, capture_output=True, text=True)

        # Check whether core processing succeeded by looking for output files.
        # Even if a permission error occurs at the end, processing may have completed.
        # Extract the full project name from the raw Excel file (includes the project prefix).
        project_specific_name = raw_excel_file.stem.replace("_COMPLETE", "")
        # The PLC Data Generator creates files with the full project prefix (e.g., MTN6_MCM04)
        source = data_gen_dir / f"{project_specific_name}_DESC_IP_MERGED.xlsx"
        # Also check for the generic filename as a fallback
        generic_source = data_gen_dir / "DESC_IP_MERGED.xlsx"
        success_indicators = [
            "Processing complete!" in result.stdout,
            "New Excel file created:" in result.stdout,
            source.exists() or generic_source.exists(),
        ]
        # Consider the run successful if the essential files were created, even with permission errors
        if result.returncode == 0 or (any(success_indicators) and "[Errno 1] Operation not permitted" in result.stdout):
            if verbose and result.returncode != 0:
                _append_log(log_file, "Step 1 Warning", "Permission error at end of processing; core processing completed")
            if verbose:
                _append_log(log_file, "Step 1: PLC Data Generator stdout", result.stdout)
                _append_log(log_file, "Step 1: PLC Data Generator stderr", result.stderr)
            # Copy DESC_IP_MERGED.xlsx from the data generator output (it already has safety sheets).
            # Try the project-specific filename first, then the generic one.
            dest = paths['routines_generator'] / "DESC_IP_MERGED.xlsx"
            try:
                if source.exists():
                    shutil.copy2(source, dest)
                    if verbose:
                        _append_log(log_file, "Step 1 File Copy", f"Copied {source.name} to {dest}")
                    return True
                elif generic_source.exists():
                    shutil.copy2(generic_source, dest)
                    if verbose:
                        _append_log(log_file, "Step 1 File Copy", f"Copied {generic_source.name} to {dest}")
                    return True
                else:
                    if verbose:
                        _append_log(log_file, "Step 1 File Copy Error", f"Neither {source.name} nor {generic_source.name} found in {data_gen_dir}")
                    return False
            except Exception as e:
                if verbose:
                    _append_log(log_file, "Step 1 File Copy Exception", f"Failed to copy file: {e}")
                return False
        else:
            if verbose:
                _append_log(log_file, "Step 1 Error", "Data processing failed")
                _append_log(log_file, "Step 1: PLC Data Generator stdout", result.stdout)
                _append_log(log_file, "Step 1: PLC Data Generator stderr", result.stderr)
            return False
    except Exception as e:
        if verbose:
            _append_log(log_file, "Step 1 Exception", str(e))
        return False

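# Step 1 artifact naming convention, as consumed above (project name is illustrative):
#   input : MTN6_MCM04_COMPLETE.xlsx
#   output: MTN6_MCM04_DESC_IP_MERGED.xlsx (project-specific), falling back to DESC_IP_MERGED.xlsx
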
def run_routines_generator(paths: dict, project_name: str | None = None, ignore_estop1ok: bool = False, safety_only: bool = False, verbose: bool = False, log_file: Path | None = None, config_path: Path | None = None) -> bool:
    """Run the Routines Generator.

    When safety_only is True, runs safety-only generation (inputs, outputs, resets,
    estops, zones, estop_check, safety tag map). Otherwise runs the standard
    generator with DPM and other routines.

    Note: ignore_estop1ok is accepted for CLI compatibility but is not currently
    forwarded to the generator.

    Args:
        config_path: Path to the project-specific generator config file. If None, uses the default.
    """
    routines_dir = paths['routines_generator']
    try:
        # Create the project-specific output directory
        project_root = paths['project_root']
        project_output_dir = project_root / "generated_projects" / project_name
        project_output_dir.mkdir(parents=True, exist_ok=True)

        # Archive existing routine files to the 'old' folder
        old_dir = project_output_dir / "old"
        old_dir.mkdir(exist_ok=True)
        routine_files = [
            "SafetyProgram_Generated.L5X",
            "MainProgram_Generated.L5X",
            "SafetyTagMapping.txt",
        ]
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        for routine_file in routine_files:
            existing_file = project_output_dir / routine_file
            if existing_file.exists():
                old_filename = routine_file.replace('.', f'_{timestamp}.')
                shutil.move(str(existing_file), str(old_dir / old_filename))
                print(f" Archived {routine_file} to old folder")

        # Archive existing ACD files (with any timestamp suffix)
        acd_pattern = f"{project_name}*.ACD"
        for acd_file in project_output_dir.glob(acd_pattern):
            if acd_file.is_file():
                old_acd_filename = f"{acd_file.stem}_{timestamp}.ACD"
                shutil.move(str(acd_file), str(old_dir / old_acd_filename))
                print(f" Archived {acd_file.name} to old folder")
        # Also archive any backup ACD files (.acd extension)
        acd_backup_pattern = f"{project_name}*.acd"
        for acd_backup_file in project_output_dir.glob(acd_backup_pattern):
            if acd_backup_file.is_file():
                old_acd_backup_filename = f"{acd_backup_file.stem}_{timestamp}.acd"
                shutil.move(str(acd_backup_file), str(old_dir / old_acd_backup_filename))
                print(f" Archived {acd_backup_file.name} to old folder")

        # Use the provided config path or fall back to the default
        if config_path is None:
            config_path = paths['project_root'] / 'generator_config.json'

        # Create a temporary config with the project-specific output directory
        temp_config_path = routines_dir / f"temp_config_{project_name}.json"
        try:
            # Load the existing config
            with open(config_path, 'r') as f:
                config_data = json.load(f)
            # Redirect the output directory to the project-specific path
            if 'files' not in config_data:
                config_data['files'] = {}
            config_data['files']['output_dir'] = str(project_output_dir)
            # Save the temporary config
            with open(temp_config_path, 'w') as f:
                json.dump(config_data, f, indent=2)

            excel_path = routines_dir / 'DESC_IP_MERGED.xlsx'
            subcmd = 'safety' if safety_only else 'all'
            # Build the unified CLI arguments with the temporary config
            cmd_args = [
                sys.executable,
                '-m', 'src.unified_cli',
                '--config', str(temp_config_path),
                '--excel-file', str(excel_path),
            ]
            if verbose:
                cmd_args.extend(['--log-level', 'DEBUG'])
            if log_file is not None:
                cmd_args.extend(['--log-file', str(log_file)])
            cmd_args.append(subcmd)
            # Run the unified CLI
            result = subprocess.run(cmd_args, cwd=routines_dir, capture_output=True, text=True)
            if verbose:
                _append_log(log_file, "Step 2: Routines Generator stdout", result.stdout)
                if result.stderr:
                    _append_log(log_file, "Step 2: Routines Generator stderr", result.stderr)
            return result.returncode == 0
        finally:
            # Clean up the temporary config
            if temp_config_path.exists():
                temp_config_path.unlink()
    except Exception as e:
        if verbose:
            _append_log(log_file, "Step 2 Exception", str(e))
        return False

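# Sketch of the temporary config written above (all keys other than
# 'files.output_dir' pass through unchanged from the source config):
#   {"files": {"output_dir": ".../generated_projects/MTN6_MCM02", ...}, ...}
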
def run_io_tree_generator(paths: dict, project_name: str, safety_only: bool = False, verbose: bool = False, log_file: Path | None = None, boilerplate_dir: str | None = None) -> bool:
    """Run the IO Tree Configuration Generator.

    If safety_only is True, skip this step to avoid generating non-safety routines.
    """
    if safety_only:
        return True
    io_tree_dir = paths['io_tree_generator']
    enhanced_mcm_script = io_tree_dir / "enhanced_mcm_generator.py"
    # Use the merged file directly from the PLC Data Generator output
    desc_ip_file = paths['data_generator'] / "DESC_IP_MERGED.xlsx"
    if not enhanced_mcm_script.exists():
        return False
    # Zones fully removed: do not attempt to load or pass zones to the generator
    try:
        # Build command arguments
        cmd_args = [
            sys.executable,
            str(enhanced_mcm_script),
            str(desc_ip_file),
            project_name,
        ]
        # Add the boilerplate directory if specified
        if boilerplate_dir:
            cmd_args.append(boilerplate_dir)
        # Run the IO Tree Configuration Generator
        result = subprocess.run(cmd_args, cwd=io_tree_dir, capture_output=True, text=True)
        if verbose:
            _append_log(log_file, "Step 3: IO Tree Generator stdout", result.stdout)
            if result.stderr:
                _append_log(log_file, "Step 3: IO Tree Generator stderr", result.stderr)
        if result.returncode != 0:
            return False
        # L5X generation succeeded - immediately create the compilation batch script
        try:
            _create_compilation_batch_script_followup(paths, project_name, verbose, log_file)
        except Exception as e:
            if verbose:
                _append_log(log_file, "Step 3 Batch Script Creation Warning", f"Failed to create batch script: {e}")
            # Don't fail the step if batch script creation fails - the L5X was generated
        return True
    except Exception as e:
        if verbose:
            _append_log(log_file, "Step 3 Exception", str(e))
        return False

def _create_compilation_batch_script_followup(paths: dict, project_name: str, verbose: bool = False, log_file: Path | None = None) -> bool:
    """Create the compilation batch script immediately after L5X generation.

    This is called as a follow-up to successful L5X creation so that the batch script
    is available immediately, without requiring a separate compilation preparation step.
    """
    # Find the generated complete project L5X file in the project structure
    generated_projects_dir = paths['project_root'] / "generated_projects"
    if not generated_projects_dir.exists():
        return False
    # Look for L5X files in the project-specific folder
    project_folder = generated_projects_dir / project_name
    if not project_folder.exists():
        return False
    # Find timestamped L5X files and pick the most recently modified one
    l5x_files = list(project_folder.glob(f"{project_name}_*.L5X"))
    if not l5x_files:
        return False
    l5x_files.sort(key=lambda f: f.stat().st_mtime, reverse=True)
    complete_l5x = l5x_files[0]

    # Inject the SafetyTagMap from the project-specific SafetyTagMapping.txt before compilation (if available)
    try:
        project_output_dir = paths['project_root'] / "generated_projects" / project_name
        mapping_file = project_output_dir / 'SafetyTagMapping.txt'
        if mapping_file.exists():
            # Always inject silently (minimal logging)
            _inject_safety_tag_map_into_l5x(complete_l5x, mapping_file, False)
    except Exception as e:
        if verbose:
            _append_log(log_file, "Step 3 SafetyTagMap Warning", f"Failed to inject SafetyTagMap: {e}")

    # Use the dynamic compilation manager
    l5x2acd_dir = paths['project_root'] / "L5X2ACD Compiler"
    try:
        # Import and use the compilation manager
        sys.path.append(str(l5x2acd_dir))
        from compilation_manager import CompilationManager

        manager = CompilationManager(l5x2acd_dir)
        # Determine project-specific options (silent)
        options = {}
        if project_name:
            if "MCM01" in project_name.upper():
                options['enable_safety_validation'] = True
            elif "MCM04" in project_name.upper():
                options['enable_feeder_optimization'] = True
        # Set up project folder compilation (creates the batch file in the project directory).
        # Always run quietly and suppress the tool's console output.
        _buf = io.StringIO()
        with contextlib.redirect_stdout(_buf), contextlib.redirect_stderr(_buf):
            manager.setup_project_folder_compilation(
                source_l5x=complete_l5x,
                project_name=project_name or complete_l5x.stem,
                compilation_options=options,
            )
        if verbose:
            _append_log(log_file, "Step 3 Batch Script Creation", _buf.getvalue())
        return True
    except Exception as e:
        if verbose:
            _append_log(log_file, "Step 3 Batch Script Exception", str(e))
        return False

def run_l5x_to_acd_compiler(paths: dict, project_name: str, safety_only: bool = False, verbose: bool = False, log_file: Path | None = None) -> bool:
    """Legacy step for L5X2ACD compilation preparation.

    NOTE: Batch script creation is now handled as a follow-up to Step 3 (L5X generation).
    This step is redundant but kept for compatibility.

    If safety_only is True, skip this step since a full project L5X wasn't generated.
    """
    if safety_only:
        return True
    # Check whether the batch script already exists from the Step 3 follow-up
    project_root = paths['project_root']
    project_output_dir = project_root / "generated_projects" / project_name
    batch_file = project_output_dir / f"compile_{project_name}.bat"
    if batch_file.exists():
        if verbose:
            _append_log(log_file, "Step 4: Batch Script Status", f"Batch script already created in Step 3: {batch_file.name}")
        return True
    # If the batch script doesn't exist, try to create it (fallback)
    if verbose:
        _append_log(log_file, "Step 4: Batch Script Status", "Batch script not found from Step 3, creating as fallback")
    try:
        return _create_compilation_batch_script_followup(paths, project_name, verbose, log_file)
    except Exception as e:
        if verbose:
            _append_log(log_file, "Step 4 Exception", str(e))
        return False

def _inject_safety_tag_map_into_l5x(l5x_path: Path, mapping_file: Path, verbose: bool = False) -> None:
    """Inject or replace <SafetyTagMap> inside the existing Controller/SafetyInfo using text edits.

    - Preserves the original XML header exactly
    - Does not create additional SafetyInfo blocks
    - Formats SafetyTagMap on its own line between the SafetyInfo open/close tags
    """
    mapping_text = mapping_file.read_text(encoding='utf-8').strip()
    if not mapping_text:
        if verbose:
            print("SafetyTagMapping.txt is empty; skipping injection")
        return
    xml_text = l5x_path.read_text(encoding='utf-8')

    # Find the Controller block
    ctrl_match = re.search(r"<Controller\b[\s\S]*?</Controller>", xml_text)
    if not ctrl_match:
        if verbose:
            print("No <Controller> found; skipping injection")
        return
    ctrl_start, ctrl_end = ctrl_match.span()
    ctrl_text = xml_text[ctrl_start:ctrl_end]

    # Locate the first SafetyInfo (body form or self-closing form)
    m_body = re.search(r"<SafetyInfo\b([^>]*)>([\s\S]*?)</SafetyInfo>", ctrl_text)
    m_self = re.search(r"<SafetyInfo\b([^>]*)/>", ctrl_text)
    if not m_body and not m_self:
        if verbose:
            print("No <SafetyInfo> under <Controller>; skipping injection")
        return

    # Determine indentation based on the SafetyInfo line
    first_match = m_body if (m_body and (not m_self or m_body.start() < m_self.start())) else m_self
    safety_start = first_match.start()
    line_start = ctrl_text.rfind('\n', 0, safety_start)
    indent = ctrl_text[line_start + 1:safety_start] if line_start != -1 else ''
    map_line = f"\n{indent} <SafetyTagMap> {mapping_text} </SafetyTagMap>\n"

    def dedup_safety_infos(text: str) -> str:
        """Keep the first SafetyInfo block and drop any duplicates."""
        seen = False

        def repl(match: re.Match) -> str:
            nonlocal seen
            if seen:
                return ''
            seen = True
            return match.group(0)

        pat = re.compile(r"(<SafetyInfo\b[^>]*/>)|(\n?\s*<SafetyInfo\b[^>]*>[\s\S]*?</SafetyInfo>)")
        return pat.sub(repl, text)

    if m_body and (not m_self or m_body.start() < m_self.start()):
        # Replace or insert SafetyTagMap inside the existing body
        attrs = m_body.group(1)
        inner = m_body.group(2)
        if re.search(r"<SafetyTagMap>[\s\S]*?</SafetyTagMap>", inner):
            # Replace the first existing map, then remove any additional maps.
            # A lambda replacement is used so backslashes in the mapping text
            # are not interpreted as regex escape sequences.
            new_inner = re.sub(r"<SafetyTagMap>[\s\S]*?</SafetyTagMap>", lambda _: map_line.strip('\n'), inner, count=1)
            new_inner = re.sub(r"<SafetyTagMap>[\s\S]*?</SafetyTagMap>", '', new_inner)
        else:
            new_inner = map_line + inner
        new_block = f"<SafetyInfo{attrs}>{new_inner}</SafetyInfo>"
        new_ctrl_text = ctrl_text[:m_body.start()] + new_block + ctrl_text[m_body.end():]
    else:
        # Convert the self-closing form to a body containing the map
        attrs = m_self.group(1)
        new_block = f"<SafetyInfo{attrs}>{map_line}</SafetyInfo>"
        new_ctrl_text = ctrl_text[:m_self.start()] + new_block + ctrl_text[m_self.end():]

    new_ctrl_text = dedup_safety_infos(new_ctrl_text)
    new_xml = xml_text[:ctrl_start] + new_ctrl_text + xml_text[ctrl_end:]
    l5x_path.write_text(new_xml, encoding='utf-8')
    if verbose:
        print("SafetyTagMap injection OK")

def main() -> int:
    """Main entry point for the complete workflow."""
    parser = argparse.ArgumentParser(description="Complete PLC generation workflow from raw Excel to ACD")
    parser.add_argument('--excel-file', type=str, help='Raw Excel file to process')
    # Project selection
    parser.add_argument('--project', help='Project prefix (e.g., MTN6, SAT9, CNO8) - automatically selects config files')
    parser.add_argument('--project-name', help='Project name (e.g., MTN6_MCM02) - used for output naming and compatibility')
    parser.add_argument('--ignore-estop1ok', action='store_true', help='Ignore ESTOP1OK tags in safety routines generation')
    parser.add_argument('--safety-only', action='store_true', help='Generate only safety routines and safety checks')
    parser.add_argument('--no-compilation', action='store_true', help='Skip compilation preparation step (generate L5X only)')
    parser.add_argument('--verbose', action='store_true', help='Write detailed logs for each step to a file (no console spam)')
    parser.add_argument('--list-projects', action='store_true', help='List available projects and exit')
    args = parser.parse_args()

    # Get project paths
    paths = get_project_paths()

    # Handle --list-projects (allowed without --excel-file)
    if args.list_projects:
        available_projects = get_available_projects(paths['project_root'])
        print("Available projects:")
        for project in available_projects:
            generator_config = paths['project_root'] / f"{project}_generator_config.json"
            zones_config = paths['project_root'] / f"{project}_zones.json"
            boilerplate_dir = paths['io_tree_generator'] / f"{project}_boilerplate"
            boilerplate_status = "OK" if boilerplate_dir.exists() else "MISSING"
            print(f" {project:<6} - Config: {generator_config.name}, Zones: {zones_config.name}, Boilerplate: {boilerplate_status}")
        if not available_projects:
            print(" No projects found. Expected files: PREFIX_generator_config.json and PREFIX_zones.json")
        return 0

    # Resolve project configuration
    generator_config_path = None
    zones_config_path = None
    project_name = args.project_name  # Use the provided project name if given
    if args.project:
        # Project selection mode - use the project prefix to find config files
        try:
            # If no project name was provided, derive it from the Excel file name
            if not project_name:
                excel_name = Path(args.excel_file).stem if args.excel_file else ''
                mcm_match = re.search(r'(MCM\d+)', excel_name.upper())
                if mcm_match:
                    project_name = f"{args.project.upper()}_{mcm_match.group(1)}"
                else:
                    project_name = f"{args.project.upper()}_MCM01"  # Default fallback
            # Use the project prefix to find config files, but keep the provided or derived project name
            generator_config_path, zones_config_path = resolve_project_config_files(f"{args.project.upper()}_MCM01", paths['project_root'])
            print(f"Using project: {args.project.upper()}")
            print(f" Generator config: {generator_config_path.name}")
            print(f" Zones config: {zones_config_path.name}")
            print(f" Project name: {project_name}")
        except (ValueError, FileNotFoundError) as e:
            print(f"Error: {e}")
            available_projects = get_available_projects(paths['project_root'])
            if available_projects:
                print(f"Available projects: {', '.join(available_projects)}")
            else:
                print("No projects found. Run with --list-projects to see details.")
            sys.exit(1)
    elif args.project_name:
        # Backward-compatibility mode using --project-name only
        project_name = args.project_name
        # Try to auto-detect config files based on the project name
        try:
            generator_config_path, zones_config_path = resolve_project_config_files(project_name, paths['project_root'])
            print(f"Auto-detected config files for {project_name}:")
            print(f" Generator config: {generator_config_path.name}")
            print(f" Zones config: {zones_config_path.name}")
        except (ValueError, FileNotFoundError):
            # Fall back to the default config files
            print(f"Using default config files (project-specific configs not found for {project_name})")
            generator_config_path = None  # Will use the default in run_routines_generator
    else:
        # No project specified - require at least one
        print("Error: Either --project or --project-name must be specified")
        available_projects = get_available_projects(paths['project_root'])
        if available_projects:
            print(f"Available projects: {', '.join(available_projects)}")
        sys.exit(1)

    # Validate that --excel-file was provided for actual processing (not just listing)
    if not args.excel_file:
        print("Error: --excel-file is required for processing")
        parser.print_help()
        sys.exit(1)

    # Normalize the Excel file path to handle Windows-style paths in WSL
    excel_file_path = Path(args.excel_file.replace('\\', '/'))
    if not excel_file_path.is_absolute():
        excel_file_path = paths['project_root'] / excel_file_path
    args.excel_file = excel_file_path

    # Set up enhanced logging
    from src.logging_config import setup_logging, get_logger
    ts = datetime.now().strftime('%Y%m%d_%H%M%S')
    log_dir = paths['project_root'] / 'logs'
    base = project_name or 'project'
    log_file = log_dir / f"workflow_{base}_{ts}.log" if args.verbose else None
    # Configure logging with location information
    setup_logging(
        level='DEBUG' if args.verbose else 'INFO',
        console_format='human',
        log_file=log_file,
        show_module=True,
        show_location=True,
        use_colors=True,
    )
    logger = get_logger(__name__)
    logger.info("PLC Generation Workflow started", excel_file=str(args.excel_file), project_name=args.project_name)
    print("PLC Generation Workflow")

    # Step 1: Process raw Excel data
    print("Step 1: Data processing ...", end=" ")
    logger.info("Starting data processing step")
    try:
        ok = run_plc_data_generator(args.excel_file, paths, verbose=args.verbose, log_file=log_file)
        print("OK" if ok else "FAIL")
        if not ok:
            logger.error("Data processing failed")
            if not args.verbose:
                print("(details suppressed; re-run with --verbose)")
            sys.exit(1)
        logger.info("Data processing completed successfully")
    except Exception as e:
        logger.exception("Data processing step failed with exception", error=str(e))
        print("FAIL")
        sys.exit(1)

    # Step 2: Generate L5X programs (Routines Generator)
    print("Step 2: Routine generation ...", end=" ")
    logger.info("Starting routine generation step")
    try:
        ok = run_routines_generator(paths, project_name, args.ignore_estop1ok, args.safety_only, verbose=args.verbose, log_file=log_file, config_path=generator_config_path)
        print("OK" if ok else "FAIL")
        if not ok:
            logger.error("Routine generation failed")
            if not args.verbose:
                print("(details suppressed; re-run with --verbose)")
            sys.exit(1)
        logger.info("Routine generation completed successfully")
    except Exception as e:
        logger.exception("Routine generation step failed with exception", error=str(e))
        print("FAIL")
        sys.exit(1)

    # Step 3: Generate the complete project L5X (IO Tree Generator) + create the batch script
    if args.safety_only:
        print("Step 3: IO tree generation ... SKIPPED")
    else:
        print("Step 3: IO tree generation + batch script ...", end=" ")
        # Determine the boilerplate directory based on the project
        boilerplate_dir = None
        if args.project:
            boilerplate_dir = resolve_boilerplate_directory(args.project.upper(), paths['io_tree_generator'])
        ok = run_io_tree_generator(paths, project_name, args.safety_only, verbose=args.verbose, log_file=log_file, boilerplate_dir=boilerplate_dir)
        print("OK" if ok else "FAIL")
        if not ok:
            if not args.verbose:
                print("(details suppressed; re-run with --verbose)")
            sys.exit(1)

    # Step 4: Legacy compilation preparation (batch script is now created in Step 3)
    if args.safety_only or args.no_compilation:
        skip_reason = "safety-only mode" if args.safety_only else "compilation disabled"
        print(f"Step 4: Legacy compilation check ... SKIPPED ({skip_reason})")
    else:
        print("Step 4: Legacy compilation check ...", end=" ")
        ok = run_l5x_to_acd_compiler(paths, project_name, args.safety_only, verbose=args.verbose, log_file=log_file)
        print("OK" if ok else "FAIL")
        if not ok:
            if not args.verbose:
                print("(details suppressed; re-run with --verbose)")
            print("WARNING: L5X generation completed successfully, but compilation preparation failed")
            print(" You can still use the generated L5X files for manual compilation")
            # Don't exit with failure if the L5X was generated successfully
            return 0

    print("Workflow complete")
    if args.verbose and log_file is not None:
        print(f"Logs: {log_file}")
    if project_name:
        project_root = paths['project_root']
        project_output_dir = project_root / "generated_projects" / project_name
        # Find the most recent L5X file for this project
        l5x_files = list(project_output_dir.glob(f"{project_name}_*.L5X"))
        if l5x_files:
            # Sort by modification time to get the most recent
            l5x_files.sort(key=lambda f: f.stat().st_mtime, reverse=True)
            latest_l5x = l5x_files[0]
            print(f"L5X: {latest_l5x.relative_to(paths['project_root'])}")
        # Show the compilation batch file if it exists
        if not args.no_compilation:
            batch_file = project_output_dir / f"compile_{project_name}.bat"
            if batch_file.exists():
                print(f"Compilation: {batch_file.relative_to(paths['project_root'])}")
        # Show the routine files' locations
        if not args.safety_only:
            routine_files = [
                project_output_dir / "MainProgram_Generated.L5X",
                project_output_dir / "SafetyProgram_Generated.L5X",
                project_output_dir / "SafetyTagMapping.txt",
            ]
            for routine_file in routine_files:
                if routine_file.exists():
                    print(f"Routine: {routine_file.relative_to(paths['project_root'])}")
    # Explicit success return
    return 0


if __name__ == '__main__':
    sys.exit(main())