from __future__ import annotations

import xml.etree.ElementTree as ET

import pandas as pd

from ..utils.common import natural_sort_key

__all__ = ["create_outputs_routine"]


def create_outputs_routine(routines: ET.Element, zones_df: pd.DataFrame, sto_df: pd.DataFrame) -> None:
"""Append OUTPUTS routine (R011_OUTPUTS) grouped by zones.
For each zone in zones_df (excluding the root MCMx row), generate a rung:
XIC(EStop_MCM_OK) [XIC(EStop_<INTERLOCK>_OK) ] XIC(EStop_<ZONE>_OK) [OTE(<VFD:SO.*>), ...];
The outputs are selected from sto_df where the VFD tag falls within the
zone's [start, stop] UL range (numeric range, allowing letter suffixes).
"""
routine = ET.SubElement(routines, "Routine", Use="Target", Name="R011_OUTPUTS", Type="RLL")
rll_content = ET.SubElement(routine, "RLLContent")
def norm_zone(s: str) -> str:
return str(s).strip().replace('-', '_')
# Helper to parse tokens like UL1_13 -> (prefix='UL1', index=13)
def parse_ul_token(token: str) -> tuple[str, int] | None:
try:
parts = token.split('_')
prefix = parts[0]
num_part = parts[1]
# strip trailing letters (e.g., '10A' -> 10)
digits = ''.join(ch for ch in num_part if ch.isdigit())
return prefix, int(digits)
except Exception:
return None
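    # For example:
    #   parse_ul_token("UL1_13")  -> ("UL1", 13)
    #   parse_ul_token("UL2_10A") -> ("UL2", 10)   (letter suffix is dropped)
    #   parse_ul_token("UL1")     -> None          (no index part)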
    # Build VFD outputs catalog from sto_df
    # Map base 'ULx_y' -> list of concrete outputs to write (e.g.,
    # 'UL1_3' -> ['UL1_3_VFD1:SO.Out00Output', 'UL1_3_VFD1:SO.STOOutput'])
    base_to_outputs: dict[str, set[str]] = {}
    for _, sto_row in sto_df.iterrows():
        tag = str(sto_row.get('TAGNAME', '')).strip()
        if not tag:
            continue
        # Consider any VFD tag regardless of prefix (UL/FL/others)
        if 'VFD' not in tag:
            continue
        # Skip SI inputs - only process SO outputs
        path = str(sto_row.get('IO_PATH', '')).strip()
        if ':SI.' in path:
            continue  # Skip safety inputs, only process safety outputs
        parts = tag.split('_')
        if len(parts) < 2:
            continue
        base = f"{parts[0]}_{parts[1]}"  # e.g., UL1_3 from UL1_3_VFD1
        outputs_for_tag: set[str] = set()
        # Check if this is an actual STO device (DESCA contains "STO")
        desca = str(sto_row.get('DESCA', '')).upper()
        is_sto_device = 'STO' in desca
        if is_sto_device:
            # For actual STO devices, use explicit IO_PATH and add both Out00Output and Out01Output
            if path and ':SO.' in path:
                outputs_for_tag.add(path)
            else:
                # Fallback to constructed STO output
                outputs_for_tag.add(f"{tag}:SO.STOOutput")
            # Add both Out00Output and Out01Output for actual STO devices
            outputs_for_tag.add(f"{tag}:SO.Out00Output")
            outputs_for_tag.add(f"{tag}:SO.Out01Output")
        else:
            # For regular VFD devices, only add STOOutput
            outputs_for_tag.add(f"{tag}:SO.STOOutput")
        cell = base_to_outputs.setdefault(base, set())
        cell.update(outputs_for_tag)
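    # For example, a regular VFD row with TAGNAME 'UL2_7_VFD1' contributes
    #   'UL2_7' -> {'UL2_7_VFD1:SO.STOOutput'}
    # while a row whose DESCA contains 'STO' and whose IO_PATH is an :SO. point
    # contributes that path plus Out00Output/Out01Output for the same base.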
    # Helper to process range data from zones (same as zones.py)
    def get_zone_candidates(zone_row) -> list[str]:
        """Extract all device candidates from a zone (supporting both old start/stop and new ranges format)."""
        candidates = []
        # Check if zone has ranges field (new format)
        ranges_data = zone_row.get('ranges')
        if ranges_data and isinstance(ranges_data, list) and len(ranges_data) > 0:
            # Process multiple ranges
            for range_item in ranges_data:
                start_tok = str(range_item.get('start', '')).strip()
                stop_tok = str(range_item.get('stop', '')).strip()
                # Skip empty ranges
                if not start_tok:
                    continue
                # Handle single device case (start == stop or only start provided)
                if not stop_tok or start_tok == stop_tok:
                    candidates.append(start_tok)
                    continue
                # Handle range case (start != stop)
                bounds_s = parse_ul_token(start_tok)
                bounds_e = parse_ul_token(stop_tok)
                if bounds_s and bounds_e and bounds_s[0] == bounds_e[0]:
                    prefix = bounds_s[0]
                    lo, hi = sorted([bounds_s[1], bounds_e[1]])
                    candidates.extend([f"{prefix}_{i}" for i in range(lo, hi + 1)])
                else:
                    # If parsing fails, add both tokens individually
                    candidates.append(start_tok)
                    if stop_tok != start_tok:
                        candidates.append(stop_tok)
        else:
            # Fallback to old start/stop format
            start_tok = str(zone_row.get('start', '')).strip()
            stop_tok = str(zone_row.get('stop', '')).strip()
            if start_tok and stop_tok:
                if start_tok == stop_tok:
                    # Single device
                    candidates.append(start_tok)
                else:
                    # Range
                    bounds_s = parse_ul_token(start_tok)
                    bounds_e = parse_ul_token(stop_tok)
                    if bounds_s and bounds_e and bounds_s[0] == bounds_e[0]:
                        prefix = bounds_s[0]
                        lo, hi = sorted([bounds_s[1], bounds_e[1]])
                        candidates.extend([f"{prefix}_{i}" for i in range(lo, hi + 1)])
                    else:
                        # If parsing fails, add both tokens
                        candidates.append(start_tok)
                        candidates.append(stop_tok)
            elif start_tok:
                # Only start token provided
                candidates.append(start_tok)
        # Remove duplicates while preserving order
        seen = set()
        unique_candidates = []
        for candidate in candidates:
            if candidate not in seen:
                seen.add(candidate)
                unique_candidates.append(candidate)
        return unique_candidates
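    # For example, a zone row with ranges=[{'start': 'UL1_3', 'stop': 'UL1_5'}]
    # yields ['UL1_3', 'UL1_4', 'UL1_5']; a legacy row with start='UL2_7' and
    # an empty stop yields just ['UL2_7'].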
    # Order zones: ensure parents before children
    rows: list[dict] = []
    for _, zr in zones_df.iterrows():
        name = str(zr.get('name', '')).strip()
        if not name or name.upper().startswith('MCM'):
            continue
        # Handle both single interlock (string) and multiple interlocks (array)
        interlock_raw = zr.get('interlock', '')
        if isinstance(interlock_raw, list):
            interlocks = [str(il).strip() for il in interlock_raw if str(il).strip()]
        else:
            interlocks = [str(interlock_raw).strip()] if str(interlock_raw).strip() else []
        rows.append({
            'name': name,
            'name_norm': norm_zone(name),
            'start': str(zr.get('start', '')).strip(),
            'stop': str(zr.get('stop', '')).strip(),
            'interlock': str(zr.get('interlock', '')).strip(),  # Keep for backward compatibility
            'interlocks': interlocks,  # New multiple interlocks field
            'interlock_norm': norm_zone(zr.get('interlock', '')) if str(zr.get('interlock', '')).strip() else '',
            'interlocks_norm': [norm_zone(il) for il in interlocks],  # Normalized multiple interlocks
            'zone_row': zr,  # Store the full row for range processing
        })
    ordered: list[dict] = []
    placed = set()
    remaining = rows.copy()
    for _ in range(len(rows) + 5):
        progressed = False
        next_remaining = []
        for r in remaining:
            # Check if all interlocks are satisfied (placed) or reference MCM
            interlocks_satisfied = True
            for interlock in r['interlocks']:
                interlock_norm = norm_zone(interlock)
                if interlock and not interlock.upper().startswith('MCM') and interlock_norm not in placed:
                    interlocks_satisfied = False
                    break
            if not r['interlocks'] or interlocks_satisfied:
                ordered.append(r)
                placed.add(r['name_norm'])
                progressed = True
            else:
                next_remaining.append(r)
        remaining = next_remaining
        if not remaining or not progressed:
            ordered.extend(remaining)
            break
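    # For example, a zone interlocked on 'UL-1' is only placed once zone UL-1
    # itself has been placed; interlocks that reference MCM never gate
    # placement, and any zones left unresolved after the bounded number of
    # passes are appended as-is.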
    # Emit one rung per zone - sort zones naturally
    # The top-level EStop tag is loop-invariant, so resolve it once up front
    from ..config import get_config
    top_ok = getattr(get_config().routines, 'top_level_estop_ok_tag', 'EStop_MCM_OK')
    rung_num = 0
    for r in sorted(ordered, key=lambda x: natural_sort_key(x['name'])):
        # Get zone candidates using the new ranges-aware logic
        zone_candidates = get_zone_candidates(r['zone_row'])
        # Collect outputs for all candidates in this zone
        selected_outputs: set[str] = set()
        for candidate in zone_candidates:
            # Extract base name (e.g., ULC1_3 from ULC1_3_VFD1)
            parts = candidate.split('_')
            if len(parts) >= 2:
                candidate_base = f"{parts[0]}_{parts[1]}"
                # Enhanced matching: handle letter suffixes in VFD names
                # Check for exact match first
                if candidate_base in base_to_outputs:
                    selected_outputs.update(base_to_outputs[candidate_base])
                else:
                    # Check for VFD bases that start with the candidate (e.g., PS1_5 matches PS1_5A, PS1_5B)
                    for vfd_base, outputs in base_to_outputs.items():
                        if vfd_base.startswith(candidate_base):
                            # Additional check: ensure the suffix is just letters (not numbers)
                            suffix = vfd_base[len(candidate_base):]
                            if suffix and suffix.isalpha():
                                selected_outputs.update(outputs)
        if not selected_outputs:
            continue
        # Build rung
        rung = ET.SubElement(rll_content, 'Rung', Number=str(rung_num), Type='N')
        text = ET.SubElement(rung, 'Text')
        xics = [f"XIC({top_ok})"]
        # Add XIC conditions for all interlocks (multiple interlocks support)
        for interlock in r['interlocks']:
            if interlock and not interlock.upper().startswith('MCM'):
                xics.append(f"XIC(EStop_{norm_zone(interlock)}_OK)")
        xics.append(f"XIC(EStop_{r['name_norm']}_OK)")
        ote_list = [f"OTE({o})" for o in sorted(selected_outputs, key=natural_sort_key)]
        if len(ote_list) == 1:
            text.text = ''.join(xics) + ote_list[0] + ';'
        else:
            text.text = ''.join(xics) + '[' + ','.join(ote_list) + '];'
        rung_num += 1


# --- Plugin wrapper so modern generator can execute this routine ---
from ..plugin_system import RoutinePlugin


class OutputsRoutinePlugin(RoutinePlugin):
    name = "outputs"
    description = "Generates the R011_OUTPUTS routine"
    category = "safety"

    def can_generate(self) -> bool:
        try:
            return not self.context.data_loader.sto.empty
        except Exception:
            return False

    def generate(self) -> bool:
        create_outputs_routine(
            self.context.routines_element,
            self.context.data_loader.zones,  # zones are deprecated; returns empty DF
            self.context.data_loader.sto,
        )
        return True
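

# Example usage (a minimal sketch, not part of the module's public behavior):
# the generator is expected to call create_outputs_routine with a routines
# XML element plus zone and STO DataFrames whose columns match the .get()
# keys above -- 'name'/'interlock'/'ranges' (or 'start'/'stop') for zones_df
# and 'TAGNAME'/'IO_PATH'/'DESCA' for sto_df. Roughly:
#
#     routines = ET.Element("Routines")
#     create_outputs_routine(routines, zones_df, sto_df)
#
# The exact element name and DataFrame construction depend on the project's
# generator and data loader and are assumed here for illustration only.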