Working version, at least on Windows

This commit is contained in:
ilia.gurielidze@autStand.com 2025-04-10 18:50:47 +04:00
parent a3dd4511c7
commit bef2f78da4
47 changed files with 4127 additions and 4007 deletions

View File

@ -1,2 +0,0 @@
ProjectName: MTN6
RepoURL: http://192.168.5.191:3000/LCI/MTN6.git

@ -1 +0,0 @@
Subproject commit 456de12cca56c09bc1881660b163ac3b5dff593a

View File

@ -1,2 +0,0 @@
ProjectName: MTN61
RepoURL: http://192.168.5.191:3000/LCI/MTN6.git

@ -1 +0,0 @@
Subproject commit 456de12cca56c09bc1881660b163ac3b5dff593a

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,2 @@
ProjectName: MTN6_Test
RepoURL: http://192.168.5.191:3000/ilia-gurielidze-autstand/MTN6_SCADA.git

@ -0,0 +1 @@
Subproject commit 0280b9ed18b899617fe507ac161be04780386d65

View File

@ -0,0 +1,2 @@
ProjectName: MTN6_Test1
RepoURL: http://192.168.5.191:3000/ilia-gurielidze-autstand/MTN6_SCADA.git

@ -0,0 +1 @@
Subproject commit 0280b9ed18b899617fe507ac161be04780386d65

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -8,6 +8,9 @@ from flask import Flask, render_template, jsonify, Response, request # Add reque
from werkzeug.utils import secure_filename # For securing filenames
from concurrent.futures import ThreadPoolExecutor # Import ThreadPoolExecutor
import shutil # Import shutil for directory cleanup on errors
import csv # Add csv import
import io # Add io import
import stat # Needed for onerror handler
# Import configurations and new modules
from . import config
@ -51,6 +54,33 @@ data_updated_event = threading.Event() # Event signals ANY project update
MAX_INITIAL_CHECK_WORKERS = 5 # Adjust as needed
MAX_PERIODIC_CHECK_WORKERS = 5 # Adjust as needed
# --- Helper Function for shutil.rmtree ---
def handle_remove_readonly(func, path, exc_info):
    """Error handler for shutil.rmtree.

    If the failure was a permission error (typically a read-only file,
    e.g. Git object files under .git on Windows), clear the read-only
    attribute and retry the failed operation. Any other error — or a
    failed retry — re-raises the original exception.

    Usage: shutil.rmtree(path, onerror=handle_remove_readonly)

    Args:
        func: The function that raised (e.g. os.remove, os.rmdir).
        path: The path being processed when the error occurred.
        exc_info: The (type, value, traceback) tuple from sys.exc_info().
    """
    exc_type, exc_value, _ = exc_info
    # Match PermissionError rather than `errno == 5`: on Windows an
    # Access Denied failure surfaces as PermissionError with errno 13
    # (EACCES) and winerror 5 — errno 5 is EIO, so the old check never
    # matched the read-only case this handler was written for.
    if issubclass(exc_type, PermissionError):
        print(f"Permission error deleting {path}. Attempting to change permissions...")
        try:
            # Make the file writable, then retry the operation that failed.
            os.chmod(path, stat.S_IWRITE)
            func(path)
            print(f"Successfully deleted {path} after changing permissions.")
        except Exception as e:
            print(f"Failed to change permissions or delete {path} after permission error: {e}")
            # Retry failed too — surface the original error to the caller.
            raise exc_value
    else:
        # Not a permission problem we can fix; propagate unchanged.
        raise exc_value
# --- Core Logic Orchestration (Per-Project) ---
def update_progress_data(project_name):
@ -121,7 +151,7 @@ def set_status(project_name, message):
# --- Git Repo Handling (Per-Project) ---
def check_and_update_repo(project_name):
"""Checks and updates the Git repository for a specific project, minimizing lock contention."""
"""Checks and updates the Git repository for a specific project based on remote changes."""
global project_last_commit, project_status # Reference global dicts
repo_path = utils.get_repo_path(project_name)
@ -129,14 +159,13 @@ def check_and_update_repo(project_name):
branch = config.BRANCH # Assuming global for now
did_update = False # Flag to track if files were actually updated
initial_hash = None
with repo_lock: # Briefly lock to get initial hash
initial_hash = project_last_commit.get(project_name)
previous_remote_hash = None
with repo_lock: # Briefly lock to get previously stored remote hash
previous_remote_hash = project_last_commit.get(project_name)
try:
project_base_path = utils.get_project_base_path(project_name)
if not os.path.exists(project_base_path):
# Use set_status which handles locking
set_status(project_name, f"Error: Project directory not found: {project_base_path}")
return False # Cannot proceed
@ -148,99 +177,115 @@ def check_and_update_repo(project_name):
if not repo_existed:
print(f"[{project_name}] Cloning repository {repo_url} into {repo_path}...")
set_status(project_name, "Cloning repository...")
# --- Clone happens OUTSIDE lock ---
try:
# Clone happens OUTSIDE lock
git.Repo.clone_from(repo_url, repo_path, branch=branch)
repo = git.Repo(repo_path)
new_commit_hash = repo.head.commit.hexsha
# After clone, get the initial REMOTE commit hash
# No fetch needed immediately after clone, origin/<branch> should be set
current_remote_hash = repo.commit(f'origin/{branch}').hexsha
with repo_lock: # Lock ONLY to update shared state
project_last_commit[project_name] = new_commit_hash
print(f"[{project_name}] Initial clone complete. Commit: {new_commit_hash}")
did_update = True
project_last_commit[project_name] = current_remote_hash # Store remote hash
print(f"[{project_name}] Initial clone complete. Remote commit: {current_remote_hash}")
did_update = True # Cloned, so files are "updated" from nothing
except git.GitCommandError as clone_err:
set_status(project_name, f"Error cloning repository: {clone_err}")
print(f"[{project_name}] Git clone error: {clone_err}")
# Ensure commit state reflects error if needed
with repo_lock:
if project_last_commit.get(project_name) is None:
project_last_commit[project_name] = "Clone Error"
return False # Indicate no update occurred
# --- End Clone ---
return False
else:
# --- Fetch/Pull Logic ---
# --- Fetch/Pull Logic for Existing Repo ---
repo = git.Repo(repo_path)
current_local_commit = repo.head.commit.hexsha
# Ensure initial hash is set if missing (brief lock)
with repo_lock:
if project_last_commit.get(project_name) is None:
project_last_commit[project_name] = current_local_commit
initial_hash = current_local_commit # Update local var too
print(f"[{project_name}] Fetching updates from remote...")
origin = repo.remotes.origin
# --- Fetch happens OUTSIDE lock ---
try:
fetch_info = origin.fetch()
# Log fetch details if needed (can be verbose)
# for info in fetch_info:
# print(f"[{project_name}] Fetched {info.ref}: {info.flags}")
except git.GitCommandError as fetch_err:
set_status(project_name, f"Error fetching remote: {fetch_err}")
print(f"[{project_name}] Git fetch error: {fetch_err}")
return False # No update occurred
# --- End Fetch ---
# --- Check commits (brief lock) ---
# --- Check commits (compare remote against previously seen remote) ---
current_remote_commit = None
pull_needed = False
reset_needed = False
state_was_missing = (previous_remote_hash is None)
try:
# Must read remote commit *after* fetch
# Get current remote commit *after* fetch
current_remote_commit = repo.commit(f'origin/{branch}').hexsha
# Check if pull is needed inside the try block after getting remote commit
if current_local_commit != current_remote_commit:
pull_needed = True
if state_was_missing:
print(f"[{project_name}] Previous remote commit state missing. Baseline: {current_remote_commit}")
# Set the initial baseline in the state
with repo_lock:
project_last_commit[project_name] = current_remote_commit
previous_remote_hash = current_remote_commit # Update local var
# Check if local needs alignment with this *initial* remote baseline
current_local_commit = repo.head.commit.hexsha
if current_local_commit != current_remote_commit:
print(f"[{project_name}] Local hash {current_local_commit} differs from initial remote baseline. Resetting.")
reset_needed = True
else:
print(f"[{project_name}] Local hash matches initial remote baseline. No reset needed initially.")
# Compare current remote with previously stored remote hash (which is now guaranteed to exist)
elif current_remote_commit != previous_remote_hash:
reset_needed = True
print(f"[{project_name}] Remote commit changed. Previous: {previous_remote_hash[:7]}, Current: {current_remote_commit[:7]}. Resetting.")
else:
print(f"[{project_name}] Remote commit unchanged: {current_remote_commit[:7]}")
except git.GitCommandError as commit_err:
set_status(project_name, f"Error accessing remote branch origin/{branch}: {commit_err}")
print(f"[{project_name}] Error accessing remote branch: {commit_err}")
return False # Cannot compare/pull
# --- End Check commits ---
print(f"[{project_name}] Local commit: {current_local_commit}, Remote commit (origin/{branch}): {current_remote_commit}")
if pull_needed:
print(f"[{project_name}] New commit detected! Pulling changes...")
set_status(project_name, "Pulling updates...")
# --- Pull happens OUTSIDE lock ---
if reset_needed:
print(f"[{project_name}] Forcing local repo to match remote commit: {current_remote_commit[:7]}...")
set_status(project_name, f"Syncing to remote commit {current_remote_commit[:7]}...")
# --- Reset happens OUTSIDE lock ---
try:
pull_info = origin.pull()
new_commit_hash = repo.head.commit.hexsha # Get hash after pull
with repo_lock: # Lock ONLY to update shared state
project_last_commit[project_name] = new_commit_hash
print(f"[{project_name}] Pull successful. New commit: {new_commit_hash}")
did_update = True
# Set status AFTER successful pull (set_status handles lock & event)
set_status(project_name, f"Pull successful. New commit: {new_commit_hash[:7]}")
except git.GitCommandError as pull_err:
# Set status on pull error (set_status handles lock & event)
set_status(project_name, f"Error pulling repository: {pull_err}")
print(f"[{project_name}] Git pull error: {pull_err}")
# Revert shared state hash if pull failed? Safest is to keep the pre-pull local commit.
with repo_lock:
project_last_commit[project_name] = current_local_commit # Revert to known local state before pull attempt
# Keep did_update = False
# --- End Pull ---
else:
print(f"[{project_name}] No new commits detected.")
# Update status only if it wasn't an error before (set_status handles lock)
current_status = ""
with repo_lock: # Need lock to safely read current status
current_status = project_status.get(project_name, "")
# Use git reset --hard to force alignment
repo.git.reset('--hard', f'origin/{branch}')
# Verify reset success - check local commit hash AFTER reset
new_local_hash = repo.head.commit.hexsha
if new_local_hash == current_remote_commit:
with repo_lock: # Lock ONLY to update shared state
project_last_commit[project_name] = current_remote_commit
print(f"[{project_name}] Reset successful. Local is now at remote commit: {current_remote_commit[:7]}")
did_update = True
set_status(project_name, f"Sync successful. Local matches: {current_remote_commit[:7]}")
else:
# This should ideally not happen after a successful reset --hard
print(f"[{project_name}] CRITICAL ERROR: Reset --hard finished, but local hash {new_local_hash[:7]} doesn't match target remote hash {current_remote_commit[:7]}. Manual intervention likely required.")
# Update state with actual hash, but flag critical error
with repo_lock:
project_last_commit[project_name] = new_local_hash
did_update = True # Files likely changed, but state is inconsistent
set_status(project_name, f"CRITICAL SYNC ERROR: Local at {new_local_hash[:7]}, expected {current_remote_commit[:7]}. Re-analysis triggered.")
# Set status to indicate no changes were found
no_change_msg = f"Checked repo at {time.strftime('%Y-%m-%d %H:%M:%S')}. No changes."
# Call set_status even if no changes, it handles event logic internally
# This ensures the timestamp updates in the UI status bar via SSE if the message differs
except git.GitCommandError as reset_err:
set_status(project_name, f"Error resetting repository: {reset_err}")
print(f"[{project_name}] Git reset --hard error: {reset_err}")
# Do NOT update project_last_commit on error, keep the old remote hash
# did_update remains False
# --- End Reset ---
else:
# No reset needed, remote commit hasn't changed since last check
print(f"[{project_name}] No reset needed.")
# Set status to indicate check completed without changes (set_status handles lock/event)
no_change_msg = f"Checked repo at {time.strftime('%Y-%m-%d %H:%M:%S')}. No changes detected on remote."
set_status(project_name, no_change_msg)
# --- End Fetch/Pull Logic ---
# --- End Fetch/Reset Logic ---
# --- Run analysis IF repo was updated (outside lock) ---
if did_update:
@ -249,33 +294,24 @@ def check_and_update_repo(project_name):
except git.InvalidGitRepositoryError:
msg = f"Error: Directory '{repo_path}' exists but is not a valid Git repository. Consider deleting it and restarting."
set_status(project_name, msg) # Handles lock
set_status(project_name, msg)
print(f"[{project_name}] {msg}")
with repo_lock: # Lock to update commit state
project_last_commit[project_name] = "Invalid Repository"
except git.GitCommandError as e:
# General Git command error (if not caught above)
msg = f"Git command error: {e}"
set_status(project_name, msg) # Handles lock
set_status(project_name, msg)
print(f"[{project_name}] {msg}")
# Try to set commit hash state even on error (brief lock)
with repo_lock:
if project_last_commit.get(project_name) is None: # Only set if not already set (e.g., by failed pull)
try:
if os.path.exists(os.path.join(repo_path, ".git")):
repo = git.Repo(repo_path)
project_last_commit[project_name] = repo.head.commit.hexsha
else:
project_last_commit[project_name] = "Error (No repo)"
except Exception:
project_last_commit[project_name] = "Error reading commit"
# If state is missing, set it to generic error. Avoid overwriting specific errors like Invalid Repo.
if project_last_commit.get(project_name) is None:
project_last_commit[project_name] = "Git Command Error"
except Exception as e:
# Catch-all for other unexpected errors
msg = f"Unexpected error checking repository: {e}"
set_status(project_name, msg) # Handles lock
print(f"[{project_name}] {msg}") # Log stack trace for unexpected errors
with repo_lock: # Lock to update commit state
set_status(project_name, msg)
print(f"[{project_name}] {msg}", exc_info=True) # Log stack trace for unexpected errors
with repo_lock:
if project_last_commit.get(project_name) is None:
project_last_commit[project_name] = "Error checking repo"
@ -661,7 +697,44 @@ def add_project():
manifest_filename = "manifest.csv"
manifest_save_path = os.path.join(project_base_path, manifest_filename)
print(f"Saving manifest file to: {manifest_save_path}")
manifest_file.save(manifest_save_path)
# manifest_file.save(manifest_save_path) # OLD WAY
# --- NEW: Read, check first row, and write ---
try:
# Decode the stream directly, assuming common encodings or trying utf-8 first
try:
stream = io.StringIO(manifest_file.stream.read().decode('utf-8-sig')) # Handle BOM
except UnicodeDecodeError:
print(f"Warning: Could not decode manifest as UTF-8. Trying latin-1.")
manifest_file.stream.seek(0) # Reset stream pointer
stream = io.StringIO(manifest_file.stream.read().decode('latin-1')) # Fallback
reader = csv.reader(stream)
header = next(reader)
first_data_row = next(reader, None) # Read the first data row if it exists
# Check if the first data row is empty or contains only whitespace
is_first_row_empty = False
if first_data_row is not None:
is_first_row_empty = all(not cell or cell.isspace() for cell in first_data_row)
# Write the file, skipping the first data row if it was empty
with open(manifest_save_path, 'w', newline='', encoding='utf-8') as outfile: # Use utf-8 for writing
writer = csv.writer(outfile)
writer.writerow(header) # Always write header
if not is_first_row_empty and first_data_row is not None:
writer.writerow(first_data_row) # Write the first row if not empty
# Write the rest of the rows
writer.writerows(reader)
print(f"Manifest processed. First data row {'skipped' if is_first_row_empty else 'kept'}.")
except Exception as process_err:
print(f"Error processing/writing manifest CSV: {process_err}")
# Clean up directories on this specific error as well
shutil.rmtree(project_base_path, ignore_errors=True)
return jsonify(success=False, message=f"Error processing manifest file: {process_err}"), 500
# --- End NEW ---
except Exception as e:
print(f"Error saving manifest file for {safe_project_name}: {e}")
# Clean up created directories on error?
@ -787,7 +860,43 @@ def upload_manifest(project_name):
manifest_filename = "manifest.csv" # Standard name
manifest_save_path = os.path.join(project_base_path, manifest_filename)
print(f"Saving/Overwriting manifest file at: {manifest_save_path}")
manifest_file.save(manifest_save_path)
# manifest_file.save(manifest_save_path) # OLD WAY
# --- NEW: Read, check first row, and write ---
try:
# Decode the stream directly, assuming common encodings or trying utf-8 first
try:
stream = io.StringIO(manifest_file.stream.read().decode('utf-8-sig')) # Handle BOM
except UnicodeDecodeError:
print(f"Warning: Could not decode manifest as UTF-8. Trying latin-1.")
manifest_file.stream.seek(0) # Reset stream pointer
stream = io.StringIO(manifest_file.stream.read().decode('latin-1')) # Fallback
reader = csv.reader(stream)
header = next(reader)
first_data_row = next(reader, None) # Read the first data row if it exists
# Check if the first data row is empty or contains only whitespace
is_first_row_empty = False
if first_data_row is not None:
is_first_row_empty = all(not cell or cell.isspace() for cell in first_data_row)
# Write the file, skipping the first data row if it was empty
with open(manifest_save_path, 'w', newline='', encoding='utf-8') as outfile: # Use utf-8 for writing
writer = csv.writer(outfile)
writer.writerow(header) # Always write header
if not is_first_row_empty and first_data_row is not None:
writer.writerow(first_data_row) # Write the first row if not empty
# Write the rest of the rows
writer.writerows(reader)
print(f"Manifest processed. First data row {'skipped' if is_first_row_empty else 'kept'}.")
except Exception as process_err:
print(f"Error processing/writing updated manifest CSV: {process_err}")
# Set status to reflect this specific error
set_status(safe_project_name, f"Error processing manifest: {process_err}")
return jsonify(success=False, message=f"Error processing manifest file: {process_err}"), 500
# --- End NEW ---
# Update status and signal for UI update
set_status(safe_project_name, f"Manifest file updated. Re-analysis pending.")
@ -830,8 +939,15 @@ def delete_project(project_name):
try:
if os.path.isdir(project_base_path):
# THE DANGEROUS PART: Delete the entire directory tree
shutil.rmtree(project_base_path)
print(f"Successfully deleted directory: {project_base_path}")
# Use the onerror handler to deal with potential read-only files in .git
try:
shutil.rmtree(project_base_path, onerror=handle_remove_readonly)
print(f"Successfully deleted directory: {project_base_path}")
except Exception as e:
# Catch potential errors from the rmtree or the handler itself
print(f"Error during shutil.rmtree with handler: {e}")
raise # Re-raise to be caught by the outer exception handler
else:
# This shouldn't happen if it's tracked, but good to check
print(f"Warning: Project '{safe_project_name}' was tracked but directory not found: {project_base_path}")

View File

@ -66,15 +66,15 @@ function sseProcessUpdate(fullData) {
uiUpdateStatusBar(selectedProjectName, projectData.status, projectData.last_commit);
// Update main UI based on processing state
if (isProcessing(projectData.status)) { // Needs global access
console.log(`[SSE] Project ${selectedProjectName} is processing.`);
const projectIsCurrentlyProcessing = isProcessing(projectData.status);
console.log(`[SSE] Status Check: Project '${selectedProjectName}', Status: '${projectData.status}', isProcessing: ${projectIsCurrentlyProcessing}`);
if (projectIsCurrentlyProcessing) {
// If *any* project is processing, the global flag should remain true
// (or be set to true if it wasn't already)
isAnalysisGloballyActive = true;
console.log(`[State] isAnalysisGloballyActive set/kept true due to processing status: "${projectData.status}"`);
uiShowProcessingState(projectData.status);
} else {
console.log(`[SSE] Project ${selectedProjectName} is ready/error.`);
// --- Update Global Flag: Check if ANY project is still processing ---
let anyProjectStillProcessing = false;
const projectNames = Object.keys(currentProjectData);
@ -91,18 +91,22 @@ function sseProcessUpdate(fullData) {
}
// --- End Global Flag Update ---
// Added Debugging: Log before clearing and redrawing
console.log(`[SSE] Clearing processing state and triggering redraw for ${selectedProjectName}`);
uiClearProcessingState();
// Call core redraws immediately
const latestData = currentProjectData[selectedProjectName]; // Get current data
if(latestData && !isProcessing(latestData.status)) { // Double-check status
console.log("[SSE] Triggering immediate redraw after non-processing update.");
updateUIScadaCore(latestData); // Needs global access
updateUIDrawingCore(latestData); // Needs global access
updateUIConflictsCore(latestData); // Needs global access
if(projectData && !isProcessing(projectData.status)) {
console.log(`[SSE] Confirmed non-processing state for ${selectedProjectName}. Calling core redraw functions.`);
updateUIScadaCore(projectData); // Needs global access
updateUIDrawingCore(projectData); // Needs global access
updateUIConflictsCore(projectData); // Needs global access
} else {
// If state somehow changed *back* to processing (unlikely here, but safe check)
console.warn("[SSE] State seems to indicate processing immediately after non-processing update.");
uiShowProcessingState(latestData?.status || "Processing...");
// This case handles if the status somehow flipped back to processing
// between the initial check and this point (very unlikely but safe).
const latestStatus = projectData?.status || "Processing...";
console.warn(`[SSE] State unexpectedly indicates processing just before redraw for ${selectedProjectName}. Status: '${latestStatus}'. Showing processing state again.`);
uiShowProcessingState(latestStatus);
}
}
}