1.1.3 - Bug fixes & updates to the automation scheduler
This commit is contained in:
+32
-1
@@ -595,6 +595,16 @@ def update_settings():
|
||||
def get_automation_rules():
|
||||
try:
|
||||
rules = db.get_automation_rules()
|
||||
|
||||
# Annotate each rule with its next scheduled run time when the engine is up.
|
||||
if automation_engine is not None:
|
||||
next_run_times = automation_engine.get_next_run_times()
|
||||
for rule in rules:
|
||||
rule["next_run_at"] = next_run_times.get(rule["id"])
|
||||
else:
|
||||
for rule in rules:
|
||||
rule["next_run_at"] = None
|
||||
|
||||
return jsonify({
|
||||
"success": True,
|
||||
"rules": rules
|
||||
@@ -733,7 +743,10 @@ def run_automation_rule(rule_id):
|
||||
engine = _ensure_automation_engine()
|
||||
result = engine.run_rule_now(rule_id, dry_run=dry_run)
|
||||
if not result.get("success"):
|
||||
return jsonify(result), 404
|
||||
# Rule not found vs execution failure — surface the right status code.
|
||||
error = result.get("error", "")
|
||||
status = 404 if "not found" in error.lower() else 500
|
||||
return jsonify(result), status
|
||||
return jsonify(result)
|
||||
except Exception as e:
|
||||
logger.error(f"Error running automation rule: {e}")
|
||||
@@ -743,6 +756,24 @@ def run_automation_rule(rule_id):
|
||||
}), 500
|
||||
|
||||
|
||||
@app.route('/api/automation/logs', methods=['GET'])
def get_automation_logs():
    """Serve automation log entries as JSON.

    Query params:
        rule_id: restrict results to a single rule; an empty or absent
                 value means "all rules".
        limit:   maximum number of entries to return (default 100).

    Returns a ``{"success": True, "logs": [...]}`` payload, or a
    500 response with the error message on failure.
    """
    try:
        # An empty-string rule_id is normalized to None (no filtering).
        requested_rule = request.args.get('rule_id') or None
        max_entries = request.args.get('limit', 100, type=int)
        entries = db.get_automation_logs(rule_id=requested_rule, limit=max_entries)
        return jsonify({"success": True, "logs": entries})
    except Exception as e:
        logger.error(f"Error fetching automation logs: {e}")
        return jsonify({"success": False, "error": str(e)}), 500
|
||||
|
||||
|
||||
# ============ SCAN ENDPOINTS ============
|
||||
|
||||
@app.route('/api/scan/start', methods=['POST'])
|
||||
|
||||
@@ -35,7 +35,8 @@ class AutomationEngine:
|
||||
logger.info("Starting automation scheduler...")
|
||||
self._scheduler.start()
|
||||
self._started = True
|
||||
self.reload_rules()
|
||||
# Call the lock-free variant directly — we already hold self._lock.
|
||||
self._reload_rules_unlocked()
|
||||
logger.info("Automation scheduler successfully started.")
|
||||
|
||||
def shutdown(self):
|
||||
@@ -53,24 +54,40 @@ class AutomationEngine:
|
||||
# RULE MANAGEMENT
|
||||
# ------------------------------------------------------------------
|
||||
def reload_rules(self):
|
||||
"""Reload all automation rules from storage."""
|
||||
"""Reload all automation rules from storage (thread-safe)."""
|
||||
logger.info("Reloading automation rules from database...")
|
||||
|
||||
with self._lock:
|
||||
self._scheduler.remove_all_jobs()
|
||||
logger.debug("Cleared all scheduled jobs.")
|
||||
self._reload_rules_unlocked()
|
||||
|
||||
rules = self._load_rules()
|
||||
logger.info("Loaded %d automation rules.", len(rules))
|
||||
def _reload_rules_unlocked(self):
|
||||
"""Reload rules without acquiring the lock. Caller must hold self._lock."""
|
||||
self._scheduler.remove_all_jobs()
|
||||
logger.debug("Cleared all scheduled jobs.")
|
||||
|
||||
for rule in rules:
|
||||
logger.debug("Evaluating rule %s (enabled=%s, schedule=%s)", rule.id, rule.enabled, rule.schedule)
|
||||
rules = self._load_rules()
|
||||
logger.info("Loaded %d automation rules.", len(rules))
|
||||
|
||||
if not rule.enabled:
|
||||
logger.info("Rule %s is disabled — skipping scheduling.", rule.id)
|
||||
continue
|
||||
for rule in rules:
|
||||
logger.debug("Evaluating rule %s (enabled=%s, schedule=%s)", rule.id, rule.enabled, rule.schedule)
|
||||
|
||||
self._schedule_rule(rule)
|
||||
if not rule.enabled:
|
||||
logger.info("Rule %s is disabled — skipping scheduling.", rule.id)
|
||||
continue
|
||||
|
||||
self._schedule_rule(rule)
|
||||
|
||||
def get_next_run_times(self) -> Dict[str, Optional[str]]:
    """Return mapping of rule_id → ISO next run time (or None if not scheduled).

    Only scheduler jobs whose id carries the "automation:" prefix are
    considered; the prefix is stripped to recover the rule id. Any error
    while querying the scheduler is logged and an empty/partial mapping
    is returned rather than raising.
    """
    prefix = "automation:"
    times: Dict[str, Optional[str]] = {}
    try:
        for job in self._scheduler.get_jobs():
            if not job.id.startswith(prefix):
                continue  # unrelated job; skip
            when = job.next_run_time
            times[job.id[len(prefix):]] = when.isoformat() if when else None
    except Exception as e:
        logger.warning("Could not retrieve next run times: %s", e)
    return times
|
||||
|
||||
def run_rule_now(self, rule_id: str, dry_run: bool = False) -> dict:
|
||||
logger.info("Manual execution requested for rule %s (dry_run=%s)", rule_id, dry_run)
|
||||
|
||||
@@ -1173,6 +1173,30 @@ class DatabaseManager:
|
||||
finally:
|
||||
session.close()
|
||||
|
||||
def get_automation_logs(self, rule_id=None, limit=100):
    """Return automation log entries as plain dicts, newest first.

    Args:
        rule_id: if truthy, only entries for that rule are returned.
        limit:   maximum number of rows fetched (default 100).

    The session is always closed, even if the query raises.
    """
    session = self.get_session()
    try:
        query = session.query(AutomationLog).order_by(AutomationLog.run_at.desc())
        if rule_id:
            query = query.filter(AutomationLog.rule_id == rule_id)
        entries = []
        for entry in query.limit(limit).all():
            entries.append({
                "id": entry.id,
                "rule_id": entry.rule_id,
                "file_path": entry.file_path,
                "modified": entry.modified,
                "removed_lines": entry.removed_lines,
                "dry_run": entry.dry_run,
                "error_message": entry.error_message,
                "run_at": entry.run_at.isoformat() if entry.run_at else None,
            })
        return entries
    finally:
        session.close()
|
||||
|
||||
# ============ MAINTENANCE OPERATIONS ============
|
||||
|
||||
def clear_settings(self, keep_api_keys=False):
|
||||
|
||||
Reference in New Issue
Block a user