Removes the connection testing functionality and updates the default API port to 8729 for Mikrotik routers. Replit-Commit-Author: Agent Replit-Commit-Session-Id: 7a657272-55ba-4a79-9a2e-f1ed9bc7a528 Replit-Commit-Checkpoint-Type: full_checkpoint Replit-Commit-Event-Id: 54ecaeb2-ec77-4629-8d8d-e3bc4f663bec Replit-Commit-Screenshot-Url: https://storage.googleapis.com/screenshot-production-us-central1/449cf7c4-c97a-45ae-8234-e5c5b8d6a84f/7a657272-55ba-4a79-9a2e-f1ed9bc7a528/31VdIyL
585 lines
20 KiB
TypeScript
585 lines
20 KiB
TypeScript
import type { Express } from "express";
|
|
import { createServer, type Server } from "http";
|
|
import { storage } from "./storage";
|
|
import { insertRouterSchema, insertDetectionSchema, insertWhitelistSchema, networkAnalytics, routers } from "@shared/schema";
|
|
import { db } from "./db";
|
|
import { desc, eq } from "drizzle-orm";
|
|
|
|
export async function registerRoutes(app: Express): Promise<Server> {
|
|
// Routers
|
|
app.get("/api/routers", async (req, res) => {
|
|
try {
|
|
const routers = await storage.getAllRouters();
|
|
res.json(routers);
|
|
} catch (error) {
|
|
console.error('[DB ERROR] Failed to fetch routers:', error);
|
|
res.status(500).json({ error: "Failed to fetch routers" });
|
|
}
|
|
});
|
|
|
|
app.post("/api/routers", async (req, res) => {
|
|
try {
|
|
const validatedData = insertRouterSchema.parse(req.body);
|
|
const router = await storage.createRouter(validatedData);
|
|
res.json(router);
|
|
} catch (error) {
|
|
res.status(400).json({ error: "Invalid router data" });
|
|
}
|
|
});
|
|
|
|
app.put("/api/routers/:id", async (req, res) => {
|
|
try {
|
|
const validatedData = insertRouterSchema.parse(req.body);
|
|
const router = await storage.updateRouter(req.params.id, validatedData);
|
|
if (!router) {
|
|
return res.status(404).json({ error: "Router not found" });
|
|
}
|
|
res.json(router);
|
|
} catch (error) {
|
|
console.error('[Router UPDATE] Error:', error);
|
|
res.status(400).json({ error: "Invalid router data" });
|
|
}
|
|
});
|
|
|
|
app.delete("/api/routers/:id", async (req, res) => {
|
|
try {
|
|
const success = await storage.deleteRouter(req.params.id);
|
|
if (!success) {
|
|
return res.status(404).json({ error: "Router not found" });
|
|
}
|
|
res.json({ success: true });
|
|
} catch (error) {
|
|
res.status(500).json({ error: "Failed to delete router" });
|
|
}
|
|
});
|
|
|
|
// Network Logs
|
|
app.get("/api/logs", async (req, res) => {
|
|
try {
|
|
const limit = parseInt(req.query.limit as string) || 100;
|
|
const logs = await storage.getRecentLogs(limit);
|
|
res.json(logs);
|
|
} catch (error) {
|
|
res.status(500).json({ error: "Failed to fetch logs" });
|
|
}
|
|
});
|
|
|
|
app.get("/api/logs/ip/:ip", async (req, res) => {
|
|
try {
|
|
const limit = parseInt(req.query.limit as string) || 50;
|
|
const logs = await storage.getLogsByIp(req.params.ip, limit);
|
|
res.json(logs);
|
|
} catch (error) {
|
|
res.status(500).json({ error: "Failed to fetch logs for IP" });
|
|
}
|
|
});
|
|
|
|
// Detections
|
|
app.get("/api/detections", async (req, res) => {
|
|
try {
|
|
const limit = req.query.limit ? parseInt(req.query.limit as string) : 500;
|
|
const anomalyType = req.query.anomalyType as string | undefined;
|
|
const minScore = req.query.minScore ? parseFloat(req.query.minScore as string) : undefined;
|
|
const maxScore = req.query.maxScore ? parseFloat(req.query.maxScore as string) : undefined;
|
|
|
|
const detections = await storage.getAllDetections({
|
|
limit,
|
|
anomalyType,
|
|
minScore,
|
|
maxScore
|
|
});
|
|
res.json(detections);
|
|
} catch (error) {
|
|
console.error('[DB ERROR] Failed to fetch detections:', error);
|
|
res.status(500).json({ error: "Failed to fetch detections" });
|
|
}
|
|
});
|
|
|
|
app.get("/api/dashboard/live", async (req, res) => {
|
|
try {
|
|
const hours = parseInt(req.query.hours as string) || 72;
|
|
const stats = await storage.getLiveDashboardStats(hours);
|
|
res.json(stats);
|
|
} catch (error) {
|
|
console.error('[DB ERROR] Failed to fetch dashboard stats:', error);
|
|
res.status(500).json({ error: "Failed to fetch dashboard stats" });
|
|
}
|
|
});
|
|
|
|
app.get("/api/detections/unblocked", async (req, res) => {
|
|
try {
|
|
const detections = await storage.getUnblockedDetections();
|
|
res.json(detections);
|
|
} catch (error) {
|
|
res.status(500).json({ error: "Failed to fetch unblocked detections" });
|
|
}
|
|
});
|
|
|
|
// Whitelist
|
|
app.get("/api/whitelist", async (req, res) => {
|
|
try {
|
|
const whitelist = await storage.getAllWhitelist();
|
|
res.json(whitelist);
|
|
} catch (error) {
|
|
console.error('[DB ERROR] Failed to fetch whitelist:', error);
|
|
res.status(500).json({ error: "Failed to fetch whitelist" });
|
|
}
|
|
});
|
|
|
|
app.post("/api/whitelist", async (req, res) => {
|
|
try {
|
|
const validatedData = insertWhitelistSchema.parse(req.body);
|
|
const item = await storage.createWhitelist(validatedData);
|
|
res.json(item);
|
|
} catch (error) {
|
|
res.status(400).json({ error: "Invalid whitelist data" });
|
|
}
|
|
});
|
|
|
|
app.delete("/api/whitelist/:id", async (req, res) => {
|
|
try {
|
|
const success = await storage.deleteWhitelist(req.params.id);
|
|
if (!success) {
|
|
return res.status(404).json({ error: "Whitelist entry not found" });
|
|
}
|
|
res.json({ success: true });
|
|
} catch (error) {
|
|
res.status(500).json({ error: "Failed to delete whitelist entry" });
|
|
}
|
|
});
|
|
|
|
// Training History
|
|
app.get("/api/training-history", async (req, res) => {
|
|
try {
|
|
const limit = parseInt(req.query.limit as string) || 10;
|
|
const history = await storage.getTrainingHistory(limit);
|
|
res.json(history);
|
|
} catch (error) {
|
|
console.error('[DB ERROR] Failed to fetch training history:', error);
|
|
res.status(500).json({ error: "Failed to fetch training history" });
|
|
}
|
|
});
|
|
|
|
app.get("/api/training-history/latest", async (req, res) => {
|
|
try {
|
|
const latest = await storage.getLatestTraining();
|
|
res.json(latest || null);
|
|
} catch (error) {
|
|
res.status(500).json({ error: "Failed to fetch latest training" });
|
|
}
|
|
});
|
|
|
|
// Network Analytics
|
|
app.get("/api/analytics/recent", async (req, res) => {
|
|
try {
|
|
const days = parseInt(req.query.days as string) || 3;
|
|
const hourly = req.query.hourly === 'true';
|
|
const analytics = await storage.getRecentAnalytics(days, hourly);
|
|
res.json(analytics);
|
|
} catch (error) {
|
|
console.error('[DB ERROR] Failed to fetch recent analytics:', error);
|
|
res.status(500).json({ error: "Failed to fetch analytics" });
|
|
}
|
|
});
|
|
|
|
app.get("/api/analytics/range", async (req, res) => {
|
|
try {
|
|
const startDate = new Date(req.query.start as string);
|
|
const endDate = new Date(req.query.end as string);
|
|
const hourly = req.query.hourly === 'true';
|
|
|
|
if (isNaN(startDate.getTime()) || isNaN(endDate.getTime())) {
|
|
return res.status(400).json({ error: "Invalid date range" });
|
|
}
|
|
|
|
const analytics = await storage.getAnalyticsByDateRange(startDate, endDate, hourly);
|
|
res.json(analytics);
|
|
} catch (error) {
|
|
console.error('[DB ERROR] Failed to fetch analytics range:', error);
|
|
res.status(500).json({ error: "Failed to fetch analytics" });
|
|
}
|
|
});
|
|
|
|
// Stats
|
|
app.get("/api/stats", async (req, res) => {
|
|
try {
|
|
const routers = await storage.getAllRouters();
|
|
const detections = await storage.getAllDetections({ limit: 1000 });
|
|
const recentLogs = await storage.getRecentLogs(1000);
|
|
const whitelist = await storage.getAllWhitelist();
|
|
const latestTraining = await storage.getLatestTraining();
|
|
|
|
const blockedCount = detections.filter(d => d.blocked).length;
|
|
const criticalCount = detections.filter(d => parseFloat(d.riskScore) >= 85).length;
|
|
const highCount = detections.filter(d => parseFloat(d.riskScore) >= 70 && parseFloat(d.riskScore) < 85).length;
|
|
|
|
res.json({
|
|
routers: {
|
|
total: routers.length,
|
|
enabled: routers.filter(r => r.enabled).length
|
|
},
|
|
detections: {
|
|
total: detections.length,
|
|
blocked: blockedCount,
|
|
critical: criticalCount,
|
|
high: highCount
|
|
},
|
|
logs: {
|
|
recent: recentLogs.length
|
|
},
|
|
whitelist: {
|
|
total: whitelist.length
|
|
},
|
|
latestTraining: latestTraining
|
|
});
|
|
} catch (error) {
|
|
console.error('[DB ERROR] Failed to fetch stats:', error);
|
|
res.status(500).json({ error: "Failed to fetch stats" });
|
|
}
|
|
});
|
|
|
|
// ML Actions - Trigger training/detection on Python backend
|
|
const ML_BACKEND_URL = process.env.ML_BACKEND_URL || "http://localhost:8000";
|
|
const ML_TIMEOUT = 120000; // 2 minutes timeout
|
|
const IDS_API_KEY = process.env.IDS_API_KEY; // API Key for secure ML backend communication
|
|
|
|
// Helper to create authenticated fetch headers
|
|
const getMLBackendHeaders = () => {
|
|
const headers: HeadersInit = {
|
|
"Content-Type": "application/json",
|
|
};
|
|
if (IDS_API_KEY) {
|
|
headers["X-API-Key"] = IDS_API_KEY;
|
|
}
|
|
return headers;
|
|
};
|
|
|
|
app.post("/api/ml/train", async (req, res) => {
|
|
try {
|
|
const { max_records = 100000, hours_back = 24 } = req.body;
|
|
|
|
// Validate input
|
|
if (typeof max_records !== 'number' || max_records <= 0 || max_records > 1000000) {
|
|
return res.status(400).json({ error: "max_records must be between 1 and 1000000" });
|
|
}
|
|
if (typeof hours_back !== 'number' || hours_back <= 0 || hours_back > 720) {
|
|
return res.status(400).json({ error: "hours_back must be between 1 and 720" });
|
|
}
|
|
|
|
const controller = new AbortController();
|
|
const timeout = setTimeout(() => controller.abort(), ML_TIMEOUT);
|
|
|
|
const response = await fetch(`${ML_BACKEND_URL}/train`, {
|
|
method: "POST",
|
|
headers: getMLBackendHeaders(),
|
|
body: JSON.stringify({ max_records, hours_back }),
|
|
signal: controller.signal,
|
|
});
|
|
|
|
clearTimeout(timeout);
|
|
|
|
if (!response.ok) {
|
|
const errorData = await response.json().catch(() => ({}));
|
|
return res.status(response.status).json({
|
|
error: errorData.detail || "Training failed",
|
|
status: response.status,
|
|
});
|
|
}
|
|
|
|
const data = await response.json();
|
|
res.json(data);
|
|
} catch (error: any) {
|
|
if (error.name === 'AbortError') {
|
|
return res.status(504).json({ error: "Training timeout - operation took too long" });
|
|
}
|
|
if (error.code === 'ECONNREFUSED') {
|
|
return res.status(503).json({ error: "ML backend not available - is Python server running?" });
|
|
}
|
|
res.status(500).json({ error: error.message || "Failed to trigger training" });
|
|
}
|
|
});
|
|
|
|
app.post("/api/ml/detect", async (req, res) => {
|
|
try {
|
|
const { max_records = 50000, hours_back = 1, risk_threshold = 75, auto_block = false } = req.body;
|
|
|
|
// Validate input
|
|
if (typeof max_records !== 'number' || max_records <= 0 || max_records > 1000000) {
|
|
return res.status(400).json({ error: "max_records must be between 1 and 1000000" });
|
|
}
|
|
if (typeof hours_back !== 'number' || hours_back <= 0 || hours_back > 720) {
|
|
return res.status(400).json({ error: "hours_back must be between 1 and 720" });
|
|
}
|
|
if (typeof risk_threshold !== 'number' || risk_threshold < 0 || risk_threshold > 100) {
|
|
return res.status(400).json({ error: "risk_threshold must be between 0 and 100" });
|
|
}
|
|
|
|
const controller = new AbortController();
|
|
const timeout = setTimeout(() => controller.abort(), ML_TIMEOUT);
|
|
|
|
const response = await fetch(`${ML_BACKEND_URL}/detect`, {
|
|
method: "POST",
|
|
headers: getMLBackendHeaders(),
|
|
body: JSON.stringify({ max_records, hours_back, risk_threshold, auto_block }),
|
|
signal: controller.signal,
|
|
});
|
|
|
|
clearTimeout(timeout);
|
|
|
|
if (!response.ok) {
|
|
const errorData = await response.json().catch(() => ({}));
|
|
return res.status(response.status).json({
|
|
error: errorData.detail || "Detection failed",
|
|
status: response.status,
|
|
});
|
|
}
|
|
|
|
const data = await response.json();
|
|
res.json(data);
|
|
} catch (error: any) {
|
|
if (error.name === 'AbortError') {
|
|
return res.status(504).json({ error: "Detection timeout - operation took too long" });
|
|
}
|
|
if (error.code === 'ECONNREFUSED') {
|
|
return res.status(503).json({ error: "ML backend not available - is Python server running?" });
|
|
}
|
|
res.status(500).json({ error: error.message || "Failed to trigger detection" });
|
|
}
|
|
});
|
|
|
|
app.get("/api/ml/stats", async (req, res) => {
|
|
try {
|
|
const controller = new AbortController();
|
|
const timeout = setTimeout(() => controller.abort(), 10000); // 10s timeout for stats
|
|
|
|
const response = await fetch(`${ML_BACKEND_URL}/stats`, {
|
|
headers: getMLBackendHeaders(),
|
|
signal: controller.signal,
|
|
});
|
|
|
|
clearTimeout(timeout);
|
|
|
|
if (!response.ok) {
|
|
const errorData = await response.json().catch(() => ({}));
|
|
return res.status(response.status).json({
|
|
error: errorData.detail || "Failed to fetch ML stats",
|
|
status: response.status,
|
|
});
|
|
}
|
|
|
|
const data = await response.json();
|
|
res.json(data);
|
|
} catch (error: any) {
|
|
if (error.name === 'AbortError') {
|
|
return res.status(504).json({ error: "Stats timeout" });
|
|
}
|
|
if (error.code === 'ECONNREFUSED') {
|
|
return res.status(503).json({ error: "ML backend not available" });
|
|
}
|
|
res.status(500).json({ error: error.message || "Failed to fetch ML stats" });
|
|
}
|
|
});
|
|
|
|
  // Services monitoring
  // Aggregated health report for all platform services. Each entry starts as
  // "unknown"/unhealthy and is filled in by an independent probe below; each
  // probe has its own try/catch, so one failing check never prevents the
  // others from reporting.
  app.get("/api/services/status", async (req, res) => {
    try {
      const services = {
        mlBackend: { name: "ML Backend Python", status: "unknown", healthy: false, details: null as any },
        database: { name: "PostgreSQL Database", status: "unknown", healthy: false, details: null as any },
        syslogParser: { name: "Syslog Parser", status: "unknown", healthy: false, details: null as any },
        analyticsAggregator: { name: "Analytics Aggregator Timer", status: "unknown", healthy: false, details: null as any },
      };

      // Check ML Backend Python
      // Unauthenticated /health probe with a 5s abort budget.
      try {
        const controller = new AbortController();
        const timeout = setTimeout(() => controller.abort(), 5000);

        const response = await fetch(`${ML_BACKEND_URL}/health`, {
          signal: controller.signal,
        });

        clearTimeout(timeout);

        if (response.ok) {
          const data = await response.json();
          services.mlBackend.status = "running";
          services.mlBackend.healthy = true;
          services.mlBackend.details = {
            // Backend reports ml_model === "loaded" when a model is in memory.
            modelLoaded: data.ml_model === "loaded",
            timestamp: data.timestamp,
          };
        } else {
          services.mlBackend.status = "error";
          services.mlBackend.details = { error: `HTTP ${response.status}` };
        }
      } catch (error: any) {
        // Network failure (including abort) -> "offline".
        // NOTE(review): with Node's built-in fetch the ECONNREFUSED code is
        // usually on error.cause, so this may fall through to error.message —
        // confirm which fetch implementation is in use.
        services.mlBackend.status = "offline";
        services.mlBackend.details = { error: error.code === 'ECONNREFUSED' ? "Connection refused" : error.message };
      }

      // Check Database
      try {
        const conn = await storage.testConnection();
        if (conn) {
          services.database.status = "running";
          services.database.healthy = true;
          services.database.details = { connected: true };
        }
        // NOTE(review): when testConnection() resolves falsy without throwing,
        // status stays "unknown" — confirm whether that should be an error.
      } catch (error: any) {
        services.database.status = "error";
        services.database.details = { error: error.message };
      }

      // Check Python Services via authenticated endpoint
      // Uses the X-API-Key header; 5s abort budget.
      try {
        const controller2 = new AbortController();
        const timeout2 = setTimeout(() => controller2.abort(), 5000);

        const servicesResponse = await fetch(`${ML_BACKEND_URL}/services/status`, {
          headers: getMLBackendHeaders(),
          signal: controller2.signal,
        });

        clearTimeout(timeout2);

        if (servicesResponse.ok) {
          const servicesData = await servicesResponse.json();

          // Update syslog parser status
          const parserInfo = servicesData.services?.syslog_parser;
          if (parserInfo) {
            services.syslogParser.status = parserInfo.running ? "running" : "offline";
            services.syslogParser.healthy = parserInfo.running;
            services.syslogParser.details = {
              systemd_unit: parserInfo.systemd_unit,
              pid: parserInfo.details?.pid,
              error: parserInfo.error,
            };
          }
        } else if (servicesResponse.status === 403) {
          // 403 means the ML backend rejected our X-API-Key.
          services.syslogParser.status = "error";
          services.syslogParser.healthy = false;
          services.syslogParser.details = { error: "Authentication failed" };
        } else {
          // Any other HTTP status is handled by the catch below.
          throw new Error(`HTTP ${servicesResponse.status}`);
        }
      } catch (error: any) {
        services.syslogParser.status = "error";
        services.syslogParser.healthy = false;
        services.syslogParser.details = {
          error: error.code === 'ECONNREFUSED' ? "ML Backend offline" : error.message
        };
      }

      // Check Analytics Aggregator (via last record timestamp)
      // The aggregator exposes no health endpoint, so freshness of the newest
      // analytics row is used as a proxy: a row less than 2 hours old counts
      // as "running", otherwise "idle".
      try {
        const latestAnalytics = await db
          .select()
          .from(networkAnalytics)
          .orderBy(desc(networkAnalytics.date), desc(networkAnalytics.hour))
          .limit(1);

        if (latestAnalytics.length > 0) {
          const lastRun = new Date(latestAnalytics[0].date);
          const lastTimestamp = lastRun.toISOString();
          const hoursSinceLastRun = (Date.now() - lastRun.getTime()) / (1000 * 60 * 60);

          if (hoursSinceLastRun < 2) {
            services.analyticsAggregator.status = "running";
            services.analyticsAggregator.healthy = true;
            services.analyticsAggregator.details = {
              lastRun: latestAnalytics[0].date,
              lastTimestamp,
              hoursSinceLastRun: hoursSinceLastRun.toFixed(1),
            };
          } else {
            services.analyticsAggregator.status = "idle";
            services.analyticsAggregator.healthy = false;
            services.analyticsAggregator.details = {
              lastRun: latestAnalytics[0].date,
              lastTimestamp,
              hoursSinceLastRun: hoursSinceLastRun.toFixed(1),
              warning: "No aggregation in last 2 hours",
            };
          }
        } else {
          // No rows at all: the aggregator has never produced output.
          services.analyticsAggregator.status = "idle";
          services.analyticsAggregator.healthy = false;
          services.analyticsAggregator.details = { error: "No analytics data found" };
        }
      } catch (error: any) {
        services.analyticsAggregator.status = "error";
        services.analyticsAggregator.healthy = false;
        services.analyticsAggregator.details = { error: error.message };
      }

      res.json({ services });
    } catch (error: any) {
      res.status(500).json({ error: "Failed to check services status" });
    }
  });
|
|
|
|
// Service Control Endpoints (Secured - only allow specific systemd operations)
|
|
const ALLOWED_SERVICES = ["ids-ml-backend", "ids-syslog-parser"];
|
|
const ALLOWED_ACTIONS = ["start", "stop", "restart", "status"];
|
|
|
|
app.post("/api/services/:service/:action", async (req, res) => {
|
|
try {
|
|
const { service, action } = req.params;
|
|
|
|
// Validate service name
|
|
if (!ALLOWED_SERVICES.includes(service)) {
|
|
return res.status(400).json({ error: "Invalid service name" });
|
|
}
|
|
|
|
// Validate action
|
|
if (!ALLOWED_ACTIONS.includes(action)) {
|
|
return res.status(400).json({ error: "Invalid action" });
|
|
}
|
|
|
|
// Execute systemd command
|
|
const { exec } = await import("child_process");
|
|
const { promisify } = await import("util");
|
|
const execAsync = promisify(exec);
|
|
|
|
try {
|
|
const systemdAction = action === "status" ? "status" : action;
|
|
const { stdout, stderr } = await execAsync(
|
|
`systemctl ${systemdAction} ${service}`,
|
|
{ timeout: 10000 }
|
|
);
|
|
|
|
res.json({
|
|
success: true,
|
|
service,
|
|
action,
|
|
output: stdout || stderr,
|
|
timestamp: new Date().toISOString(),
|
|
});
|
|
} catch (execError: any) {
|
|
// systemctl returns non-zero exit for stopped services in status command
|
|
if (action === "status") {
|
|
res.json({
|
|
success: true,
|
|
service,
|
|
action,
|
|
output: execError.stdout || execError.stderr,
|
|
timestamp: new Date().toISOString(),
|
|
});
|
|
} else {
|
|
throw execError;
|
|
}
|
|
}
|
|
} catch (error: any) {
|
|
res.status(500).json({
|
|
error: "Service control failed",
|
|
details: error.message,
|
|
});
|
|
}
|
|
});
|
|
|
|
const httpServer = createServer(app);
|
|
return httpServer;
|
|
}
|