const express = require('express');
const http = require('http');
const WebSocket = require('ws');
const axios = require('axios');
const Docker = require('dockerode');
const { exec } = require('child_process');
const util = require('util');
const path = require('path');
require('dotenv').config();

const app = express();
const server = http.createServer(app);
const wss = new WebSocket.Server({ server });
const docker = new Docker({ socketPath: process.env.DOCKER_SOCKET_PATH });

const NETDATA_URL = process.env.NETDATA_URL;
const NETDATA_JUMP_URL = process.env.NETDATA_JUMP_URL;
const TOTAL_CORES = parseInt(process.env.TOTAL_CORES, 10);

// Verify environment variables early so misconfiguration is visible at startup.
if (!NETDATA_URL) {
  console.error('NETDATA_URL environment variable is not set.');
}
if (!NETDATA_JUMP_URL) {
  console.error('NETDATA_JUMP_URL environment variable is not set.');
}
if (!process.env.DOCKER_SOCKET_PATH) {
  console.error('DOCKER_SOCKET_PATH environment variable is not set.');
}
if (!TOTAL_CORES) {
  console.error('TOTAL_CORES environment variable is not set or invalid.');
}

// Promisify exec for async/await
const execPromise = util.promisify(exec);

// Previous per-container counters, keyed by full container ID, used to turn
// cumulative Docker counters into rates between polls.
let prevStats = new Map();

/**
 * Format a byte rate with a dynamically chosen unit.
 * @param {number} bytes - Rate in bytes per second.
 * @returns {{value: string, unit: string}} Two-decimal value plus unit label.
 */
function formatBytes(bytes) {
  if (bytes >= 1e9) return { value: (bytes / 1e9).toFixed(2), unit: 'GB/s' };
  if (bytes >= 1e6) return { value: (bytes / 1e6).toFixed(2), unit: 'MB/s' };
  if (bytes >= 1e3) return { value: (bytes / 1e3).toFixed(2), unit: 'KB/s' };
  return { value: bytes.toFixed(2), unit: 'B/s' };
}

// Inverse of formatBytes: convert a {value, unit} pair back to raw bytes/s
// so per-container rates can be summed without unit drift.
const UNIT_FACTORS = { 'GB/s': 1e9, 'MB/s': 1e6, 'KB/s': 1e3, 'B/s': 1 };
function toBytesPerSec({ value, unit }) {
  return parseFloat(value) * (UNIT_FACTORS[unit] ?? 1);
}

/**
 * Count running Holesail processes on the host.
 * @returns {Promise<number>} Process count, or 0 on error.
 */
async function getHolesailProcessCount() {
  try {
    // The '[h]olesail' bracket idiom stops the grep process from matching its
    // own command line; the plain 'grep holesail' used before inflated the count.
    const { stdout } = await execPromise("ps auxf | grep '[h]olesail' | wc -l");
    const count = Number.parseInt(stdout.trim(), 10);
    return Number.isNaN(count) ? 0 : count;
  } catch (error) {
    console.error('Error fetching Holesail process count:', error.message);
    return 0;
  }
}

/**
 * Collect per-container and aggregate stats for all running '/mc_*' containers.
 * @returns {Promise<object>} Stats payload, or {} on error.
 */
async function getDockerStats() {
  try {
    const containers = await docker.listContainers({ all: true });
    const runningContainers = containers.filter((c) => c.State === 'running');
    const mcContainers = runningContainers.filter((c) =>
      c.Names.some((name) => name.startsWith('/mc_'))
    );

    const containerStats = await Promise.all(
      mcContainers.map(async (container) => {
        const containerInfo = docker.getContainer(container.Id);
        const stats = await containerInfo.stats({ stream: false });
        const containerId = container.Id;

        // Seed with current counters on first sight so the first sample
        // yields zero rates instead of huge deltas.
        const prev = prevStats.get(containerId) || {
          cpu_usage: stats.cpu_stats.cpu_usage.total_usage,
          system_cpu: stats.cpu_stats.system_cpu_usage,
          time: Date.now(),
        };

        const cpuDelta = stats.cpu_stats.cpu_usage.total_usage - prev.cpu_usage;
        const systemDelta = stats.cpu_stats.system_cpu_usage - prev.system_cpu;
        const now = Date.now();
        const timeDiffMs = now - prev.time;

        // CPU usage as a percentage of the whole host (TOTAL_CORES), not of a
        // single core. online_cpus can be missing on some daemon versions;
        // fall back to TOTAL_CORES to avoid NaN.
        const onlineCpus = stats.cpu_stats.online_cpus || TOTAL_CORES;
        let cpuUsage = 0;
        if (systemDelta > 0 && timeDiffMs > 0) {
          cpuUsage = ((cpuDelta / systemDelta) * onlineCpus / TOTAL_CORES) * 100;
        }

        // Memory in MB; usage may be absent, so guard against NaN.
        const memoryUsage = (stats.memory_stats.usage || 0) / 1024 / 1024;

        // Network rates (bytes/s) from cumulative eth0 counters.
        const networkStats = stats.networks?.eth0 || { rx_bytes: 0, tx_bytes: 0 };
        const prevNetwork = prev.network || { rx_bytes: 0, tx_bytes: 0 };
        let receivedRate = 0;
        let sentRate = 0;
        if (timeDiffMs > 0) {
          const timeDiffSec = timeDiffMs / 1000;
          receivedRate = (networkStats.rx_bytes - prevNetwork.rx_bytes) / timeDiffSec;
          sentRate = (networkStats.tx_bytes - prevNetwork.tx_bytes) / timeDiffSec;
        }

        prevStats.set(containerId, {
          cpu_usage: stats.cpu_stats.cpu_usage.total_usage,
          system_cpu: stats.cpu_stats.system_cpu_usage,
          network: { rx_bytes: networkStats.rx_bytes, tx_bytes: networkStats.tx_bytes },
          time: now,
        });

        return {
          id: containerId.substring(0, 12),
          name: container.Names[0].replace(/^\//, ''),
          cpu: cpuUsage.toFixed(2),
          memory: memoryUsage.toFixed(2),
          network: { received: formatBytes(receivedRate), sent: formatBytes(sentRate) },
          state: container.State,
        };
      })
    );

    // Numeric-string subtraction coerces correctly here ("12.34" - "5.00").
    const sortedByCpu = [...containerStats].sort((a, b) => b.cpu - a.cpu);
    const sortedByMemory = [...containerStats].sort((a, b) => b.memory - a.memory);

    const totalCpu = containerStats
      .reduce((sum, c) => sum + parseFloat(c.cpu), 0)
      .toFixed(2);
    const totalMemory = (
      containerStats.reduce((sum, c) => sum + parseFloat(c.memory), 0) / 1024
    ).toFixed(2); // MB -> GB

    const totalNetwork = containerStats.reduce(
      (sum, c) => ({
        received: sum.received + toBytesPerSec(c.network.received),
        sent: sum.sent + toBytesPerSec(c.network.sent),
      }),
      { received: 0, sent: 0 }
    );

    // Drop cached counters for containers that no longer exist so prevStats
    // cannot grow without bound.
    const currentContainerIds = new Set(mcContainers.map((c) => c.Id));
    for (const id of prevStats.keys()) {
      if (!currentContainerIds.has(id)) {
        prevStats.delete(id);
      }
    }

    return {
      // NOTE(review): the "- 3" assumes exactly three non-MC system containers
      // are always present — confirm against the deployment; clamped so the
      // counts can never go negative if that assumption breaks.
      totalContainers: Math.max(0, containers.length - 3),
      runningContainers: Math.max(0, runningContainers.length - 3),
      totalCpu,
      totalMemory,
      totalNetwork: {
        received: formatBytes(totalNetwork.received),
        sent: formatBytes(totalNetwork.sent),
        time: Math.floor(Date.now() / 1000),
      },
      sortedByCpu,
      sortedByMemory,
    };
  } catch (error) {
    console.error('Error fetching Docker stats:', error.message);
    return {};
  }
}

/**
 * Fetch a set of Netdata chart series in parallel.
 * @param {Array<{key: string, url: string, map: Function, timeout: number}>} charts
 * @param {string} label - Source name used in log messages ('Netdata' / 'Jump Node').
 * @returns {Promise<object>} Map of chart key -> mapped data rows ([] on failure).
 */
async function fetchChartSeries(charts, label) {
  const results = await Promise.all(
    charts.map(async ({ key, url, map, timeout }) => {
      try {
        const response = await axios.get(url, { timeout });
        if (!response.data || !response.data.data) {
          console.warn(`No data returned for ${label} chart ${key} from ${url}`);
          return { key, data: [] };
        }
        return { key, data: response.data.data.map(map) };
      } catch (error) {
        console.warn(
          `Failed to fetch ${label} chart ${key} from ${url}:`,
          error.message,
          error.response ? `Status: ${error.response.status}` : ''
        );
        return { key, data: [] };
      }
    })
  );
  const metrics = {};
  results.forEach(({ key, data }) => {
    metrics[key] = data;
  });
  return metrics;
}

/**
 * Fetch system metrics from the local Netdata instance.
 * @returns {Promise<object>} Metrics keyed by chart; empty arrays on failure.
 */
async function getNetdataMetrics() {
  const empty = { cpu: [], ram: [], net: [], disk: [], disk_space: [], anomaly: [] };
  if (!NETDATA_URL) {
    console.error('Cannot fetch Netdata metrics: NETDATA_URL is undefined.');
    return empty;
  }
  try {
    // Column indices in each `map` follow the Netdata /api/v1/data row layout
    // for the given chart (d[0] is always the timestamp).
    const charts = [
      { key: 'cpu', url: `${NETDATA_URL}/api/v1/data?chart=system.cpu&after=-60&points=30`, timeout: 5000, map: (d) => ({ time: d[0], user: d[6], system: d[7] }) },
      { key: 'ram', url: `${NETDATA_URL}/api/v1/data?chart=system.ram&after=-60&points=30`, timeout: 5000, map: (d) => ({ time: d[0], used: d[2], free: d[3] }) },
      { key: 'net', url: `${NETDATA_URL}/api/v1/data?chart=system.net&after=-60&points=30`, timeout: 5000, map: (d) => ({ time: d[0], received: d[1], sent: d[2] }) },
      { key: 'disk', url: `${NETDATA_URL}/api/v1/data?chart=system.io&after=-60&points=30`, timeout: 5000, map: (d) => ({ time: d[0], in: d[1], out: d[2] }) },
      { key: 'disk_space', url: `${NETDATA_URL}/api/v1/data?chart=disk_space./&format=json&after=-60&points=30`, timeout: 5000, map: (d) => ({ time: d[0], avail: d[1], used: d[2], reserved: d[3] }) },
      { key: 'anomaly', url: `${NETDATA_URL}/api/v1/data?chart=anomaly_detection.dimensions_on_mchost&format=json&after=-60&points=30`, timeout: 5000, map: (d) => ({ time: d[0], anomalous: d[1], normal: d[2] }) },
    ];
    return await fetchChartSeries(charts, 'Netdata');
  } catch (error) {
    console.error('Error fetching Netdata metrics:', error.message);
    return empty;
  }
}

/**
 * Fetch system metrics from the Jump Node's Netdata instance.
 * @returns {Promise<object>} Metrics keyed by chart; empty arrays on failure.
 */
async function getJumpNodeMetrics() {
  const empty = { cpu: [], ram: [], net: [], disk: [], anomaly: [], disk_space: [], uptime: [] };
  if (!NETDATA_JUMP_URL) {
    console.error('Cannot fetch Jump Node metrics: NETDATA_JUMP_URL is undefined.');
    return empty;
  }
  try {
    // Jump node gets a longer timeout (10 s) than the local instance.
    const charts = [
      { key: 'cpu', url: `${NETDATA_JUMP_URL}/api/v1/data?chart=system.cpu&after=-60&points=30`, timeout: 10000, map: (d) => ({ time: d[0], user: d[6], system: d[7] }) },
      { key: 'net', url: `${NETDATA_JUMP_URL}/api/v1/data?chart=system.net&after=-60&points=30`, timeout: 10000, map: (d) => ({ time: d[0], received: d[1], sent: d[2] }) },
      { key: 'disk', url: `${NETDATA_JUMP_URL}/api/v1/data?chart=system.io&after=-60&points=30`, timeout: 10000, map: (d) => ({ time: d[0], in: d[1], out: d[2] }) },
      { key: 'anomaly', url: `${NETDATA_JUMP_URL}/api/v1/data?chart=anomaly_detection.dimensions_on_my-mc-link&format=json&after=-60&points=30`, timeout: 10000, map: (d) => ({ time: d[0], anomalous: d[1], normal: d[2] }) },
      { key: 'disk_space', url: `${NETDATA_JUMP_URL}/api/v1/data?chart=disk_space./&format=json&after=-60&points=30`, timeout: 10000, map: (d) => ({ time: d[0], avail: d[1], used: d[2], reserved: d[3] }) },
      { key: 'uptime', url: `${NETDATA_JUMP_URL}/api/v1/data?chart=system.uptime&format=json&after=-60&points=30`, timeout: 10000, map: (d) => ({ time: d[0], uptime: d[1] }) },
    ];
    const metrics = await fetchChartSeries(charts, 'Jump Node');
    // Ensure all keys are present, even if empty
    return {
      cpu: metrics.cpu || [],
      ram: [], // RAM chart removed until verified
      net: metrics.net || [],
      disk: metrics.disk || [],
      anomaly: metrics.anomaly || [],
      disk_space: metrics.disk_space || [],
      uptime: metrics.uptime || [],
    };
  } catch (error) {
    console.error('Error fetching Jump Node metrics:', error.message);
    return empty;
  }
}

app.get('/', (req, res) => {
  res.sendFile(path.join(__dirname, 'status.html'));
});

// Push a full metrics snapshot to each WebSocket client once per second.
wss.on('connection', (ws) => {
  console.log('WebSocket client connected');

  // Guard against overlapping ticks: a slow collection (>1 s) must not pile
  // up concurrent Docker/Netdata requests.
  let inFlight = false;

  const interval = setInterval(async () => {
    if (inFlight) return;
    inFlight = true;
    try {
      // The four sources are independent — fetch them in parallel, each with
      // its own fallback so one failure cannot blank the whole payload.
      const [dockerStats, netdataMetrics, jumpNodeMetrics, holesailProcessCount] =
        await Promise.all([
          getDockerStats().catch((error) => {
            console.error('Error in getDockerStats:', error.message);
            return {};
          }),
          getNetdataMetrics().catch((error) => {
            console.error('Error in getNetdataMetrics:', error.message);
            return { cpu: [], ram: [], net: [], disk: [], disk_space: [], anomaly: [] };
          }),
          getJumpNodeMetrics().catch((error) => {
            console.error('Error in getJumpNodeMetrics:', error.message);
            return { cpu: [], ram: [], net: [], disk: [], anomaly: [], disk_space: [], uptime: [] };
          }),
          getHolesailProcessCount().catch((error) => {
            console.error('Error in getHolesailProcessCount:', error.message);
            return 0;
          }),
        ]);

      // The socket may have closed while we were collecting.
      if (ws.readyState === WebSocket.OPEN) {
        ws.send(JSON.stringify({
          docker: dockerStats,
          netdata: netdataMetrics,
          jumpNode: jumpNodeMetrics,
          holesailProcessCount,
        }));
      }
    } finally {
      inFlight = false;
    }
  }, 1000);

  ws.on('close', () => {
    console.log('WebSocket client disconnected');
    clearInterval(interval);
  });
});
// Start the HTTP/WebSocket server. Default to 3000 when PORT is unset —
// previously listen(undefined) bound a random ephemeral port and the log
// printed "localhost:undefined".
const PORT = Number.parseInt(process.env.PORT, 10) || 3000;
server.listen(PORT, () => {
  console.log(`Server running on http://localhost:${PORT}`);
});