diff --git a/README.md b/README.md
index 75a2682..457d558 100644
--- a/README.md
+++ b/README.md
@@ -45,14 +45,27 @@ Before setting up HyperMC Panel, ensure you have the following installed:
 2. **Install Dependencies**
 
    ```bash
-   npm install
+   npm install express cors ws dockerode node-fetch unirest dotenv
    ```
 
 3. **Set Up Environment Variables**
 
    Create a `.env` file in the root directory and configure the required variables (see [Configuration](#configuration)).
 
-4. **Start the Server**
+4. **Update package.json**
+
+   Ensure your `package.json` includes `"type": "module"` to enable ES modules:
+
+   ```json
+   {
+     "type": "module",
+     "scripts": {
+       "start": "node index.js"
+     }
+   }
+   ```
+
+5. **Start the Server**
 
    ```bash
    npm start
@@ -149,14 +162,38 @@ Replace placeholder values (e.g., `https://api.example.com`, `your_secure_secret
 │   │   └── styles.min.css # Compiled Tailwind CSS
 │   ├── favicon/           # Favicon assets
 │   └── index.html         # Main HTML file
+├── includes/
+│   ├── api.js             # API request handling
+│   ├── auth.js            # Authentication and login link logic
+│   ├── docker.js          # Docker-related functions
+│   ├── status.js          # Connection status checking functions
+│   ├── websocket.js       # WebSocket connection handling
+├── index.js               # Main server entry point
 ├── app.js                 # Frontend JavaScript logic
-├── server.js              # Backend server logic
 ├── .env                   # Environment variables (not tracked)
 ├── package.json           # Node.js dependencies
 └── README.md              # Project documentation
 ```
 
+## Contributing
+
+Contributions are welcome! To contribute:
+
+1. Fork the repository.
+2. Create a new branch (`git checkout -b feature/your-feature`).
+3. Make your changes and commit (`git commit -m "Add your feature"`).
+4. Push to your branch (`git push origin feature/your-feature`).
+5. Open a pull request on the [repository](https://git.ssh.surf/hypermc/panel).
+
+Please ensure your code follows the project's coding style and includes appropriate tests.
+
+## License
+
+This project is licensed under the MIT License. See the [LICENSE](LICENSE) file for details.
+
+## Acknowledgments
+
 - **Chart.js**: For beautiful, responsive charts.
 - **xterm.js**: For the terminal interface.
 - **Tailwind CSS**: For the sleek, modern UI.
 
@@ -165,6 +202,4 @@ Replace placeholder values (e.g., `https://api.example.com`, `your_secure_secret
 
 ## Contact
 
-For questions, issues, or suggestions, please open an issue on the [repository](https://git.ssh.surf/hypermc/panel) or contact the maintainer at [raven-scott.fyi](https://raven-scott.fyi).
-
-Happy server management with HyperMC Panel! 🚀
\ No newline at end of file
+For questions, issues, or suggestions, please open an issue on the [repository](https://git.ssh.surf/hypermc/panel) or contact the maintainer at [raven-scott.fyi](https://raven-scott.fyi).
\ No newline at end of file
diff --git a/includes/api.js b/includes/api.js
new file mode 100644
index 0000000..90ede63
--- /dev/null
+++ b/includes/api.js
@@ -0,0 +1,21 @@
+import fetch from 'node-fetch';
+
+export async function apiRequest(endpoint, apiKey, method = 'GET', body = null) {
+    const headers = {
+        'Accept': 'application/json',
+        'Content-Type': 'application/json',
+        'x-my-mc-auth': apiKey
+    };
+    try {
+        const response = await fetch(`${process.env.API_URL}${endpoint}`, {
+            method,
+            headers,
+            body: body ? JSON.stringify(body) : null
+        });
+        const data = await response.json();
+        if (!response.ok) return { error: data.message || `HTTP ${response.status}` };
+        return data;
+    } catch (error) {
+        return { error: `Network error: ${error.message}` };
+    }
+}
\ No newline at end of file
diff --git a/includes/auth.js b/includes/auth.js
new file mode 100644
index 0000000..8b7f3f4
--- /dev/null
+++ b/includes/auth.js
@@ -0,0 +1,88 @@
+import unirest from 'unirest';
+import { randomBytes } from 'crypto';
+
+const temporaryLinks = new Map();
+
+setInterval(() => {
+    const now = Date.now();
+    for (const [linkId, linkData] of temporaryLinks.entries()) {
+        if (linkData.expiresAt < now) temporaryLinks.delete(linkId);
+    }
+}, parseInt(process.env.TEMP_LINKS_CLEANUP_INTERVAL_MS, 10));
+
+export async function generateLoginLink(req, res) {
+    try {
+        const { secretKey, username } = req.body;
+        if (secretKey !== process.env.ADMIN_SECRET_KEY) return res.status(401).json({ error: 'Invalid secret key' });
+        if (!username) return res.status(400).json({ error: 'Username is required' });
+
+        const tokenResponse = await unirest
+            .post(process.env.AUTH_ENDPOINT)
+            .headers({ 'Accept': 'application/json', 'Content-Type': 'application/json' })
+            .send({ username, password: process.env.AUTH_PASSWORD });
+
+        if (!tokenResponse.body.token) return res.status(500).json({ error: 'Failed to generate API key' });
+
+        const apiKey = tokenResponse.body.token;
+        const linkId = randomBytes(parseInt(process.env.LINK_ID_BYTES, 10)).toString('hex');
+        const loginLink = `${process.env.AUTO_LOGIN_LINK_PREFIX}${linkId}`;
+
+        temporaryLinks.set(linkId, {
+            apiKey,
+            username,
+            expiresAt: Date.now() + parseInt(process.env.LINK_EXPIRY_SECONDS, 10) * 1000
+        });
+
+        setTimeout(() => temporaryLinks.delete(linkId), parseInt(process.env.LINK_EXPIRY_SECONDS, 10) * 1000);
+        res.json({ loginLink });
+    } catch (error) {
+        res.status(500).json({ error: 'Internal server error' });
+    }
+}
+
+export function handleAutoLogin(req, res) {
+    const { linkId } = req.params;
+    const linkData = temporaryLinks.get(linkId);
+
+    if (!linkData || linkData.expiresAt < Date.now()) {
+        temporaryLinks.delete(linkId);
+        return res.send(`
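For reference, the new `includes/api.js` and `includes/auth.js` modules read their settings from `process.env`; the README's Configuration section is not part of this hunk. A minimal sketch of the corresponding `.env` entries is shown below. The variable names come from the code in this diff, while every value is a placeholder rather than a project default:

```ini
# Hypothetical .env sketch — variable names taken from includes/api.js and includes/auth.js;
# all values below are placeholders, not documented defaults.
API_URL=https://api.example.com                          # base URL prefixed to every apiRequest() endpoint
ADMIN_SECRET_KEY=your_secure_secret                      # required by generateLoginLink() to authorize link creation
AUTH_ENDPOINT=https://api.example.com/auth               # unirest POST target expected to return { token }
AUTH_PASSWORD=change_me                                  # password sent along with the username to AUTH_ENDPOINT
AUTO_LOGIN_LINK_PREFIX=https://panel.example.com/login/  # the generated linkId is appended to this prefix
LINK_ID_BYTES=16                                         # randomBytes() length used for the linkId
LINK_EXPIRY_SECONDS=300                                  # lifetime of a temporary login link
TEMP_LINKS_CLEANUP_INTERVAL_MS=60000                     # sweep interval for expired temporaryLinks entries
```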
+ +

Login Expired.

+

Redirecting...

+
+ + + `); + } + + temporaryLinks.delete(linkId); + res.send(` + + + + Auto Login + + + +

Logging in...

+ + + `); +} \ No newline at end of file diff --git a/includes/docker.js b/includes/docker.js new file mode 100644 index 0000000..d33e417 --- /dev/null +++ b/includes/docker.js @@ -0,0 +1,188 @@ +import Docker from 'dockerode'; +import { promisify } from 'util'; +import { exec } from 'child_process'; +import path from 'path'; +const execPromise = promisify(exec); + +export function setupDocker() { + return new Docker({ socketPath: process.env.DOCKER_SOCKET_PATH }); +} + +export async function getContainerStats(docker, containerName) { + try { + const container = docker.getContainer(containerName); + const [containers, info, stats] = await Promise.all([ + docker.listContainers({ all: true }), + container.inspect(), + container.stats({ stream: false }) + ]); + + if (!containers.some(c => c.Names.includes(`/${containerName}`))) { + return { error: `Container ${containerName} not found` }; + } + + const memoryUsage = stats.memory_stats.usage / 1024 / 1024; + const memoryLimit = stats.memory_stats.limit / 1024 / 1024 / 1024; + const memoryPercent = ((memoryUsage / (memoryLimit * 1024)) * 100).toFixed(2); + const cpuDelta = stats.cpu_stats.cpu_usage.total_usage - (stats.precpu_stats.cpu_usage?.total_usage || 0); + const systemDelta = stats.cpu_stats.system_cpu_usage - (stats.precpu_stats.system_cpu_usage || 0); + const cpuPercent = systemDelta > 0 ? ((cpuDelta / systemDelta) * stats.cpu_stats.online_cpus * 100).toFixed(2) : 0; + + return { + status: info.State.Status, + memory: { raw: `${memoryUsage.toFixed(2)}MiB / ${memoryLimit.toFixed(2)}GiB`, percent: memoryPercent }, + cpu: cpuPercent + }; + } catch (error) { + console.error(`Docker stats error for ${containerName}:`, error.message); + return { error: `Failed to fetch stats for ${containerName}: ${error.message}` }; + } +} + +export async function streamContainerLogs(docker, ws, containerName, client) { + let isStreaming = true; + let isStartingStream = false; + + const startLogStream = async () => { + if (isStartingStream) return false; + isStartingStream = true; + + try { + const container = docker.getContainer(containerName); + const [containers, inspect] = await Promise.all([ + docker.listContainers({ all: true }), + container.inspect() + ]); + + if (!containers.some(c => c.Names.includes(`/${containerName}`))) { + if (isStreaming) ws.send(JSON.stringify({ type: 'docker-logs', error: `Container ${containerName} not found` })); + return false; + } + + if (inspect.State.Status !== 'running') { + if (isStreaming) ws.send(JSON.stringify({ type: 'docker-logs', error: `Container ${containerName} is not running` })); + return false; + } + + if (client.logStream) { + client.logStream.removeAllListeners(); + client.logStream.destroy(); + client.logStream = null; + } + + const logStream = await container.logs({ + follow: true, + stdout: true, + stderr: true, + tail: parseInt(process.env.LOG_STREAM_TAIL_LINES, 10), + timestamps: true + }); + + logStream.on('data', (chunk) => { + if (isStreaming && client.logStream === logStream) { + ws.send(JSON.stringify({ type: 'docker-logs', data: { log: chunk.toString('utf8') } })); + } + }); + + logStream.on('error', (error) => { + if (isStreaming) ws.send(JSON.stringify({ type: 'docker-logs', error: `Log stream error: ${error.message}` })); + }); + + client.logStream = logStream; + return true; + } catch (error) { + if (isStreaming) ws.send(JSON.stringify({ type: 'docker-logs', error: `Failed to stream logs: ${error.message}` })); + return false; + } finally { + isStartingStream = false; + } + }; + + const 
monitorContainer = async () => { + try { + const container = docker.getContainer(containerName); + const inspect = await container.inspect(); + if (inspect.State.Status !== 'running') { + if (client.logStream) { + client.logStream.removeAllListeners(); + client.logStream.destroy(); + client.logStream = null; + } + return false; + } + return true; + } catch (error) { + return false; + } + }; + + if (!(await startLogStream())) { + const monitorInterval = setInterval(async () => { + if (!isStreaming) return clearInterval(monitorInterval); + if (await monitorContainer() && !client.logStream && !isStartingStream) { + await startLogStream(); + } + }, parseInt(process.env.LOG_STREAM_MONITOR_INTERVAL_MS, 10)); + + ws.on('close', () => { + isStreaming = false; + clearInterval(monitorInterval); + if (client.logStream) { + client.logStream.removeAllListeners(); + client.logStream.destroy(); + client.logStream = null; + } + }); + return; + } + + const monitorInterval = setInterval(async () => { + if (!isStreaming) return clearInterval(monitorInterval); + if (await monitorContainer() && !client.logStream && !isStartingStream) { + await startLogStream(); + } + }, parseInt(process.env.LOG_STREAM_MONITOR_INTERVAL_MS, 10)); + + ws.on('close', () => { + isStreaming = false; + clearInterval(monitorInterval); + if (client.logStream) { + client.logStream.removeAllListeners(); + client.logStream.destroy(); + client.logStream = null; + } + }); +} + +export async function readServerProperties(docker, containerName) { + try { + const container = docker.getContainer(containerName); + const inspect = await container.inspect(); + if (inspect.State.Status !== 'running') { + return { error: `Container ${containerName} is not running` }; + } + const { stdout, stderr } = await execPromise(`docker exec ${containerName} bash -c "cat ${process.env.SERVER_PROPERTIES_PATH}"`); + if (stderr) return { error: 'Failed to read server.properties' }; + return { content: stdout }; + } catch (error) { + return { error: `Failed to read server.properties: ${error.message}` }; + } +} + +export async function writeServerProperties(docker, containerName, content) { + try { + const { randomBytes } = await import('crypto'); + const tmpDir = process.env.TEMP_DIR; + const randomId = randomBytes(parseInt(process.env.TEMP_FILE_RANDOM_ID_BYTES, 10)).toString('hex'); + const tmpFile = path.join(tmpDir, `server_properties_${randomId}.tmp`); + const containerFilePath = `${process.env.CONTAINER_TEMP_FILE_PREFIX}${randomId}.tmp`; + + await (await import('fs')).promises.writeFile(tmpFile, content); + await execPromise(`docker cp ${tmpFile} ${containerName}:${containerFilePath}`); + await execPromise(`docker exec ${containerName} bash -c "mv ${containerFilePath} ${process.env.SERVER_PROPERTIES_PATH} && chown mc:mc ${process.env.SERVER_PROPERTIES_PATH}"`); + await (await import('fs')).promises.unlink(tmpFile).catch(err => console.error(`Error deleting temp file: ${err.message}`)); + return { message: 'Server properties updated' }; + } catch (error) { + return { error: `Failed to write server.properties: ${error.message}` }; + } +} \ No newline at end of file diff --git a/includes/status.js b/includes/status.js new file mode 100644 index 0000000..c8360cc --- /dev/null +++ b/includes/status.js @@ -0,0 +1,49 @@ +import { promisify } from 'util'; +import { exec } from 'child_process'; +import { Socket } from 'net'; +const execPromise = promisify(exec); + +export async function checkConnectionStatus(hostname, port) { + try { + const { stdout, stderr } = await 
execPromise(`${process.env.STATUS_CHECK_PATH} -host ${hostname} -port ${port}`); + if (stderr) return { isOnline: false, error: stderr }; + return { isOnline: true, data: JSON.parse(stdout) }; + } catch (error) { + return { isOnline: false, error: error.message }; + } +} + +export async function checkGeyserStatus(hostname, port) { + try { + const { stdout, stderr } = await execPromise(`${process.env.GEYSER_STATUS_CHECK_PATH} -host ${hostname} -port ${port}`); + if (stderr) return { isOnline: false, error: stderr }; + return { isOnline: true, data: JSON.parse(stdout) }; + } catch (error) { + return { isOnline: false, error: error.message }; + } +} + +export async function checkSftpStatus(hostname, port) { + return new Promise((resolve) => { + const socket = new Socket(); + const timeout = parseInt(process.env.SFTP_CONNECTION_TIMEOUT_MS, 10); + socket.setTimeout(timeout); + + socket.on('connect', () => { + socket.destroy(); + resolve({ isOnline: true }); + }); + + socket.on('timeout', () => { + socket.destroy(); + resolve({ isOnline: false, error: 'Connection timed out' }); + }); + + socket.on('error', (error) => { + socket.destroy(); + resolve({ isOnline: false, error: error.message }); + }); + + socket.connect(port, process.env.SFTP_HOSTNAME); + }); +} \ No newline at end of file diff --git a/includes/websocket.js b/includes/websocket.js new file mode 100644 index 0000000..92bfef9 --- /dev/null +++ b/includes/websocket.js @@ -0,0 +1,667 @@ +import { URLSearchParams } from 'url'; +import { getContainerStats, streamContainerLogs, readServerProperties, writeServerProperties } from './docker.js'; +import { checkConnectionStatus, checkGeyserStatus, checkSftpStatus } from './status.js'; +import { apiRequest } from './api.js'; + +const clients = new Map(); +const staticEndpoints = ['log', 'website', 'map', 'my-link-cache', 'my-geyser-cache', 'my-sftp-cache', 'my-link', 'my-geyser-link', 'my-sftp']; +const dynamicEndpoints = ['hello', 'time', 'mod-list']; + +async function fetchAndSendUpdate(ws, endpoint, client, docker) { + if (['mod-list', 'list-players'].includes(endpoint) && client.user !== 'Unknown') { + try { + const container = docker.getContainer(client.user); + const inspect = await container.inspect(); + if (inspect.State.Status !== 'running') { + ws.send(JSON.stringify({ type: endpoint, error: `Container ${client.user} is not running` })); + return; + } + } catch (error) { + ws.send(JSON.stringify({ type: endpoint, error: `Failed to check container status: ${error.message}` })); + return; + } + } + + if (endpoint === 'time' && client.cache['time']) { + ws.send(JSON.stringify({ type: endpoint, data: client.cache['time'] })); + return; + } + + const response = await apiRequest(`/${endpoint}`, client.apiKey); + if (!response.error) { + if (endpoint === 'time') client.cache['time'] = response; + if (endpoint === 'my-link-cache') { + client.cache['my-link-cache'] = response; + if (client.subscriptions.has('my-link-cache') && client.user !== 'Unknown') { + try { + const container = docker.getContainer(client.user); + const inspect = await container.inspect(); + if (inspect.State.Status === 'running' && response.hostname && response.port) { + const status = await checkConnectionStatus(response.hostname, response.port); + ws.send(JSON.stringify({ type: 'connection-status', data: { isOnline: status.isOnline } })); + } else { + ws.send(JSON.stringify({ type: 'connection-status', error: `Container ${client.user} is not running` })); + } + } catch (error) { + ws.send(JSON.stringify({ type: 
'connection-status', error: `Failed to check container status: ${error.message}` })); + } + } + } + if (endpoint === 'my-geyser-cache') { + client.cache['my-geyser-cache'] = response; + if (client.subscriptions.has('my-geyser-cache') && client.user !== 'Unknown') { + try { + const container = docker.getContainer(client.user); + const inspect = await container.inspect(); + if (inspect.State.Status === 'running' && response.hostname && response.port) { + const status = await checkGeyserStatus(response.hostname, response.port); + ws.send(JSON.stringify({ type: 'geyser-status', data: { isOnline: status.isOnline } })); + } else { + ws.send(JSON.stringify({ type: 'geyser-status', error: `Container ${client.user} is not running` })); + } + } catch (error) { + ws.send(JSON.stringify({ type: 'geyser-status', error: `Failed to check container status: ${error.message}` })); + } + } + } + if (endpoint === 'my-sftp-cache') { + client.cache['my-sftp-cache'] = response; + if (client.subscriptions.has('my-sftp-cache') && client.user !== 'Unknown') { + try { + const container = docker.getContainer(client.user); + const inspect = await container.inspect(); + if (inspect.State.Status === 'running' && response.hostname && response.port) { + const status = await checkSftpStatus(response.hostname, response.port); + ws.send(JSON.stringify({ type: 'sftp-status', data: { isOnline: status.isOnline } })); + } else { + ws.send(JSON.stringify({ type: 'sftp-status', error: `Container ${client.user} is not running` })); + } + } catch (error) { + ws.send(JSON.stringify({ type: 'sftp-status', error: `Failed to check container status: ${error.message}` })); + } + } + } + ws.send(JSON.stringify({ type: endpoint, data: response })); + } else { + ws.send(JSON.stringify({ type: endpoint, error: response.error })); + } +} + +async function manageStatusChecks(ws, client, user, docker) { + try { + const container = docker.getContainer(user); + const inspect = await container.inspect(); + const isRunning = inspect.State.Status === 'running'; + + client.intervals.forEach(clearInterval); + client.intervals = []; + ['connectionStatusInterval', 'geyserStatusInterval', 'sftpStatusInterval', 'statusCheckMonitorInterval'].forEach((key) => { + if (client[key]) clearInterval(client[key]); + client[key] = null; + }); + + if (!isRunning || user === 'Unknown') { + ['my-link-cache', 'my-geyser-cache', 'my-sftp-cache'].forEach((sub) => { + if (client.subscriptions.has(sub)) { + ws.send(JSON.stringify({ type: sub.replace('-cache', '-status'), error: `Container ${user} is not running or user unknown` })); + } + }); + if (!isRunning && (client.subscriptions.has('my-link-cache') || client.subscriptions.has('my-geyser-cache') || client.subscriptions.has('my-sftp-cache')) && user !== 'Unknown') { + console.log(`Starting container status monitor for ${user}`); + client.statusCheckMonitorInterval = setInterval(async () => { + try { + const monitorContainer = docker.getContainer(user); + const monitorInspect = await monitorContainer.inspect(); + if (monitorInspect.State.Status === 'running') { + console.log(`Container ${user} is running, restarting status checks`); + await manageStatusChecks(ws, client, user, docker); + clearInterval(client.statusCheckMonitorInterval); + client.statusCheckMonitorInterval = null; + } + } catch (error) { + console.error(`Error monitoring container ${user}:`, error.message); + } + }, parseInt(process.env.CONTAINER_STATUS_MONITOR_INTERVAL_MS, 10)); + client.intervals.push(client.statusCheckMonitorInterval); + } + return; + 
} + + const statusChecks = [ + { + subscription: 'my-link-cache', + intervalKey: 'connectionStatusInterval', + intervalMs: process.env.CONNECTION_STATUS_INTERVAL_MS, + checkFn: checkConnectionStatus, + cacheKey: 'my-link-cache', + statusType: 'connection-status' + }, + { + subscription: 'my-geyser-cache', + intervalKey: 'geyserStatusInterval', + intervalMs: process.env.GEYSER_STATUS_INTERVAL_MS, + checkFn: checkGeyserStatus, + cacheKey: 'my-geyser-cache', + statusType: 'geyser-status' + }, + { + subscription: 'my-sftp-cache', + intervalKey: 'sftpStatusInterval', + intervalMs: process.env.SFTP_STATUS_INTERVAL_MS, + checkFn: checkSftpStatus, + cacheKey: 'my-sftp-cache', + statusType: 'sftp-status' + } + ]; + + for (const { subscription, intervalKey, intervalMs, checkFn, cacheKey, statusType } of statusChecks) { + if (client.subscriptions.has(subscription)) { + console.log(`Starting ${statusType} check for ${user}`); + client[intervalKey] = setInterval(async () => { + try { + const containerCheck = docker.getContainer(user); + const inspectCheck = await containerCheck.inspect(); + if (inspectCheck.State.Status !== 'running') { + console.log(`Container ${user} stopped, clearing ${statusType} interval`); + clearInterval(client[intervalKey]); + client[intervalKey] = null; + return; + } + const data = client.cache[cacheKey]; + if (data && data.hostname && data.port) { + const status = await checkFn(data.hostname, data.port); + ws.send(JSON.stringify({ type: statusType, data: { isOnline: status.isOnline } })); + } + } catch (error) { + console.error(`Error in ${statusType} check for ${user}:`, error.message); + ws.send(JSON.stringify({ type: statusType, data: { isOnline: false, error: error.message } })); + } + }, parseInt(intervalMs, 10)); + client.intervals.push(client[intervalKey]); + + const data = client.cache[cacheKey]; + if (data && data.hostname && data.port) { + console.log(`Performing initial ${statusType} check for ${user}`); + const status = await checkFn(data.hostname, data.port); + ws.send(JSON.stringify({ type: statusType, data: { isOnline: status.isOnline } })); + } + } + } + } catch (error) { + console.error(`Error managing status checks for ${user}:`, error.message); + } +} + +export function handleWebSocket(ws, req, docker) { + const urlParams = new URLSearchParams(req.url.split('?')[1]); + const apiKey = urlParams.get('apiKey'); + if (!apiKey) { + console.error('WebSocket connection rejected: Missing API key'); + ws.send(JSON.stringify({ error: 'API key required' })); + ws.close(); + return; + } + + const client = { + apiKey, + subscriptions: new Set(), + user: null, + intervals: [], + logStream: null, + cache: {}, + connectionStatusInterval: null, + geyserStatusInterval: null, + sftpStatusInterval: null, + statusCheckMonitorInterval: null + }; + clients.set(ws, client); + console.log('WebSocket client registered with API key'); + + ws.on('message', async (message) => { + try { + const data = JSON.parse(message.toString()); + if (data.type === 'subscribe') { + data.endpoints.forEach(endpoint => { + client.subscriptions.add(endpoint); + console.log(`Client subscribed to ${endpoint}`); + }); + console.log(`Client subscriptions: ${Array.from(client.subscriptions)}`); + + let hello = client.cache['hello'] || await apiRequest('/hello', client.apiKey); + if (!client.cache['hello'] && !hello.error) client.cache['hello'] = hello; + + if (hello.error) { + console.error('Failed to fetch /hello:', hello.error); + ws.send(JSON.stringify({ type: 'hello', error: hello.error })); + return; + 
} + + if (hello.message && typeof hello.message === 'string') { + const user = hello.message.split(', ')[1]?.replace('!', '').trim() || 'Unknown'; + client.user = user; + console.log(`User identified: ${user}`); + ws.send(JSON.stringify({ type: 'hello', data: hello })); + + if (client.subscriptions.has('docker') && user !== 'Unknown') { + try { + const container = docker.getContainer(user); + const inspect = await container.inspect(); + if (inspect.State.Status === 'running') { + console.log(`Starting docker stats interval for ${user}`); + client.intervals.push(setInterval(async () => { + try { + console.log(`Fetching docker stats for ${user}`); + const stats = await getContainerStats(docker, user); + if (stats.error) { + console.error(`Error fetching stats for ${user}: ${stats.error}`); + ws.send(JSON.stringify({ type: 'docker', error: stats.error })); + } else { + console.log(`Sending docker stats for ${user}:`, stats); + ws.send(JSON.stringify({ type: 'docker', data: { ...stats, user } })); + } + } catch (error) { + console.error(`Error in docker stats interval for ${user}:`, error.message); + ws.send(JSON.stringify({ type: 'docker', error: `Failed to fetch stats: ${error.message}` })); + } + }, parseInt(process.env.DOCKER_STATS_INTERVAL_MS, 10))); + + // Send initial stats immediately + console.log(`Sending initial docker stats for ${user}`); + const initialStats = await getContainerStats(docker, user); + ws.send(JSON.stringify({ type: 'docker', data: { ...initialStats, user } })); + } else { + console.log(`Container ${user} is not running, skipping docker stats interval`); + ws.send(JSON.stringify({ type: 'docker', error: `Container ${user} is not running` })); + } + } catch (error) { + console.error(`Error checking container status for docker stats for ${user}:`, error.message); + ws.send(JSON.stringify({ type: 'docker', error: `Failed to check container status: ${error.message}` })); + } + } else if (user === 'Unknown') { + console.warn('Cannot start docker stats interval: User is Unknown'); + ws.send(JSON.stringify({ type: 'docker', error: 'User not identified' })); + } + + if (client.subscriptions.has('docker-logs') && user !== 'Unknown') { + console.log(`Starting docker logs stream for ${user}`); + await streamContainerLogs(docker, ws, user, client); + } else if (user === 'Unknown') { + console.warn('Cannot start docker logs stream: User is Unknown'); + ws.send(JSON.stringify({ type: 'docker-logs', error: 'User not identified' })); + } + + await manageStatusChecks(ws, client, user, docker); + + await Promise.all([ + ...staticEndpoints.filter(e => client.subscriptions.has(e)).map(e => fetchAndSendUpdate(ws, e, client, docker)), + ...dynamicEndpoints.filter(e => client.subscriptions.has(e)).map(async (e) => { + if (e === 'hello' && client.cache['hello']) { + ws.send(JSON.stringify({ type: 'hello', data: client.cache['hello'] })); + return; + } + if (e === 'time' && client.cache['time']) { + ws.send(JSON.stringify({ type: 'time', data: client.cache['time'] })); + return; + } + await fetchAndSendUpdate(ws, e, client, docker); + }), + client.subscriptions.has('list-players') ? 
fetchAndSendUpdate(ws, 'list-players', client, docker) : null + ].filter(Boolean)); + + client.intervals.push(setInterval(async () => { + try { + for (const endpoint of dynamicEndpoints) { + if (client.subscriptions.has(endpoint) && !(endpoint === 'hello' && client.cache['hello'] || endpoint === 'time' && client.cache['time'])) { + await fetchAndSendUpdate(ws, endpoint, client, docker); + } + } + } catch (error) { + console.error('Error in dynamic endpoints interval:', error.message); + } + }, parseInt(process.env.DYNAMIC_ENDPOINTS_INTERVAL_MS, 10))); + + client.intervals.push(setInterval(async () => { + try { + for (const endpoint of staticEndpoints) { + if (client.subscriptions.has(endpoint)) { + await fetchAndSendUpdate(ws, endpoint, client, docker); + } + } + } catch (error) { + console.error('Error in static endpoints interval:', error.message); + } + }, parseInt(process.env.STATIC_ENDPOINTS_INTERVAL_MS, 10))); + + if (client.subscriptions.has('list-players') && user !== 'Unknown') { + try { + const container = docker.getContainer(user); + const inspect = await container.inspect(); + if (inspect.State.Status === 'running') { + console.log(`Starting list-players interval for ${user}`); + client.intervals.push(setInterval(() => fetchAndSendUpdate(ws, 'list-players', client, docker), parseInt(process.env.LIST_PLAYERS_INTERVAL_MS, 10))); + } else { + console.log(`Container ${user} is not running, skipping list-players interval`); + ws.send(JSON.stringify({ type: 'list-players', error: `Container ${user} is not running` })); + } + } catch (error) { + console.error(`Error checking container status for list-players for ${user}:`, error.message); + ws.send(JSON.stringify({ type: 'list-players', error: `Failed to check container status: ${error.message}` })); + } + } + } else { + console.error('Invalid /hello response:', hello); + ws.send(JSON.stringify({ type: 'hello', error: 'Invalid hello response' })); + } + } else if (data.type === 'updateUser') { + client.user = data.user; + console.log(`Updated user to: ${client.user}`); + if (client.user !== 'Unknown') { + client.intervals.forEach(clearInterval); + client.intervals = []; + ['connectionStatusInterval', 'geyserStatusInterval', 'sftpStatusInterval', 'statusCheckMonitorInterval'].forEach((key) => { + if (client[key]) clearInterval(client[key]); + client[key] = null; + }); + + if (client.subscriptions.has('docker')) { + try { + const container = docker.getContainer(client.user); + const inspect = await container.inspect(); + if (inspect.State.Status === 'running') { + console.log(`Starting docker stats interval for new user ${client.user}`); + client.intervals.push(setInterval(async () => { + try { + console.log(`Fetching docker stats for ${client.user}`); + const stats = await getContainerStats(docker, client.user); + if (stats.error) { + console.error(`Error fetching stats for ${client.user}: ${stats.error}`); + ws.send(JSON.stringify({ type: 'docker', error: stats.error })); + } else { + console.log(`Sending docker stats for ${client.user}:`, stats); + ws.send(JSON.stringify({ type: 'docker', data: { ...stats, user: client.user } })); + } + } catch (error) { + console.error(`Error in docker stats interval for ${client.user}:`, error.message); + ws.send(JSON.stringify({ type: 'docker', error: `Failed to fetch stats: ${error.message}` })); + } + }, parseInt(process.env.DOCKER_STATS_INTERVAL_MS, 10))); + + // Send initial stats immediately + console.log(`Sending initial docker stats for ${client.user}`); + const initialStats = await 
getContainerStats(docker, client.user); + ws.send(JSON.stringify({ type: 'docker', data: { ...initialStats, user: client.user } })); + } else { + console.log(`Container ${client.user} is not running, skipping docker stats interval`); + ws.send(JSON.stringify({ type: 'docker', error: `Container ${client.user} is not running` })); + } + } catch (error) { + console.error(`Error checking container status for docker stats for ${client.user}:`, error.message); + ws.send(JSON.stringify({ type: 'docker', error: `Failed to check container status: ${error.message}` })); + } + } + + if (client.subscriptions.has('list-players')) { + try { + const container = docker.getContainer(client.user); + const inspect = await container.inspect(); + if (inspect.State.Status === 'running') { + console.log(`Starting list-players interval for new user ${client.user}`); + client.intervals.push(setInterval(() => fetchAndSendUpdate(ws, 'list-players', client, docker), parseInt(process.env.LIST_PLAYERS_NEW_USER_INTERVAL_MS, 10))); + } else { + console.log(`Container ${client.user} is not running, skipping list-players interval`); + ws.send(JSON.stringify({ type: 'list-players', error: `Container ${client.user} is not running` })); + } + } catch (error) { + console.error(`Error checking container status for list-players for ${client.user}:`, error.message); + ws.send(JSON.stringify({ type: 'list-players', error: `Failed to check container status: ${error.message}` })); + } + } + + await manageStatusChecks(ws, client, client.user, docker); + + if (client.subscriptions.has('docker-logs')) { + if (client.logStream) { + client.logStream.destroy(); + client.logStream = null; + } + console.log(`Starting docker logs stream for new user ${client.user}`); + await streamContainerLogs(docker, ws, client.user, client); + } + } + } else if (data.type === 'request') { + const { requestId, endpoint, method, body } = data; + let response; + if (endpoint.startsWith('/docker') || endpoint === '/docker') { + response = client.user === 'Unknown' ? { error: 'User not identified' } : await getContainerStats(docker, client.user); + console.log(`Docker stats request response for ${client.user}:`, response); + } else if (endpoint === '/search' && method === 'POST' && body) { + response = await apiRequest(endpoint, client.apiKey, method, body); + response.totalResults = response.totalResults || (response.results ? response.results.length : 0); + } else if (endpoint === '/server-properties' && method === 'GET') { + response = client.user === 'Unknown' ? { error: 'User not identified' } : await readServerProperties(docker, client.user); + } else if (endpoint === '/server-properties' && method === 'POST' && body && body.content) { + response = client.user === 'Unknown' ? 
{ error: 'User not identified' } : await writeServerProperties(docker, client.user, body.content); + } else { + response = await apiRequest(endpoint, client.apiKey, method, body); + } + ws.send(JSON.stringify({ requestId, ...response })); + if (['my-link', 'my-geyser-link', 'my-sftp'].includes(endpoint) && !response.error) { + await fetchAndSendUpdate(ws, endpoint, client, docker); + if (endpoint === 'my-link') { + const linkData = await apiRequest('/my-link-cache', client.apiKey); + if (!linkData.error) { + client.cache['my-link-cache'] = linkData; + try { + const container = docker.getContainer(client.user); + const inspect = await container.inspect(); + if (inspect.State.Status === 'running') { + console.log(`Performing status check after my-link request for ${client.user}`); + const status = await checkConnectionStatus(linkData.hostname, linkData.port); + ws.send(JSON.stringify({ type: 'connection-status', data: { isOnline: status.isOnline } })); + } else { + ws.send(JSON.stringify({ type: 'connection-status', error: `Container ${client.user} is not running` })); + } + } catch (error) { + console.error(`Error checking container status for ${client.user}:`, error.message); + ws.send(JSON.stringify({ type: 'connection-status', error: `Failed to check container status: ${error.message}` })); + } + } + } else if (endpoint === 'my-geyser-link') { + const geyserData = await apiRequest('/my-geyser-cache', client.apiKey); + if (!geyserData.error) { + client.cache['my-geyser-cache'] = geyserData; + try { + const container = docker.getContainer(client.user); + const inspect = await container.inspect(); + if (inspect.State.Status === 'running') { + console.log(`Performing status check after my-geyser-link request for ${client.user}`); + const status = await checkGeyserStatus(geyserData.hostname, geyserData.port); + ws.send(JSON.stringify({ type: 'geyser-status', data: { isOnline: status.isOnline } })); + } else { + ws.send(JSON.stringify({ type: 'geyser-status', error: `Container ${client.user} is not running` })); + } + } catch (error) { + console.error(`Error checking container status for ${client.user}:`, error.message); + ws.send(JSON.stringify({ type: 'geyser-status', error: `Failed to check container status: ${error.message}` })); + } + } + } else if (endpoint === 'my-sftp') { + const sftpData = await apiRequest('/my-sftp-cache', client.apiKey); + if (!sftpData.error) { + client.cache['my-sftp-cache'] = sftpData; + try { + const container = docker.getContainer(client.user); + const inspect = await container.inspect(); + if (inspect.State.Status === 'running') { + console.log(`Performing status check after my-sftp request for ${client.user}`); + const status = await checkSftpStatus(sftpData.hostname, sftpData.port); + ws.send(JSON.stringify({ type: 'sftp-status', data: { isOnline: status.isOnline } })); + } else { + ws.send(JSON.stringify({ type: 'sftp-status', error: `Container ${client.user} is not running` })); + } + } catch (error) { + console.error(`Error checking container status for ${client.user}:`, error.message); + ws.send(JSON.stringify({ type: 'sftp-status', error: `Failed to check container status: ${error.message}` })); + } + } + } + } + } else if (['kick-player', 'ban-player', 'op-player', 'deop-player'].includes(data.type)) { + const { requestId, player } = data; + if (!player) { + ws.send(JSON.stringify({ requestId, error: 'Player name is required' })); + return; + } + if (client.user === 'Unknown') { + ws.send(JSON.stringify({ requestId, error: 'User not identified' })); + 
return; + } + try { + const container = docker.getContainer(client.user); + const inspect = await container.inspect(); + if (inspect.State.Status !== 'running') { + ws.send(JSON.stringify({ requestId, error: `Container ${client.user} is not running` })); + return; + } + const command = { + 'kick-player': `kick ${player}`, + 'ban-player': `ban ${player}`, + 'op-player': `op ${player}`, + 'deop-player': `deop ${player}` + }[data.type]; + const response = await apiRequest('/console', client.apiKey, 'POST', { command }); + if (!response.error) { + const playerListResponse = await apiRequest('/list-players', client.apiKey); + if (!playerListResponse.error) { + ws.send(JSON.stringify({ type: 'list-players', data: playerListResponse })); + } + } + ws.send(JSON.stringify({ requestId, ...response })); + } catch (error) { + console.error(`Error processing ${data.type} for ${player}:`, error.message); + ws.send(JSON.stringify({ requestId, error: `Failed to process command: ${error.message}` })); + } + } else if (data.type === 'tell-player') { + const { requestId, player, message } = data; + if (!player || !message) { + ws.send(JSON.stringify({ requestId, error: 'Player name and message are required' })); + return; + } + if (client.user === 'Unknown') { + ws.send(JSON.stringify({ requestId, error: 'User not identified' })); + return; + } + try { + const container = docker.getContainer(client.user); + const inspect = await container.inspect(); + if (inspect.State.Status !== 'running') { + ws.send(JSON.stringify({ requestId, error: `Container ${client.user} is not running` })); + return; + } + const response = await apiRequest('/tell', client.apiKey, 'POST', { username: player, message }); + ws.send(JSON.stringify({ requestId, ...response })); + } catch (error) { + console.error(`Error sending message to ${player}:`, error.message); + ws.send(JSON.stringify({ requestId, error: `Failed to send message: ${error.message}` })); + } + } else if (data.type === 'give-player') { + const { requestId, player, item, amount } = data; + if (!player || !item || !amount) { + ws.send(JSON.stringify({ requestId, error: 'Player name, item, and amount are required' })); + return; + } + if (client.user === 'Unknown') { + ws.send(JSON.stringify({ requestId, error: 'User not identified' })); + return; + } + try { + const container = docker.getContainer(client.user); + const inspect = await container.inspect(); + if (inspect.State.Status !== 'running') { + ws.send(JSON.stringify({ requestId, error: `Container ${client.user} is not running` })); + return; + } + const response = await apiRequest('/give', client.apiKey, 'POST', { username: player, item, amount }); + ws.send(JSON.stringify({ requestId, ...response })); + } catch (error) { + console.error(`Error giving item to ${player}:`, error.message); + ws.send(JSON.stringify({ requestId, error: `Failed to give item: ${error.message}` })); + } + } else if (data.type === 'refresh') { + console.log('Processing refresh request'); + delete client.cache['hello']; + delete client.cache['time']; + await Promise.all([ + ...staticEndpoints.filter(e => client.subscriptions.has(e)).map(e => fetchAndSendUpdate(ws, e, client, docker)), + ...dynamicEndpoints.filter(e => client.subscriptions.has(e)).map(e => fetchAndSendUpdate(ws, e, client, docker)), + client.subscriptions.has('list-players') ? 
fetchAndSendUpdate(ws, 'list-players', client, docker) : null + ].filter(Boolean)); + if (client.user && client.user !== 'Unknown') { + try { + const stats = await getContainerStats(docker, client.user); + console.log(`Sending refreshed docker stats for ${client.user}:`, stats); + ws.send(JSON.stringify({ type: 'docker', data: { ...stats, user: client.user } })); + const container = docker.getContainer(client.user); + const inspect = await container.inspect(); + if (inspect.State.Status === 'running') { + const linkData = client.cache['my-link-cache']; + if (linkData && linkData.hostname && linkData.port && client.subscriptions.has('my-link-cache')) { + console.log(`Performing refresh connection status check for ${client.user}`); + const status = await checkConnectionStatus(linkData.hostname, linkData.port); + ws.send(JSON.stringify({ type: 'connection-status', data: { isOnline: status.isOnline } })); + } + const geyserData = client.cache['my-geyser-cache']; + if (geyserData && geyserData.hostname && geyserData.port && client.subscriptions.has('my-geyser-cache')) { + console.log(`Performing refresh Geyser status check for ${client.user}`); + const status = await checkGeyserStatus(geyserData.hostname, geyserData.port); + ws.send(JSON.stringify({ type: 'geyser-status', data: { isOnline: status.isOnline } })); + } + const sftpData = client.cache['my-sftp-cache']; + if (sftpData && sftpData.hostname && sftpData.port && client.subscriptions.has('my-sftp-cache')) { + console.log(`Performing refresh SFTP status check for ${client.user}`); + const status = await checkSftpStatus(sftpData.hostname, sftpData.port); + ws.send(JSON.stringify({ type: 'sftp-status', data: { isOnline: status.isOnline } })); + } + } else { + if (client.subscriptions.has('my-link-cache')) { + ws.send(JSON.stringify({ type: 'connection-status', error: `Container ${client.user} is not running` })); + } + if (client.subscriptions.has('my-geyser-cache')) { + ws.send(JSON.stringify({ type: 'geyser-status', error: `Container ${client.user} is not running` })); + } + if (client.subscriptions.has('my-sftp-cache')) { + ws.send(JSON.stringify({ type: 'sftp-status', error: `Container ${client.user} is not running` })); + } + } + } catch (error) { + console.error(`Error during refresh for ${client.user}:`, error.message); + ws.send(JSON.stringify({ type: 'docker', error: `Failed to refresh stats: ${error.message}` })); + } + } + } + } catch (error) { + console.error('WebSocket message error:', error.message); + ws.send(JSON.stringify({ error: `Invalid message: ${error.message}` })); + } + }); + + ws.on('close', () => { + try { + const client = clients.get(ws); + client.intervals.forEach(clearInterval); + if (client.logStream) { + client.logStream.destroy(); + client.logStream = null; + } + ['connectionStatusInterval', 'geyserStatusInterval', 'sftpStatusInterval', 'statusCheckMonitorInterval'].forEach((key) => { + if (client[key]) clearInterval(client[key]); + }); + clients.delete(ws); + console.log('WebSocket client disconnected'); + } catch (error) { + console.error('Error on WebSocket close:', error.message); + } + }); + + ws.on('error', (error) => console.error('WebSocket error:', error.message)); +} \ No newline at end of file diff --git a/package.json b/package.json index 06b4477..d1125d7 100644 --- a/package.json +++ b/package.json @@ -3,6 +3,7 @@ "version": "1.0.0", "description": "Web panel for My-MC API with Docker integration", "main": "server.js", + "type": "module", "scripts": { "start": "node server.js", "build:css": 
"postcss public/css/styles.css -o public/css/styles.min.css", diff --git a/server.js b/server.js index bbe9f7c..65f5eda 100644 --- a/server.js +++ b/server.js @@ -1,1540 +1,31 @@ -require('dotenv').config(); -const express = require('express'); -const Docker = require('dockerode'); -const fetch = require('node-fetch'); -const path = require('path'); -const cors = require('cors'); -const WebSocket = require('ws'); -const crypto = require('crypto'); -const unirest = require('unirest'); -const { exec } = require('child_process'); -const util = require('util'); -const fs = require('fs').promises; -const os = require('os'); -const net = require('net'); - -const execPromise = util.promisify(exec); +import 'dotenv/config'; +import express from 'express'; +import cors from 'cors'; +import path from 'path'; +import { WebSocketServer } from 'ws'; +import { fileURLToPath } from 'url'; +import { setupDocker } from './includes/docker.js'; +import { handleWebSocket } from './includes/websocket.js'; +import { generateLoginLink, handleAutoLogin } from './includes/auth.js'; +const __dirname = path.dirname(fileURLToPath(import.meta.url)); const app = express(); -const docker = new Docker({ socketPath: process.env.DOCKER_SOCKET_PATH }); -const API_URL = process.env.API_URL; const PORT = parseInt(process.env.PORT, 10); -const ADMIN_SECRET_KEY = process.env.ADMIN_SECRET_KEY; -const LINK_EXPIRY_SECONDS = parseInt(process.env.LINK_EXPIRY_SECONDS, 10); -const temporaryLinks = new Map(); app.use(cors()); app.use(express.json()); app.use(express.static(path.join(__dirname, 'public'))); -const wss = new WebSocket.Server({ noServer: true }); +const docker = setupDocker(); +const wss = new WebSocketServer({ noServer: true }); -async function apiRequest(endpoint, apiKey, method = 'GET', body = null) { - const headers = { - 'Accept': 'application/json', - 'Content-Type': 'application/json', - 'x-my-mc-auth': apiKey - }; - try { - const response = await fetch(`${API_URL}${endpoint}`, { - method, - headers, - body: body ? JSON.stringify(body) : null - }); - const data = await response.json(); - if (!response.ok) { - console.error(`API error for ${endpoint}: ${response.status} - ${JSON.stringify(data)}`); - return { error: data.message || `HTTP ${response.status}` }; - } - return data; - } catch (error) { - console.error(`Network error for ${endpoint}:`, error.message); - return { error: `Network error: ${error.message}` }; - } -} +wss.on('connection', (ws, req) => handleWebSocket(ws, req, docker)); -async function getContainerStats(containerName) { - try { - const containers = await docker.listContainers({ all: true }); - const containerExists = containers.some(c => c.Names.includes(`/${containerName}`)); - if (!containerExists) { - console.error(`Container ${containerName} not found`); - return { error: `Container ${containerName} not found` }; - } - - const container = docker.getContainer(containerName); - const info = await container.inspect(); - const stats = await container.stats({ stream: false }); - - const memoryUsage = stats.memory_stats.usage / 1024 / 1024; - const memoryLimit = stats.memory_stats.limit / 1024 / 1024 / 1024; - const memoryPercent = ((memoryUsage / (memoryLimit * 1024)) * 100).toFixed(2); - - const cpuDelta = stats.cpu_stats.cpu_usage.total_usage - (stats.precpu_stats.cpu_usage?.total_usage || 0); - const systemDelta = stats.cpu_stats.system_cpu_usage - (stats.precpu_stats.system_cpu_usage || 0); - const cpuPercent = systemDelta > 0 ? 
((cpuDelta / systemDelta) * stats.cpu_stats.online_cpus * 100).toFixed(2) : 0; - - return { - status: info.State.Status, - memory: { raw: `${memoryUsage.toFixed(2)}MiB / ${memoryLimit.toFixed(2)}GiB`, percent: memoryPercent }, - cpu: cpuPercent - }; - } catch (error) { - console.error(`Docker stats error for ${containerName}:`, error.message); - return { error: `Failed to fetch stats for ${containerName}: ${error.message}` }; - } -} - -async function checkConnectionStatus(hostname, port) { - try { - const command = `${process.env.STATUS_CHECK_PATH} -host ${hostname} -port ${port}`; - console.log(`Executing status check: ${command}`); - const { stdout, stderr } = await execPromise(command); - if (stderr) { - console.error(`Status check error: ${stderr}`); - return { isOnline: false, error: stderr }; - } - const result = JSON.parse(stdout); - return { isOnline: true, data: result }; - } catch (error) { - console.error(`Status check failed: ${error.message}`); - return { isOnline: false, error: error.message }; - } -} - -async function checkGeyserStatus(hostname, port) { - try { - const command = `${process.env.GEYSER_STATUS_CHECK_PATH} -host ${hostname} -port ${port}`; - console.log(`Executing Geyser status check: ${command}`); - const { stdout, stderr } = await execPromise(command); - if (stderr) { - console.error(`Geyser status check error: ${stderr}`); - return { isOnline: false, error: stderr }; - } - const result = JSON.parse(stdout); - return { isOnline: true, data: result }; - } catch (error) { - console.error(`Geyser status check failed: ${error.message}`); - return { isOnline: false, error: error.message }; - } -} - -async function checkSftpStatus(hostname, port) { - return new Promise((resolve) => { - const socket = new net.Socket(); - const timeout = parseInt(process.env.SFTP_CONNECTION_TIMEOUT_MS, 10); - - socket.setTimeout(timeout); - - socket.on('connect', () => { - console.log(`SFTP port check succeeded for ${hostname}:${port}`); - socket.destroy(); - resolve({ isOnline: true }); - }); - - socket.on('timeout', () => { - console.error(`SFTP port check timed out for ${hostname}:${port}`); - socket.destroy(); - resolve({ isOnline: false, error: 'Connection timed out' }); - }); - - socket.on('error', (error) => { - console.error(`SFTP port check failed for ${hostname}:${port}: ${error.message}`); - socket.destroy(); - resolve({ isOnline: false, error: error.message }); - }); - - socket.connect(port, process.env.SFTP_HOSTNAME); - }); -} - -async function streamContainerLogs(ws, containerName, client) { - let isStreaming = true; - let isStartingStream = false; - - async function startLogStream() { - if (isStartingStream) { - console.log(`Stream start already in progress for ${containerName}`); - return false; - } - isStartingStream = true; - - try { - const containers = await docker.listContainers({ all: true }); - const containerExists = containers.some(c => c.Names.includes(`/${containerName}`)); - if (!containerExists) { - console.error(`Container ${containerName} not found for logs`); - if (isStreaming) { - ws.send(JSON.stringify({ type: 'docker-logs', error: `Container ${containerName} not found` })); - } - return false; - } - - const container = docker.getContainer(containerName); - const inspect = await container.inspect(); - if (inspect.State.Status !== 'running') { - console.log(`Container ${containerName} is not running (status: ${inspect.State.Status})`); - if (isStreaming) { - ws.send(JSON.stringify({ type: 'docker-logs', error: `Container ${containerName} is not 
running` })); - } - return false; - } - - if (client.logStream) { - console.log(`Destroying existing log stream for ${containerName}`); - client.logStream.removeAllListeners(); - client.logStream.destroy(); - client.logStream = null; - } - - const logStream = await container.logs({ - follow: true, - stdout: true, - stderr: true, - tail: parseInt(process.env.LOG_STREAM_TAIL_LINES, 10), - timestamps: true - }); - - logStream.on('data', (chunk) => { - if (isStreaming && client.logStream === logStream) { - const log = chunk.toString('utf8'); - ws.send(JSON.stringify({ type: 'docker-logs', data: { log } })); - } - }); - - logStream.on('error', (error) => { - console.error(`Log stream error for ${containerName}:`, error.message); - if (isStreaming) { - ws.send(JSON.stringify({ type: 'docker-logs', error: `Log stream error: ${error.message}` })); - } - }); - - client.logStream = logStream; - console.log(`Log stream started for ${containerName}`); - return true; - } catch (error) { - console.error(`Error streaming logs for ${containerName}:`, error.message); - if (isStreaming) { - ws.send(JSON.stringify({ type: 'docker-logs', error: `Failed to stream logs: ${error.message}` })); - } - return false; - } finally { - isStartingStream = false; - } - } - - async function monitorContainer() { - try { - const container = docker.getContainer(containerName); - const inspect = await container.inspect(); - if (inspect.State.Status !== 'running') { - console.log(`Container ${containerName} is not running (status: ${inspect.State.Status})`); - if (client.logStream) { - client.logStream.removeAllListeners(); - client.logStream.destroy(); - client.logStream = null; - } - return false; - } - return true; - } catch (error) { - console.error(`Error monitoring container ${containerName}:`, error.message); - return false; - } - } - - if (!(await startLogStream())) { - const monitorInterval = setInterval(async () => { - if (!isStreaming) { - clearInterval(monitorInterval); - return; - } - const isRunning = await monitorContainer(); - if (isRunning && !client.logStream && !isStartingStream) { - console.log(`Container ${containerName} is running, attempting to start log stream`); - await startLogStream(); - } else if (!isRunning) { - console.log(`Container ${containerName} not running, waiting for restart`); - } - }, parseInt(process.env.LOG_STREAM_MONITOR_INTERVAL_MS, 10)); - - ws.on('close', () => { - console.log(`WebSocket closed for ${containerName}, cleaning up`); - isStreaming = false; - clearInterval(monitorInterval); - if (client.logStream) { - client.logStream.removeAllListeners(); - client.logStream.destroy(); - client.logStream = null; - } - }); - - return; - } - - const monitorInterval = setInterval(async () => { - if (!isStreaming) { - clearInterval(monitorInterval); - return; - } - const isRunning = await monitorContainer(); - if (isRunning && !client.logStream && !isStartingStream) { - console.log(`Container ${containerName} is running, attempting to restart log stream`); - await startLogStream(); - } else if (!isRunning) { - console.log(`Container ${containerName} not running, waiting for restart`); - } - }, parseInt(process.env.LOG_STREAM_MONITOR_INTERVAL_MS, 10)); - - ws.on('close', () => { - console.log(`WebSocket closed for ${containerName}, cleaning up`); - isStreaming = false; - clearInterval(monitorInterval); - if (client.logStream) { - client.logStream.removeAllListeners(); - client.logStream.destroy(); - client.logStream = null; - } - }); -} - -const clients = new Map(); -const staticEndpoints = 
['log', 'website', 'map', 'my-link-cache', 'my-geyser-cache', 'my-sftp-cache', 'my-link', 'my-geyser-link', 'my-sftp']; -const dynamicEndpoints = ['hello', 'time', 'mod-list']; - -wss.on('connection', (ws, req) => { - try { - console.log('WebSocket connection established'); - const urlParams = new URLSearchParams(req.url.split('?')[1]); - const apiKey = urlParams.get('apiKey'); - if (!apiKey) { - console.error('WebSocket connection rejected: Missing API key'); - ws.send(JSON.stringify({ error: 'API key required' })); - ws.close(); - return; - } - - clients.set(ws, { - apiKey, - subscriptions: new Set(), - user: null, - intervals: [], - logStream: null, - cache: {}, - connectionStatusInterval: null, - geyserStatusInterval: null, - sftpStatusInterval: null, - statusCheckMonitorInterval: null - }); - console.log('WebSocket client registered'); - - ws.on('message', async (message) => { - try { - const data = JSON.parse(message.toString()); - console.log('WebSocket message received:', data); - const client = clients.get(ws); - - if (data.type === 'subscribe') { - data.endpoints.forEach(endpoint => client.subscriptions.add(endpoint)); - console.log(`Client subscribed to: ${Array.from(client.subscriptions)}`); - - let hello = client.cache['hello']; - if (!hello) { - hello = await apiRequest('/hello', client.apiKey); - if (!hello.error) { - client.cache['hello'] = hello; - } - } - - if (hello.error) { - console.error('Failed to fetch /hello:', hello.error); - ws.send(JSON.stringify({ type: 'hello', error: hello.error })); - return; - } - - if (hello.message && typeof hello.message === 'string') { - const user = hello.message.split(', ')[1]?.replace('!', '').trim() || 'Unknown'; - client.user = user; - console.log(`User identified: ${user}`); - ws.send(JSON.stringify({ type: 'hello', data: hello })); - - async function manageStatusChecks() { - try { - const container = docker.getContainer(user); - const inspect = await container.inspect(); - const isRunning = inspect.State.Status === 'running'; - console.log(`Container ${user} status: ${inspect.State.Status}`); - - // Clear any existing status check intervals - if (client.connectionStatusInterval) { - clearInterval(client.connectionStatusInterval); - client.connectionStatusInterval = null; - } - if (client.geyserStatusInterval) { - clearInterval(client.geyserStatusInterval); - client.geyserStatusInterval = null; - } - if (client.sftpStatusInterval) { - clearInterval(client.sftpStatusInterval); - client.sftpStatusInterval = null; - } - - if (isRunning && user !== 'Unknown') { - if (client.subscriptions.has('my-link-cache')) { - console.log(`Starting connection status check for ${user}`); - client.connectionStatusInterval = setInterval(async () => { - try { - const containerCheck = docker.getContainer(user); - const inspectCheck = await containerCheck.inspect(); - if (inspectCheck.State.Status !== 'running') { - console.log(`Container ${user} stopped, clearing connection status interval`); - clearInterval(client.connectionStatusInterval); - client.connectionStatusInterval = null; - return; - } - const linkData = client.cache['my-link-cache']; - if (linkData && linkData.hostname && linkData.port) { - const status = await checkConnectionStatus(linkData.hostname, linkData.port); - ws.send(JSON.stringify({ type: 'connection-status', data: { isOnline: status.isOnline } })); - } - } catch (error) { - console.error(`Error in connection status check for ${user}:`, error.message); - ws.send(JSON.stringify({ type: 'connection-status', data: { isOnline: false } 
})); - } - }, parseInt(process.env.CONNECTION_STATUS_INTERVAL_MS, 10)); - client.intervals.push(client.connectionStatusInterval); - // Initial check - const linkData = client.cache['my-link-cache']; - if (linkData && linkData.hostname && linkData.port) { - console.log(`Performing initial connection status check for ${user}`); - const status = await checkConnectionStatus(linkData.hostname, linkData.port); - ws.send(JSON.stringify({ type: 'connection-status', data: { isOnline: status.isOnline } })); - } - } - - if (client.subscriptions.has('my-geyser-cache')) { - console.log(`Starting Geyser status check for ${user}`); - client.geyserStatusInterval = setInterval(async () => { - try { - const containerCheck = docker.getContainer(user); - const inspectCheck = await containerCheck.inspect(); - if (inspectCheck.State.Status !== 'running') { - console.log(`Container ${user} stopped, clearing Geyser status interval`); - clearInterval(client.geyserStatusInterval); - client.geyserStatusInterval = null; - return; - } - const geyserData = client.cache['my-geyser-cache']; - if (geyserData && geyserData.hostname && geyserData.port) { - const status = await checkGeyserStatus(geyserData.hostname, geyserData.port); - ws.send(JSON.stringify({ type: 'geyser-status', data: { isOnline: status.isOnline } })); - } - } catch (error) { - console.error(`Error in Geyser status check for ${user}:`, error.message); - ws.send(JSON.stringify({ type: 'geyser-status', data: { isOnline: false } })); - } - }, parseInt(process.env.GEYSER_STATUS_INTERVAL_MS, 10)); - client.intervals.push(client.geyserStatusInterval); - // Initial check - const geyserData = client.cache['my-geyser-cache']; - if (geyserData && geyserData.hostname && geyserData.port) { - console.log(`Performing initial Geyser status check for ${user}`); - const status = await checkGeyserStatus(geyserData.hostname, geyserData.port); - ws.send(JSON.stringify({ type: 'geyser-status', data: { isOnline: status.isOnline } })); - } - } - - if (client.subscriptions.has('my-sftp-cache')) { - console.log(`Starting SFTP status check for ${user}`); - client.sftpStatusInterval = setInterval(async () => { - try { - const containerCheck = docker.getContainer(user); - const inspectCheck = await containerCheck.inspect(); - if (inspectCheck.State.Status !== 'running') { - console.log(`Container ${user} stopped, clearing SFTP status interval`); - clearInterval(client.sftpStatusInterval); - client.sftpStatusInterval = null; - return; - } - const sftpData = client.cache['my-sftp-cache']; - if (sftpData && sftpData.hostname && sftpData.port) { - const status = await checkSftpStatus(sftpData.hostname, sftpData.port); - ws.send(JSON.stringify({ type: 'sftp-status', data: { isOnline: status.isOnline } })); - } - } catch (error) { - console.error(`Error in SFTP status check for ${user}:`, error.message); - ws.send(JSON.stringify({ type: 'sftp-status', data: { isOnline: false } })); - } - }, parseInt(process.env.SFTP_STATUS_INTERVAL_MS, 10)); - client.intervals.push(client.sftpStatusInterval); - // Initial check - const sftpData = client.cache['my-sftp-cache']; - if (sftpData && sftpData.hostname && sftpData.port) { - console.log(`Performing initial SFTP status check for ${user}`); - const status = await checkSftpStatus(sftpData.hostname, sftpData.port); - ws.send(JSON.stringify({ type: 'sftp-status', data: { isOnline: status.isOnline } })); - } - } - } else { - console.log(`Container ${user} is not running or user is Unknown, skipping status checks`); - if 
(client.subscriptions.has('my-link-cache')) { - ws.send(JSON.stringify({ type: 'connection-status', error: `Container ${user} is not running` })); - } - if (client.subscriptions.has('my-geyser-cache')) { - ws.send(JSON.stringify({ type: 'geyser-status', error: `Container ${user} is not running` })); - } - if (client.subscriptions.has('my-sftp-cache')) { - ws.send(JSON.stringify({ type: 'sftp-status', error: `Container ${user} is not running` })); - } - } - - // Clear any existing monitor interval - if (client.statusCheckMonitorInterval) { - clearInterval(client.statusCheckMonitorInterval); - client.statusCheckMonitorInterval = null; - } - - // Start monitoring if container is not running and status checks are subscribed - if (!isRunning && (client.subscriptions.has('my-link-cache') || client.subscriptions.has('my-geyser-cache') || client.subscriptions.has('my-sftp-cache')) && user !== 'Unknown') { - console.log(`Starting container status monitor for ${user}`); - client.statusCheckMonitorInterval = setInterval(async () => { - try { - const monitorContainer = docker.getContainer(user); - const monitorInspect = await monitorContainer.inspect(); - if (monitorInspect.State.Status === 'running') { - console.log(`Container ${user} is running, restarting status checks`); - await manageStatusChecks(); - clearInterval(client.statusCheckMonitorInterval); - client.statusCheckMonitorInterval = null; - } - } catch (error) { - console.error(`Error monitoring container ${user}:`, error.message); - } - }, parseInt(process.env.CONTAINER_STATUS_MONITOR_INTERVAL_MS, 10)); - client.intervals.push(client.statusCheckMonitorInterval); - } - } catch (error) { - console.error(`Error checking container status for ${user}:`, error.message); - if (client.subscriptions.has('my-link-cache')) { - ws.send(JSON.stringify({ type: 'connection-status', error: `Failed to check container status: ${error.message}` })); - } - if (client.subscriptions.has('my-geyser-cache')) { - ws.send(JSON.stringify({ type: 'geyser-status', error: `Failed to check container status: ${error.message}` })); - } - if (client.subscriptions.has('my-sftp-cache')) { - ws.send(JSON.stringify({ type: 'sftp-status', error: `Failed to check container status: ${error.message}` })); - } - } - } - - if (client.subscriptions.has('docker') && user !== 'Unknown') { - try { - const container = docker.getContainer(user); - const inspect = await container.inspect(); - if (inspect.State.Status === 'running') { - console.log(`Starting docker interval for ${user}`); - client.intervals.push(setInterval(async () => { - try { - const stats = await getContainerStats(user); - ws.send(JSON.stringify({ type: 'docker', data: { ...stats, user } })); - } catch (error) { - console.error(`Error in docker interval for ${user}:`, error.message); - } - }, parseInt(process.env.DOCKER_STATS_INTERVAL_MS, 10))); - } else { - console.log(`Container ${user} is not running, skipping docker interval`); - ws.send(JSON.stringify({ type: 'docker', error: `Container ${user} is not running` })); - } - } catch (error) { - console.error(`Error checking container status for ${user}:`, error.message); - ws.send(JSON.stringify({ type: 'docker', error: `Failed to check container status: ${error.message}` })); - } - } else if (user === 'Unknown') { - console.warn('Cannot start docker interval: User is Unknown'); - ws.send(JSON.stringify({ type: 'docker', error: 'User not identified' })); - } - - if (client.subscriptions.has('docker-logs') && user !== 'Unknown') { - console.log(`Starting docker logs 
stream for ${user}`); - await streamContainerLogs(ws, user, client); - } else if (user === 'Unknown') { - console.warn('Cannot start docker logs stream: User is Unknown'); - ws.send(JSON.stringify({ type: 'docker-logs', error: 'User not identified' })); - } - - // Manage status checks based on container status - await manageStatusChecks(); - - await Promise.all([ - ...staticEndpoints.filter(e => client.subscriptions.has(e)).map(e => fetchAndSendUpdate(ws, e, client)), - ...dynamicEndpoints.filter(e => client.subscriptions.has(e)).map(async (e) => { - if (e === 'hello' && client.cache['hello']) { - ws.send(JSON.stringify({ type: 'hello', data: client.cache['hello'] })); - return; - } - if (e === 'time' && client.cache['time']) { - ws.send(JSON.stringify({ type: 'time', data: client.cache['time'] })); - return; - } - await fetchAndSendUpdate(ws, e, client); - }), - client.subscriptions.has('list-players') ? fetchAndSendUpdate(ws, 'list-players', client) : null - ].filter(Boolean)); - - client.intervals.push(setInterval(async () => { - try { - for (const endpoint of dynamicEndpoints) { - if (client.subscriptions.has(endpoint)) { - if ((endpoint === 'hello' && client.cache['hello']) || - (endpoint === 'time' && client.cache['time'])) { - continue; - } - await fetchAndSendUpdate(ws, endpoint, client); - } - } - } catch (error) { - console.error('Error in dynamic endpoints interval:', error.message); - } - }, parseInt(process.env.DYNAMIC_ENDPOINTS_INTERVAL_MS, 10))); - - client.intervals.push(setInterval(async () => { - try { - for (const endpoint of staticEndpoints) { - if (client.subscriptions.has(endpoint)) { - await fetchAndSendUpdate(ws, endpoint, client); - } - } - } catch (error) { - console.error('Error in static endpoints interval:', error.message); - } - }, parseInt(process.env.STATIC_ENDPOINTS_INTERVAL_MS, 10))); - - if (client.subscriptions.has('list-players') && user !== 'Unknown') { - try { - const container = docker.getContainer(user); - const inspect = await container.inspect(); - if (inspect.State.Status === 'running') { - client.intervals.push(setInterval(async () => { - try { - await fetchAndSendUpdate(ws, 'list-players', client); - } catch (error) { - console.error('Error in list-players interval:', error.message); - } - }, parseInt(process.env.LIST_PLAYERS_INTERVAL_MS, 10))); - } else { - console.log(`Container ${user} is not running, skipping list-players interval`); - ws.send(JSON.stringify({ type: 'list-players', error: `Container ${user} is not running` })); - } - } catch (error) { - console.error(`Error checking container status for ${user}:`, error.message); - ws.send(JSON.stringify({ type: 'list-players', error: `Failed to check container status: ${error.message}` })); - } - } - } else { - console.error('Invalid /hello response:', hello); - ws.send(JSON.stringify({ type: 'hello', error: 'Invalid hello response' })); - } - } else if (data.type === 'updateUser') { - client.user = data.user; - console.log(`Updated user to: ${client.user}`); - if (client.user !== 'Unknown') { - client.intervals.forEach(clearInterval); - client.intervals = []; - if (client.connectionStatusInterval) { - clearInterval(client.connectionStatusInterval); - client.connectionStatusInterval = null; - } - if (client.geyserStatusInterval) { - clearInterval(client.geyserStatusInterval); - client.geyserStatusInterval = null; - } - if (client.sftpStatusInterval) { - clearInterval(client.sftpStatusInterval); - client.sftpStatusInterval = null; - } - if (client.statusCheckMonitorInterval) { - 
clearInterval(client.statusCheckMonitorInterval); - client.statusCheckMonitorInterval = null; - } - - async function manageStatusChecks() { - try { - const container = docker.getContainer(client.user); - const inspect = await container.inspect(); - const isRunning = inspect.State.Status === 'running'; - console.log(`Container ${client.user} status: ${inspect.State.Status}`); - - // Clear any existing status check intervals - if (client.connectionStatusInterval) { - clearInterval(client.connectionStatusInterval); - client.connectionStatusInterval = null; - } - if (client.geyserStatusInterval) { - clearInterval(client.geyserStatusInterval); - client.geyserStatusInterval = null; - } - if (client.sftpStatusInterval) { - clearInterval(client.sftpStatusInterval); - client.sftpStatusInterval = null; - } - - if (isRunning) { - if (client.subscriptions.has('my-link-cache')) { - console.log(`Starting new connection status check for ${client.user}`); - client.connectionStatusInterval = setInterval(async () => { - try { - const containerCheck = docker.getContainer(client.user); - const inspectCheck = await containerCheck.inspect(); - if (inspectCheck.State.Status !== 'running') { - console.log(`Container ${client.user} stopped, clearing connection status interval`); - clearInterval(client.connectionStatusInterval); - client.connectionStatusInterval = null; - return; - } - const linkData = client.cache['my-link-cache']; - if (linkData && linkData.hostname && linkData.port) { - const status = await checkConnectionStatus(linkData.hostname, linkData.port); - ws.send(JSON.stringify({ type: 'connection-status', data: { isOnline: status.isOnline } })); - } - } catch (error) { - console.error(`Error in connection status check for ${client.user}:`, error.message); - ws.send(JSON.stringify({ type: 'connection-status', data: { isOnline: false } })); - } - }, parseInt(process.env.CONNECTION_STATUS_NEW_USER_INTERVAL_MS, 10)); - client.intervals.push(client.connectionStatusInterval); - // Initial check - const linkData = client.cache['my-link-cache']; - if (linkData && linkData.hostname && linkData.port) { - console.log(`Performing initial connection status check for ${client.user}`); - const status = await checkConnectionStatus(linkData.hostname, linkData.port); - ws.send(JSON.stringify({ type: 'connection-status', data: { isOnline: status.isOnline } })); - } - } - - if (client.subscriptions.has('my-geyser-cache')) { - console.log(`Starting new Geyser status check for ${client.user}`); - client.geyserStatusInterval = setInterval(async () => { - try { - const containerCheck = docker.getContainer(client.user); - const inspectCheck = await containerCheck.inspect(); - if (inspectCheck.State.Status !== 'running') { - console.log(`Container ${client.user} stopped, clearing Geyser status interval`); - clearInterval(client.geyserStatusInterval); - client.geyserStatusInterval = null; - return; - } - const geyserData = client.cache['my-geyser-cache']; - if (geyserData && geyserData.hostname && geyserData.port) { - const status = await checkGeyserStatus(geyserData.hostname, geyserData.port); - ws.send(JSON.stringify({ type: 'geyser-status', data: { isOnline: status.isOnline } })); - } - } catch (error) { - console.error(`Error in Geyser status check for ${client.user}:`, error.message); - ws.send(JSON.stringify({ type: 'geyser-status', data: { isOnline: false } })); - } - }, parseInt(process.env.GEYSER_STATUS_INTERVAL_MS, 10)); - client.intervals.push(client.geyserStatusInterval); - // Initial check - const geyserData = 
client.cache['my-geyser-cache']; - if (geyserData && geyserData.hostname && geyserData.port) { - console.log(`Performing initial Geyser status check for ${client.user}`); - const status = await checkGeyserStatus(geyserData.hostname, geyserData.port); - ws.send(JSON.stringify({ type: 'geyser-status', data: { isOnline: status.isOnline } })); - } - } - - if (client.subscriptions.has('my-sftp-cache')) { - console.log(`Starting new SFTP status check for ${client.user}`); - client.sftpStatusInterval = setInterval(async () => { - try { - const containerCheck = docker.getContainer(client.user); - const inspectCheck = await containerCheck.inspect(); - if (inspectCheck.State.Status !== 'running') { - console.log(`Container ${client.user} stopped, clearing SFTP status interval`); - clearInterval(client.sftpStatusInterval); - client.sftpStatusInterval = null; - return; - } - const sftpData = client.cache['my-sftp-cache']; - if (sftpData && sftpData.hostname && sftpData.port) { - const status = await checkSftpStatus(sftpData.hostname, sftpData.port); - ws.send(JSON.stringify({ type: 'sftp-status', data: { isOnline: status.isOnline } })); - } - } catch (error) { - console.error(`Error in SFTP status check for ${client.user}:`, error.message); - ws.send(JSON.stringify({ type: 'sftp-status', data: { isOnline: false } })); - } - }, parseInt(process.env.SFTP_STATUS_INTERVAL_MS, 10)); - client.intervals.push(client.sftpStatusInterval); - // Initial check - const sftpData = client.cache['my-sftp-cache']; - if (sftpData && sftpData.hostname && sftpData.port) { - console.log(`Performing initial SFTP status check for ${client.user}`); - const status = await checkSftpStatus(sftpData.hostname, sftpData.port); - ws.send(JSON.stringify({ type: 'sftp-status', data: { isOnline: status.isOnline } })); - } - } - } else { - console.log(`Container ${client.user} is not running, skipping status checks`); - if (client.subscriptions.has('my-link-cache')) { - ws.send(JSON.stringify({ type: 'connection-status', error: `Container ${client.user} is not running` })); - } - if (client.subscriptions.has('my-geyser-cache')) { - ws.send(JSON.stringify({ type: 'geyser-status', error: `Container ${client.user} is not running` })); - } - if (client.subscriptions.has('my-sftp-cache')) { - ws.send(JSON.stringify({ type: 'sftp-status', error: `Container ${client.user} is not running` })); - } - } - - // Clear any existing monitor interval - if (client.statusCheckMonitorInterval) { - clearInterval(client.statusCheckMonitorInterval); - client.statusCheckMonitorInterval = null; - } - - // Start monitoring if container is not running and status checks are subscribed - if (!isRunning && (client.subscriptions.has('my-link-cache') || client.subscriptions.has('my-geyser-cache') || client.subscriptions.has('my-sftp-cache'))) { - console.log(`Starting container status monitor for ${client.user}`); - client.statusCheckMonitorInterval = setInterval(async () => { - try { - const monitorContainer = docker.getContainer(client.user); - const monitorInspect = await monitorContainer.inspect(); - if (monitorInspect.State.Status === 'running') { - console.log(`Container ${client.user} is running, restarting status checks`); - await manageStatusChecks(); - clearInterval(client.statusCheckMonitorInterval); - client.statusCheckMonitorInterval = null; - } - } catch (error) { - console.error(`Error monitoring container ${client.user}:`, error.message); - } - }, parseInt(process.env.CONTAINER_STATUS_MONITOR_INTERVAL_MS, 10)); - 
client.intervals.push(client.statusCheckMonitorInterval); - } - } catch (error) { - console.error(`Error checking container status for ${client.user}:`, error.message); - if (client.subscriptions.has('my-link-cache')) { - ws.send(JSON.stringify({ type: 'connection-status', error: `Failed to check container status: ${error.message}` })); - } - if (client.subscriptions.has('my-geyser-cache')) { - ws.send(JSON.stringify({ type: 'geyser-status', error: `Failed to check container status: ${error.message}` })); - } - if (client.subscriptions.has('my-sftp-cache')) { - ws.send(JSON.stringify({ type: 'sftp-status', error: `Failed to check container status: ${error.message}` })); - } - } - } - - if (client.subscriptions.has('docker')) { - try { - const container = docker.getContainer(client.user); - const inspect = await container.inspect(); - if (inspect.State.Status === 'running') { - console.log(`Starting new docker interval for ${client.user}`); - client.intervals.push(setInterval(async () => { - try { - const stats = await getContainerStats(client.user); - ws.send(JSON.stringify({ type: 'docker', data: { ...stats, user: client.user } })); - } catch (error) { - console.error(`Error in docker interval for ${client.user}:`, error.message); - } - }, parseInt(process.env.DOCKER_STATS_INTERVAL_MS, 10))); - } else { - console.log(`Container ${client.user} is not running, skipping docker interval`); - ws.send(JSON.stringify({ type: 'docker', error: `Container ${client.user} is not running` })); - } - } catch (error) { - console.error(`Error checking container status for ${client.user}:`, error.message); - ws.send(JSON.stringify({ type: 'docker', error: `Failed to check container status: ${error.message}` })); - } - } - - if (client.subscriptions.has('list-players')) { - try { - const container = docker.getContainer(client.user); - const inspect = await container.inspect(); - if (inspect.State.Status === 'running') { - client.intervals.push(setInterval(async () => { - try { - await fetchAndSendUpdate(ws, 'list-players', client); - } catch (error) { - console.error('Error in list-players interval:', error.message); - } - }, parseInt(process.env.LIST_PLAYERS_NEW_USER_INTERVAL_MS, 10))); - } else { - console.log(`Container ${client.user} is not running, skipping list-players interval`); - ws.send(JSON.stringify({ type: 'list-players', error: `Container ${client.user} is not running` })); - } - } catch (error) { - console.error(`Error checking container status for ${client.user}:`, error.message); - ws.send(JSON.stringify({ type: 'list-players', error: `Failed to check container status: ${error.message}` })); - } - } - - // Manage status checks for the new user - await manageStatusChecks(); - - if (client.subscriptions.has('docker-logs')) { - if (client.logStream) { - client.logStream.destroy(); - client.logStream = null; - } - console.log(`Starting new docker logs stream for ${client.user}`); - await streamContainerLogs(ws, client.user, client); - } - } - } else if (data.type === 'request') { - const { requestId, endpoint, method, body } = data; - let response; - if (endpoint.startsWith('/docker') || endpoint === '/docker') { - const containerName = client.user || 'Unknown'; - if (containerName === 'Unknown') { - console.error('Cannot fetch docker stats: User not identified'); - response = { error: 'User not identified' }; - } else { - response = await getContainerStats(containerName); - } - } else if (endpoint === '/search' && method === 'POST' && body) { - response = await apiRequest(endpoint, 
client.apiKey, method, body); - response.totalResults = response.totalResults || (response.results ? response.results.length : 0); - } else if (endpoint === '/server-properties' && method === 'GET') { - if (!client.user || client.user === 'Unknown') { - response = { error: 'User not identified' }; - } else { - try { - const container = docker.getContainer(client.user); - const inspect = await container.inspect(); - if (inspect.State.Status !== 'running') { - response = { error: `Container ${client.user} is not running` }; - } else { - const filePath = process.env.SERVER_PROPERTIES_PATH; - const command = `docker exec ${client.user} bash -c "cat ${filePath}"`; - console.log(`Executing: ${command}`); - const { stdout, stderr } = await execPromise(command); - if (stderr) { - console.error(`Error reading server.properties: ${stderr}`); - response = { error: 'Failed to read server.properties' }; - } else { - response = { content: stdout }; - } - } - } catch (error) { - console.error(`Error reading server.properties: ${error.message}`); - response = { error: `Failed to read server.properties: ${error.message}` }; - } - } - } else if (endpoint === '/server-properties' && method === 'POST' && body && body.content) { - if (!client.user || client.user === 'Unknown') { - response = { error: 'User not identified' }; - } else { - try { - const filePath = process.env.SERVER_PROPERTIES_PATH; - const tmpDir = process.env.TEMP_DIR; - const randomId = crypto.randomBytes(parseInt(process.env.TEMP_FILE_RANDOM_ID_BYTES, 10)).toString('hex'); - const tmpFile = path.join(tmpDir, `server_properties_${randomId}.tmp`); - const containerFilePath = `${process.env.CONTAINER_TEMP_FILE_PREFIX}${randomId}.tmp`; - - await fs.writeFile(tmpFile, body.content); - const copyCommand = `docker cp ${tmpFile} ${client.user}:${containerFilePath}`; - console.log(`Executing: ${copyCommand}`); - await execPromise(copyCommand); - - const moveCommand = `docker exec ${client.user} bash -c "mv ${containerFilePath} ${filePath} && chown mc:mc ${filePath}"`; - console.log(`Executing: ${moveCommand}`); - await execPromise(moveCommand); - - await fs.unlink(tmpFile).catch(err => console.error(`Error deleting temp file: ${err.message}`)); - response = { message: 'Server properties updated' }; - } catch (error) { - console.error(`Error writing server.properties: ${error.message}`); - response = { error: `Failed to write server.properties: ${error.message}` }; - } - } - } else { - response = await apiRequest(endpoint, client.apiKey, method, body); - } - ws.send(JSON.stringify({ requestId, ...response })); - if (['my-link', 'my-geyser-link', 'my-sftp'].includes(endpoint) && !response.error) { - await fetchAndSendUpdate(ws, endpoint, client); - if (endpoint === 'my-link') { - const linkData = await apiRequest('/my-link-cache', client.apiKey); - if (!linkData.error) { - client.cache['my-link-cache'] = linkData; - try { - const container = docker.getContainer(client.user); - const inspect = await container.inspect(); - if (inspect.State.Status === 'running') { - console.log(`Performing status check after my-link request for ${client.user}`); - const status = await checkConnectionStatus(linkData.hostname, linkData.port); - ws.send(JSON.stringify({ type: 'connection-status', data: { isOnline: status.isOnline } })); - } else { - ws.send(JSON.stringify({ type: 'connection-status', error: `Container ${client.user} is not running` })); - } - } catch (error) { - console.error(`Error checking container status for ${client.user}:`, error.message); - 
ws.send(JSON.stringify({ type: 'connection-status', error: `Failed to check container status: ${error.message}` })); - } - } - } else if (endpoint === 'my-geyser-link') { - const geyserData = await apiRequest('/my-geyser-cache', client.apiKey); - if (!geyserData.error) { - client.cache['my-geyser-cache'] = geyserData; - try { - const container = docker.getContainer(client.user); - const inspect = await container.inspect(); - if (inspect.State.Status === 'running') { - console.log(`Performing status check after my-geyser-link request for ${client.user}`); - const status = await checkGeyserStatus(geyserData.hostname, geyserData.port); - ws.send(JSON.stringify({ type: 'geyser-status', data: { isOnline: status.isOnline } })); - } else { - ws.send(JSON.stringify({ type: 'geyser-status', error: `Container ${client.user} is not running` })); - } - } catch (error) { - console.error(`Error checking container status for ${client.user}:`, error.message); - ws.send(JSON.stringify({ type: 'geyser-status', error: `Failed to check container status: ${error.message}` })); - } - } - } else if (endpoint === 'my-sftp') { - const sftpData = await apiRequest('/my-sftp-cache', client.apiKey); - if (!sftpData.error) { - client.cache['my-sftp-cache'] = sftpData; - try { - const container = docker.getContainer(client.user); - const inspect = await container.inspect(); - if (inspect.State.Status === 'running') { - console.log(`Performing status check after my-sftp request for ${client.user}`); - const status = await checkSftpStatus(sftpData.hostname, sftpData.port); - ws.send(JSON.stringify({ type: 'sftp-status', data: { isOnline: status.isOnline } })); - } else { - ws.send(JSON.stringify({ type: 'sftp-status', error: `Container ${client.user} is not running` })); - } - } catch (error) { - console.error(`Error checking container status for ${client.user}:`, error.message); - ws.send(JSON.stringify({ type: 'sftp-status', error: `Failed to check container status: ${error.message}` })); - } - } - } - } - } else if (data.type === 'kick-player') { - const { requestId, player } = data; - let response; - if (!player) { - response = { error: 'Player name is required' }; - } else if (!client.user || client.user === 'Unknown') { - response = { error: 'User not identified' }; - } else { - try { - const container = docker.getContainer(client.user); - const inspect = await container.inspect(); - if (inspect.State.Status !== 'running') { - response = { error: `Container ${client.user} is not running` }; - } else { - response = await apiRequest('/console', client.apiKey, 'POST', { command: `kick ${player.toString()}` }); - if (!response.error) { - const playerListResponse = await apiRequest('/list-players', client.apiKey); - if (playerListResponse.error) { - console.error(`Failed to fetch updated player list after kick: ${playerListResponse.error}`); - } else { - ws.send(JSON.stringify({ type: 'list-players', data: playerListResponse })); - } - } - } - } catch (error) { - console.error(`Error kicking player: ${error.message}`); - response = { error: `Failed to kick player: ${error.message}` }; - } - } - ws.send(JSON.stringify({ requestId, ...response })); - } else if (data.type === 'ban-player') { - const { requestId, player } = data; - let response; - if (!player) { - response = { error: 'Player name is required' }; - } else if (!client.user || client.user === 'Unknown') { - response = { error: 'User not identified' }; - } else { - try { - const container = docker.getContainer(client.user); - const inspect = await 
container.inspect(); - if (inspect.State.Status !== 'running') { - response = { error: `Container ${client.user} is not running` }; - } else { - response = await apiRequest('/console', client.apiKey, 'POST', { command: `ban ${player}` }); - if (!response.error) { - const playerListResponse = await apiRequest('/list-players', client.apiKey); - if (playerListResponse.error) { - console.error(`Failed to fetch updated player list after ban: ${playerListResponse.error}`); - } else { - ws.send(JSON.stringify({ type: 'list-players', data: playerListResponse })); - } - } - } - } catch (error) { - console.error(`Error banning player: ${error.message}`); - response = { error: `Failed to ban player: ${error.message}` }; - } - } - ws.send(JSON.stringify({ requestId, ...response })); - } else if (data.type === 'op-player') { - const { requestId, player } = data; - let response; - if (!player) { - response = { error: 'Player name is required' }; - } else if (!client.user || client.user === 'Unknown') { - response = { error: 'User not identified' }; - } else { - try { - const container = docker.getContainer(client.user); - const inspect = await container.inspect(); - if (inspect.State.Status !== 'running') { - response = { error: `Container ${client.user} is not running` }; - } else { - response = await apiRequest('/console', client.apiKey, 'POST', { command: `op ${player}` }); - if (!response.error) { - const playerListResponse = await apiRequest('/list-players', client.apiKey); - if (playerListResponse.error) { - console.error(`Failed to fetch updated player list after op: ${playerListResponse.error}`); - } else { - ws.send(JSON.stringify({ type: 'list-players', data: playerListResponse })); - } - } - } - } catch (error) { - console.error(`Error op-ing player: ${error.message}`); - response = { error: `Failed to op player: ${error.message}` }; - } - } - ws.send(JSON.stringify({ requestId, ...response })); - } else if (data.type === 'deop-player') { - const { requestId, player } = data; - let response; - if (!player) { - response = { error: 'Player name is required' }; - } else if (!client.user || client.user === 'Unknown') { - response = { error: 'User not identified' }; - } else { - try { - const container = docker.getContainer(client.user); - const inspect = await container.inspect(); - if (inspect.State.Status !== 'running') { - response = { error: `Container ${client.user} is not running` }; - } else { - response = await apiRequest('/console', client.apiKey, 'POST', { command: `deop ${player}` }); - if (!response.error) { - const playerListResponse = await apiRequest('/list-players', client.apiKey); - if (playerListResponse.error) { - console.error(`Failed to fetch updated player list after deop: ${playerListResponse.error}`); - } else { - ws.send(JSON.stringify({ type: 'list-players', data: playerListResponse })); - } - } - } - } catch (error) { - console.error(`Error deop-ing player: ${error.message}`); - response = { error: `Failed to deop player: ${error.message}` }; - } - } - ws.send(JSON.stringify({ requestId, ...response })); - } else if (data.type === 'tell-player') { - const { requestId, player, message } = data; - let response; - if (!player || !message) { - response = { error: 'Player name and message are required' }; - } else if (!client.user || client.user === 'Unknown') { - response = { error: 'User not identified' }; - } else { - try { - const container = docker.getContainer(client.user); - const inspect = await container.inspect(); - if (inspect.State.Status !== 'running') { - 
response = { error: `Container ${client.user} is not running` }; - } else { - response = await apiRequest('/tell', client.apiKey, 'POST', { username: player, message }); - if (!response.error) { - console.log(`Message sent to ${player}: ${message}`); - } - } - } catch (error) { - console.error(`Error sending message to player: ${error.message}`); - response = { error: `Failed to send message: ${error.message}` }; - } - } - ws.send(JSON.stringify({ requestId, ...response })); - } else if (data.type === 'give-player') { - const { requestId, player, item, amount } = data; - let response; - if (!player || !item || !amount) { - response = { error: 'Player name, item, and amount are required' }; - } else if (!client.user || client.user === 'Unknown') { - response = { error: 'User not identified' }; - } else { - try { - const container = docker.getContainer(client.user); - const inspect = await container.inspect(); - if (inspect.State.Status !== 'running') { - response = { error: `Container ${client.user} is not running` }; - } else { - response = await apiRequest('/give', client.apiKey, 'POST', { username: player, item, amount }); - if (!response.error) { - console.log(`Gave ${amount} ${item} to ${player}`); - } - } - } catch (error) { - console.error(`Error giving item to player: ${error.message}`); - response = { error: `Failed to give item: ${error.message}` }; - } - } - ws.send(JSON.stringify({ requestId, ...response })); - } else if (data.type === 'refresh') { - console.log('Processing refresh request'); - delete client.cache['hello']; - delete client.cache['time']; - await Promise.all([ - ...staticEndpoints.filter(e => client.subscriptions.has(e)).map(e => fetchAndSendUpdate(ws, e, client)), - ...dynamicEndpoints.filter(e => client.subscriptions.has(e)).map(e => fetchAndSendUpdate(ws, e, client)), - client.subscriptions.has('list-players') ? 
fetchAndSendUpdate(ws, 'list-players', client) : null - ].filter(Boolean)); - if (client.user && client.user !== 'Unknown') { - const stats = await getContainerStats(client.user); - ws.send(JSON.stringify({ type: 'docker', data: { ...stats, user: client.user } })); - try { - const container = docker.getContainer(client.user); - const inspect = await container.inspect(); - if (inspect.State.Status === 'running') { - const linkData = client.cache['my-link-cache']; - if (linkData && linkData.hostname && linkData.port && client.subscriptions.has('my-link-cache')) { - console.log(`Performing refresh connection status check for ${client.user}`); - const status = await checkConnectionStatus(linkData.hostname, linkData.port); - ws.send(JSON.stringify({ type: 'connection-status', data: { isOnline: status.isOnline } })); - } - const geyserData = client.cache['my-geyser-cache']; - if (geyserData && geyserData.hostname && geyserData.port && client.subscriptions.has('my-geyser-cache')) { - console.log(`Performing refresh Geyser status check for ${client.user}`); - const status = await checkGeyserStatus(geyserData.hostname, geyserData.port); - ws.send(JSON.stringify({ type: 'geyser-status', data: { isOnline: status.isOnline } })); - } - const sftpData = client.cache['my-sftp-cache']; - if (sftpData && sftpData.hostname && sftpData.port && client.subscriptions.has('my-sftp-cache')) { - console.log(`Performing refresh SFTP status check for ${client.user}`); - const status = await checkSftpStatus(sftpData.hostname, sftpData.port); - ws.send(JSON.stringify({ type: 'sftp-status', data: { isOnline: status.isOnline } })); - } - } else { - if (client.subscriptions.has('my-link-cache')) { - ws.send(JSON.stringify({ type: 'connection-status', error: `Container ${client.user} is not running` })); - } - if (client.subscriptions.has('my-geyser-cache')) { - ws.send(JSON.stringify({ type: 'geyser-status', error: `Container ${client.user} is not running` })); - } - if (client.subscriptions.has('my-sftp-cache')) { - ws.send(JSON.stringify({ type: 'sftp-status', error: `Container ${client.user} is not running` })); - } - } - } catch (error) { - console.error(`Error checking container status for ${client.user}:`, error.message); - if (client.subscriptions.has('my-link-cache')) { - ws.send(JSON.stringify({ type: 'connection-status', error: `Failed to check container status: ${error.message}` })); - } - if (client.subscriptions.has('my-geyser-cache')) { - ws.send(JSON.stringify({ type: 'geyser-status', error: `Failed to check container status: ${error.message}` })); - } - if (client.subscriptions.has('my-sftp-cache')) { - ws.send(JSON.stringify({ type: 'sftp-status', error: `Failed to check container status: ${error.message}` })); - } - } - } - } - } catch (error) { - console.error('WebSocket message error:', error.message); - ws.send(JSON.stringify({ error: `Invalid message: ${error.message}` })); - } - }); - - ws.on('close', () => { - try { - const client = clients.get(ws); - client.intervals.forEach(clearInterval); - if (client.logStream) { - client.logStream.destroy(); - client.logStream = null; - } - if (client.connectionStatusInterval) { - clearInterval(client.connectionStatusInterval); - } - if (client.geyserStatusInterval) { - clearInterval(client.geyserStatusInterval); - } - if (client.sftpStatusInterval) { - clearInterval(client.sftpStatusInterval); - } - if (client.statusCheckMonitorInterval) { - clearInterval(client.statusCheckMonitorInterval); - } - clients.delete(ws); - console.log('WebSocket client 
disconnected'); - } catch (error) { - console.error('Error on WebSocket close:', error.message); - } - }); - - ws.on('error', (error) => { - console.error('WebSocket error:', error.message); - }); - } catch (error) { - console.error('WebSocket connection error:', error.message); - ws.send(JSON.stringify({ error: `Connection error: ${error.message}` })); - ws.close(); - } -}); - -async function fetchAndSendUpdate(ws, endpoint, client) { - try { - if (['mod-list', 'list-players'].includes(endpoint) && client.user && client.user !== 'Unknown') { - try { - const container = docker.getContainer(client.user); - const inspect = await container.inspect(); - if (inspect.State.Status !== 'running') { - console.log(`Skipping update for ${endpoint} as container ${client.user} is not running`); - ws.send(JSON.stringify({ type: endpoint, error: `Container ${client.user} is not running` })); - return; - } - } catch (error) { - console.error(`Error checking container status for ${endpoint}:`, error.message); - ws.send(JSON.stringify({ type: endpoint, error: `Failed to check container status: ${error.message}` })); - return; - } - } - - if (endpoint === 'time' && client.cache['time']) { - ws.send(JSON.stringify({ type: endpoint, data: client.cache['time'] })); - return; - } - - const response = await apiRequest(`/${endpoint}`, client.apiKey); - if (!response.error) { - if (endpoint === 'time') { - client.cache['time'] = response; - } - if (endpoint === 'my-link-cache') { - client.cache['my-link-cache'] = response; - if (client.subscriptions.has('my-link-cache') && client.user !== 'Unknown') { - try { - const container = docker.getContainer(client.user); - const inspect = await container.inspect(); - if (inspect.State.Status === 'running' && response.hostname && response.port) { - console.log(`Performing status check for my-link-cache update for ${client.user}`); - const status = await checkConnectionStatus(response.hostname, response.port); - ws.send(JSON.stringify({ type: 'connection-status', data: { isOnline: status.isOnline } })); - } else { - ws.send(JSON.stringify({ type: 'connection-status', error: `Container ${client.user} is not running` })); - } - } catch (error) { - console.error(`Error checking container status for ${client.user}:`, error.message); - ws.send(JSON.stringify({ type: 'connection-status', error: `Failed to check container status: ${error.message}` })); - } - } - } - if (endpoint === 'my-geyser-cache') { - client.cache['my-geyser-cache'] = response; - if (client.subscriptions.has('my-geyser-cache') && client.user !== 'Unknown') { - try { - const container = docker.getContainer(client.user); - const inspect = await container.inspect(); - if (inspect.State.Status === 'running' && response.hostname && response.port) { - console.log(`Performing status check for my-geyser-cache update for ${client.user}`); - const status = await checkGeyserStatus(response.hostname, response.port); - ws.send(JSON.stringify({ type: 'geyser-status', data: { isOnline: status.isOnline } })); - } else { - ws.send(JSON.stringify({ type: 'geyser-status', error: `Container ${client.user} is not running` })); - } - } catch (error) { - console.error(`Error checking container status for ${client.user}:`, error.message); - ws.send(JSON.stringify({ type: 'geyser-status', error: `Failed to check container status: ${error.message}` })); - } - } - } - if (endpoint === 'my-sftp-cache') { - client.cache['my-sftp-cache'] = response; - if (client.subscriptions.has('my-sftp-cache') && client.user !== 'Unknown') { - try { - 
-            const container = docker.getContainer(client.user);
-            const inspect = await container.inspect();
-            if (inspect.State.Status === 'running' && response.hostname && response.port) {
-              console.log(`Performing status check for my-sftp-cache update for ${client.user}`);
-              const status = await checkSftpStatus(response.hostname, response.port);
-              ws.send(JSON.stringify({ type: 'sftp-status', data: { isOnline: status.isOnline } }));
-            } else {
-              ws.send(JSON.stringify({ type: 'sftp-status', error: `Container ${client.user} is not running` }));
-            }
-          } catch (error) {
-            console.error(`Error checking container status for ${client.user}:`, error.message);
-            ws.send(JSON.stringify({ type: 'sftp-status', error: `Failed to check container status: ${error.message}` }));
-          }
-        }
-      }
-      ws.send(JSON.stringify({ type: endpoint, data: response }));
-    } else {
-      console.error(`Error fetching ${endpoint}:`, response.error);
-      ws.send(JSON.stringify({ type: endpoint, error: response.error }));
-    }
-  } catch (error) {
-    console.error(`Error fetching ${endpoint}:`, error.message);
-    ws.send(JSON.stringify({ type: endpoint, error: error.message }));
-  }
-}
-
-app.post('/generate-login-link', async (req, res) => {
-  try {
-    const { secretKey, username } = req.body;
-
-    if (!secretKey || secretKey !== process.env.ADMIN_SECRET_KEY) {
-      return res.status(401).json({ error: 'Invalid secret key' });
-    }
-
-    if (!username) {
-      return res.status(400).json({ error: 'Username is required' });
-    }
-
-    const tokenResponse = await unirest
-      .post(process.env.AUTH_ENDPOINT)
-      .headers({ 'Accept': 'application/json', 'Content-Type': 'application/json' })
-      .send({
-        username: username,
-        password: process.env.AUTH_PASSWORD
-      });
-
-    if (!tokenResponse.body.token) {
-      return res.status(500).json({ error: 'Failed to generate API key' });
-    }
-
-    const apiKey = tokenResponse.body.token;
-
-    const linkId = crypto.randomBytes(parseInt(process.env.LINK_ID_BYTES, 10)).toString('hex');
-    const loginLink = `${process.env.AUTO_LOGIN_LINK_PREFIX}${linkId}`;
-
-    temporaryLinks.set(linkId, {
-      apiKey,
-      username,
-      expiresAt: Date.now() + LINK_EXPIRY_SECONDS * 1000
-    });
-
-    setTimeout(() => {
-      temporaryLinks.delete(linkId);
-    }, LINK_EXPIRY_SECONDS * 1000);
-
-    res.json({ loginLink });
-  } catch (error) {
-    console.error('Error generating login link:', error.message);
-    res.status(500).json({ error: 'Internal server error' });
-  }
-});
-
-app.get('/auto-login/:linkId', (req, res) => {
-  const { linkId } = req.params;
-  const linkData = temporaryLinks.get(linkId);
-
-  if (!linkData || linkData.expiresAt < Date.now()) {
-    temporaryLinks.delete(linkId);
-    return res.send(`
-      Login Expired.
-      Redirecting...
-    `);
-  }
-
-  temporaryLinks.delete(linkId);
-
-  res.send(`
-    Auto Login
-    Logging in...
-  `);
-});
-
-setInterval(() => {
-  const now = Date.now();
-  for (const [linkId, linkData] of temporaryLinks.entries()) {
-    if (linkData.expiresAt < now) {
-      temporaryLinks.delete(linkId);
-    }
-  }
-}, parseInt(process.env.TEMP_LINKS_CLEANUP_INTERVAL_MS, 10));
-
-const server = app.listen(PORT, () => {
-  console.log(`Server running on http://localhost:${PORT}`);
-});
+app.post('/generate-login-link', generateLoginLink);
+app.get('/auto-login/:linkId', handleAutoLogin);
+app.get('/', (req, res) => res.sendFile(path.join(__dirname, 'public', 'index.html')));
+const server = app.listen(PORT, () => console.log(`Server running on http://localhost:${PORT}`));
 server.on('upgrade', (request, socket, head) => {
-  wss.handleUpgrade(request, socket, head, (ws) => {
-    wss.emit('connection', ws, request);
-  });
-});
-
-app.get('/', (req, res) => {
-  res.sendFile(path.join(__dirname, 'public', 'index.html'));
+  wss.handleUpgrade(request, socket, head, (ws) => wss.emit('connection', ws, request));
 });
\ No newline at end of file