update - add internal SFTP Endpoint
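This commit adds 'internal-sftp' to the static WebSocket endpoints: the handler caches the response, sends it immediately on connection and on subscription, re-fetches it on a dedicated 10-minute interval, excludes it from the regular static-endpoint polling loop, and clears the cached copy when the client requests a refresh. The snippet below is a minimal, hypothetical client-side sketch of how a consumer might subscribe and handle the resulting messages; the server URL and the exact subscribe payload shape are assumptions, and only the { type: 'internal-sftp', data } / { type: 'internal-sftp', error } reply format comes from this diff.

// Hypothetical browser-side consumer of the new endpoint (sketch only).
// Assumed: the WebSocket URL and the subscribe message shape; the diff
// only shows that subscriptions are tracked in client.subscriptions and
// that replies arrive as { type: 'internal-sftp', data } or { type, error }.
const ws = new WebSocket('wss://example.invalid/ws'); // placeholder URL

ws.addEventListener('open', () => {
  // Assumed subscribe payload shape, not taken from this diff.
  ws.send(JSON.stringify({ type: 'subscribe', endpoints: ['internal-sftp'] }));
});

ws.addEventListener('message', (event) => {
  const msg = JSON.parse(event.data);
  if (msg.type !== 'internal-sftp') return;
  if (msg.error) {
    console.error('internal-sftp error:', msg.error); // error replies carry an error string
  } else {
    console.log('internal-sftp data:', msg.data);     // cached or freshly fetched data
  }
});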
@@ -4,7 +4,7 @@ import { checkConnectionStatus, checkGeyserStatus, checkSftpStatus } from './sta
import { apiRequest } from './api.js';

const clients = new Map();
const staticEndpoints = ['log', 'website', 'map', 'my-link-cache', 'my-geyser-cache', 'my-sftp-cache', 'my-link', 'my-geyser-link', 'my-sftp', 'holesail-hashes'];
const staticEndpoints = ['log', 'website', 'map', 'my-link-cache', 'my-geyser-cache', 'my-sftp-cache', 'my-link', 'my-geyser-link', 'my-sftp', 'holesail-hashes', 'internal-sftp'];
const dynamicEndpoints = ['hello', 'time', 'mod-list'];

// Helper function to start Docker stats interval
@@ -14,8 +14,8 @@ function startDockerStatsInterval(ws, client, user, docker) {
return null;
}

// Send initial stats immediately
(async () => {
// Send initial stats immediately
(async () => {
try {
const initialStats = await getContainerStats(docker, user);
if (ws.readyState === ws.OPEN) {
@@ -80,10 +80,12 @@ async function fetchAndSendUpdate(ws, endpoint, client, docker) {
const container = docker.getContainer(client.user);
const inspect = await container.inspect();
if (inspect.State.Status !== 'running') {
console.warn(`Container ${client.user} is not running for endpoint ${endpoint}`);
ws.send(JSON.stringify({ type: endpoint, error: `Container ${client.user} is not running` }));
return;
}
} catch (error) {
console.error(`Failed to check container status for ${endpoint} for ${client.user}:`, error.message);
ws.send(JSON.stringify({ type: endpoint, error: `Failed to check container status: ${error.message}` }));
return;
}
@@ -99,6 +101,12 @@ async function fetchAndSendUpdate(ws, endpoint, client, docker) {
return;
}

if (endpoint === 'internal-sftp' && client.cache['internal-sftp']) {
console.log(`Using cached internal-sftp data for ${client.user}`);
ws.send(JSON.stringify({ type: 'internal-sftp', data: client.cache['internal-sftp'] }));
return;
}

if (endpoint === 'holesail-hashes') {
try {
const [hashResponse, geyserHashResponse, sftpHashResponse] = await Promise.all([
@@ -132,82 +140,95 @@ async function fetchAndSendUpdate(ws, endpoint, client, docker) {
return;
}

const response = await apiRequest(`/${endpoint}`, client.apiKey);
if (!response.error) {
if (endpoint === 'time') client.cache['time'] = response;
if (endpoint === 'my-link-cache') {
client.cache['my-link-cache'] = response;
if (client.subscriptions.has('my-link-cache') && client.user !== 'Unknown') {
try {
const container = docker.getContainer(client.user);
const inspect = await container.inspect();
if (inspect.State.Status === 'running' && response.hostname && response.port) {
const status = await checkConnectionStatus(response.hostname, response.port);
ws.send(JSON.stringify({ type: 'connection-status', data: { isOnline: status.isOnline } }));
} else {
ws.send(JSON.stringify({ type: 'connection-status', error: `Container ${client.user} is not running` }));
try {
const response = await apiRequest(`/${endpoint}`, client.apiKey);
if (!response.error) {
if (endpoint === 'time') client.cache['time'] = response;
if (endpoint === 'my-link-cache') {
client.cache['my-link-cache'] = response;
if (client.subscriptions.has('my-link-cache') && client.user !== 'Unknown') {
try {
const container = docker.getContainer(client.user);
const inspect = await container.inspect();
if (inspect.State.Status === 'running' && response.hostname && response.port) {
const status = await checkConnectionStatus(response.hostname, response.port);
ws.send(JSON.stringify({ type: 'connection-status', data: { isOnline: status.isOnline } }));
} else {
ws.send(JSON.stringify({ type: 'connection-status', error: `Container ${client.user} is not running` }));
}
} catch (error) {
console.error(`Error checking connection status for ${client.user}:`, error.message);
ws.send(JSON.stringify({ type: 'connection-status', error: `Failed to check container status: ${error.message}` }));
}
} catch (error) {
ws.send(JSON.stringify({ type: 'connection-status', error: `Failed to check container status: ${error.message}` }));
}
}
}
if (endpoint === 'my-geyser-cache') {
client.cache['my-geyser-cache'] = response;
if (client.subscriptions.has('my-geyser-cache') && client.user !== 'Unknown') {
try {
const container = docker.getContainer(client.user);
const inspect = await container.inspect();
if (inspect.State.Status === 'running' && response.hostname && response.port) {
const status = await checkGeyserStatus(response.hostname, response.port);
ws.send(JSON.stringify({ type: 'geyser-status', data: { isOnline: status.isOnline } }));
} else {
ws.send(JSON.stringify({ type: 'geyser-status', error: `Container ${client.user} is not running` }));
if (endpoint === 'my-geyser-cache') {
client.cache['my-geyser-cache'] = response;
if (client.subscriptions.has('my-geyser-cache') && client.user !== 'Unknown') {
try {
const container = docker.getContainer(client.user);
const inspect = await container.inspect();
if (inspect.State.Status === 'running' && response.hostname && response.port) {
const status = await checkGeyserStatus(response.hostname, response.port);
ws.send(JSON.stringify({ type: 'geyser-status', data: { isOnline: status.isOnline } }));
} else {
ws.send(JSON.stringify({ type: 'geyser-status', error: `Container ${client.user} is not running` }));
}
} catch (error) {
console.error(`Error checking geyser status for ${client.user}:`, error.message);
ws.send(JSON.stringify({ type: 'geyser-status', error: `Failed to check container status: ${error.message}` }));
}
} catch (error) {
ws.send(JSON.stringify({ type: 'geyser-status', error: `Failed to check container status: ${error.message}` }));
}
}
}
if (endpoint === 'my-sftp-cache') {
client.cache['my-sftp-cache'] = response;
if (client.subscriptions.has('my-sftp-cache') && client.user !== 'Unknown') {
try {
const container = docker.getContainer(client.user);
const inspect = await container.inspect();
const ipAddress = inspect.NetworkSettings.Networks?.minecraft_network?.IPAddress || 'N/A';
if (inspect.State.Status === 'running' && response.hostname && response.port) {
const status = await checkSftpStatus(response.hostname, response.port);
ws.send(JSON.stringify({
type: 'sftp-status',
data: {
isOnline: status.isOnline,
if (endpoint === 'my-sftp-cache') {
client.cache['my-sftp-cache'] = response;
if (client.subscriptions.has('my-sftp-cache') && client.user !== 'Unknown') {
try {
const container = docker.getContainer(client.user);
const inspect = await container.inspect();
const ipAddress = inspect.NetworkSettings.Networks?.minecraft_network?.IPAddress || 'N/A';
if (inspect.State.Status === 'running' && response.hostname && response.port) {
const status = await checkSftpStatus(response.hostname, response.port);
ws.send(JSON.stringify({
type: 'sftp-status',
data: {
isOnline: status.isOnline,
ipAddress
}
}));
} else {
ws.send(JSON.stringify({
type: 'sftp-status',
error: `Container ${client.user} is not running`,
ipAddress
}
}));
} else {
}));
}
response.ipAddress = ipAddress;
} catch (error) {
console.error(`Error checking sftp status for ${client.user}:`, error.message);
ws.send(JSON.stringify({
type: 'sftp-status',
error: `Container ${client.user} is not running`,
ipAddress
error: `Failed to check container status: ${error.message}`
}));
}
// Add IP address to my-sftp-cache response
response.ipAddress = ipAddress;
} catch (error) {
ws.send(JSON.stringify({
type: 'sftp-status',
error: `Failed to check container status: ${error.message}`
}));
}
}
if (endpoint === 'internal-sftp') {
client.cache['internal-sftp'] = response;
}
if (ws.readyState === ws.OPEN) {
ws.send(JSON.stringify({ type: endpoint, data: response }));
}
} else {
console.error(`API error for /${endpoint}: ${response.error}`);
if (ws.readyState === ws.OPEN) {
ws.send(JSON.stringify({ type: endpoint, error: response.error }));
}
}
} catch (error) {
console.error(`Error fetching /${endpoint} for ${client.user}:`, error.message);
if (ws.readyState === ws.OPEN) {
ws.send(JSON.stringify({ type: endpoint, data: response }));
}
} else {
if (ws.readyState === ws.OPEN) {
ws.send(JSON.stringify({ type: endpoint, error: response.error }));
ws.send(JSON.stringify({ type: endpoint, error: `Failed to fetch ${endpoint}: ${error.message}` }));
}
}
}
@@ -229,12 +250,14 @@ async function manageStatusChecks(ws, client, user, docker) {
});

if (!isRunning || user === 'Unknown') {
['my-link-cache', 'my-geyser-cache', 'my-sftp-cache'].forEach((sub) => {
['my-link-cache', 'my-geyser-cache', 'my-sftp-cache', 'internal-sftp'].forEach((sub) => {
if (client.subscriptions.has(sub)) {
ws.send(JSON.stringify({ type: sub.replace('-cache', '-status'), error: `Container ${user} is not running or user unknown` }));
const statusType = sub.replace('-cache', '-status');
console.warn(`Container ${user} is not running or user unknown for subscription ${sub}`);
ws.send(JSON.stringify({ type: statusType, error: `Container ${user} is not running or user unknown` }));
}
});
if (!isRunning && (client.subscriptions.has('my-link-cache') || client.subscriptions.has('my-geyser-cache') || client.subscriptions.has('my-sftp-cache')) && user !== 'Unknown') {
if (!isRunning && (client.subscriptions.has('my-link-cache') || client.subscriptions.has('my-geyser-cache') || client.subscriptions.has('my-sftp-cache') || client.subscriptions.has('internal-sftp')) && user !== 'Unknown') {
console.log(`Starting container status monitor for ${user}`);
client.statusCheckMonitorInterval = setInterval(async () => {
try {
@@ -368,6 +391,20 @@ export function handleWebSocket(ws, req, docker) {
console.log(`Received pong from client for user ${client.user || 'unknown'}`);
});

// Send internal-sftp data immediately on connection if subscribed
(async () => {
try {
if (client.subscriptions.has('internal-sftp')) {
await fetchAndSendUpdate(ws, 'internal-sftp', client, docker);
}
} catch (error) {
console.error('Error sending initial internal-sftp data:', error.message);
if (ws.readyState === ws.OPEN) {
ws.send(JSON.stringify({ type: 'internal-sftp', error: `Failed to fetch initial internal-sftp data: ${error.message}` }));
}
}
})();

ws.on('message', async (message) => {
try {
const data = JSON.parse(message.toString());
@@ -380,6 +417,18 @@ export function handleWebSocket(ws, req, docker) {
});
console.log(`Client subscriptions: ${Array.from(client.subscriptions)}`);

// Send internal-sftp data immediately after subscription
if (client.subscriptions.has('internal-sftp')) {
try {
await fetchAndSendUpdate(ws, 'internal-sftp', client, docker);
} catch (error) {
console.error(`Error fetching internal-sftp data after subscription for ${client.user}:`, error.message);
if (ws.readyState === ws.OPEN) {
ws.send(JSON.stringify({ type: 'internal-sftp', error: `Failed to fetch internal-sftp data: ${error.message}` }));
}
}
}

let hello = client.cache['hello'] || await apiRequest('/hello', client.apiKey);
if (!client.cache['hello'] && !hello.error) client.cache['hello'] = hello;

@@ -439,7 +488,7 @@ export function handleWebSocket(ws, req, docker) {
client.intervals.push(setInterval(async () => {
try {
for (const endpoint of staticEndpoints) {
if (client.subscriptions.has(endpoint) && endpoint !== 'holesail-hashes') {
if (client.subscriptions.has(endpoint) && endpoint !== 'holesail-hashes' && endpoint !== 'internal-sftp') {
await fetchAndSendUpdate(ws, endpoint, client, docker);
}
}
@@ -448,6 +497,28 @@ export function handleWebSocket(ws, req, docker) {
}
}, parseInt(process.env.STATIC_ENDPOINTS_INTERVAL_MS, 10)));

// Add 10-minute interval for internal-sftp updates
if (client.subscriptions.has('internal-sftp')) {
const sftpIntervalId = setInterval(async () => {
try {
if (ws.readyState !== ws.OPEN) {
console.warn(`WebSocket not open (state: ${ws.readyState}) for ${user}, clearing internal-sftp interval ${sftpIntervalId}`);
clearInterval(sftpIntervalId);
client.intervals = client.intervals.filter(id => id !== sftpIntervalId);
return;
}
await fetchAndSendUpdate(ws, 'internal-sftp', client, docker);
} catch (error) {
console.error(`Error in internal-sftp interval for ${user}:`, error.message);
if (ws.readyState === ws.OPEN) {
ws.send(JSON.stringify({ type: 'internal-sftp', error: `Failed to fetch internal-sftp data: ${error.message}` }));
}
}
}, 10 * 60 * 1000); // 10 minutes
client.intervals.push(sftpIntervalId);
console.log(`Internal SFTP interval ID ${sftpIntervalId} added for ${user}`);
}

if (client.subscriptions.has('list-players') && user !== 'Unknown') {
try {
const container = docker.getContainer(user);
@@ -520,6 +591,18 @@ export function handleWebSocket(ws, req, docker) {
console.log(`Starting docker logs stream for new user ${client.user}`);
await streamContainerLogs(docker, ws, client.user, client);
}

// Refresh internal-sftp data for new user
if (client.subscriptions.has('internal-sftp')) {
try {
await fetchAndSendUpdate(ws, 'internal-sftp', client, docker);
} catch (error) {
console.error(`Error refreshing internal-sftp data for new user ${client.user}:`, error.message);
if (ws.readyState === ws.OPEN) {
ws.send(JSON.stringify({ type: 'internal-sftp', error: `Failed to fetch internal-sftp data: ${error.message}` }));
}
}
}
}
} else if (data.type === 'request') {
const { requestId, endpoint, method, body } = data;
@@ -540,6 +623,7 @@ export function handleWebSocket(ws, req, docker) {
response = client.user === 'Unknown' ? { error: 'User not identified' } : await createBackup(docker, client.user);
} else {
response = await apiRequest(endpoint, client.apiKey, method, body);
console.log(`API request response for ${endpoint}:`, response);
}
if (ws.readyState === ws.OPEN) {
ws.send(JSON.stringify({ requestId, ...response }));
@@ -696,6 +780,7 @@ export function handleWebSocket(ws, req, docker) {
console.log('Processing refresh request');
delete client.cache['hello'];
delete client.cache['time'];
delete client.cache['internal-sftp'];
await Promise.all([
...staticEndpoints.filter(e => client.subscriptions.has(e)).map(e => fetchAndSendUpdate(ws, e, client, docker)),
...dynamicEndpoints.filter(e => client.subscriptions.has(e)).map(e => fetchAndSendUpdate(ws, e, client, docker)),
@@ -739,6 +824,9 @@ export function handleWebSocket(ws, req, docker) {
if (client.subscriptions.has('my-sftp-cache')) {
ws.send(JSON.stringify({ type: 'sftp-status', error: `Container ${client.user} is not running` }));
}
if (client.subscriptions.has('internal-sftp')) {
ws.send(JSON.stringify({ type: 'internal-sftp', error: `Container ${client.user} is not running` }));
}
}
} catch (error) {
console.error(`Error during refresh for ${client.user}:`, error.message);
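The hunks above also pass through the handler's generic request path, where the client sends { type: 'request', requestId, endpoint, method, body } and the reply spreads the API response alongside the echoed requestId (ws.send(JSON.stringify({ requestId, ...response }))). The following is a small, hypothetical client helper for correlating those replies, reusing the ws connection from the sketch near the top; apart from the message shapes visible in the diff, everything here is an assumption.

// Hypothetical promise-based wrapper around the 'request' message type.
// From the diff: requests carry { requestId, endpoint, method, body } and
// replies echo the requestId alongside the spread API response.
const pending = new Map();

function wsRequest(ws, endpoint, method = 'GET', body = null) {
  const requestId = crypto.randomUUID(); // assumed: any unique string would do
  return new Promise((resolve) => {
    pending.set(requestId, resolve);
    ws.send(JSON.stringify({ type: 'request', requestId, endpoint, method, body }));
  });
}

// Resolve the matching pending request when a reply with a requestId arrives.
ws.addEventListener('message', (event) => {
  const msg = JSON.parse(event.data);
  if (msg.requestId && pending.has(msg.requestId)) {
    pending.get(msg.requestId)(msg);
    pending.delete(msg.requestId);
  }
});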