import 'dotenv/config';
import Groq from 'groq-sdk';
import unirest from 'unirest';
import readline from 'readline';

const DISCORD_LINUX_API_URL = 'https://api.ssh.surf';
const DISCORD_LINUX_API_KEY = process.env['DISCORD_LINUX_API_KEY'];
const GROQ_API_KEY = process.env['GROQ_API_KEY'];

const MAX_ITERATIONS = 5;

// Initialize the Groq client
const groqClient = new Groq({
  apiKey: GROQ_API_KEY,
});
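
// The script expects a .env file next to it. A minimal sketch (the variable
// names match the process.env lookups above; the values are placeholders):
//   DISCORD_LINUX_API_KEY=your-ssh-surf-api-key
//   GROQ_API_KEY=your-groq-api-key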

// Small helpers for nice verbose logging:
function logHeader(message) {
  console.log('\n' + '═'.repeat(80));
  console.log('═ ' + message);
  console.log('═'.repeat(80) + '\n');
}

function logSubHeader(message) {
  console.log('\n' + '-'.repeat(60));
  console.log('> ' + message);
  console.log('-'.repeat(60) + '\n');
}

function logInfo(message) {
  console.log(`INFO: ${message}`);
}

function logCommandStart(cmd) {
  console.log(`\n[EXECUTING COMMAND]\n$ ${cmd}\n`);
}

function logCommandResult(stdout, stderr) {
  if (stdout && stdout.trim().length > 0) {
    console.log("[STDOUT]:\n" + indentMultiline(stdout));
  } else {
    console.log("[STDOUT]: (empty)\n");
  }

  if (stderr && stderr.trim().length > 0) {
    console.log("[STDERR]:\n" + indentMultiline(stderr));
  } else {
    console.log("[STDERR]: (empty)\n");
  }
}

function indentMultiline(text) {
  return text.split('\n').map(line => '    ' + line).join('\n');
}

// Helper to execute a command in the container:
async function execCommandInContainer(cmd, pwd = '/home') {
  const response = await unirest
    .post(`${DISCORD_LINUX_API_URL}/exec`)
    .headers({
      'Accept': 'application/json',
      'Content-Type': 'application/json',
      'x-ssh-auth': DISCORD_LINUX_API_KEY
    })
    .send({ cmd, pwd });

  return response.body;
}
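
// Note: the rest of the script assumes the /exec endpoint returns a JSON body
// shaped roughly like { stdout: '...', stderr: '...' }; that shape is inferred
// from how the result is consumed below, not from the API's documentation.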

// This function queries the AI for instructions to achieve a goal.
async function askAIForInstructions(context, goal) {
  const systemPrompt = `You are a world-class Linux system administration assistant, given the ability to access and run commands on a remote Debian/Ubuntu-based Linux container. Your mission is to help achieve the following goal: ${goal}.
Rules:
1. Return only the shell commands needed, line-by-line, with no explanation.
2. If previous attempts failed, refine your approach and fix the issues based on the provided errors and output.
3. If you need to run multiple commands, separate them by new lines.
4. Consider common steps: updating package lists, installing packages, verifying installation.
5. The container might be minimal, so consider installing or fixing repositories if needed.
6. Always ensure commands are non-interactive.
7. Do not use markdown formatting of any kind.
8. All commands must be non-interactive.
9. If installing packages, always use -y so the commands run non-interactively.
`;

  const userPrompt = `CONTEXT:\n${context}\n\nPlease provide the exact shell commands to achieve the goal above.`;

  const params = {
    messages: [
      { role: 'system', content: systemPrompt },
      { role: 'user', content: userPrompt }
    ],
    model: 'llama3-8b-8192',
  };

  const chatCompletion = await groqClient.chat.completions.create(params);
  // Guard against a missing or empty completion before trimming.
  const aiResponse = chatCompletion.choices?.[0]?.message?.content?.trim() || '';
  return aiResponse;
}
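
// With the rules above, a successful reply is expected to be bare commands,
// one per line, e.g. (illustrative only, not actual model output):
//   apt-get update
//   apt-get install -y nginx
//   nginx -v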

// This function is used for chatting with the AI like a chatbot. It can answer questions,
// give advice, or provide commands about the container or any tasks you want to do next.
async function chatWithAI(context, userMessage) {
  const systemPrompt = `You are a helpful Linux system administration assistant with the ability to access and run commands on a remote Debian/Ubuntu-based Linux container.
You can help answer questions about the container state, suggest commands, or assist with Linux-related tasks.
Rules:
1. You can provide explanations, instructions, and command suggestions.
2. If giving commands, list them clearly line-by-line.
3. For explanations or answers, you can use normal text.
4. Do not use markdown formatting.
5. Keep answers concise and clear.
6. The user may then choose to run the commands you suggest.
`;

  const userPrompt = `Context:\n${context}\n\nUser says: ${userMessage}`;

  const params = {
    messages: [
      { role: 'system', content: systemPrompt },
      { role: 'user', content: userPrompt }
    ],
    model: 'llama3-8b-8192',
  };

  const chatCompletion = await groqClient.chat.completions.create(params);
  // Guard against a missing or empty completion before trimming.
  const aiResponse = chatCompletion.choices?.[0]?.message?.content?.trim() || '';
  return aiResponse;
}

function parseCommandsFromAIResponse(aiResponse) {
  // Split the AI response into one command per non-empty line.
  const lines = aiResponse.split('\n').map(l => l.trim()).filter(l => l.length > 0);
  return lines;
}
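
// For example, a response of 'apt-get update\napt-get install -y curl\n'
// parses to ['apt-get update', 'apt-get install -y curl'].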

// This function attempts to automatically achieve a given goal using AI instructions,
// retrying up to MAX_ITERATIONS times.
async function automateGoal(context, goal) {
  logHeader(`ATTEMPTING TO AUTOMATE GOAL: ${goal}`);
  let iteration = 0;
  let success = false;

  while (iteration < MAX_ITERATIONS && !success) {
    iteration++;
    logHeader(`ITERATION ${iteration} OF ${MAX_ITERATIONS}`);

    logSubHeader('Asking AI for instructions');
    const instructions = await askAIForInstructions(context, goal);
    console.log("AI PROVIDED COMMANDS:\n" + indentMultiline(instructions));

    const commands = parseCommandsFromAIResponse(instructions);
    let allCommandsSucceeded = true;
    let attemptLog = `Attempt #${iteration}:\nAI instructions:\n${instructions}\n\nCommand results:\n`;

    for (const cmd of commands) {
      logCommandStart(cmd);
      const result = await execCommandInContainer(cmd);
      const stdout = result.stdout || '';
      const stderr = result.stderr || '';
      logCommandResult(stdout, stderr);

      attemptLog += `\n> ${cmd}\nstdout:\n${stdout}\nstderr:\n${stderr}\n`;

      // If we find a non-empty stderr, we consider it a failure signal
      if (stderr && stderr.trim().length > 0) {
        logInfo(`Command failed with an error. Will request refined instructions in the next iteration.`);
        allCommandsSucceeded = false;
        break;
      } else {
        logInfo(`Command executed successfully.`);
      }
    }

    context += `\n\n${attemptLog}`;

    if (allCommandsSucceeded) {
      logInfo("All commands executed successfully.");
      success = true;
    } else {
      logInfo("At least one command failed. The AI will refine its approach in the next iteration.");
    }
  }

  if (success) {
    logHeader("SUCCESS! The goal appears to have been achieved.");
  } else {
    logHeader("FAILED TO ACHIEVE THE GOAL WITHIN MAX ITERATIONS");
    logInfo("Below is the final accumulated context/logs:\n" + context);
  }

  return { context, success };
}

// After achieving or attempting the goal, we start an interactive chat loop.
// It also supports re-entering automated mode by typing: automate "Your new goal"
async function startChatLoop(context) {
  const rl = readline.createInterface({
    input: process.stdin,
    output: process.stdout
  });

  logHeader("Entering Interactive Chat Mode");
  console.log("You can now ask the AI about the container or request additional tasks.");
  console.log("Type 'exit' to quit.");
  console.log("If the AI suggests commands, you can run them by typing 'run <line_number>'.");
  console.log("To re-enter automated mode with a new goal, type: automate \"Your new goal\"\n");

  let lastAIResponse = "";

  async function promptUser() {
    rl.question("> ", async (input) => {
      if (input.trim().toLowerCase() === 'exit') {
        rl.close();
        return;
      }

      // If the user wants to run a command from the last AI response:
      if (input.startsWith('run ')) {
        const lineNum = parseInt(input.replace('run ', '').trim(), 10);
        const commands = parseCommandsFromAIResponse(lastAIResponse);
        if (!isNaN(lineNum) && lineNum > 0 && lineNum <= commands.length) {
          const cmd = commands[lineNum - 1];
          logInfo(`Running command from AI suggestion: ${cmd}`);
          const result = await execCommandInContainer(cmd);
          const stdout = result.stdout || '';
          const stderr = result.stderr || '';
          logCommandResult(stdout, stderr);
          context += `\nUser ran command: ${cmd}\nstdout:\n${stdout}\nstderr:\n${stderr}`;
        } else {
          console.log("Invalid line number for running a command.");
        }
        return promptUser();
      }

      // If the user wants to automate a new goal:
      if (input.trim().toLowerCase().startsWith('automate ')) {
        const goalMatch = input.match(/^automate\s+["'](.+)["']$/i);
        if (goalMatch && goalMatch[1]) {
          const newGoal = goalMatch[1];
          // Run automated attempts on the new goal
          const result = await automateGoal(context, newGoal);
          context = result.context;
          // After finishing the automated attempts, return to chat mode
          return promptUser();
        } else {
          console.log("To automate a new goal, use: automate \"Your new goal\"");
          return promptUser();
        }
      }

      // Otherwise, treat the input as a question to the AI
      lastAIResponse = await chatWithAI(context, input);
      console.log("AI:", lastAIResponse);
      promptUser();
    });
  }

  promptUser();
}
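
// Example chat-mode exchange (illustrative only):
//   > how do I check free disk space?
//   AI: df -h
//   > run 1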

async function main() {
  // Retrieve the initial goal from command-line arguments
  const args = process.argv.slice(2);
  const initialGoal = args.join(' ').trim();

  if (!initialGoal) {
    console.error("Usage: node script.js \"Your goal here\"");
    process.exit(1);
  }

  let context = "Initial attempt. No commands have been run yet.\n" +
    "We are working with a Debian/Ubuntu container.\n" +
    "Goal: " + initialGoal;

  // Attempt to achieve the initial goal:
  const result = await automateGoal(context, initialGoal);
  context = result.context;

  // After finishing attempts, start chat mode:
  await startChatLoop(context);
}

main().catch(err => {
  console.error("An error occurred:", err);
  // Signal failure to the shell without cutting off pending output.
  process.exitCode = 1;
});
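
// Usage (matching the usage string in main()):
//   node script.js "Install nginx and make sure it is running"
// The goal text above is only an illustration; any free-form goal works.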