Compare commits


225 Commits
main ... main

Author SHA1 Message Date
Raven Scott
e4521cd202 Update 2024-11-14 15:18:01 -05:00
Raven Scott
f42f30941b Update article 2024-11-14 02:47:15 -05:00
Raven Scott
3d1b91d0b2 Update article 2024-11-14 02:40:29 -05:00
Raven Scott
9e24f132ee Update article 2024-11-14 02:40:07 -05:00
Raven Scott
27b95dc12e Update article 2024-11-14 02:35:23 -05:00
Raven Scott
904efda674 Add article 2024-11-14 02:27:46 -05:00
Raven Scott
3886e95f23 update 2024-11-09 00:49:11 -05:00
Raven Scott
4205593761 update 2024-11-09 00:47:15 -05:00
Raven Scott
05b09c5db5 update 2024-11-09 00:42:14 -05:00
Raven Scott
fcbbe6a6c9 update 2024-10-29 01:21:43 -04:00
Raven Scott
1aab0bc7b1 update 2024-10-28 17:53:35 -04:00
Raven Scott
59727f6fa8 update 2024-10-28 17:45:48 -04:00
Raven Scott
4bd7dbe053 update 2024-10-28 17:45:11 -04:00
Raven Scott
91acdabda0 update 2024-10-28 17:40:52 -04:00
Raven Scott
b18ac40bd4 update 2024-10-28 17:35:49 -04:00
Raven Scott
9a9411823f update 2024-10-28 17:34:10 -04:00
Raven Scott
931490c45d update 2024-10-28 17:27:12 -04:00
Raven Scott
a9615274f1 update 2024-10-28 17:11:10 -04:00
Raven Scott
9ee315d8b0 update 2024-10-28 16:47:03 -04:00
Raven Scott
02678b29a6 update 2024-10-28 16:31:29 -04:00
Raven Scott
a528a73ec8 update 2024-10-27 01:39:39 -04:00
Raven Scott
869ca41971 update 2024-10-27 01:38:27 -04:00
Raven Scott
39d6d7ed79 update 2024-10-27 01:37:45 -04:00
Raven Scott
817b64f4db update 2024-10-27 01:33:28 -04:00
Raven Scott
cf22cc7551 update 2024-10-22 23:08:01 -04:00
Raven Scott
f20231ddae update article 2024-10-13 04:05:50 -04:00
Raven Scott
93286d80ce update article 2024-10-13 04:03:41 -04:00
Raven Scott
2d169295cc update article 2024-10-13 03:12:29 -04:00
Raven Scott
d0ad514d40 update article 2024-10-13 03:10:39 -04:00
Raven Scott
a9db77194e update article 2024-10-13 03:01:47 -04:00
Raven Scott
b2c6cebdc1 update article 2024-10-13 02:56:43 -04:00
Raven Scott
583e9d4925 add article 2024-10-13 02:44:03 -04:00
Raven Scott
646a82a956 update article 2024-10-12 23:42:41 -04:00
Raven Scott
a463c36441 update article 2024-10-12 23:22:36 -04:00
Raven Scott
755e0ced62 update article 2024-10-12 23:15:54 -04:00
Raven Scott
c7456dbbf6 add article 2024-10-12 17:04:32 -04:00
Raven Scott
a89e84fd38 add /json route 2024-10-05 04:20:17 -04:00
Raven Scott
0472fb4c71 test 2024-10-04 21:03:07 -04:00
Raven Scott
3ef37e2dc9 test 2024-10-04 21:02:47 -04:00
Raven Scott
62ccbf00e2 test 2024-10-04 20:34:47 -04:00
Raven Scott
abd246a4fd update AI page 2024-10-02 23:22:26 -04:00
Raven Scott
1cd0dc673f update AI page 2024-10-02 20:59:42 -04:00
Raven Scott
52423554ae update AI page 2024-10-02 20:56:23 -04:00
Raven Scott
ae65d9f032 update AI page 2024-10-02 20:55:02 -04:00
Raven Scott
bf1ec57e4a update AI page 2024-10-02 20:52:17 -04:00
Raven Scott
1bc3f10203 update AI page 2024-10-02 20:50:47 -04:00
Raven Scott
28e2057a2e update AI page 2024-10-02 20:46:33 -04:00
Raven Scott
f1b9abb4e0 remove last message if 429 2024-10-02 15:31:33 -04:00
Raven Scott
49b94d9e13 Add article 2024-10-02 15:03:40 -04:00
Raven Scott
fa6958e551 update AI page 2024-10-02 06:40:56 -04:00
Raven Scott
1993f568bf update AI page 2024-10-02 06:06:19 -04:00
Raven Scott
7c231504ec update AI page 2024-10-02 06:03:51 -04:00
Raven Scott
e55bc2978a Add injected Menu Items 2024-10-02 06:02:29 -04:00
Raven Scott
8553e94547 Add injected Menu Items 2024-10-02 05:57:07 -04:00
Raven Scott
2cc64150d0 Add injected Menu Items 2024-10-02 05:53:47 -04:00
Raven Scott
74ab9c9da6 update AI page 2024-10-02 05:51:00 -04:00
Raven Scott
b53389532f update AI page 2024-10-02 05:44:50 -04:00
Raven Scott
ff53187da7 update AI page 2024-10-02 05:42:00 -04:00
Raven Scott
da00f63b08 update AI page 2024-10-02 05:39:16 -04:00
Raven Scott
4ccc349ac0 update AI page 2024-10-02 05:35:26 -04:00
Raven Scott
fbd95b0a82 update AI page 2024-10-02 05:34:29 -04:00
Raven Scott
efc8099db3 update AI page 2024-10-02 05:31:30 -04:00
Raven Scott
ded4c43700 update AI page 2024-10-02 05:30:13 -04:00
Raven Scott
c2a658c61c update AI page 2024-10-02 05:21:43 -04:00
Raven Scott
64238d0205 update AI page 2024-10-02 05:18:34 -04:00
Raven Scott
350644e36d update AI page 2024-10-02 05:12:16 -04:00
Raven Scott
9f784f5c0a Add injected Menu Items 2024-10-02 05:10:03 -04:00
Raven Scott
94ee7240ad update AI page 2024-10-02 04:58:01 -04:00
Raven Scott
1c6ad3c8b0 update AI page 2024-10-02 04:49:29 -04:00
Raven Scott
9a1bd49f27 update AI page 2024-10-02 04:45:18 -04:00
Raven Scott
e745cc0cff update AI page 2024-10-02 04:43:43 -04:00
Raven Scott
a5cb7efff1 update AI page 2024-10-02 04:40:18 -04:00
Raven Scott
939c5668a0 update AI page 2024-10-02 04:38:56 -04:00
Raven Scott
73043cdac0 update AI page 2024-10-02 04:37:52 -04:00
Raven Scott
267f68c646 update AI page 2024-10-02 04:29:24 -04:00
Raven Scott
4ece2a9461 update AI page 2024-10-02 04:24:40 -04:00
Raven Scott
7ca90f3215 update AI page 2024-10-02 04:23:20 -04:00
Raven Scott
a61bc9c761 update AI page 2024-10-02 04:19:03 -04:00
Raven Scott
b3bf0a7112 update AI page 2024-10-02 04:15:09 -04:00
Raven Scott
1f03736280 update AI page 2024-10-02 04:14:25 -04:00
Raven Scott
a6fb6bbb3d update AI page 2024-10-02 04:11:27 -04:00
Raven Scott
62f89a4948 update AI page 2024-10-02 04:09:08 -04:00
Raven Scott
ddb266825c update AI page 2024-10-02 04:07:32 -04:00
Raven Scott
741ce8b01d update AI page 2024-10-02 04:03:47 -04:00
Raven Scott
856e79732e update AI page 2024-10-02 03:50:22 -04:00
Raven Scott
69dd5b4694 update AI page 2024-10-02 03:49:47 -04:00
Raven Scott
356ca2001a update AI page 2024-10-02 03:45:08 -04:00
Raven Scott
8006c44f81 update AI page 2024-10-02 03:38:25 -04:00
Raven Scott
6e6194f820 update AI page 2024-10-02 03:36:38 -04:00
Raven Scott
c602d5b8f8 add AI page 2024-10-02 03:32:27 -04:00
Raven Scott
775e146a68 update article 2024-10-02 01:41:09 -04:00
Raven Scott
977917b2e7 add article 2024-10-02 01:40:18 -04:00
Raven Scott
ba61bbd438 add article 2024-10-02 01:33:38 -04:00
Raven Scott
a9456c32ea update article 2024-09-29 06:13:45 -04:00
Raven Scott
de69589e0e update article 2024-09-29 06:10:29 -04:00
Raven Scott
f0ebb59f06 add article 2024-09-29 06:04:27 -04:00
Raven Scott
806e14d747 add article 2024-09-29 06:03:03 -04:00
Raven Scott
e7a6d934fa add article 2024-09-29 05:56:49 -04:00
Raven Scott
3a1848a95e add article 2024-09-29 05:47:51 -04:00
Raven Scott
d8f62781cc update article 2024-09-29 05:32:12 -04:00
Raven Scott
acf1f68f85 update article 2024-09-29 05:31:22 -04:00
Raven Scott
b28e288d7d add article 2024-09-29 05:26:32 -04:00
Raven Scott
87b70c0777 update article 2024-09-27 04:10:12 -04:00
Raven Scott
837401b8ff fix 2024-09-27 03:33:31 -04:00
Raven Scott
4a5250474e fix spelling issue 2024-09-27 03:25:21 -04:00
Raven Scott
214bf3ab39 add article 2024-09-27 03:10:40 -04:00
Raven Scott
7c0cc3c26e fix footer 2024-09-26 20:47:45 -04:00
Raven Scott
5a8f4cbada Add autofocus 2024-09-26 19:56:40 -04:00
Raven Scott
82ce418c09 Add autofocus 2024-09-26 19:53:01 -04:00
Raven Scott
8f5e21119d readd auto search 2024-09-26 19:15:04 -04:00
Raven Scott
05302c3b04 fix 404 2024-09-26 19:09:48 -04:00
Raven Scott
2d6e58d41d fix 404 2024-09-26 19:09:08 -04:00
Raven Scott
7e8297bb8b fix menu system 2024-09-26 18:47:09 -04:00
Raven Scott
62cd503704 Update default menu 2024-09-26 18:24:33 -04:00
Raven Scott
6e02e0219a Add open new page when configuring the menu 2024-09-26 18:19:30 -04:00
Raven Scott
2d89dcddf6 Add menu system built using MD 2024-09-26 18:13:47 -04:00
Raven Scott
56cb62bad7 update default .env 2024-09-26 17:48:02 -04:00
Raven Scott
9ea5880292 Automatically submit search form with a timeout 2024-09-26 17:46:21 -04:00
Raven Scott
d082b3b8b0 update css 2024-09-26 17:34:32 -04:00
Raven Scott
8bfd815084 move site key to .env 2024-09-26 17:01:18 -04:00
Raven Scott
0a09e7bbef fix contact 2024-09-26 16:58:47 -04:00
Raven Scott
4115f64f84 add favicon before bedtime 2024-09-26 05:04:09 -04:00
Raven Scott
381cd6bac2 Add meta 2024-09-26 04:55:38 -04:00
474e08516d revert f64c5df9f3
revert revert 388d489181

revert Remove dynamic robots.txt and move to static txt
2024-09-26 04:48:45 -04:00
f64c5df9f3 revert 388d489181
revert Remove dynamic robots.txt and move to static txt
2024-09-26 04:41:43 -04:00
Raven Scott
388d489181 Remove dynamic robots.txt and move to static txt 2024-09-26 04:40:07 -04:00
Raven Scott
e9101d6df5 Update robots.md 2024-09-26 04:36:16 -04:00
Raven Scott
07d55ab192 Update robots.md 2024-09-26 04:34:20 -04:00
Raven Scott
8e7aef938e Add dynamically generated robots.txt from MarkDown 2024-09-26 04:32:47 -04:00
Raven Scott
da0de2464e update article 2024-09-26 04:06:44 -04:00
Raven Scott
bbaef875e0 update article 2024-09-26 04:06:02 -04:00
Raven Scott
e96fdb5bde update article 2024-09-26 03:58:19 -04:00
Raven Scott
104f6b96d7 update article 2024-09-26 03:51:45 -04:00
Raven Scott
d3611e5640 update 2024-09-26 03:04:46 -04:00
Raven Scott
6970978920 fix 2024-09-26 03:03:40 -04:00
Raven Scott
a66188f25e fix 2024-09-26 03:03:04 -04:00
Raven Scott
13aaec73fa fix 2024-09-26 02:56:55 -04:00
Raven Scott
70d7516b8e enhance 2024-09-26 02:52:44 -04:00
Raven Scott
540ee04816 update 2024-09-26 02:49:49 -04:00
Raven Scott
0c67713063 Add default env 2024-09-26 02:47:34 -04:00
Raven Scott
17fec0a2b1 Add index title and tagline to .env 2024-09-26 02:44:16 -04:00
Raven Scott
815cbc034a Convert about me to MD 2024-09-26 02:41:22 -04:00
Raven Scott
1634778e57 remove submit button from search 2024-09-26 02:28:04 -04:00
Raven Scott
42f9ab8e60 update footer color 2024-09-26 02:25:35 -04:00
Raven Scott
e262ab229f update footer color 2024-09-26 02:23:42 -04:00
Raven Scott
c3f77abb67 revert 2024-09-26 02:18:51 -04:00
Raven Scott
7630a3ba77 update css 2024-09-26 02:17:53 -04:00
Raven Scott
cb743a31f5 update css 2024-09-26 02:16:38 -04:00
Raven Scott
72f94b9d41 update css 2024-09-26 02:14:46 -04:00
Raven Scott
d563cd91f8 custom scrollbar 2024-09-26 02:05:51 -04:00
Raven Scott
4be73a8a3b small update 2024-09-26 02:02:36 -04:00
Raven Scott
ec891ab070 Add error message when no posts are found 2024-09-26 02:00:48 -04:00
Raven Scott
9ec9374ea2 dynamically generate linux in ejs 2024-09-26 01:51:44 -04:00
Raven Scott
59f866100a Update colors 2024-09-26 01:36:57 -04:00
Raven Scott
3af344a12c Add search css 2024-09-26 01:34:56 -04:00
Raven Scott
8e02173583 Add search feature 2024-09-26 01:27:35 -04:00
Raven Scott
5dc2d2e6bc move some things to .env 2024-09-26 01:07:01 -04:00
Raven Scott
1f64934d03 move some things to .env 2024-09-26 01:05:37 -04:00
Raven Scott
de3d09e1de fix sitemap 2024-09-26 00:23:19 -04:00
Raven Scott
ce301fcc77 fix sitemap 2024-09-26 00:20:45 -04:00
Raven Scott
e0bc9509d1 fix sitemap 2024-09-26 00:20:26 -04:00
Raven Scott
9c3957c8f0 update menu 2024-09-26 00:18:59 -04:00
Raven Scott
f101f38017 update menu 2024-09-26 00:16:43 -04:00
Raven Scott
dbdd34f521 update code 2024-09-26 00:14:52 -04:00
Raven Scott
348ef3245d update code 2024-09-26 00:10:33 -04:00
Raven Scott
57fafd2126 update code 2024-09-26 00:09:39 -04:00
Raven Scott
05455191f5 revert 2024-09-26 00:03:45 -04:00
Raven Scott
0a84015943 update 2024-09-25 23:57:59 -04:00
Raven Scott
830a51c334 update 2024-09-25 23:57:09 -04:00
Raven Scott
d391bcd7e6 uodate route 2024-09-25 23:55:47 -04:00
Raven Scott
b23e3071b6 uodate route 2024-09-25 23:52:55 -04:00
Raven Scott
75e87c8d93 uodate link 2024-09-25 23:25:57 -04:00
Raven Scott
0f561b0353 fix post order 2024-09-25 23:05:49 -04:00
Raven Scott
c0d530be27 update 2024-09-25 22:43:14 -04:00
Raven Scott
5d8f2a2b80 fix filename 2024-09-25 22:38:39 -04:00
Raven Scott
6d60520654 update css 2024-09-25 22:33:01 -04:00
Raven Scott
45c6601406 update css 2024-09-20 02:29:25 -04:00
Raven Scott
3b06237e12 update css 2024-09-20 02:27:00 -04:00
Raven Scott
344c53544a add article 2024-09-20 02:19:58 -04:00
Raven Scott
b962825ba3 add article 2024-09-20 01:56:49 -04:00
Raven Scott
64b493bb31 update readme 2024-09-19 19:47:42 -04:00
Raven Scott
7ee3d2f731 update captcha to contact 2024-09-19 08:37:50 -04:00
Raven Scott
390671b8f3 update captcha to contact 2024-09-19 08:36:25 -04:00
Raven Scott
e7ac2eaf17 update captcha to contact 2024-09-19 08:35:56 -04:00
Raven Scott
c13d7eba4d update captcha to contact 2024-09-19 08:35:20 -04:00
Raven Scott
983966b932 update captcha to contact 2024-09-19 08:34:42 -04:00
Raven Scott
0066061a7b Add captcha to contact 2024-09-19 08:30:19 -04:00
Raven Scott
18a427c7b0 change from mtime to dateCreated 2024-09-19 01:45:44 -04:00
Raven Scott
aed95beea1 change from mtime to dateCreated 2024-09-19 01:44:03 -04:00
Raven Scott
32d6465bf9 update 2024-09-19 01:40:10 -04:00
Raven Scott
b13ce5ca0c update 2024-09-19 01:39:34 -04:00
Raven Scott
e718252f74 update 2024-09-19 01:38:42 -04:00
Raven Scott
530d7ddb05 update 2024-09-19 01:36:48 -04:00
Raven Scott
d215bdb89b update 2024-09-19 01:35:18 -04:00
Raven Scott
5bd27c069e update 2024-09-19 01:34:06 -04:00
Raven Scott
dd89a3c577 update 2024-09-19 01:31:52 -04:00
Raven Scott
33fdc1396c update 2024-09-19 01:30:18 -04:00
Raven Scott
f7fbbc2889 update 2024-09-19 01:20:20 -04:00
Raven Scott
c9dd063557 update 2024-09-19 01:19:43 -04:00
Raven Scott
fd20b088e4 update 2024-09-19 01:10:33 -04:00
Raven Scott
eb50ed4a11 update 2024-09-19 01:08:49 -04:00
Raven Scott
d58282df5d update 2024-09-19 01:08:05 -04:00
Raven Scott
2609ac9e3f update 2024-09-19 01:06:05 -04:00
Raven Scott
04f0d02a97 update 2024-09-19 01:01:30 -04:00
Raven Scott
24606151ad update 2024-09-19 01:00:17 -04:00
Raven Scott
3193423ddd update 2024-09-18 21:14:09 -04:00
Raven Scott
a53231160c update 2024-09-18 20:38:42 -04:00
Raven Scott
462e2a232d update 2024-09-18 20:33:59 -04:00
Raven Scott
adebf8b317 update 2024-09-18 19:45:25 -04:00
Raven Scott
85ec0ac68d Update 2024-09-18 19:44:21 -04:00
Raven Scott
1be3b8eea0 Update article 2024-09-18 19:43:35 -04:00
Raven Scott
bdc4cdaf77 Update article 2024-09-18 19:42:13 -04:00
Raven Scott
099c9f138b Update article 2024-09-18 19:37:33 -04:00
Raven Scott
dab7a7d0f8 Update article 2024-09-18 19:37:01 -04:00
Raven Scott
30b02e6084 Update article 2024-09-18 19:35:49 -04:00
Raven Scott
af22b29ec1 Update article 2024-09-18 19:34:04 -04:00
Raven Scott
6f69ec66a4 Update article 2024-09-18 19:33:34 -04:00
Raven Scott
b1b6cfd650 Update 2024-09-18 19:18:17 -04:00
Raven Scott
2d0d653e83 Update 2024-09-18 19:15:41 -04:00
Raven Scott
9ff7d0bc8c fix rss and website sitemap 2024-09-18 18:55:52 -04:00
Raven Scott
5236c18fe1 fix format 2024-09-18 18:52:42 -04:00
Raven Scott
4c97b608ee Make latest posts show first, fix special chars in URL and add new article 2024-09-18 18:46:51 -04:00
Raven Scott
838dc4c706 Add Sitemap and RSS Feed to footer 2024-09-17 05:36:33 -04:00
Raven Scott
719fd33dc5 Article update 2024-09-16 16:23:42 -04:00
0c9279805e Merge pull request 'feat: redirect if page is below 1' (#1) from Cyber/ravenscott-blog:main into main
Reviewed-on: snxraven/ravenscott-blog#1
2024-09-16 16:03:46 -04:00
44 changed files with 7715 additions and 355 deletions

.DS_Store (new binary file; content not shown)

.gitignore (2 changes)

@@ -1,4 +1,4 @@
node_modules
package-lock.json
.env
menu.md

README.md

@@ -1,5 +1,3 @@
Here is a `README.md` file for your project:
```markdown
# Raven Scott Blog Website
@@ -62,7 +60,7 @@ raven-scott-website
4. Run the project:
```bash
npm start
node app.js
```
The server will run on [http://localhost:3000](http://localhost:3000).

app.js (333 changes)

@@ -6,13 +6,13 @@ const { marked } = require('marked');
const nodemailer = require('nodemailer');
const hljs = require('highlight.js');
const { format } = require('date-fns'); // To format dates in a proper XML format
const axios = require('axios'); // Add axios for reCAPTCHA verification
const app = express();
// Set options for marked to use highlight.js for syntax highlighting
marked.setOptions({
highlight: function (code, language) {
// Check if the language is valid
const validLanguage = hljs.getLanguage(language) ? language : 'plaintext';
return hljs.highlight(validLanguage, code).value;
}
@@ -28,6 +28,35 @@ app.use(express.urlencoded({ extended: false }));
// Serve static files (CSS, Images)
app.use(express.static(path.join(__dirname, 'public')));
// Function to load menu items from the markdown file
function loadMenuItems() {
const menuFile = path.join(__dirname, 'menu.md');
const content = fs.readFileSync(menuFile, 'utf-8');
const menuItems = [];
const itemRegex = /<!--\s*title:\s*(.*?)\s*-->\s*(<!--\s*openNewPage\s*-->\s*)?<!--\s*url:\s*(.*?)\s*-->/g;
let match;
// Loop to find all menu items
while ((match = itemRegex.exec(content)) !== null) {
const title = match[1];
const url = match[3];
const openNewPage = !!match[2]; // Check if openNewPage is present in the match
menuItems.push({
title,
url,
openNewPage
});
}
return menuItems;
}
// Load the menu once and make it available to all routes
const menuItems = loadMenuItems();
// Function to load and parse markdown files and extract lead
function loadMarkdownWithLead(file) {
const markdownContent = fs.readFileSync(path.join(__dirname, 'markdown', file), 'utf-8');
@@ -35,39 +64,44 @@ function loadMarkdownWithLead(file) {
let lead = '';
let contentMarkdown = markdownContent;
// Detect and extract the lead section
const leadKeyword = '<!-- lead -->';
if (contentMarkdown.includes(leadKeyword)) {
const [beforeLead, afterLead] = contentMarkdown.split(leadKeyword);
// Extract the first paragraph after the lead keyword
lead = afterLead.split('\n').find(line => line.trim() !== '').trim();
// Remove the lead from the main content
contentMarkdown = beforeLead + afterLead.replace(lead, '').trim();
}
// Convert markdown to HTML
const contentHtml = marked.parse(contentMarkdown);
return { contentHtml, lead };
}
// Function to convert a title (with spaces) into a URL-friendly slug (with dashes)
// Function to convert a title into a URL-friendly slug
function titleToSlug(title) {
return title.replace(/\s+/g, '-').toLowerCase(); // Always lowercase the slug
return title
.toLowerCase()
.replace(/[^a-z0-9\s-]/g, '')
.replace(/\s+/g, '-');
}
// Function to convert a slug (with dashes) back into a readable title (with spaces)
function slugToTitle(slug) {
return slug.replace(/-/g, ' ');
}
// Function to load all blog posts with pagination and search support
function getAllBlogPosts(page = 1, postsPerPage = 5, searchQuery = '') {
let blogFiles = fs.readdirSync(path.join(__dirname, 'markdown')).filter(file => file.endsWith('.md'));
// Function to load all blog posts with pagination support
function getAllBlogPosts(page = 1, postsPerPage = 5) {
const blogFiles = fs.readdirSync(path.join(__dirname, 'markdown')).filter(file => file.endsWith('.md'));
if (searchQuery) {
const lowerCaseQuery = searchQuery.toLowerCase();
blogFiles = blogFiles.filter(file => file.toLowerCase().includes(lowerCaseQuery));
}
if (blogFiles.length === 0) {
return { blogPosts: [], totalPages: 0 }; // Return empty results if no files
}
blogFiles.sort((a, b) => {
const statA = fs.statSync(path.join(__dirname, 'markdown', a)).birthtime;
const statB = fs.statSync(path.join(__dirname, 'markdown', b)).birthtime;
return statB - statA;
});
// Paginate the results
const totalPosts = blogFiles.length;
const totalPages = Math.ceil(totalPosts / postsPerPage);
const start = (page - 1) * postsPerPage;
@@ -76,116 +110,101 @@ function getAllBlogPosts(page = 1, postsPerPage = 5) {
const paginatedFiles = blogFiles.slice(start, end);
const blogPosts = paginatedFiles.map(file => {
const title = file.replace('.md', '').replace(/-/g, ' '); // Keep original casing for title
const slug = titleToSlug(title); // Convert title to slug (lowercase)
// Get the last modified date of the markdown file
const title = file.replace('.md', '').replace(/-/g, ' ');
const slug = titleToSlug(title);
const stats = fs.statSync(path.join(__dirname, 'markdown', file));
const lastModifiedDate = new Date(stats.mtime); // Use mtime for last modification time
const dateCreated = new Date(stats.birthtime);
// Format the date
const formattedDate = lastModifiedDate.toLocaleDateString('en-US', {
year: 'numeric',
month: 'long',
day: 'numeric'
});
return {
title, // Original casing title
slug,
date: formattedDate // Include the formatted date
};
return { title, slug, dateCreated };
});
return { blogPosts, totalPages };
}
// Home Route (Blog Home with Pagination)
// Home Route (Blog Home with Pagination and Search)
app.get('/', (req, res) => {
const page = parseInt(req.query.page) || 1;
const searchQuery = req.query.search || '';
if (page < 1) {
return res.redirect(req.hostname);
}
const postsPerPage = 5; // Set how many posts to display per page
const postsPerPage = 5;
const { blogPosts, totalPages } = getAllBlogPosts(page, postsPerPage, searchQuery);
const { blogPosts, totalPages } = getAllBlogPosts(page, postsPerPage);
const noResults = blogPosts.length === 0; // Check if there are no results
res.render('index', {
title: 'Raven Scott Blog',
title: `${process.env.OWNER_NAME}'s Blog`,
blogPosts,
currentPage: page,
totalPages
totalPages,
searchQuery, // Pass search query to the view
noResults, // Pass this flag to indicate no results found
menuItems // Pass the menu items to the view
});
});
// About Route
// About Route (Load markdown and render using EJS)
app.get('/about', (req, res) => {
res.render('about', { title: 'About Raven Scott' });
});
const aboutMarkdownFile = path.join(__dirname, 'me', 'about.md');
// Display the Request a Quote form
app.get('/contact', (req, res) => {
res.render('contact', { title: 'Contact Raven Scott', msg: undefined });
});
// Handle contact form submission
app.post('/contact', (req, res) => {
const { name, email, subject, message } = req.body;
// Validate form inputs (basic example)
if (!name || !email || !subject || !message) {
return res.render('contact', { title: 'Contact Raven Scott', msg: 'All fields are required.' });
}
// Create email content
const output = `
<p>You have a new contact request from <strong>${name}</strong>.</p>
<h3>Contact Details</h3>
<ul>
<li><strong>Name:</strong> ${name}</li>
<li><strong>Email:</strong> ${email}</li>
<li><strong>Subject:</strong> ${subject}</li>
</ul>
<h3>Message</h3>
<p>${message}</p>
`;
// Set up Nodemailer transporter
let transporter = nodemailer.createTransport({
host: process.env.SMTP_HOST,
port: process.env.SMTP_PORT,
secure: false, // true for 465, false for other ports
auth: {
user: process.env.EMAIL_USER, // Email user from environment variables
pass: process.env.EMAIL_PASS, // Email password from environment variables
},
tls: {
rejectUnauthorized: false,
},
});
// Set up email options
let mailOptions = {
from: `"${name}" <quote@node-geeks.com>`,
to: process.env.RECEIVER_EMAIL, // Your email address to receive contact form submissions
subject: subject,
html: output,
};
// Send email
transporter.sendMail(mailOptions, (error, info) => {
if (error) {
console.error(error);
return res.render('contact', { title: 'Contact Raven Scott', msg: 'An error occurred. Please try again.' });
} else {
console.log('Email sent: ' + info.response);
return res.render('contact', { title: 'Contact Raven Scott', msg: 'Your message has been sent successfully!' });
// Read the markdown file and convert to HTML
fs.readFile(aboutMarkdownFile, 'utf-8', (err, data) => {
if (err) {
return res.status(500).send('Error loading About page');
}
const aboutContentHtml = marked(data); // Convert markdown to HTML
res.render('about', {
title: `About ${process.env.OWNER_NAME}`,
content: aboutContentHtml,
menuItems // Pass the menu items to the view
});
});
});
// About Route (Load markdown and render using EJS)
app.get('/about-rayai', (req, res) => {
const aboutMarkdownFile = path.join(__dirname, 'me', 'about-rayai.md');
// Read the markdown file and convert to HTML
fs.readFile(aboutMarkdownFile, 'utf-8', (err, data) => {
if (err) {
return res.status(500).send('Error loading About page');
}
const aboutContentHtml = marked(data); // Convert markdown to HTML
res.render('about-rayai', {
title: `About RayAI`,
content: aboutContentHtml,
menuItems // Pass the menu items to the view
});
});
});
// Contact Route (Render the contact form)
app.get('/contact', (req, res) => {
res.render('contact', {
title: `Contact ${process.env.OWNER_NAME}`,
msg: undefined,
menuItems // Pass the menu items to the view
});
});
// Chat Route (Render the RayAI chat page)
app.get('/chat', (req, res) => {
res.render('chat', {
title: `RayAI - Raven's Chatbot`,
msg: undefined,
menuItems // Pass the menu items to the view
});
});
// Blog Post Route
app.get('/blog/:slug', (req, res) => {
const slug = req.params.slug;
@@ -193,59 +212,63 @@ app.get('/blog/:slug', (req, res) => {
.find(file => titleToSlug(file.replace('.md', '')) === slug);
if (markdownFile) {
const originalTitle = markdownFile.replace('.md', ''); // Original title with casing
const originalTitle = markdownFile.replace('.md', '');
const blogPosts = getAllBlogPosts();
const { contentHtml, lead } = loadMarkdownWithLead(markdownFile);
// Fallback to a generic description if lead is not available
const description = lead || `${originalTitle} - Read the full post on ${process.env.OWNER_NAME}'s blog.`;
res.render('blog-post', {
title: originalTitle, // Use the original title with casing
title: originalTitle,
content: contentHtml,
lead: lead,
blogPosts
lead,
description, // Pass the description to the view
blogPosts,
menuItems // Pass the menu items to the view
});
} else {
res.redirect('/'); // Redirect to the home page if the blog post is not found
res.redirect('/');
}
});
// Sitemap Route
app.get('/sitemap.xml', (req, res) => {
const hostname = req.headers.host || 'http://localhost'; // Ensure this is your site URL in production
const blogFiles = fs.readdirSync(path.join(__dirname, 'markdown')).filter(file => file.endsWith('.md'));
const hostname = req.headers.host || 'http://localhost';
const blogFiles = fs.readdirSync(path.join(__dirname, 'markdown'))
.filter(file => file.endsWith('.md'))
.sort((a, b) => {
const statA = fs.statSync(path.join(__dirname, 'markdown', a)).birthtime;
const statB = fs.statSync(path.join(__dirname, 'markdown', b)).birthtime;
return statB - statA;
});
// Static URLs (e.g., homepage, about, contact)
const staticUrls = [
{ url: '/', changefreq: 'weekly', priority: 1.0 },
{ url: '/about', changefreq: 'monthly', priority: 0.8 },
{ url: '/contact', changefreq: 'monthly', priority: 0.8 }
{ url: `${process.env.HOST_URL}`, changefreq: 'weekly', priority: 1.0 },
{ url: `${process.env.HOST_URL}/about`, changefreq: 'monthly', priority: 0.8 },
{ url: `${process.env.HOST_URL}/contact`, changefreq: 'monthly', priority: 0.8 }
];
// Dynamic URLs (e.g., blog posts)
const blogUrls = blogFiles.map(file => {
const title = file.replace('.md', '');
const slug = titleToSlug(title);
// Get the last modified date of the markdown file
const stats = fs.statSync(path.join(__dirname, 'markdown', file));
const lastModifiedDate = format(new Date(stats.mtime), 'yyyy-MM-dd');
const lastModifiedDate = format(new Date(stats.birthtime), 'yyyy-MM-dd');
return {
url: `/blog/${slug}`,
url: `${process.env.BLOG_URL}${slug}`,
lastmod: lastModifiedDate,
changefreq: 'monthly',
priority: 0.9
};
});
// Combine static and dynamic URLs
const urls = [...staticUrls, ...blogUrls];
// Generate the XML for the sitemap
let sitemap = `<?xml version="1.0" encoding="UTF-8"?>\n<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n`;
urls.forEach(({ url, lastmod, changefreq, priority }) => {
sitemap += ` <url>\n`;
sitemap += ` <loc>https://${hostname}${url}</loc>\n`;
sitemap += ` <loc>${url}</loc>\n`;
if (lastmod) {
sitemap += ` <lastmod>${lastmod}</lastmod>\n`;
}
@@ -255,59 +278,85 @@ app.get('/sitemap.xml', (req, res) => {
});
sitemap += `</urlset>`;
// Set the content type to XML and send the response
res.header('Content-Type', 'application/xml');
res.send(sitemap);
});
// RSS Feed Route
app.get('/rss', (req, res) => {
const hostname = req.headers.host || 'http://localhost'; // Adjust for production if needed
const blogFiles = fs.readdirSync(path.join(__dirname, 'markdown')).filter(file => file.endsWith('.md'));
const hostname = req.headers.host || 'http://localhost';
const blogFiles = fs.readdirSync(path.join(__dirname, 'markdown'))
.filter(file => file.endsWith('.md'))
.sort((a, b) => {
const statA = fs.statSync(path.join(__dirname, 'markdown', a)).birthtime;
const statB = fs.statSync(path.join(__dirname, 'markdown', b)).birthtime;
return statB - statA;
});
// Build RSS feed
let rssFeed = `<?xml version="1.0" encoding="UTF-8" ?>\n<rss version="2.0">\n<channel>\n`;
rssFeed += `<title>Raven Scott Blog</title>\n`;
rssFeed += `<title>${process.env.OWNER_NAME} Blog</title>\n`;
rssFeed += `<link>https://${hostname}</link>\n`;
rssFeed += `<description>This is the RSS feed for Raven Scott's blog.</description>\n`;
rssFeed += `<description>This is the RSS feed for ${process.env.OWNER_NAME}'s blog.</description>\n`;
// Generate RSS items for each blog post
blogFiles.forEach(file => {
const title = file.replace('.md', '');
const slug = titleToSlug(title);
// Get the last modified date of the markdown file
const stats = fs.statSync(path.join(__dirname, 'markdown', file));
const lastModifiedDate = new Date(stats.mtime).toUTCString(); // Use UTC date for RSS
// Load and parse markdown content to extract a lead or description
const lastModifiedDate = new Date(stats.birthtime).toUTCString();
const { lead } = loadMarkdownWithLead(file);
// RSS item for each post
rssFeed += `<item>\n`;
rssFeed += `<title>${title}</title>\n`;
rssFeed += `<link>https://${hostname}/blog/${slug}</link>\n`;
rssFeed += `<link>${process.env.BLOG_URL}${slug}</link>\n`;
rssFeed += `<description>${lead || 'Read the full post on the blog.'}</description>\n`;
rssFeed += `<pubDate>${lastModifiedDate}</pubDate>\n`;
rssFeed += `<guid>https://${hostname}/blog/${slug}</guid>\n`;
rssFeed += `<guid>${process.env.BLOG_URL}${slug}</guid>\n`;
rssFeed += `</item>\n`;
});
rssFeed += `</channel>\n</rss>`;
// Set content type to XML and send the RSS feed
res.header('Content-Type', 'application/rss+xml');
res.send(rssFeed);
});
// Global 404 handler for any other unmatched routes
app.use((req, res) => {
res.redirect('/'); // Redirect to the home page for any 404 error
// Route to return all blog content in plain text JSON format
app.get('/json', (req, res) => {
const blogFiles = fs.readdirSync(path.join(__dirname, 'markdown')).filter(file => file.endsWith('.md'));
const blogPosts = blogFiles.map(file => {
const title = file.replace('.md', '').replace(/-/g, ' ');
const slug = titleToSlug(title);
const markdownContent = fs.readFileSync(path.join(__dirname, 'markdown', file), 'utf-8');
// Strip all formatting and return plain text
const plainTextContent = markdownContent.replace(/[#*>\-`_~[\]]/g, '').replace(/\n+/g, ' ').trim();
return {
title,
slug,
content: plainTextContent
};
});
res.json(blogPosts);
});
// Create a URL object from the environment variable
const blog_URL = new URL(process.env.BLOG_URL);
// Extract just the hostname (e.g., blog.raven-scott.fyi)
const hostname = blog_URL.hostname;
// Global 404 handler for unmatched routes
app.use((req, res) => {
if (req.hostname === hostname) {
res.redirect(process.env.HOST_URL);
} else {
res.redirect('/');
}
});
// ================================
// Server Listening
// ================================
const PORT = process.env.PORT || 8899;
app.listen(PORT, () => {
console.log(`Server running on http://localhost:${PORT}`);
});

default.env (new file, 29 lines)

@@ -0,0 +1,29 @@
# SMTP configuration for sending emails
SMTP_HOST=us2.smtp.yourtld.com # SMTP server host
SMTP_PORT=587 # SMTP server port
EMAIL_USER=user@yourtld.com # Email address used for SMTP authentication
EMAIL_PASS="ComplexPass" # Password for the SMTP user (Use a complex, secure password)
RECEIVER_EMAIL=youremail@yourtld.com # Default receiver email for outgoing messages
# CAPTCHA key for form verification (replace with your real CAPTCHA secret key)
CAPTCHA_SECRET_KEY="KEYHERE"
CAPTCHA_SITE_KEY="SITE_KEY_HERE"
# URL configuration
# NO TRAILING SLASH - Base host URL for the website
HOST_URL="https://yourtld.com"
# TRAILING SLASH - Blog URL, should have a trailing slash at the end
BLOG_URL="https://blog.yourtld.com/"
# Domain name, without URL structure
DOMAIN_NAME="yourtld.com"
# Website branding
SITE_NAME="whatever you want here" # Title used in the website's navbar
OWNER_NAME="Your Name" # Name of the website's owner (you)
# Front page content
FRONT_PAGE_TITLE="Hello, my name is Your Name" # Main heading on the homepage
FRONT_PAGE_LEAD="Where Technology Meets Creativity: Insights from a Linux Enthusiast" # Short lead text on the homepage
# Footer content
FOOTER_TAGLINE="Never Stop Learning" # Tagline for the footer section of the website

default.menu.md (new file, 12 lines)

@@ -0,0 +1,12 @@
<!-- title: Home -->
<!-- url: / -->
<!-- title: About Me -->
<!-- url: /about -->
<!-- title: Secrets -->
<!-- openNewPage -->
<!-- url: https://your-external-link.com -->
<!-- title: Contact -->
<!-- url: /contact -->

(changed markdown article; filename not shown)

@@ -1,7 +1,7 @@
<!-- lead -->
Deep Dive: Automating Container Backups and Saving Space Using `docker export` and `docker import`
In modern infrastructure, Docker containers provide a lightweight and efficient way to run applications, but managing container storage, backups, and minimizing overhead can be complex. The method you are using to shrink running containers into new images by leveraging `docker export` and `docker import` provides an elegant solution to this challenge. This approach not only automates backups but also optimizes space usage. Let's dive into the details of how it works and its potential applications.
In modern infrastructure, Docker containers provide a lightweight and efficient way to run applications, but managing container storage, backups, and minimizing overhead can be complex. The method used in this article to shrink running containers into new images by leveraging `docker export` and `docker import` provides an elegant solution to this challenge. This approach not only automates backups but also optimizes space usage. Let's dive into the details of how it works and its potential applications.
### Command Overview

(new markdown article; filename not shown)

@@ -0,0 +1,372 @@
<!-- lead -->
How I Built This Blog: A Deep Dive into Modern Web Development
A blog is one of the most powerful tools for sharing information, building authority, and engaging with an audience. When I decided to build a blog platform using **Node.js**, I wanted to go beyond the typical setup. I aimed for a feature-rich platform that dynamically serves content from Markdown files, supports pagination, integrates syntax highlighting for code snippets, offers a functional contact form with **reCAPTCHA** validation, generates **RSS** feeds and **sitemaps** for better SEO, and allows for customized pages like "About Me" to be loaded directly from Markdown files.
In this in-depth technical breakdown, I'll walk you through every aspect of the platform's architecture and code, explaining why I chose each technology and how the different parts work together. If you're looking to create your own blog platform or simply want to dive deeper into building dynamic web applications with **Node.js**, this post will cover everything in great detail.
# Source
https://git.ssh.surf/snxraven/ravenscott-blog
## Why Node.js and Express?
Before we get into the technical details, let's talk about the choice of technologies. I chose **Node.js** as the runtime because of its event-driven, non-blocking I/O model, which is great for building scalable and performant web applications. **Express.js**, a minimalist web framework for Node, simplifies the process of setting up a web server, routing requests, and serving static files.
Here's why these choices make sense for this project:
- **Node.js**: Handles high-concurrency applications well, meaning it can efficiently serve multiple blog readers without performance bottlenecks.
- **Express.js**: Provides a straightforward way to build a RESTful architecture for managing routes, handling form submissions, and rendering views dynamically.
## Folder Structure: Organizing the Blog Platform
One of the first things you need to think about when building a project is its structure. Here's a breakdown of the folder structure I used for this blog platform:
```
/blog-platform
├── /markdown # Contains all blog posts written in Markdown
│ └── post-1.md # Example Markdown blog post
├── /public # Public assets (CSS, images, etc.)
│ └── /css
│ └── styles.css # Custom styles for the blog
├── /views # EJS templates (HTML views rendered by the server)
│ ├── index.ejs # Homepage template showing a list of blog posts
│ ├── blog-post.ejs # Template for individual blog posts
│ ├── about.ejs # "About Me" page (loaded from markdown)
│ └── contact.ejs # Contact form page
├── /me # Personal markdown files (like About Me)
│ └── about.md # Markdown file for the "About Me" page
├── app.js # Main server file, handles all backend logic
├── package.json # Project dependencies and scripts
├── .env # Environment variables (API keys, credentials, etc.)
└── README.md # Documentation
```
This structure provides a clear separation of concerns:
- **Markdown** files are stored in their own directory.
- **Public** assets (CSS, images) are isolated for easy reference.
- **Views** are where EJS templates are stored, allowing us to easily manage the HTML structure of each page.
- **me** contains personal information like the **about.md** file, which gets rendered dynamically for the "About Me" page.
- **app.js** acts as the control center, handling the routes, form submissions, and the logic for rendering content.
## Setting Up the Express Server
The core of the application is the **Express.js** server, which powers the entire backend. In `app.js`, we initialize Express, set up the middleware, and define the routes. But before we get into the route handling, let's break down the middleware and configuration settings we used.
### 1. **Loading Dependencies**
Here are the key dependencies we load at the top of the file:
```javascript
require('dotenv').config(); // Load environment variables from .env
const express = require('express');
const path = require('path');
const fs = require('fs');
const { marked } = require('marked'); // For parsing Markdown files
const nodemailer = require('nodemailer'); // For sending emails from the contact form
const hljs = require('highlight.js'); // For syntax highlighting in code blocks
const axios = require('axios'); // For making HTTP requests, e.g., reCAPTCHA verification
const { format } = require('date-fns'); // For formatting dates in RSS feeds and sitemaps
const app = express(); // Initialize Express
```
Here's what each dependency does:
- **dotenv**: Loads environment variables from a `.env` file, which we use to store sensitive information like API keys and email credentials.
- **path** and **fs**: Standard Node.js modules that help us work with file paths and file systems. We use these to read Markdown files and serve static assets.
- **marked**: A Markdown parser that converts Markdown syntax into HTML, allowing us to write blog posts using a simple syntax.
- **nodemailer**: Handles sending emails when users submit the contact form.
- **highlight.js**: Provides syntax highlighting for any code blocks in the blog posts. This is essential for making technical posts more readable.
- **axios**: Used for making external HTTP requests (e.g., verifying the Google reCAPTCHA response).
- **date-fns**: A utility for formatting dates, which we use to ensure dates are correctly formatted in RSS feeds and sitemaps.
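As mentioned for `axios` above, the contact form relies on Google reCAPTCHA. The handler itself is not shown in this excerpt, but a server-side verification call typically looks like the following sketch (the `verifyCaptcha` helper is hypothetical; the siteverify endpoint and its `secret`/`response` parameters are Google's standard API, and `CAPTCHA_SECRET_KEY` comes from the project's `.env`):

```javascript
// Sketch: verify a reCAPTCHA token server-side before accepting a form submission
async function verifyCaptcha(token) {
  const { data } = await axios.post(
    'https://www.google.com/recaptcha/api/siteverify',
    null,
    { params: { secret: process.env.CAPTCHA_SECRET_KEY, response: token } }
  );
  return data.success === true; // true only if Google accepted the token
}
```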
### 2. **Setting Up Middleware and Template Engine**
Express makes it easy to set up middleware, which is crucial for handling static assets (like CSS files), parsing form data, and rendering templates using a view engine.
#### EJS Templating Engine
We use **EJS** as the templating engine. This allows us to embed JavaScript logic directly within our HTML, making it possible to render dynamic content like blog posts and form submission results.
```javascript
app.set('view engine', 'ejs');
app.set('views', path.join(__dirname, 'views'));
```
This configuration tells Express to use the `views` folder for storing the HTML templates and `EJS` as the engine to render those templates.
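For illustration, here is a minimal sketch of what a template such as `views/index.ejs` might contain (hypothetical markup; the repo's actual templates are more elaborate):

```html
<!-- Hypothetical fragment: render the blogPosts array the home route passes in -->
<h1><%= title %></h1>
<ul>
  <% blogPosts.forEach(post => { %>
    <li><a href="/blog/<%= post.slug %>"><%= post.title %></a></li>
  <% }) %>
</ul>
```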
#### Serving Static Files
Static files (like CSS and images) need to be served from the `/public` directory. This is where we store the CSS styles used to make the blog look visually appealing.
```javascript
app.use(express.static(path.join(__dirname, 'public')));
```
#### Parsing Form Data
When users submit the contact form, the form data is sent as **URL-encoded** data. To handle this, we use `express.urlencoded` middleware, which parses the form submissions and makes the data accessible via `req.body`.
```javascript
app.use(express.urlencoded({ extended: false }));
```
## Markdown Parsing with Syntax Highlighting
One of the primary features of this blog platform is that it allows you to write blog posts using **Markdown**. Markdown is a simple markup language that converts easily to HTML and is especially popular among developers because of its lightweight syntax for writing formatted text.
### 1. **Setting Up `marked` and `highlight.js`**
To convert Markdown content to HTML, I used **Marked.js**, a fast and lightweight Markdown parser. Additionally, since many blog posts contain code snippets, **Highlight.js** is used to provide syntax highlighting for those snippets.
```javascript
marked.setOptions({
highlight: function (code, language) {
const validLanguage = hljs.getLanguage(language) ? language : 'plaintext';
return hljs.highlight(validLanguage, code).value;
}
});
```
This makes code blocks in blog posts more readable by colorizing keywords, variables, and other syntax elements, which improves the user experience, especially for technical blogs.
### 2. **Loading the 'About Me' Page from Markdown**
To create a more personalized "About Me" page, I stored the content in a Markdown file (`me/about.md`) and dynamically rendered it with Express. Here's how we load the `about.md` file and render it using **EJS**:
```javascript
app.get('/about', (req, res) => {
const aboutMarkdownFile = path.join(__dirname, 'me', 'about.md');
// Read the markdown file and convert it to HTML
fs.readFile(aboutMarkdownFile, 'utf-8', (err, data) => {
if (err) {
return res.status(500).send('Error loading About page');
}
const aboutContentHtml = marked(data); // Convert markdown to HTML
res.render('about', {
title: `About ${process.env.OWNER_NAME}`,
content: aboutContentHtml
});
});
});
```
## Blog Post Storage and Rendering
### 1. **Storing Blog Posts as Markdown Files**
Instead of storing blog posts in a database, this platform uses a simpler approach: each blog post is a `.md` file stored in the `/markdown` directory. This approach is not only easier to manage, but it also gives writers the flexibility to create and update posts using any text editor.
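For example, a hypothetical post file, `markdown/My-First-Post.md` (the filename, with dashes standing in for spaces, becomes the title), might look like this:

```markdown
<!-- lead -->
A one-sentence summary that becomes the post's lead and meta description.

## Introduction

Regular Markdown content goes here, including headings, links, and code.
```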
### 2. **Rendering Markdown as HTML**
To render the Markdown content as HTML on the frontend, we define a function `loadMarkdownWithLead` that reads the Markdown file, parses it, and extracts the lead section if available.
```js
function loadMarkdownWithLead(file) {
const markdownContent = fs.readFileSync(path.join(__dirname, 'markdown', file), 'utf-8');
let lead = '';
let contentMarkdown = markdownContent;
// Extract the lead section marked by `<!-- lead -->`
const leadKeyword = '<!-- lead -->';
if (contentMarkdown.includes(leadKeyword)) {
const [beforeLead, afterLead] = contentMarkdown.split(leadKeyword);
lead = afterLead.split('\n').find(line => line.trim() !== '').trim();
contentMarkdown = beforeLead + afterLead.replace(lead, '').trim();
}
const contentHtml = marked.parse(contentMarkdown);
return { contentHtml, lead };
}
```
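Assuming the hypothetical post file shown earlier, using the function looks like this:

```javascript
// Illustrative usage (hypothetical file name)
const { contentHtml, lead } = loadMarkdownWithLead('My-First-Post.md');
console.log(lead);        // first non-empty line after the <!-- lead --> marker
console.log(contentHtml); // the remaining Markdown rendered to HTML
```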
### 3. **Dynamically Rendering Blog Posts**
For each blog post, we generate a URL based on its title (converted to a slug format). The user can access a blog post by navigating to `/blog/{slug}`, where `{slug}` is the URL-friendly version of the title.
```javascript
app.get('/blog/:slug', (req, res) => {
const slug = req.params.slug;
const markdownFile = fs.readdirSync(path.join(__dirname, 'markdown'))
.find(file => titleToSlug(file.replace('.md', '')) === slug);
if (markdownFile) {
const originalTitle = markdownFile.replace('.md', '');
const { contentHtml, lead } = loadMarkdownWithLead(markdownFile);
res.render('blog-post', {
title: originalTitle,
content: contentHtml,
lead: lead
});
} else {
res.redirect('/');
}
});
```
### 4. **Slug Generation for Blog Posts**
The title of each post is converted into a URL-friendly slug, which is used in the post's URL. Here's the utility function for converting a title into a slug:
```javascript
function titleToSlug(title) {
return title.toLowerCase()
.replace(/[^a-z0-9\s-]/g, '') // Remove non-alphanumeric characters
.replace(/\s+/g, '-'); // Replace spaces with dashes
}
```
This ensures that each blog post URL is clean and readable, with no special characters or extra whitespace.
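For instance:

```javascript
// Example transformation (illustrative)
titleToSlug('Deep Dive: Automating Container Backups!');
// => 'deep-dive-automating-container-backups'
```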
## Adding Search Functionality
I've implemented a search feature that allows users to search for blog posts by title. The search functionality reads through all the Markdown filenames and returns posts that match the search query.
```javascript
function getAllBlogPosts(page = 1, postsPerPage = 5, searchQuery = '') {
let blogFiles = fs.readdirSync(path.join(__dirname, 'markdown')).filter(file => file.endsWith('.md'));
if (searchQuery) {
const lowerCaseQuery = searchQuery.toLowerCase();
blogFiles = blogFiles.filter(file => file.toLowerCase().includes(lowerCaseQuery));
}
if (blogFiles.length === 0) {
return { blogPosts: [], totalPages: 0 }; // Return empty results if no files
}
blogFiles.sort((a, b) => {
const statA = fs.statSync(path.join(__dirname, 'markdown', a)).birthtime;
const statB = fs.statSync(path.join(__dirname, 'markdown', b)).birthtime;
return statB - statA;
});
const totalPosts = blogFiles.length;
const totalPages = Math.ceil(totalPosts / postsPerPage);
const start = (page - 1) * postsPerPage;
const end = start + postsPerPage;
const paginatedFiles = blogFiles.slice(start, end);
const blogPosts = paginatedFiles.map(file => {
const title = file.replace('.md', '').replace(/-/g, ' ');
const slug = titleToSlug(title);
const stats = fs.statSync(path.join(__dirname, 'markdown', file));
const dateCreated = new Date(stats.birthtime);
return { title, slug, dateCreated };
});
return { blogPosts, totalPages };
}
```
The search query is passed through the route and displayed dynamically on the homepage with the search results.
```javascript
app.get('/', (req, res) => {
const page = parseInt(req.query.page) || 1;
const searchQuery = req.query.search || '';
if (page < 1) {
return res.redirect(req.hostname);
}
const postsPerPage = 5;
const { blogPosts, totalPages } = getAllBlogPosts(page, postsPerPage, searchQuery);
const noResults = blogPosts.length === 0; // Check if there are no results
res.render('index', {
title: `${process.env.OWNER_NAME}'s Blog`,
blogPosts,
currentPage: page,
totalPages,
searchQuery, // Pass search query to the view
noResults // Pass this flag to indicate no results found
});
});
```
In the `index.ejs` file, the search form dynamically updates the results, and the pagination controls help users navigate between pages.
```html
<form action="/" method="get" class="mb-4">
<div class="input-group">
<input type="text" name="search" class="form-control" placeholder="Search blog posts..." value="<%= typeof searchQuery !== 'undefined' ? searchQuery : '' %>">
</div>
</form>
```
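Pagination works the same way: the view receives `currentPage` and `totalPages` and renders previous/next links. A minimal sketch (hypothetical markup; the actual template may differ):

```html
<% if (currentPage > 1) { %>
  <a href="/?page=<%= currentPage - 1 %>&search=<%= searchQuery %>">&laquo; Previous</a>
<% } %>
<% if (currentPage < totalPages) { %>
  <a href="/?page=<%= currentPage + 1 %>&search=<%= searchQuery %>">Next &raquo;</a>
<% } %>
```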
## Environment Variable Customization
The `.env` file contains all the configuration settings for the site, making it easy to change things like the owner's name, email settings, and URLs without modifying the code.
Here's a breakdown of the relevant `.env` variables:
```env
# SMTP configuration for sending emails
SMTP_HOST=us2.smtp.yourtld.com # SMTP server host
SMTP_PORT=587 # SMTP server port
EMAIL_USER=user@yourtld.com # Email address used for SMTP authentication
EMAIL_PASS="ComplexPass" # Password for the SMTP user
RECEIVER_EMAIL=youremail@yourtld.com # Default receiver email for outgoing messages
# CAPTCHA key for form verification
CAPTCHA_SECRET_KEY="KEYHERE"
# URL configuration
HOST_URL="https://yourtld.com" # Base host URL
BLOG_URL="https://blog.yourtld.com/" # Blog URL (with trailing slash)
# Website branding
SITE_NAME="Your Blog Title" # Title used in the website's navbar
OWNER_NAME="Your Name" # Name of the website's owner (you)
# Front page content
FRONT_PAGE_TITLE="Hello, my name is Your Name" # Main heading on the homepage
FRONT_PAGE_LEAD="Where Technology Meets Creativity: Insights from a Linux Enthusiast" # Lead text on the homepage
# Footer content
FOOTER_TAGLINE="Never Stop Learning" # Tagline for the footer
```
# Final Thoughts
By leveraging **Node.js**, **Express**, **EJS**, and **Markdown**, this blog platform demonstrates how you can combine modern, lightweight technologies to build a dynamic, feature-rich website that is both scalable and easy to maintain. These technologies work together to offer a seamless development experience, allowing you to focus on creating content and functionality rather than worrying about performance bottlenecks or complex configurations.
**Node.js** is renowned for its event-driven, non-blocking architecture, making it perfect for real-time applications and websites that require high concurrency. It allows the platform to handle multiple users and requests simultaneously without compromising performance. This is crucial for blogs or websites with growing traffic, where responsiveness and speed are essential to user experience. The efficiency of Node.js, along with its ability to unify backend and frontend development through JavaScript, creates a cohesive environment that is both efficient and developer-friendly. Whether scaling the application for higher traffic or deploying updates quickly, Node.js provides a fast, reliable runtime.
**Express.js** simplifies the challenges of building a backend server. Its minimalist design allows for easy routing, middleware configuration, and management of HTTP requests. In this blog platform, Express plays a key role in routing different parts of the site, such as serving static assets, rendering dynamic content with EJS, handling form submissions, and integrating security features like reCAPTCHA in the contact form. Express is designed to be flexible and extendable, allowing you to integrate additional functionality like authentication, session management, or third-party APIs with minimal effort. Its built-in support for middleware also enables developers to easily add features or customize existing ones, making the platform adaptable to evolving needs.
**EJS (Embedded JavaScript Templates)** is used to render dynamic content within HTML, making it easy to inject variables and logic directly into views. In this project, EJS powers the dynamic rendering of blog posts, search results, pagination, and custom pages like the "About Me" section. By allowing us to integrate JavaScript logic directly into HTML templates, EJS enables a more interactive and personalized user experience. It also supports the reuse of templates, which helps to keep the code clean and modular. The familiarity of EJS with standard HTML means developers can quickly get up to speed without learning an entirely new templating language.
The use of **Markdown** as the primary format for content creation offers simplicity and flexibility. Storing blog posts in Markdown files removes the need for a complex database, making the platform lightweight and easy to manage. Markdown's intuitive syntax allows content creators to focus on writing, while the platform automatically handles formatting and presentation. When paired with tools like **Marked.js**, Markdown becomes even more powerful, as it allows for easy conversion from plain text into rich HTML. This setup is particularly useful for technical blogs, where code snippets are often embedded. By integrating **highlight.js**, the platform ensures that code blocks are both functional and beautifully presented, making the reading experience more enjoyable and accessible for developers and technical audiences.
This combination of technologies unlocks several powerful features that enhance both the user experience and the development process. With **dynamic content rendering**, the platform efficiently serves blog posts, handles search queries, and manages pagination on the fly. The content is written and stored in Markdown files, but it's transformed into fully styled HTML at the moment of request, allowing for quick updates and modifications. This approach not only makes content management easier but also ensures that users always see the most up-to-date version of the blog without requiring database queries or complex caching mechanisms.
The **flexibility and extendability** of this platform are key advantages. Whether you want to add new features, such as a gallery or portfolio section, or integrate external services like a newsletter or analytics platform, the modular structure of Express and the use of EJS templates make this process straightforward. Adding a new feature is as simple as creating a new Markdown file and a corresponding EJS template, enabling rapid development and easy customization. This makes the platform ideal for developers who want to scale or expand their site over time without worrying about technical debt.
A key principle of this platform is **separation of concerns**, which ensures that the content, logic, and presentation are kept distinct. Blog posts are stored as Markdown files, static assets like CSS and images are kept in their own directory, and the logic for handling routes and rendering views is managed in the Express app. This makes the platform highly maintainable, as changes to one part of the system don't affect other parts. For instance, you can easily update the styling of the blog without changing the logic that handles blog posts or search functionality.
Furthermore, **performance and security** are built into the platform from the start. Node.js's asynchronous, non-blocking architecture ensures that the platform can handle high levels of concurrency with minimal latency. Meanwhile, Express allows for easy integration of security features like **reCAPTCHA**, ensuring that spam submissions are minimized. The use of environment variables stored in a `.env` file means sensitive information, like email credentials and API keys, is kept secure and easily configurable. This approach not only enhances security but also simplifies the deployment process, as configurations can be adjusted without changing the codebase.
One of the standout features of this platform is its **search functionality**. Users can easily search for blog posts by title, with results rendered dynamically based on the query. This is made possible through the flexible routing capabilities of Express, combined with the simplicity of searching through Markdown filenames. The integration of search functionality elevates the user experience, providing quick access to relevant content while maintaining a responsive interface.
Finally, the **environmental customizations** enabled by the `.env` file make the platform incredibly versatile. The `.env` file stores crucial configuration details such as email server settings, CAPTCHA keys, and URLs, allowing these values to be updated without modifying the application's source code. This separation of configuration and logic streamlines deployment and maintenance, especially when migrating the platform to different environments or adjusting for production and development needs. By externalizing configuration, the platform can be easily adapted to different hosting environments, whether it's deployed on a local server, a cloud service, or a dedicated VPS.
In conclusion, this blog platform showcases how **Node.js**, **Express**, **EJS**, and **Markdown** can be combined to create a robust, feature-rich website that is highly adaptable to various content needs. From dynamic blog posts to customizable pages like "About Me," to integrated search functionality and secure contact forms, this platform provides a flexible and efficient solution for content creators, developers, and businesses alike. Its scalability, maintainability, and performance make it a perfect choice for anyone looking to build a modern, high-performance blog or content management system.

(new markdown article; filename not shown)

@@ -0,0 +1,303 @@
<!-- lead -->
A Deep Dive into Live Streaming and Sharing Audio Data
Live audio streaming is a powerful tool for content delivery, communication, and entertainment. From podcasts to live events, the ability to stream audio across the globe in real-time is both convenient and widely utilized. In this blog post, we're going to explore an innovative way to stream audio using peer-to-peer (P2P) technology, leveraging the power of the Hyperswarm network to share audio data with connected peers in real time.
We'll dissect the code, which integrates various Node.js libraries such as Hyperswarm, `youtube-audio-stream`, and `Speaker`. These components, when combined, enable a fully functioning P2P audio streaming solution. By the end of this post, you'll have a comprehensive understanding of how this code works and the fundamental building blocks for creating your own live streaming service without relying on traditional servers.
# Source
## https://git.ssh.surf/snxraven/hypertube
### The Concept: Peer-to-Peer Audio Streaming
The traditional approach to live streaming audio involves a server that transmits data to clients (listeners). This centralized model works well, but it can be costly and have single points of failure. With P2P streaming, instead of having a single server, each peer (user) in the network can act as both a client and a server, sharing the workload of streaming the audio.
The benefits of a P2P system include:
- **Decentralization:** No central server means there is no single point of failure.
- **Scalability:** As more peers join, the network can handle more load.
- **Cost Efficiency:** By eliminating the need for dedicated servers, operational costs are reduced.
Let's break down how the code enables live audio streaming through P2P, starting from the top.
### Setting Up Dependencies
The code starts by requiring several key dependencies that allow us to implement the core functionality of the streaming system. These are the libraries responsible for handling audio data, network connections, and cryptographic operations:
```js
const fs = require('fs');
const b4a = require('b4a');
const Hyperswarm = require('hyperswarm');
const gracefulGoodbye = require('graceful-goodbye');
const crypto = require('hypercore-crypto');
```
- **`fs`** allows file system interaction.
- **`b4a`** is a binary and array buffer utility for encoding/decoding data.
- **`Hyperswarm`** is a peer-to-peer networking library.
- **`gracefulGoodbye`** ensures that the swarm is destroyed correctly when the process exits.
- **`crypto`** provides cryptographic functions to generate random public keys.
### Randomizing Usernames and Setting Up Audio Components
To make the experience dynamic and personalized, the code generates a random username for each user:
```js
let rand = Math.floor(Math.random() * 99999).toString();
let USERNAME = "annon" + rand;
```
Next, we set up audio streaming using `youtube-audio-stream` to fetch and decode the audio from a YouTube URL, and the `Speaker` library to play it locally.
```js
const stream = require('youtube-audio-stream');
const decoder = require('@suldashi/lame').Decoder;
const Speaker = require('speaker');
let audioPlayer = new Speaker({
  channels: 2,
  bitDepth: 16,
  sampleRate: 44100,
});
```
- **`youtube-audio-stream`**: Streams audio directly from YouTube videos.
- **`lame.Decoder`**: Decodes MP3 streams into PCM audio data.
- **`Speaker`**: Sends PCM audio data to the speakers.
### Streaming Audio to Peers
The central function in this system is `startStream(URL)`, which handles streaming the audio data from a specified YouTube URL and broadcasting it to all connected peers in the Hyperswarm network:
```js
function startStream(URL) {
  const audioStream = stream(URL).pipe(decoder());

  // Broadcast each decoded chunk to every connected peer
  audioStream.on('data', data => {
    for (const conn of conns) {
      conn.write(data);
    }
  });

  // Recreate the speaker if a previous stop closed it, then play locally
  if (!audioPlayer.writable) {
    audioPlayer = new Speaker({
      channels: 2,
      bitDepth: 16,
      sampleRate: 44100,
    });
  }
  audioStream.pipe(audioPlayer);
  isPlaying = true;
}
```
- The **YouTube audio** is streamed, decoded, and then piped to both the connected peers and the local speaker.
- The **`audioStream.on('data')`** listener pushes the audio data to every connected peer via the `conn.write(data)` function.
### Handling the Hyperswarm Network
The P2P backbone of the system is powered by Hyperswarm. Each peer connects to a "swarm" (a decentralized group of peers), where they can exchange audio data:
```js
const swarm = new Hyperswarm();
gracefulGoodbye(() => swarm.destroy());

// Shared state used by the streaming code: open peer connections and playback status
const conns = [];
let isPlaying = false;
```
Peers are identified using public keys, and connections are managed through the following code block:
```js
swarm.on('connection', conn => {
  const name = b4a.toString(conn.remotePublicKey, 'hex');
  console.log(`* got a connection from ${name} (${USERNAME}) *`);

  // If audio is already playing, start streaming to the new peer too
  if (isPlaying) {
    startStream();
  }

  conns.push(conn);
  conn.once('close', () => conns.splice(conns.indexOf(conn), 1));

  conn.on('data', data => {
    if (data.length === 0) {
      // A zero-length payload is the global "stop" signal
      for (const conn of conns) {
        conn.write(`Stopping on all Peers`);
      }
      audioPlayer.end();
      isPlaying = false;
    } else {
      try {
        // Recreate the speaker if it was closed, then play the received audio
        if (!audioPlayer.writable) {
          audioPlayer = new Speaker({
            channels: 2,
            bitDepth: 16,
            sampleRate: 44100,
          });
          isPlaying = true;
        }
        audioPlayer.write(data);
      } catch (err) {
        if (err.code === "ERR_STREAM_WRITE_AFTER_END") {
          console.log("The stream has already ended, cannot write data.");
        } else {
          throw err;
        }
      }
    }
  });
});
```
Each connection is a two-way channel, where peers can receive and transmit audio data. If the stream is already playing, it starts streaming to new connections immediately. The code also ensures that when a peer disconnects, the connection is removed from the list.
### Streaming Control: Play and Stop
Control over the stream is achieved using commands entered by the user in the terminal. The code listens for two primary commands: `!play` and `!stop`.
- **`!play`**: Starts the stream from the given URL and broadcasts it to all peers.
- **`!stop`**: Stops the current stream and notifies all peers to stop as well.
```js
// Read commands from stdin (rl comes from Node's built-in readline module)
const readline = require('readline');
const rl = readline.createInterface({ input: process.stdin, output: process.stdout });

rl.on('line', input => {
  if (input.startsWith('!play')) {
    let dataInfo = input.split(" ");
    let URL = dataInfo[1];
    startStream(URL);
  }
  if (input === '!stop') {
    if (isPlaying) {
      audioPlayer.end();
      stopStream();
      // Prepare a fresh speaker for the next !play
      audioPlayer = new Speaker({
        channels: 2,
        bitDepth: 16,
        sampleRate: 44100,
      });
    } else {
      console.log("The stream is already stopped.");
    }
  }
});
```
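The `!stop` handler calls `stopStream()`, which is not shown in this excerpt. Given that the connection handler treats a zero-length payload as the stop signal, a minimal sketch might look like this (an assumption, not the exact source):
```js
// Minimal sketch of stopStream(), assuming the zero-length "stop" convention
// used by the connection handler above.
function stopStream() {
  for (const conn of conns) {
    conn.write(b4a.alloc(0)); // an empty payload tells each peer to stop playback
  }
  isPlaying = false;
}
```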
### Joining a Topic and Connecting Peers
Peers join the same "topic" to connect and share data. A unique topic is generated for each session, and peers can join by sharing the topic key:
```js
const topic = process.argv[2] ? b4a.from(process.argv[2], 'hex') : crypto.randomBytes(32);
const discovery = swarm.join(topic, { client: true, server: true });

discovery.flushed().then(() => {
  console.log(`joined topic: ${b4a.toString(topic, 'hex')}`);
  console.log("(Share this key to others so they may join)");
  console.log("To Play a youtube link, use !play LINKHERE to stop !stop");
  console.log("All commands are global to all peers");
});
```
### Wrapping Up
This P2P audio streaming code provides a powerful way to decentralize live audio broadcasting. By leveraging Hyperswarm for peer-to-peer connections, the system eliminates the need for a central server, making it resilient, scalable, and cost-effective. Users can easily stream YouTube audio to any number of peers in the network, and each peer can share the audio data with others.
### This is Just the Beginning: Expanding Audio Sources and Use Cases
The beauty of this system lies in its flexibility—while the current implementation streams audio from YouTube, this concept can easily be extended to stream audio from a variety of sources. Each source could serve different purposes, opening up a wealth of use cases across industries. Below, we explore some of the diverse audio sources that could be integrated into the system, along with potential functions and real-world applications.
#### 1. **Streaming from YouTube and Other Online Media Platforms**
As the code currently does, streaming audio from YouTube is a simple yet effective way to broadcast any audio content that is hosted online. This could be extended to other platforms, such as Vimeo, SoundCloud, or even custom URLs from media servers.
**Use Cases**:
- **Live Podcasts**: Hosts could broadcast episodes directly from platforms like YouTube or SoundCloud.
- **Music Streaming**: Users could share their favorite tracks or playlists, turning the P2P network into a decentralized music sharing platform.
- **Educational Content**: Stream audio from educational videos or lectures available on platforms like YouTube, providing a collaborative learning environment for students or study groups.
#### 2. **Microphone Input (Live Audio)**
Another powerful feature would be to integrate real-time audio streaming from a user's microphone. This would allow for live, dynamic content creation, such as broadcasts, live commentary, or even interactive conversations.
**Use Cases**:
- **Live DJ Sets**: Musicians or DJs could use the platform to broadcast live sets or performances, where listeners across the globe tune in without needing a central server.
- **Live Q&A Sessions or Webinars**: Professionals could host live Q&A sessions or webinars where attendees can join the audio stream to listen, ask questions, or participate.
- **Community Radio**: Users could create their own community-based radio stations, transmitting live shows to peers.
#### 3. **Local Audio Files**
Instead of streaming from an online platform, the system could be expanded to stream locally stored audio files (e.g., MP3, WAV). This would allow users to share personal audio collections, playlists, or even previously recorded broadcasts with peers (a brief sketch follows the use cases below).
**Use Cases**:
- **Personal Music Sharing**: Users could stream their own music collection to their peers, turning the platform into a decentralized version of services like Spotify.
- **Audiobook Sharing**: Users could broadcast audiobooks stored on their local devices, ideal for creating a P2P audiobook club or study group.
- **Custom Soundtracks**: Independent artists could share their work directly with listeners, bypassing traditional streaming platforms and maintaining control over distribution.
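To make the local-file idea concrete, here is a hypothetical sketch that reuses the article's decoder/speaker pipeline to broadcast a local MP3 instead of a YouTube URL (`startLocalStream` is not part of the original code):
```js
// Hypothetical extension: broadcast a locally stored MP3 to all peers,
// reusing the same decode -> broadcast -> local playback pipeline.
function startLocalStream(filePath) {
  const audioStream = fs.createReadStream(filePath).pipe(decoder());
  audioStream.on('data', data => {
    for (const conn of conns) {
      conn.write(data); // send decoded PCM to every connected peer
    }
  });
  audioStream.pipe(audioPlayer); // play locally as well
  isPlaying = true;
}
```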
#### 4. **Radio Station Integration**
Incorporating traditional radio station streams could transform the P2P system into a global platform for sharing and redistributing radio content. Many radio stations already stream their broadcasts online, which can be piped into the P2P network for redistribution among peers.
**Use Cases**:
- **Global Access to Local Radio**: Users can stream local radio stations from their country and allow peers worldwide to listen in, bringing localized content to a global audience.
- **Talk Shows and News Broadcasts**: Political talk shows, news broadcasts, or even live sports commentary can be streamed and shared globally, giving access to a wider range of content.
#### 5. **Audio from Streaming APIs**
Streaming APIs such as Spotify's Web API, Apple Music's API, or even real-time data from live event platforms could be utilized to fetch and stream audio dynamically based on user input or pre-configured playlists.
**Use Cases**:
- **Dynamic Playlists**: The system could automatically stream music based on user-defined parameters, pulling tracks from services like Spotify and distributing them to peers.
- **Live Sports or Event Commentary**: Audio streams from live events could be captured through an API and shared in real-time, allowing users to tune in and listen to live commentary or event coverage.
#### 6. **Live Audio Feeds (Surveillance, Public Announcements)**
Another potential application is integrating live audio feeds from different environments or systems. This could include live surveillance audio for security purposes or public announcement systems for large events.
**Use Cases**:
- **Security Surveillance**: In a security-focused environment, audio feeds from various locations could be streamed to connected peers, allowing real-time monitoring.
- **Event PA Systems**: Public announcement systems at large events (conferences, music festivals, etc.) could stream live audio to all attendees via P2P technology, ensuring seamless information distribution.
#### 7. **VoIP (Voice over IP)**
By integrating VoIP protocols, the system could facilitate real-time peer-to-peer voice communication, similar to services like Skype or Discord, but with the added benefit of decentralized infrastructure.
**Use Cases**:
- **Group Voice Chat**: Peers could communicate in real-time using voice chat without relying on centralized servers, ideal for gaming, virtual meetups, or team collaboration.
- **Decentralized Call Center**: Businesses could set up a decentralized call center where customer service representatives communicate with customers via P2P VoIP, reducing server costs and improving privacy.
### Advantages of Decentralizing Audio Streaming
The decentralized nature of this P2P audio streaming system offers several key advantages:
- **Cost Efficiency**: No need for expensive server infrastructure. Each peer contributes to the network, sharing bandwidth and resources.
- **Scalability**: The system grows organically as more peers join. Each new peer helps distribute the load of streaming audio, allowing for near-limitless scalability.
- **Resilience**: Without a central server, there is no single point of failure. The system remains operational even if some peers disconnect or fail.
- **Privacy**: Since the audio data is shared directly between peers, it bypasses traditional content distribution networks, giving users more control over their data and improving privacy.
### Final Thoughts: A Flexible and Expandable System
This P2P audio streaming system, though initially designed for streaming YouTube audio, is a highly flexible framework that can be adapted to stream from various sources. Whether it's live broadcasts from a microphone, local audio files, radio streams, or VoIP, the concept can evolve to meet different needs. The potential use cases range from entertainment (music sharing and live DJ sets) to education (webinars, podcasts) and even security (surveillance audio feeds).
By expanding the types of audio sources supported and integrating new functionalities, this P2P framework can become a robust, decentralized platform for streaming and sharing audio data in ways that traditional, centralized systems cannot match. Whether you're a developer, artist, or enthusiast, the opportunities are endless, making this system a powerful tool for real-time, decentralized communication.


@ -0,0 +1,248 @@
## Deep Dive into RayAI Chat Bot User-Installable Version
<!-- lead -->
Enabling users to use RayAI within any Channel on Discord.
This deep dive will explain how I built a user-installable version of the RayAI Chat Bot, highlighting key aspects such as the bot's architecture, command handling, user privacy settings, and interaction with APIs. The bot integrates with Discord using its Slash Commands interface and makes use of Axios to send requests to a backend service. Below is a detailed walkthrough of the components that make this bot user-installable and functional.
# Source:
## https://git.ssh.surf/snxraven/rayai/src/branch/main/bot/installableApp-groq.js
### Project Structure and Dependencies
Before diving into specific sections of the code, here's an overview of the main components and dependencies used:
```javascript
const { Client, GatewayIntentBits, REST, Routes, EmbedBuilder, SlashCommandBuilder } = require('discord.js');
const axios = require('axios');
const he = require('he');
const fs = require('fs');
require('dotenv').config();
const { userResetMessages } = require('./assets/messages.js');
```
- **Discord.js**: Provides the essential framework to interact with Discord. Key classes such as `Client`, `GatewayIntentBits`, `REST`, `Routes`, and `EmbedBuilder` manage communication between the bot and Discord's API.
- **Axios**: Used to send HTTP requests to external services, particularly the RayAI backend, handling operations like sending user messages and resetting conversations.
- **`he`**: A library for encoding HTML entities, ensuring that user inputs are safely transmitted over HTTP.
- **File System (fs)**: Utilized to store and retrieve user privacy settings, allowing the bot to persist data between sessions.
- **dotenv**: Manages sensitive information like tokens and API paths by loading environment variables from a `.env` file.
### Discord Client Initialization
The bot is instantiated using the `Client` class, specifically configured to only handle events related to guilds (`GatewayIntentBits.Guilds`). This limits the bot's scope to only manage messages and interactions within servers:
```javascript
const client = new Client({
  intents: [GatewayIntentBits.Guilds]
});
```
### Managing User Privacy Settings
The bot includes a privacy feature where users can toggle between ephemeral (private) and standard (public) responses. This is achieved through the `userPrivacySettings.json` file, which stores these preferences:
```javascript
// Load or initialize the user privacy settings
const userPrivacyFilePath = './userPrivacySettings.json';
let userPrivacySettings = {};

if (fs.existsSync(userPrivacyFilePath)) {
  userPrivacySettings = JSON.parse(fs.readFileSync(userPrivacyFilePath));
}

// Save the user privacy settings
function saveUserPrivacySettings() {
  fs.writeFileSync(userPrivacyFilePath, JSON.stringify(userPrivacySettings, null, 2));
}
```
The privacy settings are initialized by checking whether the `userPrivacySettings.json` file exists. If it does, the bot loads the settings into memory; otherwise, the bot starts with an empty settings object and the file is created on the first save. The `saveUserPrivacySettings()` function persists updates whenever a user toggles their privacy setting.
### Slash Command Registration with Extras
The bot supports four commands:
- `/reset`: Resets the current conversation with the AI.
- `/restartcore`: Restarts the core service.
- `/chat`: Sends a user message to the AI.
- `/privacy`: Toggles between ephemeral (private) and standard (public) responses.
The commands are defined using `SlashCommandBuilder` and registered with Discord using the REST API:
```javascript
const commands = [
  new SlashCommandBuilder().setName('reset').setDescription('Reset the conversation'),
  new SlashCommandBuilder().setName('restartcore').setDescription('Restart the core service'),
  new SlashCommandBuilder().setName('chat').setDescription('Send a chat message')
    .addStringOption(option =>
      option.setName('message')
        .setDescription('Message to send')
        .setRequired(true)),
  new SlashCommandBuilder().setName('privacy').setDescription('Toggle between ephemeral and standard responses')
].map(command => {
  const commandJSON = command.toJSON();
  const extras = {
    "integration_types": [0, 1], // 0 for guild, 1 for user
    "contexts": [0, 1, 2] // 0 for guild, 1 for app DMs, 2 for GDMs and other DMs
  };
  Object.keys(extras).forEach(key => commandJSON[key] = extras[key]);
  return commandJSON;
});

// Register commands with Discord
const rest = new REST({ version: '10' }).setToken(process.env.THE_TOKEN_2);

client.once('ready', async () => {
  try {
    console.log(`Logged in as ${client.user.tag}!`);
    await rest.put(Routes.applicationCommands(process.env.DISCORD_CLIENT_ID), { body: commands });
    console.log('Successfully registered application commands with extras.');
  } catch (error) {
    console.error('Error registering commands: ', error);
  }
});
```
The bot registers the commands upon startup using the `rest.put()` method. Note that each command includes extra metadata for `integration_types` (whether the command is for guild or user contexts) and `contexts` (indicating where the command applies: guilds, direct messages, etc.).
### Handling User Interactions
Each command has its respective handler, triggered when a user interacts with the bot:
```javascript
client.on('interactionCreate', async interaction => {
  if (!interaction.isCommand()) return;

  const { commandName, options } = interaction;

  if (commandName === 'reset') {
    return await resetConversation(interaction);
  } else if (commandName === 'restartcore') {
    await restartCore(interaction);
  } else if (commandName === 'chat') {
    const content = options.getString('message');
    await handleUserMessage(interaction, content);
  } else if (commandName === 'privacy') {
    await togglePrivacy(interaction);
  }
});
```
Each interaction is checked for its command name, and the corresponding function is invoked:
- **`resetConversation()`**: Sends a POST request to the RayAI backend to reset the current conversation.
- **`restartCore()`**: Sends a POST request to restart the core service.
- **`handleUserMessage()`**: Sends the user's message to the backend for processing, encodes it using `he.encode()` for safety, and handles the bot's typing indicators and replies.
- **`togglePrivacy()`**: Toggles the user's privacy setting between ephemeral and standard responses, storing this preference for future use.
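None of these handlers appear in this excerpt; as one example, a minimal sketch of `resetConversation()` might look like the following (the endpoint path and payload here are assumptions, not the bot's actual configuration):
```javascript
// Sketch of resetConversation(); the /reset path and payload are assumed.
async function resetConversation(interaction) {
  try {
    await axios.post(`${process.env.API_PATH}/reset`, { user: interaction.user.id });
    await interaction.reply({ content: 'Conversation reset.', ephemeral: true });
  } catch (error) {
    console.error('Error resetting conversation: ', error);
    await interaction.reply({ content: 'Could not reach the backend.', ephemeral: true });
  }
}
```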
### Privacy Toggle
The `/privacy` command allows users to control whether their responses are private or visible to everyone:
```javascript
async function togglePrivacy(interaction) {
  const userId = interaction.user.id;
  const currentSetting = userPrivacySettings[userId] || false;
  userPrivacySettings[userId] = !currentSetting; // Toggle the setting
  saveUserPrivacySettings();

  const message = userPrivacySettings[userId]
    ? 'Your responses are now set to ephemeral (visible only to you).'
    : 'Your responses are now standard (visible to everyone).';
  await interaction.reply({ content: message, ephemeral: true });
}
```
This function checks the current privacy setting for the user, toggles it, and saves the updated setting. A response is then sent to the user, confirming the new privacy mode.
### Sending Long Messages
The bot can handle large responses from the AI by splitting them into chunks and sending them in sequence:
```javascript
async function sendLongMessage(interaction, responseText) {
  const limit = 8096;

  if (responseText.length > limit) {
    const lines = responseText.split('\n');
    const chunks = [];
    let currentChunk = '';

    for (const line of lines) {
      if (currentChunk.length + line.length > limit) {
        chunks.push(currentChunk);
        currentChunk = '';
      }
      currentChunk += line + '\n';
    }
    if (currentChunk.trim() !== '') {
      chunks.push(currentChunk.trim());
    }

    if (chunks.length >= 80) return await interaction.reply({ content: "Response chunks too large. Try again", ephemeral: isEphemeral(interaction.user.id) });

    for (let i = 0; i < chunks.length; i++) {
      const chunk = chunks[i];
      const embed = new EmbedBuilder()
        .setDescription(chunk)
        .setColor("#3498DB")
        .setTimestamp();
      setTimeout(() => {
        interaction.followUp({
          embeds: [embed],
          ephemeral: isEphemeral(interaction.user.id)
        });
      }, i * (process.env.OVERFLOW_DELAY || 3) * 1000);
    }
  } else {
    const embed = new EmbedBuilder()
      .setDescription(responseText)
      .setColor("#3498DB")
      .setTimestamp();
    interaction.editReply({
      embeds: [embed],
      ephemeral: isEphemeral(interaction.user.id)
    });
  }
}
```
This function ensures that responses longer than Discord's character limit are split and delivered to the user in manageable chunks. Each chunk is sent with a delay to avoid overwhelming the bot or Discord API.
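The chunking logic relies on an `isEphemeral()` helper that is not shown in this excerpt. Given the privacy store described earlier, it plausibly reduces to a simple lookup:
```javascript
// Plausible sketch of isEphemeral(): a user's stored privacy preference
// decides whether replies are visible only to them.
function isEphemeral(userId) {
  return userPrivacySettings[userId] === true;
}
```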
### The Results
The RayAI Chat Bot is a robust, user-centric Discord bot designed to provide seamless interaction with an AI backend, featuring rich customization options for user preferences and flexible handling of both short and long messages. This project exemplifies how powerful a user-installable bot can be when built with attention to detail and user experience.
#### Key Aspects of the Project:
1. **Slash Command Integration**:
The bot leverages Discord's Slash Command API to create an intuitive, user-friendly interface where commands are easy to use and comprehend. The choice of commands—such as `/reset`, `/restartcore`, and `/chat`—ensures that the user has full control over interactions with the AI service. By dynamically registering these commands using Discord's REST API, the bot can be installed and updated without manual intervention, adding to its user-friendly nature.
2. **User Privacy Customization**:
The integration of a privacy toggle function demonstrates the bot's emphasis on user autonomy and personalization. Users can easily switch between public and private message responses via the `/privacy` command, with the bot persisting these settings across sessions using a JSON file. This feature enhances trust and usability, making the bot suitable for varied contexts where privacy may be a concern.
3. **Interaction with External APIs**:
The bot's communication with the AI backend is elegantly handled through Axios, enabling the bot to send user inputs securely and receive intelligent responses. The careful encoding of user messages via `he.encode()` mitigates security risks, such as cross-site scripting (XSS) or injection attacks, ensuring that the bot operates safely even when interacting with untrusted input.
4. **Handling Long Messages**:
The long-message handling functionality solves a common challenge when integrating chatbots into platforms like Discord, which has strict message limits. By intelligently splitting long AI responses into manageable chunks and sending them sequentially, the bot maintains the fluidity of conversation while respecting Discord's limitations. This ensures that users receive complete responses without abrupt truncation, even when dealing with complex or detailed queries.
5. **Automatic Command Registration with Extras**:
The ability to dynamically register commands at startup using the Discord REST API eliminates the need for pre-configuration, making the bot easy to install and update. Additionally, by adding custom `integration_types` and `contexts` to the command registration, the bot can function across various Discord environments (guilds, DMs, etc.), extending its versatility and appeal to a broader user base.
6. **Resilient Error Handling**:
The bot is built with robust error handling, particularly in scenarios where the AI backend may be busy or encounter rate limits. The user is promptly notified if the service is unavailable, and fallback mechanisms ensure that the user experience remains smooth, even in the face of external service issues. This kind of resilience is crucial for any production-grade bot, minimizing downtime and ensuring reliability.
7. **User-Installable Design**:
A key highlight of this project is the bot's user-installable nature. By storing configuration details like tokens and API paths in environment variables and using simple file-based storage for privacy settings, the bot is easy to configure and deploy in any environment. The ability for users to install and manage the bot themselves adds to its flexibility, making it accessible to a wide range of users, from individuals to server administrators looking for a custom chatbot solution.
The RayAI Chat Bot showcases the power of combining modern web technologies like Discord.js, Axios, and file-based storage to create a sophisticated, user-installable chatbot. Its thoughtful features—ranging from user privacy customization to efficient message handling—make it a highly functional tool that serves both casual and professional needs.
This bot is not just a technical achievement but also a product of careful consideration of user needs, focusing on ease of installation, flexibility, and resilience. Whether you're managing a Discord server, interacting with AI, or looking for a chatbot solution that's easy to deploy and scale, the RayAI Chat Bot sets a high standard for future projects in this domain.


@ -0,0 +1,256 @@
<!-- lead -->
Enabling users to use My-MC.Link within any Channel on Discord.
The My-MC.Link Discord bot offers a comprehensive and user-friendly interface for Minecraft server management, enabling users to control server operations from any Discord server. This deep dive explores the technical aspects of the bot, including its architecture, command handling, token management, API integration, and the strategies employed to deliver seamless user interactions. By combining Discord's API with the powerful features of the My-MC.Link service, this bot provides an extensive range of server functionalities in a highly accessible and easily deployable format.
### Project Structure and Key Dependencies
The bot leverages several essential libraries and APIs to deliver its functionality:
```javascript
import { Client, GatewayIntentBits, SlashCommandBuilder, REST, Routes, EmbedBuilder } from 'discord.js';
import jsonfile from 'jsonfile';
import MyMCLib from 'mymc-lib';
import unirest from 'unirest';
import { readFileSync } from 'fs';
import cmd from 'cmd-promise';
```
# Source
## https://git.ssh.surf/hypermc/hypermc-api-user-install-bot
#### Breakdown of Key Dependencies:
- **Discord.js**: This is the backbone of the bot, providing classes like `Client`, `SlashCommandBuilder`, `REST`, and `EmbedBuilder`. These components enable interaction with Discord's API, handling everything from registering commands to managing user interactions and generating rich embeds for responses.
- **jsonfile**: This package manages reading and writing user-specific tokens in JSON format. Storing tokens in a file allows the bot to persist authentication information between sessions, making it unnecessary for users to re-authenticate repeatedly.
- **MyMCLib**: A custom library that acts as a wrapper around the My-MC.Link API, simplifying the process of interacting with the services various endpoints, such as starting or stopping servers, fetching logs, and managing mods.
- **unirest**: Used to make HTTP requests to the My-MC.Link API, specifically to handle token generation and validation.
- **cmd-promise**: A library that facilitates the execution of shell commands in a promise-based format, used for running server checks and other operational commands, ensuring that server status can be verified before certain actions are performed.
### Discord Client Initialization
The bot initializes the Discord client with a focus on `Guilds`, which makes it a server-centric bot that exclusively handles commands and interactions within Discord servers (as opposed to direct messages):
```javascript
const client = new Client({ intents: [GatewayIntentBits.Guilds] });
```
Limiting the bot's scope to `Guilds` ensures that it manages interactions specific to Minecraft server administration in a controlled environment, reducing unnecessary overhead from other Discord intents.
### Token Management and API Authentication
#### Loading and Saving Tokens
One of the most critical aspects of the bot's design is token management, which authenticates user interactions with the My-MC.Link API. The bot stores tokens in a JSON file (`tokens.json`) and retrieves or refreshes these tokens as necessary.
**Token Loading**:
```javascript
function loadTokens() {
  try {
    return jsonfile.readFileSync(tokensFile);
  } catch (error) {
    console.error('Error reading tokens file:', error);
    return {};
  }
}
```
The `loadTokens()` function reads the `tokens.json` file and returns an object containing user tokens. If the file cannot be read (e.g., it doesn't exist or has been corrupted), an empty object is returned, and the bot can request a new token.
**Token Saving**:
```javascript
function saveTokens(tokens) {
  jsonfile.writeFileSync(tokensFile, tokens, { spaces: 2 });
}
```
The `saveTokens()` function writes the token data back to the `tokens.json` file, ensuring that any new or refreshed tokens are persisted for future use.
#### Automatic Token Retrieval
If a user doesn't have a valid token or their token has expired, the bot automatically requests a new one from the My-MC.Link service using the `fetchAndSaveToken()` function:
```javascript
async function fetchAndSaveToken(userId, interaction) {
  return unirest
    .post(config.endpoint.toString())
    .headers({ 'Accept': 'application/json', 'Content-Type': 'application/json' })
    .send({ "username": `mc_${userId}`, "password": config.password.toString() })
    .then((tokenInfo) => {
      const tokens = loadTokens();
      tokens[userId] = tokenInfo.body.token; // Save the new token
      saveTokens(tokens);
      return tokenInfo.body.token;
    })
    .catch((error) => {
      console.error('Error fetching token:', error);
      sendSexyEmbed("Error", "An error occurred while fetching your API token.", interaction);
      throw error;
    });
}
```
This function performs several key steps:
1. It makes a POST request to the My-MC.Link API, sending the user's credentials to request a new token.
2. On success, it loads the existing tokens, updates the token for the user, and saves the updated tokens to the `tokens.json` file.
3. If there's an error (e.g., the API is down or the request fails), it logs the error and provides feedback to the user via a rich embed.
#### Token Re-Validation and Re-Fetching
Once a token is stored, the bot checks its validity and, if necessary, automatically fetches a new token when making API calls:
```javascript
async function getToken(userId, interaction) {
  const tokens = loadTokens();
  if (!tokens[userId]) {
    return await fetchAndSaveToken(userId, interaction);
  }
  return tokens[userId];
}

async function handleApiCall(apiCall, userId, interaction) {
  try {
    return await apiCall();
  } catch (error) {
    console.error('Token error, re-fetching token...');
    await fetchAndSaveToken(userId, interaction);
    return await apiCall();
  }
}
```
Here's what happens:
1. **getToken**: This function checks if a token exists for the user in `tokens.json`. If no token is found, it calls `fetchAndSaveToken()` to retrieve and save a new one.
2. **handleApiCall**: Wraps any API call to handle invalid tokens by retrying the request after fetching a new token. If a token has expired or there is any issue with authentication, the bot fetches a fresh token and retries the request.
### Command Registration with Discord
The bot uses Discord's `SlashCommandBuilder` to define a series of commands that allow users to interact with their Minecraft servers. These commands are registered with Discord using the REST API:
```javascript
const commands = [
  new SlashCommandBuilder().setName('server-stats').setDescription('Get the server statistics'),
  new SlashCommandBuilder().setName('server-log').setDescription('Get the server log'),
  new SlashCommandBuilder().setName('start-server').setDescription('Start the Minecraft server'),
  new SlashCommandBuilder().setName('stop-server').setDescription('Stop the Minecraft server'),
  new SlashCommandBuilder().setName('restart-server').setDescription('Restart the Minecraft server'),
  // Additional commands...
];

// Convert the builders to plain JSON payloads for registration
const JSONCommands = commands.map(command => command.toJSON());

// Register commands with Discord
const rest = new REST({ version: '10' }).setToken(config.token);

(async () => {
  try {
    console.log('Started refreshing application (/) commands.');
    await rest.put(Routes.applicationCommands(config.clientId), { body: JSONCommands });
    console.log('Successfully reloaded application (/) commands.');
  } catch (error) {
    console.error(error);
  }
})();
```
Each command is defined with a name and description using `SlashCommandBuilder`, which simplifies the process of adding new commands. These commands are then registered with Discord's API, ensuring they are available for use within the server.
### Handling User Interactions
When users invoke commands, the bot listens for interaction events and routes the request to the appropriate function based on the command name:
```javascript
client.on('interactionCreate', async interaction => {
  if (!interaction.isCommand()) return;

  const userId = interaction.user.id;
  const apiToken = await getToken(userId, interaction);
  const MyMC = new MyMCLib(apiToken);

  switch (interaction.commandName) {
    case 'server-stats':
      const stats = await handleApiCall(() => MyMC.getStats(), userId, interaction);
      handleResponse(stats, interaction);
      break;
    case 'start-server':
      const startResult = await handleApiCall(() => MyMC.startServer(), userId, interaction);
      handleResponse(startResult, interaction);
      break;
    case 'stop-server':
      const stopResult = await handleApiCall(() => MyMC.stopServer(), userId, interaction);
      handleResponse(stopResult, interaction);
      break;
    // Other commands...
  }
});
```
Each command is mapped to an API call using the `MyMCLib` library. The bot interacts with the Minecraft server via authenticated requests, and responses are processed and displayed back to the user.
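The `handleResponse()` helper is not shown in this excerpt; a minimal sketch, assuming it simply forwards the API result (or an error notice) through the embed helpers described in the next section:
```javascript
// Sketch of handleResponse() (assumed, not the bot's exact code): relay the
// API result, or an error notice, to the user as a styled embed.
function handleResponse(result, interaction) {
  if (!result || result.error) {
    sendSexyEmbed('Error', (result && result.error) || 'The API returned no data.', interaction);
  } else {
    sendSexyEmbed('Result', JSON.stringify(result, null, 2), interaction);
  }
}
```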
### Sending Responses and Embeds
One of the standout features of this bot is its use of rich embeds for displaying information to users. These embeds provide a visually appealing way to present data such as server statistics, logs, or mod information.
#### Simple Embeds
For single-field responses, the bot sends a simple embed with a title and description:
```javascript
function sendSexyEmbed(title, description, interaction, ephemeral = false) {
  const embed = new EmbedBuilder()
    .setColor("#3498DB")
    .setTitle(title)
    .setDescription(description)
    .setTimestamp()
    .setFooter({
      text: `Requested by ${interaction.user.username}`,
      iconURL: `${interaction.user.displayAvatarURL()}`
    });

  interaction.reply({
    embeds: [embed],
    ephemeral: ephemeral
  });
}
```
This function ensures that every response is styled with consistent colors, timestamps, and user information.
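For example, a command handler could confirm a successful action with a single call (hypothetical usage):
```javascript
// Hypothetical usage: confirm a server start with the shared embed style.
sendSexyEmbed('Server Starting', 'Your Minecraft server is starting up...', interaction);
```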
#### Complex Embeds with Multiple Fields
For more complex responses (such as server stats or mod lists), the bot generates an embed with multiple fields:
```javascript
function sendSexyEmbedWithFields(title, description, fields, interaction, ephemeral = false) {
  const embed = new EmbedBuilder()
    .setColor("#3498DB")
    .setTitle(title)
    .setDescription(description !== "N/A" ? description : undefined)
    .addFields(fields)
    .setTimestamp()
    .setFooter({
      text: `Requested by ${interaction.user.username}`,
      iconURL: `${interaction.user.displayAvatarURL()}`
    });

  interaction.reply({
    embeds: [embed],
    ephemeral: ephemeral
  });
}
```
This method allows the bot to handle more detailed responses, such as server resource usage, mod lists, and player data.
### Error Handling and Resilience
A critical aspect of the bot's design is its resilience in the face of errors, particularly around token validation and API requests. The bot gracefully handles errors by attempting to fetch a new token and retrying the request. Additionally, the bot provides users with feedback through embeds when something goes wrong, keeping them informed without the need for manual intervention.
### Final Thoughts: A Comprehensive Minecraft Server Management Bot
The My-MC.Link Discord bot is a sophisticated and powerful tool for Minecraft server management, offering a seamless integration with Discord that enables users to perform server tasks through a familiar interface. Its use of modern technologies such as `discord.js`, `MyMCLib`, and JSON-based token management ensures that the bot is both scalable and user-friendly. The automatic token handling, rich embeds, and wide range of server management commands make the bot an invaluable tool for any My-MC.Link user.
From a technical standpoint, the bot demonstrates how effective integration of Discord with external services can lead to a highly functional and interactive user experience. With the ability to automate token management, handle complex API interactions, and deliver visually appealing feedback, this bot sets a high standard for user-installable applications within the Discord ecosystem.


@ -0,0 +1,419 @@
<!-- lead -->
Breaking Boundaries with a Decentralized, Firewall-Resistant Peer-to-Peer DNS System in Node.js
DNS is an essential service that translates human-readable domain names into IP addresses. However, it's currently governed by centralized authorities, requiring domain registration and payments for top-level domains (TLDs). This blog post dives into a revolutionary DNS system implemented in Node.js that leverages peer-to-peer (P2P) networking and UDP hole-punching to create a decentralized, firewall-resistant DNS. This system removes the need for traditional registries and provides users with complete control over their TLDs.
```bash
[DEBUG 2024-10-14T21:04:20.875Z] Public DNS returned records for www.google.com
[DEBUG 2024-10-14T21:04:28.465Z] DNS query received: Domain = example.tld, Type = A
Virtual interface dummy2 created with CIDR 192.168.100.2.
[DEBUG 2024-10-14T21:04:28.527Z] Assigned virtual interface IP: 192.168.100.2 for domain: example.tld
[DEBUG 2024-10-14T21:04:28.528Z] Starting Holesail client for domain: example.tld, hash: 8a5b90945f8fbd5d1b620be3c888a47aaae20706a7f140be4bfa0df9e0dbcf38, IP: 192.168.100.2, Port: 80
```
[![screenshot](https://git.ssh.surf/snxraven/p2ns/raw/branch/main/images/sc.webp)](https://git.ssh.surf/snxraven/p2ns/raw/branch/main/images/sc.webp)
# Source:
## https://git.ssh.surf/snxraven/p2ns
### The Beginnings
This P2P DNS system represents a conceptual breakthrough in how DNS can operate. However, it's essential to recognize that the current code is a **proof of concept** and not yet fully equipped for large-scale or production use. As with any pioneering technology, there are significant challenges to address and features to refine.
One of the primary areas that require attention is **ensuring unique TLD registration across peers**. Currently, the system allows any peer to add a TLD and associated hash to the `dnsCore`, but without a centralized authority or a consensus mechanism, there's nothing preventing multiple users from registering the same TLD. This can lead to conflicts where different peers assign different IPs or hashes to the same TLD, creating ambiguity and potential security risks.
Another issue is **data integrity and synchronization**. In decentralized systems, especially those that operate asynchronously like this one, there's always a chance that data may go out of sync. If peers don't replicate updates promptly, they might serve outdated or conflicting DNS records. A more robust method for maintaining consistency, perhaps through periodic verification or consensus models, would enhance reliability.
**Network resilience and performance** also pose challenges. Although Hyperswarm facilitates P2P connectivity, peer connections can be unpredictable, particularly in large, dispersed networks. Connection stability might vary based on network conditions, and without an efficient way to verify peer connectivity, the user experience could degrade over time.
### Potential Issues to Address
Other challenges that might arise in the development of a fully functional P2P DNS system include:
- **Security and Authentication**: Without a centralized verification process, it's difficult to authenticate peers and ensure only authorized peers participate in a private DNS network. A secure authentication layer or peer verification process would be necessary to prevent unauthorized access.
- **Redundancy and Conflict Resolution**: In cases where the same TLD exists with different hashes, a conflict-resolution mechanism would be necessary to determine which entry is legitimate. This might involve a voting system or trust-based model where peers agree on the correct record.
- **Network Performance Overhead**: As the network grows, synchronizing large amounts of data could impact performance, especially for peers with limited bandwidth. Optimizations around data storage, compression, and bandwidth management would be needed to handle network scalability.
- **Handling Malicious Peers**: In a decentralized network, there's always the risk of malicious actors attempting to flood the network with junk records or hijack TLDs. Implementing trust models, perhaps reputation-based or verified peers, could mitigate this risk.
The current code lays the foundation for a decentralized DNS system that could, in time, replace the need for centralized registries. However, for it to become a robust, production-ready solution, these challenges must be overcome through further research, testing, and community collaboration. As this concept evolves, it has the potential to redefine digital identity, providing a truly autonomous DNS model thats both resilient and accessible.
## The Concept: Decentralized DNS with UDP Hole-Punching
At the heart of this system is the integration of decentralized technologies with resilient connection strategies, primarily UDP hole-punching. This P2P DNS uses Holesail for tunneling and Hyperswarm for establishing peer connections, creating seamless communication between peers across diverse network environments.
Handling DNS requests through a decentralized P2P framework fundamentally alters the dynamics of DNS:
- Centralized DNS servers are no longer required.
- Domain registries become irrelevant, as the system allows for self-created TLDs.
- Domain purchases and the limitations of traditional TLD structures are eliminated.
This system redefines DNS, empowering individuals and organizations alike to bypass conventional limitations and establish their own namespaces and domains without dependence on outside entities.
## Technical Ramifications of a Decentralized DNS System
Enhanced resilience and uptime come from distributing DNS records across multiple peers. Traditional DNS relies on centralized servers, meaning if one server goes down or is compromised, access for large numbers of users is affected. With a decentralized model, records are distributed across many peers, creating a self-healing, resilient network with no single point of failure. An attack on one peer or group of peers has little to no effect on the network as a whole. By decentralizing the DNS infrastructure, uptime is maximized even if some peers are offline, as remaining peers continue serving the DNS records without interruption.
UDP hole-punching is a critical feature enabling P2P DNS to function effectively across restrictive networks. Firewalls, NATs, and CGNAT configurations—often found in mobile networks (4G, 5G) and satellite Internet (like Starlink)—typically prevent or limit incoming connections. UDP hole-punching establishes a direct link between peers by creating connection paths through intermediaries, allowing traffic to flow regardless of network restrictions. This makes the system an ideal DNS solution for users in restrictive environments where traditional DNS services may be blocked or monitored. The increased accessibility enables users on restricted networks to participate freely in the DNS network without needing VPNs or additional circumvention tools.
Local IP assignment offers streamlined peer-to-peer interaction, with IPs dynamically assigned to each domain in the range `192.168.100.x`. Each domain is mapped to a virtual interface within the system's subnet, keeping traffic on local addresses in the same spirit as `127.0.0.1`. This approach enhances security by isolating domain traffic within the network, preventing direct IP exposure to external sources. Local traffic stays secure and private, and DNS requests are automatically handled within the network, making the system easier to use for both technical and non-technical users.
## Security and Privacy Implications
This decentralized DNS model also opens up new possibilities for security and privacy. Unlike traditional DNS, which often involves ISP-based logging, centralized tracking, and even DNS hijacking, a P2P DNS creates a privacy-centric, user-controlled environment.
Privacy becomes inherent in this model as users retain control over DNS queries, shielding themselves from ISP surveillance and bypassing centralized logging. DNS queries remain private and inaccessible to third-party trackers, thanks to Holesail's encrypted, peer-to-peer tunneling. Traffic stays encrypted and direct between peers, making it far less vulnerable to interception or manipulation compared to conventional DNS systems.
Eliminating the need for domain ownership removes the barriers associated with central registries and domain purchasing. No longer bound by regulatory bodies or commercial interests, users can register their own TLDs and subdomains on demand, within the P2P DNS network. This opens up vast possibilities, from small-scale personal projects to enterprise-level applications, without the need to purchase domain names or navigate the policies of registries.
The structure of this DNS system brings freedom back into the hands of the user, allowing self-regulated control over DNS queries and reducing reliance on ISPs or DNS providers. Users can establish TLDs for internal use, private communication, and personal networks, expanding traditional DNS concepts into a private or exclusive P2P namespace.
## Use Cases: How the P2P DNS System Can Be Applied
This system has applications that extend beyond the technical enthusiast community, with potential to transform enterprise, IoT, and digital rights domains.
In the enterprise sector, organizations often need internal DNS solutions that don't rely on public infrastructure. This P2P DNS system allows businesses to set up secure, firewall-resistant internal DNS namespaces without involving third-party registries or external providers. Not only does this reduce costs, but it also provides a high level of control and customization for internal domains, from resource isolation to custom TLD configurations.
For IoT ecosystems, which consist of numerous connected devices often spread across restricted networks, a decentralized DNS provides an efficient, scalable solution for addressing devices without public IPs. Devices can be registered and managed within a private P2P DNS, accessible through firewall-resistant connections that work even in highly restrictive environments.
Digital rights and censorship-resistant internet advocates stand to benefit greatly from decentralized DNS. Traditional DNS servers can be targeted for censorship, either by disabling access to certain domains or by redirecting users to unwanted sites. With P2P DNS, censorship becomes practically impossible, as there is no single server to target or manipulate. This system empowers users to create and distribute content without the threat of government or corporate suppression.
## How This System Redefines Freedom in the DNS Landscape
Beyond the technical benefits, the freedom this system provides to global users is unparalleled. Removing the requirement to purchase or register domains is not just a cost-saving measure—it's a liberation of identity and accessibility in the digital world. This system enables everyone, from independent users to small organizations, to create their own namespaces without restrictions. In essence, anyone can launch a TLD or subdomain network, using their preferred naming structures with no oversight.
By establishing a framework where domain registries are optional rather than mandatory, this system bypasses the regulatory and economic gatekeepers traditionally associated with the DNS. It provides a level of freedom that is particularly empowering for communities in restrictive environments, enabling access to an open DNS system that operates independently of conventional controls.
With this P2P DNS system, the ability to create and maintain digital identities is democratized. Users around the world can reclaim control over their namespaces, creating a more open, resilient, and censorship-resistant Internet. This redefines what it means to have access to the digital world, with a new level of autonomy and security, all powered by decentralized technology.
## Code Walkthrough: Peer Discovery, DNS, and HTTP Proxying in a P2P Network
This implementation involves components for peer discovery, DNS resolution, virtual networking, and HTTP proxying, integrating `Hyperswarm` and `Corestore` to achieve a decentralized DNS system. Here's an in-depth analysis:
### Loading Environment Variables and Dependencies
The code initializes environment variables with `dotenv`, allowing flexibility in configuring settings like `masterNetworkDiscoveryKey`. This key enables peers to create isolated networks, ensuring only peers with the matching key can discover each other.
Dependencies:
- **`exec`** for executing shell commands.
- **`dgram`** for UDP socket communication, essential for DNS operations.
- **`dns-packet`** to handle DNS packet creation and decoding.
- **`HolesailClient`** to manage P2P tunneling, essential for connecting domains over a decentralized network.
- **`Corestore`** for distributed storage, managing and syncing domain records among peers.
- **`Hyperswarm`** for peer discovery and creating P2P connections.
- **`http`** to handle HTTP requests.
- **`crypto`** to manage hashing for secure key generation and identification.
```javascript
require('dotenv').config();
const { exec } = require('child_process');
const dgram = require('dgram');
const dnsPacket = require('dns-packet');
const HolesailClient = require('holesail-client');
const Corestore = require('corestore');
const Hyperswarm = require('hyperswarm');
const http = require('http');
const { createHash } = require('crypto');
const net = require('net');
```
### Corestore and Hyperswarm Configuration
Here, `Corestore` and `Hyperswarm` are set up to manage decentralized DNS entries and establish peer-to-peer connections, respectively.
1. **Corestore** serves as a distributed key-value store that allows DNS records to be synchronized across peers.
2. **Hyperswarm** is initialized for P2P discovery, using a unique topic derived from `masterNetworkDiscoveryKey` or `dnsCore.discoveryKey`. Only peers with matching topics can join the same swarm, allowing for secure data replication.
```javascript
const store = new Corestore('./my-storage');
const swarm = new Hyperswarm();
const masterNetworkDiscoveryKey = process.env.masterNetworkDiscoveryKey
  ? Buffer.from(process.env.masterNetworkDiscoveryKey, 'hex')
  : null;
const dnsCore = store.get({ name: 'dns-core' });
```
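A sketch of how these pieces plausibly connect, following the description above (the exact wiring in the full source may differ):
```javascript
// Join the swarm on the shared topic and replicate dns-core with each peer.
// (Sketch based on the description above, not the exact source.)
dnsCore.ready().then(() => {
  const topic = masterNetworkDiscoveryKey || dnsCore.discoveryKey;
  swarm.join(topic, { client: true, server: true });
});
swarm.on('connection', (conn) => store.replicate(conn)); // sync records with peers
```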
### Virtual Network Interfaces for Domain Isolation
Domains are mapped to unique local IP addresses within the `192.168.100.x` subnet. This setup isolates each domain by assigning a virtual network interface for each, thus preventing conflict across domains.
- **`removeExistingInterface`**: Removes any existing virtual interface to avoid conflicts.
- **`createVirtualInterface`**: Uses `ifconfig` to create a local network alias, ensuring each domain has a dedicated IP.
- **`createInterfaceForDomain`**: Manages the IP assignment, incrementing the `currentIP` for each new domain.
```javascript
async function createVirtualInterface(subnetName, subnetCIDR) {
  await removeExistingInterface(subnetName);
  return new Promise((resolve, reject) => {
    exec(`sudo ifconfig ${subnetName} alias ${subnetCIDR}`, (err, stdout, stderr) => {
      if (err) {
        console.error(`Error creating virtual interface ${subnetName}:`, stderr);
        reject(`Error creating virtual interface ${subnetName}: ${stderr}`);
      } else {
        console.log(`Virtual interface ${subnetName} created with CIDR ${subnetCIDR}.`);
        resolve(subnetCIDR);
      }
    });
  });
}
```
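`createInterfaceForDomain()` itself is not shown in this excerpt; based on the description and the debug log above, a minimal sketch of the incrementing assignment might be:
```javascript
// Sketch of createInterfaceForDomain() (assumed): hand out the next
// 192.168.100.x address and bring up a dedicated alias for the domain.
const domainIPs = {};
let currentIP = 2; // start at 192.168.100.2, matching the debug log above

async function createInterfaceForDomain(domain) {
  if (domainIPs[domain]) return domainIPs[domain]; // reuse an existing mapping
  const ip = `192.168.100.${currentIP}`;
  await createVirtualInterface(`dummy${currentIP}`, ip);
  domainIPs[domain] = ip;
  currentIP++;
  return ip;
}
```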
### DNS Server and Query Handling
The DNS server listens on UDP port 53, decoding and responding to DNS requests. It checks if a domain exists in the P2P DNS core, using fallback mechanisms if needed.
- **`fetchP2PRecord`**: Retrieves the domain's hash from the P2P DNS core.
- **`checkPublicDNS`**: Resolves domains outside the P2P network by querying Cloudflare's 1.1.1.1 DNS server.
- **DNS Response Logic**: If the domain exists in P2P DNS, it responds with the locally mapped IP; otherwise, it uses the public DNS result.
```javascript
dnsServer.on('message', async (msg, rinfo) => {
  const query = dnsPacket.decode(msg);
  const domain = query.questions[0].name;

  const p2pRecord = await fetchP2PRecord(domain);
  const publicDNSRecords = await checkPublicDNS(domain);

  if (p2pRecord) {
    const localIP = await createInterfaceForDomain(domain);
    startHolesailClient(domain, p2pRecord.hash, localIP, 80);
    sendDNSResponse(dnsServer, query, rinfo, localIP);
  } else if (publicDNSRecords) {
    sendDNSResponse(dnsServer, query, rinfo, publicDNSRecords);
  }
});
```
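`sendDNSResponse()` is also not shown; with `dns-packet`, a minimal version might encode an A-record answer like this (a sketch, not the exact source):
```javascript
// Sketch of sendDNSResponse() (assumed): encode an A-record answer with
// dns-packet and send it back to the client over the UDP socket.
function sendDNSResponse(server, query, rinfo, ip) {
  const response = dnsPacket.encode({
    type: 'response',
    id: query.id,
    questions: query.questions,
    answers: [{
      type: 'A',
      name: query.questions[0].name,
      ttl: 60,
      data: ip, // the locally mapped or publicly resolved address
    }],
  });
  server.send(response, rinfo.port, rinfo.address);
}
```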
### Holesail Client for Decentralized Tunneling and Connection Management
In this implementation, the **Holesail client** is central to achieving decentralized, peer-to-peer (P2P) tunneling, which allows domains to be accessible across peers without relying on a traditional DNS infrastructure. Holesail is a critical component, acting as a secure bridge for domain-specific connections.
Here's a breakdown of how Holesail functions within this system:
#### Purpose of Holesail in P2P Networking
The Holesail client facilitates direct communication between peers, bypassing the need for centralized servers by creating tunnels that connect domains across the network. Each domain entry has a unique hash, which Holesail uses to establish a tunnel. This unique hash acts as an identifier for each domain in the P2P network, ensuring traffic is routed accurately.
#### Key Functions and Features of Holesail
1. **Starting Tunnels for P2P Domains**:
Holesail uses the domain's `hash` (generated or retrieved from the P2P DNS core) as an anchor point for the tunnel connection. By associating the domain hash with a unique local IP and port, the Holesail client can reroute incoming requests to the correct peer over the network.
2. **Automatic Connection Monitoring and Restarting**:
Connections in a P2P network can be less stable than in traditional networking. Holesail monitors each tunnel's status and automatically restarts connections if they become unresponsive. This feature is implemented through a responsive check on each domain's port. If Holesail detects an issue, it recreates the interface for the domain and starts a new tunnel connection, ensuring continuity of service.
3. **Connection Reusability**:
To optimize resources, Holesail reuses existing connections when possible. Each active connection is stored in the `activeConnections` object, which allows the client to check if a tunnel for a given domain is already active. If a tunnel is found, Holesail reuses it instead of initiating a new one, improving efficiency and reducing resource usage.
4. **Connection Lifecycle Management**:
Each Holesail connection has a lifecycle. To prevent stale or unresponsive connections from lingering, the client uses a timeout mechanism to automatically destroy and remove the tunnel from `activeConnections` after five minutes (300,000 ms). This cleanup process helps conserve resources and ensures only necessary connections remain active.
5. **Integration with Virtual Interface Management**:
When a new connection is needed for a domain, Holesail works alongside the `createInterfaceForDomain` function, which assigns a unique local IP to each domain. This allows domains to be isolated within the local network and ensures that each has a dedicated path through which Holesail can route traffic. By maintaining the virtual interface alongside the tunnel, Holesail manages traffic seamlessly across peers.
#### Code Example of Holesail Connection Management
Here's how Holesail is used to start and manage a P2P connection:
```javascript
async function startHolesailClient(domain, hash, ip, port) {
logDebug(`Attempting to start/reuse Holesail client for domain: ${domain}`);
if (activeConnections[domain]) {
logDebug(`Reusing existing Holesail client for domain: ${domain} on ${ip}:${port}`);
return activeConnections[domain];
}
logDebug(`Starting new Holesail client for domain: ${domain}, hash: ${hash}, IP: ${ip}, Port: ${port}`);
const connector = setupConnector(hash);
const holesailClient = new HolesailClient(connector);
holesailClient.connect({ port: port, address: ip, reuseAddr: true }, () => {
logDebug(`Holesail client for ${domain} connected on ${ip}:${port}`);
});
activeConnections[domain] = holesailClient;
setTimeout(() => {
logDebug(`Destroying Holesail client for domain ${domain}`);
holesailClient.destroy();
delete activeConnections[domain];
}, 300000);
return holesailClient;
}
```
In this function:
- **`setupConnector(hash)`** sets up the connector with the domain's hash, allowing Holesail to identify and route traffic correctly.
- **`holesailClient.connect()`** initiates the connection to the specified IP and port, handling requests sent to the domain.
- **Timeout for Connection Lifecycle** ensures the tunnel is automatically destroyed if unused, freeing up resources.
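The automatic monitoring and restart behavior described in point 2 isn't shown in the function above. A minimal sketch of how it could work, reusing `createInterfaceForDomain`, `fetchP2PRecord`, `startHolesailClient`, and `activeConnections` from this system (the probe helper `isPortResponsive` is a hypothetical name), might look like this:
```javascript
const net = require('net');

// Probe a domain's local IP/port; resolves true if something accepts the connection.
function isPortResponsive(ip, port, timeoutMs = 3000) {
  return new Promise((resolve) => {
    const socket = net.connect({ host: ip, port });
    const done = (ok) => { socket.destroy(); resolve(ok); };
    socket.setTimeout(timeoutMs, () => done(false));
    socket.once('connect', () => done(true));
    socket.once('error', () => done(false));
  });
}

// Periodically verify each active tunnel and restart unresponsive ones.
async function monitorConnections() {
  for (const domain of Object.keys(activeConnections)) {
    const localIP = await createInterfaceForDomain(domain);
    if (!(await isPortResponsive(localIP, 80))) {
      logDebug(`Tunnel for ${domain} unresponsive, restarting...`);
      activeConnections[domain].destroy();
      delete activeConnections[domain];
      const record = await fetchP2PRecord(domain);
      if (record) startHolesailClient(domain, record.hash, localIP, 80);
    }
  }
}
setInterval(monitorConnections, 60000); // check every minute
```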
Holesail is essential for bridging the gap between DNS resolution and accessible peer connections. By using Holesail, each domain can securely connect across peers within the P2P network, overcoming traditional DNS constraints and enabling a scalable, decentralized DNS solution.
### HTTP Proxy for Routing P2P Traffic
The HTTP proxy listens on port 80 and reroutes traffic to the appropriate IP for domains within the P2P network. This enables HTTP-based access for P2P-resolved domains.
```javascript
http.createServer(async (req, res) => {
const domain = req.url.replace("/", "");
const localIP = await createInterfaceForDomain(domain);
await restartHolesailClient(domain, 'master_hash', localIP, 80);
const options = { hostname: localIP, port: 80, path: req.url, method: req.method, headers: req.headers };
const proxyRequest = http.request(options, (proxyRes) => {
res.writeHead(proxyRes.statusCode, proxyRes.headers);
proxyRes.pipe(res);
});
proxyRequest.on('error', () => res.writeHead(500).end('Error'));
req.pipe(proxyRequest);
}).listen(80, '127.0.0.1');
```
### Synchronizing DNS Records Across Peers
The `addDomain` function enables peers to add new domains to the P2P DNS system, appending records to `dnsCore`, making them accessible and synchronized across the network.
```javascript
async function addDomain(domain, hash) {
await dnsCore.ready();
const record = JSON.stringify({ domain, hash });
await dnsCore.append(Buffer.from(record));
logDebug(`Domain ${domain} added to DNS core`);
}
```
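For completeness, here is a sketch of how `fetchP2PRecord` (used by the DNS handler earlier) might read these records back, assuming `dnsCore` is an append-only Hypercore whose entries are the JSON records written by `addDomain`:
```javascript
// Sketch: scan the log newest-first and return the latest record for a domain.
// Assumes dnsCore.get(i) resolves to the Buffer appended by addDomain.
async function fetchP2PRecord(domain) {
  await dnsCore.ready();
  for (let i = dnsCore.length - 1; i >= 0; i--) {
    const entry = JSON.parse((await dnsCore.get(i)).toString());
    if (entry.domain === domain) return entry; // { domain, hash }
  }
  return null; // not found in the P2P DNS core
}
```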
### Hyperswarm Integration for Peer Synchronization
The DNS core joins the Hyperswarm network using the configured `masterNetworkDiscoveryKey`, ensuring only authorized peers connect and sync DNS data.
```javascript
(async () => {
await dnsCore.ready();
const topic = masterNetworkDiscoveryKey || dnsCore.discoveryKey;
logDebug(`DNS Core ready, joining Hyperswarm with topic: ${topic.toString('hex')}`);
swarm.join(topic, { server: true, client: true });
swarm.on('connection', (conn) => {
logDebug('Peer connected, starting replication...');
dnsCore.replicate(conn);
});
})();
```
### Expanding DNS Record Types for P2P Networks with Hyperdrive in `dnsCore` (Theoretical Concept)
In this system, **Hyperdrive** integrated with `dnsCore` presents a theoretical framework for handling various custom DNS record types. While traditional DNS has a limited set of record types, these hypothetical record types illustrate how P2P-specific records could be organized by top-level domain (TLD) to manage unique functionalities across a decentralized network.
> **Note:** The following examples represent a conceptual approach. These custom record types do not currently exist in the codebase, but they demonstrate how this system could theoretically be extended to handle complex, decentralized data needs.
1. **TX (Transaction)**:
- Under domains like `account.example.p2p`, a `TX` record could store transaction details, ideal for decentralized finance (DeFi) and smart contracts. Each `TX` record would manage transaction histories or balances and could be synchronized across peers using `dnsCore`.
```javascript
// Hypothetical TX record for the example.p2p domain
const txDrive = new Hyperdrive(store, { name: 'account.example.p2p' });
await txDrive.ready();
await txDrive.put('/tx/wallet123.json', Buffer.from(JSON.stringify({
type: "TX",
txId: "abc123",
amount: 100,
currency: "ETH",
timestamp: Date.now()
})));
dnsCore.replicate(txDrive.core);
```
2. **PEER**:
- For domains like `node.service.peer`, a `PEER` record could theoretically store peer information such as public keys, IP addresses, or resource capabilities (e.g., storage, compute power). This would enable efficient resource sharing and peer discovery across the network.
```javascript
// Hypothetical PEER record for the service.peer domain
const peerDrive = new Hyperdrive(store, { name: 'node.service.peer' });
await peerDrive.ready();
await peerDrive.put('/peers/node456.json', Buffer.from(JSON.stringify({
type: "PEER",
publicKey: "abcdef...",
ip: "192.168.1.1",
capabilities: ["storage", "compute"],
timestamp: Date.now()
})));
dnsCore.replicate(peerDrive.core);
```
3. **FINGERPRINT**:
- Under domains like `device.secure.tld`, a `FINGERPRINT` record could theoretically store cryptographic identifiers associated with devices or software versions. This would allow nodes to verify the integrity of hardware or software configurations before establishing connections.
```javascript
// Hypothetical FINGERPRINT record for the secure.tld domain
const fingerprintDrive = new Hyperdrive(store, { name: 'device.secure.tld' });
await fingerprintDrive.ready();
await fingerprintDrive.put('/fingerprint/device123.json', Buffer.from(JSON.stringify({
type: "FINGERPRINT",
fingerprint: "unique-device-hash",
deviceType: "sensor",
softwareVersion: "v1.0.0",
timestamp: Date.now()
})));
dnsCore.replicate(fingerprintDrive.core);
```
4. **CONTENT**:
- A `CONTENT` record under domains like `media.site.p2p` could theoretically serve as a decentralized content delivery record, storing metadata for files or media associated with the domain. This approach could support distributed websites or applications across peers.
```javascript
// Hypothetical CONTENT record for the site.p2p domain
const contentDrive = new Hyperdrive(store, { name: 'media.site.p2p' });
await contentDrive.ready();
await contentDrive.put('/content/video-metadata.json', Buffer.from(JSON.stringify({
type: "CONTENT",
title: "Decentralized Video",
contentHash: "content-hash",
author: "AuthorName",
timestamp: Date.now()
})));
dnsCore.replicate(contentDrive.core);
```
5. **AUTH (Authentication Data)**:
- Although purely theoretical, an `AUTH` record under domains like `auth.node.peer` could store access tokens or permissions, supporting decentralized access control across the network. This would allow peers to manage encrypted credentials or control access to resources.
```javascript
// Hypothetical AUTH record for the node.peer domain
const authDrive = new Hyperdrive(store, { name: 'auth.node.peer' });
await authDrive.ready();
await authDrive.put('/auth/peer-access.json', Buffer.from(JSON.stringify({
type: "AUTH",
peer: "peer123",
permissions: ["read", "write"],
accessToken: "encrypted-token",
timestamp: Date.now()
})));
dnsCore.replicate(authDrive.core);
```
These examples are intended to provide a vision for how `dnsCore` and Hyperdrive could be expanded with domain-specific TLD storage in a fully decentralized DNS system, although they are not implemented in the current codebase.
#### Customizing DNS Records for P2P Flexibility
While these record types are theoretical, they represent exciting possibilities for how `dnsCore` and Hyperdrive could be customized to meet the unique requirements of decentralized networks. Future implementations could redefine DNS concepts to support service discovery, data replication, and secure peer communication within a dynamic, P2P-friendly environment.
## Security and Privacy Implications
This P2P DNS system's architecture offers significant privacy advantages. By decentralizing DNS queries and encrypting traffic over Holesail tunnels, it:
- Prevents ISPs from logging or tracking DNS requests.
- Protects DNS data from centralized surveillance or censorship.
- Enables users to create their own namespaces without interference from external authorities.
## Use Cases: Expanding Applications Beyond Traditional DNS
The applications of this P2P DNS system are vast. Beyond typical DNS, it allows users to create isolated namespaces for organizational use, IoT device management, and censorship-resistant communication. By removing centralized control, it empowers users with autonomy and flexibility over their digital presence.
## New Era of DNS Freedom
This decentralized, firewall-resistant P2P DNS system implemented in Node.js offers a resilient, censorship-resistant alternative to traditional DNS. By combining Corestore, Hyperswarm, and Holesail, it provides the infrastructure needed for a free, self-governing Internet. This DNS solution enables users around the world to reclaim control over their digital identities, creating a more open, accessible, and secure online ecosystem.

<!-- lead -->
Monitoring containerized applications is essential for ensuring optimal performance, diagnosing issues promptly, and maintaining overall system health.
In a dynamic environment where containers can be spun up or down based on demand, having a flexible and responsive monitoring solution becomes even more critical. This article delves into how I utilize the Netdata REST API to generate real-time, visually appealing graphs and an interactive dashboard for each container dynamically. By integrating technologies like Node.js, Express.js, Chart.js, Docker, and web sockets, I create a seamless monitoring experience that provides deep insights into container performance metrics.
## Example Dynamic Page
https://ssh42113405732790.syscall.lol/
## Introduction
As containerization becomes the backbone of modern application deployment, monitoring solutions need to adapt to the ephemeral nature of containers. Traditional monitoring tools may not provide the granularity or real-time feedback necessary for containerized environments. Netdata, with its powerful real-time monitoring capabilities and RESTful API, offers a robust solution for collecting and accessing performance metrics. By leveraging the Netdata REST API, I can fetch detailed metrics about CPU usage, memory consumption, network traffic, disk I/O, and running processes within each container.
My goal is to create an interactive dashboard that not only displays these metrics in real time but also lets users interact with the data, such as filtering processes or adjusting timeframes. To achieve this, I build a backend server that interfaces with the Netdata API, processes the data, and serves it to the frontend, where it's rendered using Chart.js and other web technologies.
## System Architecture
Understanding the system architecture is crucial to grasp how each component interacts to provide a cohesive monitoring solution. The architecture comprises several key components:
1. **Netdata Agent**: Installed on the host machine, it collects real-time performance metrics and exposes them via a RESTful API.
2. **Backend Server**: A Node.js application built with Express.js that serves as an intermediary between the Netdata API and the frontend clients.
3. **Interactive Dashboard**: A web interface that displays real-time graphs and system information, built using HTML, CSS, JavaScript, and libraries like Chart.js.
4. **Docker Integration**: Utilizing Dockerode, a Node.js Docker client, to interact with Docker containers, fetch process lists, and verify container existence.
5. **Proxy Server**: Routes incoming requests to the appropriate container's dashboard based on subdomain mapping.
6. **Discord Bot**: Allows users to request performance graphs directly from Discord, enhancing accessibility and user engagement.
### Data Flow
- The Netdata Agent continuously collects performance metrics and makes them available via its RESTful API.
- The Backend Server fetches data from the Netdata API based on requests from clients or scheduled intervals.
- The Interactive Dashboard requests data from the Backend Server, which processes and serves it in a format suitable for visualization.
- Docker Integration ensures that the system is aware of the running containers and can fetch container-specific data.
- The Proxy Server handles subdomain-based routing, directing users to the correct dashboard for their container.
- The Discord Bot interacts with the Backend Server to fetch graphs and sends them to users upon request.
## Backend Server Implementation
The backend server is the linchpin of our monitoring solution. It handles data fetching, processing, and serves as an API endpoint for the frontend dashboard and the Discord bot.
### Setting Up Express.js Server
I start by setting up an Express.js server that listens for incoming HTTP requests. The server is configured to handle Cross-Origin Resource Sharing (CORS) to allow requests from different origins, which is essential for serving the dashboard to users accessing it from various domains.
```javascript
const express = require('express');
const cors = require('cors'); // CORS middleware, required for app.use(cors()) below
const app = express();
const port = 6666;
app.use(cors()); // Enable CORS
app.listen(port, "0.0.0.0", () => {
console.log(`Server running on http://localhost:${port}`);
});
```
### Interacting with Netdata API
To fetch metrics from Netdata, I define a function that constructs the appropriate API endpoints based on the container ID and the desired timeframe.
```javascript
const axios = require('axios');
const getEndpoints = (containerId, timeframe) => {
const after = -(timeframe * 60); // Timeframe in seconds
return {
cpu: `http://netdata.local/api/v1/data?chart=cgroup_${containerId}.cpu&format=json&after=${after}`,
memory: `http://netdata.local/api/v1/data?chart=cgroup_${containerId}.mem_usage&format=json&after=${after}`,
// Additional endpoints for io, pids, network...
};
};
```
I then define a function to fetch data for a specific metric:
```javascript
const fetchMetricData = async (metric, containerId, timeframe = 5) => {
const endpoints = getEndpoints(containerId, timeframe);
try {
const response = await axios.get(endpoints[metric]);
return response.data;
} catch (error) {
console.error(`Error fetching ${metric} data for container ${containerId}:`, error);
throw new Error(`Failed to fetch ${metric} data.`);
}
};
```
### Data Processing
Once I have the raw data from Netdata, I need to process it to extract timestamps and values suitable for graphing. The data returned by Netdata is typically in a time-series format, with each entry containing a timestamp and one or more metric values.
```javascript
const extractMetrics = (data, metric) => {
const labels = data.data.map((entry) => new Date(entry[0] * 1000).toLocaleTimeString());
let values;
switch (metric) {
case 'cpu':
case 'memory':
case 'pids':
values = data.data.map(entry => entry[1]); // Adjust index based on metric specifics
break;
case 'io':
values = {
read: data.data.map(entry => entry[1]),
write: data.data.map(entry => entry[2]),
};
break;
case 'network':
values = {
received: data.data.map(entry => entry[1]),
sent: data.data.map(entry => entry[2]),
};
break;
default:
values = [];
}
return { labels, values };
};
```
### Graph Generation with Chart.js
To generate graphs, I use the `chartjs-node-canvas` library, which allows us to render Chart.js graphs server-side and output them as images.
```javascript
const { ChartJSNodeCanvas } = require('chartjs-node-canvas');
const chartJSMetricCanvas = new ChartJSNodeCanvas({ width: 1900, height: 400, backgroundColour: 'black' });
const generateMetricGraph = async (metricData, labels, label, borderColor) => {
const configuration = {
type: 'line',
data: {
labels: labels,
datasets: [{
label: label,
data: metricData,
borderColor: borderColor,
fill: false,
tension: 0.1,
}],
},
options: {
scales: {
x: {
title: {
display: true,
text: 'Time',
color: 'white',
},
},
y: {
title: {
display: true,
text: `${label} Usage`,
color: 'white',
},
},
},
plugins: {
legend: {
labels: {
color: 'white',
},
},
},
},
};
return chartJSMetricCanvas.renderToBuffer(configuration);
};
```
This function takes the metric data, labels, and graph styling options to produce a PNG image buffer of the graph, which can then be sent to clients or used in the dashboard.
### API Endpoints for Metrics
I define API endpoints for each metric that clients can request. For example, the CPU usage endpoint:
```javascript
app.get('/api/graph/cpu/:containerId', async (req, res) => {
const { containerId } = req.params;
const timeframe = parseInt(req.query.timeframe) || 5;
const format = req.query.format || 'graph';
try {
const data = await fetchMetricData('cpu', containerId, timeframe);
if (format === 'json') {
return res.json(data);
}
const { labels, values } = extractMetrics(data, 'cpu');
const imageBuffer = await generateMetricGraph(values, labels, 'CPU Usage (%)', 'rgba(255, 99, 132, 1)');
res.set('Content-Type', 'image/png');
res.send(imageBuffer);
} catch (error) {
res.status(500).send(`Error generating CPU graph: ${error.message}`);
}
});
```
Similar endpoints are created for memory, network, disk I/O, and PIDs.
### Full Report Generation
For users who want a comprehensive view of their container's performance, I offer a full report that combines all the individual graphs into one image.
```javascript
app.get('/api/graph/full-report/:containerId', async (req, res) => {
// Fetch data for all metrics
// Generate graphs for each metric
// Combine graphs into a single image using Canvas
// Send the final image to the client
});
```
By using the `canvas` and `loadImage` modules, I can composite multiple graphs into a single image, adding titles and styling as needed.
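As a rough illustration, a compositing helper along these lines could stack the individual graph buffers into one report image (a sketch under those assumptions, not the exact production code):
```javascript
const { createCanvas, loadImage } = require('canvas');

// Stack several PNG graph buffers vertically into one report image.
async function combineGraphs(buffers, width = 1900, rowHeight = 400) {
  const canvas = createCanvas(width, rowHeight * buffers.length);
  const ctx = canvas.getContext('2d');
  ctx.fillStyle = 'black';
  ctx.fillRect(0, 0, canvas.width, canvas.height);
  for (let i = 0; i < buffers.length; i++) {
    const img = await loadImage(buffers[i]);
    ctx.drawImage(img, 0, i * rowHeight, width, rowHeight);
  }
  return canvas.toBuffer('image/png');
}
```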
## Interactive Dashboard
The interactive dashboard provides users with real-time insights into their container's performance. It is designed to be responsive, visually appealing, and informative.
### Live Data Updates
To achieve real-time updates, I use client-side JavaScript to periodically fetch the latest data from the backend server. I use `setInterval` to schedule data fetches every second or at a suitable interval based on performance considerations.
```html
<script>
// containerId is injected into the page by the server-side template
async function updateGraphs() {
const response = await fetch(`/api/graph/full-report/${containerId}?format=json&timeframe=1`);
const data = await response.json();
// Update charts with new data
}
setInterval(updateGraphs, 1000);
</script>
```
### Chart.js Integration
I use Chart.js on the client side to render graphs directly in the browser. This allows for smooth animations and interactivity.
```javascript
// cpuCtx is the 2D canvas context for the CPU chart element
const cpuChart = new Chart(cpuCtx, {
type: 'line',
data: {
labels: [],
datasets: [{
label: 'CPU Usage (%)',
data: [],
borderColor: 'rgba(255, 99, 132, 1)',
borderWidth: 2,
pointRadius: 3,
fill: false,
}]
},
options: {
animation: { duration: 500 },
responsive: true,
maintainAspectRatio: false,
scales: {
x: { grid: { color: 'rgba(255, 255, 255, 0.1)' } },
y: { grid: { color: 'rgba(255, 255, 255, 0.1)' } }
},
plugins: { legend: { display: false } }
}
});
```
### Process List Display
An essential aspect of container monitoring is understanding what processes are running inside the container. I fetch the process list using Docker's API and display it in a searchable table.
```javascript
// Backend endpoint
app.get('/api/processes/:containerId', async (req, res) => {
const { containerId } = req.params;
try {
const container = docker.getContainer(containerId);
const processes = await container.top();
res.json(processes.Processes || []);
} catch (err) {
console.error(`Error fetching processes for container ${containerId}:`, err);
res.status(500).json({ error: 'Failed to fetch processes' });
}
});
// Client-side function to update the process list
async function updateProcessList() {
const processResponse = await fetch(`/api/processes/${containerId}`);
const processList = await processResponse.json();
// Render the process list in the table
}
```
I enhance the user experience by adding a search box that allows users to filter the processes by PID, user, or command.
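The filter itself can be a few lines of client-side JavaScript; a sketch (the element IDs here are hypothetical) might be:
```javascript
// Client-side: hide table rows that don't match the search term as the user types.
document.getElementById('processSearch').addEventListener('input', (e) => {
  const term = e.target.value.toLowerCase();
  document.querySelectorAll('#processTable tbody tr').forEach((row) => {
    row.style.display = row.textContent.toLowerCase().includes(term) ? '' : 'none';
  });
});
```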
### Visual Enhancements
To make the dashboard more engaging, I incorporate visual elements like particle effects using libraries like `particles.js`. I also apply a dark theme with styling that emphasizes the data visualizations.
```css
body {
background-color: #1c1c1c;
color: white;
font-family: Arial, sans-serif;
}
```
### Responsive Design
Using Bootstrap and custom CSS, I ensure that the dashboard is responsive and accessible on various devices and screen sizes.
```html
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha1/dist/css/bootstrap.min.css" rel="stylesheet">
<div class="container mt-4">
<!-- Dashboard content -->
</div>
```
## Docker Integration
Docker plays a pivotal role in our system, not just for running the containers but also for providing data about them.
### Fetching Container Information
I use the `dockerode` library to interact with Docker:
```javascript
const Docker = require('dockerode');
const docker = new Docker();
async function containerExists(subdomain) {
try {
const containers = await docker.listContainers();
return containers.some(container => container.Names.some(name => name.includes(subdomain)));
} catch (error) {
console.error(`Error checking Docker for subdomain ${subdomain}:`, error.message);
return false;
}
}
```
This function checks whether a container corresponding to a subdomain exists, which is essential for routing and security purposes.
### Fetching Process Lists
As mentioned earlier, I can retrieve the list of processes running inside a container:
```javascript
const container = docker.getContainer(containerId);
const processes = await container.top();
```
This allows us to display detailed information about what's happening inside the container, which can be invaluable for debugging and monitoring.
## Proxy Server for web UI
To provide users with a seamless experience, I set up a proxy server that routes requests to the appropriate container dashboards based on subdomains.
### Subdomain-Based Routing
I parse the incoming request's hostname to extract the subdomain, which corresponds to a container ID.
```javascript
app.use(async (req, res, next) => {
const host = req.hostname;
let subdomain = host.split('.')[0].toUpperCase();
if (!subdomain || ['LOCALHOST', 'WWW', 'SYSCALL'].includes(subdomain)) {
return res.redirect('https://discord-linux.com');
}
const exists = await containerExists(subdomain);
if (!exists) {
return res.redirect('https://discord-linux.com');
}
// Proceed to proxy the request
});
```
### Proxying Requests
Using `http-proxy-middleware`, I forward the requests to the backend server's live dashboard endpoint:
```javascript
const { createProxyMiddleware } = require('http-proxy-middleware');
createProxyMiddleware({
target: `https://g.syscall.lol/full-report/${subdomain}`,
changeOrigin: true,
pathRewrite: {
'^/': '/live', // Rewrite the root path to /live
}
})(req, res, next);
```
This setup allows users to access their container's dashboard by visiting a URL like `https://SSH42113405732790.syscall.lol`, where `SSH42113405732790` is the container ID.
## Discord Bot Integration
To make the monitoring solution more accessible, I integrate a Discord bot that allows users to request graphs and reports directly within Discord.
### Command Handling
I define a `graph` command that users can invoke to get performance graphs:
```javascript
module.exports = {
name: "graph",
description: "Retrieves a graph report for your container.",
options: [
// Command options for report type, timeframe, etc.
],
run: async (client, interaction) => {
// Command implementation
},
};
```
### User Authentication
I authenticate users by matching their Discord ID with the container IDs stored in our database:
```javascript
let sshSurfID;
// 'connection' is a MySQL connection initialized elsewhere in the bot
connection.query(
"SELECT uid FROM users WHERE discord_id = ?",
[interaction.user.id],
(err, results) => {
if (results.length === 0) {
interaction.editReply("Sorry, you do not have a container associated with your account.");
} else {
sshSurfID = results[0].uid;
}
}
);
```
### Fetching and Sending Graphs
Once I have the user's container ID, I fetch the graph image from the backend server and send it as a reply in Discord:
```javascript
const apiUrl = `https://g.syscall.lol/${reportType}/${sshSurfID}?timeframe=${timeframe}`;
const response = await axios.get(apiUrl, { responseType: 'stream' });
// Send the image in the reply
await interaction.editReply({
files: [{
attachment: response.data,
name: `${reportType}_graph.png`
}]
});
```
This integration provides users with an easy way to monitor their containers without leaving Discord.
## Security Considerations
When building a monitoring system, especially one that exposes container data over the network, security is paramount.
### Access Control
I ensure that only authenticated users can access the data for their containers. This involves:
- Verifying container existence and ownership before serving data.
- Using secure communication protocols (HTTPS) to encrypt data in transit.
- Implementing proper authentication mechanisms in the backend server and Discord bot.
### Input Validation
I sanitize and validate all inputs, such as container IDs, to prevent injection attacks and unauthorized access.
### Rate Limiting
To protect against Denial of Service (DoS) attacks, I can implement rate limiting on API endpoints.
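As one option, the `express-rate-limit` middleware could cap requests per IP; a minimal sketch (the limits are illustrative, not tuned values):
```javascript
const rateLimit = require('express-rate-limit');

// Allow each IP at most 60 metric requests per minute.
const metricsLimiter = rateLimit({
  windowMs: 60 * 1000,
  max: 60,
  standardHeaders: true,
  legacyHeaders: false,
  message: 'Too many requests, please slow down.',
});

app.use('/api/', metricsLimiter);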
## Performance Optimizations
To ensure the system performs well under load, I implement several optimizations:
- **Caching**: Cache frequently requested data to reduce load on the Netdata Agent and backend server (a minimal cache sketch follows this list).
- **Efficient Data Structures**: Use efficient data structures and algorithms for data processing.
- **Asynchronous Operations**: Utilize asynchronous programming to prevent blocking operations.
- **Load Balancing**: Distribute incoming requests across multiple instances of the backend server if needed.
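As an example of the caching point above, a small TTL cache wrapped around the `fetchMetricData` function from earlier might look like this (a sketch only; the TTL value is illustrative):
```javascript
// A tiny in-memory cache with a short TTL to avoid hammering the Netdata Agent.
const cache = new Map();

async function fetchMetricDataCached(metric, containerId, timeframe = 5, ttlMs = 2000) {
  const key = `${metric}:${containerId}:${timeframe}`;
  const hit = cache.get(key);
  if (hit && Date.now() - hit.at < ttlMs) return hit.data; // serve fresh-enough data
  const data = await fetchMetricData(metric, containerId, timeframe);
  cache.set(key, { at: Date.now(), data });
  return data;
}
```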
## Future Enhancements
There are several areas where I can expand and improve the monitoring solution:
- **Alerting Mechanisms**: Integrate alerting to notify users of critical events or thresholds being exceeded.
- **Historical Data Analysis**: Store metrics over longer periods for trend analysis and capacity planning.
- **Custom Metrics**: Allow users to define custom metrics or integrate with application-level monitoring.
- **Mobile Accessibility**: Optimize the dashboard for mobile devices or create a dedicated mobile app.
## My Thoughts
By leveraging the Netdata REST API and integrating it with modern web technologies, I have built a dynamic and interactive monitoring solution tailored for containerized environments. The combination of real-time data visualization, user-friendly interfaces, and accessibility through platforms like Discord empowers users to maintain and optimize their applications effectively.
This approach showcases the power of combining open-source tools and technologies to solve complex monitoring challenges in a scalable and efficient manner. As containerization continues to evolve, such solutions will become increasingly vital in managing and understanding the performance of distributed applications.
*Note: The code snippets provided are simplified for illustrative purposes. In a production environment, additional error handling, security measures, and optimizations should be implemented.*

<!-- lead -->
Creating a Linux Based Hosting Solution for Twitter.
It all started with a question that initially seemed a bit absurd: *Could you manage a Linux server using only Twitter?* At first glance, the idea appeared impractical. After all, social media platforms aren't exactly designed for hosting virtual machines or executing server-side commands. But, as someone who thrives on pushing boundaries, I decided to take this concept and run with it.
I envisioned a Twitter bot that could host Linux containers, execute commands, and interact with users, all through tweets. In the end, this project led me to become what I believe is the first person to host Linux containers directly through Twitter interactions. This blog post will take you through the entire process in meticulous detail — from conceptualizing the idea, writing the code, and overcoming challenges, to launching the bot and maintaining it.
<p align="center">
<img src="https://raven-scott.fyi/twit_linux.webp" alt="Twit Linux">
</p>
# Source
## https://git.ssh.surf/snxraven/codename-t
Some scripts mentioned in the above code are covered in the below Blog Post:
https://blog.raven-scott.fyi/deep-dive-discord-linux-automated-container-platform
### The Concept: Why Twitter for Linux Hosting?
Before diving into the technical details, it's important to understand the motivations behind this project. The world of DevOps and Linux system administration is traditionally one of terminals, SSH keys, and intricate scripts. However, I wanted to bring this technical space to a more social, accessible environment. And what better place to do that than Twitter, a platform where millions of people spend their time daily?
The goal was simple: **I wanted to democratize Linux hosting by allowing users to spin up containers, execute commands, and even destroy their own instances — all with a tweet.**
Social media, particularly Twitter, has built-in engagement mechanisms that could make such a concept interactive and fun. Moreover, it would introduce new audiences to Linux without requiring them to navigate the complexities of traditional terminal-based environments. Instead, they could tweet commands like `neofetch` or `apt update` and have my bot respond with the command's output.
Let's break down how I made it all work.
## Phase 1: Setting Up the Infrastructure
### Tools and Technologies
To pull this off, I needed a robust stack of tools and technologies. The project hinged on several core technologies, which I'll go over here:
1. **Node.js**: JavaScript's asynchronous nature makes it ideal for handling Twitter streams and Docker management, two of the key tasks in this project.
2. **Docker**: Docker allows us to create isolated Linux environments for each user who interacts with the bot. Containers are lightweight, ephemeral, and easy to manage — perfect for this use case.
3. **Twit and Twitter API SDK**: Twit and the Twitter SDK provide all the necessary hooks into Twitter's API, allowing me to listen for mentions, respond to tweets, and manage streams.
4. **Simple-Dockerode**: This library offers a simplified interface for managing Docker containers directly from Node.js.
5. **Generate-password**: I needed this to dynamically create user passwords for each container, ensuring secure access.
### Setting up Twitter's API Access
To begin with, I needed access to Twitter's API. Twitter's API has been around for years and allows developers to interact with almost every aspect of Twitter — from fetching user data to posting tweets. For my project, I needed the bot to:
- **Listen for mentions**: Whenever someone tweets @mybotname, it should trigger a response.
- **Respond with command outputs**: If the mention contains a command like `neofetch`, the bot needs to execute that command in a user's container and tweet back the result.
- **Manage Docker containers**: Each user's interaction with the bot creates or destroys their own Linux container.
I set up the basic configuration in Node.js using Twit and Twitter's SDK.
```javascript
const config = {
consumer_key: process.env.TWITTER_API_KEY,
consumer_secret: process.env.TWITTER_API_SECRET_KEY,
access_token: process.env.TWITTER_ACCESS_TOKEN,
access_token_secret: process.env.TWITTER_ACCESS_TOKEN_SECRET,
};
const T = new Twit(config);
const client = new Client(process.env.TWITTER_BEARER_TOKEN);
```
This snippet initializes Twit and the Twitter SDK with my credentials, which are securely stored in environment variables.
### Docker Containers: Isolated Linux Hosts for Every User
The next big piece of the puzzle was Docker. Docker allows for easy creation and management of Linux containers, making it a perfect fit for this project. Each user interaction would generate an isolated Linux container for them, in which they could run commands.
Here's how the code works to generate and start a container:
```javascript
const Dockerode = require('simple-dockerode');
const docker = new Dockerode({ socketPath: '/var/run/docker.sock' });
function createContainer(userID) {
docker.createContainer({
Image: 'ubuntu',
Cmd: ['/bin/bash'],
name: `container_${userID}`
}).then(container => {
container.start();
console.log(`Container for user ${userID} started.`);
});
}
```
In this code:
- A new Docker container running Ubuntu is created for each user.
- The container is named after the user's Twitter ID to keep things organized.
- The container is started immediately after creation.
To avoid clutter and wasted resources, the containers are ephemeral — they're automatically destroyed after seven days. This ensures that resources aren't wasted on inactive containers.
## Phase 2: The Twitter Bot in Action
### Listening for Mentions and Running Commands
With the infrastructure in place, I had to set up the Twitter bot to continuously monitor tweets for mentions of the bot. The logic here is straightforward:
1. **Monitor Twitter for mentions** of the bot using a streaming API.
2. **Identify valid commands** like `generate`, `neofetch`, `destroy`, etc.
3. **Execute the command** in the appropriate Docker container.
4. **Tweet the result back** to the user.
Here's the code that makes this happen:
```javascript
async function getMentionedTweet() {
const stream = await client.tweets.searchStream({
"tweet.fields": ["author_id", "id"],
"expansions": ["referenced_tweets.id.author_id"]
});
for await (const response of stream) {
if (response.data.text.includes(`@${process.env.BOT_USERNAME}`)) {
handleCommand(response.data.text, response.data.author_id);
}
}
}
async function handleCommand(tweet, userID) {
if (tweet.includes('generate')) {
createContainer(userID);
} else if (tweet.includes('destroy')) {
destroyContainer(userID);
} else {
executeCommandInContainer(userID, tweet);
}
}
```
This code does the following:
- **Listens for tweets** that mention the bot.
- **Parses the tweet** to determine whether the user wants to generate a new container, destroy an existing one, or run a command.
- **Delegates the appropriate action**: generating, destroying, or executing a command inside a Docker container.
### Command Execution: Let's Get Interactive
For me, the real fun began when I allowed users to run Linux commands inside their containers directly from Twitter. To make this happen, I had to set up the bot to execute commands like `neofetch`, `pwd`, and more.
Here's how the bot executes commands inside the container:
```javascript
function executeCommandInContainer(userID, command) {
const container = docker.getContainer(`container_${userID}`);
container.exec({
Cmd: ['/bin/bash', '-c', command],
AttachStdout: true,
AttachStderr: true
}, (err, exec) => {
if (err) {
console.log(err);
return;
}
exec.start({ Detach: false }, (err, stream) => {
if (err) {
console.log(err);
return;
}
stream.on('data', (data) => {
const output = data.toString();
tweetBack(userID, output);
});
});
});
}
```
In this code:
- We grab the appropriate Docker container for the user using their Twitter ID.
- We then execute their command inside the container.
- Once the command is executed, the output is captured and sent back to the user in a tweet.
### Dynamic Password Generation for User Security
One of the trickier aspects of this project was ensuring that each user's container was secure. For this, I dynamically generated passwords for each container using the `generate-password` library:
```javascript
const generator = require('generate-password');
const password = generator.generate({
length: 10,
numbers: true
});
container.exec({
Cmd: ['/bin/bash', '-c', `echo 'root:${password}' | chpasswd`]
});
```
By dynamically generating passwords for each container and assigning them only to the user who created it, I ensured that each container had a unique, secure password. This way, only the container's owner could execute commands or access its shell.
## Phase 3: Challenges and Solutions
### 1. Twitter Rate Limits
One of the most significant challenges was Twitter's rate limiting. Twitter's API has strict limits on how many requests can be made in a specific timeframe. This meant I had to be strategic about how often the bot responded to users. Too many commands, and I'd hit the rate limit, rendering the bot temporarily unusable.
**Solution**: I implemented a throttling mechanism to ensure the bot could only respond to a certain number of users per minute. This keeps the bot running smoothly and prevents any downtime due to rate limiting.
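A throttling mechanism can be as simple as a counter that resets every minute; a sketch of the idea, reusing the `tweetBack` helper shown earlier (the exact limit is illustrative):
```javascript
// Token-bucket-style throttle: at most MAX_REPLIES_PER_MINUTE tweets go out.
const MAX_REPLIES_PER_MINUTE = 15;
let repliesThisMinute = 0;
setInterval(() => { repliesThisMinute = 0; }, 60 * 1000); // refill every minute

function tweetBackThrottled(userID, output) {
  if (repliesThisMinute >= MAX_REPLIES_PER_MINUTE) {
    console.log(`Throttled reply to ${userID}; skipping until the window resets.`);
    return;
  }
  repliesThisMinute++;
  tweetBack(userID, output);
}
```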
### 2. Resource Management
Another challenge was ensuring that Docker containers were efficiently managed. If too many users created containers without proper monitoring, server resources could quickly be exhausted.
**Solution**: I implemented a garbage collection system to automatically destroy containers after seven days. This prevents resource leaks and keeps the system running efficiently.
```javascript
function destroyOldContainers() {
docker.listContainers((err, containers) => {
containers.forEach(container => {
if (isOlderThanSevenDays(container)) {
docker.getContainer(container.Id).stop();
docker.getContainer(container.Id).remove();
}
});
});
}
```
This simple function checks all active containers, determines their age, and destroys any that are older than seven days.
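The `isOlderThanSevenDays` helper isn't shown above; a minimal version, relying on the Unix `Created` timestamp (in seconds) that Docker reports for each container in `listContainers`, could be:
```javascript
// Docker reports Created as seconds since the epoch in listContainers output.
function isOlderThanSevenDays(container) {
  const sevenDaysMs = 7 * 24 * 60 * 60 * 1000;
  return Date.now() - container.Created * 1000 > sevenDaysMs;
}
```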
### 3. Running Complex Commands
Some users wanted to run more complex commands, which generated large outputs that Twitter couldn't handle (due to character limits). For example, commands like `neofetch` could generate lengthy outputs that couldn't be tweeted back directly.
**Solution**: For large outputs, I utilized an external paste service. If the command output was too large for a tweet, the bot would generate a paste and tweet the link back to the user.
```javascript
if (output.length > 280) {
const pasteURL = await createPaste(output);
tweetBack(userID, `Output too long! Check it here: ${pasteURL}`);
} else {
tweetBack(userID, output);
}
```
This way, users could still run complex commands and access their output, even if Twitter's character limits were restrictive.
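The `createPaste` helper depends on whichever paste service is used; a sketch against a hastebin-style API (the URL and response shape here are assumptions, not the service I actually used) might be:
```javascript
const axios = require('axios');

// Hypothetical paste endpoint; any pastebin-style API with a POST route would work.
async function createPaste(output) {
  const res = await axios.post('https://paste.example.com/documents', output, {
    headers: { 'Content-Type': 'text/plain' },
  });
  return `https://paste.example.com/${res.data.key}`; // hastebin-style response
}
```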
## Phase 4: Going Live and User Reactions
Finally, after weeks of coding, testing, and refining, the bot was ready to go live. I made the bot publicly accessible and tweeted out a simple message explaining how to interact with it. Within hours, users were tweeting `generate` to create containers, running `neofetch` to see their system stats, and even running complex commands like `apt update`.
The response was overwhelming. People loved the idea of managing a Linux server using only Twitter. The bot provided a fun, interactive way to introduce users to Linux without needing them to understand terminal commands or SSH keys.
## At the end of the day...
Becoming the first Linux host on Twitter was an incredible journey, blending the power of Docker, the simplicity of Twitter's API, and a dose of creative coding. What started as a wild idea quickly evolved into a fully-fledged project that allowed people to manage Linux containers through social media. The integration of social media and DevOps opened up fascinating possibilities, and I believe this was just the beginning of what could be done in this space.
However, since Elon Musk's acquisition of Twitter and its transformation into X, the platform's API has become significantly more expensive, making it no longer feasible to continue this project in its original form. The increased cost of access to the API means that projects like this, which rely heavily on interaction through the platform, are now difficult to sustain without a large budget.
Despite this setback, I'm excited to have developed this system in the first place. It was an innovative experiment that pushed the boundaries of what can be done with social media and cloud infrastructure. The project taught me a lot about integrating two very different worlds—social media and Linux hosting—and there's no doubt that the lessons I learned here will fuel future innovations.
Even though it's no longer feasible on X, this experiment will remain a unique milestone in my journey through technology. Who knows what's next? Maybe the future holds even more exciting possibilities for blending technology with unconventional platforms.
And that's the story of how I became the first Linux host on Twitter!

<!-- lead -->
Streamlining Node.js Tunnels: Isolation, port management, and resource efficiency for peak performance!
In this post, I dive deeply into recent optimizations made to a Node.js clustered application managing tunneled requests with isolated connections. I explore the issues with the initial setup, outline each enhancement in detail, and contrast the old and new methods. These changes aim to improve tunnel isolation, streamline resource management, and prevent critical errors that could disrupt the application's operation.
# Source
## https://git.ssh.surf/hypermc/hyperMC-Web-Relay
# Git Commit
## https://s.nodejs.lol/ff51iaPLY
## Initial Setup and Issues
Our application originally served HTTP requests over a peer-to-peer network using **clustered Node.js workers**. Each incoming request established a **tunnel** to relay data through the `hyperdht` protocol using public keys derived from subdomain headers. The tunnels enabled communication to unique remote peers, allowing each HTTP request to reach its intended destination.
### Key Components in the Original Code
1. **Clustered Node.js Workers**: Using `cluster`, the application spawns multiple workers, leveraging all CPU cores for better concurrency and faster request handling.
2. **HyperDHT Tunnels**: For each request, the application creates a tunnel to relay data between the client and the destination using the `hyperdht` protocol.
3. **Port Management for Tunnels**: To assign ports for tunnel servers, I used randomly generated port numbers within a specified range.
### The Problem
As the application scaled, several issues began to emerge:
1. **Tunnel Confusion Across Requests**:
- Since tunnels weren't strictly isolated to individual requests, responses sometimes bled into unrelated requests, causing data mix-ups.
- Persistent tunnels without proper cleanup led to stale or unintended connections, increasing the risk of incorrect data delivery.
2. **Port Conflicts**:
- The application frequently encountered `EADDRINUSE` errors, meaning some generated ports were already in use by other tunnels.
- Port conflicts led to worker crashes, causing downtime and reduced concurrency.
3. **Inefficient Resource Management**:
- Tunnels remained open even after requests completed, resulting in unnecessary resource consumption.
- Workers were busy managing unused connections instead of handling new requests, leading to performance bottlenecks.
Given these challenges, I set out to improve tunnel isolation, ensure reliable port availability, and enhance resource efficiency.
## New Approach: Enhanced Isolation, Dynamic Port Allocation, and Resource Management
To tackle these issues, I implemented several key improvements:
### 1. Strict Tunnel Isolation Per Request
Previously, tunnels were reused across requests, leading to data mix-ups and unintended connections. In the new approach:
- **Unique Tunnel Instances**: Each HTTP request now creates a dedicated `tunnelServer` instance, serving only that specific request. This ensures strict one-to-one mapping between the request and the tunnel, eliminating any chance of cross-request interference.
- **No Shared Tunnel State**: By eliminating shared tunnel tracking objects, each request operates with complete isolation, reducing complexity and risk of data leakage.
**Code Difference**:
**Old Method**:
```javascript
if (!tunnels[publicKey]) {
tunnels[publicKey] = port; // Assign port to a tunnel that may get reused
}
```
**New Method**:
```javascript
const tunnelServer = net.createServer((servsock) => {
// Dedicated tunnel for each request
});
```
With this change, each tunnel becomes ephemeral, existing solely to complete a single request before it's removed, reducing unintended interactions between requests.
### 2. Robust Port Availability Check with `getAvailablePort`
In the initial implementation, the application generated random ports without checking their availability, leading to frequent `EADDRINUSE` errors. To address this:
- **Port Checking with `net.createServer`**: I enhanced `getAvailablePort` by creating a temporary server to verify port availability. If the port is free, the function closes the test server and assigns that port to the new tunnel. If the port is already in use, it retries until it finds a free port.
- **Automatic Retry Mechanism**: This approach ensures no `EADDRINUSE` errors by dynamically testing ports until an available one is found.
**Code Difference**:
**Old Method**:
```javascript
const port = 1337 + Math.floor(Math.random() * 1000); // No check for availability
```
**New Method**:
```javascript
async function getAvailablePort() {
return new Promise((resolve, reject) => {
const tryPort = () => {
const port = 1337 + Math.floor(Math.random() * 1000);
const tester = net.createServer()
.once('error', (err) => {
if (err.code === 'EADDRINUSE') {
tryPort(); // Retry if port is in use
} else {
reject(err);
}
})
.once('listening', () => {
tester.close(() => resolve(port)); // Port is available
})
.listen(port, '127.0.0.1');
};
tryPort();
});
}
```
This method guarantees that ports are only assigned if they are actually available, ensuring reliable tunnel creation and eliminating port-related crashes.
### 3. Automatic Tunnel Closure for Efficient Resource Management
Previously, tunnels remained open even after completing requests, wasting system resources and risking data leaks. Now, each tunnel is closed as soon as the associated response finishes.
- **Tunnel Lifecycle Bound to Request Lifecycle**: Using the `res.on('finish')` event, the tunnel server closes immediately after the response is sent, freeing resources for other requests.
- **Reduced Memory and CPU Overhead**: By closing tunnels promptly, workers are freed to handle new requests, reducing CPU and memory consumption.
**Code Difference**:
**Old Method**:
```javascript
// Tunnels were left open, requiring manual cleanup and causing resource issues
```
**New Method**:
```javascript
res.on('finish', () => {
tunnelServer.close(() => {
if (DEBUG === 1 || CONINFO === 1) console.log("Tunnel closed after request completion.");
});
});
```
With this approach, the system efficiently reclaims resources after each request, making the application more scalable and responsive under load.
## Detailed Code Walkthrough
Here's the fully optimized code with all the changes:
```javascript
const cluster = require('cluster');
const numCPUs = require('os').cpus().length;
const net = require('net');
if (cluster.isMaster) {
console.log(`Master ${process.pid} is running`);
console.log(`Total Workers ${numCPUs * 4}`);
for (let i = 0; i < numCPUs * 4; i++) {
cluster.fork();
}
cluster.on('exit', (worker, code, signal) => {
console.log(`Worker ${worker.process.pid} died`);
});
} else {
const fs = require('fs');
const http = require('http');
const httpProxy = require('http-proxy');
const HyperDHTServer = require('hyperdht');
const b32 = require("hi-base32");
const agent = new http.Agent({ maxSockets: Number.MAX_VALUE });
const content = fs.readFileSync('404.txt', 'utf8');
const DEBUG = 0;
const CONINFO = 0;
const dhtServer = new HyperDHTServer();
const startServer = async () => {
console.log(`Worker ${process.pid} started`);
await dhtServer.ready();
const proxy = httpProxy.createProxyServer({
ws: true,
agent: agent,
timeout: 360000
});
const server = http.createServer(async function (req, res) {
try {
const split = req.headers.host.split('.');
const publicKey = Buffer.from(b32.decode.asBytes(split[0].toUpperCase()));
if (publicKey.length < 32) {
console.log("Invalid Connection!");
res.writeHead(418, { 'Content-Type': 'text/html' });
res.end(content);
return;
}
const port = await getAvailablePort();
const tunnelServer = net.createServer(function (servsock) {
const socket = dhtServer.connect(publicKey);
let open = { local: true, remote: true };
servsock.on('data', (d) => socket.write(d));
socket.on('data', (d) => servsock.write(d));
const remoteend = () => {
if (open.remote) socket.end();
open.remote = false;
};
const localend = () => {
if (open.local) servsock.end();
open.local = false;
};
servsock.on('error', remoteend);
servsock.on('finish', remoteend);
servsock.on('end', remoteend);
socket.on('finish', localend);
socket.on('error', localend);
socket.on('end', localend);
});
tunnelServer.listen(port, "127.0.0.1", () => {
if (DEBUG === 1 || CONINFO === 1) console.log(`Tunnel server listening on port ${port}`);
proxy.web(req, res, {
target: 'http://127.0.0.1:' + port
}, function (e) {
console.log("Proxy Web Error: ", e);
res.writeHead(404, { 'Content-Type': 'text/html' });
res.end(content);
});
res.on('finish', () => {
tunnelServer.close(() => {
if (DEBUG === 1 || CONINFO === 1) console.log("Tunnel closed after request completion.");
});
});
});
} catch (e) {
console.error("Error Occurred: ", e);
}
});
server.listen(8081, () => {
console.log(`Worker ${process.pid} listening on port 8081`);
});
};
startServer().catch(console.error);
async function getAvailablePort() {
return new Promise((resolve, reject) => {
const tryPort = () => {
const port = 1337 + Math.floor(Math.random() * 1000);
const tester = net.createServer()
.once('error', (err) => {
if (err.code === 'EADDRINUSE') {
tryPort();
} else {
reject(err);
}
})
.once('listening', () => {
tester.close(() => resolve(port));
})
.listen(port, '127.0.0.1');
};
tryPort();
});
}
}
```
## Final Thoughts
The new design introduces strict isolation for tunnels, efficient port management, and automatic resource cleanup. By implementing these changes, I:
- Solved the `EADDRINUSE` errors by dynamically checking port availability.
- Isolated tunnels to prevent cross-request data confusion.
- Enhanced performance and scalability by closing tunnels immediately after requests finish.
These updates not only improve reliability but also ensure that the application scales effectively, even under heavy load. This level of optimization is essential for any high-traffic Node.js service, as it directly impacts the user experience, system stability, and overall application performance.

<!-- lead -->
How I built RayAI into this Very Blog.
In my previous post, <u>[Building a Feature-Rich Blog Platform with Node.js, Express, and Markdown](https://blog.raven-scott.fyi/building-a-feature-rich-blog-platform-with-nodejs-express-and-markdown)</u>, I walked through the technical architecture of a blog platform designed for flexibility, performance, and ease of content management. Today, I'm introducing a new feature: **RayAI** — an interactive AI chat system integrated directly into the blog.
RayAI allows visitors to ask questions, get real-time responses, and even interact with the blog content on a deeper level. Think of it as an intelligent assistant embedded within the blog, ready to help users understand, explore, and engage with the content. This is not just a simple Q&A bot; RayAI can handle markdown, provide code snippets, and even let users copy content for their own use.
In this post, I'll take you through every step of integrating RayAI into the blog, from the front-end interface to the back-end API handling. Let's dive into the details of this integration and see how it all comes together.
# Source
## https://git.ssh.surf/snxraven/ravenscott-blog
## Why Implement RayAI?
The primary motivation behind RayAI is to enhance user engagement. Static blog posts are great, but they often leave the user with questions or areas where they might want further explanation. RayAI bridges that gap by offering:
1. **Real-time Chat**: Users can interact directly with the AI to get instant responses to questions.
2. **Markdown Support**: RayAI interprets markdown, making it perfect for developers who might want to see code snippets or explanations in a well-formatted way.
3. **Code Snippet Copying**: Users can easily copy code from AI responses, reducing the time they would spend manually copying and pasting.
4. **Interactive Tools**: RayAI includes utilities for checking logs, system stats, and even fetching specific data directly within the chat interface.
With these features in mind, let's explore how RayAI was integrated into the blog platform.
## The Front-End: Creating the Chat Interface
The **front-end** implementation of RayAI begins with the **`chat.ejs`** file, which defines the structure of the chat interface. This file includes the layout, user input area, and the section for displaying chat messages. It also leverages **Bootstrap** for styling and **FontAwesome** for icons, keeping the UI clean and responsive.
### Setting Up the HTML Structure
Here's the code for `chat.ejs`:
```html
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="description" content="<%= process.env.OWNER_NAME %>'s Blog">
<title><%= title %> | <%= process.env.OWNER_NAME %>'s Blog</title>
<!-- Bootstrap and Custom Styles -->
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha1/dist/css/bootstrap.min.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0-beta3/css/all.min.css">
<link rel="stylesheet" href="<%= process.env.HOST_URL %>/css/chat.css">
</head>
<body class="bg-dark text-white">
<!-- Chat Container -->
<div class="chat-container">
<!-- Navbar -->
<nav class="navbar navbar-expand-lg navbar-dark">
<div class="container-fluid">
<a class="navbar-brand" href="<%= process.env.HOST_URL %>"><%= process.env.SITE_NAME %></a>
<button class="navbar-toggler" type="button" data-bs-toggle="collapse" data-bs-target="#navbarNav" aria-controls="navbarNav" aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarNav">
<ul class="navbar-nav ms-auto">
<% menuItems.forEach(item => { %>
<li class="nav-item">
<a class="nav-link" href="<%= item.url %>" <%= item.openNewPage ? 'target="_blank"' : '' %>><%= item.title %></a>
</li>
<% }) %>
</ul>
</div>
</div>
</nav>
<!-- Chat Box -->
<div class="chat-box">
<div id="messages" class="messages"></div>
<!-- Input area -->
<div class="input-area">
<textarea id="messageInput" class="form-control mb-2" rows="3" placeholder="Type your message..." onkeydown="handleKeyDown(event)" autofocus></textarea>
<div class="d-flex justify-content-between">
<button class="btn btn-secondary" onclick="resetChat()">Reset Chat</button>
<div id="loading" class="spinner-border text-primary" role="status" style="display: none;">
<span class="visually-hidden">Loading...</span>
</div>
<button class="btn btn-primary" onclick="sendMessage()">Send Message</button>
</div>
</div>
</div>
</div>
<!-- Bootstrap JS -->
<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha1/dist/js/bootstrap.bundle.min.js"></script>
<!-- Custom Chat JS -->
<script src="<%= process.env.HOST_URL %>/js/chat.js"></script>
</body>
</html>
```
### Key Sections in the HTML
- **Navbar**: Displays the blog title and navigation items dynamically loaded from the `menuItems` array.
- **Chat Box**: This is where the chat messages between the user and RayAI are displayed. Each message is dynamically added via JavaScript.
- **Input Area**: Includes a textarea for input and two buttons—one for sending the message, and the other for resetting the chat.
#### Styling with Bootstrap and FontAwesome
The **Bootstrap CSS framework** is used to ensure the page is responsive and user-friendly. I also included **FontAwesome** for icons, like the send button and the loading spinner, ensuring the interface looks polished.
## Handling User Input and Chat Logic: `chat.js`
Now that the front-end structure is in place, let's look at the **JavaScript** that powers the chat functionality. This file, **`chat.js`**, is responsible for:
- Handling key events (like sending messages when the user presses Enter)
- Interacting with the AI via API calls
- Displaying messages in the chat window
- Managing the loading indicator while waiting for responses
### Handling Key Events and Sending Messages
The **key event handler** ensures that when the user presses "Enter", the message is sent without needing to click the "Send" button.
```javascript
// Handles key down event to send message on Enter
function handleKeyDown(event) {
if (event.key === 'Enter' && !event.shiftKey) {
event.preventDefault();
sendMessage();
}
}
```
This function ensures that users can submit messages quickly by hitting Enter, making the chat experience smooth.
### Sending Messages to the API
The **`sendMessage`** function is the core of RayAI's interaction. It takes the user input, sends it to the AI backend, and displays the AI's response.
```javascript
// Sends a message to the chat API
async function sendMessage() {
const messageInput = document.getElementById('messageInput');
let message = messageInput.value.trim();
if (message === '') return;
// Encode the message to avoid XSS attacks
message = he.encode(message);
// Display the user's message in the chat
displayMessage(message, 'user');
messageInput.value = ''; // Clear the input
toggleLoading(true); // Show loading indicator
try {
const response = await fetch('https://infer.x64.world/chat', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify({ message: message })
});
if (response.ok) {
const data = await response.json();
displayMessage(data.content, 'assistant');
} else {
handleErrorResponse(response.status);
}
} catch (error) {
displayMessage('Error: ' + error.message, 'assistant');
} finally {
toggleLoading(false); // Hide loading indicator
}
}
```
- **API Interaction**: This function sends the user's message to the API endpoint `https://infer.x64.world/chat`, which handles the AI interaction.
- **XSS Protection**: The message is encoded using `he.encode()` to prevent cross-site scripting attacks.
- **Message Display**: After the message is sent, it's displayed in the chat window and the loading spinner is shown while waiting for the response.
### Displaying Chat Messages
Once the API responds, the message is displayed in the chat box using the **`displayMessage`** function. This function takes care of rendering both user and AI messages in the chat window.
```javascript
// Displays a message in the chat window
function displayMessage(content, sender) {
const messages = document.getElementById('messages');
const messageElement = document.createElement('div');
messageElement.classList.add('message', sender);
// Decode HTML entities and render Markdown
const decodedContent = he.decode(content);
const htmlContent = marked(decodedContent);
messageElement.innerHTML = htmlContent;
messages.appendChild(messageElement);
messages.scrollTop = messages.scrollHeight; // Scroll to the bottom of the chat
// Highlight code blocks if any
document.querySelectorAll('pre code').forEach((block) => {
hljs.highlightElement(block);
if (sender === 'assistant') {
addCopyButton(block); // Add copy button to code blocks
}
});
// Add "Copy Full Response" button after each assistant response
if (sender === 'assistant') {
addCopyFullResponseButton(messages, messageElement);
}
}
```
Key features of this function:
- **Markdown Support**: RayAI's responses are parsed as Markdown using **`marked`**, allowing code snippets and formatted text to display beautifully.
- **Scroll Management**: Automatically scrolls the chat window to the bottom when a new message is added.
- **Code Highlighting**: If the AI responds with a code snippet, it's highlighted using **highlight.js**, improving readability for developers.
- **Copy Functionality**: Adds a "Copy" button to code blocks, allowing users to copy snippets with a single click.
### Adding Utility Buttons
RayAI goes beyond just chat by offering interactive tools directly within the chat interface. Here's how we implement the "Copy Full Response" and "Copy Code" buttons.
```javascript
// Adds "Copy Full Response" button below the assistant response
function addCopyFullResponseButton(messagesContainer, messageElement) {
const copyFullResponseButton = document.createElement('button');
copyFullResponseButton.classList.add('copy-button');
copyFullResponseButton.textContent = 'Copy Full Response';
copyFullResponseButton.addEventListener('click', () => copyFullResponse(messageElement));
messagesContainer.appendChild(copyFullResponseButton);
}
```
This button copies the entire AI response, including any markdown or code blocks, into the user's clipboard in Markdown format.
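The `addCopyButton` helper called from `displayMessage` is not shown above. Here is a minimal sketch of how these two helpers could be implemented with the Clipboard API; the function names match the calls in the code above, but the bodies are illustrative (this sketch copies plain text via `innerText`, whereas the actual implementation reconstructs Markdown):

```javascript
// Adds a "Copy" button to a highlighted code block (illustrative sketch)
function addCopyButton(block) {
    const button = document.createElement('button');
    button.classList.add('copy-button');
    button.textContent = 'Copy';
    button.addEventListener('click', () => {
        // Write the raw code text to the clipboard
        navigator.clipboard.writeText(block.textContent)
            .then(() => { button.textContent = 'Copied!'; })
            .catch(() => { button.textContent = 'Copy failed'; });
    });
    block.parentElement.appendChild(button); // Place the button inside the <pre> wrapper
}

// Copies the entire assistant response as plain text (illustrative sketch)
function copyFullResponse(messageElement) {
    navigator.clipboard.writeText(messageElement.innerText);
}
```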
## The Backend: RayAI API Integration
The backbone of RayAI's functionality lies in how it communicates with an external AI service, processes the user's inputs, and returns intelligent, contextual responses. In this section, I will break down how RayAI handles requests, the architecture behind it, and the measures taken to ensure smooth interaction between the blog platform and the AI API.
### API Overview and How It Works
At its core, the AI powering RayAI is hosted at an external endpoint, `https://infer.x64.world/chat`. This API accepts POST requests with user input and returns a generated response. To integrate this with the blog platform, the JavaScript on the front-end captures the user's message, sends it to the API, and processes the AI's response for display within the chat interface.
The **key objectives** of this integration are:
1. Seamlessly send user input to the AI API.
2. Receive and display the response from the API in real time.
3. Handle network issues or server overloads gracefully.
4. Secure communication to prevent potential vulnerabilities, such as cross-site scripting (XSS) or API abuse.
### Sending Requests to the AI API
The main logic for sending user messages to the API is contained in the **`sendMessage`** function. Let's go deeper into the code to explain how the back-and-forth communication is handled.
Here's the function once again:
```javascript
async function sendMessage() {
const messageInput = document.getElementById('messageInput');
let message = messageInput.value.trim();
if (message === '') return;
// Encode the message to avoid XSS attacks
message = he.encode(message);
// Display the user's message in the chat
displayMessage(message, 'user');
messageInput.value = ''; // Clear the input
toggleLoading(true); // Show loading indicator
try {
// Send the user's message to the AI API
const response = await fetch('https://infer.x64.world/chat', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify({ message: message })
});
// Handle the API response
if (response.ok) {
const data = await response.json();
displayMessage(data.content, 'assistant');
} else {
handleErrorResponse(response.status);
}
} catch (error) {
displayMessage('Error: ' + error.message, 'assistant');
} finally {
toggleLoading(false); // Hide loading indicator
}
}
```
#### Breaking Down the Code
1. **Input Validation**:
Before sending any request, the user's input is first **trimmed** to remove unnecessary whitespace. If the input is empty (i.e., no text was entered), the function exits early to avoid sending unnecessary requests.
2. **XSS Protection**:
User input is **encoded** using `he.encode()`. This is crucial for preventing **cross-site scripting (XSS)** attacks, where malicious users could inject harmful code into the chat. By encoding the input, we ensure that any special characters are safely transformed, so the AI API only processes clean text.
3. **Displaying the User's Message**:
Once the message is encoded, it is immediately displayed in the chat interface (before even getting a response from the AI). This gives the user a smooth, instant feedback loop and ensures that their input is visually confirmed.
4. **Sending the Request**:
The **`fetch()`** function sends the message as a **POST request** to the external API (`https://infer.x64.world/chat`). Here's what happens in the background:
- The request contains a JSON object with the key `message` holding the user's input.
- Headers are set to indicate that the request body is JSON (`'Content-Type': 'application/json'`), which is what the API expects.
5. **Handling the Response**:
- **Success**: If the response from the API is successful (status code 200), the response body (a JSON object) is parsed. The content generated by the AI is then passed to the `displayMessage` function to be shown as an AI response in the chat interface.
- **Error**: If the response is not successful (e.g., status codes like 400 or 500), the `handleErrorResponse` function is called to manage error feedback to the user.
6. **Loading Indicator**:
While waiting for the API to respond, a loading indicator (a spinner) is displayed, improving user experience by signaling that the system is processing their request. This indicator is hidden once the response is received.
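One helper referenced throughout but not shown above is `toggleLoading`. A minimal sketch, assuming the `#loading` spinner element from `chat.ejs` (which starts hidden via its inline style):

```javascript
// Shows or hides the loading spinner defined in chat.ejs (#loading)
function toggleLoading(isLoading) {
    const spinner = document.getElementById('loading');
    spinner.style.display = isLoading ? 'inline-block' : 'none';
}
```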
### Error Handling and Rate Limiting
Integrating a third-party API like this comes with challenges, such as network errors, rate limits, and potential server overloads. These are handled through robust error-checking and failover mechanisms.
```javascript
function handleErrorResponse(status) {
if (status === 429) {
displayMessage('Sorry, I am currently too busy at the moment!', 'assistant');
} else {
displayMessage('Error: ' + status, 'assistant');
}
}
```
#### Handling Rate Limiting (429 Status Code)
When multiple users interact with RayAI simultaneously, or when the API receives too many requests in a short span of time, it may respond with a **429 (Too Many Requests)** status. This is known as **rate limiting**, a mechanism used by APIs to prevent overloading.
In such cases, RayAI responds with a friendly message like:
> "Sorry, I am currently too busy at the moment!"
This ensures users know the service is temporarily unavailable, rather than simply failing silently. Additionally, **retry logic** could be implemented if needed, to automatically attempt a new request after a short delay.
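That retry logic is not part of the current implementation, but here is a minimal sketch of what it could look like: a hypothetical `fetchWithRetry` wrapper that backs off and retries on 429 responses before giving up.

```javascript
// Retries a fetch call on 429 responses with a short exponential backoff (illustrative sketch)
async function fetchWithRetry(url, options, retries = 3, delayMs = 1000) {
    let response;
    for (let attempt = 0; attempt <= retries; attempt++) {
        response = await fetch(url, options);
        if (response.status !== 429) break;
        // Wait before retrying, doubling the delay on each attempt
        await new Promise(resolve => setTimeout(resolve, delayMs * 2 ** attempt));
    }
    return response; // A final 429 still flows into handleErrorResponse
}
```

`sendMessage` could then call `fetchWithRetry` in place of `fetch` without any other changes.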
#### Handling Network and Other Errors
Other common errors include server issues (status code 500) or connection issues (e.g., when the user's internet connection is unstable). These are captured within the `catch` block of the `try-catch` structure. In case of failure, an error message such as the following is displayed:
> "Error: {error.message}"
This ensures that even if something goes wrong, the user receives a clear explanation of the issue.
### Securing the API
While RayAI's front-end handles much of the logic, securing the API interaction is essential to prevent abuse, data breaches, or malicious attacks.
#### XSS and Input Validation
As mentioned earlier, we encode all user input using **`he.encode()`** before sending it to the API. This ensures that special characters (such as `<`, `>`, and `&`) are converted into their HTML-safe equivalents, protecting the application from cross-site scripting attacks.
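To make that concrete, here is a small illustration of what `he.encode()` does to markup-like input. It assumes the `he` library is loaded globally (as `chat.js` expects), and the exact entity format may vary with the library version:

```javascript
// `he` is assumed to be available as a global, loaded via a script tag
const raw = '<script>alert("hi")</script>';
const safe = he.encode(raw);

console.log(safe);
// e.g. '&#x3C;script&#x3E;alert(&#x22;hi&#x22;)&#x3C;/script&#x3E;'
// The encoded string can no longer execute if rendered back into the page
```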
#### Rate Limiting and Abuse Prevention
The external AI service includes built-in rate limiting (as indicated by the 429 error code), but further measures could be implemented to prevent API abuse:
1. **IP-Based Rate Limiting**: You can restrict the number of API requests allowed from a single IP address within a given time window. This prevents users or bots from spamming the service with excessive requests (see the sketch after this list).
2. **Authentication**: Adding a layer of authentication (such as API keys or OAuth) could ensure that only authorized users have access to RayAI's features.
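Neither measure exists in the current front-end-only setup, but as an illustration of the first idea: assuming the chat endpoint sat behind a Node/Express server, the `express-rate-limit` package could enforce a per-IP window (the numbers here are hypothetical):

```javascript
const express = require('express');
const rateLimit = require('express-rate-limit');

const app = express();

// Allow at most 30 chat requests per IP per minute (illustrative values)
const chatLimiter = rateLimit({
    windowMs: 60 * 1000, // 1-minute window
    max: 30,             // limit each IP to 30 requests per window
    message: { content: 'Sorry, I am currently too busy at the moment!' }
});

app.use('/chat', chatLimiter);
```

Requests beyond the limit would receive a 429, which the front-end already handles via `handleErrorResponse`.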
### Enhancing AI Responses
RayAI is more than just a simple chatbot. The backend API offers several features that enhance the experience:
1. **Markdown Rendering**:
RayAI's responses often include formatted text, especially when answering technical questions. The responses are processed using **marked.js** to ensure markdown syntax is rendered correctly in the chat.
2. **Code Highlighting**:
If the AI responds with a code snippet, it's automatically highlighted using **highlight.js**, which provides syntax highlighting for over 180 programming languages. This is particularly useful for developers who interact with RayAI.
3. **Copy Buttons**:
Each response that includes code also has a "Copy" button. This feature is implemented using custom JavaScript, which allows users to copy entire code blocks or full responses to their clipboard in a single click.
### Additional Tools in RayAI
Apart from chat functionality, RayAI also provides access to **interactive tools** through the chat interface. These include:
- **Live Logs**: Users can view live system logs in real-time by clicking a "Live Log" button.
- **System Stats**: Displays system metrics like CPU, memory, and GPU stats through the "GPU Stats" button.
- **Reset Chat**: This feature allows users to clear the current chat history and reset the conversation. It sends a POST request to `https://infer.x64.world/reset-conversation`, clearing the session data.
These tools are essential for advanced users who want to monitor system performance or interact with RayAI in more technical ways.
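For completeness, the `resetChat` function wired to the Reset button is a straightforward POST. A minimal sketch, assuming the endpoint requires no request body:

```javascript
// Clears the chat window and resets the server-side conversation (illustrative sketch)
async function resetChat() {
    try {
        await fetch('https://infer.x64.world/reset-conversation', { method: 'POST' });
        document.getElementById('messages').innerHTML = ''; // Clear displayed messages
    } catch (error) {
        displayMessage('Error: ' + error.message, 'assistant');
    }
}
```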
## Final Thoughts: The Power of RayAI
By integrating RayAI, the blog has evolved from a static platform into a dynamic, interactive experience. RayAI provides real-time interactions, makes technical content more accessible, and helps users engage with blog posts on a deeper level. Features like code highlighting, markdown rendering, and interactive tools bring immense value to both developers and casual readers.
In the future, RayAI could be extended even further with additional features like natural language search for blog posts, personalized recommendations, or even interactive tutorials. The possibilities are endless.
For now, I'm thrilled with how RayAI enhances the blog, and I can't wait to see how users engage with it.
If you want to try it yourself, head over to the <u>[RayAI Chat Page](https://chat.raven.scott-fyi)</u> and start a conversation!

### What is Oracle Free Tier?
When exploring cloud services, one of the most important factors to consider is how much you can accomplish without breaking the bank.
That's where **Oracle's Free Tier** comes in—a cloud offering that provides access to powerful computing, storage, networking, and database resources at no cost.
Oracle's Free Tier gives you the freedom to build and scale applications without worrying about upfront costs or limitations.
In this post, we'll dive into what Oracle Free Tier is, what it offers, and how you can take advantage of it.
### Access Oracle's free services today: https://www.oracle.com/cloud/free/
## Before we begin...
# What is a VPS (Virtual Private Server)?
<!-- lead -->
Why Oracle Cloud is still one of the best offerings on the planet since its launch.
A **Virtual Private Server (VPS)** is a type of server that operates in a shared environment but behaves as though it's a dedicated server. The VPS provider allocates a portion of their physical server's resources to you, along with an operating system, granting you the freedom to build websites, run software, or even host game servers.
With a VPS, you essentially have your own **virtual machine** on the server, allowing you complete control over your space. This flexibility makes VPS hosting ideal when you require more power than a shared hosting account but don't want the hefty costs associated with a dedicated server. It combines the functionality and customization of a dedicated server at a fraction of the price.
Unlike shared hosting, where hundreds of websites are housed on the same server, VPS hosting offers much greater control over your resources. You can install custom software, configure settings, and tailor the environment to your specific needs. Best of all, VPS hosting is often far less expensive than dedicated hosting, making it a great choice for those seeking the best of both worlds.
## Why Do You Need a VPS?
VPS hosting offers enhanced security and privacy for individual websites and applications.
It's a fantastic option for hosting websites, online stores, or any other project that demands ample resources and scalability. VPS hosting is like a **server on steroids**, providing abundant resources and the ability to scale up or down as your needs evolve. It's an excellent choice for both beginners and experienced users alike.
With a VPS, you'll experience a significant speed boost for your website—much faster than shared hosting can offer. If you're running a **mission-critical** website or project that requires zero downtime or experiences significant traffic fluctuations, a VPS is a must.
Additionally, virtual private servers tend to be as affordable as quality shared hosting plans but come with far less downtime.
Besides web hosting, a VPS is a perfect solution for hosting game servers. Whether you're running a **Minecraft** server or another game, you can easily set up a VPS and play with your friends.
When your shared hosting account reaches its limits, it's time to upgrade to a VPS to harness the full power of virtual servers.
A VPS hosting environment allows you to host websites without sharing resources with others on the same server, giving you complete control over your environment. You can also host multiple websites on a single physical server, lowering costs and improving performance.
For high-traffic websites, VPS hosting is the way to go—it delivers superior performance and reliability compared to shared hosting.
### Free Trial Explained
Oracle Cloud offers a **free trial** for first-time customers, providing an excellent opportunity to explore the platform and test its capabilities before making a financial commitment. This trial allows users to familiarize themselves with Oracle's services, ensuring compatibility and performance within their specific use cases.
The free trial is a hands-on way for users to gain valuable experience and understand how to best utilize the platform's features. It helps potential customers feel more confident and secure about how Oracle Cloud will perform in their environment, making it an ideal way to experiment with different cloud solutions.
The trial lasts for **30 days** and includes **$300 in credits**, giving users full access to Oracle Cloud's upgraded services during this period. These credits can be used across a wide range of services, including databases, analytics, compute, and container engines for Kubernetes.
It's important to note that the **free trial** is different from the **Always Free Tier**. Once the trial ends, users transition into the Always Free Tier, which provides a set of free resources that last indefinitely. The trial's credits are consumed at a discounted rate, allowing users to fully explore Oracle Cloud's premium offerings before the free tier kicks in.
This trial is available worldwide in multiple regions, but remember to distinguish between the **trial** and the **Always Free Tier**, as they serve different purposes and offer different benefits.
#### Now that we know what those concepts are, let's talk about free hosting.
## Oracle Always Free Tier: Features
Oracle offers an exceptionally generous **Always Free Tier** that provides multiple services at no cost.
Below is a summarized breakdown of what Oracle offers within the **Always Free** plan:
### **Compute**
- **AMD Compute Instance**
2 AMD-based Compute VMs with 1/8 OCPU and 1 GB of memory each.
- **Arm Compute Instance**
4 Arm-based Ampere A1 cores with 24 GB of memory, which can be used as a single VM or split into up to 4 VMs.
- **3,000 OCPU hours** and **18,000 GB hours** per month.
### **Developer Services**
- **APEX (Application Express)**
A low-code platform for building, integrating, and innovating applications with up to 744 hours per instance.
### **Networking**
- **Flexible Network Load Balancer**
Provides automated traffic distribution, ensuring services remain available by routing traffic only to healthy servers.
Always Free: 1 instance.
- **Load Balancer**
Allows for highly available load balancers within your Virtual Cloud Network (VCN) with 10 Mbps bandwidth.
Always Free: 1 instance.
- **Outbound Data Transfer**
Always Free: Up to 10 TB per month.
- **Service Connector Hub**
A cloud message bus platform for moving data between Oracle Cloud services.
Always Free: 2 service connectors.
- **Site-to-Site VPN**
Secure IPSec connection between your on-premises network and your VCN.
Always Free: 50 IPSec connections.
- **VCN Flow Logs**
Logs traffic through your VCN for auditing and security purposes.
Always Free: 10 GB per month shared across OCI Logging services.
- **Virtual Cloud Networks (VCN)**
Allows the creation of software-defined networks.
Always Free: 2 VCNs with IPv4 and IPv6 support.
### **Observability and Management**
- **Application Performance Monitoring**
Monitors application performance and diagnoses issues.
Always Free: Up to 1000 tracing events and 10 Synthetic Monitoring runs per hour.
- **Email Delivery**
Managed solution for sending secure, high-volume emails.
Always Free: Up to 100 emails sent per day.
- **Logging**
Centralized, scalable logging service.
Always Free: Up to 10 GB per month.
- **Monitoring**
Queries metrics and manages alarms to monitor the health of cloud resources.
Always Free: Up to 500 million ingestion datapoints, 1 billion retrieval datapoints.
- **Notification**
Sends alerts based on cloud resource activities.
Always Free: Up to 1 million HTTP notifications and 1,000 email notifications per month.
### **Oracle Databases**
- **Autonomous Database**
Fully managed databases like Oracle Autonomous Transaction Processing and Autonomous Data Warehouse.
Always Free: Up to two databases total.
- **HeatWave**
An integrated service for transactions, analytics, and generative AI.
Always Free: 1 standalone instance with 50 GB of storage and 50 GB of backup storage.
- **NoSQL Database**
Fully managed, low-latency NoSQL database.
Always Free: Up to 133 million reads, 133 million writes per month, and 25 GB of storage per table (up to 3 tables).
### **Others**
- **Console Dashboards**
Custom dashboards to monitor resources and key metrics.
Always Free: Up to 100 dashboards.
### **Security**
- **Bastions**
Provides restricted SSH access to resources without public endpoints.
Always Free: Up to 5 OCI Bastions.
- **Certificates**
Issuance, storage, and management of certificates, including automatic renewal.
Always Free: Up to 5 Private CAs and 150 private TLS certificates.
- **Vault**
Manages master encryption keys and secrets with hardware security module (HSM) protection.
Always Free: Up to 20 key versions and 150 secrets.
### **Storage**
- **Archive Storage**
Unstructured archive storage.
Always Free: Up to 20 GB total for standard, infrequent, and archive storage.
- **Block Volume Storage**
Boot and block volume storage.
Always Free: Up to 2 block volumes (200 GB total) and 5 backups.
- **Object Storage**
Object Storage API requests.
Always Free: Up to 50,000 API requests per month.
- **Object Storage - Infrequent Access**
Unstructured storage for infrequently accessed data.
Always Free: Up to 20 GB total.
- **Object Storage - Standard**
Standard unstructured storage.
Always Free: Up to 20 GB total for standard, infrequent, and archive storage.
## Oracle Free Tier: A Comprehensive Breakdown of the Summary Above
Oracle Cloud's Always Free Tier stands out as a top choice for developers, startups, and businesses that require a robust, scalable cloud environment without the burden of upfront costs. With an impressive suite of compute, storage, database, and networking services, Oracle consistently outperforms competitors, offering far more generous free resources that can support everything from small web applications to more demanding, high-traffic environments.
In my personal experience, I've hosted Discord-Linux.com on Oracle Cloud for over 5 years, and I can confidently say that I have had zero issues during this time. The platform has proven to be reliable and efficient, handling everything from server management to traffic fluctuations effortlessly. In fact, the very blog you are reading is running on Oracle Cloud, utilizing only the free resources available under the Always Free Tier. Oracle's stability and generous offering make it an unbeatable solution for those looking for both reliability and scalability without the associated costs.
Below, we'll review Oracle's **Always Free Tier** offerings, highlight the strengths and limitations, and assess how they compare to other providers like AWS, Google Cloud, and Microsoft Azure.
### **Compute Services**
Oracle Cloud's **compute offerings** are perhaps the most significant draw of the Always Free Tier. The **AMD Compute Instances** and **Arm Compute Instances** provide considerable flexibility for users with a variety of needs.
- **AMD Compute Instance**: This includes **2 VMs** with 1/8 OCPU and 1 GB of memory each. While these specs may seem modest, for simple workloads such as small web servers, test environments, or lightweight applications, these VMs can be invaluable. The ability to run two instances simultaneously increases the scope of potential projects, making it possible to deploy a frontend/backend setup or even experiment with multi-node applications.
- **Arm Compute Instance**: The **Arm-based Ampere A1** cores are the stars of Oracle's free compute resources. You get **24 GB of memory**, which can be allocated to **one large VM or split into four smaller VMs**. This flexibility offers developers an excellent opportunity to scale their applications, test distributed systems, or handle larger workloads without paying a cent. The performance of Ampere A1 processors is highly competitive, and with up to **3,000 OCPU hours** per month, users can push these instances to handle significant loads, making them suitable for high-traffic websites, data processing tasks, or even hosting games like Minecraft.
Oracle's free compute offerings stand out for providing more CPU hours than competitors, allowing for sustained, non-stop operations, unlike other platforms that offer more limited timeframes.
### **Developer Services**
- **APEX (Application Express)**: Oracle APEX is one of the industry's leading **low-code platforms**, and Oracle's inclusion of this in the Always Free Tier is a major boon for developers looking to quickly build and deploy applications. This service is ideal for businesses or individuals looking to create **internal tools**, web apps, or even mobile backends without needing to manage large infrastructure. With **up to 744 hours** per instance, it's a valuable tool for anyone needing rapid development without the complexity of traditional coding.
### **Networking Features**
Networking is often where hidden costs accumulate in cloud platforms, but Oracle's Always Free Tier offers a range of useful services that help alleviate these concerns.
- **Flexible Network Load Balancer**: Oracle's **Network Load Balancer** ensures that your applications remain available, distributing traffic only to healthy servers. This is especially valuable for high-availability applications. While **1 instance** is available for free, it's sufficient for most small-scale projects that require automated traffic distribution.
- **Load Balancer**: For those who need a more traditional load balancing setup with **provisioned bandwidth** of up to 10 Mbps, this service provides excellent reliability. Again, **1 instance** is included, which can support basic scaling needs for your applications without additional cost.
- **Outbound Data Transfer**: A key feature here is the **10 TB per month** of free outbound data transfer. This is **exceptionally generous**, especially when compared to AWS or Google Cloud, which often charge after a very limited amount of free outbound data. This amount allows you to handle moderate to high-traffic websites, content delivery, or other data-heavy applications without worrying about expensive data transfer fees.
- **Site-to-Site VPN**: For hybrid cloud setups or secure on-premises connections, Oracle's free tier includes **50 IPSec VPN connections**. This feature is invaluable for enterprises or developers needing to securely extend their local network into the cloud.
- **VCN Flow Logs**: Oracle offers **up to 10 GB of VCN flow logs per month**. These logs are essential for auditing traffic, troubleshooting network issues, and ensuring security across your cloud infrastructure.
### **Observability and Management**
- **Application Performance Monitoring (APM)**: Application monitoring and tracing are typically expensive features in most cloud platforms, but Oracle includes **1000 tracing events** and **10 Synthetic Monitoring runs per hour**. This is particularly useful for developers needing to pinpoint performance bottlenecks or ensure their applications are running smoothly without extra cost.
- **Email Delivery**: Oracle's free tier allows you to send **up to 100 emails per day**, making it a viable solution for sending transactional emails, marketing campaigns, or notifications directly from your cloud applications.
- **Logging**: The **Logging service** allows for **up to 10 GB per month** of log data. This amount is sufficient for most small- to medium-scale applications and provides a scalable way to store and analyze logs without paying for third-party logging solutions.
- **Monitoring and Notifications**: Oracle offers **500 million ingestion datapoints** and **1 billion retrieval datapoints**, which is ample for even larger-scale applications. The **Notifications service** adds another layer of observability, sending up to **1 million HTTP notifications** or **1,000 emails per month**. These capabilities are highly advantageous for DevOps teams or developers needing to monitor performance metrics in real-time.
### **Oracle Databases**
Oracle is well-known for its database technology, and the Always Free Tier includes multiple offerings that can be pivotal for building robust data-driven applications.
- **Autonomous Databases**: Oracle's Autonomous Database products, such as **Autonomous Transaction Processing** and **Autonomous Data Warehouse**, offer up to **two databases** free of charge. These databases are ideal for organizations requiring high performance, automation, and scalability without the management overhead of traditional database administration.
- **HeatWave**: With **1 standalone HeatWave instance** available, Oracle allows users to perform **AI-enhanced analytics** and data processing without needing to integrate external services. HeatWave's ability to handle transactional workloads and lakehouse-scale analytics makes it a unique offering among cloud providers.
- **NoSQL Database**: For those working with **key-value**, document-based, or fixed-schema data, Oracle's NoSQL offering provides **133 million reads/writes per month** with **25 GB of storage per table**. This is a compelling choice for building scalable, low-latency applications without worrying about high database costs.
### **Security Features**
- **Bastions**: Securely managing cloud infrastructure can be complex, but Oracle simplifies this with **up to 5 Bastions**. These provide **restricted SSH access** to resources without exposing them to the internet, ensuring enhanced security.
- **Certificates**: Oracle's **Certificates service** allows for **automatic renewal** of up to **150 private TLS certificates**, along with **5 Private CAs**. This makes it easy for developers to manage secure communication between applications.
- **Vault**: OCI Vault is included with **20 key versions** and **150 secrets**. This provides a managed, secure way to handle encryption keys and secrets, which is essential for any sensitive or mission-critical application.
### **Storage Services**
Oracle's storage options are well-rounded and comprehensive:
- **Block Volume Storage**: Up to **2 block volumes** with **200 GB total** storage, along with **5 backups**, make Oracle's block storage a solid choice for hosting applications, databases, or even Docker containers.
- **Object Storage**: With **50,000 API requests per month** and **20 GB of standard and infrequent storage**, Oracle ensures that you have enough capacity for most personal or small-business use cases.
- **Archive Storage**: With **20 GB** of archive storage included, Oracle offers an easy solution for long-term backups or low-cost storage of infrequently accessed data.
### My Thoughts: Oracle Cloud's Always Free Tier
As the breakdown above shows, Oracle Cloud's **Always Free Tier** is hard to beat for developers, startups, and businesses that need a robust, scalable cloud environment without the burden of upfront costs.
Whether you're building a simple website, running a data-heavy application, or deploying complex services, Oracle Cloud provides the performance and flexibility needed to grow without incurring significant costs. Here's a more in-depth look at its advantages and potential drawbacks:
### **Pros**:
- **Generous compute resources**: Oracle's Always Free Tier offers **Arm-based Ampere A1 cores** and **AMD-based VMs**, giving users access to up to **24 GB of memory**. These resources far exceed what other cloud providers offer for free, enabling you to run substantial workloads, such as high-traffic web servers, data processing, or even game hosting.
- **Substantial outbound data transfer**: Oracle provides **10 TB per month** of outbound data transfer, which is especially generous compared to AWS, Google Cloud, and Azure. This is more than enough to handle the data requirements of most websites, applications, or content delivery networks without worrying about hidden fees.
- **Industry-leading Autonomous and NoSQL Databases**: Free access to Oracle's **Autonomous Database** and **NoSQL Database** ensures that users benefit from top-tier database management without the need for manual optimization or costly administration. This is a major selling point for developers who need powerful, scalable, and self-managing databases.
- **Extensive networking and security features**: Oracle includes **VPN**, **Site-to-Site VPN**, **Bastions**, and **Load Balancers**, all within the Always Free Tier. These networking and security tools are essential for securely connecting on-premise networks to the cloud and ensuring seamless service availability with minimal latency.
- **Comprehensive monitoring, logging, and notifications**: Oracle's observability services, including **Application Performance Monitoring**, **Logging**, **Monitoring**, and **Notifications**, provide deep insights into cloud applications. These tools offer real-time visibility into application health, performance metrics, and security, making it easy to maintain optimal operation without added cost.
### **Cons**:
- **Complex interface and setup**: While Oracle Cloud is highly powerful, its interface and setup process can be more complex than other platforms, such as AWS or Google Cloud, which offer more beginner-friendly user experiences. For those unfamiliar with Oracle's cloud ecosystem, the learning curve can be steep, requiring additional time and effort to master.
- **Limited third-party integrations**: Oracle's ecosystem may not offer as wide a selection of third-party integrations and tools as AWS or Google Cloud. If your project relies heavily on third-party services, you might find Oracle's range somewhat restrictive compared to the broad support offered by its competitors.
# **FAQs**
- **Can I use a Virtual Credit Card (VCC) with Oracle Cloud?**
Unfortunately, Oracle Cloud does not support Virtual Credit Cards (VCC).
To complete the sign-up process, you'll need to use a real credit card issued by a recognized bank.
This ensures verification and is necessary to activate your Oracle Cloud account, even for the free tier.
- **Will I ever be charged using a free tier account?**
From what I have noticed, Oracle does not seem to store payment details during the signup process for the Always Free service.
You must click the purple upgrade button, submit payment details, and wait 24 hours for your account to be upgraded to a paid version of Oracle Cloud.
Because of this, there should not be any charges imposed during the usage of an Always Free account.
- **Why was my Oracle VM made inaccessible even though I'm on the free tier?**
If your Oracle VM was created during the free trial period, it will be automatically locked after 30 days unless you upgrade to a paid account.
After the trial ends, you'll transition into the Always Free Tier, where any resources created under this tier will remain active indefinitely.
To retain the data from your current instance, you can terminate the premium instance created during the trial without deleting the boot disk.
You can then use that boot disk to create a new Always Free instance. Once booted, your server will resume as expected, with all your software and configurations intact.
- **Can I mine cryptocurrency on my Oracle Cloud account?**
No, Oracle strictly prohibits cryptocurrency mining on its cloud infrastructure.
Any account found engaging in crypto mining will be terminated immediately.
Oracle enforces this policy to prevent resource abuse and ensure fair use of its free tier services.
- **Why are all ports blocked on my Oracle VPS?**
By default, all incoming traffic is blocked to enhance security.
To allow specific traffic, you'll need to create **ingress rules** in your security group settings to open the required ports.
This ensures that only authorized traffic can access your virtual private server, offering an additional layer of protection.
- **Is Windows VPS available in the free tier?**
No, Windows VPS instances are not available in the Always Free Tier.
Access to Windows servers is reserved for upgraded accounts.
However, you can utilize Linux-based VMs, such as Ubuntu or Oracle Linux, in the free tier.
- **I received an "Out of Capacity" error. What should I do?**
Oracle Cloud resources are limited by region, and if your selected region has reached capacity, you'll receive this error.
To resolve this, you can either wait for Oracle to replenish resources in your chosen region or select another region with available capacity to create your instance.
I also find that upgrading your account is a great option to resolve this issue. Always free resources remain even if the account is upgraded.

<!-- lead -->
Tensions between WordPress and WP Engine are raising concerns about the future of WordPress and its open-source values.
<CENTER><iframe width="50%" height="150" scrolling="no" frameborder="no" allow="" src="https://w.soundcloud.com/player/?url=https%3A//api.soundcloud.com/tracks/1925202203&color=%2310121c&auto_play=true&hide_related=true&show_comments=false&show_user=false&show_reposts=false&show_teaser=false&visual=true"></iframe><div style="font-size: 10px; color: #cccccc;line-break: anywhere;word-break: normal;overflow: hidden;white-space: nowrap;text-overflow: ellipsis; font-family: Interstate,Lucida Grande,Lucida Sans Unicode,Lucida Sans,Garuda,Verdana,Tahoma,sans-serif;font-weight: 100;"><a href="https://soundcloud.com/snxraven" title="snxraven" target="_blank" style="color: #cccccc; text-decoration: none;">snxraven</a> · <a href="https://soundcloud.com/snxraven/open-source-breakdown" title="Open Source Breakdown" target="_blank" style="color: #cccccc; text-decoration: none;">Open Source Breakdown</a></div></CENTER>
## The Beginning
In recent days, the actions of WordPress.org and Automattic have raised serious concerns about the future of the open-source WordPress project and its relationship with developers, hosting providers, and users. While these entities claim to act in the best interests of the community, their recent choices have the potential to backfire, undermining trust, and pushing developers and web hosts to seek alternatives.
In this article, I will break down my thoughts on why the decisions made by WordPress and Automattic are not only detrimental but may ultimately erode their dominance within the content management system (CMS) space.
## The False Promise of Open Source
WordPress has long been championed as the beacon of open-source software. Its extensive ecosystem, flexibility, and community-driven development have made it one of the most popular CMS platforms globally. However, as Automattic, the company behind WordPress.com and WooCommerce, gains more influence, it appears the open-source spirit is taking a back seat to corporate interests. This shift can have disastrous effects on the community at large.
Automattic's insistence on enforcing the WordPress trademark is one example of this. WP Engine, a popular managed hosting provider, has come under fire from Automattic for allegedly misleading users by using "WP" in its name. WordPress.org's recent updates to its **trademark policy** even go so far as to imply WP Engine is purposefully confusing users into thinking they are officially affiliated with WordPress. This aggressive trademark enforcement sets a dangerous precedent.
## Alienating Developers and Hosts
At its core, WordPress thrives because of the vast network of developers and hosts who build, extend, and support it. Automattic's growing tendency to view these third-party contributors as competitors is counterproductive and could spell the beginning of an exodus of talent from the platform.
WP Engine is just one case. Many developers and hosts contribute back to WordPress by creating themes, plugins, and infrastructure that power millions of websites. These contributions are integral to the success of the CMS, yet the heavy-handed approach Automattic is taking suggests they see themselves as the sole proprietors of WordPress. They even went so far as to criticize WP Engine for only contributing **40 hours a week** compared to Automattic's **3,915 hours**, further intensifying the divide between corporate entities and independent contributors.
## Cutting Off Access to Resources
One of the most troubling developments is WordPress.org's decision to block WP Engine from accessing core resources on their platform. This move will likely affect WP Engine customers, leaving them without the critical infrastructure that WordPress.org provides. In essence, WordPress is asking WP Engine to replicate its entire infrastructure independently, including updates, security patches, directories, and more. This isn't just a blow to WP Engine—it's a disservice to the broader WordPress community that relies on these hosts.
Rather than working collaboratively with WP Engine to address concerns, WordPress.org has chosen to cut them off, effectively penalizing their users. These customers, who are already invested in the platform, will likely reconsider their allegiance to WordPress if the services they rely on degrade in quality.
## The Slippery Slope of Monopolizing Contributions
Another major issue is Automattic's increasing monopolization of contributions to WordPress. With their vast resources, they contribute an overwhelming share of hours to WordPress development. At first glance, this may seem like a positive. However, the imbalance of power means that Automattic is setting the agenda, steering the project to benefit its business model. This reduces the influence of independent developers and contributors and risks stagnating the diversity of ideas that have historically fueled the platform's growth.
This shift in dynamics could cause independent developers, who have built their livelihoods around WordPress, to feel sidelined or neglected. If their contributions are increasingly seen as insignificant or undervalued compared to Automattic's overwhelming presence, they may stop contributing altogether, further consolidating control in the hands of a single entity.
## The Communitys Growing Distrust
For years, the WordPress community has prided itself on its openness, inclusivity, and ability to foster innovation from all corners of the world. Automattic's actions, however, appear to be fracturing this community. The “scorched earth” approach taken by Automattic's leadership—demanding exorbitant sums of money from competitors and discrediting them in public forums—reeks of corporate greed.
## What Lies Ahead?
The trajectory Automattic and WordPress are on is unsustainable. By alienating core developers, web hosts, and their own user base, they risk driving away the very people who built WordPress into the powerhouse it is today.
The open-source nature of WordPress has always been its strength, allowing developers, designers, and users to collaborate and innovate freely. However, when a single entity exerts too much control, it undermines the core principles of the platform and forces people to seek alternatives. This is already happening, with some developers opting for platforms like **Joomla**, **Drupal**, **GHOST** or even building their own CMS from scratch to regain control over their content and infrastructure.
If Automattic continues to push forward with this “nuclear” approach, it may backfire spectacularly. What was once a thriving and vibrant community could fragment, with disgruntled developers and hosts breaking off to create new alternatives, leaving WordPress a shell of its former self.
## In the Eyes of the Community
The recent feud between WordPress and WP Engine has sparked widespread frustration and disappointment within the WordPress community. Users across platforms, including Reddit, have expressed a strong sentiment that WordPress and Automattic's actions are misguided, damaging, and ultimately harmful to the broader ecosystem. Here's a summary of the community's key concerns and reactions:
### Confusion and Fear of Overreach
Many users are concerned about the heavy-handed use of trademark enforcement, particularly the targeting of WP Engine for using "WP" in their branding. This has caused confusion and worry, as several other businesses in the WordPress space—such as WPForms, WP Rocket, and WP Astra—also use "WP" in their names. The fear is that these businesses could also become targets, leading to a chilling effect across the ecosystem. Users feel that Automattic's efforts to control the narrative around "WP" are excessive and could hurt smaller businesses in the space.
### Alienation of Developers and Web Hosts
A significant portion of the community views Automattic's approach as an attempt to consolidate control over WordPress at the expense of the open-source community. Some see this as Automattic using its power to extract money from competitors like WP Engine while diminishing contributions from those companies. This has led to a sense of alienation, with many expressing that Automattic's actions are pushing away developers and web hosts that have supported and contributed to WordPress for years.
### Disruption to Businesses and Users
The most immediate impact has been the disruption to WP Engine users, many of whom are now unable to update plugins and themes on their sites. This has caused a cascade of issues for businesses that rely on WP Engine for hosting, leading to frustration and even legal threats. Users who manage numerous websites on WP Engine have been left in a difficult position, having to explain the situation to clients and stakeholders.
### Disappointment with Leadership
The community has voiced strong criticism of Matt Mullenweg, CEO of Automattic, for his handling of the situation. Many see his actions as unprofessional, childish, and damaging to WordPress's reputation. Comparisons have been made to figures like Elon Musk, with users noting that this "scorched earth" approach is unbecoming of a leader in an open-source project. Some are even calling for Mullenweg to step down or be removed from his leadership role, as they fear his personal vendetta is undermining the integrity of WordPress.
### Calls for a Fork
In light of these events, some community members are seriously considering forking WordPress. What once seemed like an extreme option now feels like a necessary step to preserve the spirit of open source. Users are discussing the possibility of creating a version of WordPress free from Automattic's influence, where contributions are not dictated by a single company. This potential fork is seen as a way to return to the values that originally made WordPress successful.
## My Final Thoughts Thus Far
Automattic and WordPress.org must take a step back and consider the long-term consequences of their actions. A CMS platform is only as strong as the community that supports it. If they continue down this path of alienation and control, they will erode the foundation that has allowed WordPress to dominate the market for over a decade.
The WordPress community has always thrived on collaboration and shared success. By prioritizing corporate interests over the collective good, Automattic is at risk of turning WordPress into a product, rather than a project. And when that happens, developers, hosts, and users alike will be forced to ask themselves: Is WordPress still worth it?
The time for course correction is now.
# UPDATE 9.27
## Matt Mullenweg Talks About WordPress Situation
<CENTER>
<iframe width="560" height="315" src="https://www.youtube.com/embed/H6F0PgMcKWM?si=z4OWyA1bWnP8qgqq" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" referrerpolicy="strict-origin-when-cross-origin" allowfullscreen></iframe></CENTER>
### Community Sentiment Update
The impromptu interview with Matt Mullenweg, CEO of Automattic, has sparked widespread debate and a mix of emotions in the WordPress community. Matt's appearance was unexpected, leading to an unrehearsed discussion where he addressed a range of concerns, particularly surrounding the ongoing trademark disputes with WP Engine. The comment section of the video reveals a deepening divide within the community, as many users express frustration, confusion, and skepticism regarding both the handling of WordPress's trademark policies and Matt's responses during the interview.
### Concerns About Trademark Enforcement and Legal Strategy
One of the core issues brought up in the interview is Automattic's aggressive enforcement of the WordPress trademark, particularly against WP Engine, a prominent managed hosting provider. While many in the community believe WP Engine has built a successful business around the open-source CMS, Automattic's insistence on contribution requirements has led to accusations of overreach.
For example, one user, @distilledmark, expressed concern that Matt's stance could signal a shift in the WordPress ecosystem, where any company using “WP” in their name could become a target for trademark enforcement. Another commenter, @sodapoppug, pointed out the irony of trademark disputes over the use of "WP," noting how “saying 'WP' seems to defeat the purpose of speaking in acronyms since it's twice as many syllables as just saying 'WordPress.'” This playful jab reflects a broader sentiment that the enforcement seems heavy-handed and unnecessary, especially when smaller businesses have been using "WP" in good faith for years without issue.
This concern is amplified by @nephatrine's comment, which underscores how WP Engine's branding may confuse users into thinking they are affiliated with the core WordPress project. The fear that Automattic's legal actions may extend beyond WP Engine is echoed by many commenters, including @SoreBrain, who admits that they initially thought WP Engine was an official part of WordPress. This shows the confusion that exists within the community regarding the relationships between various WordPress-related companies.
### Community Frustration Over Matts Responses
Throughout the interview, many viewers felt that Matt failed to provide clear, fact-based answers to critical questions, often deflecting or giving vague responses. One commenter, @maxdayton614, criticized Matt for relying too much on emotional appeals rather than presenting concrete evidence. They noted that most of Matt's arguments seemed to be based on personal frustration, such as his claim that WP Engine hasn't contributed enough hours or money to WordPress. They wrote, “Almost every answer was emotionally charged. 'I'm the one committing to open source' or 'Look at all of the companies in good standing with me.'” This sentiment was echoed by others who were disappointed in the lack of legal documentation or hard evidence presented during the interview.
Another user, @rafalfaro, took a more critical stance, stating, “It should've been damage control, but he doubled down instead,” referring to Matt's decision to continue pushing his point rather than acknowledging the potential faults in his legal strategy. This approach led to several comments questioning whether Matt was being entirely forthright during the interview. @datguy4104 added, “Around 11 minutes in, he says 'Yeah, it's open-source, you can do what you want with it,' then at 15:15 refers to altering the Stripe plugin as 'hacking it.' He is very careful about using specific language to paint WP Engine as the bad guy.”
For some viewers, the lack of specificity was a major issue. @rns10 highlighted this by writing, “There is no definitive evidence in front of the public that what WP Engine is doing is wrong. Matt is going from a perspective of keeping his word, but without any written communication, WP Engine's lawyers will chew him out.” This reflects a growing concern among the community that Matt's personal frustrations are driving the legal battle, rather than a solid legal foundation.
### Ethical vs. Legal Obligations
Many commenters discussed the broader ethical implications of the situation, with some agreeing that WP Engine, as a highly profitable company, should be contributing more to the WordPress ecosystem. However, the method of enforcing this contribution through legal threats and trademark enforcement was seen as problematic.
@shableep raised an insightful point, arguing that while Matt may be ethically correct in believing that companies profiting from WordPress should contribute, the way Automattic has managed these situations feels coercive. They wrote, “At what level of revenue must you hit before you have to contribute or risk having the trademark enforced? Hosts are at the whim of when Matt decides enough is enough, and they must contribute. […] I think the way hes been managing it by nudging these companies in the right direction but asking for fees or contributions and it leading to an or else conversation is probably, technically, extortion, even if his heart is in the right place.”
Other users like @roadrunner2324 attempted to summarize Matts stance: “Everyone is allowed to use the WP trademark, but if your company is making half a billion in revenue, and your huge base of clients consumes the official WordPress endpoints, then they would like you to pay for their license OR pay that amount in hours/contribution to the core to make the platform better for everyone.”
This comment captures the frustration many feel about the blurred lines between WordPress's open-source nature and the business side of Automattic's trademark policies. It highlights the delicate balance between open-source values and corporate realities, something that many users believe Matt is mishandling.
### Calls for Greater Transparency and Communication
A recurring theme in the comment section was the demand for more transparency and clearer guidelines from Automattic. Many viewers pointed out that the lack of clarity surrounding contribution requirements and trademark enforcement has caused confusion and fear within the community. @geoffl commented, “Matt needs to give clear guidelines on when/how he will ask for contributions from hosting companies. That will reduce consumer uncertainty on whether they will have access to plugins.”
The broader WordPress community seems unsure of where Automattic draws the line between companies that need to contribute and those that do not. @mauricioac added a general piece of advice about business dealings, saying, “Something I learned a long time ago after getting backstabbed: never make verbal agreements. Always, ALWAYS, have written records.” This sentiment reinforces the idea that Matt's informal approach to business dealings with companies like WP Engine could come back to haunt him in court.
### The Risk of Alienating the WordPress Ecosystem
Another significant concern is the potential long-term damage to the WordPress ecosystem if this issue continues to escalate. Commenters like @UODZU-P expressed frustration that WP Engine customers were already being affected by the trademark dispute. They wrote, “We are now looking to migrate 20+ sites off WPE to something else because we can't receive security updates due to him going scorched earth. So now I'm looking to abandon WP from my tech stack altogether.”
This highlights a critical issue for Automattic: by taking legal action against a major player like WP Engine, they risk alienating the very community that has made WordPress successful. Many users expressed concern that similar actions could be taken against other hosting companies, leading to an exodus of developers, businesses, and users from the platform.
### My Thoughts: A Divided Community
Overall, the community sentiment following the interview with Matt Mullenweg reflects a growing divide between Automattic's leadership and the broader WordPress community. While some users support Matt's ethical stance, many are concerned about the legal strategies being employed and the potential fallout for the WordPress ecosystem. The lack of clear communication, the emotional nature of Matt's responses, and the perceived coercion of companies like WP Engine have left many users feeling uneasy about the future of WordPress.
As one user, @sluffmo, summed it up: “All he's achieved is showing the WP community that one immature, greedy dude throwing a tantrum can screw up their site without warning. Who cares about WP Engine? Time to pick another CMS.” This comment encapsulates the risk Automattic faces if they continue down this path: alienating the very developers and users who have built their success.
# UPDATE: 10.12
### ACF Plugin Taken Over by WordPress.org
In a surprising and contentious move, WordPress.org has forcibly taken over control of the Advanced Custom Fields (ACF) plugin, a popular tool used by developers to customize their WordPress sites, without the consent of the ACF team. This action has left many in the WordPress community shocked and concerned about the future of the plugin, which has been trusted for over a decade.
### Key Points:
- **No Action Needed for WP Engine, Flywheel, or ACF PRO Customers**: If you are using WP Engine, Flywheel, or have ACF PRO, you will continue receiving updates directly from the ACF team. There's no need to worry about losing access to future updates.
- **For Other Hosts**: If your site is hosted elsewhere, you must take action to keep your site secure. ACF advises users to perform a one-time download of version 6.3.8 from their website to maintain control over plugin updates. The ACF team no longer distributes updates via WordPress.org, so affected users must update manually to stay protected (a command-line sketch follows this list).
- **ACF's Continued Commitment**: Despite the forced takeover, the ACF team reassures users that they will continue to support and enhance the plugin, maintaining its high standards of functionality and security.
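For those comfortable with the command line, that one-time switch could plausibly be scripted with WP-CLI. This is a hedged sketch, not official ACF guidance: the ZIP URL below is a placeholder you would replace with the genuine 6.3.8 download link provided by the ACF team.
```bash
# Hypothetical sketch: substitute the official ACF 6.3.8 ZIP link from advancedcustomfields.com.
wp plugin install "https://example.com/advanced-custom-fields-6.3.8.zip" --force

# Re-activate the plugin under its standard slug after the reinstall.
wp plugin activate advanced-custom-fields
```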
### Community Response
This move by WordPress.org raises further questions about its aggressive control over key plugins in the ecosystem. With WP Engine and other major platforms already feeling the strain from WordPress's trademark and legal pressures, the seizure of ACF only deepens concerns about the future of third-party development within the WordPress ecosystem.
The ACF team, trusted for over a decade, remains dedicated to serving their users outside of WordPress.org's ecosystem, but this event adds fuel to the growing fire of distrust towards Automattic and its heavy-handed approach.
### What Does This Mean for Developers?
This takeover has developers wondering which plugin or service might be next. Many see this as another move by Automattic to centralize control over WordPress development, alienating independent plugin developers and reducing diversity in the ecosystem. The community is now asking: if even well-established plugins like ACF are not safe from WordPress.org's actions, is any plugin truly secure?
As WordPress continues to push forward with such actions, developers, hosts, and users alike are reassessing their commitment to the platform.
⚠️ **Ensure your site's security by downloading the genuine ACF 6.3.8 version directly from the ACF team.**
## The Corporate Battle: Silver Lake and the Future of WordPress
At the heart of the escalating conflict between WordPress and WP Engine lies the involvement of Silver Lake, a prominent private equity firm. Silver Lake, which acquired a majority stake in WP Engine in 2018, has been at the center of this dispute, introducing a new dimension of corporate power dynamics to the traditionally open-source-driven WordPress ecosystem.
### Silver Lake's Role in the WP Engine Controversy
Silver Lake's acquisition of WP Engine marked a pivotal moment for the company, providing the financial backing to scale its operations and solidify its position as one of the leading managed WordPress hosting providers. However, this backing also means that WP Engine is no longer simply a player in the open-source world—it's now heavily influenced by the demands and expectations of its corporate stakeholders.
For Automattic, this creates a unique tension. On one hand, WP Engine is a vital part of the WordPress ecosystem, hosting countless websites and contributing to its overall success. On the other hand, Silver Lake's presence signifies a shift towards commercial interests that may not align with WordPress's open-source values. Automattic founder Matt Mullenweg has framed this as a battle for the soul of WordPress, accusing Silver Lake of prioritizing profits over the community-driven ethos that WordPress was built upon.
In Mullenweg's own words:
> "Silver Lake doesnt care about your open-source ideals. It just wants a return on capital."
This sentiment reflects a broader concern among developers and users—namely, that the influence of private equity in the WordPress ecosystem could undermine the collaborative, open-source nature that has long been its strength.
### The Stakes for Open-Source Software
The involvement of a major private equity firm like Silver Lake raises questions about the long-term sustainability of open-source projects like WordPress. While open-source software has always operated in a delicate balance between community contributions and commercial interests, the current dispute underscores the challenges that arise when corporate entities with vastly different goals enter the picture.
Silver Lake's role in WP Engine brings into focus the larger debate about how open-source projects should be monetized and governed. Can a company like WP Engine, backed by corporate interests, still claim to champion the open-source philosophy that made WordPress successful? Or does Silver Lake's influence signify a shift towards a more closed, profit-driven model that could alienate developers and users alike?
### The Risk of Fragmentation
As the legal battle between Automattic and WP Engine unfolds, there is a real risk of fragmentation within the WordPress ecosystem. Some developers and hosting providers may begin to look for alternatives, fearing that Automattic's increasingly aggressive stance toward WP Engine could set a precedent for other companies that use WordPress as part of their business model.
The broader question that arises is: **How much control should Automattic have over the WordPress ecosystem?**
Silver Lake's involvement complicates this question, as it introduces a powerful corporate entity into the mix, one that is primarily concerned with generating returns for its investors. The friction between Automattic's vision for WordPress and Silver Lake's business interests could drive a wedge between the various stakeholders in the community, ultimately leading to forks of the platform or the rise of new competitors.
### A Battle for the Future of WordPress
This conflict between Automattic and WP Engine, with Silver Lake as a key player, is about more than just trademark disputes or licensing fees. It's about the future of WordPress itself. Will WordPress remain an open, community-driven platform, or will it evolve into a more corporatized product, shaped by the interests of private equity and large businesses?
For developers, hosts, and users, the outcome of this legal battle will have far-reaching implications. The decisions made in the coming months could determine whether WordPress continues to thrive as an open-source project, or whether it becomes increasingly controlled by a small number of corporate entities with competing interests.
As one Reddit commenter put it:
> "This feels like a turning point for WordPress. If were not careful, we could lose the open-source spirit that made this platform great in the first place."
As the battle between Automattic, WP Engine, and Silver Lake rages on, it's clear that the stakes couldn't be higher: for WordPress, for its community, and for the future of open-source software.
# To summarize
<iframe width="560" height="315" src="https://www.youtube.com/embed/mc5P_082bvY?si=xVGqhKRS35yk98sl" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" referrerpolicy="strict-origin-when-cross-origin" allowfullscreen></iframe>
## Sources
- [WordPress.org: WP Engine is not WordPress](https://wordpress.org/news/2024/09/wp-engine/)
- [WP Engine Cease and Desist Letter](https://wpengine.com/wp-content/uploads/2024/09/Cease-and-Desist-Letter-to-Automattic-and-Request-to-Preserve-Documents-Sent.pdf)
- [WordPress Trademark Policy Changes (Archived)](https://web.archive.org/web/20240924024555/https://wordpressfoundation.org/trademark-policy/)
- [WordPress Foundation: Trademark Policy](https://wordpressfoundation.org/trademark-policy/)
- [Reddit Discussion: WordPress Trademark Policy](https://www.reddit.com/r/Wordpress/comments/1foknoq/the_wordpress_foundation_trademark_policy_was/?share_id=pDvacXlhttDifEUjnc5tq)
- [Reddit Discussion: WP Engine Plugin Repository Inaccessibility](https://www.reddit.com/r/Wordpress/comments/1fpeqn2/plugin_repository_inaccessible_to_wp_engine/)
- [WordPress.org: WP Engine Banned](https://wordpress.org/news/2024/09/wp-engine-banned/)
- [Advanced Custom Fields (ACF)](https://www.advancedcustomfields.com/)

View File

@ -0,0 +1,201 @@
<!-- lead -->
Empowering Open Communication Through Decentralized Audio Streaming
Control over information is both a powerful asset and a contentious issue. Centralized services hold significant sway over what content can be shared, placing constraints on open communication. But with advancements in peer-to-peer (P2P) technology, we're beginning to break down these walls. One powerful tool for this revolution is **pearCast**, an entirely decentralized, real-time audio broadcasting application that enables users to share audio without any centralized control.
pearCast uses **Hyperswarm** and the **Web Audio API** to allow anyone with internet access to broadcast audio directly to listeners, removing the need for servers and intermediaries. This P2P approach offers advantages like privacy, resilience against censorship, and enhanced freedom of communication. Built with **Pear CLI**, pearCast is accessible as a desktop application, empowering users with tools to sidestep centralized restrictions and create their own channels of communication.
<p align="center">
<img src="https://git.ssh.surf/snxraven/pearCast/media/branch/main/screenshots/create.png" alt="pearCast">
</p>
# Source
## https://git.ssh.surf/snxraven/pearCast
## The Power of P2P Broadcasting
In a traditional client-server setup, broadcasters send their data to a central server, which then redistributes it to listeners. However, central servers can impose restrictions, leading to censorship or surveillance. pearCast changes this by adopting a P2P model: data flows directly between the broadcaster and each listener, avoiding central servers altogether.
This approach offers significant benefits:
1. **Freedom from Censorship**: In a P2P model, there's no central authority that can restrict, alter, or monitor content.
2. **Enhanced Privacy**: With no central server logging or monitoring user activity, P2P connections enhance privacy.
3. **Resilience**: In pearCast, if one peer disconnects, the network remains operational. Broadcasters retain control and connections remain active for listeners who are still tuned in.
P2P connections are especially useful in regions where internet access is regulated, or in situations where people need a secure way to broadcast audio without surveillance. With pearCast, users can host a private radio station, hold secure discussions, or share music with friends, all without centralized oversight.
## Behind the Scenes: How pearCast Works
pearCast is powered by several key technologies: **Hyperswarm** for peer discovery and P2P connections, the **Web Audio API** for capturing and streaming audio, and **Pear CLI** for running the app as a desktop application. Let's break down how these technologies work together to create a smooth broadcasting experience.
### Hyperswarm: Building P2P Connections
Hyperswarm enables pearCast's decentralized networking. It's designed for building large, scalable P2P networks where users connect directly to one another, bypassing the need for servers. Hyperswarm operates over a Distributed Hash Table (DHT), allowing users to find each other based on a unique identifier, or “topic.” Here's how it works in pearCast:
- **Creating a Station ID**: When a broadcaster creates a station, pearCast generates a unique `topic` using `crypto.randomBytes(32)`. This 32-byte random key becomes the station ID.
- **Joining a Station**: Listeners enter the station ID to connect. Hyperswarm uses the DHT to locate peers that are on the same topic, establishing direct connections.
- **Handling Connections**: Hyperswarm's `swarm.on('connection')` event is triggered whenever a peer connects, enabling data streaming without the need for a central server. Each connection is secure and private, only accessible to those with the correct topic key.
This DHT-based discovery mechanism allows pearCast to function entirely independently of DNS or IP-based connections, enabling connections that are fast, efficient, and censorship-resistant.
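As a rough sketch of that flow (not the app's exact code; the hex conversion via `b4a` is an assumption based on the description above), station creation and ID sharing might look like this:
```javascript
// Minimal discovery sketch, assuming Node.js with the hyperswarm and b4a packages installed.
import Hyperswarm from 'hyperswarm'
import crypto from 'crypto'
import b4a from 'b4a'

// Broadcaster: a random 32-byte topic doubles as the station ID.
const topic = crypto.randomBytes(32)
console.log('Share this station ID:', b4a.toString(topic, 'hex'))

const swarm = new Hyperswarm()
swarm.join(topic, { client: false, server: true })

swarm.on('connection', (conn) => {
  // conn is an encrypted duplex stream to one listener.
  console.log('Listener connected')
})
```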
### Web Audio API: Capturing and Streaming Audio
The **Web Audio API** provides pearCast with powerful tools for capturing, processing, and playing audio directly within the browser, enabling real-time streaming by capturing microphone input and encoding it for P2P transmission. Here's how it works:
1. **Setting Up Audio Capture**: When a broadcaster starts a station, pearCast requests microphone access using `navigator.mediaDevices.getUserMedia()`. The chosen input device (e.g., the default microphone or any selected audio device) begins capturing audio in real time.
2. **Audio Processing**: The captured audio stream is sent to an `AudioContext` and processed by a `ScriptProcessorNode`, which allows pearCast to take chunks of audio data, encode them into `Float32Array` format, and transmit them over Hyperswarm.
3. **Playing Audio for Listeners**: When listeners receive audio data, pearCast uses the Web Audio API to decode the audio data and play it through an `AudioBufferSourceNode` connected to the `AudioContext`.
### Pear CLI: Running as a Desktop Application
Pear CLI is a tool for creating and managing P2P desktop applications. By running pearCast as a Pear application, users can connect to peers more reliably and bypass web-based limitations. Pear CLI provides a native experience for P2P applications, improving performance, stability, and connection resilience.
## Setting Up pearCast
### P2P Runtime
To run pearCast via the Pear network, simply run:
`npm i pear -g`
Then run:
`pear run pear://q3rutpfbtdsr7ikdpntpojcxy5u356qfczzgqomxqk3jdxn6ao8y`
To set up pearCast in a development environment, here's how to get started:
1. **Clone the Repository**:
```bash
git clone https://git.ssh.surf/snxraven/pearCast.git
cd pearCast
```
2. **Install Dependencies**:
```bash
npm install
```
3. **Run the Application**:
```bash
pear run --dev .
```
Once the app is running, you can start broadcasting or join an existing station by entering the station ID.
## Walkthrough of pearCast's Code
Let's dive into pearCast's code to understand how each component works together to create this powerful P2P audio streaming experience.
### HTML Layout: index.html
The HTML layout in `index.html` is designed to be clean and intuitive. It includes the main controls for creating or joining a station, a modal for entering station IDs, and a dropdown for broadcasters to select their audio input.
Key Elements:
- **Create and Join Buttons**: Users can start a new broadcast station or join an existing one.
- **Audio Input Selector**: Only visible to broadcasters, this dropdown allows them to choose their preferred microphone input (a population sketch follows this list).
- **Bootstrap Modal**: Used to prompt users to enter a station ID when joining a broadcast.
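The walkthrough below doesn't show how that dropdown gets its entries, but with the Web Audio API it would plausibly be filled like this; the `audioInputSelect` element ID is an assumption, not necessarily the ID used in `index.html`:
```javascript
// Hypothetical sketch of populating the broadcaster's audio input dropdown.
async function listAudioInputs() {
  // Request mic access first so the browser exposes device labels.
  await navigator.mediaDevices.getUserMedia({ audio: true })

  const devices = await navigator.mediaDevices.enumerateDevices()
  const select = document.getElementById('audioInputSelect') // assumed element ID

  for (const device of devices.filter(d => d.kind === 'audioinput')) {
    const option = document.createElement('option')
    option.value = device.deviceId
    option.textContent = device.label || 'Microphone'
    select.appendChild(option)
  }
}
```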
### JavaScript Logic: app.js
The JavaScript in `app.js` handles all application logic, from establishing P2P connections to capturing and streaming audio data.
#### Setting Up Hyperswarm Connections
The following code sets up Hyperswarm connections for broadcasters and listeners:
```javascript
let swarm;
let topic = crypto.randomBytes(32);

swarm = new Hyperswarm();
swarm.join(topic, { client: false, server: true });

swarm.on('connection', (conn) => {
  // Handle incoming connection
});
```
This `topic` serves as a unique identifier for the station. Broadcasters join in server mode, while listeners join in client mode, enabling Hyperswarm to automatically discover peers.
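The listener side isn't shown above; under the same assumptions, joining an existing station by its hex ID might look like this sketch (`processIncomingAudioData` is the playback function covered later in this walkthrough):
```javascript
// Hypothetical listener-side join: rebuild the topic from the shared hex station ID.
const topic = b4a.from(stationIdHex, 'hex') // stationIdHex comes from the join modal

const swarm = new Hyperswarm()
swarm.join(topic, { client: true, server: false })

swarm.on('connection', (conn) => {
  // Each incoming chunk is raw Float32Array audio from the broadcaster.
  conn.on('data', (data) => processIncomingAudioData(data))
})
```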
#### Capturing and Streaming Audio with Web Audio API
Once a broadcaster creates a station, the app captures audio and processes it for streaming:
```javascript
navigator.mediaDevices.getUserMedia({ audio: { deviceId: currentDeviceId } })
  .then(stream => {
    const source = audioContext.createMediaStreamSource(stream);
    const processor = audioContext.createScriptProcessor(4096, 1, 1);

    source.connect(processor);
    processor.connect(audioContext.destination);

    processor.onaudioprocess = (event) => {
      const audioData = event.inputBuffer.getChannelData(0);
      const buffer = b4a.from(new Float32Array(audioData).buffer);
      conn.write(buffer);
    };
  });
```
This function:
1. **Requests Microphone Access**: Captures audio based on the selected input device.
2. **Processes Audio in Real Time**: The `ScriptProcessorNode` divides audio into chunks and encodes it as a `Float32Array`.
3. **Streams Audio to Listeners**: The broadcaster sends audio data to all connected peers over Hyperswarm (see the fan-out sketch below).
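Note that the snippet above writes to a single `conn`. To reach every listener, the broadcaster has to fan each chunk out to all open connections; a hedged sketch of that pattern follows (the `conns` set is an illustration, not necessarily how `app.js` tracks peers):
```javascript
// Hypothetical fan-out: track live connections and write each encoded chunk to all of them.
const conns = new Set()

swarm.on('connection', (conn) => {
  conns.add(conn)
  conn.once('close', () => conns.delete(conn))
})

function broadcast(buffer) {
  for (const conn of conns) conn.write(buffer)
}
```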
#### Playing Audio for Listeners
Listeners receive audio data, decode it, and play it using the Web Audio API:
```javascript
function processIncomingAudioData(data) {
  accumulatedBuffer = b4a.concat([accumulatedBuffer, data]);

  while (accumulatedBuffer.byteLength >= 4) {
    const chunkSize = accumulatedBuffer.byteLength;
    const audioData = new Float32Array(accumulatedBuffer.slice(0, chunkSize).buffer);
    accumulatedBuffer = accumulatedBuffer.slice(chunkSize);

    const buffer = audioContext.createBuffer(1, audioData.length, audioContext.sampleRate);
    buffer.copyToChannel(audioData, 0);

    const source = audioContext.createBufferSource();
    source.buffer = buffer;
    source.connect(audioContext.destination);
    source.start();
  }
}
```
The function:
1. **Buffers Incoming Audio Data**: As data packets arrive, they're stored until there's enough for smooth playback.
2. **Decodes Audio Data**: Audio chunks are converted back into audio buffer format.
3. **Plays Audio in Real Time**: The data is played using an `AudioBufferSourceNode`.
#### Managing Peer Connections and Disconnects
pearCast handles connections and disconnects gracefully, avoiding crashes and logging disconnections:
```javascript
swarm.on('connection', (conn) => {
  conn.once('close', () => {
    console.log("Peer disconnected.");
  });

  conn.on('error', (err) => {
    if (err.code === 'ECONNRESET') {
      console.log("Peer connection reset by remote peer.");
    } else {
      console.error("Connection error:", err);
    }
  });
});
```
By listening to connection events, pearCast ensures that disconnected peers are handled smoothly, enhancing the stability and resilience of the broadcast.
## Use Cases and Real-World Applications
The potential of pearCast goes beyond casual broadcasting. Some real-world applications include:
1. **Independent News Stations**: In regions where information is controlled, pearCast can be used to share uncensored news directly with listeners.
2. **Community Radio**: Local communities can create their own online radio stations without needing a central server.
3. **Private Discussions**: pearCast allows users to host private audio channels where discussions are free from surveillance or interference.
4. **Remote Music Jams**: Musicians can use pearCast to broadcast live performances, even in locations with limited internet access.
## Final Thoughts
pearCast is more than just a broadcasting app. It's a testament to the power of decentralized technology and a step forward in the fight for communication freedom. By combining P2P networking with real-time audio streaming, pearCast empowers users to create their own audio platforms, share content securely, and circumvent traditional barriers to communication.
Whether you're an activist, an artist, or simply a fan of free, uncensored broadcasting, pearCast gives you the tools to break free from central control and make your voice heard.

19
me/about-rayai.md Normal file
View File

@ -0,0 +1,19 @@
<h1 style="text-align: center;">About RayAI</h1>
<h2 style="text-align: center;">System Overview</h2>
<p style="text-align: center;">OS: Ubuntu 24.04 LTS (x86_64)</p>
<p style="text-align: center;">Kernel: 15.5</p>
<h2 style="text-align: center;">Hardware Specs</h2>
<p style="text-align: center;">CPU: AMD FX-8320E (8 cores) @ 3.20 GHz</p>
<p style="text-align: center;">GPU: NVIDIA GeForce RTX 2060 SUPER</p>
<p style="text-align: center;">Total System Memory: 24 GB</p>
<h2 style="text-align: center;">AI Model</h2>
<p style="text-align: center;">Model: Meta-Llama-3.2 (3B)</p>
<p style="text-align: center;"><a href= "https://blog.raven-scott.fyi/rayai-at-home-chat-assistant-server">https://blog.raven-scott.fyi/rayai-at-home-chat-assistant-server</a></p>

17
me/about.md Normal file
View File

@ -0,0 +1,17 @@
# About Me
Hi, I'm Raven Scott, a Linux enthusiast and problem solver with a deep passion for technology and creativity. I thrive in environments where I can learn, experiment, and turn ideas into reality. Whether it's building systems, coding, or tackling complex technical challenges, I find joy in using technology to make life easier and more efficient.
My passion for Linux and open-source technologies began early on, and since then, I've been on a continuous journey of growth and discovery. From troubleshooting networking issues to optimizing servers for performance, I love diving deep into the intricate details of how things work. The thrill of solving problems, especially when it comes to system security or performance optimization, is what fuels me every day.
## What Drives Me
I'm passionate about more than just the technical side. I believe in the power of technology to bring people together, and that's why I'm dedicated to creating platforms and solutions that are accessible and impactful. Whether it's hosting services, developing peer-to-peer applications, or automating complex tasks, I'm always exploring new ways to push the boundaries of what's possible.
Outside of work, I love contributing to community projects and sharing my knowledge with others. Helping people grow their own skills is one of the most rewarding aspects of what I do. From mentoring to writing documentation, I'm constantly looking for ways to give back to the tech community.
## Creative Side
When I'm not deep in the technical world, I'm exploring my creative side through music. I run my own music label, where I produce and distribute AI-generated music across all platforms. Music and technology blend seamlessly for me, as both are outlets for innovation and expression.
In everything I do, from coding to creating music, my goal is to keep learning, growing, and sharing my passion with the world. If you ever want to connect, collaborate, or simply chat about tech or music, feel free to reach out!

View File

@ -10,6 +10,7 @@
"author": "",
"license": "ISC",
"dependencies": {
"axios": "^1.7.7",
"body-parser": "^1.20.3",
"bootstrap": "^5.3.3",
"date-fns": "^4.0.0",

Binary file not shown. (After: 8.9 KiB)

Binary file not shown. (After: 27 KiB)

BIN
public/apple-touch-icon.png Normal file

Binary file not shown. (After: 8.3 KiB)

9
public/browserconfig.xml Normal file
View File

@ -0,0 +1,9 @@
<?xml version="1.0" encoding="utf-8"?>
<browserconfig>
<msapplication>
<tile>
<square150x150logo src="/mstile-150x150.png"/>
<TileColor>#da532c</TileColor>
</tile>
</msapplication>
</browserconfig>

369
public/css/chat.css Normal file
View File

@ -0,0 +1,369 @@
html,
body {
height: 100%;
margin: 0;
padding: 0;
overflow: hidden;
background-color: #121212;
color: white;
font-family: Arial, sans-serif;
}
p {
margin-top: 3px;
margin-bottom: 5.2px;
}
a {
color: #32a4e0;
text-decoration: none;
}
a:hover {
color: #1e8ac6;
/* Optional: Darker shade on hover */
text-decoration: underline;
}
.bg-dark {
background-color: #121212 !important;
}
.chat-container {
display: flex;
flex-direction: column;
height: 100%;
}
.navbar {
flex-shrink: 0;
}
.chat-box {
display: flex;
flex-direction: column;
flex-grow: 1;
background-color: #1e1e1e;
overflow: hidden;
}
.messages {
flex-grow: 1;
overflow-y: auto;
padding: 20px;
background-color: #2e2e2e;
border-radius: 5px;
display: flex;
flex-direction: column;
}
.message {
margin-bottom: 10px;
padding: 10px;
border-radius: 5px;
}
.alert {
margin-top: 10px;
text-align: center;
/* Center the text content */
display: flex;
/* Make sure the alert is a flex container */
justify-content: center;
/* Center flex items horizontally */
align-items: center;
/* Center flex items vertically (if needed) */
}
body,
.container {
background-color: transparent;
}
/* Success Alert - Blue Theme */
.alert-success {
background-color: #000000;
/* Blue background similar to your button */
color: white;
/* White text */
border: none;
/* Remove border */
padding: 10px 20px;
/* Padding for spacing */
border-radius: 5px;
/* Rounded corners */
text-align: center;
/* Center text */
display: flex;
/* Flex layout */
justify-content: center;
/* Center horizontally */
align-items: center;
/* Center vertically */
}
/* Error Alert - Maroon Red Theme */
.alert-danger {
background-color: #800000;
/* Maroon red background */
color: white;
/* White text */
border: none;
/* Remove border */
padding: 10px 20px;
/* Padding for spacing */
border-radius: 5px;
/* Rounded corners */
text-align: center;
/* Center text */
display: flex;
/* Flex layout */
justify-content: center;
/* Center horizontally */
align-items: center;
/* Center vertically */
}
/* Common alert styles */
.alert {
margin-top: 10px;
/* Add margin at the top */
font-size: 14px;
/* Adjust the font size for readability */
}
.message.user {
background-color: #3a3a3a;
color: white;
}
.message.assistant {
background-color: #282828;
color: #f1f1f1;
}
.form-control {
background-color: #2e2e2e;
color: white;
border-color: #444;
}
.form-control:focus {
background-color: #2e2e2e;
color: white;
border-color: #888;
}
pre code {
background-color: #1e1e1e;
color: #f8f8f2;
padding: 10px;
border-radius: 5px;
display: block;
white-space: pre-wrap;
word-wrap: break-word;
}
.copy-button {
background-color: #444;
color: white;
border: none;
cursor: pointer;
padding: 5px;
margin-top: 5px;
margin-bottom: 5px;
display: inline-block;
}
.copy-button:hover {
background-color: #555;
}
.input-area {
background-color: #1e1e1e;
padding: 10px 20px;
z-index: 10;
flex-shrink: 0;
color: white;
}
.footer {
flex-shrink: 0;
text-align: center;
padding: 10px;
background-color: #121212;
}
.copy-button {
background-color: #444;
/* Button background color */
color: white;
/* Button text color */
border: none;
/* Remove border */
cursor: pointer;
/* Pointer cursor for interaction */
padding: 5px 10px;
/* Adjusted padding for smaller size */
margin-top: 5px;
/* Space at the top */
display: inline-block;
/* Keep the button in line */
font-size: 14px;
/* Slightly smaller font size for compactness */
width: 147px;
/* Prevent the button from stretching */
text-align: left;
/* Align the text left */
}
.copy-button-code {
background-color: #444;
/* Button background color */
color: white;
/* Button text color */
border: none;
/* Remove border */
cursor: pointer;
/* Pointer cursor for interaction */
padding: 5px 10px;
/* Adjusted padding for smaller size */
margin-top: 5px;
/* Space at the top */
display: inline-block;
/* Keep the button in line */
font-size: 14px;
/* Slightly smaller font size for compactness */
width: 56px;
/* Prevent the button from stretching */
text-align: left;
/* Align the text left */
}
.copy-button:hover {
background-color: #555;
/* Darker shade on hover */
}
/* Add animations for the alerts */
@keyframes fadeSlideIn {
0% {
opacity: 0;
transform: translateY(-20px);
}
100% {
opacity: 1;
transform: translateY(0);
}
}
@keyframes fadeSlideOut {
0% {
opacity: 1;
transform: translateY(0);
}
100% {
opacity: 0;
transform: translateY(-20px);
}
}
/* Success Alert with fade-in and slide-down animation */
.alert {
opacity: 0;
transition: opacity 0.5s ease-in-out, transform 0.5s ease-in-out;
display: flex;
justify-content: center;
align-items: center;
animation: fadeSlideIn 0.5s forwards;
}
/* Add class for fading out when alert is being removed */
.alert.fade-out {
animation: fadeSlideOut 0.5s forwards;
}
/* Dropdown menu styling */
.navbar .dropdown-menu {
background-color: #1e1e1e;
/* Match the chat box background */
border: none;
/* Remove border */
box-shadow: none;
/* Remove shadow */
padding: 0;
/* Remove padding */
}
.navbar .dropdown-item {
background-color: #1e1e1e;
/* Match the chat box background */
color: white;
/* White text */
padding: 10px 20px;
/* Add padding for spacing */
border-bottom: 1px solid #444;
/* Add a subtle separator */
}
.navbar .dropdown-item:last-child {
border-bottom: none;
/* Remove border for the last item */
}
.navbar .dropdown-item:hover {
background-color: #282828;
/* Slightly lighter background on hover */
color: #f1f1f1;
/* Lighter text on hover */
}
.navbar .dropdown-item:focus {
background-color: #282828;
/* Match hover style for focused items */
outline: none;
/* Remove default outline */
}
.navbar .dropdown-toggle {
color: white;
/* White text */
}
.navbar .dropdown-toggle::after {
border-top: 0.3em solid white;
/* White arrow for dropdown */
}
.navbar .dropdown-menu.show {
opacity: 1;
/* Fully opaque when shown */
transform: translateY(0);
/* Reset transform */
transition: opacity 0.3s ease-in-out, transform 0.3s ease-in-out;
/* Smooth fade and slide transition */
}
.navbar .dropdown-menu {
opacity: 0;
transform: translateY(-10px);
/* Start hidden and slightly raised */
transition: opacity 0.3s ease-in-out, transform 0.3s ease-in-out;
/* Smooth fade and slide transition */
}
/* Styling the dropdown toggle button */
.navbar .dropdown-toggle:hover {
color: #f1f1f1;
/* Lighter text on hover */
}
/* Separator between regular menu items and dropdown */
.navbar .separator {
color: #555;
/* Subtle separator color */
}

View File

@ -1,3 +1,4 @@
/* Base Styles */
body {
font-family: 'Roboto', sans-serif;
display: flex;
@ -5,49 +6,81 @@ body {
min-height: 100vh;
color: #333;
margin: 0;
background-color: #f8f9fa;
}
main {
flex-grow: 1;
padding: 20px;
}
a {
color: #000;
text-decoration: none;
transition: color 0.2s ease;
}
a:hover {
color: #42484a;
}
/* Add margin between list items */
li {
margin-bottom: 1rem; /* Space between list items */
line-height: 1.6; /* Improve readability for longer text */
}
/* Adjust list container margins */
dl, ol, ul {
margin-top: -13px;
margin-top: 0;
margin-bottom: 1rem;
}
/* Primary Background */
.bg-primary {
--bs-bg-opacity: 1;
background-color: rgb(0, 0, 0) !important;
}
.bg-dark {
--bs-bg-opacity: 1;
background-color: rgb(0 0 0) !important;
}
/* Navbar Styles */
.navbar {
background-color: #121212;
background-color: #000000;
padding: 10px 20px;
}
.navbar-brand {
font-size: 1.75rem;
font-weight: bold;
color: #ffffff;
transition: color 0.2s ease;
}
.navbar-brand:hover {
color: #42484a;
}
.navbar-nav .nav-link {
font-size: 1.15rem;
padding-right: 1rem;
color: #ffffff;
transition: color 0.2s ease;
}
.navbar-nav .nav-link:hover {
color: #42484a;
}
/* Header */
header {
background: #000000;
background: #000;
color: #fff;
padding: 2px 0;
padding: 10px 0;
text-align: center;
height: auto;
}
h1 {
@ -57,9 +90,10 @@ h1 {
p.lead {
font-size: 1.5rem;
margin-bottom: 1.5rem;
}
/* Read Article Button Styling */
/* Button Styles */
.btn-outline-primary {
font-size: 1.25rem;
padding: 10px 20px;
@ -70,38 +104,41 @@ p.lead {
}
.btn-outline-primary:hover {
background-color: #2c5364;
border-color: #2c5364;
background-color: #42484a;
border-color: #42484a;
color: #ffffff;
}
/* Pagination Styling */
/* Pagination Styles */
.pagination {
margin-top: 20px;
display: flex;
justify-content: center;
}
.pagination .page-item .page-link {
color: #ffffff;
background-color: #1e1e1e;
border: 1px solid #2c5364;
border: 1px solid #42484a;
padding: 10px 15px;
transition: background-color 0.3s ease, color 0.3s ease;
}
.pagination .page-item.active .page-link {
background-color: #2c5364;
border-color: #2c5364;
background-color: #42484a;
border-color: #42484a;
color: #ffffff;
}
.pagination .page-item .page-link:hover {
background-color: #2c5364;
border-color: #2c5364;
background-color: #42484a;
border-color: #42484a;
color: #ffffff;
}
/* Footer Styles */
footer {
background-color: #121212;
background-color: #000000;
color: #fff;
padding: 20px 0;
text-align: center;
@ -111,40 +148,21 @@ footer {
.footer-logo {
font-size: 1.5rem;
font-weight: bold;
margin-bottom: 10px;
}
.footer-links a {
color: #999;
text-decoration: none;
margin-right: 1rem;
transition: color 0.2s ease;
}
.footer-links a:hover {
color: #fff;
}
/* Custom Styles for Navbar and Dropdown */
.navbar {
background-color: #121212;
}
.navbar-brand {
font-size: 1.75rem;
font-weight: bold;
color: #ffffff;
}
.navbar-nav .nav-link {
font-size: 1.15rem;
color: #ffffff;
padding-right: 1rem;
}
.navbar-nav .nav-link:hover {
color: #2c5364;
}
/* Custom Dropdown Styling */
/* Dropdown Styles */
.custom-dropdown {
background-color: #1e1e1e;
border: none;
@ -159,21 +177,77 @@ footer {
transition: background-color 0.3s ease, color 0.3s ease;
}
.custom-dropdown .dropdown-item:hover {
background-color: #000000;
color: #ffffff;
}
.custom-dropdown .dropdown-item:hover,
.custom-dropdown .dropdown-item:active {
background-color: #000000;
color: #ffffff;
}
/* Mobile Toggler */
/* Mobile Navbar Toggler */
.navbar-toggler {
border-color: #ffffff;
}
.navbar-toggler-icon {
background-color: #ffffff;
width: 30px;
height: 3px;
}
/* .py-4 {
padding-top: 1.2rem !important;
padding-bottom: 1.5rem !important;
} */
.py-4 {
padding-top: 0.2rem !important;
padding-bottom: 0.5rem !important;
}
/* Search Button Styles */
.input-group .btn-primary {
font-size: 1.25rem;
padding: 10px 20px;
color: #ffffff;
border: 2px solid #000000;
background-color: #000000;
transition: background-color 0.3s ease, color 0.3s ease;
}
p {
margin-top: 3px;
margin-bottom: 5.2px;
}
.input-group .btn-primary:hover {
background-color: #42484a;
border-color: #42484a;
color: #ffffff;
}
/* Custom Scrollbar for WebKit (Chrome, Safari) */
::-webkit-scrollbar {
width: 8px; /* Width of the entire scrollbar */
}
::-webkit-scrollbar-thumb {
background-color: #42484a; /* Color of the scrollbar handle */
border-radius: 10px; /* Roundness of the handle */
}
::-webkit-scrollbar-thumb:hover {
background-color: #333; /* Handle color on hover */
}
::-webkit-scrollbar-track {
background-color: #121212; /* Color of the scrollbar background/track */
}
/* Responsive Image Styling */
img {
max-width: 100%; /* Ensures the image cannot be larger than its container */
height: auto; /* Maintains the aspect ratio */
display: block; /* Removes any inline spacing */
object-fit: contain; /* Ensures the image scales within its container */
}

BIN
public/favicon-16x16.png Normal file

Binary file not shown. (After: 1.0 KiB)

BIN
public/favicon-32x32.png Normal file

Binary file not shown. (After: 1.6 KiB)

BIN
public/favicon.ico Normal file

Binary file not shown. (After: 15 KiB)

238
public/js/chat-local.js Normal file
View File

@ -0,0 +1,238 @@
// Handles key down event to send message on Enter
function handleKeyDown(event) {
if (event.key === 'Enter' && !event.shiftKey) {
event.preventDefault();
sendMessage();
}
}
// Sends a message to the chat API
async function sendMessage() {
const messageInput = document.getElementById('messageInput');
let message = messageInput.value.trim();
if (message === '') return;
// Encode the message to avoid XSS attacks
message = he.encode(message);
// Display the user's message in the chat
displayMessage(message, 'user');
messageInput.value = ''; // Clear the input
toggleLoading(true); // Show loading indicator
try {
const response = await fetch('https://infer.x64.world/chat', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify({ message: message })
});
if (response.ok) {
const data = await response.json();
displayMessage(data, 'assistant');
} else {
handleErrorResponse(response.status);
}
} catch (error) {
displayMessage('Error: ' + error.message, 'assistant');
} finally {
toggleLoading(false); // Hide loading indicator
}
}
// Toggles the loading indicator
function toggleLoading(show) {
const loadingElement = document.getElementById('loading');
loadingElement.style.display = show ? 'block' : 'none';
}
// Displays a message in the chat window
function displayMessage(content, sender) {
const messages = document.getElementById('messages');
const messageElement = document.createElement('div');
messageElement.classList.add('message', sender);
// Decode HTML entities and render Markdown
const decodedContent = he.decode(content);
const htmlContent = marked(decodedContent);
messageElement.innerHTML = htmlContent;
messages.appendChild(messageElement);
messages.scrollTop = messages.scrollHeight; // Scroll to the bottom of the chat
// Highlight code blocks if any
document.querySelectorAll('pre code').forEach((block) => {
hljs.highlightElement(block);
if (sender === 'assistant') {
addCopyButton(block); // Add copy button to code blocks
}
});
// Add "Copy Full Response" button after each assistant response
if (sender === 'assistant') {
addCopyFullResponseButton(messages, messageElement);
}
}
// Adds a copy button to a code block
function addCopyButton(block) {
const button = document.createElement('button');
button.classList.add('copy-button-code');
button.textContent = 'Copy';
button.addEventListener('click', () => copyToClipboard(block));
block.parentNode.appendChild(button);
}
// Adds "Copy Full Response" button below the assistant response
function addCopyFullResponseButton(messagesContainer, messageElement) {
const copyFullResponseButton = document.createElement('button');
copyFullResponseButton.classList.add('copy-button');
copyFullResponseButton.textContent = 'Copy Full Response';
copyFullResponseButton.addEventListener('click', () => copyFullResponse(messageElement));
messagesContainer.appendChild(copyFullResponseButton);
}
// Copies code block content to the clipboard
function copyToClipboard(block) {
const text = block.innerText;
navigator.clipboard.writeText(text).then(() => {
displayAlert('success', 'The code block was copied to the clipboard!');
}).catch((err) => {
displayAlert('error', 'Failed to copy code: ' + err);
});
}
// Copies the full response content to the clipboard in Markdown format
function copyFullResponse(messageElement) {
const markdownContent = convertToMarkdown(messageElement);
navigator.clipboard.writeText(markdownContent).then(() => {
displayAlert('success', 'Full response copied to clipboard!');
}).catch((err) => {
displayAlert('error', 'Failed to copy response: ' + err);
});
}
// Converts the HTML content of the response to Markdown, including language identifier
function convertToMarkdown(element) {
let markdown = '';
const nodes = element.childNodes;
nodes.forEach(node => {
if (node.nodeName === 'P') {
markdown += `${node.innerText}\n\n`;
} else if (node.nodeName === 'PRE') {
const codeBlock = node.querySelector('code');
const languageClass = codeBlock.className.match(/language-(\w+)/); // Extract language from class if available
const language = languageClass ? languageClass[1] : ''; // Default to empty if no language found
const codeText = codeBlock.innerText;
// Add language identifier to the Markdown code block
markdown += `\`\`\`${language}\n${codeText}\n\`\`\`\n\n`;
}
});
return markdown;
}
// Displays an alert message with animation
function displayAlert(type, message) {
const alertElement = document.getElementById(`${type}-alert`);
alertElement.textContent = message;
alertElement.style.display = 'flex'; // Show the alert
alertElement.classList.remove('fade-out'); // Remove fade-out class if present
alertElement.style.opacity = '1'; // Ensure it's fully visible
// Automatically hide the alert after 3 seconds with animation
setTimeout(() => {
alertElement.classList.add('fade-out'); // Add fade-out class to trigger animation
setTimeout(() => {
alertElement.style.display = 'none'; // Hide after animation finishes
}, 500); // Match this time to the animation duration
}, 3000); // Show the alert for 3 seconds before hiding
}
// Handles error responses based on status code
function handleErrorResponse(status) {
const messages = document.getElementById('messages');
const lastMessage = messages.lastElementChild;
if (status === 429) {
displayAlert('error', 'Sorry, I am currently too busy at the moment!');
// Remove the last user message if the status is 429
if (lastMessage && lastMessage.classList.contains('user')) {
messages.removeChild(lastMessage);
}
} else {
displayMessage('Error: ' + status, 'assistant');
// Remove the last user message for any other errors
if (lastMessage && lastMessage.classList.contains('user')) {
messages.removeChild(lastMessage);
}
}
}
// Helper function to reset the conversation on the server
async function sendResetRequest() {
const response = await fetch('https://infer.x64.world/reset-conversation', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
}
});
return response;
}
// Resets the chat messages and optionally displays a success message
async function resetChat(displaySuccessMessage = true) {
try {
const response = await sendResetRequest();
if (response.ok) {
const messagesContainer = document.getElementById('messages');
messagesContainer.innerHTML = ''; // Clear all messages
if (displaySuccessMessage) {
displayAlert('success', 'Messages Cleared!');
}
} else {
displayAlert('error', 'Failed to reset conversation. Please try again.');
}
} catch (error) {
displayAlert('error', 'An error occurred while resetting the conversation.');
}
}
// Resets the chat on page load without displaying the success message
document.addEventListener('DOMContentLoaded', function () {
resetChat(false);
});
function openWindow(url, windowName, width, height) {
window.open(url, windowName, `width=${width},height=${height},menubar=no,toolbar=no,location=no,status=no,scrollbars=yes,resizable=yes`);
}
function openGpuStats() {
openWindow('https://raven-scott.fyi/smi.html', 'gpuStatsWindow', 800, 600);
}
function openLiveLog() {
openWindow('https://llama-live-log.x64.world/', 'liveLogWindow', 600, 600);
}
function openTop() {
openWindow('https://ai-top.x64.world/', 'liveLogGtopWindow', 800, 600);
}
function openNetdata() {
openWindow('https://ai-monitor.x64.world/', 'netDataWindow', 800, 650);
}
function openAbout() {
openWindow('https://raven-scott.fyi/about-rayai', 'aboutRAIWindow', 800, 650);
}

238
public/js/chat.js Normal file
View File

@ -0,0 +1,238 @@
// Handles key down event to send message on Enter
function handleKeyDown(event) {
if (event.key === 'Enter' && !event.shiftKey) {
event.preventDefault();
sendMessage();
}
}
// Sends a message to the chat API
async function sendMessage() {
const messageInput = document.getElementById('messageInput');
let message = messageInput.value.trim();
if (message === '') return;
// Encode the message to avoid XSS attacks
message = he.encode(message);
// Display the user's message in the chat
displayMessage(message, 'user');
messageInput.value = ''; // Clear the input
toggleLoading(true); // Show loading indicator
try {
const response = await fetch('https://infer.x64.world/chat', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify({ message: message })
});
if (response.ok) {
const data = await response.json();
displayMessage(data, 'assistant');
} else {
handleErrorResponse(response.status);
}
} catch (error) {
displayMessage('Error: ' + error.message, 'assistant');
} finally {
toggleLoading(false); // Hide loading indicator
}
}
// Toggles the loading indicator
function toggleLoading(show) {
const loadingElement = document.getElementById('loading');
loadingElement.style.display = show ? 'block' : 'none';
}
// Displays a message in the chat window
function displayMessage(content, sender) {
const messages = document.getElementById('messages');
const messageElement = document.createElement('div');
messageElement.classList.add('message', sender);
// Decode HTML entities and render Markdown
const decodedContent = he.decode(content);
const htmlContent = marked(decodedContent);
messageElement.innerHTML = htmlContent;
messages.appendChild(messageElement);
messages.scrollTop = messages.scrollHeight; // Scroll to the bottom of the chat
// Highlight code blocks if any
document.querySelectorAll('pre code').forEach((block) => {
hljs.highlightElement(block);
if (sender === 'assistant') {
addCopyButton(block); // Add copy button to code blocks
}
});
// Add "Copy Full Response" button after each assistant response
if (sender === 'assistant') {
addCopyFullResponseButton(messages, messageElement);
}
}
// Adds a copy button to a code block
function addCopyButton(block) {
const button = document.createElement('button');
button.classList.add('copy-button-code');
button.textContent = 'Copy';
button.addEventListener('click', () => copyToClipboard(block));
block.parentNode.appendChild(button);
}
// Adds "Copy Full Response" button below the assistant response
function addCopyFullResponseButton(messagesContainer, messageElement) {
const copyFullResponseButton = document.createElement('button');
copyFullResponseButton.classList.add('copy-button');
copyFullResponseButton.textContent = 'Copy Full Response';
copyFullResponseButton.addEventListener('click', () => copyFullResponse(messageElement));
messagesContainer.appendChild(copyFullResponseButton);
}
// Copies code block content to the clipboard
function copyToClipboard(block) {
const text = block.innerText;
navigator.clipboard.writeText(text).then(() => {
displayAlert('success', 'The code block was copied to the clipboard!');
}).catch((err) => {
displayAlert('error', 'Failed to copy code: ' + err);
});
}
// Copies the full response content to the clipboard in Markdown format
function copyFullResponse(messageElement) {
const markdownContent = convertToMarkdown(messageElement);
navigator.clipboard.writeText(markdownContent).then(() => {
displayAlert('success', 'Full response copied to clipboard!');
}).catch((err) => {
displayAlert('error', 'Failed to copy response: ' + err);
});
}
// Converts the HTML content of the response to Markdown, including language identifier
function convertToMarkdown(element) {
let markdown = '';
const nodes = element.childNodes;
nodes.forEach(node => {
if (node.nodeName === 'P') {
markdown += `${node.innerText}\n\n`;
} else if (node.nodeName === 'PRE') {
const codeBlock = node.querySelector('code');
const languageClass = codeBlock.className.match(/language-(\w+)/); // Extract language from class if available
const language = languageClass ? languageClass[1] : ''; // Default to empty if no language found
const codeText = codeBlock.innerText;
// Add language identifier to the Markdown code block
markdown += `\`\`\`${language}\n${codeText}\n\`\`\`\n\n`;
}
});
return markdown;
}
// Displays an alert message with animation
function displayAlert(type, message) {
const alertElement = document.getElementById(`${type}-alert`);
alertElement.textContent = message;
alertElement.style.display = 'flex'; // Show the alert
alertElement.classList.remove('fade-out'); // Remove fade-out class if present
alertElement.style.opacity = '1'; // Ensure it's fully visible
// Automatically hide the alert after 3 seconds with animation
setTimeout(() => {
alertElement.classList.add('fade-out'); // Add fade-out class to trigger animation
setTimeout(() => {
alertElement.style.display = 'none'; // Hide after animation finishes
}, 500); // Match this time to the animation duration
}, 3000); // Show the alert for 3 seconds before hiding
}
// Handles error responses based on status code
function handleErrorResponse(status) {
const messages = document.getElementById('messages');
const lastMessage = messages.lastElementChild;
if (status === 429) {
displayAlert('error', 'Sorry, I am currently too busy at the moment!');
// Remove the last user message if the status is 429
if (lastMessage && lastMessage.classList.contains('user')) {
messages.removeChild(lastMessage);
}
} else {
displayMessage('Error: ' + status, 'assistant');
// Remove the last user message for any other errors
if (lastMessage && lastMessage.classList.contains('user')) {
messages.removeChild(lastMessage);
}
}
}
// Helper function to reset the conversation on the server
async function sendResetRequest() {
const response = await fetch('https://infer.x64.world/reset-conversation', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
}
});
return response;
}
// Resets the chat messages and optionally displays a success message
async function resetChat(displaySuccessMessage = true) {
try {
const response = await sendResetRequest();
if (response.ok) {
const messagesContainer = document.getElementById('messages');
messagesContainer.innerHTML = ''; // Clear all messages
if (displaySuccessMessage) {
displayAlert('success', 'Messages Cleared!');
}
} else {
displayAlert('error', 'Failed to reset conversation. Please try again.');
}
} catch (error) {
displayAlert('error', 'An error occurred while resetting the conversation.');
}
}
// Resets the chat on page load without displaying the success message
document.addEventListener('DOMContentLoaded', function () {
resetChat(false);
});
function openWindow(url, windowName, width, height) {
window.open(url, windowName, `width=${width},height=${height},menubar=no,toolbar=no,location=no,status=no,scrollbars=yes,resizable=yes`);
}
function openGpuStats() {
openWindow('https://raven-scott.fyi/smi.html', 'gpuStatsWindow', 800, 600);
}
function openLiveLog() {
openWindow('https://llama-live-log.x64.world/', 'liveLogWindow', 600, 600);
}
function openTop() {
openWindow('https://ai-top.x64.world/', 'liveLogGtopWindow', 800, 600);
}
function openNetdata() {
openWindow('https://ai-monitor.x64.world/', 'netDataWindow', 800, 650);
}
function openAbout() {
openWindow('https://raven-scott.fyi/about-rayai', 'aboutRAIWindow', 800, 650);
}

BIN
public/mstile-150x150.png Normal file

Binary file not shown. (After: 6.7 KiB)

3
public/robots.txt Normal file
View File

@ -0,0 +1,3 @@
User-agent: *
Allow: /
Sitemap: https://raven-scott.fyi/sitemap.xml

19
public/site.webmanifest Normal file
View File

@ -0,0 +1,19 @@
{
"name": "",
"short_name": "",
"icons": [
{
"src": "/android-chrome-192x192.png",
"sizes": "192x192",
"type": "image/png"
},
{
"src": "/android-chrome-512x512.png",
"sizes": "512x512",
"type": "image/png"
}
],
"theme_color": "#ffffff",
"background_color": "#ffffff",
"display": "standalone"
}

118
public/smi.html Normal file
View File

@ -0,0 +1,118 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>GPU Stats</title>
<style>
body {
background-color: #121212;
color: #ffffff;
font-family: Arial, sans-serif;
padding: 20px;
line-height: 1.6;
}
.container {
max-width: 800px;
margin: 0 auto;
padding: 20px;
border: 1px solid #333;
border-radius: 10px;
background-color: #1e1e1e;
}
.title {
font-size: 2em;
margin-bottom: 20px;
text-align: center;
}
.stat-group {
margin-bottom: 20px;
}
.stat-group h2 {
font-size: 1.5em;
margin-bottom: 10px;
border-bottom: 1px solid #444;
padding-bottom: 5px;
}
.stat {
margin-bottom: 10px;
}
.stat span {
font-weight: bold;
}
pre {
white-space: pre-wrap;
word-wrap: break-word;
}
</style>
</head>
<body>
<div class="container">
<div class="title">GPU Statistics</div>
<div id="gpu-stats"></div>
</div>
<script>
async function fetchGpuStats() {
try {
const response = await fetch('https://smi.x64.world/nvidia-smi');
const data = await response.json();
displayStats(data);
} catch (error) {
console.error('Error fetching GPU stats:', error);
}
}
function displayStats(data) {
const gpuStatsDiv = document.getElementById('gpu-stats');
const gpu = data.nvidia_smi_log.gpu;
gpuStatsDiv.innerHTML = `
<div class="stat-group">
<h2>Processes</h2>
${Array.isArray(gpu.processes.process_info) ? gpu.processes.process_info.map(process => `
<div class="stat"><span>Process ID:</span> ${process.pid}, <span>Name:</span> ${process.process_name}, <span>Memory Used:</span> ${process.used_memory}</div>
`).join('') : `<div class="stat"><span>Process ID:</span> ${gpu.processes.process_info.pid}, <span>Name:</span> ${gpu.processes.process_info.process_name}, <span>Memory Used:</span> ${gpu.processes.process_info.used_memory}</div>`}
</div>
<div class="stat-group">
<h2>Utilization</h2>
<div class="stat"><span>GPU Utilization:</span> ${gpu.utilization.gpu_util}</div>
<div class="stat"><span>Memory Utilization:</span> ${gpu.utilization.memory_util}</div>
</div>
<div class="stat-group">
<h2>Clocks</h2>
<div class="stat"><span>Graphics Clock:</span> ${gpu.clocks.graphics_clock}</div>
<div class="stat"><span>SM Clock:</span> ${gpu.clocks.sm_clock}</div>
<div class="stat"><span>Memory Clock:</span> ${gpu.clocks.mem_clock}</div>
<div class="stat"><span>Video Clock:</span> ${gpu.clocks.video_clock}</div>
</div>
<div class="stat-group">
<h2>Memory Usage</h2>
<div class="stat"><span>Total FB Memory:</span> ${gpu.fb_memory_usage.total}</div>
<div class="stat"><span>Used FB Memory:</span> ${gpu.fb_memory_usage.used}</div>
<div class="stat"><span>Free FB Memory:</span> ${gpu.fb_memory_usage.free}</div>
</div>
<div class="stat-group">
<h2>Performance</h2>
<div class="stat"><span>Performance State:</span> ${gpu.performance_state}</div>
<div class="stat"><span>Fan Speed:</span> ${gpu.fan_speed}</div>
<div class="stat"><span>Power Draw:</span> ${gpu.gpu_power_readings.power_draw}</div>
</div>
<div class="stat-group">
<h2>General Information</h2>
<div class="stat"><span>Product Name:</span> ${gpu.product_name}</div>
<div class="stat"><span>Product Brand:</span> ${gpu.product_brand}</div>
<div class="stat"><span>Architecture:</span> ${gpu.product_architecture}</div>
<div class="stat"><span>UUID:</span> ${gpu.uuid}</div>
<div class="stat"><span>VBIOS Version:</span> ${gpu.vbios_version}</div>
</div>
`;
}
window.onload = () => {
fetchGpuStats();
setInterval(fetchGpuStats, 2000);
};
</script>
</body>
</html>
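
smi.html polls https://smi.x64.world/nvidia-smi every two seconds and expects JSON shaped like nvidia-smi's XML report (nvidia_smi_log.gpu). That backend is not part of this diff; a minimal sketch, assuming an Express route that wraps "nvidia-smi -q -x" and converts the XML with the xml2js package (both assumptions):

// Hypothetical backend for the endpoint smi.html polls; Express and xml2js
// are assumed dependencies that do not appear anywhere in this diff.
const { execFile } = require('child_process');
const { parseStringPromise } = require('xml2js');

app.get('/nvidia-smi', (req, res) => {
  // -q -x prints the full device query report as XML
  execFile('nvidia-smi', ['-q', '-x'], async (err, stdout) => {
    if (err) return res.status(500).json({ error: 'nvidia-smi failed' });
    try {
      // explicitArray: false yields data.nvidia_smi_log.gpu, the shape the page reads
      const data = await parseStringPromise(stdout, { explicitArray: false });
      res.set('Access-Control-Allow-Origin', '*'); // the page fetches cross-origin
      res.json(data);
    } catch (e) {
      res.status(500).json({ error: 'could not parse nvidia-smi output' });
    }
  });
});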

views/about-rayai.ejs Normal file (+61 lines)

@@ -0,0 +1,61 @@
<!DOCTYPE html>
<html lang="en">
<head>
<!-- Meta and Title -->
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="description" content="<%= process.env.OWNER_NAME %>'s Blog">
<title><%= title %> | <%= process.env.OWNER_NAME %>'s Blog</title>
<!-- Bootstrap CSS -->
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha1/dist/css/bootstrap.min.css">
<!-- Font Awesome CSS for Icons -->
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0-beta3/css/all.min.css">
<!-- Custom CSS -->
<link rel="stylesheet" href="<%= process.env.HOST_URL %>/css/styles.css">
<link rel="apple-touch-icon" sizes="180x180" href="/apple-touch-icon.png">
<link rel="icon" type="image/png" sizes="32x32" href="/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="16x16" href="/favicon-16x16.png">
<link rel="manifest" href="/site.webmanifest">
<meta name="msapplication-TileColor" content="#da532c">
<meta name="theme-color" content="#ffffff">
</head>
<body>
<!-- Navbar -->
<nav class="navbar navbar-expand-lg navbar-dark">
<div class="container-fluid">
<a class="navbar-brand" href="<%= process.env.HOST_URL %>"><%= process.env.SITE_NAME %></a>
<button class="navbar-toggler" type="button" data-bs-toggle="collapse" data-bs-target="#navbarNav" aria-controls="navbarNav" aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarNav">
<ul class="navbar-nav ms-auto">
<% menuItems.forEach(item => { %>
<li class="nav-item">
<a class="nav-link" href="<%= item.url %>" <%= item.openNewPage ? 'target="_blank"' : '' %>><%= item.title %></a>
</li>
<% }) %>
</ul>
</div>
</div>
</nav>
<!-- About Me Section -->
<section class="about-me py-5">
<div class="container">
<div class="section-divider"></div>
<!-- Inject the HTML content generated from the markdown -->
<div class="markdown-content">
<%- content %>
</div>
</div>
</section>
<!-- Bootstrap JS Bundle -->
<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha1/dist/js/bootstrap.bundle.min.js"></script>
</body>
</html>
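
The template injects pre-rendered HTML through <%- content %>; the route that produces it is outside this diff. A minimal sketch, assuming the page body lives in a markdown file rendered server-side with the marked package (the file path, title, and route path are placeholders):

// Hypothetical route behind views/about-rayai.ejs. Only title, content, and
// menuItems are grounded in the template; everything else is a placeholder.
const fs = require('fs');
const { marked } = require('marked');

app.get('/about-rayai', (req, res) => {
  const markdown = fs.readFileSync('./md/about-rayai.md', 'utf8');
  res.render('about-rayai', {
    title: 'About RayAI',
    content: marked.parse(markdown),
    menuItems, // the injected menu items every view iterates over
  });
});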

@@ -5,102 +5,38 @@
<!-- Meta and Title -->
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>About Me - Raven Scott</title>
<meta name="description" content="<%= process.env.OWNER_NAME %>'s Blog">
<title><%= title %> | <%= process.env.OWNER_NAME %>'s Blog</title>
<!-- Bootstrap CSS -->
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha1/dist/css/bootstrap.min.css">
<!-- Font Awesome CSS for Icons -->
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0-beta3/css/all.min.css">
<!-- Custom CSS -->
<link rel="stylesheet" href="/css/styles.css">
<style>
/* Custom styles for a more professional look */
body {
background-color: #1a1a1a;
color: #e0e0e0;
}
.about-me h2,
.about-me h3 {
color: #ffffff;
font-weight: bold;
}
.about-me p {
font-size: 1.1rem;
line-height: 1.7;
color: #d1d1d1;
}
.about-me {
padding: 50px 0;
}
.container {
max-width: 900px;
margin: auto;
}
.footer-logo {
font-size: 1.5rem;
font-weight: bold;
}
.footer-links a {
color: #9a9a9a;
font-size: 0.9rem;
}
.footer-links a:hover {
color: #ffffff;
}
.btn-primary {
background-color: #007bff;
border-color: #007bff;
}
.btn-primary:hover {
background-color: #0056b3;
border-color: #004085;
}
/* Add padding for better text readability */
.about-me p {
padding-bottom: 15px;
}
/* Separator style for sections */
.section-divider {
width: 80px;
height: 3px;
background-color: #007bff;
margin: 20px 0;
border-radius: 2px;
}
</style>
<link rel="stylesheet" href="<%= process.env.HOST_URL %>/css/styles.css">
<link rel="apple-touch-icon" sizes="180x180" href="/apple-touch-icon.png">
<link rel="icon" type="image/png" sizes="32x32" href="/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="16x16" href="/favicon-16x16.png">
<link rel="manifest" href="/site.webmanifest">
<meta name="msapplication-TileColor" content="#da532c">
<meta name="theme-color" content="#ffffff">
</head>
<body>
<!-- Navigation Bar -->
<!-- Navbar -->
<nav class="navbar navbar-expand-lg navbar-dark">
<div class="container-fluid">
<a class="navbar-brand" href="/">raven-scott.fyi</a>
<a class="navbar-brand" href="<%= process.env.HOST_URL %>"><%= process.env.SITE_NAME %></a>
<button class="navbar-toggler" type="button" data-bs-toggle="collapse" data-bs-target="#navbarNav" aria-controls="navbarNav" aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarNav">
<ul class="navbar-nav ms-auto">
<li class="nav-item">
<a class="nav-link" href="/">Home</a>
</li>
<li class="nav-item">
<a class="nav-link active" href="/about">About Me</a>
</li>
<li class="nav-item">
<a class="nav-link" href="/contact">Contact</a>
</li>
<% menuItems.forEach(item => { %>
<li class="nav-item">
<a class="nav-link" href="<%= item.url %>" <%= item.openNewPage ? 'target="_blank"' : '' %>><%= item.title %></a>
</li>
<% }) %>
</ul>
</div>
</div>
@@ -108,37 +44,27 @@
<!-- About Me Section -->
<section class="about-me py-5">
<div class="container text-center">
<h2 class="text-white mb-4">About Me</h2>
<div class="container">
<div class="section-divider"></div>
<p class="lead">Hi, Im Raven Scott, a Linux enthusiast and problem solver with a deep passion for technology and creativity. I thrive in environments where I can learn, experiment, and turn ideas into reality. Whether it's building systems, coding, or tackling complex technical challenges, I find joy in using technology to make life easier and more efficient.</p>
<p>My passion for Linux and open-source technologies began early on, and since then, Ive been on a continuous journey of growth and discovery. From troubleshooting networking issues to optimizing servers for performance, I love diving deep into the intricate details of how things work. The thrill of solving problems, especially when it comes to system security or performance optimization, is what fuels me every day.</p>
<h3 class="text-white mt-5">What Drives Me</h3>
<div class="section-divider"></div>
<p>I'm passionate about more than just the technical side. I believe in the power of technology to bring people together, and that's why I'm dedicated to creating platforms and solutions that are accessible and impactful. Whether it's hosting services, developing peer-to-peer applications, or automating complex tasks, I'm always exploring new ways to push the boundaries of what's possible.</p>
<p>Outside of work, I love contributing to community projects and sharing my knowledge with others. Helping people grow their own skills is one of the most rewarding aspects of what I do. From mentoring to writing documentation, I'm constantly looking for ways to give back to the tech community.</p>
<h3 class="text-white mt-5">Creative Side</h3>
<div class="section-divider"></div>
<p>When I'm not deep in the technical world, I'm exploring my creative side through music. I run my own music label, where I produce and distribute AI-generated music across all platforms. Music and technology blend seamlessly for me, as both are outlets for innovation and expression.</p>
<p>In everything I do, from coding to creating music, my goal is to keep learning, growing, and sharing my passion with the world. If you ever want to connect, collaborate, or simply chat about tech or music, feel free to reach out!</p>
<!-- Inject the HTML content generated from the markdown -->
<div class="markdown-content">
<%- content %>
</div>
</div>
</section>
<!-- Footer -->
<footer class="bg-dark text-white text-center py-4">
<footer class="text-white text-center py-4">
<div class="container">
<h4 class="footer-logo mb-3">Never Stop Learning</h4>
<h4 class="footer-logo mb-3"><%= process.env.FOOTER_TAGLINE %></h4>
<p class="footer-links mb-3">
<a href="/" class="text-white text-decoration-none me-3">Home</a>
<a href="/about" class="text-white text-decoration-none me-3">About</a>
<a href="/contact" class="text-white text-decoration-none">Contact</a>
<a href="/contact" class="text-white text-decoration-none me-3">Contact</a>
<a href="<%= process.env.HOST_URL %>/sitemap.xml" class="text-white text-decoration-none me-3">Sitemap</a>
<a href="<%= process.env.HOST_URL %>/rss" class="text-white text-decoration-none">RSS Feed</a>
</p>
<p class="mb-0">&copy; 2024 Raven Scott. All rights reserved.</p>
<p class="mb-0">&copy; 2024 <%= process.env.OWNER_NAME %>. All rights reserved.</p>
</div>
</footer>

@@ -3,41 +3,43 @@
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title><%= title %></title>
<meta name="description" content="<%= description %>">
<title><%= title %> | <%= process.env.OWNER_NAME %>'s Blog</title>
<!-- Stylesheets -->
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha1/dist/css/bootstrap.min.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0-beta3/css/all.min.css">
<link rel="stylesheet" href="/css/styles.css">
<link rel="stylesheet" href="<%= process.env.HOST_URL %>/css/styles.css">
<!-- Highlight.js CSS for Syntax Highlighting -->
<!-- <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.8.0/styles/default.min.css"> -->
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.6.0/styles/atom-one-dark.min.css">
<link rel="apple-touch-icon" sizes="180x180" href="<%= process.env.HOST_URL %>/apple-touch-icon.png">
<link rel="icon" type="image/png" sizes="32x32" href="<%= process.env.HOST_URL %>/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="16x16" href="<%= process.env.HOST_URL %>/favicon-16x16.png">
<link rel="manifest" href="<%= process.env.HOST_URL %>/site.webmanifest">
<meta name="msapplication-TileColor" content="#da532c">
<meta name="theme-color" content="#ffffff">
</head>
<body>
<!-- Navbar -->
<nav class="navbar navbar-expand-lg navbar-dark">
<div class="container-fluid">
<a class="navbar-brand" href="/">raven-scott.fyi</a>
<a class="navbar-brand" href="<%= process.env.HOST_URL %>"><%= process.env.SITE_NAME %></a>
<button class="navbar-toggler" type="button" data-bs-toggle="collapse" data-bs-target="#navbarNav" aria-controls="navbarNav" aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarNav">
<ul class="navbar-nav ms-auto">
<li class="nav-item">
<a class="nav-link active" href="/">Home</a>
</li>
<li class="nav-item">
<a class="nav-link" href="/about">About Me</a>
</li>
<li class="nav-item">
<a class="nav-link" href="/contact">Contact</a>
</li>
<% menuItems.forEach(item => { %>
<li class="nav-item">
<a class="nav-link" href="<%= item.url %>" <%= item.openNewPage ? 'target="_blank"' : '' %>><%= item.title %></a>
</li>
<% }) %>
</ul>
</div>
</div>
</nav>
<header class="bg-primary text-white text-center py-5">
<h1><%= title %></h1>
<p class="lead"><%= lead %></p> <!-- Lead is dynamically set here -->
@@ -51,15 +53,17 @@
</main>
<!-- Footer -->
<footer class="bg-dark text-white text-center py-4">
<footer class="text-white text-center py-4">
<div class="container">
<h4 class="footer-logo mb-3">Never Stop Learning</h4>
<h4 class="footer-logo mb-3"><%= process.env.FOOTER_TAGLINE %></h4>
<p class="footer-links mb-3">
<a href="/" class="text-white text-decoration-none me-3">Home</a>
<a href="/about" class="text-white text-decoration-none me-3">About</a>
<a href="/contact" class="text-white text-decoration-none">Contact</a>
<a href="/contact" class="text-white text-decoration-none me-3">Contact</a>
<a href="<%= process.env.HOST_URL %>/sitemap.xml" class="text-white text-decoration-none me-3">Sitemap</a>
<a href="<%= process.env.HOST_URL %>/rss" class="text-white text-decoration-none">RSS Feed</a>
</p>
<p class="mb-0">&copy; 2024 Raven Scott. All rights reserved.</p>
<p class="mb-0">&copy; 2024 <%= process.env.OWNER_NAME %>. All rights reserved.</p>
</div>
</footer>
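
The template above links highlight.js's atom-one-dark stylesheet, but no initialization script appears in this hunk. A minimal client-side sketch of the step that actually highlights rendered code blocks, assuming highlight.js v11 is loaded the same way views/chat.ejs loads it:

// Hypothetical init; hljs.highlightAll() scans every <pre><code> block
// once the document has finished parsing.
document.addEventListener('DOMContentLoaded', () => {
  hljs.highlightAll();
});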

views/chat.ejs Normal file (+123 lines)

@@ -0,0 +1,123 @@
<!DOCTYPE html>
<html lang="en">
<head>
<!-- Meta and Title -->
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="description" content="<%= process.env.OWNER_NAME %>'s Blog">
<title>
<%= title %> | <%= process.env.OWNER_NAME %>'s Blog
</title>
<!-- Bootstrap CSS -->
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha1/dist/css/bootstrap.min.css">
<!-- Font Awesome CSS for Icons -->
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0-beta3/css/all.min.css">
<link rel="stylesheet"
href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.6.0/styles/atom-one-dark.min.css">
<!-- Custom CSS -->
<link rel="stylesheet" href="<%= process.env.HOST_URL %>/css/styles.css">
<link rel="stylesheet" href="<%= process.env.HOST_URL %>/css/chat.css">
<link rel="apple-touch-icon" sizes="180x180" href="/apple-touch-icon.png">
<link rel="icon" type="image/png" sizes="32x32" href="/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="16x16" href="/favicon-16x16.png">
<link rel="manifest" href="/site.webmanifest">
<meta name="msapplication-TileColor" content="#da532c">
<meta name="theme-color" content="#ffffff">
</head>
<body class="bg-dark text-white">
<div class="chat-container">
<!-- Navbar -->
<nav class="navbar navbar-expand-lg navbar-dark">
<div class="container-fluid">
<a class="navbar-brand" href="<%= process.env.HOST_URL %>">
<%= process.env.SITE_NAME %>
</a>
<button class="navbar-toggler" type="button" data-bs-toggle="collapse" data-bs-target="#navbarNav"
aria-controls="navbarNav" aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarNav">
<ul class="navbar-nav ms-auto">
<% menuItems.forEach(item=> { %>
<li class="nav-item">
<a class="nav-link" href="<%= item.url %>" <%=item.openNewPage ? 'target="_blank"' : ''
%>>
<%= item.title %>
</a>
</li>
<% }) %>
<!-- Add a vertical divider -->
<!-- <li class="nav-item">
<span class="nav-link separator">|</span>
</li> -->
<!-- Inject custom menu items here as a dropdown -->
<!-- <li class="nav-item dropdown">
<a class="nav-link dropdown-toggle" href="#" id="navbarDropdown" role="button"
data-bs-toggle="dropdown" aria-expanded="false">
Tools
</a>
<ul class="dropdown-menu dropdown-menu-end" aria-labelledby="navbarDropdown">
<li><a class="dropdown-item" href="#" onclick="openLiveLog()">Live Log</a></li>
<li><a class="dropdown-item" href="#" onclick="openTop()">Top</a></li>
<li><a class="dropdown-item" href="#" onclick="openNetdata()">Netdata</a></li>
<li><a class="dropdown-item" href="#" onclick="openGpuStats()">GPU Stats</a>
</li>
<li><a class="dropdown-item" href="#" onclick="openAbout()">About RayAI</a></li>
</ul>
</li> -->
</ul>
</div>
</div>
</nav>
<!-- Alert Messages -->
<div id="success-alert" class="alert alert-success mt-3" style="display: none;"></div>
<div id="error-alert" class="alert alert-danger mt-3" style="display: none;"></div>
<!-- Chat Box -->
<div class="chat-box">
<div id="messages" class="messages"></div>
<!-- Input area with sticky behavior -->
<div class="input-area">
<textarea id="messageInput" class="form-control mb-2" rows="3" placeholder=""
onkeydown="handleKeyDown(event)" autofocus></textarea>
<!-- Buttons side by side -->
<div class="d-flex justify-content-between">
<button class="btn btn-secondary" onclick="resetChat()">Reset Chat</button>
<!-- Loading Indicator -->
<div id="loading" class="text-center mt-3" style="display: none;">
<div class="spinner-border spinner-border-sm text-primary" role="status">
<span class="visually-hidden">Loading...</span>
</div>
</div>
<button class="btn btn-primary" onclick="sendMessage()">Send Message</button>
</div>
</div>
</div>
</div>
<!-- Bootstrap JS Bundle -->
<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha1/dist/js/bootstrap.bundle.min.js"></script>
<!-- Additional Libraries -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/marked/3.0.7/marked.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.6.0/highlight.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/he@1.2.0/he.min.js"></script>
<!-- Inline JavaScript -->
<script src="<%= process.env.HOST_URL %>/js/chat.js"></script>
</body>
</html>
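
All of the chat behavior lives in /js/chat.js, which this diff only references. A rough sketch of the handlers the markup calls; the /api/chat endpoint, request payload, and response field are assumptions, while marked, hljs, and the element IDs come from the template itself:

// Hypothetical outline of public/js/chat.js; endpoint and JSON shape are guesses.
function handleKeyDown(event) {
  // Enter sends, Shift+Enter inserts a newline
  if (event.key === 'Enter' && !event.shiftKey) {
    event.preventDefault();
    sendMessage();
  }
}

async function sendMessage() {
  const input = document.getElementById('messageInput');
  const text = input.value.trim();
  if (!text) return;
  appendMessage('user', text);
  input.value = '';
  document.getElementById('loading').style.display = 'block';
  try {
    const res = await fetch('/api/chat', { // assumed endpoint path
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ message: text }),
    });
    const data = await res.json();
    appendMessage('assistant', data.reply); // assumed response field
  } catch (err) {
    const alert = document.getElementById('error-alert');
    alert.textContent = 'Request failed; please try again.';
    alert.style.display = 'block';
  } finally {
    document.getElementById('loading').style.display = 'none';
  }
}

function appendMessage(role, raw) {
  const div = document.createElement('div');
  div.className = 'message ' + role;
  div.innerHTML = marked(raw); // marked 3.x exposes a callable global
  div.querySelectorAll('pre code').forEach(el => hljs.highlightElement(el));
  document.getElementById('messages').appendChild(div);
}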

@@ -5,35 +5,41 @@
<!-- Meta and Title -->
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Contact Me - Raven Scott</title>
<meta name="description" content="Contact <%= process.env.OWNER_NAME %> using a contact form">
<title>Contact Me - <%= process.env.OWNER_NAME %></title>
<!-- Bootstrap CSS -->
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha1/dist/css/bootstrap.min.css">
<!-- Font Awesome CSS for Icons -->
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0-beta3/css/all.min.css">
<!-- Custom CSS -->
<link rel="stylesheet" href="/css/styles.css">
<link rel="stylesheet" href="<%= process.env.HOST_URL %>/css/styles.css">
<link rel="apple-touch-icon" sizes="180x180" href="<%= process.env.HOST_URL %>/apple-touch-icon.png">
<link rel="icon" type="image/png" sizes="32x32" href="<%= process.env.HOST_URL %>/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="16x16" href="<%= process.env.HOST_URL %>/favicon-16x16.png">
<link rel="manifest" href="<%= process.env.HOST_URL %>/site.webmanifest">
<meta name="msapplication-TileColor" content="#da532c">
<meta name="theme-color" content="#ffffff">
<!-- reCAPTCHA API -->
<script src="https://www.google.com/recaptcha/api.js" async defer></script>
</head>
<body class="bg-dark text-white">
<!-- Navigation Bar -->
<!-- Navbar -->
<nav class="navbar navbar-expand-lg navbar-dark">
<div class="container-fluid">
<a class="navbar-brand" href="/">raven-scott.fyi</a>
<a class="navbar-brand" href="<%= process.env.HOST_URL %>"><%= process.env.SITE_NAME %></a>
<button class="navbar-toggler" type="button" data-bs-toggle="collapse" data-bs-target="#navbarNav" aria-controls="navbarNav" aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarNav">
<ul class="navbar-nav ms-auto">
<li class="nav-item">
<a class="nav-link active" href="/">Home</a>
</li>
<li class="nav-item">
<a class="nav-link" href="/about">About Me</a>
</li>
<li class="nav-item">
<a class="nav-link" href="/contact">Contact</a>
</li>
<% menuItems.forEach(item => { %>
<li class="nav-item">
<a class="nav-link" href="<%= item.url %>" <%= item.openNewPage ? 'target="_blank"' : '' %>><%= item.title %></a>
</li>
<% }) %>
</ul>
</div>
</div>
@@ -42,7 +48,7 @@
<!-- Contact Me Section -->
<header class="d-flex align-items-center justify-content-center text-center py-5">
<div class="container">
<h2 class="mb-4 text-white">Contact Me</h2>
<h2 class="mb-4 text-white">Contacting <%= process.env.OWNER_NAME %></h2>
<p class="lead text-white">Have a question or need help with a project? Fill out the form below, and I'll be in touch!</p>
<!-- Display success or error message -->
@@ -70,21 +76,19 @@
<label for="message" class="form-label">Your Message<span class="text-danger">*</span></label>
<textarea class="form-control bg-dark text-white border-secondary" id="message" name="message" rows="6" required></textarea>
</div>
<!-- reCAPTCHA -->
<center><div class="g-recaptcha" data-sitekey="<%= process.env.CAPTCHA_SITE_KEY %>"></div></center>
<br>
<button type="submit" class="btn btn-primary">Send Message</button>
</form>
</div>
</header>
<!-- Footer -->
<footer class="bg-dark text-white text-center py-4">
<footer class=" text-white text-center py-4">
<div class="container">
<h4 class="footer-logo mb-3">Never Stop Learning</h4>
<p class="footer-links mb-3">
<a href="/" class="text-white text-decoration-none me-3">Home</a>
<a href="/about" class="text-white text-decoration-none me-3">About</a>
<a href="/contact" class="text-white text-decoration-none">Contact</a>
</p>
<p class="mb-0">&copy; 2024 Raven Scott. All rights reserved.</p>
<p class="mb-0">&copy; 2024 <%= process.env.OWNER_NAME %>. All rights reserved.</p>
</div>
</footer>
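
The form embeds a reCAPTCHA v2 widget keyed by CAPTCHA_SITE_KEY; the matching server-side check is not in this hunk. A minimal sketch that verifies the token against Google's standard siteverify endpoint; the route path, the secret-key variable name, and Node 18+ global fetch are assumptions:

// Hypothetical contact-form handler; only the siteverify URL and the
// g-recaptcha-response field are standard reCAPTCHA v2, the rest is assumed.
app.post('/contact', async (req, res) => {
  const params = new URLSearchParams({
    secret: process.env.CAPTCHA_SECRET_KEY, // assumed env var name
    response: req.body['g-recaptcha-response'],
  });
  const verdict = await fetch('https://www.google.com/recaptcha/api/siteverify', {
    method: 'POST',
    body: params,
  }).then(r => r.json());
  if (!verdict.success) {
    return res.status(400).render('contact', { error: 'CAPTCHA failed', menuItems });
  }
  // hand the message off to a mailer here (omitted), then confirm
  res.render('contact', { success: 'Message sent!', menuItems });
});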

@@ -3,56 +3,79 @@
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="description" content="<%= process.env.OWNER_NAME %>'s Blog">
<title><%= title %></title>
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha1/dist/css/bootstrap.min.css">
<link rel="stylesheet" href="/css/styles.css">
<link rel="stylesheet" href="<%= process.env.HOST_URL %>/css/styles.css">
<script>
window.onload = function() {
const input = document.getElementById("search-input");
input.focus();
input.setSelectionRange(input.value.length, input.value.length);
};
</script>
</head>
<body>
<!-- Navbar -->
<nav class="navbar navbar-expand-lg navbar-dark">
<div class="container-fluid">
<a class="navbar-brand" href="/">raven-scott.fyi</a>
<a class="navbar-brand" href="<%= process.env.HOST_URL %>"><%= process.env.SITE_NAME %></a>
<button class="navbar-toggler" type="button" data-bs-toggle="collapse" data-bs-target="#navbarNav" aria-controls="navbarNav" aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarNav">
<ul class="navbar-nav ms-auto">
<li class="nav-item">
<a class="nav-link active" href="/">Home</a>
</li>
<li class="nav-item">
<a class="nav-link" href="/about">About Me</a>
</li>
<li class="nav-item">
<a class="nav-link" href="/contact">Contact</a>
</li>
<% menuItems.forEach(item => { %>
<li class="nav-item">
<a class="nav-link" href="<%= item.url %>" <%= item.openNewPage ? 'target="_blank"' : '' %>><%= item.title %></a>
</li>
<% }) %>
</ul>
</div>
</div>
</nav>
<!-- Main Content -->
<header class="py-5">
<div class="container text-center">
<h1>Welcome to my long form post blog</h1>
<p class="lead">Latest articles and insights from Raven Scott</p>
<h1><%= process.env.FRONT_PAGE_TITLE %></h1>
<p class="lead"><%= process.env.FRONT_PAGE_LEAD %></p>
<form action="/" method="get" class="mb-4">
<div class="input-group">
<input type="text" id="search-input" name="search" class="form-control" placeholder="Search blog posts..." value="<%= typeof searchQuery !== 'undefined' ? searchQuery : '' %>" autofocus>
</div>
</form>
</div>
</header>
<!-- Blog Content -->
<section class="py-5">
<div class="container">
<h2>Recent Posts</h2>
<!-- Blog post list -->
<% if (noResults) { %>
<p><center>Sorry, no blog posts found matching "<%= searchQuery %>"</center></p>
<% } else { %>
<h2><%= searchQuery ? 'Search results for "' + searchQuery + '"' : 'Recent Posts' %></h2>
<% } %>
<ul class="list-group list-group-flush">
<% blogPosts.forEach(post => { %>
<li class="list-group-item d-flex justify-content-between align-items-center py-4">
<div>
<h5 class="mb-1"><a href="/blog/<%= post.slug %>"> <%= post.title %> </a></h5>
<p class="mb-1 text-muted">Posted on <%= post.date %></p>
<h5 class="mb-1"><a href="<%= process.env.BLOG_URL %><%= post.slug %>"><%= post.title %></a></h5>
<p class="mb-1 text-muted">Posted on
<%= new Date(post.dateCreated).toLocaleDateString('en-US', {
year: 'numeric',
month: 'long',
day: 'numeric'
}) %>
</p>
</div>
<a href="/blog/<%= post.slug %>" class="btn btn-outline-primary">Read Article</a>
<a href="<%= process.env.BLOG_URL %><%= post.slug %>" class="btn btn-outline-primary">Read Article</a>
</li>
<% }) %>
</ul>
<!-- Pagination controls -->
<nav aria-label="Page navigation">
<ul class="pagination justify-content-center mt-4">
@@ -61,13 +84,13 @@
<a class="page-link" href="?page=<%= currentPage - 1 %>">Previous</a>
</li>
<% } %>
<% for (let i = 1; i <= totalPages; i++) { %>
<li class="page-item <%= currentPage === i ? 'active' : '' %>">
<a class="page-link" href="?page=<%= i %>"><%= i %></a>
</li>
<% } %>
<% if (currentPage < totalPages) { %>
<li class="page-item">
<a class="page-link" href="?page=<%= currentPage + 1 %>">Next</a>
@@ -78,18 +101,37 @@
</div>
</section>
<footer class="bg-dark text-white text-center py-4">
<footer class="text-white text-center py-4">
<div class="container">
<h4 class="footer-logo mb-3">Never Stop Learning</h4>
<h4 class="footer-logo mb-3"><%= process.env.FOOTER_TAGLINE %></h4>
<p class="footer-links mb-3">
<a href="/" class="text-white text-decoration-none me-3">Home</a>
<a href="/about" class="text-white text-decoration-none me-3">About</a>
<a href="/contact" class="text-white text-decoration-none">Contact</a>
<a href="/contact" class="text-white text-decoration-none me-3">Contact</a>
<a href="<%= process.env.HOST_URL %>/sitemap.xml" class="text-white text-decoration-none me-3">Sitemap</a>
<a href="<%= process.env.HOST_URL %>/rss" class="text-white text-decoration-none">RSS Feed</a>
</p>
<p class="mb-0">&copy; 2024 Raven Scott. All rights reserved.</p>
<p class="mb-0">&copy; 2024 <%= process.env.OWNER_NAME %>. All rights reserved.</p>
</div>
</footer>
<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha1/dist/js/bootstrap.bundle.min.js"></script>
<script>
let typingTimer; // Timer identifier
const doneTypingInterval = 500; // Time in ms, adjust for desired delay
const searchInput = document.getElementById('search-input');
searchInput.addEventListener('input', function() {
clearTimeout(typingTimer);
typingTimer = setTimeout(function() {
searchInput.form.submit();
}, doneTypingInterval);
});
searchInput.addEventListener('keydown', function() {
clearTimeout(typingTimer);
});
</script>
</body>
</html>
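
views/index.ejs reads blogPosts, searchQuery, noResults, currentPage, and totalPages, none of which are defined in this diff. A minimal sketch of an index route that could supply them; the allPosts source and the page size are assumptions, while the variable names match what the template consumes:

// Hypothetical route behind views/index.ejs; allPosts and perPage are assumed.
app.get('/', (req, res) => {
  const perPage = 10;
  const search = (req.query.search || '').toLowerCase();
  const filtered = search
    ? allPosts.filter(p => p.title.toLowerCase().includes(search))
    : allPosts;
  const totalPages = Math.max(1, Math.ceil(filtered.length / perPage));
  const currentPage = Math.min(parseInt(req.query.page, 10) || 1, totalPages);
  res.render('index', {
    title: process.env.SITE_NAME, // assumed title source
    blogPosts: filtered.slice((currentPage - 1) * perPage, currentPage * perPage),
    searchQuery: search,
    noResults: Boolean(search) && filtered.length === 0,
    currentPage,
    totalPages,
    menuItems,
  });
});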