Mirror of https://github.com/neon-mmd/websurfx.git (synced 2024-11-24 23:18:22 -05:00)
Compare commits
28 commits: 22b59e2f3e ... 089d598db6

Commits (SHA1):
089d598db6
4d5e7bddda
4847a6eed2
951060dc45
2693cd18d2
838d1b6958
c527897a4d
2141b88c35
b0c99f25e5
d5c4206afe
4950106871
0b48f671cb
16717bc27d
f51d2e6881
ad5b754741
d5524d7eae
3c2533f69a
8225d34a9c
d2954862ea
f55abf934d
052d9fd167
bf7e73f9ff
2f4e4038b1
5d06cce220
33363a83ef
6200c5d53c
bbc49cbf42
4993da4d89
Cargo.lock (generated): 1168 changed lines. File diff suppressed because it is too large.
Cargo.toml: 69 changed lines.
@@ -1,6 +1,6 @@
 [package]
 name = "websurfx"
-version = "1.12.1"
+version = "1.17.0"
 edition = "2021"
 description = "An open-source alternative to Searx that provides clean, ad-free, and organic results with incredible speed while keeping privacy and security in mind."
 repository = "https://github.com/neon-mmd/websurfx"
@@ -13,10 +13,11 @@ bench = false
 path = "src/bin/websurfx.rs"
 
 [dependencies]
-reqwest = { version = "0.11.24", default-features = false, features = [
+reqwest = { version = "0.12.5", default-features = false, features = [
     "rustls-tls",
     "brotli",
     "gzip",
+    "http2"
 ] }
 tokio = { version = "1.32.0", features = [
     "rt-multi-thread",
@@ -24,13 +25,13 @@ tokio = { version = "1.32.0", features = [
     "fs",
     "io-util",
 ], default-features = false }
-serde = { version = "1.0.196", default-features = false, features = ["derive"] }
-serde_json = { version = "1.0.116", default-features = false }
+serde = { version = "1.0.209", default-features = false, features = ["derive"] }
+serde_json = { version = "1.0.122", default-features = false }
 maud = { version = "0.26.0", default-features = false, features = [
     "actix-web",
 ] }
-scraper = { version = "0.18.1", default-features = false }
-actix-web = { version = "4.4.0", features = [
+scraper = { version = "0.20.0", default-features = false }
+actix-web = { version = "4.9.0", features = [
     "cookies",
     "macros",
     "compress-brotli",
@@ -40,15 +41,15 @@ actix-cors = { version = "0.7.0", default-features = false }
 fake-useragent = { version = "0.1.3", default-features = false }
 env_logger = { version = "0.11.1", default-features = false }
 log = { version = "0.4.21", default-features = false }
-mlua = { version = "0.9.1", features = [
+mlua = { version = "0.9.9", features = [
     "luajit",
     "vendored",
 ], default-features = false }
-redis = { version = "0.24.0", features = [
+redis = { version = "0.25.4", features = [
     "tokio-comp",
     "connection-manager",
 ], default-features = false, optional = true }
-blake3 = { version = "1.5.0", default-features = false }
+blake3 = { version = "1.5.4", default-features = false }
 error-stack = { version = "0.4.0", default-features = false, features = [
     "std",
 ] }
@@ -60,13 +61,13 @@ smallvec = { version = "1.13.1", features = [
 ], default-features = false }
 futures = { version = "0.3.30", default-features = false, features = ["alloc"] }
 dhat = { version = "0.3.2", optional = true, default-features = false }
-mimalloc = { version = "0.1.38", default-features = false }
+mimalloc = { version = "0.1.43", default-features = false }
 async-once-cell = { version = "0.5.3", default-features = false }
 actix-governor = { version = "0.5.0", default-features = false }
 mini-moka = { version = "0.10", optional = true, default-features = false, features = [
     "sync",
 ] }
-async-compression = { version = "0.4.6", default-features = false, features = [
+async-compression = { version = "0.4.12", default-features = false, features = [
     "brotli",
     "tokio",
 ], optional = true }
@@ -96,7 +97,7 @@ criterion = { version = "0.5.1", default-features = false }
 tempfile = { version = "3.10.1", default-features = false }
 
 [build-dependencies]
-lightningcss = { version = "1.0.0-alpha.55", default-features = false, features = [
+lightningcss = { version = "1.0.0-alpha.57", default-features = false, features = [
     "grid",
 ] }
 # Disabled until bug fixing update
@@ -132,6 +133,50 @@ codegen-units = 1
 rpath = false
 strip = "symbols"
 
+[profile.bsr1]
+inherits = "release"
+opt-level = "s"
+
+[profile.bsr2]
+inherits = "bsr1"
+opt-level = "z"
+
+[profile.lpcb1]
+inherits = "release"
+codegen-units = 16
+
+[profile.lpcb2]
+inherits = "lpcb1"
+lto = "off"
+
+[profile.lpcb3]
+inherits = "lpcb2"
+opt-level = 2
+
+[profile.bsr_and_lpcb1]
+inherits = "lpcb1"
+opt-level = "s"
+
+[profile.bsr_and_lpcb2]
+inherits = "lpcb2"
+opt-level = "s"
+
+[profile.bsr_and_lpcb3]
+inherits = "lpcb3"
+opt-level = "s"
+
+[profile.bsr_and_lpcb4]
+inherits = "lpcb1"
+opt-level = "z"
+
+[profile.bsr_and_lpcb5]
+inherits = "lpcb1"
+opt-level = "z"
+
+[profile.bsr_and_lpcb6]
+inherits = "lpcb1"
+opt-level = "z"
+
 [features]
 use-synonyms-search = ["thesaurus/static"]
 default = ["memory-cache"]
@@ -1,4 +1,4 @@
-FROM --platform=$BUILDPLATFORM rust:1.77.2-alpine3.18 AS chef
+FROM --platform=$BUILDPLATFORM rust:1.78.0-alpine3.18 AS chef
 # We only pay the installation cost once,
 # it will be cached from the second build onwards
 RUN apk add --no-cache alpine-sdk musl-dev g++ make libcrypto3 libressl-dev upx perl build-base
@@ -115,6 +115,9 @@
 - 🚀 Easy to setup with Docker or on bare metal with various installation and deployment options.
 - ⛔ Search filtering to filter search results based on four different levels.
 - 💾 Different caching levels focusing on reliability, speed and resiliancy.
+- ⬆️ Organic Search results (with ranking algorithm builtin to rerank the search results according to user's search query.).
+- 🔒 Different compression and encryption levels focusing on speed and privacy.
+- 🧪 Experimental IO-uring feature for Linux operating systems focused on performance of the engine.
 - 🔐 Fast, private, and secure
 - 🆓 100% free and open source
 - 💨 Ad-free and clean results
@@ -1,5 +1,5 @@
 # Create Builder image
-FROM --platform=$BUILDPLATFORM rust:1.77.2-alpine3.18
+FROM --platform=$BUILDPLATFORM rust:1.78.0-alpine3.18
 
 # Install required dependencies
 RUN apk add --no-cache alpine-sdk musl-dev g++ make libcrypto3 libressl-dev perl build-base
@@ -37,13 +37,15 @@ pub struct Config {
     pub request_timeout: u8,
     /// It stores the number of threads which controls the app will use to run.
     pub threads: u8,
+    /// Set the keep-alive time for client connections to the HTTP server
+    pub client_connection_keep_alive: u8,
     /// It stores configuration options for the ratelimiting middleware.
     pub rate_limiter: RateLimiter,
     /// It stores the level of safe search to be used for restricting content in the
     /// search results.
     pub safe_search: u8,
     /// It stores the TCP connection keepalive duration in seconds.
-    pub tcp_connection_keepalive: u8,
+    pub tcp_connection_keep_alive: u8,
     /// It stores the pool idle connection timeout in seconds.
     pub pool_idle_connection_timeout: u8,
 }
@@ -135,9 +137,10 @@ impl Config {
             upstream_search_engines: globals
                 .get::<_, HashMap<String, bool>>("upstream_search_engines")?,
             request_timeout: globals.get::<_, u8>("request_timeout")?,
-            tcp_connection_keepalive: globals.get::<_, u8>("tcp_connection_keepalive")?,
+            tcp_connection_keep_alive: globals.get::<_, u8>("tcp_connection_keep_alive")?,
             pool_idle_connection_timeout: globals.get::<_, u8>("pool_idle_connection_timeout")?,
             threads,
+            client_connection_keep_alive: globals.get::<_, u8>("client_connection_keep_alive")?,
             rate_limiter: RateLimiter {
                 number_of_requests: rate_limiter["number_of_requests"],
                 time_limit: rate_limiter["time_limit"],
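
Both keep-alive options are read from the Lua configuration through mlua, using the same `globals.get::<_, u8>(...)` pattern as the hunk above. Below is a minimal, self-contained sketch of that pattern; the inline Lua snippet and the `KeepAliveSettings` struct are illustrative stand-ins for the project's actual config file and `Config` type, and it assumes mlua built with a Lua backend feature (the manifest above enables `luajit` and `vendored`).

use mlua::Lua;

/// Illustrative subset of the keep-alive settings read from the Lua config.
#[derive(Debug)]
struct KeepAliveSettings {
    tcp_connection_keep_alive: u8,
    client_connection_keep_alive: u8,
}

fn main() -> mlua::Result<()> {
    let lua = Lua::new();

    // Stand-in for loading websurfx/config.lua from disk.
    lua.load(
        r#"
        tcp_connection_keep_alive = 30
        client_connection_keep_alive = 120
    "#,
    )
    .exec()?;

    // Same access pattern as the Config::parse hunk above:
    // read typed values out of the Lua globals table.
    let globals = lua.globals();
    let settings = KeepAliveSettings {
        tcp_connection_keep_alive: globals.get::<_, u8>("tcp_connection_keep_alive")?,
        client_connection_keep_alive: globals.get::<_, u8>("client_connection_keep_alive")?,
    };

    println!("{settings:?}");
    Ok(())
}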
@@ -14,7 +14,7 @@ pub mod results;
 pub mod server;
 pub mod templates;
 
-use std::{net::TcpListener, sync::OnceLock};
+use std::{net::TcpListener, sync::OnceLock, time::Duration};
 
 use crate::server::router;
 
@@ -113,6 +113,10 @@ pub fn run(
             .default_service(web::route().to(router::not_found)) // error page
     })
    .workers(config.threads as usize)
+    // Set the keep-alive timer for client connections
+    .keep_alive(Duration::from_secs(
+        config.client_connection_keep_alive as u64,
+    ))
     // Start server on 127.0.0.1 with the user provided port number. for example 127.0.0.1:8080.
     .listen(listener)?
     .run();
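
The `.keep_alive(...)` call added above uses actix-web's `HttpServer::keep_alive`, which accepts any value convertible into a `KeepAlive`, including a `Duration`. The following is a rough standalone sketch of that wiring, with a hard-coded 120-second value standing in for `config.client_connection_keep_alive` and a trivial handler in place of the real router; it assumes actix-web with the `macros` feature (enabled in the manifest above).

use std::time::Duration;

use actix_web::{web, App, HttpResponse, HttpServer};

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    // 120 s stands in for the `client_connection_keep_alive` config value.
    let client_connection_keep_alive: u64 = 120;

    HttpServer::new(|| {
        App::new().route("/", web::get().to(|| async { HttpResponse::Ok().body("ok") }))
    })
    // Keep idle client connections to the HTTP server open for this long.
    .keep_alive(Duration::from_secs(client_connection_keep_alive))
    .workers(2)
    .bind(("127.0.0.1", 8080))?
    .run()
    .await
}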
@@ -81,7 +81,7 @@ pub async fn aggregate(
            .pool_idle_timeout(Duration::from_secs(
                config.pool_idle_connection_timeout as u64,
            ))
-            .tcp_keepalive(Duration::from_secs(config.tcp_connection_keepalive as u64))
+            .tcp_keepalive(Duration::from_secs(config.tcp_connection_keep_alive as u64))
            .connect_timeout(Duration::from_secs(config.request_timeout as u64)) // Add timeout to request to avoid DDOSing the server
            .https_only(true)
            .gzip(true)
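
On the upstream side, the renamed `tcp_connection_keep_alive` value feeds reqwest's `ClientBuilder::tcp_keepalive`, next to the existing pool-idle and connect timeouts. Below is a rough sketch of that builder chain with hard-coded stand-ins for the config fields; it assumes reqwest with the `rustls-tls` and `gzip` features from the manifest above, and tokio with the `macros` feature for `#[tokio::main]`.

use std::time::Duration;

use reqwest::Client;

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    // Hard-coded stand-ins for the corresponding Config fields.
    let pool_idle_connection_timeout: u64 = 30;
    let tcp_connection_keep_alive: u64 = 30;
    let request_timeout: u64 = 30;

    let client = Client::builder()
        // Drop pooled connections that have been idle for too long.
        .pool_idle_timeout(Duration::from_secs(pool_idle_connection_timeout))
        // TCP keep-alive towards the upstream search engines.
        .tcp_keepalive(Duration::from_secs(tcp_connection_keep_alive))
        // Bound how long connection establishment may take.
        .connect_timeout(Duration::from_secs(request_timeout))
        .https_only(true)
        .gzip(true)
        .build()?;

    let body = client.get("https://example.com").send().await?.text().await?;
    println!("fetched {} bytes", body.len());
    Ok(())
}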
@@ -10,14 +10,18 @@ production_use = false -- whether to use production mode or not (in other words
 -- if production_use is set to true
 -- There will be a random delay before sending the request to the search engines, this is to prevent DDoSing the upstream search engines from a large number of simultaneous requests.
 request_timeout = 30 -- timeout for the search requests sent to the upstream search engines to be fetched (value in seconds).
-tcp_connection_keepalive = 30 -- the amount of time the tcp connection should remain alive (or connected to the server). (value in seconds).
+tcp_connection_keep_alive = 30 -- the amount of time the tcp connection should remain alive to the upstream search engines (or connected to the server). (value in seconds).
 pool_idle_connection_timeout = 30 -- timeout for the idle connections in the reqwest HTTP connection pool (value in seconds).
 rate_limiter = {
     number_of_requests = 20, -- The number of request that are allowed within a provided time limit.
     time_limit = 3, -- The time limit in which the quantity of requests that should be accepted.
 }
 https_adaptive_window_size = false
 
+number_of_https_connections = 10 -- the number of https connections that should be available in the connection pool.
+-- Set keep-alive timer in seconds; keeps clients connected to the HTTP server, different from the connection to upstream search engines
+client_connection_keep_alive = 120
+
 -- ### Search ###
 -- Filter results based on different levels. The levels provided are:
 -- {{