mirror of https://github.com/neon-mmd/websurfx.git synced 2024-11-22 14:08:23 -05:00

Trimmed down aggregate parameters

Author: ddotthomas
Date: 2024-02-25 21:28:02 -07:00
Commit: bb6dc68db6
Parent: 8431d8aab5
2 changed files with 6 additions and 11 deletions


@@ -2,6 +2,7 @@
 //! search engines and then removes duplicate results.
 
 use super::user_agent::random_user_agent;
+use crate::config::parser::Config;
 use crate::handler::{file_path, FileType};
 use crate::models::{
     aggregation_models::{EngineErrorInfo, SearchResult, SearchResults},
@@ -66,20 +67,17 @@ type FutureVec = Vec<JoinHandle<Result<HashMap<String, SearchResult>, Report<Eng
 pub async fn aggregate(
     query: &str,
     page: u32,
-    random_delay: bool,
-    debug: bool,
+    config: &Config,
     upstream_search_engines: &[EngineHandler],
-    request_timeout: u8,
     safe_search: u8,
-    adaptive_window: bool,
 ) -> Result<SearchResults, Box<dyn std::error::Error>> {
     let client = CLIENT.get_or_init(|| {
         ClientBuilder::new()
-            .timeout(Duration::from_secs(request_timeout as u64)) // Add timeout to request to avoid DDOSing the server
+            .timeout(Duration::from_secs(config.request_timeout as u64)) // Add timeout to request to avoid DDOSing the server
             .https_only(true)
             .gzip(true)
             .brotli(true)
-            .http2_adaptive_window(adaptive_window)
+            .http2_adaptive_window(config.adaptive_window)
             .build()
             .unwrap()
     });
@@ -87,7 +85,7 @@ pub async fn aggregate(
     let user_agent: &str = random_user_agent();
 
     // Add a random delay before making the request.
-    if random_delay || !debug {
+    if config.aggregator.random_delay || !config.debug {
        let nanos = SystemTime::now().duration_since(UNIX_EPOCH)?.subsec_nanos() as f32;
        let delay = ((nanos / 1_0000_0000 as f32).floor() as u64) + 1;
        tokio::time::sleep(Duration::from_secs(delay)).await;
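The random-delay context lines in the hunk above derive the sleep length from the sub-second part of the current time: the nanosecond remainder (0..1_000_000_000) is divided by 100_000_000, floored, and offset by one, giving a pseudo-random delay of roughly one to ten seconds. A standalone sketch of that same calculation (the helper name is mine, not from the repository):

use std::time::{SystemTime, UNIX_EPOCH};

// Standalone reproduction of the delay math shown in the diff context above.
// `random_delay_secs` is a hypothetical helper name used for illustration only.
fn random_delay_secs() -> Result<u64, std::time::SystemTimeError> {
    let nanos = SystemTime::now().duration_since(UNIX_EPOCH)?.subsec_nanos() as f32;
    Ok(((nanos / 1_0000_0000 as f32).floor() as u64) + 1)
}

fn main() {
    println!("would sleep for {} s", random_delay_secs().unwrap());
}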


@@ -209,16 +209,13 @@ async fn results(
         aggregate(
             query,
             page,
-            config.aggregator.random_delay,
-            config.debug,
+            config,
             &search_settings
                 .engines
                 .iter()
                 .filter_map(|engine| EngineHandler::new(engine).ok())
                 .collect::<Vec<EngineHandler>>(),
-            config.request_timeout,
             safe_search_level,
-            config.adaptive_window,
         )
         .await?
     }
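
Taken together, the two files now read four settings through the shared config reference: config.request_timeout, config.adaptive_window, config.debug, and config.aggregator.random_delay. A minimal sketch of the shape this implies for crate::config::parser::Config, with field types inferred from the parameters that were removed (the real struct carries more fields than shown here):

// Hypothetical, trimmed-down view of the Config fields touched by this commit;
// not the actual definition from crate::config::parser.
pub struct Config {
    pub debug: bool,           // replaces the old `debug: bool` argument
    pub request_timeout: u8,   // replaces `request_timeout: u8`
    pub adaptive_window: bool, // replaces `adaptive_window: bool`
    pub aggregator: AggregatorConfig,
}

pub struct AggregatorConfig {
    pub random_delay: bool,    // replaces `random_delay: bool`
}

Passing &Config keeps the aggregate() signature stable when new settings are added later, at the cost of the function depending on the whole configuration type rather than only the values it uses.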