use main.rs like a sane person
parent 6567af268e
commit c9fe79eb12

6 changed files with 99 additions and 263 deletions
src/cache/error.rs (vendored, 68 deletions)

@@ -1,68 +0,0 @@
//! This module provides the error enum to handle different errors associated with requesting data
//! from the redis server using an async connection pool.
use std::fmt;

#[cfg(feature = "redis-cache")]
use redis::RedisError;

/// A custom error type used for handling redis async pool associated errors.
#[derive(Debug)]
pub enum CacheError {
    /// This variant handles all errors related to `RedisError`.
    #[cfg(feature = "redis-cache")]
    RedisError(RedisError),
    /// This variant handles the errors which occur when all the connections
    /// in the connection pool return a connection dropped redis error.
    PoolExhaustionWithConnectionDropError,
    /// Whenever serialization or deserialization fails during communication with the cache.
    SerializationError,
    /// Returned when the value is missing.
    MissingValue,
    /// Whenever encryption or decryption of cache results fails.
    EncryptionError,
    /// Whenever compression of the cache results fails.
    CompressionError,
    /// Whenever base64 encoding or decoding fails.
    Base64DecodingOrEncodingError,
}

impl fmt::Display for CacheError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            #[cfg(feature = "redis-cache")]
            CacheError::RedisError(redis_error) => {
                if let Some(detail) = redis_error.detail() {
                    write!(f, "{}", detail)
                } else {
                    write!(f, "")
                }
            }
            CacheError::PoolExhaustionWithConnectionDropError => {
                write!(
                    f,
                    "Error all connections from the pool dropped with connection error"
                )
            }
            CacheError::MissingValue => {
                write!(f, "The value is missing from the cache")
            }
            CacheError::SerializationError => {
                write!(f, "Unable to serialize, deserialize from the cache")
            }

            CacheError::EncryptionError => {
                write!(f, "Failed to encrypt or decrypt cache-results")
            }

            CacheError::CompressionError => {
                write!(f, "failed to compress or uncompress cache results")
            }

            CacheError::Base64DecodingOrEncodingError => {
                write!(f, "base64 encoding or decoding failed")
            }
        }
    }
}

impl error_stack::Context for CacheError {}
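Since `CacheError` implements `error_stack::Context`, call sites can wrap a variant in a `Report` and attach printable context as the error bubbles up. A minimal sketch of that pattern, assuming the enum above is in scope (the `decode_cached_entry` helper and its message are hypothetical, not part of this commit):

use error_stack::Report;

/// Hypothetical helper showing how a `CacheError` variant propagates as a `Report`.
fn decode_cached_entry(raw: &[u8]) -> Result<String, Report<CacheError>> {
    String::from_utf8(raw.to_vec()).map_err(|_| {
        // Wrap the variant in a Report; extra context can be attached as it bubbles up.
        Report::new(CacheError::SerializationError)
            .attach_printable("cached bytes were not valid utf-8")
    })
}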
src/cache/mod.rs (vendored, 11 deletions)

@@ -1,11 +0,0 @@
//! This module provides the submodules which provide the functionality to cache the aggregated
//! results fetched from the upstream search engines in a json format.
pub mod cacher;

#[cfg(any(feature = "encrypt-cache-results", feature = "cec-cache-results"))]
/// encryption module contains encryption utils such as the cipher and key
pub mod encryption;
pub mod error;

#[cfg(feature = "redis-cache")]
pub mod redis_cacher;
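Every consumer of these feature-gated modules needs a matching `cfg` guard. A small illustrative sketch of the pattern (the function and its return values are hypothetical, not part of this commit):

/// Hypothetical call site: pick a backend label depending on which
/// cache features were compiled in.
#[cfg(feature = "redis-cache")]
fn backend_name() -> &'static str {
    // Compiled only when the `redis-cache` feature is enabled.
    "redis"
}

#[cfg(not(feature = "redis-cache"))]
fn backend_name() -> &'static str {
    // Fallback when the redis backend is compiled out.
    "in-memory"
}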
src/cache/redis_cacher.rs (vendored, 182 deletions)

@@ -1,182 +0,0 @@
//! This module provides the functionality to cache the aggregated results fetched from the
//! upstream search engines in a json format.

use super::error::CacheError;
use error_stack::Report;
use futures::stream::FuturesUnordered;
use redis::{aio::ConnectionManager, AsyncCommands, Client, RedisError};

/// A constant holding the redis pipeline size.
const REDIS_PIPELINE_SIZE: usize = 3;

/// A named struct which stores the redis Connection url address to which the client will
/// connect to.
pub struct RedisCache {
    /// It stores a pool of connections ready to be used.
    connection_pool: Vec<ConnectionManager>,
    /// It stores the size of the connection pool (in other words the number of
    /// connections that should be stored in the pool).
    pool_size: u8,
    /// It stores the index of which connection is being used at the moment.
    current_connection: u8,
    /// It stores the max TTL for keys.
    cache_ttl: u16,
    /// It stores the redis pipeline struct of size 3.
    pipeline: redis::Pipeline,
}

impl RedisCache {
    /// A function which creates a new `RedisCache` struct with a pool of connections
    /// to the redis server.
    ///
    /// # Arguments
    ///
    /// * `redis_connection_url` - It takes the redis Connection url address.
    /// * `pool_size` - It takes the size of the connection pool (in other words the number of
    ///   connections that should be stored in the pool).
    /// * `cache_ttl` - It takes the time to live for cached results to live in the redis
    ///   server.
    ///
    /// # Error
    ///
    /// Returns a newly constructed `RedisCache` struct on success otherwise returns a standard
    /// error type.
    pub async fn new(
        redis_connection_url: &str,
        pool_size: u8,
        cache_ttl: u16,
    ) -> Result<Self, Box<dyn std::error::Error>> {
        let client = Client::open(redis_connection_url)?;
        let tasks: FuturesUnordered<_> = FuturesUnordered::new();

        for _ in 0..pool_size {
            let client_partially_cloned = client.clone();
            tasks.push(tokio::spawn(async move {
                client_partially_cloned.get_connection_manager().await
            }));
        }

        let mut outputs = Vec::new();
        for task in tasks {
            outputs.push(task.await??);
        }

        let redis_cache = RedisCache {
            connection_pool: outputs,
            pool_size,
            current_connection: Default::default(),
            cache_ttl,
            pipeline: redis::Pipeline::with_capacity(REDIS_PIPELINE_SIZE),
        };

        Ok(redis_cache)
    }

    /// A function which fetches the cached json as json string from the redis server.
    ///
    /// # Arguments
    ///
    /// * `key` - It takes a string as key.
    ///
    /// # Error
    ///
    /// Returns the json as a String from the cache on success otherwise returns a `CacheError`
    /// on a failure.
    pub async fn cached_json(&mut self, key: &str) -> Result<String, Report<CacheError>> {
        self.current_connection = Default::default();

        let mut result: Result<String, RedisError> = self.connection_pool
            [self.current_connection as usize]
            .get(key)
            .await;

        // Code to check whether the current connection being used is dropped with a connection
        // error or not. If it drops with a connection error then the current connection is
        // replaced with the next connection from the pool, which is then used to run the redis
        // command; that connection is checked in the same way, and the process repeats until
        // either a connection succeeds (the result is passed on as a `Result`) or all of the
        // connections in the pool return a connection drop error, in which case a custom pool
        // error is returned.
        loop {
            match result {
                Err(error) => match error.is_connection_dropped() {
                    true => {
                        self.current_connection += 1;
                        if self.current_connection == self.pool_size {
                            return Err(Report::new(
                                CacheError::PoolExhaustionWithConnectionDropError,
                            ));
                        }
                        result = self.connection_pool[self.current_connection as usize]
                            .get(key)
                            .await;
                        continue;
                    }
                    false => return Err(Report::new(CacheError::RedisError(error))),
                },
                Ok(res) => return Ok(res),
            }
        }
    }

    /// A function which caches the json results by using each key with the corresponding
    /// `json results` as the value and stores them in the redis server with the configured
    /// ttl (time to live).
    ///
    /// # Arguments
    ///
    /// * `json_results` - It takes an iterator of json results strings.
    /// * `keys` - It takes an iterator of keys.
    ///
    /// # Error
    ///
    /// Returns a unit type if the results are cached successfully otherwise returns a `CacheError`
    /// on a failure.
    pub async fn cache_json(
        &mut self,
        json_results: impl Iterator<Item = String>,
        keys: impl Iterator<Item = String>,
    ) -> Result<(), Report<CacheError>> {
        self.current_connection = Default::default();

        for (key, json_result) in keys.zip(json_results) {
            self.pipeline
                .set_ex(key, json_result, self.cache_ttl.into());
        }

        let mut result: Result<(), RedisError> = self
            .pipeline
            .query_async(&mut self.connection_pool[self.current_connection as usize])
            .await;

        // Code to check whether the current connection being used is dropped with a connection
        // error or not. If it drops with a connection error then the current connection is
        // replaced with the next connection from the pool, which is then used to run the redis
        // command; that connection is checked in the same way, and the process repeats until
        // either a connection succeeds (the result is passed on as a `Result`) or all of the
        // connections in the pool return a connection drop error, in which case a custom pool
        // error is returned.
        loop {
            match result {
                Err(error) => match error.is_connection_dropped() {
                    true => {
                        self.current_connection += 1;
                        if self.current_connection == self.pool_size {
                            return Err(Report::new(
                                CacheError::PoolExhaustionWithConnectionDropError,
                            ));
                        }
                        result = self
                            .pipeline
                            .query_async(
                                &mut self.connection_pool[self.current_connection as usize],
                            )
                            .await;
                        continue;
                    }
                    false => return Err(Report::new(CacheError::RedisError(error))),
                },
                Ok(_) => return Ok(()),
            }
        }
    }
}
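Taken together, `new`, `cached_json`, and `cache_json` form a small round-robin failover client. A hedged usage sketch of the API above (the connection URL, pool size, TTL, and key are made-up values, and a reachable redis server is assumed):

#[tokio::main]
async fn main() {
    // Pool of 4 connection managers; cached entries expire after 600 seconds.
    let mut cache = RedisCache::new("redis://127.0.0.1:6379", 4, 600)
        .await
        .expect("failed to build the redis connection pool");

    // Queue one key/value pair through the internal pipeline and flush it.
    cache
        .cache_json(
            std::iter::once(String::from("{\"results\":[]}")),
            std::iter::once(String::from("search:example:1")),
        )
        .await
        .expect("failed to cache the results");

    // Read it back; on dropped connections this transparently walks the pool.
    let json = cache
        .cached_json("search:example:1")
        .await
        .expect("failed to fetch the cached results");
    println!("{json}");
}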
src/main.rs (new file, 97 additions)

@@ -0,0 +1,97 @@
//! This main module provides the functionality to handle the Tcp server
//! and register all the routes for the `websurfx` meta search engine website.

#![forbid(unsafe_code, clippy::panic)]
#![deny(missing_docs, clippy::missing_docs_in_private_items, clippy::perf)]
#![warn(clippy::cognitive_complexity, rust_2018_idioms)]

pub mod cache;
pub mod config;
pub mod engines;
pub mod handler;
pub mod models;
pub mod results;
pub mod server;
pub mod templates;

use std::net::TcpListener;

use crate::cache::Cache;
use crate::server::router;

use actix_cors::Cors;
use actix_files as fs;
use actix_web::{
    http::header,
    middleware::{Compress, Logger},
    web, App, HttpServer,
};
use config::parser::Config;
use handler::{file_path, FileType};

/// Runs the web server
#[actix_web::main]
async fn main() {
    let config = Config::parse();
    let cache = Cache::build(&config);

    env_logger::Builder::new()
        .filter(None, log::LevelFilter::Info)
        .init();

    log::info!(
        "started server on port {} and IP {}",
        config.port,
        config.binding_ip
    );
    log::info!(
        "Open http://{}:{}/ in your browser",
        config.binding_ip,
        config.port,
    );

    let listener = TcpListener::bind((config.binding_ip.as_str(), config.port))
        .expect("could not create TcpListener");

    let public_folder_path: &str = file_path(FileType::Theme).unwrap();

    let _ = HttpServer::new(move || {
        let cors: Cors = Cors::default()
            .allow_any_origin()
            .allowed_methods(vec!["GET"])
            .allowed_headers(vec![
                header::ORIGIN,
                header::CONTENT_TYPE,
                header::REFERER,
                header::COOKIE,
            ]);

        App::new()
            // Compress the responses provided by the server for the client requests.
            .wrap(Compress::default())
            .wrap(Logger::default()) // added logging middleware for logging.
            .app_data(web::Data::new(config.clone()))
            .app_data(web::Data::new(cache.clone()))
            .wrap(cors)
            // Serve images and static files (css and js files).
            .service(
                fs::Files::new("/static", format!("{}/static", public_folder_path))
                    .show_files_listing(),
            )
            .service(
                fs::Files::new("/images", format!("{}/images", public_folder_path))
                    .show_files_listing(),
            )
            .service(router::robots_data) // robots.txt
            .service(router::index) // index page
            .service(server::routes::search::search) // search page
            .service(router::about) // about page
            .service(router::settings) // settings page
            .default_service(web::route().to(router::not_found)) // error page
    })
    // Start server on 127.0.0.1 with the user provided port number. for example 127.0.0.1:8080.
    .listen(listener)
    .expect("could not bind to TCP listener")
    .run()
    .await;
}
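Binding the `TcpListener` up front (instead of passing an address string straight to `HttpServer::bind`) keeps the startup path reusable, for example in integration tests that bind port 0 to get an ephemeral port. A small sketch of that pattern, independent of the websurfx config (the helper name is hypothetical):

use std::net::TcpListener;

/// Hypothetical test helper: ask the OS for any free port before handing
/// the listener to `HttpServer::listen`.
fn ephemeral_listener() -> TcpListener {
    let listener = TcpListener::bind("127.0.0.1:0").expect("could not bind a random port");
    let port = listener.local_addr().expect("listener has no local address").port();
    println!("test server will listen on 127.0.0.1:{port}");
    listener
}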
@@ -1,7 +1,7 @@
 //! This module handles the search route of the search engine website.

 use crate::{
-    cache::cacher::Cache,
+    cache::Cache,
     config::parser::Config,
     models::{
         aggregation_models::SearchResults,

@@ -145,7 +145,7 @@ pub async fn search(
 /// the cache or from the upstream search engines otherwise it returns an appropriate error.
 async fn results(
     config: web::Data<crate::config::parser::Config>,
-    cache: web::Data<crate::cache::cacher::Cache>,
+    cache: web::Data<crate::cache::Cache>,
     query: &str,
     page: u32,
     search_settings: &server_models::Cookie<'_>,
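The import shrinking from `cache::cacher::Cache` to `cache::Cache` implies the cache module now re-exports its main type at the module root. The re-export itself is not part of the hunks shown here, so the following one-liner is an assumption about what it looks like:

// src/cache/mod.rs (assumed): re-export the concrete type so callers can
// write `crate::cache::Cache` instead of reaching into the submodule.
pub use cacher::Cache;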