remove flawed — if not outright unnecessary — relevance calculation and its dependencies
This commit is contained in:
parent
215a928eb4
commit
05b6dfda86
4 changed files with 3 additions and 268 deletions
|
@ -4,11 +4,6 @@
|
|||
use super::engine_models::EngineError;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[cfg(any(
|
||||
feature = "use-synonyms-search",
|
||||
feature = "use-non-static-synonyms-search"
|
||||
))]
|
||||
use thesaurus::synonyms;
|
||||
/// A named struct to store the raw scraped search results from the
|
||||
/// upstream search engines before aggregating it. It derives the Clone trait which is needed
|
||||
/// to write idiomatic rust using `Iterators`.
|
||||
|
@ -47,45 +42,6 @@ impl SearchResult {
|
|||
engine: engine.iter().map(|name| name.to_string()).collect(),
|
||||
}
|
||||
}
|
||||
/// calculates and update the relevance score of the current search.
|
||||
|
||||
/// # Arguments
|
||||
///
|
||||
/// * query - the query string used to obtain the results
|
||||
///
|
||||
///
|
||||
|
||||
pub fn calculate_relevance(&mut self, query: &str) {
|
||||
use stop_words::{get, LANGUAGE};
|
||||
// when language settings can change to any of the ones supported on this crate: https://docs.rs/crate/stop-words/0.8.0
|
||||
let documents = [
|
||||
self.title.clone(),
|
||||
self.url.clone(),
|
||||
self.description.clone(),
|
||||
];
|
||||
|
||||
let stop_words = get(LANGUAGE::English);
|
||||
let punctuation = [
|
||||
".".to_owned(),
|
||||
",".to_owned(),
|
||||
":".to_owned(),
|
||||
";".to_owned(),
|
||||
"!".to_owned(),
|
||||
"?".to_owned(),
|
||||
"(".to_owned(),
|
||||
")".to_owned(),
|
||||
"[".to_owned(),
|
||||
"]".to_owned(),
|
||||
"{".to_owned(),
|
||||
"}".to_owned(),
|
||||
"\"".to_owned(),
|
||||
"'".to_owned(),
|
||||
"<".to_owned(),
|
||||
">".to_owned(),
|
||||
];
|
||||
|
||||
self.relevance_score = calculate_tf_idf(query, &documents, &stop_words, &punctuation);
|
||||
}
|
||||
|
||||
/// A function which adds the engine name provided as a string into a vector of strings.
|
||||
///
|
||||
|
@ -228,53 +184,3 @@ impl SearchResults {
|
|||
self.no_engines_selected = true;
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper function to calculate the tf-idf for the search query.
|
||||
/// <br> The approach is as [`as`](https://en.wikipedia.org/wiki/Tf%E2%80%93idf).
|
||||
/// <br> Find a sample article about TF-IDF [`here`](https://medium.com/analytics-vidhya/tf-idf-term-frequency-technique-easiest-explanation-for-text-classification-in-nlp-with-code-8ca3912e58c3)
|
||||
/// ### Arguments
|
||||
/// * `query` - a user's search query
|
||||
/// * `documents` - a list of text used for comparision (url, title, description)
|
||||
/// * `stop_words` - A list of language specific stop words.
|
||||
/// * `punctuation` - list of punctuation symbols.
|
||||
/// ### Returns
|
||||
/// * `score` - The average tf-idf score of the word tokens (and synonyms) in the query
|
||||
fn calculate_tf_idf(
|
||||
query: &str,
|
||||
documents: &[String],
|
||||
stop_words: &[String],
|
||||
punctuation: &[String],
|
||||
) -> f32 {
|
||||
use keyword_extraction::{
|
||||
tf_idf::{TfIdf, TfIdfParams},
|
||||
tokenizer::Tokenizer,
|
||||
};
|
||||
|
||||
let params = TfIdfParams::UnprocessedDocuments(documents, stop_words, Some(punctuation));
|
||||
let tf_idf = TfIdf::new(params);
|
||||
let tokener = Tokenizer::new(query, stop_words, Some(punctuation));
|
||||
let query_tokens = tokener.split_into_words();
|
||||
let mut search_tokens = vec![];
|
||||
|
||||
for token in query_tokens {
|
||||
#[cfg(any(
|
||||
feature = "use-synonyms-search",
|
||||
feature = "use-non-static-synonyms-search"
|
||||
))]
|
||||
{
|
||||
// find some synonyms and add them to the search (from wordnet or moby if feature is enabled)
|
||||
let synonyms = synonyms(&token);
|
||||
search_tokens.extend(synonyms)
|
||||
}
|
||||
search_tokens.push(token);
|
||||
}
|
||||
|
||||
let mut total_score = 0.0f32;
|
||||
for token in search_tokens.iter() {
|
||||
total_score += tf_idf.get_score(token);
|
||||
}
|
||||
|
||||
let result = total_score / (search_tokens.len() as f32);
|
||||
|
||||
f32::from(!result.is_nan()) * result
|
||||
}
|
||||
|
|
|
@ -10,15 +10,9 @@ use crate::models::{
|
|||
|
||||
use error_stack::Report;
|
||||
use futures::stream::FuturesUnordered;
|
||||
use regex::Regex;
|
||||
use reqwest::{Client, ClientBuilder};
|
||||
use std::sync::Arc;
|
||||
use tokio::{
|
||||
fs::File,
|
||||
io::{AsyncBufReadExt, BufReader},
|
||||
task::JoinHandle,
|
||||
time::Duration,
|
||||
};
|
||||
use tokio::{task::JoinHandle, time::Duration};
|
||||
|
||||
/// A constant for holding the prebuilt Client globally in the app.
/// Lazily initialized on first use via `OnceLock`, so the `Client` (and its
/// connection pool) is built once and shared by all subsequent requests.
static CLIENT: std::sync::OnceLock<Client> = std::sync::OnceLock::new();
|
||||
|
@ -153,77 +147,7 @@ pub async fn aggregate(
|
|||
};
|
||||
}
|
||||
|
||||
let mut results: Vec<SearchResult> = result_map
|
||||
.iter()
|
||||
.map(|(_, value)| {
|
||||
let mut copy = value.clone();
|
||||
if !copy.url.contains("temu.com") {
|
||||
copy.calculate_relevance(query.as_str())
|
||||
}
|
||||
copy
|
||||
})
|
||||
.collect();
|
||||
sort_search_results(&mut results);
|
||||
let results: Vec<SearchResult> = result_map.iter().map(|(_, value)| value.clone()).collect();
|
||||
|
||||
Ok(SearchResults::new(results, &engine_errors_info))
|
||||
}
|
||||
|
||||
/// Filters a map of search results using a list of regex patterns.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `map_to_be_filtered` - A mutable reference to a `Vec` of search results to filter, where the filtered results will be removed from.
|
||||
/// * `resultant_map` - A mutable reference to a `Vec` to hold the filtered results.
|
||||
/// * `file_path` - A `&str` representing the path to a file containing regex patterns to use for filtering.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns an error if the file at `file_path` cannot be opened or read, or if a regex pattern is invalid.
|
||||
pub async fn filter_with_lists(
|
||||
map_to_be_filtered: &mut Vec<(String, SearchResult)>,
|
||||
resultant_map: &mut Vec<(String, SearchResult)>,
|
||||
file_path: &str,
|
||||
) -> Result<(), Box<dyn std::error::Error>> {
|
||||
let reader = BufReader::new(File::open(file_path).await?);
|
||||
let mut lines = reader.lines();
|
||||
|
||||
while let Some(line) = lines.next_line().await? {
|
||||
let re = Regex::new(line.trim())?;
|
||||
|
||||
let mut length = map_to_be_filtered.len();
|
||||
let mut idx: usize = Default::default();
|
||||
// Iterate over each search result in the map and check if it matches the regex pattern
|
||||
while idx < length {
|
||||
let ele = &map_to_be_filtered[idx];
|
||||
let ele_inner = &ele.1;
|
||||
match re.is_match(&ele.0.to_lowercase())
|
||||
|| re.is_match(&ele_inner.title.to_lowercase())
|
||||
|| re.is_match(&ele_inner.description.to_lowercase())
|
||||
{
|
||||
true => {
|
||||
// If the search result matches the regex pattern, move it from the original map to the resultant map
|
||||
resultant_map.push(map_to_be_filtered.swap_remove(idx));
|
||||
length -= 1;
|
||||
}
|
||||
false => idx += 1,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
/// Sorts SearchResults by relevance score.
|
||||
/// <br> sort_unstable is used as its faster,stability is not an issue on our side.
|
||||
/// For reasons why, check out [`this`](https://rust-lang.github.io/rfcs/1884-unstable-sort.html)
|
||||
/// # Arguments
|
||||
/// * `results` - A mutable slice or Vec of SearchResults
|
||||
///
|
||||
fn sort_search_results(results: &mut [SearchResult]) {
|
||||
results.sort_unstable_by(|a, b| {
|
||||
use std::cmp::Ordering;
|
||||
|
||||
b.relevance_score
|
||||
.partial_cmp(&a.relevance_score)
|
||||
.unwrap_or(Ordering::Less)
|
||||
})
|
||||
}
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue