mirror of https://gitlab.com/comunic/comunicapiv3 synced 2024-12-27 22:18:51 +00:00

Make limit system lives

This commit is contained in:
Pierre HUBERT 2021-01-23 09:44:34 +01:00
parent dac83ba437
commit ddce4062c7
4 changed files with 116 additions and 16 deletions

View File

@@ -34,6 +34,21 @@ pub enum LimitPolicy {
ANY(u64),
}
impl LimitPolicy {
pub fn is_none(&self) -> bool {
matches!(self, LimitPolicy::NONE)
}
pub fn get_count(&self) -> u64 {
match self {
LimitPolicy::NONE => 0,
LimitPolicy::SUCCESS(n) => n.clone(),
LimitPolicy::FAILURE(n) => n.clone(),
LimitPolicy::ANY(n) => n.clone(),
}
}
}
/// Define types
pub type RequestResult = Result<(), Box<dyn Error>>;
pub type RequestProcess = Box<dyn Fn(&mut HttpRequestHandler) -> RequestResult>;
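A hedged illustration of how the two new LimitPolicy helpers behave; the policy value below is an assumption for illustration, not taken from this commit:

// Hypothetical: a policy that counts only failed attempts, capped at 10.
let policy = LimitPolicy::FAILURE(10);
assert!(!policy.is_none());          // only LimitPolicy::NONE reports is_none() == true
assert_eq!(policy.get_count(), 10);  // threshold carried by the variant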

View File

@@ -196,6 +196,9 @@ impl FromRequest for CustomRequest {
/// Process a "simple request" aka not a WebSocket request
fn process_simple_route(route: &Route, req: &mut HttpRequestHandler) -> RequestResult {
if requests_limit_helper::trigger_before(req, route).is_err() {
req.too_many_requests("Too many requests. Please try again later.")?;
}
// Validate client token
req.check_client_token()?;
@@ -205,7 +208,11 @@ fn process_simple_route(route: &Route, req: &mut HttpRequestHandler) -> RequestR
req.check_user_token()?;
}
(route.func)(req)
let res: RequestResult = (route.func)(req);
requests_limit_helper::trigger_after(res.is_ok(), req, route)?;
res
}
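For contrast, a sketch of what the dispatcher tail would look like without the intermediate binding (not part of this commit): a handler error would propagate before the accounting ran, so FAILURE and ANY policies would never observe failed requests.

// Naive variant, shown only to motivate binding `res` first:
(route.func)(req)?;                                       // returns early on Err
requests_limit_helper::trigger_after(true, req, route)?;  // only reached on success
Ok(())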
/// Process an incoming request

View File

@@ -146,6 +146,13 @@ impl HttpRequestHandler {
Err(Box::new(ExecError::new(&message)))
}
/// Too many requests (429)
pub fn too_many_requests(&mut self, message: &str) -> RequestResult {
self.response = Some(HttpResponse::TooManyRequests().json(
HttpError::new(429, message)));
Err(Box::new(ExecError::new(message)))
}
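Hypothetical usage of the new helper inside a handler (the condition name is invented for illustration): the `?` propagates the Err while the 429 response has already been stored on the handler.

// `attempts_exhausted` is a placeholder for whatever check the caller performs.
if attempts_exhausted {
    req.too_many_requests("Too many requests. Please try again later.")?;
}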
/// If result is not OK, return a bad request
pub fn ok_or_bad_request<E>(&mut self, res: ResultBoxError<E>, msg: &str) -> ResultBoxError<E> {
match res {

View File

@@ -2,26 +2,32 @@
//!
//! Handle the limitation of requests, depending on threshold criteria
use std::sync::{Arc, Mutex};
use crate::constants::LIMIT_COUNTER_LIFETIME;
use crate::data::error::ResultBoxError;
use crate::controllers::routes::{LimitPolicy, Route};
use crate::data::error::{ExecError, ResultBoxError};
use crate::data::http_request_handler::HttpRequestHandler;
use crate::utils::date_utils;
use crate::utils::date_utils::time;
/// Information about an IP address limitation
struct IpInfo {
ip: String,
time_start: u64,
uri: String,
count: u64,
}
/// Limits cache
type Cache = Arc<dashmap::DashMap<String, IpInfo>>;
type Cache = Vec<IpInfo>;
static mut LIMITS_CACHE: Option<Arc<Mutex<Cache>>> = None;
/// Initialize limit cache storage
pub fn init() {
let limits_cache = Arc::new(dashmap::DashMap::new());
let limits_cache = Vec::new();
let limits_cache = Some(Arc::new(Mutex::new(limits_cache)));
unsafe {
@@ -30,12 +36,11 @@ pub fn init() {
}
/// Get access to the cache. Any lock taken on this resource must be released as quickly as possible
fn get_cache() -> ResultBoxError<Cache> {
let cache: Cache;
fn get_cache() -> ResultBoxError<Arc<Mutex<Cache>>> {
let cache;
unsafe {
let guard = LIMITS_CACHE.as_ref().unwrap().lock();
cache = guard.unwrap().clone();
cache = LIMITS_CACHE.as_ref().unwrap().clone();
}
Ok(cache)
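Hypothetical usage of get_cache() (the entry values are invented for illustration): cloning the Arc is only a reference-count bump; the Mutex is locked afterwards and should be held only for the short critical section.

let cache = get_cache()?;
let mut cache = cache.lock().unwrap();
cache.push(IpInfo {
    ip: "127.0.0.1".to_string(),   // placeholder address
    time_start: time(),
    uri: "/demo".to_string(),      // placeholder route
    count: 1,
});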
@@ -45,16 +50,82 @@ fn get_cache() -> ResultBoxError<Cache> {
pub fn clean_cache() -> ResultBoxError {
let time = date_utils::time();
let cache = get_cache()?;
let obsolete_ips: Vec<String> = cache
.iter()
.filter(|k| k.time_start + LIMIT_COUNTER_LIFETIME < time)
.map(|k| k.key().to_string())
.collect();
let mut i = 0;
for obsolete_ip in obsolete_ips {
cache.remove(&obsolete_ip);
let cache = get_cache()?;
let mut cache = cache.lock().unwrap();
while i < cache.len() {
if cache[i].time_start + LIMIT_COUNTER_LIFETIME < time {
cache.remove(i);
} else {
i = i + 1;
}
}
Ok(())
}
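The eviction loop above could also be expressed with Vec::retain, keeping only the entries whose counter window is still alive; a sketch assuming the same IpInfo and LIMIT_COUNTER_LIFETIME definitions:

let time = date_utils::time();
let cache = get_cache()?;
cache.lock().unwrap()
    .retain(|info| info.time_start + LIMIT_COUNTER_LIFETIME >= time);
Ok(())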
/// Trigger limit helper at the beginning of requests
pub fn trigger_before(req: &HttpRequestHandler, route: &Route) -> ResultBoxError {
if route.limit_policy.is_none() {
return Ok(());
}
let max_count = route.limit_policy.get_count();
let ip = req.remote_ip();
let cache = get_cache()?;
let found = cache.lock().unwrap()
.iter()
.find(
|k| k.uri.eq(route.uri)
&& k.count >= max_count
&& k.ip.eq(&ip))
.is_some();
if found {
return Err(ExecError::boxed_new("Limit exceeded!"));
}
Ok(())
}
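The existence check in trigger_before could equivalently use Iterator::any; a sketch over the same fields and locals:

let found = cache.lock().unwrap()
    .iter()
    .any(|k| k.uri == route.uri && k.count >= max_count && k.ip == ip);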
/// Trigger limit at the end of the request
pub fn trigger_after(is_success: bool, req: &HttpRequestHandler, route: &Route) -> ResultBoxError {
let need_trigger = match (&route.limit_policy, is_success)
{
(LimitPolicy::NONE, _) => false,
(LimitPolicy::ANY(_), _) => true,
(LimitPolicy::SUCCESS(_), res) => res,
(LimitPolicy::FAILURE(_), res) => !res,
};
if !need_trigger {
return Ok(());
}
let ip = req.remote_ip();
let cache = get_cache()?;
let mut cache = cache.lock().unwrap();
// Search for an existing entry
for i in 0..cache.len() {
if cache[i].ip.eq(&ip) && cache[i].uri.eq(route.uri) {
cache[i].count += 1;
return Ok(());
}
}
// Otherwise we must add the entry to the table
cache.push(IpInfo {
ip,
time_start: time(),
uri: route.uri.to_string(),
count: 1,
});
Ok(())
}