Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Update error handling #19

Open
wants to merge 1 commit into
base: rewrite_backend_rust
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions backend-rs/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
anyhow = "1.0.86"
axum = { version = "0.7", features = ["tracing"] }
chrono = { version = "0.4.38", features = ["serde"] }
dotenv = "0.15"
Expand Down
51 changes: 39 additions & 12 deletions backend-rs/src/api/flake.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
use anyhow::Context;
use axum::{
extract::{Query, State},
Json,
Expand All @@ -6,7 +7,7 @@ use chrono::NaiveDateTime;
use opensearch::{OpenSearch, SearchParts};
use serde_json::{json, Value};
use sqlx::{postgres::PgRow, FromRow, Pool, Postgres, Row};
use std::{collections::HashMap, sync::Arc};
use std::{cmp::Ordering, collections::HashMap, sync::Arc};

use crate::common::{AppError, AppState};

Expand All @@ -21,6 +22,26 @@ struct FlakeRelease {
created_at: NaiveDateTime,
}

// Marker impl: equality on `FlakeRelease` is total (see `PartialEq`, which
// compares by `id` alone), so `Eq` can be asserted with an empty body.
impl Eq for FlakeRelease {}

impl Ord for FlakeRelease {
    /// Releases are totally ordered by their database `id`; no other field
    /// participates in the ordering (consistent with `PartialEq` below).
    fn cmp(&self, other: &Self) -> Ordering {
        Ord::cmp(&self.id, &other.id)
    }
}

impl PartialOrd for FlakeRelease {
    /// Delegates to [`Ord::cmp`]; since the ordering is total this never
    /// yields `None`.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(Ord::cmp(self, other))
    }
}

impl PartialEq for FlakeRelease {
    /// Two releases are the same release exactly when their database `id`s
    /// match; the remaining fields are ignored for equality.
    fn eq(&self, other: &Self) -> bool {
        self.id.eq(&other.id)
    }
}

impl FromRow<'_, PgRow> for FlakeRelease {
fn from_row(row: &PgRow) -> sqlx::Result<Self> {
Ok(Self {
Expand Down Expand Up @@ -53,7 +74,7 @@ pub async fn get_flake(

if !releases.is_empty() {
// Should this be done by the DB?
releases.sort_by(|a, b| hits[&b.id].partial_cmp(&hits[&a.id]).unwrap());
releases.sort();
}

releases
Expand Down Expand Up @@ -116,10 +137,7 @@ async fn get_flakes(pool: &Pool<Postgres>) -> Result<Vec<FlakeRelease>, sqlx::Er
Ok(releases)
}

async fn search_flakes(
opensearch: &OpenSearch,
q: &String,
) -> Result<HashMap<i32, f64>, opensearch::Error> {
async fn search_flakes(opensearch: &OpenSearch, q: &String) -> Result<HashMap<i32, f64>, AppError> {
let res = opensearch
.search(SearchParts::Index(&["flakes"]))
.size(10)
Expand All @@ -146,12 +164,21 @@ async fn search_flakes(
// TODO: Remove this unwrap, use fold or map to create the HashMap
let mut hits: HashMap<i32, f64> = HashMap::new();

for hit in res["hits"]["hits"].as_array().unwrap() {
// TODO: properly handle errors
hits.insert(
hit["_id"].as_str().unwrap().parse().unwrap(),
hit["_score"].as_f64().unwrap(),
);
let hit_res = res["hits"]["hits"]
.as_array()
.context("failed to extract hits from open search response")?;

for hit in hit_res {
let id = hit["_id"]
.as_str()
.context("failed to read id as string from open search hit")?
.parse()
.context("failed to parse id from open search hit")?;
let score = hit["_score"]
.as_f64()
.context("failed to parse score from open search hit")?;

hits.insert(id, score);
}

Ok(hits)
Expand Down
8 changes: 8 additions & 0 deletions backend-rs/src/common.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ pub struct AppState {
pub enum AppError {
OpenSearchError(opensearch::Error),
SqlxError(sqlx::Error),
UnexpectedError(anyhow::Error),
}

impl From<opensearch::Error> for AppError {
Expand All @@ -24,11 +25,18 @@ impl From<sqlx::Error> for AppError {
}
}

impl From<anyhow::Error> for AppError {
fn from(value: anyhow::Error) -> Self {
AppError::UnexpectedError(value)
}
}

impl IntoResponse for AppError {
fn into_response(self) -> axum::response::Response {
let body = match self {
AppError::OpenSearchError(error) => error.to_string(),
AppError::SqlxError(error) => error.to_string(),
AppError::UnexpectedError(error) => error.to_string(),
};
(StatusCode::INTERNAL_SERVER_ERROR, Json(body)).into_response()
}
Expand Down
13 changes: 9 additions & 4 deletions backend-rs/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -32,22 +32,27 @@ async fn main() {
.with(fmt::layer().with_target(false))
.with(EnvFilter::from_default_env())
.init();
let database_url = env::var("DATABASE_URL").unwrap();
let pool = PgPoolOptions::new().connect(&database_url).await.unwrap();
let database_url = env::var("DATABASE_URL").expect("Failed to parse database url");
let pool = PgPoolOptions::new()
.connect(&database_url)
.await
.expect("failed to start database pool");
let state = Arc::new(AppState {
opensearch: OpenSearch::default(),
pool,
});
let _ = create_flake_index(&state.opensearch).await;
// run our app with hyper, listening globally on port 3000
let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap();
let listener = tokio::net::TcpListener::bind("0.0.0.0:3000")
.await
.expect("Failed to bind TCP listener");
tracing::info!("Listening on 0.0.0.0:3000");
axum::serve(
listener,
app(state).into_make_service_with_connect_info::<SocketAddr>(),
)
.await
.unwrap();
.expect("Failed to start axum");
}

async fn add_ip_trace(
Expand Down
Loading