diff --git a/editoast/openapi.yaml b/editoast/openapi.yaml
index 99f9495ca35..f1c004e3512 100644
--- a/editoast/openapi.yaml
+++ b/editoast/openapi.yaml
@@ -2472,7 +2472,16 @@ paths:
     post:
       tags:
         - stdcm
       summary: Compute a STDCM and return the simulation result
+      description: |-
+        It first checks user authorization, then retrieves timetable, infrastructure,
+        train schedules, and rolling stock data, and runs train simulations.
+        The result contains the simulation output based on the train schedules
+        and infrastructure provided.
+
+        If no path is found, a fake train is simulated to detect conflicts with
+        the existing train schedules. Both the conflict information and the
+        pathfinding result of the fake train are then returned.
       parameters:
         - name: infra
           in: query
@@ -2604,6 +2613,22 @@
                       type: string
                       enum:
                         - path_not_found
+                - type: object
+                  required:
+                    - pathfinding_result
+                    - conflicts
+                    - status
+                  properties:
+                    conflicts:
+                      type: array
+                      items:
+                        $ref: '#/components/schemas/Conflict'
+                    pathfinding_result:
+                      $ref: '#/components/schemas/PathfindingResult'
+                    status:
+                      type: string
+                      enum:
+                        - conflicts
                 - type: object
                   required:
                     - error
diff --git a/editoast/src/core/conflict_detection.rs b/editoast/src/core/conflict_detection.rs
index e2c3e3a5ee1..6b264a79aa5 100644
--- a/editoast/src/core/conflict_detection.rs
+++ b/editoast/src/core/conflict_detection.rs
@@ -29,7 +29,7 @@ pub struct ConflictDetectionRequest {
     pub work_schedules: Option<WorkSchedulesRequest>,
 }
 
-#[derive(Debug, Serialize)]
+#[derive(Debug, Clone, Serialize)]
 pub struct TrainRequirements {
     pub start_time: DateTime<Utc>,
     pub spacing_requirements: Vec<SpacingRequirement>,
@@ -49,7 +49,7 @@ pub struct ConflictDetectionResponse {
     pub conflicts: Vec<Conflict>,
 }
 
-#[derive(Debug, Clone, Deserialize, Serialize, ToSchema)]
+#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, ToSchema)]
 pub struct Conflict {
     /// List of train ids involved in the conflict
     pub train_ids: Vec<i64>,
@@ -70,7 +70,7 @@ pub struct Conflict {
 ///
 /// The start and end time describe the conflicting time span (not the full
 /// requirement's time span).
-#[derive(Debug, Clone, Deserialize, Serialize, ToSchema)]
+#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, ToSchema)]
 pub struct ConflictRequirement {
     pub zone: String,
     pub start_time: DateTime<Utc>,
diff --git a/editoast/src/core/simulation.rs b/editoast/src/core/simulation.rs
index 01f49fb3fd2..900c59f513c 100644
--- a/editoast/src/core/simulation.rs
+++ b/editoast/src/core/simulation.rs
@@ -138,7 +138,7 @@ pub struct SimulationPath {
     pub path_item_positions: Vec<u64>,
 }
 
-#[derive(Deserialize, Default, Serialize, Clone, Debug, ToSchema)]
+#[derive(Deserialize, Default, PartialEq, Serialize, Clone, Debug, ToSchema)]
 pub struct ReportTrain {
     /// List of positions of a train
     /// Both positions (in mm) and times (in ms) must have the same length
@@ -153,7 +153,7 @@ pub struct ReportTrain {
     pub path_item_times: Vec<u64>,
 }
 
-#[derive(Deserialize, Default, Serialize, Clone, Debug, ToSchema)]
+#[derive(Deserialize, Default, PartialEq, Serialize, Clone, Debug, ToSchema)]
 pub struct CompleteReportTrain {
     #[serde(flatten)]
     pub report_train: ReportTrain,
@@ -279,7 +279,7 @@ pub struct SimulationRequest {
     pub electrical_profile_set_id: Option<i64>,
 }
 
-#[derive(Serialize, Deserialize, Clone, Debug, ToSchema)]
+#[derive(Serialize, Deserialize, PartialEq, Clone, Debug, ToSchema)]
 #[serde(tag = "status", rename_all = "snake_case")]
 // We accepted the difference of memory size taken by variants
 // Since there is only on success and others are error cases
diff --git a/editoast/src/core/stdcm.rs b/editoast/src/core/stdcm.rs
index 5e154548b9d..401b726eccb 100644
--- a/editoast/src/core/stdcm.rs
+++ b/editoast/src/core/stdcm.rs
@@ -11,7 +11,9 @@ use serde::Deserialize;
 use serde::Serialize;
 use utoipa::ToSchema;
 
+use super::conflict_detection::Conflict;
 use super::conflict_detection::TrainRequirements;
+use super::pathfinding::PathfindingResult;
 use super::pathfinding::PathfindingResultSuccess;
 use super::pathfinding::TrackRange;
 use super::simulation::PhysicsRollingStock;
@@ -113,7 +115,7 @@ pub struct UndirectedTrackRange {
     pub end: u64,
 }
 
-#[derive(Serialize, Deserialize, Clone, Debug, ToSchema)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, ToSchema)]
 #[serde(tag = "status", rename_all = "snake_case")]
 // We accepted the difference of memory size taken by variants
 // Since there is only on success and others are error cases
@@ -125,6 +127,10 @@ pub enum STDCMResponse {
         departure_time: DateTime<Utc>,
     },
     PathNotFound,
+    Conflicts {
+        pathfinding_result: PathfindingResult,
+        conflicts: Vec<Conflict>,
+    },
     PreprocessingSimulationError {
         error: SimulationResponse,
     },
diff --git a/editoast/src/views/timetable/stdcm.rs b/editoast/src/views/timetable/stdcm.rs
index d0b39ddb116..5db7ea7884e 100644
--- a/editoast/src/views/timetable/stdcm.rs
+++ b/editoast/src/views/timetable/stdcm.rs
@@ -25,14 +25,17 @@ use utoipa::IntoParams;
 use utoipa::ToSchema;
 
 use super::SelectionSettings;
+use crate::core::conflict_detection::ConflictDetectionRequest;
 use crate::core::conflict_detection::TrainRequirements;
+use crate::core::conflict_detection::WorkSchedulesRequest;
 use crate::core::pathfinding::InvalidPathItem;
 use crate::core::pathfinding::PathfindingResult;
 use crate::core::simulation::{RoutingRequirement, SimulationResponse, SpacingRequirement};
+use crate::core::stdcm::STDCMPathItem;
+use crate::core::stdcm::STDCMRequest;
 use crate::core::stdcm::STDCMResponse;
-use crate::core::stdcm::TemporarySpeedLimit as CoreTemporarySpeedLimit;
-use crate::core::stdcm::{STDCMPathItem, UndirectedTrackRange, WorkSchedule as CoreWorkSchedule};
-use crate::core::stdcm::{STDCMRequest, STDCMStepTimingData};
+use crate::core::stdcm::STDCMStepTimingData;
+use crate::core::stdcm::UndirectedTrackRange;
 use crate::core::AsCoreRequest;
 use crate::core::CoreClient;
 use crate::error::Result;
@@ -139,7 +142,15 @@ struct InfraIdQueryParam {
     infra: i64,
 }
 
 /// Compute a STDCM and return the simulation result
+/// It first checks user authorization, then retrieves timetable, infrastructure,
+/// train schedules, and rolling stock data, and runs train simulations.
+/// The result contains the simulation output based on the train schedules
+/// and infrastructure provided.
+///
+/// If no path is found, a fake train is simulated to detect conflicts with
+/// the existing train schedules. Both the conflict information and the
+/// pathfinding result of the fake train are then returned.
 #[utoipa::path(
     post, path = "", tag = "stdcm",
@@ -183,8 +194,8 @@ async fn stdcm(
     let infra =
         Infra::retrieve_or_fail(conn, infra_id, || STDCMError::InfraNotFound { infra_id }).await?;
 
-    let (trains, _): (Vec<_>, _) =
-        TrainSchedule::retrieve_batch(conn, timetable_trains.train_ids).await?;
+    let (train_schedules, _): (Vec<_>, _) =
+        TrainSchedule::retrieve_batch(conn, timetable_trains.train_ids.clone()).await?;
 
     let rolling_stock =
         RollingStockModel::retrieve_or_fail(conn, stdcm_request.rolling_stock_id, || {
@@ -198,7 +209,7 @@ async fn stdcm(
         conn,
         valkey_client.clone(),
         core_client.clone(),
-        &trains,
+        &train_schedules,
         &infra,
         stdcm_request.electrical_profile_set_id,
     )
@@ -206,7 +217,12 @@ async fn stdcm(
 
     // 2. Compute the earliest start time, maximum running time and maximum departure delay
     // Simulation time without stop duration
-    let simulation_run_time_result = get_simulation_run_time(
+    let (
+        simulation_run_time,
+        fake_train_schedule,
+        fake_train_sim_result,
+        fake_train_pathfinding_result,
+    ) = simulate_train_run(
         db_pool.clone(),
         valkey_client.clone(),
         core_client.clone(),
@@ -216,7 +232,7 @@ async fn stdcm(
         timetable_id,
     )
     .await?;
-    let simulation_run_time = match simulation_run_time_result {
+    let simulation_run_time = match simulation_run_time {
         SimulationTimeResult::SimulationTime { value } => value,
         SimulationTimeResult::Error { error } => {
             return Ok(Json(STDCMResponse::PreprocessingSimulationError {
@@ -224,6 +240,7 @@ async fn stdcm(
             }))
         }
     };
+
     let earliest_step_tolerance_window = get_earliest_step_tolerance_window(&stdcm_request);
     let maximum_departure_delay = get_maximum_departure_delay(
         &stdcm_request,
@@ -241,8 +258,8 @@ async fn stdcm(
 
     // 3. Get scheduled train requirements
     let trains_requirements = build_train_requirements(
-        trains,
-        simulations,
+        train_schedules.clone(),
+        simulations.clone(),
         earliest_departure_time,
         latest_simulation_end,
     );
@@ -264,10 +281,20 @@ async fn stdcm(
         None => vec![],
     };
 
-    // 6. Build STDCM request
-    let stdcm_response = STDCMRequest {
+    // 6. Retrieve work schedules
+    let work_schedules = match stdcm_request.work_schedule_group_id {
+        Some(work_schedule_group_id) => {
+            let selection_setting = SelectionSettings::new()
+                .filter(move || WorkSchedule::WORK_SCHEDULE_GROUP_ID.eq(work_schedule_group_id));
+            WorkSchedule::list(conn, selection_setting).await?
+        }
+        None => vec![],
+    };
+
+    // 7. Build STDCM request
+    let stdcm_request = STDCMRequest {
         infra: infra.id,
-        expected_version: infra.version,
+        expected_version: infra.version.clone(),
         rolling_stock: rolling_stock.clone().into(),
         rolling_stock_loading_gauge: rolling_stock.loading_gauge,
         rolling_stock_supported_signaling_systems: rolling_stock
@@ -276,7 +303,7 @@ async fn stdcm(
         comfort: stdcm_request.comfort,
         path_items,
         start_time: earliest_departure_time,
-        trains_requirements,
+        trains_requirements: trains_requirements.clone(),
         maximum_departure_delay,
         maximum_run_time,
         speed_limit_tag: stdcm_request.speed_limit_tags,
@@ -284,36 +311,116 @@ async fn stdcm(
         time_gap_after: stdcm_request.time_gap_after,
         margin: stdcm_request.margin,
         time_step: Some(2000),
-        work_schedules: match stdcm_request.work_schedule_group_id {
-            Some(work_schedule_group_id) => {
-                build_work_schedules(
-                    conn,
-                    earliest_departure_time,
-                    maximum_run_time,
-                    work_schedule_group_id,
-                )
-                .await?
-            }
-            None => vec![],
-        },
+        work_schedules: filter_stdcm_work_schedules(
+            &work_schedules,
+            earliest_departure_time,
+            maximum_run_time,
+        ),
         temporary_speed_limits,
+    };
+
+    let stdcm_response = stdcm_request.fetch(core_client.as_ref()).await?;
+
+    // 8. Handle PathNotFound response of STDCM
+    if let STDCMResponse::PathNotFound = stdcm_response {
+        let stdcm_response = handle_path_not_found(
+            fake_train_schedule,
+            train_schedules,
+            simulations,
+            fake_train_sim_result,
+            fake_train_pathfinding_result,
+            earliest_departure_time,
+            maximum_run_time,
+            latest_simulation_end,
+            &work_schedules,
+            infra_id,
+            infra.version,
+            core_client,
+        )
+        .await?;
+
+        return Ok(Json(stdcm_response));
     }
-    .fetch(core_client.as_ref())
-    .await?;
 
     Ok(Json(stdcm_response))
 }
 
+#[allow(clippy::too_many_arguments)]
+async fn handle_path_not_found(
+    fake_train_schedule: TrainSchedule,
+    train_schedules: Vec<TrainSchedule>,
+    simulations: Vec<(SimulationResponse, PathfindingResult)>,
+    fake_train_sim_result: SimulationResponse,
+    fake_train_pathfinding_result: PathfindingResult,
+    earliest_departure_time: DateTime<Utc>,
+    maximum_run_time: u64,
+    latest_simulation_end: DateTime<Utc>,
+    work_schedules: &[WorkSchedule],
+    infra_id: i64,
+    infra_version: String,
+    core_client: Arc<CoreClient>,
+) -> Result<STDCMResponse> {
+    let fake_train_id = fake_train_schedule.id;
+
+    // Combine the original train schedules with the fake train schedule.
+    let train_schedules = [train_schedules, vec![fake_train_schedule]].concat();
+
+    // Combine the original simulations with the fake train's simulation results.
+    let simulations = [
+        simulations,
+        vec![(fake_train_sim_result, fake_train_pathfinding_result.clone())],
+    ]
+    .concat();
+
+    // Build train requirements based on the combined train schedules and simulations.
+    // This prepares the data structure required for conflict detection.
+    let trains_requirements = build_train_requirements(
+        train_schedules,
+        simulations,
+        earliest_departure_time,
+        latest_simulation_end,
+    );
+
+    // Keep only the work schedules that overlap the window defined by the earliest
+    // departure time and the maximum run time.
+    let conflict_work_schedules =
+        filter_conflict_work_schedules(work_schedules, earliest_departure_time, maximum_run_time);
+
+    // Prepare the conflict detection request.
+    let conflict_detection_request = ConflictDetectionRequest {
+        infra: infra_id,
+        expected_version: infra_version,
+        trains_requirements,
+        work_schedules: conflict_work_schedules,
+    };
+
+    // Send the conflict detection request and await the response.
+    let conflict_detection_response = conflict_detection_request.fetch(&core_client).await?;
+
+    // Filter the conflicts to find those specifically related to the fake train.
+    let conflicts: Vec<_> = conflict_detection_response
+        .conflicts
+        .into_iter()
+        .filter(|conflict| conflict.train_ids.contains(&fake_train_id))
+        .collect();
+
+    // Return the conflicts found along with the pathfinding result for the fake train.
+    Ok(STDCMResponse::Conflicts {
+        pathfinding_result: fake_train_pathfinding_result,
+        conflicts,
+    })
+}
+
 /// Build the list of scheduled train requirements, only including requirements
 /// that overlap with the possible simulation times.
 fn build_train_requirements(
-    trains: Vec<TrainSchedule>,
+    train_schedules: Vec<TrainSchedule>,
     simulations: Vec<(SimulationResponse, PathfindingResult)>,
     departure_time: DateTime<Utc>,
     latest_simulation_end: DateTime<Utc>,
 ) -> HashMap<i64, TrainRequirements> {
     let mut trains_requirements = HashMap::new();
-    for (train, (sim, _)) in trains.iter().zip(simulations) {
+    for (train, (sim, _)) in train_schedules.iter().zip(simulations) {
         let final_output = match sim {
             SimulationResponse::Success { final_output, .. } => final_output,
             _ => continue,
@@ -445,9 +552,12 @@ fn get_earliest_step_tolerance_window(data: &STDCMRequestPayload) -> u64 {
         .unwrap_or(0)
 }
 
-/// Computes the simulation run time
-/// Returns an enum with either the result or a SimulationResponse if it failed
-async fn get_simulation_run_time(
+/// Simulate a train run for the requested path and return a `Result` containing:
+/// * `SimulationTimeResult` - The result of the simulation time calculation.
+/// * `TrainSchedule` - The generated train schedule based on the provided data.
+/// * `SimulationResponse` - The simulation response for the generated train schedule.
+/// * `PathfindingResult` - The pathfinding result for the generated train schedule.
+async fn simulate_train_run(
     db_pool: Arc<DbConnectionPoolV2>,
     valkey_client: Arc<ValkeyClient>,
     core_client: Arc<CoreClient>,
@@ -455,7 +565,12 @@ async fn get_simulation_run_time(
     infra: &Infra,
     rolling_stock: &RollingStockModel,
     timetable_id: i64,
-) -> Result<SimulationTimeResult> {
+) -> Result<(
+    SimulationTimeResult,
+    TrainSchedule,
+    SimulationResponse,
+    PathfindingResult,
+)> {
     // Doesn't matter for now, but eventually it will affect tmp speed limits
     let approx_start_time = get_earliest_step_time(data);
 
@@ -487,45 +602,50 @@ async fn get_simulation_run_time(
         options: Default::default(),
     };
 
-    let (sim_result, _) = train_simulation(
+    let (sim_result, pathfinding_result) = train_simulation(
         &mut db_pool.get().await?,
         valkey_client,
         core_client,
-        train_schedule,
+        train_schedule.clone(),
         infra,
         None,
     )
     .await?;
 
-    return Ok(match sim_result {
+    let simulation_run_time = match sim_result.clone() {
         SimulationResponse::Success { provisional, .. } => SimulationTimeResult::SimulationTime {
             value: *provisional.times.last().expect("empty simulation result"),
         },
         err => SimulationTimeResult::Error {
             error: Box::from(err),
         },
-    });
+    };
+    Ok((
+        simulation_run_time,
+        train_schedule,
+        sim_result,
+        pathfinding_result,
+    ))
 }
 
 /// Returns the request's total stop time
 fn get_total_stop_time(data: &STDCMRequestPayload) -> u64 {
-    return data
-        .steps
+    data.steps
         .iter()
         .map(|step: &PathfindingItem| step.duration.unwrap_or_default())
-        .sum();
+        .sum()
 }
 
 /// Convert the list of pathfinding items into a list of path item
 fn convert_steps(steps: &[PathfindingItem]) -> Vec<PathItem> {
-    return steps
+    steps
         .iter()
         .map(|step| PathItem {
             id: Default::default(),
             deleted: false,
             location: step.location.clone(),
         })
-        .collect();
+        .collect()
 }
 
 /// Build a margins object with one margin value covering the entire range
@@ -542,37 +662,56 @@ fn build_single_margin(margin: Option<MarginValue>) -> Margins {
     }
 }
 
-/// Build the list of work schedules for the given time range
-async fn build_work_schedules(
-    conn: &mut DbConnection,
-    time: DateTime<Utc>,
+fn filter_core_work_schedule(
+    ws: &WorkSchedule,
+    start_time: DateTime<Utc>,
+) -> crate::core::stdcm::WorkSchedule {
+    crate::core::stdcm::WorkSchedule {
+        start_time: elapsed_since_time_ms(&ws.start_date_time, &start_time),
+        end_time: elapsed_since_time_ms(&ws.end_date_time, &start_time),
+        track_ranges: ws
+            .track_ranges
+            .iter()
+            .map(|track| UndirectedTrackRange {
+                track_section: track.track.to_string(),
+                begin: (track.begin * 1000.0) as u64,
+                end: (track.end * 1000.0) as u64,
+            })
+            .collect(),
+    }
+}
+
+fn filter_stdcm_work_schedules(
+    work_schedules: &[WorkSchedule],
+    start_time: DateTime<Utc>,
     maximum_run_time: u64,
-    work_schedule_group_id: i64,
-) -> Result<Vec<CoreWorkSchedule>> {
-    let selection_setting: SelectionSettings<WorkSchedule> = SelectionSettings::new()
-        .filter(move || WorkSchedule::WORK_SCHEDULE_GROUP_ID.eq(work_schedule_group_id));
-    let res = Ok(WorkSchedule::list(conn, selection_setting)
-        .await?
+) -> Vec<crate::core::stdcm::WorkSchedule> {
+    work_schedules
         .iter()
-        .map(|ws| {
-            let schedule = CoreWorkSchedule {
-                start_time: elapsed_since_time_ms(&ws.start_date_time, &time),
-                end_time: elapsed_since_time_ms(&ws.end_date_time, &time),
-                track_ranges: ws
-                    .track_ranges
-                    .iter()
-                    .map(|track| UndirectedTrackRange {
-                        track_section: track.track.to_string(),
-                        begin: (track.begin * 1000.0) as u64,
-                        end: (track.end * 1000.0) as u64,
-                    })
-                    .collect(),
-            };
-            schedule
-        })
+        .map(|ws| filter_core_work_schedule(ws, start_time))
         .filter(|ws| ws.end_time > 0 && ws.start_time < maximum_run_time)
-        .collect());
-    res
+        .collect()
+}
+
+fn filter_conflict_work_schedules(
+    work_schedules: &[WorkSchedule],
+    start_time: DateTime<Utc>,
+    maximum_run_time: u64,
+) -> Option<WorkSchedulesRequest> {
+    if work_schedules.is_empty() {
+        return None;
+    }
+
+    let work_schedule_requirements = work_schedules
+        .iter()
+        .map(|ws| (ws.id, filter_core_work_schedule(ws, start_time)))
+        .filter(|(_, ws)| ws.end_time > 0 && ws.start_time < maximum_run_time)
+        .collect();
+
+    Some(WorkSchedulesRequest {
+        start_time,
+        work_schedule_requirements,
+    })
 }
 
 /// Return the list of speed limits that are active at any point in a given time range
@@ -581,7 +720,7 @@ async fn build_temporary_speed_limits(
     start_date_time: DateTime<Utc>,
     end_date_time: DateTime<Utc>,
     temporary_speed_limit_group_id: i64,
-) -> Result<Vec<CoreTemporarySpeedLimit>> {
+) -> Result<Vec<crate::core::stdcm::TemporarySpeedLimit>> {
     if end_date_time <= start_date_time {
         return Ok(Vec::new());
     }
@@ -638,7 +777,221 @@ async fn parse_stdcm_steps(
         .collect())
 }
 
+#[derive(Debug)]
 enum SimulationTimeResult {
     SimulationTime { value: u64 },
     Error { error: Box<SimulationResponse> },
 }
+
+#[cfg(test)]
+pub mod tests {
+    use axum::http::StatusCode;
+    use chrono::DateTime;
+    use editoast_models::DbConnectionPoolV2;
+    use pretty_assertions::assert_eq;
+    use rstest::rstest;
+    use serde_json::json;
+    use std::str::FromStr;
+    use uuid::Uuid;
+
+    use crate::core::conflict_detection::Conflict;
+    use crate::core::conflict_detection::ConflictType;
+    use crate::core::mocking::MockingClient;
+    use crate::core::pathfinding::PathfindingResult;
+    use crate::core::pathfinding::PathfindingResultSuccess;
+    use crate::core::simulation::CompleteReportTrain;
+    use crate::core::simulation::ElectricalProfiles;
+    use crate::core::simulation::ReportTrain;
+    use crate::core::simulation::SimulationResponse;
+    use crate::core::simulation::SpeedLimitProperties;
+    use crate::core::stdcm::STDCMResponse;
+    use crate::models::fixtures::create_fast_rolling_stock;
+    use crate::models::fixtures::create_small_infra;
+    use crate::models::fixtures::create_timetable;
+    use crate::views::test_app::TestAppBuilder;
+
+    fn pathfinding_result_success() -> PathfindingResult {
+        PathfindingResult::Success(PathfindingResultSuccess {
+            blocks: vec![],
+            routes: vec![],
+            track_section_ranges: vec![],
+            length: 0,
+            path_item_positions: vec![0, 10],
+        })
+    }
+
+    fn simulation_response() -> SimulationResponse {
+        SimulationResponse::Success {
+            base: ReportTrain {
+                positions: vec![],
+                times: vec![],
+                speeds: vec![],
+                energy_consumption: 0.0,
+                path_item_times: vec![0, 10],
+            },
+            provisional: ReportTrain {
+                positions: vec![],
+                times: vec![0, 10],
+                speeds: vec![],
+                energy_consumption: 0.0,
+                path_item_times: vec![0, 10],
+            },
+            final_output: CompleteReportTrain {
+                report_train: ReportTrain {
+                    positions: vec![],
+                    times: vec![],
+                    speeds: vec![],
+                    energy_consumption: 0.0,
+                    path_item_times: vec![0, 10],
+                },
+                signal_sightings: vec![],
+                zone_updates: vec![],
+                spacing_requirements: vec![],
+                routing_requirements: vec![],
+            },
+            mrsp: SpeedLimitProperties {
+                boundaries: vec![],
+                values: vec![],
+            },
+            electrical_profiles: ElectricalProfiles {
+                boundaries: vec![],
+                values: vec![],
+            },
+        }
+    }
+
+    fn stdcm_payload(rolling_stock_id: i64) -> serde_json::Value {
+        json!({
+          "comfort": "STANDARD",
+          "margin": "4.5min/100km",
+          "rolling_stock_id": rolling_stock_id,
+          "speed_limit_tags": "AR120",
+          "steps": [
+            {
+              "duration": 0,
+              "location": { "trigram": "WS", "secondary_code": "BV" },
+              "timing_data": {
+                "arrival_time": "2024-09-17T20:05:00+02:00",
+                "arrival_time_tolerance_before": 0,
+                "arrival_time_tolerance_after": 0
+              }
+            },
+            { "duration": 0, "location": { "trigram": "MWS", "secondary_code": "BV" } }
+          ],
+          "time_gap_after": 35000,
+          "time_gap_before": 35000
+        })
+    }
+
+    fn core_mocking_client() -> MockingClient {
+        let mut core = MockingClient::new();
+        core.stub("/v2/pathfinding/blocks")
+            .method(reqwest::Method::POST)
+            .response(StatusCode::OK)
+            .json(pathfinding_result_success())
+            .finish();
+        core.stub("/v2/standalone_simulation")
+            .method(reqwest::Method::POST)
+            .response(StatusCode::OK)
+            .json(serde_json::to_value(simulation_response()).unwrap())
+            .finish();
+        core
+    }
+
+    fn conflict_data() -> Conflict {
+        Conflict {
+            train_ids: vec![0, 1],
+            work_schedule_ids: vec![],
+            start_time: DateTime::from_str("2024-01-01T00:00:00Z")
+                .expect("Failed to parse datetime"),
+            end_time: DateTime::from_str("2024-01-02T00:00:00Z").expect("Failed to parse datetime"),
+            conflict_type: ConflictType::Spacing,
+            requirements: vec![],
+        }
+    }
+
+    #[rstest]
+    async fn stdcm_return_success() {
+        let db_pool = DbConnectionPoolV2::for_tests();
+        let mut core = core_mocking_client();
+        core.stub("/v2/stdcm")
+            .method(reqwest::Method::POST)
+            .response(StatusCode::OK)
+            .json(json!({
+                "status": "success",
+                "simulation": serde_json::to_value(simulation_response()).unwrap(),
+                "path": serde_json::to_value(pathfinding_result_success()).unwrap(),
+                "departure_time": "2024-01-02T00:00:00Z"
+            }))
+            .finish();
+
+        let app = TestAppBuilder::new()
+            .db_pool(db_pool.clone())
+            .core_client(core.into())
+            .build();
+        let small_infra = create_small_infra(&mut db_pool.get_ok()).await;
+        let timetable = create_timetable(&mut db_pool.get_ok()).await;
+        let rolling_stock =
+            create_fast_rolling_stock(&mut db_pool.get_ok(), &Uuid::new_v4().to_string()).await;
+
+        let request = app
+            .post(format!("/timetable/{}/stdcm?infra={}", timetable.id, small_infra.id).as_str())
+            .json(&stdcm_payload(rolling_stock.id));
+
+        let stdcm_response: STDCMResponse =
+            app.fetch(request).assert_status(StatusCode::OK).json_into();
+
+        if let PathfindingResult::Success(path) = pathfinding_result_success() {
+            assert_eq!(
+                stdcm_response,
+                STDCMResponse::Success {
+                    simulation: simulation_response(),
+                    path,
+                    departure_time: DateTime::from_str("2024-01-02T00:00:00Z")
+                        .expect("Failed to parse datetime")
+                }
+            );
+        }
+    }
+
+    #[rstest]
+    async fn stdcm_return_conflicts() {
+        let db_pool = DbConnectionPoolV2::for_tests();
+        let mut core = core_mocking_client();
+        core.stub("/v2/stdcm")
+            .method(reqwest::Method::POST)
+            .response(StatusCode::OK)
+            .json(json!({"status": "path_not_found"}))
+            .finish();
+        core.stub("/v2/conflict_detection")
+            .method(reqwest::Method::POST)
+            .response(StatusCode::OK)
+            .json(json!({"conflicts": [
+                serde_json::to_value(conflict_data()).unwrap()
+            ]}))
+            .finish();
+
+        let app = TestAppBuilder::new()
+            .db_pool(db_pool.clone())
+            .core_client(core.into())
+            .build();
+        let small_infra = create_small_infra(&mut db_pool.get_ok()).await;
+        let timetable = create_timetable(&mut db_pool.get_ok()).await;
+        let rolling_stock =
+            create_fast_rolling_stock(&mut db_pool.get_ok(), &Uuid::new_v4().to_string()).await;
+
+        let request = app
+            .post(format!("/timetable/{}/stdcm?infra={}", timetable.id, small_infra.id).as_str())
+            .json(&stdcm_payload(rolling_stock.id));
+
+        let stdcm_response: STDCMResponse =
+            app.fetch(request).assert_status(StatusCode::OK).json_into();
+        assert_eq!(
+            stdcm_response,
+            STDCMResponse::Conflicts {
+                pathfinding_result: pathfinding_result_success(),
+                conflicts: vec![conflict_data()],
+            }
+        );
+    }
+}
diff --git a/front/src/common/api/generatedEditoastApi.ts b/front/src/common/api/generatedEditoastApi.ts
index a3912a97352..ddc4a4d3295 100644
--- a/front/src/common/api/generatedEditoastApi.ts
+++ b/front/src/common/api/generatedEditoastApi.ts
@@ -1467,6 +1467,11 @@ export type PostTimetableByIdStdcmApiResponse = /** status 201 The simulation re
   | {
       status: 'path_not_found';
     }
+  | {
+      conflicts: Conflict[];
+      pathfinding_result: PathfindingResult;
+      status: 'conflicts';
+    }
   | {
       error: SimulationResponse;
       status: 'preprocessing_simulation_error';
diff --git a/tests/tests/test_stdcm.py b/tests/tests/test_stdcm.py
index ee371709444..7744f032cea 100644
--- a/tests/tests/test_stdcm.py
+++ b/tests/tests/test_stdcm.py
@@ -266,7 +266,75 @@ def test_max_running_time(small_scenario: Scenario, fast_rolling_stock: int):
     r = requests.post(url, json=payload)
     response = r.json()
     assert r.status_code == 200
-    assert response == {"status": "path_not_found"}
+    assert response == {
+        "status": "conflicts",
+        "conflicts": [
+            {
+                "conflict_type": "Spacing",
+                "start_time": "2024-01-01T07:30:00Z",
+                "end_time": "2024-01-01T16:00:00Z",
+                "train_ids": [0],
+                "work_schedule_ids": [3],
+                "requirements": [
+                    {
+                        "start_time": "2024-01-01T07:30:00Z",
+                        "end_time": "2024-01-01T16:00:00Z",
+                        "zone": "zone.[DG2:DECREASING, DH2:INCREASING, DH3:DECREASING]",
+                    },
+                    {
+                        "start_time": "2024-01-01T07:30:00Z",
+                        "end_time": "2024-01-01T16:00:00Z",
+                        "zone": "zone.[DH1_1:DECREASING, DH3:INCREASING]",
+                    },
+                    {
+                        "start_time": "2024-01-01T07:30:00Z",
+                        "end_time": "2024-01-01T16:00:00Z",
+                        "zone": "zone.[DH1_1:INCREASING, DH1_2:DECREASING]",
+                    },
+                ],
+            }
+        ],
+        "pathfinding_result": {
+            "blocks": [
+                "block.257fce538543c5f960490c7606bbc603",
+                "block.1dbf71a8896e98cd71157a44bb918a9e",
+                "block.97661cd54d96453abdc191d1be184af5",
+                "block.52b46c47a8253001dad72b6907da3a07",
+                "block.26351683b3a305dab1fb15183e256f82",
+                "block.9fd6806a7f1c25dbc9809036f8799c50",
+                "block.6eac7803d789741b4aab9b9d347ab7b0",
+                "block.077f958857f85779fd13430750bfdd80",
+                "block.99f89fff7dabab637ca7e7fb823faa8c",
+                "block.7a8dbf58fefc7795eb972d52f931ee3f",
+                "block.ddd4e5a371d90d2522e86eb3ced76206",
+                "block.1345e674f8d1c39dae0cd05951af5d8d",
+                "block.a69d6a804dfee71d0cc91314b915e0c7",
+                "block.f2b0ac13a7f1d9149ee6df4004cbaac3",
+            ],
+            "length": 42000000,
+            "path_item_positions": [0, 42000000],
+            "routes": [
+                "rt.buffer_stop.2->DA1",
+                "rt.DA1->DA6",
+                "rt.DA6->DC6",
+                "rt.DC6->DD3",
+                "rt.DD3->DH0",
+                "rt.DH0->DH2",
+                "rt.DH2->buffer_stop.7",
+            ],
+            "status": "success",
+            "track_section_ranges": [
+                {"begin": 0, "end": 1950000, "track_section": "TA2", "direction": "START_TO_STOP"},
+                {"begin": 0, "end": 50000, "track_section": "TA5", "direction": "START_TO_STOP"},
+                {"begin": 0, "end": 10000000, "track_section": "TA7", "direction": "START_TO_STOP"},
+                {"begin": 0, "end": 1000000, "track_section": "TC2", "direction": "START_TO_STOP"},
+                {"begin": 0, "end": 25000000, "track_section": "TD1", "direction": "START_TO_STOP"},
+                {"begin": 0, "end": 3000000, "track_section": "TD3", "direction": "START_TO_STOP"},
+                {"begin": 0, "end": 1000000, "track_section": "TH0", "direction": "START_TO_STOP"},
+                {"begin": 0, "end": 0, "track_section": "TH1", "direction": "START_TO_STOP"},
+            ],
+        },
+    }
 
 
 def _get_stdcm_response(infra: Infra, timetable_id: int, stdcm_payload: Dict[str, Any]):