// nautilus_backtest/config.rs
1// -------------------------------------------------------------------------------------------------
2// Copyright (C) 2015-2025 Nautech Systems Pty Ltd. All rights reserved.
3// https://nautechsystems.io
4//
5// Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
6// You may not use this file except in compliance with the License.
7// You may obtain a copy of the License at https://www.gnu.org/licenses/lgpl-3.0.en.html
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14// -------------------------------------------------------------------------------------------------
15
16// Under development
17#![allow(dead_code)]
18#![allow(unused_variables)]
19
20use std::collections::HashMap;
21
22use nautilus_core::UnixNanos;
23use nautilus_model::{
24 data::BarSpecification,
25 enums::{AccountType, BookType, OmsType},
26 identifiers::{ClientId, InstrumentId},
27 types::Currency,
28};
29use nautilus_system::config::NautilusKernelConfig;
30use ustr::Ustr;
31
/// Configuration for ``BacktestEngine`` instances.
///
/// Wraps the core kernel configuration together with backtest-specific flags.
#[derive(Debug, Clone)]
pub struct BacktestEngineConfig {
    /// The kernel configuration for the backtest engine.
    pub kernel: NautilusKernelConfig,
    /// If logging should be bypassed (defaults to `false` in both `new` and `Default`).
    bypass_logging: bool,
    /// If post backtest performance analysis should be run (defaults to `true`).
    run_analysis: bool,
}
42
43impl BacktestEngineConfig {
44 #[must_use]
45 pub fn new(
46 kernel: NautilusKernelConfig,
47 bypass_logging: Option<bool>,
48 run_analysis: Option<bool>,
49 ) -> Self {
50 Self {
51 kernel,
52 bypass_logging: bypass_logging.unwrap_or(false),
53 run_analysis: run_analysis.unwrap_or(true),
54 }
55 }
56}
57
58impl Default for BacktestEngineConfig {
59 fn default() -> Self {
60 Self {
61 kernel: NautilusKernelConfig::default(),
62 bypass_logging: false,
63 run_analysis: true,
64 }
65 }
66}
67
/// Represents a venue configuration for one specific backtest engine.
#[derive(Debug, Clone)]
pub struct BacktestVenueConfig {
    /// The name of the venue.
    name: Ustr,
    /// The order management system type for the exchange. If ``HEDGING`` will generate new position IDs.
    oms_type: OmsType,
    /// The account type for the exchange.
    account_type: AccountType,
    /// The default order book type.
    book_type: BookType,
    /// The starting account balances (specify one for a single asset account).
    /// NOTE(review): balances are kept as raw strings here — presumably parsed
    /// into money types downstream; confirm the expected format at the consumer.
    starting_balances: Vec<String>,
    /// If multi-venue routing should be enabled for the execution client.
    routing: bool,
    /// If the account for this exchange is frozen (balances will not change).
    frozen_account: bool,
    /// If stop orders are rejected on submission if trigger price is in the market.
    reject_stop_orders: bool,
    /// If orders with GTD time in force will be supported by the venue.
    support_gtd_orders: bool,
    /// If contingent orders will be supported/respected by the venue.
    /// If False, then it's expected the strategy will be managing any contingent orders.
    support_contingent_orders: bool,
    /// If venue position IDs will be generated on order fills.
    use_position_ids: bool,
    /// If all venue generated identifiers will be random UUID4's.
    use_random_ids: bool,
    /// If the `reduce_only` execution instruction on orders will be honored.
    use_reduce_only: bool,
    /// If bars should be processed by the matching engine(s) (and move the market).
    bar_execution: bool,
    /// Determines whether the processing order of bar prices is adaptive based on a heuristic.
    /// This setting is only relevant when `bar_execution` is True.
    /// If False, bar prices are always processed in the fixed order: Open, High, Low, Close.
    /// If True, the processing order adapts with the heuristic:
    /// - If High is closer to Open than Low then the processing order is Open, High, Low, Close.
    /// - If Low is closer to Open than High then the processing order is Open, Low, High, Close.
    bar_adaptive_high_low_ordering: bool,
    /// If trades should be processed by the matching engine(s) (and move the market).
    trade_execution: bool,
    /// The account base currency for the exchange. Use `None` for multi-currency accounts.
    base_currency: Option<Currency>,
    /// The account default leverage (for margin accounts).
    default_leverage: Option<f64>,
    /// The instrument specific leverage configuration (for margin accounts).
    /// NOTE(review): the map is keyed by `Currency`, yet the doc says
    /// "instrument specific" — an `InstrumentId` key may have been intended; confirm.
    leverages: Option<HashMap<Currency, f64>>,
}
116
117impl BacktestVenueConfig {
118 #[allow(clippy::too_many_arguments)]
119 #[must_use]
120 pub fn new(
121 name: Ustr,
122 oms_type: OmsType,
123 account_type: AccountType,
124 book_type: BookType,
125 routing: Option<bool>,
126 frozen_account: Option<bool>,
127 reject_stop_orders: Option<bool>,
128 support_gtd_orders: Option<bool>,
129 support_contingent_orders: Option<bool>,
130 use_position_ids: Option<bool>,
131 use_random_ids: Option<bool>,
132 use_reduce_only: Option<bool>,
133 bar_execution: Option<bool>,
134 bar_adaptive_high_low_ordering: Option<bool>,
135 trade_execution: Option<bool>,
136 starting_balances: Vec<String>,
137 base_currency: Option<Currency>,
138 default_leverage: Option<f64>,
139 leverages: Option<HashMap<Currency, f64>>,
140 ) -> Self {
141 Self {
142 name,
143 oms_type,
144 account_type,
145 book_type,
146 routing: routing.unwrap_or(false),
147 frozen_account: frozen_account.unwrap_or(false),
148 reject_stop_orders: reject_stop_orders.unwrap_or(true),
149 support_gtd_orders: support_gtd_orders.unwrap_or(true),
150 support_contingent_orders: support_contingent_orders.unwrap_or(true),
151 use_position_ids: use_position_ids.unwrap_or(true),
152 use_random_ids: use_random_ids.unwrap_or(false),
153 use_reduce_only: use_reduce_only.unwrap_or(true),
154 bar_execution: bar_execution.unwrap_or(true),
155 bar_adaptive_high_low_ordering: bar_adaptive_high_low_ordering.unwrap_or(false),
156 trade_execution: trade_execution.unwrap_or(false),
157 starting_balances,
158 base_currency,
159 default_leverage,
160 leverages,
161 }
162 }
163}
164
#[derive(Debug, Clone)]
/// Represents the data configuration for one specific backtest run.
///
/// All fields are stored as provided; interpretation (catalog access,
/// filtering, time-range slicing) happens in the consuming engine.
pub struct BacktestDataConfig {
    /// The path to the data catalog.
    catalog_path: String,
    /// The `fsspec` filesystem protocol for the catalog.
    catalog_fs_protocol: Option<String>,
    /// The instrument ID for the data configuration.
    instrument_id: Option<InstrumentId>,
    /// The start time (UNIX nanoseconds) for the data configuration.
    start_time: Option<UnixNanos>,
    /// The end time (UNIX nanoseconds) for the data configuration.
    end_time: Option<UnixNanos>,
    /// The additional filter expressions for the data catalog query.
    filter_expr: Option<String>,
    /// The client ID for the data configuration.
    client_id: Option<ClientId>,
    /// The metadata for the data catalog query.
    metadata: Option<HashMap<String, String>>,
    /// The bar specification for the data catalog query.
    bar_spec: Option<BarSpecification>,
}
187
188impl BacktestDataConfig {
189 #[allow(clippy::too_many_arguments)]
190 #[must_use]
191 pub const fn new(
192 catalog_path: String,
193 catalog_fs_protocol: Option<String>,
194 instrument_id: Option<InstrumentId>,
195 start_time: Option<UnixNanos>,
196 end_time: Option<UnixNanos>,
197 filter_expr: Option<String>,
198 client_id: Option<ClientId>,
199 metadata: Option<HashMap<String, String>>,
200 bar_spec: Option<BarSpecification>,
201 ) -> Self {
202 Self {
203 catalog_path,
204 catalog_fs_protocol,
205 instrument_id,
206 start_time,
207 end_time,
208 filter_expr,
209 client_id,
210 metadata,
211 bar_spec,
212 }
213 }
214}
215
/// Represents the configuration for one specific backtest run.
/// This includes a backtest engine with its actors and strategies, with the external inputs of venues and data.
#[derive(Debug, Clone)]
pub struct BacktestRunConfig {
    /// The venue configurations for the backtest run.
    venues: Vec<BacktestVenueConfig>,
    /// The data configurations for the backtest run.
    data: Vec<BacktestDataConfig>,
    /// The backtest engine configuration (the core system kernel).
    engine: BacktestEngineConfig,
    /// The number of data points to process in each chunk during streaming mode.
    /// If `None`, the backtest will run without streaming, loading all data at once.
    chunk_size: Option<usize>,
    /// If the backtest engine should be disposed on completion of the run
    /// (defaults to `true` in `new`).
    /// If `True`, then will drop data and all state.
    /// If `False`, then will *only* drop data.
    dispose_on_completion: bool,
    /// The start datetime (UTC, UNIX nanoseconds) for the backtest run.
    /// If `None` engine runs from the start of the data.
    start: Option<UnixNanos>,
    /// The end datetime (UTC, UNIX nanoseconds) for the backtest run.
    /// If `None` engine runs to the end of the data.
    end: Option<UnixNanos>,
}
240
241impl BacktestRunConfig {
242 #[must_use]
243 pub fn new(
244 venues: Vec<BacktestVenueConfig>,
245 data: Vec<BacktestDataConfig>,
246 engine: BacktestEngineConfig,
247 chunk_size: Option<usize>,
248 dispose_on_completion: Option<bool>,
249 start: Option<UnixNanos>,
250 end: Option<UnixNanos>,
251 ) -> Self {
252 Self {
253 venues,
254 data,
255 engine,
256 chunk_size,
257 dispose_on_completion: dispose_on_completion.unwrap_or(true),
258 start,
259 end,
260 }
261 }
262}