-
Notifications
You must be signed in to change notification settings - Fork 89
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Migrate auctions to new database table #3067
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -802,6 +802,92 @@ impl Persistence { | |
ex.commit().await?; | ||
Ok(()) | ||
} | ||
|
||
pub async fn populate_historic_auctions(&self) -> Result<(), DatabaseError> { | ||
const BATCH_SIZE: i64 = 50; | ||
|
||
let mut ex = self.postgres.pool.begin().await?; | ||
|
||
// find entry in `competition_auctions` with the lowest auction_id, as a | ||
// starting point | ||
let current_auction_id: Option<i64> = | ||
sqlx::query_scalar::<_, Option<i64>>("SELECT MIN(id) FROM competition_auctions;") | ||
.fetch_one(ex.deref_mut()) | ||
.await | ||
.context("fetch lowest auction id")?; | ||
|
||
let Some(mut current_auction_id) = current_auction_id else { | ||
tracing::info!("competition_auctions is empty, nothing to process"); | ||
return Ok(()); | ||
}; | ||
|
||
loop { | ||
tracing::debug!( | ||
auction_id = current_auction_id, | ||
"populating historic auctions from auction" | ||
); | ||
|
||
// fetch the next batch of auctions | ||
let competitions: Vec<database::solver_competition::RichSolverCompetition> = | ||
database::solver_competition::fetch_batch(&mut ex, current_auction_id, BATCH_SIZE) | ||
.await?; | ||
|
||
if competitions.is_empty() { | ||
tracing::info!("no more auctions to process"); | ||
break; | ||
} | ||
|
||
tracing::debug!(competitions = ?competitions.len(), "competitions fetched"); | ||
|
||
for solver_competition in &competitions { | ||
let competition: model::solver_competition::SolverCompetitionDB = | ||
serde_json::from_value(solver_competition.json.clone()) | ||
.context("deserialize SolverCompetitionDB")?; | ||
|
||
// populate historic auctions | ||
let auction = database::auction::Auction { | ||
id: solver_competition.id, | ||
block: i64::try_from(competition.auction_start_block) | ||
.context("block overflow")?, | ||
deadline: solver_competition.deadline, | ||
order_uids: competition | ||
.auction | ||
.orders | ||
.iter() | ||
.map(|order| ByteArray(order.0)) | ||
.collect(), | ||
price_tokens: competition | ||
.auction | ||
.prices | ||
.keys() | ||
.map(|token| ByteArray(token.0)) | ||
.collect(), | ||
price_values: competition | ||
.auction | ||
.prices | ||
.values() | ||
.map(u256_to_big_decimal) | ||
.collect(), | ||
surplus_capturing_jit_order_owners: solver_competition | ||
.surplus_capturing_jit_order_owners | ||
.clone(), | ||
}; | ||
|
||
if let Err(err) = database::auction::save(&mut ex, auction).await { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Is there are reason we only populate one of the new tables. AFAICS we can populate the There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. We have a separate issue for proposed solutions migration: #3056 The migration of solver competition is a bit more complicated and I planned doing it in a separate step -> smaller PRs |
||
tracing::warn!(?err, auction_id = ?solver_competition.id, "failed to save auction"); | ||
} | ||
} | ||
|
||
// commit each batch separately | ||
ex.commit().await?; | ||
ex = self.postgres.pool.begin().await?; | ||
|
||
// update the current auction id | ||
current_auction_id = competitions.last().unwrap().id; | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. What happens if this process gets interrupted in the middle of it? There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Another point for a separate script, which could store the counter on disk. |
||
} | ||
|
||
Ok(()) | ||
} | ||
} | ||
|
||
#[derive(prometheus_metric_storage::MetricStorage)] | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -449,6 +449,11 @@ pub async fn run(args: Arguments) { | |
.instrument(tracing::info_span!("order_events_cleaner")), | ||
); | ||
|
||
if args.migrate_auctions { | ||
let persistence_clone = persistence.clone(); | ||
tokio::spawn(async move { persistence_clone.populate_historic_auctions().await }); | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. When we spawn this I assume we are still populating both tables, therefore the migration will never end, right? 🤔 |
||
} | ||
|
||
let market_makable_token_list_configuration = TokenListConfiguration { | ||
url: args.trusted_tokens_url, | ||
update_interval: args.trusted_tokens_update_interval, | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Is it possible to test this beforehand in a unit test or e2e test?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
We could even set up a snapshot of the actual DB and run a local DB migration with that to see how it will behave with real data.