More scalable Proof of Space #2526

Merged · 2 commits · Feb 12, 2024

2 changes: 0 additions & 2 deletions crates/subspace-proof-of-space/Cargo.toml
@@ -43,6 +43,4 @@ std = [
 ]
 parallel = [
     "dep:rayon",
-    # Parallel implementation requires std due to usage of channels to achieve highest performance
-    "std",
 ]
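
With the std-only channels gone from the implementation (see the table.rs diff below), the parallel feature no longer has to force-enable the crate's std feature. After this hunk the feature definition reduces to:

    parallel = [
        "dep:rayon",
    ]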
78 changes: 34 additions & 44 deletions crates/subspace-proof-of-space/src/chiapos/table.rs
@@ -16,10 +16,10 @@ use core::mem;
 use core::simd::num::SimdUint;
 use core::simd::Simd;
+#[cfg(any(feature = "parallel", test))]
+use core::sync::atomic::{AtomicUsize, Ordering};
 #[cfg(any(feature = "parallel", test))]
 use rayon::prelude::*;
 use seq_macro::seq;
-#[cfg(any(feature = "parallel", test))]
-use std::sync::mpsc;
 use subspace_core_primitives::crypto::{blake3_hash, blake3_hash_list};
 
 pub(super) const COMPUTE_F1_SIMD_FACTOR: usize = 8;
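
This import swap is what enables the Cargo.toml change: work distribution moves from a std-only mpsc channel to an atomic counter available in core. A minimal sketch of the claimed-offset primitive the new code builds on (the function name is invented for illustration):

    use core::sync::atomic::{AtomicUsize, Ordering};

    /// Hands out unique offsets to concurrent callers until `limit` is
    /// reached. Relaxed ordering suffices: callers only need distinct
    /// values, not synchronization with other memory operations.
    fn claim_next(counter: &AtomicUsize, limit: usize) -> Option<usize> {
        let offset = counter.fetch_add(1, Ordering::Relaxed);
        (offset < limit).then_some(offset)
    }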
@@ -759,56 +759,46 @@
         // Iteration stopped, but we did not store the last bucket yet
         buckets.push(bucket);
 
-        let (entries_sender, entries_receiver) = mpsc::sync_channel(1);
+        let counter = AtomicUsize::new(0);
 
-        let t_n_handle = std::thread::spawn(move || {
-            let num_values = 1 << K;
-            let mut t_n = Vec::with_capacity(num_values);
+        let t_n = rayon::broadcast(|_ctx| {
+            let mut entries = Vec::new();
+            let mut rmap_scratch = Vec::new();
+
+            loop {
+                let offset = counter.fetch_add(1, Ordering::Relaxed);
+                if offset >= buckets.len() - 1 {
+                    break;
+                }
 
-            while let Ok(entries) = entries_receiver.recv() {
-                t_n.extend(entries);
+                match_and_compute_fn::<K, TABLE_NUMBER, PARENT_TABLE_NUMBER>(
+                    last_table,
+                    buckets[offset],
+                    buckets[offset + 1],
+                    &mut rmap_scratch,
+                    left_targets,
+                    &mut entries,
+                );
             }
 
-            t_n
+            entries
         });
 
-        buckets
-            .par_windows(2)
-            .fold(
-                || (Vec::new(), Vec::new()),
-                |(mut entries, mut rmap_scratch), buckets| {
-                    match_and_compute_fn::<K, TABLE_NUMBER, PARENT_TABLE_NUMBER>(
-                        last_table,
-                        buckets[0],
-                        buckets[1],
-                        &mut rmap_scratch,
-                        left_targets,
-                        &mut entries,
-                    );
-                    (entries, rmap_scratch)
-                },
-            )
-            .for_each(move |(entries, _rmap_scratch)| {
-                entries_sender
-                    .send(entries)
-                    .expect("Receiver is waiting until sender is exhausted; qed");
-            });
-
-        let mut t_n = t_n_handle.join().expect("Not joining itself; qed");
+        let mut t_n = t_n.into_iter().flatten().collect::<Vec<_>>();
         t_n.par_sort_unstable();
 
-        let mut ys = vec![Default::default(); t_n.len()];
-        let mut positions = vec![Default::default(); t_n.len()];
-        let mut metadatas = vec![Default::default(); t_n.len()];
-
-        // Going in parallel saves a bit of time
-        t_n.into_par_iter()
-            .zip(ys.par_iter_mut().zip(&mut positions).zip(&mut metadatas))
-            .for_each(|(input, output)| {
-                *output.0 .0 = input.0;
-                *output.0 .1 = input.1;
-                *output.1 = input.2;
-            });
+        let mut ys = Vec::with_capacity(t_n.len());
+        let mut positions = Vec::with_capacity(t_n.len());
+        let mut metadatas = Vec::with_capacity(t_n.len());
+
+        for (y, [left_position, right_position], metadata) in t_n {
+            ys.push(y);
+            positions.push([left_position, right_position]);
+            // Last table doesn't have metadata
+            if metadata_size_bits(K, TABLE_NUMBER) > 0 {
+                metadatas.push(metadata);
+            }
+        }
 
         Self::Other {
             ys,
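
Reduced to a self-contained sketch, the new scheme works like this (the function and the u32 data are invented for illustration; the real code matches adjacent buckets with match_and_compute_fn and collects table entries):

    use std::sync::atomic::{AtomicUsize, Ordering};

    /// Every worker in the global rayon pool runs this closure once,
    /// repeatedly claims the next pair of adjacent items via a shared
    /// counter, and returns its local results; rayon::broadcast yields
    /// one Vec per worker, which is then flattened into a single Vec.
    fn process_pairs(items: &[u32]) -> Vec<u32> {
        assert!(!items.is_empty(), "sketch assumes a non-empty input");
        let counter = AtomicUsize::new(0);
        let per_worker: Vec<Vec<u32>> = rayon::broadcast(|_ctx| {
            let mut local = Vec::new();
            loop {
                let offset = counter.fetch_add(1, Ordering::Relaxed);
                if offset >= items.len() - 1 {
                    break;
                }
                // Stand-in for match_and_compute_fn over items[offset]
                // and items[offset + 1].
                local.push(items[offset] + items[offset + 1]);
            }
            local
        });
        per_worker.into_iter().flatten().collect()
    }

Compared with the removed par_windows + fold + mpsc pipeline, this needs no dedicated collector thread: each worker reuses one entries buffer and one rmap_scratch allocation for its entire run, and results are concatenated once at the end. The final copy into ys, positions and metadatas likewise becomes a plain sequential loop over the sorted entries, skipping metadata when metadata_size_bits(K, TABLE_NUMBER) is zero, since the last table stores none.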