Process events instead of processing only transactions

Prep work to allow clients to send any type that can end up in
the ledger.
Greg Fitzgerald
2018-05-07 14:51:08 -06:00
parent 880cb8e7cc
commit 893011c3ba
3 changed files with 39 additions and 24 deletions
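The commit message's "any type that can end up in the ledger" refers to the Event enum the hunks below match on. As a reading aid, here is a minimal, self-contained sketch of that shape; the variant names and the `from`, `tx_sig`, and `dt` field names come from the match arms in process_verified_event further down, while every concrete type is a placeholder, not the crate's real definition.

// A minimal sketch, assuming placeholder types throughout; not the crate's
// real definitions. The `..` in the real match arms means the actual
// variants carry additional fields.
type PublicKey = [u8; 32];
type Signature = [u8; 64];
type Timestamp = u64; // stand-in for the real date-time type

struct Transaction; // stand-in for the real Transaction struct

enum Event {
    Transaction(Transaction),
    Signature { from: PublicKey, tx_sig: Signature },
    Timestamp { from: PublicKey, dt: Timestamp },
}

fn main() {
    // One of each variant, just to show the shape a client could now submit.
    let _events = vec![
        Event::Transaction(Transaction),
        Event::Signature { from: [0; 32], tx_sig: [0; 64] },
        Event::Timestamp { from: [0; 32], dt: 0 },
    ];
}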


@@ -218,13 +218,18 @@ impl Accountant {
         (trs, rest)
     }
 
-    pub fn process_verified_events(&self, events: Vec<Event>) -> Result<()> {
+    pub fn process_verified_events(&self, events: Vec<Event>) -> Vec<Result<Event>> {
         let (trs, rest) = Self::partition_events(events);
-        self.process_verified_transactions(trs);
+        let mut results: Vec<_> = self.process_verified_transactions(trs)
+            .into_iter()
+            .map(|x| x.map(Event::Transaction))
+            .collect();
+
         for event in rest {
-            self.process_verified_event(&event)?;
+            results.push(self.process_verified_event(event));
         }
-        Ok(())
+
+        results
     }
 
     /// Process a Witness Signature that has already been verified.
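One detail worth noting in the hunk above: `x.map(Event::Transaction)` uses the tuple-variant constructor as a plain function to lift each transaction result into a `Result<Event>`. A minimal sketch of that idiom with toy types (nothing here is the crate's real API):

// Toy types only, for illustrating the Result::map + variant-constructor idiom.
#[derive(Debug)]
struct Transaction(u64);

#[derive(Debug)]
enum Event {
    Transaction(Transaction),
}

fn main() {
    // Pretend these came back from process_verified_transactions.
    let tr_results: Vec<Result<Transaction, String>> =
        vec![Ok(Transaction(1)), Err("insufficient funds".to_string())];

    // Event::Transaction is usable as a fn(Transaction) -> Event, so
    // Result::map lifts each Ok value into the wider Event type.
    let results: Vec<Result<Event, String>> = tr_results
        .into_iter()
        .map(|x| x.map(Event::Transaction))
        .collect();

    println!("{:?}", results);
}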
@@ -278,12 +283,13 @@ impl Accountant {
     }
 
     /// Process an Transaction or Witness that has already been verified.
-    pub fn process_verified_event(&self, event: &Event) -> Result<()> {
-        match *event {
+    pub fn process_verified_event(&self, event: Event) -> Result<Event> {
+        match event {
             Event::Transaction(ref tr) => self.process_verified_transaction(tr),
             Event::Signature { from, tx_sig, .. } => self.process_verified_sig(from, tx_sig),
             Event::Timestamp { from, dt, .. } => self.process_verified_timestamp(from, dt),
-        }
+        }?;
+        Ok(event)
     }
 
     /// Create, sign, and process a Transaction from `keypair` to `to` of


@@ -308,20 +308,20 @@ impl AccountantSkel {
     /// Split Request list into verified transactions and the rest
     fn partition_requests(
         req_vers: Vec<(Request, SocketAddr, u8)>,
-    ) -> (Vec<Transaction>, Vec<(Request, SocketAddr)>) {
-        let mut trs = vec![];
+    ) -> (Vec<Event>, Vec<(Request, SocketAddr)>) {
+        let mut events = vec![];
         let mut reqs = vec![];
         for (msg, rsp_addr, verify) in req_vers {
             match msg {
                 Request::Transaction(tr) => {
                     if verify != 0 {
-                        trs.push(tr);
+                        events.push(Event::Transaction(tr));
                     }
                 }
                 _ => reqs.push((msg, rsp_addr)),
             }
         }
-        (trs, reqs)
+        (events, reqs)
     }
 
     fn process_packets(
@@ -329,16 +329,16 @@ impl AccountantSkel {
         req_vers: Vec<(Request, SocketAddr, u8)>,
     ) -> Result<Vec<(Response, SocketAddr)>> {
         debug!("partitioning");
-        let (trs, reqs) = Self::partition_requests(req_vers);
-        debug!("trs: {} reqs: {}", trs.len(), reqs.len());
+        let (events, reqs) = Self::partition_requests(req_vers);
+        debug!("events: {} reqs: {}", events.len(), reqs.len());
 
         // Process the transactions in parallel and then log the successful ones.
-        for result in self.acc.lock().unwrap().process_verified_transactions(trs) {
-            if let Ok(tr) = result {
+        for result in self.acc.lock().unwrap().process_verified_events(events) {
+            if let Ok(event) = result {
                 self.historian_input
                     .lock()
                     .unwrap()
-                    .send(Signal::Event(Event::Transaction(tr)))?;
+                    .send(Signal::Event(event))?;
             }
         }
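The loop above shows the payoff of the new return type: each event yields its own Result, so successfully processed events can still be forwarded to the historian while failures no longer abort the whole batch. A self-contained sketch of that pattern using a std mpsc channel and stand-in types (the real Event, Signal, and historian plumbing differ):

use std::sync::mpsc::channel;

// Stand-ins that only echo the names in the diff above.
#[derive(Debug)]
struct Event(u64);

#[derive(Debug)]
enum Signal {
    Event(Event),
}

fn main() {
    let (historian_input, historian_output) = channel();

    // Pretend these are the per-event results from process_verified_events.
    let results: Vec<Result<Event, String>> =
        vec![Ok(Event(1)), Err("rejected".to_string()), Ok(Event(3))];

    // Same shape as the loop above: only events that processed successfully
    // are forwarded; failed ones are skipped rather than failing the batch.
    for result in results {
        if let Ok(event) = result {
            historian_input.send(Signal::Event(event)).unwrap();
        }
    }

    drop(historian_input); // close the channel so the receiver loop ends
    for signal in historian_output {
        println!("logged {:?}", signal);
    }
}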
@@ -436,13 +436,12 @@ impl AccountantSkel {
             for msgs in &blobs {
                 let blob = msgs.read().unwrap();
                 let entries: Vec<Entry> = deserialize(&blob.data()[..blob.meta.size]).unwrap();
+                let acc = obj.acc.lock().unwrap();
                 for entry in entries {
-                    obj.acc.lock().unwrap().register_entry_id(&entry.id);
-
-                    obj.acc
-                        .lock()
-                        .unwrap()
-                        .process_verified_events(entry.events)?;
+                    acc.register_entry_id(&entry.id);
+                    for result in acc.process_verified_events(entry.events) {
+                        result?;
+                    }
                 }
                 //TODO respond back to leader with hash of the state
             }
@@ -805,7 +804,11 @@ mod tests {
         // the account balance below zero before the credit is added.
         let acc = Accountant::new(&mint);
         for entry in entries {
-            acc.process_verified_events(entry.events).unwrap();
+            assert!(
+                acc.process_verified_events(entry.events)
+                    .into_iter()
+                    .all(|x| x.is_ok())
+            );
         }
         assert_eq!(acc.get_balance(&alice.pubkey()), Some(1));
     }
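The test asserts that every per-event Result is Ok via `.all(|x| x.is_ok())`. An equivalent option, shown here with placeholder types, is to collect the `Vec<Result<_, _>>` into a single `Result`, which short-circuits on the first error:

fn main() {
    // Placeholder results standing in for Vec<Result<Event>>.
    let results: Vec<Result<u64, String>> = vec![Ok(1), Ok(2), Err("bad".to_string())];

    // FromIterator for Result: stops at the first Err and returns it,
    // otherwise yields all the Ok values in order.
    let collected: Result<Vec<u64>, String> = results.into_iter().collect();

    assert!(collected.is_err());
}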


@@ -103,7 +103,13 @@ fn main() {
     let mut last_id = entry1.id;
     for entry in entries {
         last_id = entry.id;
-        acc.process_verified_events(entry.events).unwrap();
+        let results = acc.process_verified_events(entry.events);
+        for result in results {
+            if let Err(e) = result {
+                eprintln!("failed to process event {:?}", e);
+                exit(1);
+            }
+        }
         acc.register_entry_id(&last_id);
     }