Files
solana/src/accountant_skel.rs

383 lines
13 KiB
Rust
Raw Normal View History

2018-03-30 13:10:27 -07:00
//! The `accountant_skel` module is a microservice that exposes the high-level
//! Accountant API to the network. Its message encoding is currently
//! in flux. Clients should use AccountantStub to interact with it.
2018-03-29 12:20:54 -06:00
2018-02-28 10:07:54 -07:00
use accountant::Accountant;
2018-03-28 14:40:58 -06:00
use bincode::{deserialize, serialize};
use ecdsa;
use entry::Entry;
use event::Event;
use hash::Hash;
2018-04-02 21:15:21 -06:00
use historian::Historian;
2018-03-26 21:07:11 -07:00
use packet;
use packet::SharedPackets;
2018-04-02 21:15:21 -06:00
use rayon::prelude::*;
use recorder::Signal;
use result::Result;
use serde_json;
use signature::PublicKey;
2018-03-26 21:07:11 -07:00
use std::cmp::max;
use std::collections::VecDeque;
2018-03-29 12:54:10 -06:00
use std::io::Write;
2018-03-29 13:09:21 -06:00
use std::net::{SocketAddr, UdpSocket};
2018-03-22 14:05:23 -06:00
use std::sync::atomic::{AtomicBool, Ordering};
2018-04-11 11:17:00 -06:00
use std::sync::mpsc::{channel, Receiver, Sender};
2018-03-26 21:07:11 -07:00
use std::sync::{Arc, Mutex};
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
use streamer;
use transaction::Transaction;
2018-02-28 10:07:54 -07:00
2018-03-26 11:17:19 -07:00
/// Server-side state for the accountant microservice: the ledger, the most
/// recently seen entry id, the sink for the entry log, and the Historian
/// that produces the entry stream.
pub struct AccountantSkel<W: Write + Send + 'static> {
    /// The ledger state (balances, registered entry ids).
    acc: Accountant,
    /// Id of the most recent entry drained from the Historian (see `sync`).
    last_id: Hash,
    /// Sink for the entry log, written as one JSON object per line.
    writer: W,
    /// Source of Entry items; also receives transaction events for recording.
    historian: Historian,
}
2018-03-22 14:59:25 -06:00
/// A request sent by a client to the skeleton service.
#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
#[derive(Serialize, Deserialize, Debug)]
pub enum Request {
    /// Apply a transaction to the ledger.
    Transaction(Transaction),
    /// Query the balance associated with a public key.
    GetBalance { key: PublicKey },
    /// Query the id of the most recent entry.
    GetLastId,
}
impl Request {
/// Verify the request is valid.
pub fn verify(&self) -> bool {
match *self {
2018-03-26 21:07:11 -07:00
Request::Transaction(ref tr) => tr.verify_plan(),
_ => true,
}
}
}
2018-02-28 14:16:50 -07:00
/// A reply sent back to the client that issued a Request.
#[derive(Serialize, Deserialize, Debug)]
pub enum Response {
    /// Answer to GetBalance; `val` is the balance, if any.
    Balance { key: PublicKey, val: Option<i64> },
    /// A batch of ledger entries.
    Entries { entries: Vec<Entry> },
    /// Answer to GetLastId.
    LastId { id: Hash },
}
2018-03-26 11:17:19 -07:00
impl<W: Write + Send + 'static> AccountantSkel<W> {
2018-03-29 12:20:54 -06:00
/// Create a new AccountantSkel that wraps the given Accountant.
pub fn new(acc: Accountant, last_id: Hash, writer: W, historian: Historian) -> Self {
AccountantSkel {
acc,
last_id,
2018-04-02 09:30:10 -06:00
writer,
historian,
}
2018-03-20 23:15:44 -06:00
}
2018-03-29 12:20:54 -06:00
/// Process any Entry items that have been published by the Historian.
2018-03-26 11:17:19 -07:00
pub fn sync(&mut self) -> Hash {
while let Ok(entry) = self.historian.receiver.try_recv() {
2018-03-20 23:15:44 -06:00
self.last_id = entry.id;
self.acc.register_entry_id(&self.last_id);
2018-03-28 14:40:58 -06:00
writeln!(self.writer, "{}", serde_json::to_string(&entry).unwrap()).unwrap();
2018-03-20 23:15:44 -06:00
}
self.last_id
2018-02-28 14:16:50 -07:00
}
2018-03-29 12:20:54 -06:00
/// Process Request items sent by clients.
2018-04-11 11:17:00 -06:00
pub fn process_request(
&mut self,
msg: Request,
rsp_addr: SocketAddr,
) -> Option<(Response, SocketAddr)> {
2018-02-28 10:07:54 -07:00
match msg {
Request::GetBalance { key } => {
let val = self.acc.get_balance(&key);
2018-04-11 11:17:00 -06:00
Some((Response::Balance { key, val }, rsp_addr))
2018-02-28 10:07:54 -07:00
}
2018-04-11 11:17:00 -06:00
Request::GetLastId => Some((Response::LastId { id: self.sync() }, rsp_addr)),
Request::Transaction(_) => unreachable!(),
2018-02-28 10:07:54 -07:00
}
}
2018-04-11 09:02:33 -06:00
fn recv_batch(recvr: &streamer::PacketReceiver) -> Result<Vec<SharedPackets>> {
2018-03-26 21:07:11 -07:00
let timer = Duration::new(1, 0);
let msgs = recvr.recv_timeout(timer)?;
trace!("got msgs");
2018-04-11 09:02:33 -06:00
let mut batch = vec![msgs];
2018-03-26 21:07:11 -07:00
while let Ok(more) = recvr.try_recv() {
trace!("got more msgs");
2018-04-11 09:02:33 -06:00
batch.push(more);
2018-03-26 21:07:11 -07:00
}
2018-04-11 09:02:33 -06:00
info!("batch len {}", batch.len());
Ok(batch)
}
fn verify_batch(batch: Vec<SharedPackets>) -> Vec<Vec<(SharedPackets, Vec<u8>)>> {
let chunk_size = max(1, (batch.len() + 3) / 4);
let batches: Vec<_> = batch.chunks(chunk_size).map(|x| x.to_vec()).collect();
batches
2018-03-26 21:07:11 -07:00
.into_par_iter()
2018-04-11 09:02:33 -06:00
.map(|batch| {
let r = ecdsa::ed25519_verify(&batch);
batch.into_iter().zip(r).collect()
})
.collect()
}
fn verifier(
recvr: &streamer::PacketReceiver,
sendr: &Sender<Vec<(SharedPackets, Vec<u8>)>>,
) -> Result<()> {
let batch = Self::recv_batch(recvr)?;
let verified_batches = Self::verify_batch(batch);
for xs in verified_batches {
sendr.send(xs)?;
2018-03-26 21:07:11 -07:00
}
Ok(())
}
pub fn deserialize_packets(p: &packet::Packets) -> Vec<Option<(Request, SocketAddr)>> {
2018-04-06 15:52:58 -06:00
p.packets
.par_iter()
.map(|x| {
deserialize(&x.data[0..x.meta.size])
.map(|req| (req, x.meta.addr()))
.ok()
})
.collect()
2018-03-26 21:07:11 -07:00
}
2018-04-11 11:17:00 -06:00
/// Split Request list into verified transactions and the rest
fn partition_requests(
req_vers: Vec<(Request, SocketAddr, u8)>,
) -> (Vec<Transaction>, Vec<(Request, SocketAddr)>) {
let mut trs = vec![];
let mut reqs = vec![];
for (msg, rsp_addr, verify) in req_vers {
match msg {
Request::Transaction(tr) => {
if verify != 0 {
trs.push(tr);
}
}
_ => reqs.push((msg, rsp_addr)),
}
}
(trs, reqs)
}
    /// Apply a batch of verified requests: transactions go through the
    /// accountant in parallel (successful ones are forwarded to the
    /// Historian for recording), remaining query requests are answered
    /// serially. Returns the responses with their destination addresses.
    fn process_packets(
        &mut self,
        req_vers: Vec<(Request, SocketAddr, u8)>,
    ) -> Result<Vec<(Response, SocketAddr)>> {
        let (trs, reqs) = Self::partition_requests(req_vers);
        // Process the transactions in parallel and then log the successful ones.
        // Failed transactions are dropped without a response.
        for result in self.acc.process_verified_transactions(trs) {
            if let Ok(tr) = result {
                self.historian
                    .sender
                    .send(Signal::Event(Event::Transaction(tr)))?;
            }
        }
        // Process the remaining requests serially.
        let rsps = reqs.into_iter()
            .filter_map(|(req, rsp_addr)| self.process_request(req, rsp_addr))
            .collect();
        Ok(rsps)
    }
fn serialize_response(
resp: Response,
rsp_addr: SocketAddr,
blob_recycler: &packet::BlobRecycler,
) -> Result<packet::SharedBlob> {
let blob = blob_recycler.allocate();
{
let mut b = blob.write().unwrap();
let v = serialize(&resp)?;
let len = v.len();
b.data[..len].copy_from_slice(&v);
b.meta.size = len;
b.meta.set_addr(&rsp_addr);
}
Ok(blob)
}
fn serialize_responses(
rsps: Vec<(Response, SocketAddr)>,
2018-04-06 16:12:13 -06:00
blob_recycler: &packet::BlobRecycler,
) -> Result<VecDeque<packet::SharedBlob>> {
let mut blobs = VecDeque::new();
for (resp, rsp_addr) in rsps {
blobs.push_back(Self::serialize_response(resp, rsp_addr, blob_recycler)?);
2018-04-06 16:12:13 -06:00
}
Ok(blobs)
2018-04-06 16:12:13 -06:00
}
fn process(
obj: &Arc<Mutex<AccountantSkel<W>>>,
verified_receiver: &Receiver<Vec<(SharedPackets, Vec<u8>)>>,
blob_sender: &streamer::BlobSender,
packet_recycler: &packet::PacketRecycler,
blob_recycler: &packet::BlobRecycler,
) -> Result<()> {
let timer = Duration::new(1, 0);
let mms = verified_receiver.recv_timeout(timer)?;
for (msgs, vers) in mms {
2018-04-06 16:12:13 -06:00
let reqs = Self::deserialize_packets(&msgs.read().unwrap());
2018-04-10 21:43:53 -06:00
let req_vers = reqs.into_iter()
.zip(vers)
.filter_map(|(req, ver)| req.map(|(msg, addr)| (msg, addr, ver)))
.filter(|x| x.0.verify())
.collect();
2018-04-11 11:17:00 -06:00
let rsps = obj.lock().unwrap().process_packets(req_vers)?;
let blobs = Self::serialize_responses(rsps, blob_recycler)?;
if !blobs.is_empty() {
2018-03-26 21:07:11 -07:00
//don't wake up the other side if there is nothing
blob_sender.send(blobs)?;
2018-03-26 21:07:11 -07:00
}
2018-04-06 15:58:11 -06:00
packet_recycler.recycle(msgs);
}
Ok(())
}
2018-02-28 10:07:54 -07:00
2018-03-29 12:20:54 -06:00
    /// Create a UDP microservice that forwards messages the given AccountantSkel.
    /// Set `exit` to shutdown its threads.
    ///
    /// Pipeline: receiver thread -> verifier thread (signature check) ->
    /// server thread (request processing) -> responder thread. Returns the
    /// join handles for all four threads.
    pub fn serve(
        obj: &Arc<Mutex<AccountantSkel<W>>>,
        addr: &str,
        exit: Arc<AtomicBool>,
    ) -> Result<Vec<JoinHandle<()>>> {
        let read = UdpSocket::bind(addr)?;
        // make sure we are on the same interface
        let mut local = read.local_addr()?;
        local.set_port(0);
        let write = UdpSocket::bind(local)?;
        let packet_recycler = packet::PacketRecycler::default();
        let blob_recycler = packet::BlobRecycler::default();
        let (packet_sender, packet_receiver) = channel();
        let t_receiver =
            streamer::receiver(read, exit.clone(), packet_recycler.clone(), packet_sender)?;
        let (blob_sender, blob_receiver) = channel();
        let t_responder =
            streamer::responder(write, exit.clone(), blob_recycler.clone(), blob_receiver);
        let (verified_sender, verified_receiver) = channel();
        let exit_ = exit.clone();
        // Signature-verification stage.
        // NOTE(review): each loop only breaks when a call errors AND `exit`
        // is set, so the threads spin through errors until shutdown is
        // requested — confirm this is the intended shutdown protocol.
        let t_verifier = spawn(move || loop {
            let e = Self::verifier(&packet_receiver, &verified_sender);
            if e.is_err() && exit_.load(Ordering::Relaxed) {
                break;
            }
        });
        let skel = obj.clone();
        // Request-processing stage.
        let t_server = spawn(move || loop {
            let e = AccountantSkel::process(
                &skel,
                &verified_receiver,
                &blob_sender,
                &packet_recycler,
                &blob_recycler,
            );
            if e.is_err() && exit.load(Ordering::Relaxed) {
                break;
            }
        });
        Ok(vec![t_receiver, t_responder, t_server, t_verifier])
    }
}
#[cfg(test)]
mod tests {
    use accountant_skel::Request;
    use bincode::serialize;
    use ecdsa;
    use transaction::{memfind, test_tx};
    /// Check that a serialized `Request::Transaction` embeds the serialized
    /// transaction at the byte offset the ecdsa verifier expects
    /// (`ecdsa::TX_OFFSET`), so signature verification can find it in raw
    /// packet data.
    #[test]
    fn test_layout() {
        let tr = test_tx();
        let tx = serialize(&tr).unwrap();
        let packet = serialize(&Request::Transaction(tr)).unwrap();
        assert_matches!(memfind(&packet, &tx), Some(ecdsa::TX_OFFSET));
    }
}
2018-04-11 14:05:29 -06:00
#[cfg(all(feature = "unstable", test))]
mod bench {
    extern crate test;
    use self::test::Bencher;
    use accountant::{Accountant, MAX_ENTRY_IDS};
    use accountant_skel::*;
    use bincode::serialize;
    use hash::hash;
    use mint::Mint;
    use signature::{KeyPair, KeyPairUtil};
    use std::collections::HashSet;
    use std::io::sink;
    use std::time::Instant;
    use transaction::Transaction;
    /// One-shot throughput measurement of `process_packets`: builds 100k
    /// pre-verified transactions between fresh keypairs, times a single
    /// call, and prints the resulting TPS. Timing uses `Instant` directly,
    /// so the `Bencher` argument is unused.
    #[bench]
    fn process_packets_bench(_bencher: &mut Bencher) {
        let mint = Mint::new(100_000_000);
        let acc = Accountant::new(&mint);
        let rsp_addr: SocketAddr = "0.0.0.0:0".parse().expect("socket address");
        // Create transactions between unrelated parties.
        let txs = 100_000;
        // Entry ids registered so far; the Mutex lets the rayon workers
        // share the set while seeding accounts in parallel.
        let last_ids: Mutex<HashSet<Hash>> = Mutex::new(HashSet::new());
        let transactions: Vec<_> = (0..txs)
            .into_par_iter()
            .map(|i| {
                // Seed the 'to' account and a cell for its signature.
                let dummy_id = i % (MAX_ENTRY_IDS as i32);
                let last_id = hash(&serialize(&dummy_id).unwrap()); // Semi-unique hash
                {
                    let mut last_ids = last_ids.lock().unwrap();
                    if !last_ids.contains(&last_id) {
                        last_ids.insert(last_id);
                        acc.register_entry_id(&last_id);
                    }
                }
                // Seed the 'from' account.
                let rando0 = KeyPair::new();
                let tr = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
                acc.process_verified_transaction(&tr).unwrap();
                let rando1 = KeyPair::new();
                let tr = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
                acc.process_verified_transaction(&tr).unwrap();
                // Finally, return a transaction that's unique
                Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
            })
            .collect();
        // Mark every transaction as already signature-verified (flag 1).
        let req_vers = transactions
            .into_iter()
            .map(|tr| (Request::Transaction(tr), rsp_addr, 1_u8))
            .collect();
        let historian = Historian::new(&mint.last_id(), None);
        let mut skel = AccountantSkel::new(acc, mint.last_id(), sink(), historian);
        let now = Instant::now();
        assert!(skel.process_packets(req_vers).is_ok());
        let duration = now.elapsed();
        let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0;
        let tps = txs as f64 / sec;
        // Ensure that all transactions were successfully logged.
        // A Tick followed by dropping the sender flushes the Historian so
        // its receiver iterator terminates.
        skel.historian.sender.send(Signal::Tick).unwrap();
        drop(skel.historian.sender);
        let entries: Vec<Entry> = skel.historian.receiver.iter().collect();
        assert_eq!(entries.len(), 1);
        assert_eq!(entries[0].events.len(), txs as usize);
        println!("{} tps", tps);
    }
}