Преглед изворни кода

Removed the Block struct and replaced it with a trait.

Matthew Carr пре 2 година
родитељ
комит
7923009ea3
3 измењених фајлова са 747 додато и 389 уклоњено
  1. 285 186
      crates/btlib/src/crypto/mod.rs
  2. 434 182
      crates/btlib/src/lib.rs
  3. 28 21
      crates/btlib/src/test_helpers.rs

+ 285 - 186
crates/btlib/src/crypto/mod.rs

@@ -1,11 +1,11 @@
 /// Functions for performing cryptographic operations on the main data structures.
 mod tpm;
 
-use crate::{BoxInIoErr, Decompose, IntegrityWrite, SectoredBuf, SECTOR_SZ_DEFAULT};
+use crate::{Block, BoxInIoErr, Decompose, HeaderAccess, Trailered, WriteInteg, SECTOR_SZ_DEFAULT};
 
 use super::{
-    fmt, io, BigArray, Block, Deserialize, Display, Epoch, Formatter, Hashable, Header, Owned,
-    Path, Principal, Read, Sectored, Seek, Serialize, TryCompose, Write, Writecap,
+    fmt, io, BigArray, Deserialize, Display, Epoch, Formatter, Hashable, Header, Owned, Path,
+    Principal, Read, Sectored, Seek, Serialize, TryCompose, Write, Writecap,
 };
 
 use btserde::{self, from_vec, to_vec, write_to};
@@ -25,7 +25,6 @@ use serde::{
     ser::{SerializeStruct, Serializer},
 };
 use std::{
-    convert::Infallible,
     io::{ErrorKind, SeekFrom},
     marker::PhantomData,
     num::TryFromIntError,
@@ -191,6 +190,12 @@ impl Default for HashKind {
     }
 }
 
+impl Default for Hash {
+    fn default() -> Self {
+        HashKind::default().into()
+    }
+}
+
 impl HashKind {
     pub const fn len(self) -> usize {
         match self {
@@ -286,7 +291,7 @@ impl Display for Hash {
 }
 
 /// A cryptographic signature.
-#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
+#[derive(Debug, PartialEq, Serialize, Deserialize, Clone, Default)]
 pub struct Signature {
     kind: Sign,
     data: Vec<u8>,
@@ -630,6 +635,12 @@ pub enum Sign {
     RsaSsaPss(RsaSsaPss),
 }
 
+impl Default for Sign {
+    fn default() -> Self {
+        Self::RSA_PSS_2048_SHA_256
+    }
+}
+
 impl Scheme for Sign {
     type Kind = Sign;
 
@@ -1098,6 +1109,16 @@ pub(crate) trait Encrypter {
     fn encrypt(&self, slice: &[u8]) -> Result<Vec<u8>>;
 }
 
+pub(crate) trait EncrypterExt: Encrypter {
+    fn ser_encrypt<T: Serialize>(&self, value: &T) -> Result<Ciphertext<T>> {
+        let data = to_vec(value)?;
+        let data = self.encrypt(&data)?;
+        Ok(Ciphertext::new(data))
+    }
+}
+
+impl<T: Encrypter + ?Sized> EncrypterExt for T {}
+
 pub(crate) trait Decrypter {
     fn decrypt(&self, slice: &[u8]) -> Result<Vec<u8>>;
 }
@@ -1225,9 +1246,9 @@ pub trait MerkleNode: Default + Serialize + for<'de> Deserialize<'de> {
 // need to have different structs to support different kinds of hashes.
 /// A struct for storing SHA2 256 hashes in a `MerkleTree`.
 #[derive(Default, Serialize, Deserialize)]
-struct Sha2_256(Option<[u8; HashKind::Sha2_256.len()]>);
+struct Sha2_256Node(Option<[u8; HashKind::Sha2_256.len()]>);
 
-impl Sha2_256 {
+impl Sha2_256Node {
     fn as_slice(&self) -> Option<&[u8]> {
         self.0.as_ref().map(|e| e.as_slice())
     }
@@ -1268,13 +1289,13 @@ impl Sha2_256 {
     }
 }
 
-impl MerkleNode for Sha2_256 {
+impl MerkleNode for Sha2_256Node {
     const KIND: HashKind = HashKind::Sha2_256;
 
     fn new<'a, I: Iterator<Item = &'a [u8]>>(parts: I) -> Result<Self> {
         let mut array = [0u8; Self::KIND.len()];
         Self::digest(&mut array, parts)?;
-        Ok(Sha2_256(Some(array)))
+        Ok(Sha2_256Node(Some(array)))
     }
 
     fn combine<'a, I: Iterator<Item = &'a [u8]>>(
@@ -1386,6 +1407,24 @@ impl BinTreeIndex {
     }
 }
 
+trait MerkleTree: Sectored {
+    /// Checks that the root node contains the given hash data. If it does then `Ok(())` is
+    /// returned. If it doesn't, then `Err(Error::HashCmpFailure)` is returned.
+    fn assert_root_contains(&mut self, hash_data: &[u8]) -> Result<()>;
+
+    /// Hashes the given data, adds a new node to the tree with its hash and updates the hashes
+    /// of all parent nodes.
+    fn write(&mut self, offset: usize, data: &[u8]) -> Result<()>;
+
+    /// Verifies that the given data stored from the given offset into the protected data, has not
+    /// been modified.
+    fn verify(&self, offset: usize, data: &[u8]) -> Result<()>;
+
+    /// Returns the hash data stored in the root node of the tree. An error is returned if and only
+    /// if the tree is empty.
+    fn root_hash(&self) -> Result<&[u8]>;
+}
+
 /// An implementation of a Merkle tree, a tree for storing hashes. This implementation is a binary
 /// tree which stores its nodes in a vector to ensure data locality.
 ///
@@ -1395,7 +1434,7 @@ impl BinTreeIndex {
 /// Each sector corresponds to an offset into the protected data, and in order to verify that a
 /// sector has not been modified, you must supply the offset of the sector.
 #[derive(Serialize, Deserialize)]
-pub struct MerkleTree<T> {
+pub struct VecMerkleTree<T> {
     nodes: Vec<T>,
     /// The size of the sectors of data that this tree will protect.
     sector_sz: usize,
@@ -1403,7 +1442,7 @@ pub struct MerkleTree<T> {
     root_verified: bool,
 }
 
-impl<T> MerkleTree<T> {
+impl<T> VecMerkleTree<T> {
     /// A byte to prefix data being hashed for leaf nodes. It's important that this is different
     /// from `INTERIOR_PREFIX`.
     const LEAF_PREFIX: &'static [u8] = b"Leaf";
@@ -1412,8 +1451,8 @@ impl<T> MerkleTree<T> {
     const INTERIOR_PREFIX: &'static [u8] = b"Interior";
 
     /// Creates a new tree with no nodes in it and the given sector size.
-    fn empty(sector_sz: usize) -> MerkleTree<T> {
-        MerkleTree {
+    fn empty(sector_sz: usize) -> VecMerkleTree<T> {
+        VecMerkleTree {
             nodes: Vec::new(),
             sector_sz,
             root_verified: true,
@@ -1478,9 +1517,34 @@ impl<T> MerkleTree<T> {
     }
 }
 
-impl<T: MerkleNode> MerkleTree<T> {
-    /// Checks that the root node contains the given hash data. If it does then `Ok(())` is
-    /// returned. If it doesn't, then `Err(Error::HashCmpFailure)` is returned.
+impl<T: MerkleNode> VecMerkleTree<T> {
+    /// Percolates the hash change at the given node up to the root.
+    fn perc_up(&mut self, start: BinTreeIndex) -> Result<()> {
+        for index in start.ancestors() {
+            self.combine_children(index)?;
+        }
+        Ok(())
+    }
+
+    /// Combines the hashes of the given node's children and stores the result in the given node.
+    fn combine_children(&mut self, index: BinTreeIndex) -> Result<()> {
+        let left = index.left();
+        let right = index.right();
+        // Note that index < left && index < right.
+        let split = index.0 + 1;
+        let (front, back) = self.nodes.split_at_mut(split);
+        let dest = &mut front[front.len() - 1];
+        let left = back.get(left.0 - split);
+        let right = back.get(right.0 - split);
+        dest.combine(Self::interior_prefix(), left, right)
+            .map_err(|_| Error::IndexOutOfBounds {
+                index: index.0,
+                limit: Self::len(self.generations() - 1),
+            })
+    }
+}
+
+impl<T: MerkleNode> MerkleTree for VecMerkleTree<T> {
     fn assert_root_contains(&mut self, hash_data: &[u8]) -> Result<()> {
         match self.hash_at(BinTreeIndex(0)) {
             Ok(root) => {
@@ -1492,8 +1556,6 @@ impl<T: MerkleNode> MerkleTree<T> {
         }
     }
 
-    /// Hashes the given data, adds a new node to the tree with its hash and updates the hashes
-    /// of all parent nodes.
     fn write(&mut self, offset: usize, data: &[u8]) -> Result<()> {
         self.assert_sector_sz(data.len())?;
 
@@ -1543,31 +1605,6 @@ impl<T: MerkleNode> MerkleTree<T> {
         self.perc_up(index)
     }
 
-    /// Percolates up the hash change to the given node to the root.
-    fn perc_up(&mut self, start: BinTreeIndex) -> Result<()> {
-        for index in start.ancestors() {
-            self.combine_children(index)?;
-        }
-        Ok(())
-    }
-
-    /// Combines the hashes of the given node's children and stores it in the given node.
-    fn combine_children(&mut self, index: BinTreeIndex) -> Result<()> {
-        let left = index.left();
-        let right = index.right();
-        // Note that index < left && index < right.
-        let split = index.0 + 1;
-        let (front, back) = self.nodes.split_at_mut(split);
-        let dest = &mut front[front.len() - 1];
-        let left = back.get(left.0 - split);
-        let right = back.get(right.0 - split);
-        dest.combine(Self::interior_prefix(), left, right)
-            .map_err(|_| Error::IndexOutOfBounds {
-                index: index.0,
-                limit: Self::len(self.generations() - 1),
-            })
-    }
-
     /// Verifies that the given data stored from the given offset into the protected data, has not
     /// been modified.
     fn verify(&self, offset: usize, data: &[u8]) -> Result<()> {
@@ -1586,88 +1623,182 @@ impl<T: MerkleNode> MerkleTree<T> {
         }
         Ok(())
     }
+
+    fn root_hash(&self) -> Result<&[u8]> {
+        self.nodes
+            .first()
+            .map(|node| node.try_as_slice())
+            .ok_or_else(|| Error::custom("the tree is empty"))?
+    }
 }
 
-impl<T> Sectored for MerkleTree<T> {
+impl<T> Sectored for VecMerkleTree<T> {
     fn sector_sz(&self) -> usize {
         self.sector_sz
     }
 }
 
-struct MerkleStream<T, H> {
-    inner: T,
-    tree: MerkleTree<H>,
-    offset: usize,
+impl<T> Default for VecMerkleTree<T> {
+    fn default() -> Self {
+        Self::empty(SECTOR_SZ_DEFAULT)
+    }
 }
 
-impl<H> MerkleStream<(), H> {
-    fn new(tree: MerkleTree<H>) -> Self {
-        MerkleStream {
-            inner: (),
-            tree,
-            offset: 0,
+#[derive(Serialize, Deserialize, EnumDiscriminants)]
+#[strum_discriminants(name(MerkleTreeKind))]
+enum VariantMerkleTree {
+    Sha2_256(VecMerkleTree<Sha2_256Node>),
+}
+
+impl VariantMerkleTree {
+    fn empty(kind: MerkleTreeKind, sector_sz: usize) -> VariantMerkleTree {
+        match kind {
+            MerkleTreeKind::Sha2_256 => {
+                Self::Sha2_256(VecMerkleTree::<Sha2_256Node>::empty(sector_sz))
+            }
         }
     }
 }
 
-impl<T, H> Sectored for MerkleStream<T, H> {
+impl Sectored for VariantMerkleTree {
+    fn sector_sz(&self) -> usize {
+        match self {
+            Self::Sha2_256(tree) => tree.sector_sz(),
+        }
+    }
+}
+
+impl MerkleTree for VariantMerkleTree {
+    fn assert_root_contains(&mut self, hash_data: &[u8]) -> Result<()> {
+        match self {
+            Self::Sha2_256(tree) => tree.assert_root_contains(hash_data),
+        }
+    }
+
+    fn root_hash(&self) -> Result<&[u8]> {
+        match self {
+            Self::Sha2_256(tree) => tree.root_hash(),
+        }
+    }
+
+    fn verify(&self, offset: usize, data: &[u8]) -> Result<()> {
+        match self {
+            Self::Sha2_256(tree) => tree.verify(offset, data),
+        }
+    }
+
+    fn write(&mut self, offset: usize, data: &[u8]) -> Result<()> {
+        match self {
+            Self::Sha2_256(tree) => tree.write(offset, data),
+        }
+    }
+}
+
+impl Default for VariantMerkleTree {
+    fn default() -> Self {
+        Self::Sha2_256(VecMerkleTree::<Sha2_256Node>::default())
+    }
+}
+
+pub struct MerkleStream<T> {
+    trailered: Trailered<T, VariantMerkleTree>,
+    tree: VariantMerkleTree,
+    pos: usize,
+}
+
+impl<T: Read + Seek> MerkleStream<T> {
+    /// Reads a `MerkleTree` from the end of the given stream and returns a stream which uses it.
+    pub fn new(inner: T) -> Result<MerkleStream<T>> {
+        let (trailered, tree) = Trailered::new(inner)?;
+        Ok(MerkleStream {
+            trailered,
+            tree: tree.unwrap_or_default(),
+            pos: 0,
+        })
+    }
+
+    fn with_tree(inner: T, tree: VariantMerkleTree) -> Result<MerkleStream<T>> {
+        let (trailered, trailer) = Trailered::new(inner)?;
+        if trailer.is_some() {
+            return Err(Error::custom(
+                "stream already contained a serialized merkle tree",
+            ));
+        }
+        Ok(MerkleStream {
+            trailered,
+            tree,
+            pos: 0,
+        })
+    }
+}
+
+impl<T> Sectored for MerkleStream<T> {
     fn sector_sz(&self) -> usize {
         self.tree.sector_sz()
     }
 }
 
-impl<T, H> TryCompose<T, MerkleStream<T, H>> for MerkleStream<(), H> {
-    type Error = Infallible;
-    fn try_compose(self, inner: T) -> std::result::Result<MerkleStream<T, H>, Infallible> {
+impl<T: Read + Seek> TryCompose<T, MerkleStream<T>> for MerkleStream<()> {
+    type Error = crate::Error;
+    fn try_compose(self, inner: T) -> std::result::Result<MerkleStream<T>, Self::Error> {
+        let (trailered, tree) = Trailered::new(inner)?;
         Ok(MerkleStream {
-            inner,
-            tree: self.tree,
-            offset: self.offset,
+            trailered,
+            tree: tree.unwrap_or_default(),
+            pos: 0,
         })
     }
 }
 
-impl<T, H> Decompose<T> for MerkleStream<T, H> {
+impl<T> Decompose<T> for MerkleStream<T> {
     fn into_inner(self) -> T {
-        self.inner
+        self.trailered.inner
     }
 }
 
-impl<T: IntegrityWrite, H: MerkleNode> Write for MerkleStream<T, H> {
+impl<T: WriteInteg + Seek> Write for MerkleStream<T> {
     fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
         self.assert_sector_sz(buf.len())?;
-        self.tree.write(self.offset, buf)?;
-        // Safety: We know the root node exists and is non-empty because we just wrote data into
-        // the tree.
-        let root = self.tree.nodes.first().unwrap();
-        let written = self.inner.integrity_write(buf, root.try_as_slice()?)?;
-        self.offset += self.sector_sz();
+        self.tree.write(self.pos, buf)?;
+        let written = self.trailered.write(buf)?;
+        self.pos += self.sector_sz();
         Ok(written)
     }
 
     fn flush(&mut self) -> io::Result<()> {
-        Ok(())
+        let root = self.tree.root_hash()?;
+        self.trailered.flush_integ(&self.tree, root)
     }
 }
 
-impl<T: Read, H: MerkleNode> Read for MerkleStream<T, H> {
+impl<T: Read + Seek> Read for MerkleStream<T> {
     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
         self.assert_sector_sz(buf.len())?;
-        self.inner.read_exact(buf)?;
-        self.tree.verify(self.offset, buf)?;
-        self.offset += self.sector_sz();
+        self.trailered.read_exact(buf)?;
+        self.tree.verify(self.pos, buf)?;
+        self.pos += self.sector_sz();
         Ok(self.sector_sz())
     }
 }
 
-impl<T: Seek, H> Seek for MerkleStream<T, H> {
+impl<T: Seek> Seek for MerkleStream<T> {
     fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
-        let from_start = self.inner.seek(pos)?;
-        self.offset = from_start.try_into().box_err()?;
+        let from_start = self.trailered.seek(pos)?;
+        self.pos = from_start.try_into().box_err()?;
         Ok(from_start)
     }
 }
 
+impl<T: HeaderAccess> HeaderAccess for MerkleStream<T> {
+    fn block_key(&self) -> crate::Result<SymKey> {
+        self.trailered.inner.block_key()
+    }
+
+    fn add_readcap_for(&mut self, owner: Principal, key: &dyn Encrypter) -> crate::Result<()> {
+        self.trailered.inner.add_readcap_for(owner, key)
+    }
+}
+
 // A stream which encrypts all data written to it and decrypts all data read from it.
 pub struct SecretStream<T> {
     inner: T,
@@ -1825,37 +1956,25 @@ impl<T: Seek> Seek for SecretStream<T> {
     }
 }
 
-pub(crate) fn unveil<T: Read + IntegrityWrite + Seek, C: Encrypter + Decrypter, D>(
-    block: Block<T, D>,
-    principal: &Principal,
-    creds: &C,
-) -> Result<Block<impl Read + Write + Seek, D>> {
-    let readcap = {
-        let shared = block.shared.read()?;
-        let readcap_ct = shared
-            .header
-            .readcaps
-            .get(principal)
-            .ok_or(Error::NoReadCap)?;
-        decrypt(readcap_ct, creds)?
-    };
-    let block = block
-        // TODO: The merkle tree needs to be supplied in some other way.
-        .compose_body(MerkleStream::new(MerkleTree::<Sha2_256>::empty(
-            SECTOR_SZ_DEFAULT,
-        )))
-        .try_compose_body(SecretStream::new(readcap))?
-        .try_compose_body(SectoredBuf::new())?;
-    Ok(block)
+impl<T: HeaderAccess> HeaderAccess for SecretStream<T> {
+    fn block_key(&self) -> crate::Result<SymKey> {
+        self.inner.block_key()
+    }
+
+    fn add_readcap_for(&mut self, owner: Principal, key: &dyn Encrypter) -> crate::Result<()> {
+        self.inner.add_readcap_for(owner, key)
+    }
 }
 
+impl<T: Read + Write + Seek + HeaderAccess> Block for SecretStream<T> {}
+
 pub(crate) fn encrypt<T: Serialize, K: Encrypter>(value: &T, key: &K) -> Result<Ciphertext<T>> {
     let data = to_vec(value)?;
     let data = key.encrypt(&data)?;
     Ok(Ciphertext::new(data))
 }
 
-pub(crate) fn decrypt<T: Serialize + DeserializeOwned, K: Decrypter>(
+pub(crate) fn decrypt<T: DeserializeOwned, K: Decrypter>(
     ciphertext: &Ciphertext<T>,
     key: &K,
 ) -> Result<T> {
@@ -1864,22 +1983,21 @@ pub(crate) fn decrypt<T: Serialize + DeserializeOwned, K: Decrypter>(
     Ok(from_vec(&plaintext)?)
 }
 
-pub(crate) fn sign_block<T, K: Signer, C>(block: &Block<T, C>, priv_key: &K) -> Result<()> {
-    let mut shared = block.shared.write()?;
-    let header = to_vec(&shared.header)?;
-    let signature = priv_key.sign(std::iter::once(header.as_slice()))?;
-    shared.sig = signature;
-    Ok(())
+pub(crate) fn sign_header<K: Signer>(header: &Header, signer: &K) -> Result<Signature> {
+    let header = to_vec(&header)?;
+    signer.sign(std::iter::once(header.as_slice()))
 }
 
-pub(crate) fn verify_block<T, C>(block: &Block<T, C>) -> Result<()> {
-    let shared = block.shared.read()?;
-    verify_writecap(&shared.header.writecap, &shared.header.path)?;
-    let header_data = to_vec(&shared.header)?;
-    shared.header.writecap.signing_key.verify(
-        std::iter::once(header_data.as_slice()),
-        shared.sig.as_slice(),
-    )
+pub(crate) fn verify_header(header: &Header, sig: &Signature) -> Result<()> {
+    let writecap = header
+        .writecap
+        .as_ref()
+        .ok_or(crate::Error::MissingWritecap)?;
+    verify_writecap(writecap, &header.path)?;
+    let header_data = to_vec(&header)?;
+    writecap
+        .signing_key
+        .verify(std::iter::once(header_data.as_slice()), sig.as_slice())
 }
 
 #[derive(Serialize)]
@@ -1970,52 +2088,22 @@ pub(crate) fn verify_writecap(mut writecap: &Writecap, path: &Path) -> Result<()
     Err(WritecapAuthzErr::ChainTooLong(CHAIN_LEN_LIMIT).into())
 }
 
-pub fn verify_header(_header: &Header, _sig: &Signature) -> Result<()> {
-    unimplemented!()
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;
     use crate::{test_helpers::*, BrotliParams, SectoredBuf, SECTOR_SZ_DEFAULT};
     use std::{io::SeekFrom, time::Duration};
 
-    fn encrypt_decrypt_block_test_case<T: Read + IntegrityWrite + Seek, C: Creds, D>(
-        mut block: Block<T, D>,
-        principal: &Principal,
-        creds: &C,
-    ) {
-        let expected = {
-            let mut dest = Vec::new();
-            block.seek(SeekFrom::Start(0)).expect("seek failed");
-            block
-                .body
-                .read_to_end(&mut dest)
-                .expect("read_to_end failed");
-            block.seek(SeekFrom::Start(0)).expect("seek failed");
-            dest
-        };
-        let mut block = unveil(block, principal, creds).expect("unveil failed");
-        block.write_all(&expected).expect("write_all failed");
-        block.flush().expect("flush failed");
-        block.seek(SeekFrom::Start(0)).expect("seek failed");
-        let actual = {
-            let mut actual = Vec::new();
-            block
-                .read_to_end(&mut actual)
-                .expect("failed to read actual block contents");
-            actual
-        };
-        assert_eq!(expected, actual);
-    }
-
     #[test]
     fn encrypt_decrypt_block() {
+        const SECT_SZ: usize = 16;
+        const SECT_CT: usize = 8;
         let key = make_key_pair();
         let readcap = make_readcap_for(&key);
-        let principal = readcap.issued_to.clone();
-        let block = make_block_with(readcap);
-        encrypt_decrypt_block_test_case(block, &principal, &key)
+        let mut block = make_block_with(readcap);
+        write_fill(&mut block, SECT_SZ, SECT_CT);
+        block.seek(SeekFrom::Start(0)).expect("seek failed");
+        read_check(block, SECT_SZ, SECT_CT);
     }
 
     #[test]
@@ -2027,18 +2115,6 @@ mod tests {
         key.verify([header, message].into_iter(), signature.as_slice())
     }
 
-    #[test]
-    fn sign_verify_block_rsa() -> Result<()> {
-        let readcap = make_readcap();
-        let principal = readcap.issued_to.clone();
-        let block = make_block_with(readcap);
-        let key = make_key_pair();
-        let mut block = unveil(block, &principal, &key).expect("unveil failed");
-        sign_block(&mut block, &key)?;
-        verify_block(&block)?;
-        Ok(())
-    }
-
     #[test]
     fn hash_to_string() {
         let hash = make_principal().0;
@@ -2143,8 +2219,8 @@ mod tests {
     #[test]
     fn aeadkey_encrypt_decrypt_aes256gcm() {
         let key = AeadKey::new(AeadKeyKind::AesGcm256).expect("failed to create key");
-        let aad = [0u8; 16];
-        let expected = [0u8; 32];
+        let aad = [1u8; 16];
+        let expected = [2u8; 32];
         let tagged = key.encrypt(aad, &expected).expect("encrypt failed");
         let actual = key.decrypt(&tagged).expect("decrypt failed");
         assert_eq!(expected, actual.as_slice());
@@ -2153,10 +2229,21 @@ mod tests {
     #[test]
     fn aeadkey_decrypt_fails_when_ct_modified() {
         let key = AeadKey::new(AeadKeyKind::AesGcm256).expect("failed to create key");
-        let aad = [0u8; 16];
-        let expected = [0u8; 32];
+        let aad = [1u8; 16];
+        let expected = [2u8; 32];
         let mut tagged = key.encrypt(aad, &expected).expect("encrypt failed");
-        tagged.ciphertext.data[0] += 1;
+        tagged.ciphertext.data[0] = tagged.ciphertext.data[0].wrapping_add(1);
+        let result = key.decrypt(&tagged);
+        assert!(result.is_err())
+    }
+
+    #[test]
+    fn aeadkey_decrypt_fails_when_aad_modified() {
+        let key = AeadKey::new(AeadKeyKind::AesGcm256).expect("failed to create key");
+        let aad = [1u8; 16];
+        let expected = [2u8; 32];
+        let mut tagged = key.encrypt(aad, &expected).expect("encrypt failed");
+        tagged.aad[0] = tagged.aad[0].wrapping_add(1);
         let result = key.decrypt(&tagged);
         assert!(result.is_err())
     }
@@ -2343,8 +2430,10 @@ mod tests {
         assert_eq!(63, log2(usize::MAX));
     }
 
-    fn make_tree_with<const SZ: usize>(num_sects: usize) -> (MerkleTree<Sha2_256>, Vec<[u8; SZ]>) {
-        let mut tree = MerkleTree::<Sha2_256>::empty(SZ);
+    fn make_tree_with<const SZ: usize>(
+        num_sects: usize,
+    ) -> (VecMerkleTree<Sha2_256Node>, Vec<[u8; SZ]>) {
+        let mut tree = VecMerkleTree::<Sha2_256Node>::empty(SZ);
         let mut sectors = Vec::with_capacity(num_sects);
         for k in 1..(num_sects + 1) {
             let offset = SZ * (k - 1);
@@ -2386,7 +2475,7 @@ mod tests {
     #[test]
     fn merkle_tree_data_changed_verify_fails() {
         const SZ: usize = SECTOR_SZ_DEFAULT;
-        let mut tree = MerkleTree::<Sha2_256>::empty(SZ);
+        let mut tree = VecMerkleTree::<Sha2_256Node>::empty(SZ);
         let one = [1u8; SZ];
         let mut two = [2u8; SZ];
         let three = [3u8; SZ];
@@ -2405,7 +2494,7 @@ mod tests {
     #[test]
     fn merkle_tree_root_not_verified_verify_fails() {
         const SZ: usize = SECTOR_SZ_DEFAULT;
-        let mut tree = MerkleTree::<Sha2_256>::empty(SZ);
+        let mut tree = VecMerkleTree::<Sha2_256Node>::empty(SZ);
         let one = [1u8; SZ];
         let two = [2u8; SZ];
         let three = [3u8; SZ];
@@ -2413,22 +2502,22 @@ mod tests {
         tree.write(SZ, &two).expect("append two failed");
         tree.write(2 * SZ, &three).expect("append three failed");
         let vec = to_vec(&tree).expect("to_vec failed");
-        let tree: MerkleTree<Sha2_256> = from_vec(&vec).expect("from_vec failed");
+        let tree: VecMerkleTree<Sha2_256Node> = from_vec(&vec).expect("from_vec failed");
 
         tree.verify(SZ, &two)
             .expect_err("verify succeeded, though it should have failed");
     }
 
-    fn merkle_stream_sequential_test_case(sect_sz: usize, sect_count: usize) {
-        let mut stream = MerkleStream::new(MerkleTree::<Sha2_256>::empty(sect_sz))
-            .try_compose(BtCursor::new(vec![0u8; sect_count * sect_sz]))
-            .expect("compose failed");
-        for k in 1..(sect_count + 1) {
+    fn merkle_stream_sequential_test_case(sect_sz: usize, sect_ct: usize) {
+        let tree = VariantMerkleTree::empty(MerkleTreeKind::Sha2_256, sect_sz);
+        let mut stream =
+            MerkleStream::with_tree(BtCursor::new(Vec::new()), tree).expect("read from end failed");
+        for k in 1..(sect_ct + 1) {
             let sector = vec![k as u8; sect_sz];
             stream.write(&sector).expect("write failed");
         }
         stream.seek(SeekFrom::Start(0)).expect("seek failed");
-        for k in 1..(sect_count + 1) {
+        for k in 1..(sect_ct + 1) {
             let expected = vec![k as u8; sect_sz];
             let mut actual = vec![0u8; sect_sz];
             stream.read(&mut actual).expect("read failed");
@@ -2445,10 +2534,23 @@ mod tests {
         merkle_stream_sequential_test_case(8192, 20);
     }
 
+    fn make_merkle_stream_filled_with_zeros(
+        sect_sz: usize,
+        sect_ct: usize,
+    ) -> MerkleStream<BtCursor<Vec<u8>>> {
+        let tree = VariantMerkleTree::empty(MerkleTreeKind::Sha2_256, sect_sz);
+        let mut stream =
+            MerkleStream::with_tree(BtCursor::new(Vec::new()), tree).expect("read from end failed");
+        let zeros = vec![0u8; sect_sz];
+        for _ in 0..sect_ct {
+            stream.write(&zeros).expect("write zeros failed");
+        }
+        stream.seek(SeekFrom::Start(0)).expect("seek failed");
+        stream
+    }
+
     fn merkle_stream_random_test_case(rando: Randomizer, sect_sz: usize, sect_ct: usize) {
-        let mut stream = MerkleStream::new(MerkleTree::<Sha2_256>::empty(sect_sz))
-            .try_compose(BtCursor::new(vec![0u8; sect_sz * sect_ct]))
-            .expect("compose failed");
+        let mut stream = make_merkle_stream_filled_with_zeros(sect_sz, sect_ct);
         let indices: Vec<usize> = rando.take(sect_ct).map(|e| e % sect_ct).collect();
         for index in indices.iter().map(|e| *e) {
             let offset = sect_sz * index;
@@ -2486,10 +2588,7 @@ mod tests {
     fn compose_merkle_and_secret_streams() {
         const SECT_SZ: usize = 4096;
         const SECT_CT: usize = 16;
-        let memory = BtCursor::new([0u8; SECT_SZ * SECT_CT]);
-        let merkle = MerkleStream::new(MerkleTree::<Sha2_256>::empty(SECT_SZ))
-            .try_compose(memory)
-            .expect("compose for merkle failed");
+        let merkle = make_merkle_stream_filled_with_zeros(SECT_SZ, SECT_CT);
         let key = SymKey::generate(SymKeyKind::Aes256Cbc).expect("key generation failed");
         let mut secret = SecretStream::new(key)
             .try_compose(merkle)

+ 434 - 182
crates/btlib/src/lib.rs

@@ -18,10 +18,13 @@ extern crate lazy_static;
 use brotli::{CompressorWriter, Decompressor};
 use btserde::{self, read_from, write_to};
 mod crypto;
-use crypto::{AsymKeyPub, Ciphertext, Hash, HashKind, Sign, Signature, Signer, SymKey};
+use crypto::{
+    AsymKeyPub, Ciphertext, CredsPriv, Decrypter, Encrypter, EncrypterExt, Hash, HashKind,
+    MerkleStream, SecretStream, Sign, Signature, Signer, SymKey,
+};
 
 use log::error;
-use serde::{Deserialize, Serialize};
+use serde::{de::DeserializeOwned, Deserialize, Serialize};
 use serde_big_array::BigArray;
 use std::{
     collections::HashMap,
@@ -30,13 +33,15 @@ use std::{
     fs::{File, OpenOptions},
     hash::Hash as Hashable,
     io::{self, Read, Seek, SeekFrom, Write},
+    marker::PhantomData,
     ops::{Add, Sub},
-    sync::{Arc, PoisonError, RwLock},
+    sync::PoisonError,
     time::{Duration, SystemTime},
 };
 
 #[derive(Debug)]
 enum Error {
+    MissingWritecap,
     Io(std::io::Error),
     Serde(btserde::Error),
     Crypto(crypto::Error),
@@ -53,6 +58,7 @@ impl Error {
 impl Display for Error {
     fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
         match self {
+            Error::MissingWritecap => write!(f, "missing writecap"),
             Error::Io(err) => err.fmt(f),
             Error::Serde(err) => err.fmt(f),
             Error::Crypto(err) => err.fmt(f),
@@ -126,16 +132,10 @@ impl<T, E: Display> StrInIoErr<T> for std::result::Result<T, E> {
     }
 }
 
-/// A Block tagged with its version number. When a block of a previous version is received over
-/// the network or read from the filesystem, it is upgraded to the current version before being
-/// processed.
-#[derive(Debug)]
-enum VersionedBlock<T, C> {
-    V0(Block<T, C>),
-}
-
 const SECTOR_SZ_DEFAULT: usize = 4096;
 
+trait Block: Read + Write + Seek + HeaderAccess {}
+
 // A trait for streams which only allow reads and writes in fixed sized units called sectors.
 trait Sectored {
     // Returns the size of the sector for this stream.
@@ -151,97 +151,141 @@ trait Sectored {
     }
 }
 
-/// A version of the `Write` trait, which allows integrity information to be supplied when writing.
-trait IntegrityWrite {
-    fn integrity_write(&mut self, buf: &[u8], integrity: &[u8]) -> io::Result<usize>;
+/// A version of the `Write` trait, which allows integrity information to be supplied when flushing.
+trait WriteInteg: Write {
+    fn flush_integ(&mut self, integrity: &[u8]) -> io::Result<()>;
+}
+
+trait Decompose<T> {
+    fn into_inner(self) -> T;
+}
+
+trait TryCompose<T, U: Decompose<T>> {
+    type Error;
+    fn try_compose(self, inner: T) -> std::result::Result<U, Self::Error>;
+}
+
+trait Compose<T, U> {
+    fn compose(self, inner: T) -> U;
+}
+
+impl<T, U: Decompose<T>, S: TryCompose<T, U, Error = Infallible>> Compose<T, U> for S {
+    fn compose(self, inner: T) -> U {
+        let result = self.try_compose(inner);
+        // Safety: Infallible has no values, so `result` must be `Ok`.
+        unsafe { result.unwrap_unchecked() }
+    }
+}
+
+trait HeaderAccess {
+    fn block_key(&self) -> Result<SymKey>;
+    fn add_readcap_for(&mut self, owner: Principal, key: &dyn Encrypter) -> Result<()>;
+}
+
+/// Extensions to the `Read` trait.
+trait ReadExt: Read {
+    /// Reads repeatedly until one of the following occur:
+    ///  1. The given buffer is full.
+    ///  2. A call to `read` returns 0.
+    ///  3. A call to `read` returns an error.
+    /// The number of bytes read is returned. If an error is returned, then no bytes were read.
+    fn fill_buf(&mut self, mut dest: &mut [u8]) -> io::Result<usize> {
+        let dest_len_start = dest.len();
+        while !dest.is_empty() {
+            let byte_ct = match self.read(dest) {
+                Ok(byte_ct) => byte_ct,
+                Err(err) => {
+                    if dest_len_start == dest.len() {
+                        return Err(err);
+                    } else {
+                        // We're not allowed to return an error if we've already read from self.
+                        error!("an error occurred in fill_buf: {}", err);
+                        break;
+                    }
+                }
+            };
+            if 0 == byte_ct {
+                break;
+            }
+            dest = &mut dest[byte_ct..];
+        }
+        Ok(dest_len_start - dest.len())
+    }
 }
 
-#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)]
+impl<T: Read> ReadExt for T {}
+
+#[derive(Serialize, Deserialize, Debug, PartialEq, Clone, Default)]
 pub struct Header {
     path: Path,
     readcaps: HashMap<Principal, Ciphertext<SymKey>>,
-    writecap: Writecap,
+    writecap: Option<Writecap>,
     /// A hash which provides integrity for the contents of the block body.
     integrity: Hash,
 }
 
-#[derive(Debug, PartialEq, Serialize, Deserialize)]
-struct BlockShared<C> {
+#[derive(Serialize, Deserialize, Default)]
+struct BlockTrailer {
     header: Header,
     sig: Signature,
-    #[serde(skip)]
-    creds: C,
 }
 
 struct BlockStream<T, C> {
-    shared: Arc<RwLock<BlockShared<C>>>,
-    body_len: u64,
+    trailered: Trailered<T, BlockTrailer>,
+    trailer: BlockTrailer,
     header_buf: Vec<u8>,
-    inner: T,
+    creds: C,
 }
 
-impl<T, C> BlockStream<T, C> {
-    fn new(shared: BlockShared<C>, inner: T, body_len: u64) -> BlockStream<T, C> {
-        BlockStream {
-            shared: Arc::new(RwLock::new(shared)),
-            inner,
+impl<T: Read + Seek, C> BlockStream<T, C> {
+    fn new(inner: T, creds: C) -> Result<BlockStream<T, C>> {
+        let (trailered, trailer) = Trailered::new(inner)?;
+        Ok(BlockStream {
+            trailered,
+            trailer: trailer.unwrap_or_default(),
             header_buf: Vec::new(),
-            body_len,
-        }
+            creds,
+        })
     }
 }
 
-impl<T: Seek + Write, C: std::fmt::Debug + Signer> BlockStream<T, C> {
-    fn write_trailer(&mut self, integrity: &[u8]) -> Result<()> {
-        let pos = self.inner.stream_position()?;
-        self.body_len = self.body_len.max(pos);
-        self.inner.seek(SeekFrom::Start(self.body_len))?;
-        {
-            let mut shared = self.shared.write()?;
-            shared.header.integrity.as_mut().copy_from_slice(integrity);
-            self.header_buf.clear();
-            write_to(&shared.header, &mut self.header_buf)?;
-            shared.sig = shared
-                .creds
-                .sign(std::iter::once(self.header_buf.as_slice()))?;
-
-            self.inner.write_all(&self.header_buf)?;
-            write_to(&shared.sig, &mut self.inner)?;
-        }
-        let end: i64 = (self.inner.stream_position()? + 8).try_into()?;
-        let body_len: i64 = self.body_len.try_into()?;
-        let offset = end - body_len;
-        write_to(&offset, &mut self.inner)?;
-        self.inner.seek(SeekFrom::Start(pos))?;
-        Ok(())
+impl<C> BlockStream<File, C> {
+    fn open<P: AsRef<std::path::Path>>(path: P, creds: C) -> Result<BlockStream<File, C>> {
+        let inner = OpenOptions::new().read(true).write(true).open(path)?;
+        BlockStream::new(inner, creds)
     }
 }
 
-impl<T: Write + Seek, C: std::fmt::Debug + Signer> IntegrityWrite for BlockStream<T, C> {
-    fn integrity_write(&mut self, buf: &[u8], integrity: &[u8]) -> io::Result<usize> {
-        let written = self.inner.write(buf)?;
-        if written > 0 {
-            let result = self.write_trailer(integrity);
-            if let Err(err) = result {
-                error!("error occurred while writing block trailer: {}", err);
-            }
-        }
-        Ok(written)
+impl<T: Write + Seek, C: Signer> Write for BlockStream<T, C> {
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        self.trailered.write(buf)
+    }
+
+    fn flush(&mut self) -> io::Result<()> {
+        Err(io::Error::new(
+            io::ErrorKind::Unsupported,
+            "flush is not supported, use flush_integ instead",
+        ))
     }
 }
 
-impl<T: Read, C> Read for BlockStream<T, C> {
-    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
-        self.inner.read(buf)
+impl<T: Write + Seek, C: Signer> WriteInteg for BlockStream<T, C> {
+    fn flush_integ(&mut self, integrity: &[u8]) -> io::Result<()> {
+        let header = &mut self.trailer.header;
+        header.integrity.as_mut().copy_from_slice(integrity);
+        self.header_buf.clear();
+        write_to(&header, &mut self.header_buf).box_err()?;
+        self.trailer.sig = self
+            .creds
+            .sign(std::iter::once(self.header_buf.as_slice()))?;
+        self.trailered.flush(&self.trailer)?;
+        Ok(())
     }
 }
 
-/// Adds a signed integer to an unsigned integer and returns the result.
-fn add_signed(unsigned: u64, signed: i64) -> u64 {
-    if signed >= 0 {
-        unsigned + signed as u64
-    } else {
-        unsigned - (-signed as u64)
+impl<T: Read + Seek, C> Read for BlockStream<T, C> {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        self.trailered.read(buf)
     }
 }
 
@@ -249,152 +293,214 @@ impl<T: Seek, C> Seek for BlockStream<T, C> {
     /// Seeks to the given position in the stream. If a position beyond the end of the stream is
     /// specified, the seek will be to the end of the stream.
     fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
-        let from_start = match pos {
-            SeekFrom::Start(from_start) => from_start,
-            SeekFrom::Current(from_curr) => add_signed(self.inner.stream_position()?, from_curr),
-            SeekFrom::End(from_end) => add_signed(self.body_len, from_end),
-        };
-        self.inner
-            .seek(SeekFrom::Start(from_start.min(self.body_len)))
+        self.trailered.seek(pos)
     }
 }
 
-/// A container which binds together ciphertext along with the metadata needed to identify,
-/// verify and decrypt it.
-#[derive(Debug)]
-struct Block<T, C> {
-    shared: Arc<RwLock<BlockShared<C>>>,
-    body: T,
-}
-
-impl<T, C> Block<T, C> {
-    fn try_compose_body<E: Into<Error>, U: Decompose<T>, V: TryCompose<T, U, Error = E>>(
-        self,
-        new_body: V,
-    ) -> Result<Block<U, C>> {
-        Ok(Block {
-            shared: self.shared,
-            body: new_body.try_compose(self.body).map_err(|err| err.into())?,
-        })
+impl<T, C: Decrypter + Owned> HeaderAccess for BlockStream<T, C> {
+    fn block_key(&self) -> Result<SymKey> {
+        let readcap = self
+            .trailer
+            .header
+            .readcaps
+            .get(&self.creds.owner())
+            .ok_or(Error::Crypto(crypto::Error::NoReadCap))?;
+        Ok(crypto::decrypt(readcap, &self.creds)?)
+    }
+
+    fn add_readcap_for(&mut self, owner: Principal, key: &dyn Encrypter) -> Result<()> {
+        let block_key = self.block_key()?;
+        let readcap = key.ser_encrypt(&block_key)?;
+        self.trailer.header.readcaps.insert(owner, readcap);
+        Ok(())
     }
+}
 
-    fn compose_body<U: Decompose<T>, V: Compose<T, U>>(self, new_body: V) -> Block<U, C> {
-        Block {
-            shared: self.shared,
-            body: new_body.compose(self.body),
+struct BlockOpenOptions<T, C> {
+    inner: T,
+    creds: C,
+    encrypt: bool,
+    compress: bool,
+}
+
+impl BlockOpenOptions<(), ()> {
+    fn new() -> BlockOpenOptions<(), ()> {
+        BlockOpenOptions {
+            inner: (),
+            creds: (),
+            encrypt: true,
+            compress: true,
         }
     }
 }
 
-impl<T: Read + Seek, C> Block<T, C> {
-    fn with_body(body: BlockStream<T, C>) -> Block<BlockStream<T, C>, C> {
-        Block {
-            shared: body.shared.clone(),
-            body,
+impl<T, C> BlockOpenOptions<T, C> {
+    fn with_inner<U>(self, inner: U) -> BlockOpenOptions<U, C> {
+        BlockOpenOptions {
+            inner,
+            creds: self.creds,
+            encrypt: self.encrypt,
+            compress: self.compress,
         }
     }
 
-    fn new(mut inner: T, creds: C) -> Result<Block<BlockStream<T, C>, C>> {
-        // TODO: What if the inner stream is empty?
-        inner.seek(SeekFrom::End(-8))?;
-        let offset: i64 = read_from(&mut inner)?;
-        let body_len = inner.seek(SeekFrom::Current(offset))?;
-        let header: Header = read_from(&mut inner)?;
-        let sig: Signature = read_from(&mut inner)?;
-        crypto::verify_header(&header, &sig)?;
-        inner.seek(SeekFrom::Start(0))?;
-        let shared = BlockShared { header, sig, creds };
-        let body = BlockStream::new(shared, inner, body_len);
-        Ok(Block::with_body(body))
-    }
-}
-
-impl<C> Block<File, C> {
-    fn from_path<P: AsRef<std::path::Path>>(
-        creds: C,
-        path: P,
-    ) -> Result<Block<BlockStream<File, C>, C>> {
-        let inner = OpenOptions::new().read(true).write(true).open(path)?;
-        Block::new(inner, creds)
+    fn with_creds<D>(self, creds: D) -> BlockOpenOptions<T, D> {
+        BlockOpenOptions {
+            inner: self.inner,
+            creds,
+            encrypt: self.encrypt,
+            compress: self.compress,
+        }
     }
-}
 
-impl<T: Write, C> Write for Block<T, C> {
-    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
-        self.body.write(buf)
+    fn with_encrypt(mut self, encrypt: bool) -> Self {
+        self.encrypt = encrypt;
+        self
     }
 
-    fn flush(&mut self) -> io::Result<()> {
-        self.body.flush()
+    fn with_compress(mut self, compress: bool) -> Self {
+        self.compress = compress;
+        self
     }
 }
 
-impl<T: Read, C> Read for Block<T, C> {
-    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
-        self.body.read(buf)
+impl<T: Read + Write + Seek + 'static, C: CredsPriv + Owned + 'static> BlockOpenOptions<T, C> {
+    fn open(self) -> Result<Box<dyn Block>> {
+        let stream = BlockStream::new(self.inner, self.creds)?;
+        let block_key = stream.block_key()?;
+        let stream = MerkleStream::new(stream)?;
+        if self.encrypt {
+            let stream = SecretStream::new(block_key).try_compose(stream)?;
+            let stream = SectoredBuf::new().try_compose(stream)?;
+            Ok(Box::new(stream))
+        } else {
+            let stream = SectoredBuf::new().try_compose(stream)?;
+            Ok(Box::new(stream))
+        }
     }
 }
 
-impl<T: Seek, C> Seek for Block<T, C> {
-    fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
-        self.body.seek(pos)
+/// A struct which wraps a stream and which writes a trailing data structure to it when flushed.
+struct Trailered<T, D> {
+    inner: T,
+    body_len: u64,
+    phantom: PhantomData<D>,
+}
+
+impl<T: Read + Seek, D: DeserializeOwned> Trailered<T, D> {
+    fn empty(inner: T) -> Trailered<T, D> {
+        Trailered {
+            inner,
+            body_len: 0,
+            phantom: PhantomData,
+        }
+    }
+
+    /// Creates a new `Trailered<T>` containing the given `T`. This method requires that the given
+    /// stream is either empty, or contains a valid serialization of `D` and the offset at which
+    /// `D` is stored.
+    fn new(mut inner: T) -> Result<(Trailered<T, D>, Option<D>)> {
+        let pos = inner.stream_position()?;
+        let end = inner.seek(SeekFrom::End(0))?;
+        if 0 == end {
+            return Ok((Self::empty(inner), None));
+        }
+        inner.seek(SeekFrom::End(-8))?;
+        let offset: i64 = read_from(&mut inner)?;
+        let body_len = inner.seek(SeekFrom::End(offset))?;
+        let trailer: D = read_from(&mut inner)?;
+        inner.seek(SeekFrom::Start(pos))?;
+        Ok((
+            Trailered {
+                inner,
+                body_len,
+                phantom: PhantomData,
+            },
+            Some(trailer),
+        ))
     }
 }
 
-trait Decompose<T> {
-    fn into_inner(self) -> T;
+impl<T: Seek, D> Trailered<T, D> {
+    fn post_write(&mut self, written: usize) -> io::Result<usize> {
+        if 0 == written {
+            return Ok(0);
+        }
+        // I cannot return an error at this point because bytes have already been written to inner.
+        // So if I can't track the body len due to a failure, a panic is the only option.
+        let pos = self
+            .inner
+            .stream_position()
+            .expect("failed to get stream position");
+        self.body_len = self.body_len.max(pos);
+        Ok(written)
+    }
 }
 
-trait TryCompose<T, U: Decompose<T>> {
-    type Error;
-    fn try_compose(self, inner: T) -> std::result::Result<U, Self::Error>;
+impl<T: Read + Seek, D> Read for Trailered<T, D> {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        let pos = self.inner.stream_position()?;
+        let available_u64 = self.body_len - pos;
+        let available: usize = available_u64.try_into().box_err()?;
+        let limit = buf.len().min(available);
+        self.inner.read(&mut buf[..limit])
+    }
 }
 
-trait Compose<T, U> {
-    fn compose(self, inner: T) -> U;
+impl<T: Write + Seek, D: Serialize> Trailered<T, D> {
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        let written = self.inner.write(buf)?;
+        self.post_write(written)
+    }
+
+    fn write_trailer(&mut self, trailer: &D) -> io::Result<u64> {
+        let pos = self.inner.stream_position()?;
+        self.inner.seek(SeekFrom::Start(self.body_len))?;
+        write_to(trailer, &mut self.inner).box_err()?;
+        let offset_u64 = 8 + self.inner.stream_position()? - self.body_len;
+        let offset = -(offset_u64 as i64);
+        write_to(&offset, &mut self.inner).box_err()?;
+        Ok(pos)
+    }
+
+    fn flush(&mut self, trailer: &D) -> io::Result<()> {
+        let prev_pos = self.write_trailer(trailer)?;
+        self.inner.flush()?;
+        self.inner.seek(SeekFrom::Start(prev_pos))?;
+        Ok(())
+    }
 }
 
-impl<T, U: Decompose<T>, S: TryCompose<T, U, Error = Infallible>> Compose<T, U> for S {
-    fn compose(self, inner: T) -> U {
-        let result = self.try_compose(inner);
-        // Safety: Infallible has no values, so `result` must be `Ok`.
-        unsafe { result.unwrap_unchecked() }
+impl<T: WriteInteg + Seek, D: Serialize> Trailered<T, D> {
+    fn flush_integ(&mut self, trailer: &D, integrity: &[u8]) -> io::Result<()> {
+        let prev_pos = self.write_trailer(trailer)?;
+        self.inner.flush_integ(integrity)?;
+        self.inner.seek(SeekFrom::Start(prev_pos))?;
+        Ok(())
     }
 }
 
-/// Extensions to the `Read` trait.
-trait ReadExt: Read {
-    /// Reads repeatedly until one of the following occur:
-    ///  1. The given buffer is full.
-    ///  2. A call to `read` returns 0.
-    ///  3. A call to `read` returns an error.
-    /// The number of bytes read is returned. If an error is returned, then no bytes were read.
-    fn fill_buf(&mut self, mut dest: &mut [u8]) -> io::Result<usize> {
-        let dest_len_start = dest.len();
-        while !dest.is_empty() {
-            let byte_ct = match self.read(dest) {
-                Ok(byte_ct) => byte_ct,
-                Err(err) => {
-                    if dest_len_start == dest.len() {
-                        return Err(err);
-                    } else {
-                        // We're not allowed to return an error if we've already read from self.
-                        error!("an error occurred in fill_buf: {}", err);
-                        break;
-                    }
-                }
-            };
-            if 0 == byte_ct {
-                break;
+impl<T: Seek, D> Seek for Trailered<T, D> {
+    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
+        /// Adds a signed integer to an unsigned integer and returns the result.
+        fn add_signed(unsigned: u64, signed: i64) -> u64 {
+            if signed >= 0 {
+                unsigned + signed as u64
+            } else {
+                unsigned - (-signed as u64)
             }
-            dest = &mut dest[byte_ct..];
         }
-        Ok(dest_len_start - dest.len())
+
+        let from_start = match pos {
+            SeekFrom::Start(from_start) => from_start,
+            SeekFrom::Current(from_curr) => add_signed(self.inner.stream_position()?, from_curr),
+            SeekFrom::End(from_end) => add_signed(self.body_len, from_end),
+        };
+        let from_start = from_start.min(self.body_len);
+        self.inner.seek(SeekFrom::Start(from_start))
     }
 }
 
-impl<T: Read> ReadExt for T {}
-
 impl<T: Write> Decompose<T> for CompressorWriter<T> {
     fn into_inner(self) -> T {
         self.into_inner()
@@ -733,6 +839,18 @@ impl<T: Seek + Read + Write> Seek for SectoredBuf<T> {
     }
 }
 
+impl<T: HeaderAccess> HeaderAccess for SectoredBuf<T> {
+    fn block_key(&self) -> Result<SymKey> {
+        self.inner.block_key()
+    }
+
+    fn add_readcap_for(&mut self, owner: Principal, key: &dyn Encrypter) -> Result<()> {
+        self.inner.add_readcap_for(owner, key)
+    }
+}
+
+impl<T: Read + Write + Seek + HeaderAccess> Block for SectoredBuf<T> {}
+
 /// An envelopment of a key, which is tagged with the principal who the key is meant for.
 #[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
 struct Readcap {
@@ -827,7 +945,7 @@ impl FragmentRecord {
 }
 
 /// An identifier for a security principal, which is any entity that can be authenticated.
-#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Hashable, Clone)]
+#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Hashable, Clone, Default)]
 struct Principal(Hash);
 
 impl Principal {
@@ -848,7 +966,7 @@ trait Owned {
 }
 
 /// An identifier for a block in a tree.
-#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
+#[derive(Debug, PartialEq, Serialize, Deserialize, Clone, Default)]
 struct Path {
     owner: Principal,
     components: Vec<String>,
@@ -998,7 +1116,7 @@ impl Display for PathError {
 }
 
 /// An instant in time represented by the number of seconds since January 1st 1970, 00:00:00 UTC.
-#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, PartialOrd, Ord)]
+#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, PartialOrd, Ord, Default)]
 struct Epoch(u64);
 
 impl Epoch {
@@ -1351,4 +1469,138 @@ mod tests {
         sectored.read(&mut actual).expect("read failed");
         assert_eq!(expected, actual);
     }
+
+    /// Tests that a new `Trailered<T>` can be created from an empty stream.
+    #[test]
+    fn trailered_new_empty() {
+        let cursor = Cursor::new(Vec::new());
+
+        let (_, trailer): (_, Option<String>) =
+            Trailered::new(cursor).expect("Trailered::new failed");
+
+        assert_eq!(None, trailer);
+    }
+
+    /// Tests that an error is returned when an attempt is made to create a `Trailered<T>` from a
+    /// non-empty stream which is too short.
+    #[test]
+    fn trailered_new_inner_too_short_is_error() {
+        let cursor = Cursor::new([0u8; 5]);
+
+        let result = Trailered::<_, u128>::new(cursor);
+
+        assert!(result.is_err())
+    }
+
+    /// Checks that the trailer is persisted to the inner stream.
+    #[test]
+    fn trailered_trailer_persisted() {
+        const EXPECTED: &str = "Everyone deserves to be remembered,";
+        let cursor = {
+            let cursor = Cursor::new(Vec::new());
+            let (mut trailered, trailer) =
+                Trailered::<_, String>::new(cursor).expect("Trailered::new failed");
+            assert!(trailer.is_none());
+            trailered
+                .flush(&EXPECTED.to_string())
+                .expect("flush failed");
+            trailered.inner
+        };
+
+        let (_, trailer) = Trailered::<_, String>::new(cursor).expect("Trailered::new failed");
+
+        assert_eq!(EXPECTED, trailer.unwrap());
+    }
+
+    #[test]
+    fn trailered_written_data_persisted() {
+        const EXPECTED: &[u8] = b"and every life has something to teach us.";
+        let mut cursor = {
+            let (mut trailered, _) = Trailered::<_, u8>::new(Cursor::new(Vec::new()))
+                .expect("failed to create first trailered");
+            trailered.write(EXPECTED).expect("write failed");
+            trailered.flush(&1).expect("flush failed");
+            trailered.inner
+        };
+        cursor.seek(SeekFrom::Start(0)).expect("seek failed");
+        let (mut trailered, _) =
+            Trailered::<_, u8>::new(cursor).expect("failed to created second trailered");
+        let mut actual = vec![0u8; EXPECTED.len()];
+
+        trailered.read(&mut actual).expect("read failed");
+
+        assert_eq!(EXPECTED, actual);
+    }
+
+    fn trailered_for_seek_test() -> Trailered<impl Read + Seek, u8> {
+        let (mut trailered, _) =
+            Trailered::new(Cursor::new(Vec::new())).expect("failed to create trailered");
+        trailered
+            .write(&[0, 1, 2, 3, 4, 5, 6, 7])
+            .expect("write failed");
+        trailered.seek(SeekFrom::Start(0)).expect("seek failed");
+        trailered
+    }
+
+    #[test]
+    fn trailered_seek_from_start() {
+        const EXPECTED: u8 = 2;
+        let mut trailered = trailered_for_seek_test();
+
+        trailered
+            .seek(SeekFrom::Start(EXPECTED as u64))
+            .expect("seek failed");
+
+        let mut actual = [0u8; 1];
+        trailered.read(&mut actual).expect("read failed");
+        assert_eq!(EXPECTED, actual[0]);
+    }
+
+    #[test]
+    fn trailered_seek_from_curr() {
+        const EXPECTED: u8 = 5;
+        let mut trailered = trailered_for_seek_test();
+        trailered
+            .seek(SeekFrom::Start(6))
+            .expect("seek from start failed");
+
+        trailered
+            .seek(SeekFrom::Current(-1))
+            .expect("seek from current failed");
+
+        let mut actual = [0u8; 1];
+        trailered.read(&mut actual).expect("read failed");
+        assert_eq!(EXPECTED, actual[0]);
+    }
+
+    #[test]
+    fn trailered_seek_from_end() {
+        const EXPECTED: u8 = 7;
+        let mut trailered = trailered_for_seek_test();
+
+        trailered.seek(SeekFrom::End(-1)).expect("seek failed");
+
+        let mut actual = [0u8; 1];
+        trailered.read(&mut actual).expect("read failed");
+        assert_eq!(EXPECTED, actual[0]);
+    }
+
+    /// Tests that a read past the end of the body in a `Trailered<T>` is not allowed.
+    #[test]
+    fn trailered_read_limited_to_body_len() {
+        let (mut trailered, trailer) =
+            Trailered::new(Cursor::new(Vec::new())).expect("failed to create Trailered");
+        assert!(trailer.is_none());
+        const EXPECTED: &[u8] = &[1, 1, 1, 1, 1, 0, 0, 0];
+        trailered.write(&[1u8; 5]).expect("write failed");
+        trailered.flush(&1u8).expect("flush failed");
+        trailered.seek(SeekFrom::Start(0)).expect("seek failed");
+        let mut actual = vec![0u8; EXPECTED.len()];
+
+        // If read goes past the end of the body then there will be a 1 in the sixth position of
+        // actual.
+        trailered.read(&mut actual).expect("read failed");
+
+        assert_eq!(EXPECTED, actual);
+    }
 }

+ 28 - 21
crates/btlib/src/test_helpers.rs

@@ -171,30 +171,37 @@ pub(crate) fn make_readcap_for<C: Encrypter + Owned>(creds: &C) -> Readcap {
     }
 }
 
-pub(crate) fn make_block() -> Block<BtCursor<Vec<u8>>, impl Creds> {
+pub(crate) fn make_block() -> Box<dyn Block> {
     make_block_with(make_readcap())
 }
 
-pub(crate) fn make_block_with(readcap: Readcap) -> Block<BtCursor<Vec<u8>>, impl Creds> {
+pub(crate) fn make_block_with(readcap: Readcap) -> Box<dyn Block> {
     let mut readcaps = HashMap::new();
     readcaps.insert(readcap.issued_to, readcap.key);
     // Notice that the writecap path contains the block path. If this were not the case, the block
     // would be invalid.
     let (writecap, creds) = make_writecap_and_creds(vec!["apps"]);
     let root_writecap = writecap.next.as_ref().unwrap();
-    Block {
-        shared: Arc::new(RwLock::new(BlockShared {
-            header: Header {
-                path: make_path_with_owner(root_writecap.issued_to.clone(), vec!["apps", "verse"]),
-                readcaps,
-                writecap,
-                integrity: Hash::Sha2_256([0u8; HashKind::Sha2_256.len()]),
-            },
-            sig: Signature::copy_from(Sign::RSA_PSS_3072_SHA_256, &SIGNATURE),
-            creds,
-        })),
-        body: BtCursor::new(Vec::new()),
-    }
+    let header = Header {
+        path: make_path_with_owner(root_writecap.issued_to.clone(), vec!["apps", "verse"]),
+        readcaps,
+        writecap: Some(writecap),
+        integrity: Hash::Sha2_256([0u8; HashKind::Sha2_256.len()]),
+    };
+    let sig = Signature::copy_from(Sign::RSA_PSS_3072_SHA_256, &SIGNATURE);
+    let mut stream =
+        BlockStream::new(BtCursor::new(Vec::new()), creds).expect("create block stream failed");
+    stream.trailer.header = header;
+    stream.trailer.sig = sig;
+    let block_key = stream.block_key().expect("get block key failed");
+    let stream = MerkleStream::new(stream).expect("create merkle stream failed");
+    let stream = SecretStream::new(block_key)
+        .try_compose(stream)
+        .expect("create secret stream failed");
+    let stream = SectoredBuf::new()
+        .try_compose(stream)
+        .expect("create sectored buf failed");
+    Box::new(stream)
 }
 
 /// This function can be run as a test to write a new RSA key pair, as two Rust arrays,
@@ -414,15 +421,15 @@ impl<T: FromVec> Seek for BtCursor<T> {
     }
 }
 
-impl IntegrityWrite for BtCursor<Vec<u8>> {
-    fn integrity_write(&mut self, buf: &[u8], _integrity: &[u8]) -> io::Result<usize> {
-        self.cursor.get_mut().write(buf)
+impl WriteInteg for BtCursor<Vec<u8>> {
+    fn flush_integ(&mut self, _: &[u8]) -> io::Result<()> {
+        Ok(())
     }
 }
 
-impl<const N: usize> IntegrityWrite for BtCursor<[u8; N]> {
-    fn integrity_write(&mut self, buf: &[u8], integrity: &[u8]) -> io::Result<usize> {
-        self.cursor.get_mut().write(buf)
+impl<const N: usize> WriteInteg for BtCursor<[u8; N]> {
+    fn flush_integ(&mut self, _: &[u8]) -> io::Result<()> {
+        Ok(())
     }
 }