Selaa lähdekoodia

Refactored btlib into smaller modules for ease of maintenance.

Matthew Carr 2 vuotta sitten
vanhempi
commit
59501744f3

+ 1 - 7
crates/btlib/TODO.txt

@@ -1,4 +1,5 @@
 # Format: - <task ID>, <task points>, <created by user>, <created on commit>, <finished by user>, <finished on commit>
 # Format: - <task ID>, <task points>, <created by user>, <created on commit>, <finished by user>, <finished on commit>
+
 - 1
 - 1
 Fix BufSectored so it doesn't have to write to the first sector every flush.
 Fix BufSectored so it doesn't have to write to the first sector every flush.
 
 
@@ -12,9 +13,6 @@ data written to and read from it.
 - 4
 - 4
 Remove TryCompose?
 Remove TryCompose?
 
 
-- 5
-Move crypto::{encrypt, decrypt} into corresponding {EncrypterExt, DecrypterExt}.
-
 - 6
 - 6
 Create an enum to eliminate the use of Block trait objects?
 Create an enum to eliminate the use of Block trait objects?
 
 
@@ -32,10 +30,6 @@ SecretStream::seek, Trailered::seek and SectoredBuf::seek.
 - 10
 - 10
 Create a struct which digests data written to it before passing it to an underlying Write.
 Create a struct which digests data written to it before passing it to an underlying Write.
 
 
-- 11
-Create a struct called WritecapBody to contain the fields of Writecap which go into the signature
-calculation so that WritecapSigInput is no longer required.
-
 - 12, 8, mdcarr941@gmail.com, 2ebb8a,
 - 12, 8, mdcarr941@gmail.com, 2ebb8a,
 Create a struct for managing the directory used to store blocks in the file system. Design and
 Create a struct for managing the directory used to store blocks in the file system. Design and
 implement an API for creating, opening, moving, copying, deleting and linking blocks. This API must
 implement an API for creating, opening, moving, copying, deleting and linking blocks. This API must

+ 12 - 0
crates/btlib/TODONE.txt

@@ -1,3 +1,15 @@
 - 0, 3, mdcarr941@gmail.com, 2ebb8a
 - 0, 3, mdcarr941@gmail.com, 2ebb8a
 Fix bug where writing to a block that already has a Writecap in its header using the creds of
 Fix bug where writing to a block that already has a Writecap in its header using the creds of
 a different node produces an invalid signature (a signature using the creds of the other node).
 a different node produces an invalid signature (a signature using the creds of the other node).
+
+- 14, 13, mdcarr941@gmail.com, bd6904
+Refactor btlib so that most of the types are in their own modules. This is
+needed to encourage modularity and weak coupling, as it reduces the amount of code that fields
+and helper functions are visible to.
+
+- 11, 3, mdcarr941@gmail.com, bd6904, mdcarr941@gmail.com, bd6904
+Create a struct called WritecapBody to contain the fields of Writecap which go into the signature
+calculation so that WritecapSigInput is no longer required.
+
+- 5, 1, mdcarr941@gmail.com, bd6904, mdcarr941@gmail.com, bd6904
+Move crypto::{encrypt, decrypt} into corresponding {EncrypterExt, DecrypterExt}.

+ 301 - 0
crates/btlib/src/block_path.rs

@@ -0,0 +1,301 @@
+pub use private::{BlockPath, BlockPathError};
+
+mod private {
+    use crate::{crypto::Hash, Principal};
+    use serde::{Deserialize, Serialize};
+    use std::fmt::Display;
+
+    /// An identifier for a block in a tree.
+    #[derive(Debug, PartialEq, Serialize, Deserialize, Clone, Default)]
+    pub struct BlockPath {
+        root: Principal,
+        components: Vec<String>,
+    }
+
+    impl BlockPath {
+        /// The character that is used to separate path components.
+        const SEP: char = '/';
+        /// The limit, in bytes, of a path's length.
+        const BYTE_LIMIT: usize = 4096;
+
+        pub fn new(root: Principal, components: Vec<String>) -> BlockPath {
+            BlockPath { root, components }
+        }
+
+        /// Returns a result which, when successful, contains the index after the last character in
+        /// the current path component.
+        fn component_end<I: Iterator<Item = (usize, char)>>(
+            start: usize,
+            first: char,
+            pairs: &mut I,
+        ) -> std::result::Result<usize, BlockPathError> {
+            if first == BlockPath::SEP {
+                return Err(BlockPathError::EmptyComponent);
+            }
+            let end;
+            let mut last = start;
+            loop {
+                match pairs.next() {
+                    Some((index, BlockPath::SEP)) => {
+                        end = index;
+                        break;
+                    }
+                    Some((index, _)) => last = index,
+                    None => {
+                        end = last + 1;
+                        break;
+                    }
+                }
+            }
+            if end == start {
+                Err(BlockPathError::EmptyComponent)
+            } else {
+                Ok(end)
+            }
+        }
+
+        /// Asserts that the number of bytes in the given string is no more than `Path::BYTE_LIMIT`.
+        fn assert_not_too_long(string: &str) -> std::result::Result<(), BlockPathError> {
+            let len = string.len();
+            if len > BlockPath::BYTE_LIMIT {
+                return Err(BlockPathError::PathTooLong(len));
+            }
+            Ok(())
+        }
+
+        /// Returns true if `other` is a subpath of this `Path`.
+        pub fn contains(&self, other: &BlockPath) -> bool {
+            if self.root != other.root {
+                return false;
+            };
+            // This path must be no longer than the other path.
+            if self.components.len() > other.components.len() {
+                return false;
+            }
+            // Skip the component containing the owner.
+            let self_iter = self.components.iter().skip(1);
+            let other_iter = other.components.iter().skip(1);
+            for pair in self_iter.zip(other_iter) {
+                if pair.0 != pair.1 {
+                    return false;
+                }
+            }
+            true
+        }
+
+        pub fn root(&self) -> &Principal {
+            &self.root
+        }
+
+        pub fn mut_root(&mut self) -> &mut Principal {
+            &mut self.root
+        }
+
+        pub fn components(&self) -> impl Iterator<Item = &str> {
+            self.components.iter().map(|e| e.as_str())
+        }
+
+        pub fn mut_components(&mut self) -> impl Iterator<Item = &mut String> {
+            self.components.iter_mut()
+        }
+
+        pub fn push_component(&mut self, component: String) {
+            self.components.push(component)
+        }
+
+        pub fn pop_component(&mut self) -> Option<String> {
+            self.components.pop()
+        }
+    }
+
+    impl<'s> TryFrom<&'s str> for BlockPath {
+        type Error = BlockPathError;
+
+        fn try_from(string: &'s str) -> std::result::Result<BlockPath, BlockPathError> {
+            BlockPath::assert_not_too_long(string)?;
+            let mut pairs = string.char_indices();
+            let mut components = Vec::new();
+            let mut last_end = 0;
+            while let Some((start, c)) = pairs.next() {
+                let end = BlockPath::component_end(start, c, &mut pairs)?;
+                last_end = end;
+                let slice = &string[start..end];
+                components.push(slice.to_string());
+            }
+            // An empty component is added to the end to indicate if there was a trailing slash.
+            if string.len() - 1 == last_end {
+                components.push("".to_string());
+            }
+            let leading = components
+                .get(0)
+                .ok_or(BlockPathError::InvalidLeadingComponent)?;
+            let hash = Hash::try_from(leading.as_str())
+                .map_err(|_| BlockPathError::InvalidLeadingComponent)?;
+            Ok(BlockPath {
+                root: Principal(hash),
+                components,
+            })
+        }
+    }
+
+    impl Display for BlockPath {
+        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+            if self.components.is_empty() {
+                return write!(f, "");
+            };
+            let mut iter = self.components.iter();
+            let first = iter.next().unwrap();
+            let mut output = write!(f, "{}", first);
+            for component in iter {
+                output = write!(f, "{}{}", BlockPath::SEP, component)
+            }
+            output
+        }
+    }
+
+    /// Errors which can occur when converting a string to a `Path`.
+    #[derive(Debug, PartialEq)]
+    pub enum BlockPathError {
+        /// Occurs when the number of bytes in a string is greater than `Path::BYTE_LIMIT`.
+        PathTooLong(usize),
+        /// Indicates that a path string was empty.
+        Empty,
+        /// Occurs when a component in a path string was empty.
+        EmptyComponent,
+        /// Occurs when the leading component of a path is not in the correct format.
+        InvalidLeadingComponent,
+    }
+
+    impl Display for BlockPathError {
+        fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+            match self {
+                BlockPathError::PathTooLong(length) => formatter.write_fmt(format_args!(
+                    "path contained {} bytes, which is over the {} byte limit",
+                    length,
+                    BlockPath::BYTE_LIMIT
+                )),
+                BlockPathError::Empty => formatter.write_str("path was empty"),
+                BlockPathError::EmptyComponent => {
+                    formatter.write_str("component of path was empty")
+                }
+                BlockPathError::InvalidLeadingComponent => {
+                    formatter.write_str("invalid leading path component")
+                }
+            }
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::{
+        crypto::Hash,
+        test_helpers::{make_path, make_principal, PRINCIPAL2},
+        Principal,
+    };
+
+    use super::*;
+
+    fn path_from_str_test_case(
+        expected: std::result::Result<BlockPath, BlockPathError>,
+        input: &str,
+    ) -> std::result::Result<(), BlockPathError> {
+        let result = BlockPath::try_from(input);
+        assert_eq!(expected, result);
+        Ok(())
+    }
+
+    #[test]
+    fn path_from_str_multiple_components_ok() -> std::result::Result<(), BlockPathError> {
+        let expected = make_path(vec!["red", "green", "blue"]);
+        let input = format!("{}/red/green/blue", expected.root());
+        path_from_str_test_case(Ok(expected), input.as_str())?;
+        Ok(())
+    }
+
+    #[test]
+    fn path_from_str_one_component_ok() -> std::result::Result<(), BlockPathError> {
+        let expected = make_path(vec![]);
+        let input = expected.root().to_string();
+        path_from_str_test_case(Ok(expected), input.as_str())?;
+        Ok(())
+    }
+
+    #[test]
+    fn path_from_str_trailing_slash_ok() -> std::result::Result<(), BlockPathError> {
+        // Notice the empty component at the end of this path due to the trailing slash.
+        let expected = make_path(vec!["orange", "banana", "shotgun", ""]);
+        let input = format!("{}/orange/banana/shotgun/", expected.root());
+        path_from_str_test_case(Ok(expected), input.as_str())?;
+        Ok(())
+    }
+
+    #[test]
+    fn path_from_str_path_too_long_fail() -> std::result::Result<(), BlockPathError> {
+        let principal = make_principal();
+        let input = format!("{}/{}", principal.0, "*".repeat(4097));
+        let expected = Err(BlockPathError::PathTooLong(input.len()));
+        path_from_str_test_case(expected, input.as_str())?;
+        Ok(())
+    }
+
+    #[test]
+    fn path_from_str_multiple_slashes_fail() -> std::result::Result<(), BlockPathError> {
+        let expected = Err(BlockPathError::EmptyComponent);
+        let input = format!("{}//orange", make_principal().0);
+        path_from_str_test_case(expected, input.as_str())?;
+        Ok(())
+    }
+
+    #[test]
+    fn path_from_str_leading_slash_fail() -> std::result::Result<(), BlockPathError> {
+        let expected = Err(BlockPathError::EmptyComponent);
+        let input = format!("/{}/orange/banana/shotgun", make_principal().0);
+        path_from_str_test_case(expected, input.as_str())?;
+        Ok(())
+    }
+
+    #[test]
+    fn path_round_trip() -> std::result::Result<(), BlockPathError> {
+        let expected = make_path(vec!["interstitial", "inter-related", "intersections"]);
+        let actual = BlockPath::try_from(expected.to_string().as_str())?;
+        assert_eq!(expected, actual);
+        Ok(())
+    }
+
+    #[test]
+    fn path_contains_true() {
+        let larger = make_path(vec!["apps"]);
+        let smaller = make_path(vec!["apps", "bohdi"]);
+        assert!(larger.contains(&smaller));
+    }
+
+    #[test]
+    fn path_contains_true_only_owner() {
+        let larger = make_path(vec![]);
+        let smaller = make_path(vec![]);
+        assert!(larger.contains(&smaller));
+    }
+
+    #[test]
+    fn path_contains_false_self_is_longer() {
+        let first = make_path(vec!["apps", "bohdi"]);
+        let second = make_path(vec!["apps"]);
+        assert!(!first.contains(&second));
+    }
+
+    #[test]
+    fn path_contains_false_same_owners() {
+        let first = make_path(vec!["apps"]);
+        let second = make_path(vec!["nodes"]);
+        assert!(!first.contains(&second));
+    }
+
+    #[test]
+    fn path_contains_false_different_owners() {
+        let first = make_path(vec!["apps"]);
+        let mut second = make_path(vec!["apps"]);
+        *second.mut_root() = Principal(Hash::Sha2_256(PRINCIPAL2));
+        assert!(!first.contains(&second));
+    }
+}

+ 841 - 0
crates/btlib/src/crypto/merkle_stream.rs

@@ -0,0 +1,841 @@
+pub use private::{
+    MerkleNode, MerkleStream, MerkleTree, MerkleTreeKind, Sha2_256Node, VariantMerkleTree,
+    VecMerkleTree,
+};
+
+mod private {
+    use crate::{
+        crypto::{Encrypter, Error, HashKind, Result, SymKey},
+        trailered::Trailered,
+        BlockPath, BoxInIoErr, Decompose, MetaAccess, Principal, Sectored, TryCompose, WriteInteg,
+        SECTOR_SZ_DEFAULT,
+    };
+    use serde::{Deserialize, Serialize};
+    use std::io::{self, Read, Seek, Write};
+    use strum::EnumDiscriminants;
+
+    /// Returns the base 2 logarithm of the given number. This function will return -1 when given 0, and
+    /// this is the only input for which a negative value is returned.
+    pub(super) fn log2(mut n: usize) -> isize {
+        // Is there a better implementation of this in std? I wasn't able to find an integer log2
+        // function in std, so I wrote this naive implementation.
+        if 0 == n {
+            return -1;
+        }
+        let num_bits = usize::BITS.try_into().unwrap();
+        for k in 0..num_bits {
+            n >>= 1;
+            if 0 == n {
+                return k;
+            }
+        }
+        num_bits
+    }
+
+    /// Returns 2^x. Note that 0 is returned for any negative input.
+    pub(super) fn exp2(x: isize) -> usize {
+        if x < 0 {
+            0
+        } else {
+            1 << x
+        }
+    }
+
+    /// Trait for types which can be used as nodes in a `MerkleTree`.
+    pub trait MerkleNode: Default + Serialize + for<'de> Deserialize<'de> {
+        /// The kind of hash algorithm that this `HashData` uses.
+        const KIND: HashKind;
+
+        /// Creates a new `HashData` instance by hashing the data produced by the given iterator and
+        /// storing it in self.
+        fn new<'a, I: Iterator<Item = &'a [u8]>>(parts: I) -> Result<Self>;
+
+        /// Combines the hash data from the given children and prefix and stores it in self. It is
+        /// an error for no children to be provided (though one or the other may be `None`).
+        fn combine<'a, I: Iterator<Item = &'a [u8]>>(
+            &mut self,
+            prefix: I,
+            left: Option<&'a Self>,
+            right: Option<&'a Self>,
+        ) -> Result<()>;
+
+        /// Returns `Ok(())` if self contains the given hash data, and `Err(Error::HashCmpFailure)`
+        /// otherwise.
+        fn assert_contains(&self, hash_data: Option<&[u8]>) -> Result<()>;
+
+        /// Returns `Ok(())` if self contains the hash of the given data. Otherwise,
+        /// `Err(Error::HashCmpFailure)` is returned.
+        fn assert_contains_hash_of<'a, I: Iterator<Item = &'a [u8]>>(&self, parts: I)
+            -> Result<()>;
+
+        /// Returns `Ok(())` if the result of combining left and right is contained in self.
+        fn assert_parent_of<'a, I: Iterator<Item = &'a [u8]>>(
+            &self,
+            prefix: I,
+            left: Option<&'a Self>,
+            right: Option<&'a Self>,
+        ) -> Result<()>;
+
+        /// Attempts to borrow the data in this node as a slice.
+        fn try_as_slice(&self) -> Result<&[u8]>;
+
+        /// Computes the hash of the data produced by the given iterator and writes it to the
+        /// given slice.
+        fn digest<'a, I: Iterator<Item = &'a [u8]>>(dest: &mut [u8], parts: I) -> Result<()> {
+            Self::KIND.digest(dest, parts)
+        }
+    }
+
+    // TODO: Once full const generic support lands we can use a HashKind as a const param. Then we won't
+    // need to have different structs to support different kinds of hashes.
+    /// A struct for storing SHA2 256 hashes in a `MerkleTree`.
+    #[derive(Default, Serialize, Deserialize)]
+    pub struct Sha2_256Node(Option<[u8; HashKind::Sha2_256.len()]>);
+
+    impl Sha2_256Node {
+        fn as_slice(&self) -> Option<&[u8]> {
+            self.0.as_ref().map(|e| e.as_slice())
+        }
+
+        /// Returns a mutable reference to the array contained in self, if the array already exists.
+        /// Otherwise, creates a new array filled with zeros owned by self and returns a
+        /// reference.
+        fn mut_or_init(&mut self) -> &mut [u8] {
+            if self.0.is_none() {
+                self.0 = Some([0; HashKind::Sha2_256.len()])
+            }
+            self.0.as_mut().unwrap()
+        }
+
+        // I think this is the most complicated function signature I've ever written in any language.
+        /// Combines the given slices, together with the given prefix, and stores the resulting hash
+        /// in `dest`. If neither `left` nor `right` is `Some`, then `when_neither` is called and
+        /// whatever it returns is returned by this method.
+        fn combine_hash_data<'a, I: Iterator<Item = &'a [u8]>, F: FnOnce() -> Result<()>>(
+            dest: &mut [u8],
+            prefix: I,
+            left: Option<&'a [u8]>,
+            right: Option<&'a [u8]>,
+            when_neither: F,
+        ) -> Result<()> {
+            match (left, right) {
+                (Some(left), Some(right)) => {
+                    Self::digest(dest, prefix.chain([left, right].into_iter()))
+                }
+                (Some(left), None) => Self::digest(dest, prefix.chain([left, b"None"].into_iter())),
+                (None, Some(right)) => {
+                    Self::digest(dest, prefix.chain([b"None", right].into_iter()))
+                }
+                (None, None) => when_neither(),
+            }
+        }
+    }
+
+    impl MerkleNode for Sha2_256Node {
+        const KIND: HashKind = HashKind::Sha2_256;
+
+        fn new<'a, I: Iterator<Item = &'a [u8]>>(parts: I) -> Result<Self> {
+            let mut array = [0u8; Self::KIND.len()];
+            Self::digest(&mut array, parts)?;
+            Ok(Sha2_256Node(Some(array)))
+        }
+
+        fn combine<'a, I: Iterator<Item = &'a [u8]>>(
+            &mut self,
+            prefix: I,
+            left: Option<&'a Self>,
+            right: Option<&'a Self>,
+        ) -> Result<()> {
+            let left = left.and_then(|e| e.as_slice());
+            let right = right.and_then(|e| e.as_slice());
+            Self::combine_hash_data(self.mut_or_init(), prefix, left, right, || {
+                Err(Error::custom(
+                    "at least one argument to combine needs to supply data",
+                ))
+            })
+        }
+
+        fn assert_contains(&self, hash_data: Option<&[u8]>) -> Result<()> {
+            if self.as_slice() == hash_data {
+                Ok(())
+            } else {
+                Err(Error::HashCmpFailure)
+            }
+        }
+
+        fn assert_contains_hash_of<'a, I: Iterator<Item = &'a [u8]>>(
+            &self,
+            parts: I,
+        ) -> Result<()> {
+            let mut buf = [0u8; Self::KIND.len()];
+            Self::digest(&mut buf, parts)?;
+            self.assert_contains(Some(&buf))
+        }
+
+        fn assert_parent_of<'a, I: Iterator<Item = &'a [u8]>>(
+            &self,
+            prefix: I,
+            left: Option<&'a Self>,
+            right: Option<&'a Self>,
+        ) -> Result<()> {
+            let slice = match self.as_slice() {
+                Some(slice) => slice,
+                None => return Err(Error::HashCmpFailure),
+            };
+            let buf = {
+                let mut buf = [0u8; Self::KIND.len()];
+                let left = left.and_then(|e| e.as_slice());
+                let right = right.and_then(|e| e.as_slice());
+                Self::combine_hash_data(&mut buf, prefix, left, right, || {
+                    Err(Error::custom("logic error encountered"))
+                })?;
+                buf
+            };
+            if slice == buf {
+                Ok(())
+            } else {
+                Err(Error::HashCmpFailure)
+            }
+        }
+
+        fn try_as_slice(&self) -> Result<&[u8]> {
+            self.0
+                .as_ref()
+                .map(|arr| arr.as_slice())
+                .ok_or_else(|| Error::custom("this merkle node is empty"))
+        }
+    }
+
+    /// An index into a binary tree. This type provides convenience methods for navigating a tree.
+    #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
+    struct BinTreeIndex(usize);
+
+    impl BinTreeIndex {
+        /// Returns the index of the left child of this node.
+        fn left(self) -> Self {
+            Self(2 * self.0 + 1)
+        }
+
+        /// Returns the index of the right child of this node.
+        fn right(self) -> Self {
+            Self(2 * (self.0 + 1))
+        }
+
+        /// Returns the index of the parent of this node.
+        fn parent(self) -> Option<Self> {
+            if self.0 > 0 {
+                Some(Self((self.0 - 1) / 2))
+            } else {
+                None
+            }
+        }
+
+        /// Returns an iterator over the indices of all of this node's ancestors.
+        fn ancestors(self) -> impl Iterator<Item = BinTreeIndex> {
+            struct ParentIter(Option<BinTreeIndex>);
+
+            impl Iterator for ParentIter {
+                type Item = BinTreeIndex;
+
+                fn next(&mut self) -> Option<Self::Item> {
+                    let parent = match self.0 {
+                        Some(curr) => curr.parent(),
+                        None => None,
+                    };
+                    self.0 = parent;
+                    parent
+                }
+            }
+
+            ParentIter(Some(self))
+        }
+    }
+
+    pub trait MerkleTree: Sectored {
+        /// Checks that the root node contains the given hash data. If it does then `Ok(())` is
+        /// returned. If it doesn't, then `Err(Error::HashCmpFailure)` is returned.
+        fn assert_root_contains(&mut self, hash_data: Option<&[u8]>) -> Result<()>;
+
+        /// Hashes the given data, adds a new node to the tree with its hash and updates the hashes
+        /// of all parent nodes.
+        fn write(&mut self, offset: usize, data: &[u8]) -> Result<()>;
+
+        /// Verifies that the given data stored from the given offset into the protected data, has not
+        /// been modified.
+        fn verify(&self, offset: usize, data: &[u8]) -> Result<()>;
+
+        /// Returns the hash data stored in the root node of the tree. An error is returned if and only
+        /// if the tree is empty.
+        fn root_hash(&self) -> Result<&[u8]>;
+    }
+
+    /// An implementation of a Merkle tree, a tree for storing hashes. This implementation is a binary
+    /// tree which stores its nodes in a vector to ensure data locality.
+    ///
+    /// This type is used to provide integrity protection to a sequence of fixed sized units of data
+    /// called sectors. The size of the sectors are determined when the tree is created and cannot
+    /// be changed later. The hashes contained in the leaf nodes of this tree are hashes of sectors.
+    /// Each sector corresponds to an offset into the protected data, and in order to verify that a
+    /// sector has not been modified, you must supply the offset of the sector.
+    #[derive(Serialize, Deserialize)]
+    pub struct VecMerkleTree<T> {
+        nodes: Vec<T>,
+        /// The size of the sectors of data that this tree will protect.
+        sector_sz: usize,
+        #[serde(skip)]
+        root_verified: bool,
+    }
+
+    impl<T> VecMerkleTree<T> {
+        /// A slice to prefix to data being hashed for leaf nodes. It's important that this is different
+        /// from `INTERIOR_PREFIX`.
+        const LEAF_PREFIX: &'static [u8] = b"Leaf";
+        /// A slice to prefix to data being hashed for interior nodes. It's important that this is
+        /// different from 'LEAF_PREFIX`.
+        const INTERIOR_PREFIX: &'static [u8] = b"Interior";
+
+        /// Creates a new tree with no nodes in it and the given sector size.
+        pub fn empty(sector_sz: usize) -> VecMerkleTree<T> {
+            VecMerkleTree {
+                nodes: Vec::new(),
+                sector_sz,
+                root_verified: true,
+            }
+        }
+
+        /// Returns the number of generations in self. This method returns -1 when the tree is empty,
+        /// and this is the only case where a negative value is returned.
+        fn generations(&self) -> isize {
+            log2(self.nodes.len())
+        }
+
+        /// Returns the number of nodes in a complete binary tree with the given number of
+        /// generations. Note that `generations` is 0-based, so a tree with 1 node has 0 generations,
+        /// and a tree with 3 has 1.
+        fn len(generations: isize) -> usize {
+            if generations >= 0 {
+                exp2(generations + 1) - 1
+            } else {
+                0
+            }
+        }
+
+        /// Returns a reference to the hash stored in the given node, or `Error::IndexOutOfBounds` if
+        /// the given index doesn't exist.
+        fn hash_at(&self, index: BinTreeIndex) -> Result<&T> {
+            self.nodes.get(index.0).ok_or(Error::IndexOutOfBounds {
+                index: index.0,
+                limit: self.nodes.len(),
+            })
+        }
+
+        /// Returns the index which corresponds to the given offset into the protected data.
+        fn offset_to_index(&self, offset: usize) -> Result<BinTreeIndex> {
+            let gens = self.generations();
+            let sector_index = offset / self.sector_sz;
+            let index_limit = exp2(gens);
+            if sector_index >= index_limit {
+                return Err(Error::InvalidOffset {
+                    actual: offset,
+                    limit: index_limit * self.sector_sz,
+                });
+            }
+            Ok(BinTreeIndex(exp2(gens) - 1 + sector_index))
+        }
+
+        /// Returns an iterator of slices which need to be hashed along with the data to create a leaf
+        /// node.
+        fn leaf_parts(data: &[u8]) -> impl Iterator<Item = &[u8]> {
+            [Self::LEAF_PREFIX, data].into_iter()
+        }
+
+        /// Returns an iterator of slices which need to be hashed along with the data to create an
+        /// interior node.
+        fn interior_prefix<'a>() -> impl Iterator<Item = &'a [u8]> {
+            [Self::INTERIOR_PREFIX].into_iter()
+        }
+    }
+
+    impl<T: MerkleNode> VecMerkleTree<T> {
+        /// Percolates up the hash change to the given node to the root.
+        fn perc_up(&mut self, start: BinTreeIndex) -> Result<()> {
+            for index in start.ancestors() {
+                self.combine_children(index)?;
+            }
+            Ok(())
+        }
+
+        /// Combines the hashes of the given node's children and stores it in the given node.
+        fn combine_children(&mut self, index: BinTreeIndex) -> Result<()> {
+            let left = index.left();
+            let right = index.right();
+            // Note that index < left && index < right.
+            let split = index.0 + 1;
+            let (front, back) = self.nodes.split_at_mut(split);
+            let dest = &mut front[front.len() - 1];
+            let left = back.get(left.0 - split);
+            let right = back.get(right.0 - split);
+            dest.combine(Self::interior_prefix(), left, right)
+                .map_err(|_| Error::IndexOutOfBounds {
+                    index: index.0,
+                    limit: Self::len(self.generations() - 1),
+                })
+        }
+    }
+
+    impl<T: MerkleNode> MerkleTree for VecMerkleTree<T> {
+        fn assert_root_contains(&mut self, hash_data: Option<&[u8]>) -> Result<()> {
+            match self.hash_at(BinTreeIndex(0)) {
+                Ok(root) => {
+                    root.assert_contains(hash_data)?;
+                    self.root_verified = true;
+                    Ok(())
+                }
+                Err(Error::IndexOutOfBounds { .. }) => {
+                    if hash_data.is_none() {
+                        Ok(())
+                    } else {
+                        Err(Error::HashCmpFailure)
+                    }
+                }
+                Err(err) => Err(err),
+            }
+        }
+
+        fn write(&mut self, offset: usize, data: &[u8]) -> Result<()> {
+            self.assert_sector_sz(data.len())?;
+
+            let sector_index = offset / self.sector_sz;
+            let generations = self.generations();
+            let sector_index_sup = exp2(generations);
+            if sector_index >= sector_index_sup {
+                // Need to resize the tree.
+                let generations_new = log2(sector_index) + 1;
+                let new_cap = Self::len(generations_new) - self.nodes.len();
+                self.nodes.reserve_exact(new_cap);
+                // Extend the vector so there is enough room to fit the current leaves in the last
+                // generation.
+                let leaf_ct = self.nodes.len() - Self::len(generations - 1);
+                let new_len = Self::len(generations_new - 1) + sector_index + 1;
+                self.nodes.resize_with(new_len, T::default);
+                // Shift all previously allocated nodes down the tree.
+                let generation_gap = generations_new - generations;
+                for gen in (0..(generations + 1)).rev() {
+                    let shift = exp2(gen + generation_gap) - exp2(gen);
+                    let start = exp2(gen) - 1;
+                    let end = start
+                        + if gen == generations {
+                            leaf_ct
+                        } else {
+                            exp2(gen)
+                        };
+                    for index in start..end {
+                        let new_index = index + shift;
+                        self.nodes.swap(index, new_index);
+                    }
+                }
+                // Percolate up the old root to ensure that all nodes on the path from the old
+                // root to the new root are initialized. This is not needed in the case where the
+                // generation gap is only 1, as only the root is uninitialized in this case and it will
+                // be initialized after inserting the new node below.
+                if generation_gap > 1 && generations >= 0 {
+                    self.perc_up(BinTreeIndex(exp2(generation_gap) - 1))?;
+                }
+            }
+
+            let index = self.offset_to_index(offset)?;
+            if index.0 >= self.nodes.len() {
+                self.nodes.resize_with(index.0 + 1, T::default);
+            }
+            self.nodes[index.0] = T::new(Self::leaf_parts(data))?;
+            self.perc_up(index)
+        }
+
+        /// Verifies that the given data stored from the given offset into the protected data, has not
+        /// been modified.
+        fn verify(&self, offset: usize, data: &[u8]) -> Result<()> {
+            if !self.root_verified {
+                return Err(Error::RootHashNotVerified);
+            }
+            self.assert_sector_sz(data.len())?;
+            let start = self.offset_to_index(offset)?;
+            self.hash_at(start)?
+                .assert_contains_hash_of(Self::leaf_parts(data))?;
+            for index in start.ancestors() {
+                let parent = self.hash_at(index)?;
+                let left = self.hash_at(index.left()).ok();
+                let right = self.hash_at(index.right()).ok();
+                parent.assert_parent_of(Self::interior_prefix(), left, right)?;
+            }
+            Ok(())
+        }
+
+        fn root_hash(&self) -> Result<&[u8]> {
+            self.nodes
+                .first()
+                .map(|node| node.try_as_slice())
+                .ok_or_else(|| Error::custom("the tree is empty"))?
+        }
+    }
+
+    impl<T> Sectored for VecMerkleTree<T> {
+        fn sector_sz(&self) -> usize {
+            self.sector_sz
+        }
+    }
+
+    impl<T> Default for VecMerkleTree<T> {
+        fn default() -> Self {
+            Self::empty(SECTOR_SZ_DEFAULT)
+        }
+    }
+
+    #[derive(Serialize, Deserialize, EnumDiscriminants)]
+    #[strum_discriminants(name(MerkleTreeKind))]
+    pub enum VariantMerkleTree {
+        Sha2_256(VecMerkleTree<Sha2_256Node>),
+    }
+
+    impl VariantMerkleTree {
+        pub fn empty(kind: MerkleTreeKind, sector_sz: usize) -> VariantMerkleTree {
+            match kind {
+                MerkleTreeKind::Sha2_256 => {
+                    Self::Sha2_256(VecMerkleTree::<Sha2_256Node>::empty(sector_sz))
+                }
+            }
+        }
+    }
+
+    impl Sectored for VariantMerkleTree {
+        fn sector_sz(&self) -> usize {
+            match self {
+                Self::Sha2_256(tree) => tree.sector_sz(),
+            }
+        }
+    }
+
+    impl MerkleTree for VariantMerkleTree {
+        fn assert_root_contains(&mut self, hash_data: Option<&[u8]>) -> Result<()> {
+            match self {
+                Self::Sha2_256(tree) => tree.assert_root_contains(hash_data),
+            }
+        }
+
+        fn root_hash(&self) -> Result<&[u8]> {
+            match self {
+                Self::Sha2_256(tree) => tree.root_hash(),
+            }
+        }
+
+        fn verify(&self, offset: usize, data: &[u8]) -> Result<()> {
+            match self {
+                Self::Sha2_256(tree) => tree.verify(offset, data),
+            }
+        }
+
+        fn write(&mut self, offset: usize, data: &[u8]) -> Result<()> {
+            match self {
+                Self::Sha2_256(tree) => tree.write(offset, data),
+            }
+        }
+    }
+
+    impl Default for VariantMerkleTree {
+        fn default() -> Self {
+            Self::Sha2_256(VecMerkleTree::<Sha2_256Node>::default())
+        }
+    }
+
+    pub struct MerkleStream<T> {
+        trailered: Trailered<T, VariantMerkleTree>,
+        tree: VariantMerkleTree,
+        pos: usize,
+    }
+
+    impl<T: MetaAccess> MerkleStream<T> {
+        /// Asserts that the root merkle node contains the integrity value given by the inner stream.
+        pub fn assert_root_integrity(&mut self) -> Result<()> {
+            let hash_data = self.trailered.integrity();
+            self.tree.assert_root_contains(hash_data)
+        }
+    }
+
+    impl<T: Read + Seek> MerkleStream<T> {
+        /// Reads a `MerkleTree` from the end of the given stream and returns a stream which uses it.
+        pub fn new(inner: T) -> Result<MerkleStream<T>> {
+            let (trailered, tree) = Trailered::new(inner)?;
+            Ok(MerkleStream {
+                trailered,
+                tree: tree.unwrap_or_default(),
+                pos: 0,
+            })
+        }
+
+        pub fn with_tree(inner: T, tree: VariantMerkleTree) -> Result<MerkleStream<T>> {
+            let (trailered, trailer) = Trailered::new(inner)?;
+            if trailer.is_some() {
+                return Err(Error::custom(
+                    "stream already contained a serialized merkle tree",
+                ));
+            }
+            Ok(MerkleStream {
+                trailered,
+                tree,
+                pos: 0,
+            })
+        }
+    }
+
+    impl<T> Sectored for MerkleStream<T> {
+        fn sector_sz(&self) -> usize {
+            self.tree.sector_sz()
+        }
+    }
+
+    impl<T: Read + Seek> TryCompose<T, MerkleStream<T>> for MerkleStream<()> {
+        type Error = crate::Error;
+        fn try_compose(self, inner: T) -> std::result::Result<MerkleStream<T>, Self::Error> {
+            let (trailered, tree) = Trailered::new(inner)?;
+            Ok(MerkleStream {
+                trailered,
+                tree: tree.unwrap_or_default(),
+                pos: 0,
+            })
+        }
+    }
+
+    impl<T> Decompose<T> for MerkleStream<T> {
+        fn into_inner(self) -> T {
+            self.trailered.into_inner()
+        }
+    }
+
+    impl<T: WriteInteg + Seek> Write for MerkleStream<T> {
+        fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+            self.assert_sector_sz(buf.len())?;
+            self.tree.write(self.pos, buf)?;
+            let written = self.trailered.write(buf)?;
+            self.pos += self.sector_sz();
+            Ok(written)
+        }
+
+        fn flush(&mut self) -> io::Result<()> {
+            let root = self.tree.root_hash()?;
+            self.trailered.flush_integ(&self.tree, root)
+        }
+    }
+
+    impl<T: Read + Seek> Read for MerkleStream<T> {
+        fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+            self.assert_sector_sz(buf.len())?;
+            self.trailered.read_exact(buf)?;
+            self.tree.verify(self.pos, buf)?;
+            self.pos += self.sector_sz();
+            Ok(self.sector_sz())
+        }
+    }
+
+    impl<T: Seek> Seek for MerkleStream<T> {
+        fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
+            let from_start = self.trailered.seek(pos)?;
+            self.pos = from_start.try_into().box_err()?;
+            Ok(from_start)
+        }
+    }
+
+    impl<T: MetaAccess> MetaAccess for MerkleStream<T> {
+        fn block_key(&self) -> crate::Result<SymKey> {
+            self.trailered.block_key()
+        }
+
+        fn add_readcap_for(&mut self, owner: Principal, key: &dyn Encrypter) -> crate::Result<()> {
+            self.trailered.add_readcap_for(owner, key)
+        }
+
+        fn integrity(&self) -> Option<&[u8]> {
+            self.trailered.integrity()
+        }
+
+        fn set_path(&mut self, path: BlockPath) {
+            self.trailered.set_path(path)
+        }
+    }
+}
+
+#[cfg(test)]
+pub(crate) mod tests {
+    use std::io::{Read, Seek, SeekFrom, Write};
+
+    use super::private::{exp2, log2};
+    use super::*;
+    use crate::test_helpers::{BtCursor, Randomizer};
+    use crate::SECTOR_SZ_DEFAULT;
+    use btserde::{from_vec, to_vec};
+
+    #[test]
+    fn log2_test() {
+        assert_eq!(-1, log2(0));
+        assert_eq!(0, log2(1));
+        assert_eq!(1, log2(2));
+        assert_eq!(2, log2(4));
+        assert_eq!(2, log2(5));
+        assert_eq!(3, log2(8));
+        assert_eq!(9, log2(1023));
+        assert_eq!(10, log2(1025));
+        assert_eq!(63, log2(usize::MAX));
+    }
+
+    fn make_tree_with<const SZ: usize>(
+        num_sects: usize,
+    ) -> (VecMerkleTree<Sha2_256Node>, Vec<[u8; SZ]>) {
+        let mut tree = VecMerkleTree::<Sha2_256Node>::empty(SZ);
+        let mut sectors = Vec::with_capacity(num_sects);
+        for k in 1..(num_sects + 1) {
+            let offset = SZ * (k - 1);
+            let sector = [k as u8; SZ];
+            sectors.push(sector);
+            tree.write(offset, &sector).expect("append sector failed");
+        }
+        (tree, sectors)
+    }
+
+    fn merkle_tree_build_verify_test_case<const SZ: usize>(num_sects: usize) {
+        let (tree, sectors) = make_tree_with::<SZ>(num_sects);
+        for (k, sector) in sectors.into_iter().enumerate() {
+            tree.verify(k * SZ, &sector).expect("verify failed");
+        }
+    }
+
+    #[test]
+    fn merkle_tree_append_verify() {
+        merkle_tree_build_verify_test_case::<SECTOR_SZ_DEFAULT>(exp2(0));
+        merkle_tree_build_verify_test_case::<SECTOR_SZ_DEFAULT>(exp2(1));
+        merkle_tree_build_verify_test_case::<SECTOR_SZ_DEFAULT>(exp2(2));
+        merkle_tree_build_verify_test_case::<SECTOR_SZ_DEFAULT>(exp2(3));
+        merkle_tree_build_verify_test_case::<SECTOR_SZ_DEFAULT>(exp2(4));
+        merkle_tree_build_verify_test_case::<SECTOR_SZ_DEFAULT>(exp2(0) + 1);
+        merkle_tree_build_verify_test_case::<SECTOR_SZ_DEFAULT>(exp2(1) + 1);
+        merkle_tree_build_verify_test_case::<SECTOR_SZ_DEFAULT>(exp2(2) + 1);
+        merkle_tree_build_verify_test_case::<SECTOR_SZ_DEFAULT>(exp2(3) + 1);
+        merkle_tree_build_verify_test_case::<SECTOR_SZ_DEFAULT>(exp2(4) + 1);
+        merkle_tree_build_verify_test_case::<SECTOR_SZ_DEFAULT>(exp2(0) - 1);
+        merkle_tree_build_verify_test_case::<SECTOR_SZ_DEFAULT>(exp2(1) - 1);
+        merkle_tree_build_verify_test_case::<SECTOR_SZ_DEFAULT>(exp2(2) - 1);
+        merkle_tree_build_verify_test_case::<SECTOR_SZ_DEFAULT>(exp2(3) - 1);
+        merkle_tree_build_verify_test_case::<SECTOR_SZ_DEFAULT>(exp2(4) - 1);
+        merkle_tree_build_verify_test_case::<SECTOR_SZ_DEFAULT>(1337);
+        merkle_tree_build_verify_test_case::<512>(37);
+    }
+
+    #[test]
+    fn merkle_tree_data_changed_verify_fails() {
+        const SZ: usize = SECTOR_SZ_DEFAULT;
+        let mut tree = VecMerkleTree::<Sha2_256Node>::empty(SZ);
+        let one = [1u8; SZ];
+        let mut two = [2u8; SZ];
+        let three = [3u8; SZ];
+        tree.write(0, &one).expect("append one failed");
+        tree.write(SZ, &two).expect("append two failed");
+        tree.write(2 * SZ, &three).expect("append three failed");
+
+        two[0] = 7u8;
+
+        tree.verify(0, &one).expect("failed to verify one");
+        tree.verify(SZ, &two)
+            .expect_err("verify two was expected to fail");
+        tree.verify(2 * SZ, &three).expect("failed to verify three");
+    }
+
+    #[test]
+    fn merkle_tree_root_not_verified_verify_fails() {
+        const SZ: usize = SECTOR_SZ_DEFAULT;
+        let mut tree = VecMerkleTree::<Sha2_256Node>::empty(SZ);
+        let one = [1u8; SZ];
+        let two = [2u8; SZ];
+        let three = [3u8; SZ];
+        tree.write(0, &one).expect("append one failed");
+        tree.write(SZ, &two).expect("append two failed");
+        tree.write(2 * SZ, &three).expect("append three failed");
+        let vec = to_vec(&tree).expect("to_vec failed");
+        let tree: VecMerkleTree<Sha2_256Node> = from_vec(&vec).expect("from_vec failed");
+
+        tree.verify(SZ, &two)
+            .expect_err("verify succeeded, though it should have failed");
+    }
+
+    fn merkle_stream_sequential_test_case(sect_sz: usize, sect_ct: usize) {
+        let tree = VariantMerkleTree::empty(MerkleTreeKind::Sha2_256, sect_sz);
+        let mut stream =
+            MerkleStream::with_tree(BtCursor::new(Vec::new()), tree).expect("read from end failed");
+        for k in 1..(sect_ct + 1) {
+            let sector = vec![k as u8; sect_sz];
+            stream.write(&sector).expect("write failed");
+        }
+        stream.seek(SeekFrom::Start(0)).expect("seek failed");
+        for k in 1..(sect_ct + 1) {
+            let expected = vec![k as u8; sect_sz];
+            let mut actual = vec![0u8; sect_sz];
+            stream.read(&mut actual).expect("read failed");
+            assert_eq!(expected, actual);
+        }
+    }
+
+    #[test]
+    fn merkle_stream_sequential() {
+        merkle_stream_sequential_test_case(SECTOR_SZ_DEFAULT, 20);
+        merkle_stream_sequential_test_case(SECTOR_SZ_DEFAULT, 200);
+        merkle_stream_sequential_test_case(SECTOR_SZ_DEFAULT, 800);
+        merkle_stream_sequential_test_case(512, 25);
+        merkle_stream_sequential_test_case(8192, 20);
+    }
+
+    pub(crate) fn make_merkle_stream_filled_with_zeros(
+        sect_sz: usize,
+        sect_ct: usize,
+    ) -> MerkleStream<BtCursor<Vec<u8>>> {
+        let tree = VariantMerkleTree::empty(MerkleTreeKind::Sha2_256, sect_sz);
+        let mut stream =
+            MerkleStream::with_tree(BtCursor::new(Vec::new()), tree).expect("read from end failed");
+        let zeros = vec![0u8; sect_sz];
+        for _ in 0..sect_ct {
+            stream.write(&zeros).expect("write zeros failed");
+        }
+        stream.seek(SeekFrom::Start(0)).expect("seek failed");
+        stream
+    }
+
+    fn merkle_stream_random_test_case(rando: Randomizer, sect_sz: usize, sect_ct: usize) {
+        let mut stream = make_merkle_stream_filled_with_zeros(sect_sz, sect_ct);
+        let indices: Vec<usize> = rando.take(sect_ct).map(|e| e % sect_ct).collect();
+        for index in indices.iter().map(|e| *e) {
+            let offset = sect_sz * index;
+            stream
+                .seek(SeekFrom::Start(offset as u64))
+                .expect("seek to write failed");
+            let sector = vec![index as u8; sect_sz];
+            stream.write(&sector).expect("write failed");
+        }
+        for index in indices.iter().map(|e| *e) {
+            let offset = sect_sz * index;
+            stream
+                .seek(SeekFrom::Start(offset as u64))
+                .expect("seek to read failed");
+            let expected = vec![index as u8; sect_sz];
+            let mut actual = vec![0u8; sect_sz];
+            stream.read(&mut actual).expect("read failed");
+            assert_eq!(expected, actual);
+        }
+    }
+
+    #[test]
+    fn merkle_stream_random() {
+        const SEED: [u8; Randomizer::HASH.len()] = [3u8; Randomizer::HASH.len()];
+        merkle_stream_random_test_case(Randomizer::new(SEED), SECTOR_SZ_DEFAULT, 2);
+        merkle_stream_random_test_case(Randomizer::new(SEED), SECTOR_SZ_DEFAULT, 4);
+        merkle_stream_random_test_case(Randomizer::new(SEED), SECTOR_SZ_DEFAULT, 8);
+        merkle_stream_random_test_case(Randomizer::new(SEED), SECTOR_SZ_DEFAULT, 20);
+        merkle_stream_random_test_case(Randomizer::new(SEED), SECTOR_SZ_DEFAULT, 200);
+        merkle_stream_random_test_case(Randomizer::new(SEED), SECTOR_SZ_DEFAULT, 800);
+        merkle_stream_random_test_case(Randomizer::new(SEED), 8192, 63);
+    }
+}

Tiedoston diff-näkymää rajattu, sillä se on liian suuri
+ 48 - 892
crates/btlib/src/crypto/mod.rs


+ 355 - 0
crates/btlib/src/crypto/secret_stream.rs

@@ -0,0 +1,355 @@
+pub use private::SecretStream;
+
+mod private {
+    use crate::{
+        crypto::{Encrypter, Error, Result, SymKey},
+        Block, BlockPath, Decompose, MetaAccess, Principal, Sectored, TryCompose,
+    };
+    use std::io::{self, Read, Seek, SeekFrom, Write};
+
+    // A stream which encrypts all data written to it and decrypts all data read from it.
+    pub struct SecretStream<T> {
+        inner: T,
+        // The sector size of the inner stream. Reads and writes are only executed using buffers of
+        // this size.
+        inner_sect_sz: usize,
+        // The sector size of this stream. Reads and writes are only accepted for buffers of this size.
+        sect_sz: usize,
+        key: SymKey,
+        /// Buffer for ciphertext.
+        ct_buf: Vec<u8>,
+        /// Buffer for plaintext.
+        pt_buf: Vec<u8>,
+    }
+
+    impl<T> SecretStream<T> {
+        /// Given an offset into this stream, produces the corresponding offset into the inner stream.
+        fn inner_offset(&self, outer_offset: u64) -> u64 {
+            let sect_sz = self.sect_sz as u64;
+            let inner_sect_sz = self.inner_sect_sz as u64;
+            // We return the offset into the current sector, plus the size of all previous sectors.
+            outer_offset % sect_sz + outer_offset / sect_sz * inner_sect_sz
+        }
+
+        /// Given an offset into the inner stream, returns the corresponding offset into this stream.
+        fn outer_offset(&self, inner_offset: u64) -> u64 {
+            let sect_sz = self.sect_sz as u64;
+            let inner_sect_sz = self.inner_sect_sz as u64;
+            inner_offset % inner_sect_sz + inner_offset / inner_sect_sz * sect_sz
+        }
+    }
+
+    impl SecretStream<()> {
+        pub fn new(key: SymKey) -> SecretStream<()> {
+            SecretStream {
+                inner: (),
+                inner_sect_sz: 0,
+                sect_sz: 0,
+                key,
+                ct_buf: Vec::new(),
+                pt_buf: Vec::new(),
+            }
+        }
+    }
+
+    impl<T> Decompose<T> for SecretStream<T> {
+        fn into_inner(self) -> T {
+            self.inner
+        }
+    }
+
+    impl<T, U: Sectored> TryCompose<U, SecretStream<U>> for SecretStream<T> {
+        type Error = Error;
+        fn try_compose(mut self, inner: U) -> Result<SecretStream<U>> {
+            let inner_sect_sz = inner.sector_sz();
+            let expansion_sz = self.key.expansion_sz();
+            let sect_sz = inner_sect_sz - expansion_sz;
+            let block_sz = self.key.block_size();
+            if 0 != sect_sz % block_sz {
+                return Err(Error::IndivisibleSize {
+                    divisor: block_sz,
+                    actual: sect_sz,
+                });
+            }
+            self.pt_buf.resize(inner_sect_sz, 0);
+            self.pt_buf.resize(inner_sect_sz + block_sz, 0);
+            Ok(SecretStream {
+                inner,
+                inner_sect_sz,
+                sect_sz: inner_sect_sz - expansion_sz,
+                key: self.key,
+                ct_buf: self.ct_buf,
+                pt_buf: self.pt_buf,
+            })
+        }
+    }
+
+    impl<T> Sectored for SecretStream<T> {
+        fn sector_sz(&self) -> usize {
+            self.sect_sz
+        }
+    }
+
+    impl<T: Write> Write for SecretStream<T> {
+        fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+            self.assert_sector_sz(buf.len())?;
+
+            self.ct_buf.resize(self.inner_sect_sz, 0);
+            let mut encrypter = self.key.to_encrypter()?;
+            let mut count = encrypter.update(buf, &mut self.ct_buf)?;
+            count += encrypter.finalize(&mut self.ct_buf[count..])?;
+            self.ct_buf.truncate(count);
+
+            self.inner.write_all(&self.ct_buf).map(|_| buf.len())
+        }
+
+        fn flush(&mut self) -> io::Result<()> {
+            self.inner.flush()
+        }
+    }
+
+    impl<T: Read> Read for SecretStream<T> {
+        fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+            self.assert_sector_sz(buf.len())?;
+
+            self.ct_buf.resize(self.inner_sect_sz, 0);
+            match self.inner.read_exact(&mut self.ct_buf) {
+                Ok(_) => (),
+                Err(err) => {
+                    if err.kind() == io::ErrorKind::UnexpectedEof {
+                        return Ok(0);
+                    } else {
+                        return Err(err);
+                    }
+                }
+            }
+
+            self.pt_buf
+                .resize(self.inner_sect_sz + self.key.block_size(), 0);
+            let mut decrypter = self.key.to_decrypter()?;
+            let mut count = decrypter.update(&self.ct_buf, &mut self.pt_buf)?;
+            count += decrypter.finalize(&mut self.pt_buf[count..])?;
+            self.pt_buf.truncate(count);
+
+            buf.copy_from_slice(&self.pt_buf);
+            Ok(buf.len())
+        }
+    }
+
+    impl<T: Seek> Seek for SecretStream<T> {
+        fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
+            let outer_offset = match pos {
+                SeekFrom::Start(offset) => offset,
+                SeekFrom::Current(offset) => {
+                    let inner_offset = self.inner.stream_position()?;
+                    let outer_offset = self.outer_offset(inner_offset);
+                    if offset >= 0 {
+                        outer_offset + offset as u64
+                    } else {
+                        outer_offset - (-offset as u64)
+                    }
+                }
+                SeekFrom::End(_) => {
+                    // We can support this once stream_len is stabilized:
+                    // https://github.com/rust-lang/rust/issues/59359
+                    return Err(io::Error::new(
+                        io::ErrorKind::Unsupported,
+                        "seeking from the end of the stream is not supported",
+                    ));
+                }
+            };
+            let inner_offset = self.inner_offset(outer_offset);
+            self.inner.seek(SeekFrom::Start(inner_offset))?;
+            Ok(outer_offset)
+        }
+    }
+
+    impl<T: MetaAccess> MetaAccess for SecretStream<T> {
+        fn block_key(&self) -> crate::Result<SymKey> {
+            self.inner.block_key()
+        }
+
+        fn add_readcap_for(&mut self, owner: Principal, key: &dyn Encrypter) -> crate::Result<()> {
+            self.inner.add_readcap_for(owner, key)
+        }
+
+        fn integrity(&self) -> Option<&[u8]> {
+            self.inner.integrity()
+        }
+
+        fn set_path(&mut self, path: BlockPath) {
+            self.inner.set_path(path)
+        }
+    }
+
+    impl<T: Read + Write + Seek + MetaAccess> Block for SecretStream<T> {}
+}
+
+#[cfg(test)]
+mod tests {
+    use std::io::{Read, Seek, SeekFrom, Write};
+
+    use crate::{
+        crypto::{SymKey, SymKeyKind},
+        test_helpers::{Randomizer, SectoredCursor},
+        Sectored, TryCompose, SECTOR_SZ_DEFAULT,
+    };
+
+    use super::*;
+
+    fn secret_stream_sequential_test_case(key: SymKey, inner_sect_sz: usize, sect_ct: usize) {
+        let mut stream = SecretStream::new(key)
+            .try_compose(SectoredCursor::new(
+                vec![0u8; inner_sect_sz * sect_ct],
+                inner_sect_sz,
+            ))
+            .expect("compose failed");
+        let sector_sz = stream.sector_sz();
+        for k in 0..sect_ct {
+            let sector = vec![k as u8; sector_sz];
+            stream.write(&sector).expect("write failed");
+        }
+        stream.seek(SeekFrom::Start(0)).expect("seek failed");
+        for k in 0..sect_ct {
+            let expected = vec![k as u8; sector_sz];
+            let mut actual = vec![0u8; sector_sz];
+            stream.read(&mut actual).expect("read failed");
+            assert_eq!(expected, actual);
+        }
+    }
+
+    fn secret_stream_sequential_test_suite(kind: SymKeyKind) {
+        let key = SymKey::generate(kind).expect("key generation failed");
+        secret_stream_sequential_test_case(key.clone(), SECTOR_SZ_DEFAULT, 16);
+    }
+
+    #[test]
+    fn secret_stream_encrypt_decrypt_are_inverse_aes256cbc() {
+        secret_stream_sequential_test_suite(SymKeyKind::Aes256Cbc)
+    }
+
+    #[test]
+    fn secret_stream_encrypt_decrypt_are_inverse_aes256ctr() {
+        secret_stream_sequential_test_suite(SymKeyKind::Aes256Ctr)
+    }
+
+    fn secret_stream_random_access_test_case(
+        rando: Randomizer,
+        key: SymKey,
+        inner_sect_sz: usize,
+        sect_ct: usize,
+    ) {
+        let mut stream = SecretStream::new(key)
+            .try_compose(SectoredCursor::new(
+                vec![0u8; inner_sect_sz * sect_ct],
+                inner_sect_sz,
+            ))
+            .expect("compose failed");
+        let sect_sz = stream.sector_sz();
+        let indices: Vec<usize> = rando.take(sect_ct).map(|e| e % sect_ct).collect();
+        for index in indices.iter().map(|e| *e) {
+            let offset = index * sect_sz;
+            stream
+                .seek(SeekFrom::Start(offset as u64))
+                .expect("seek to write failed");
+            let sector = vec![index as u8; sect_sz];
+            stream.write(&sector).expect("write failed");
+        }
+        for index in indices.iter().map(|e| *e) {
+            let offset = index * sect_sz;
+            stream
+                .seek(SeekFrom::Start(offset as u64))
+                .expect("seek to read failed");
+            let expected = vec![index as u8; sect_sz];
+            let mut actual = vec![0u8; sect_sz];
+            stream.read(&mut actual).expect("read failed");
+            assert_eq!(expected, actual);
+        }
+    }
+
+    fn secret_stream_random_access_test_suite(kind: SymKeyKind) {
+        const SEED: [u8; Randomizer::HASH.len()] = [3u8; Randomizer::HASH.len()];
+        let key = SymKey::generate(kind).expect("key generation failed");
+        secret_stream_random_access_test_case(
+            Randomizer::new(SEED),
+            key.clone(),
+            SECTOR_SZ_DEFAULT,
+            20,
+        );
+        secret_stream_random_access_test_case(
+            Randomizer::new(SEED),
+            key.clone(),
+            SECTOR_SZ_DEFAULT,
+            800,
+        );
+        secret_stream_random_access_test_case(Randomizer::new(SEED), key.clone(), 512, 200);
+        secret_stream_random_access_test_case(Randomizer::new(SEED), key.clone(), 512, 20);
+        secret_stream_random_access_test_case(Randomizer::new(SEED), key.clone(), 512, 200);
+    }
+
+    #[test]
+    fn secret_stream_random_access() {
+        secret_stream_random_access_test_suite(SymKeyKind::Aes256Cbc);
+        secret_stream_random_access_test_suite(SymKeyKind::Aes256Ctr);
+    }
+
+    fn make_secret_stream(
+        key_kind: SymKeyKind,
+        num_sectors: usize,
+    ) -> SecretStream<SectoredCursor<Vec<u8>>> {
+        let key = SymKey::generate(key_kind).expect("key generation failed");
+        let inner = SectoredCursor::new(
+            vec![0u8; num_sectors * SECTOR_SZ_DEFAULT],
+            SECTOR_SZ_DEFAULT,
+        );
+        SecretStream::new(key)
+            .try_compose(inner)
+            .expect("compose failed")
+    }
+
+    #[test]
+    fn secret_stream_seek_from_start() {
+        let mut stream = make_secret_stream(SymKeyKind::Aes256Cbc, 3);
+        let sector_sz = stream.sector_sz();
+        let expected = vec![2u8; sector_sz];
+        // Write one sector of ones, one sector of twos and one sector of threes.
+        for k in 1..4 {
+            let sector: Vec<u8> = std::iter::repeat(k as u8).take(sector_sz).collect();
+            stream.write(&sector).expect("writing to stream failed");
+        }
+
+        stream
+            .seek(SeekFrom::Start(sector_sz as u64))
+            .expect("seek failed");
+
+        // A read from the stream should now return the second sector, which is filled with twos.
+        let mut actual = vec![0u8; sector_sz];
+        stream
+            .read(&mut actual)
+            .expect("reading from stream failed");
+        assert_eq!(expected, actual);
+    }
+
+    #[test]
+    fn secret_stream_seek_from_current() {
+        let mut stream = make_secret_stream(SymKeyKind::Aes256Cbc, 3);
+        let sector_sz = stream.sector_sz();
+        let expected = vec![3u8; sector_sz];
+        // Write one sector of ones, one sector of twos and one sector of threes.
+        for k in 1..4 {
+            let sector: Vec<u8> = std::iter::repeat(k as u8).take(sector_sz).collect();
+            stream.write(&sector).expect("writing to stream failed");
+        }
+
+        stream
+            .seek(SeekFrom::Current(-1 * (sector_sz as i64)))
+            .expect("seek failed");
+
+        // A read from the stream should now return the last sector, which is filled with threes.
+        let mut actual = vec![0u8; sector_sz];
+        stream
+            .read(&mut actual)
+            .expect("reading from stream failed");
+        assert_eq!(expected, actual);
+    }
+}

+ 11 - 29
crates/btlib/src/crypto/tpm.rs

@@ -385,18 +385,10 @@ impl Cookie {
         Ok(Cookie(rand_array()?))
         Ok(Cookie(rand_array()?))
     }
     }
 
 
-    fn empty() -> Cookie {
-        Cookie([0; Self::LEN])
-    }
-
     fn as_slice(&self) -> &[u8] {
     fn as_slice(&self) -> &[u8] {
         self.0.as_slice()
         self.0.as_slice()
     }
     }
 
 
-    fn as_mut_slice(&mut self) -> &mut [u8] {
-        self.0.as_mut_slice()
-    }
-
     /// Returns the `Auth` value associated with this cookie.
     /// Returns the `Auth` value associated with this cookie.
     fn auth(&self) -> Auth {
     fn auth(&self) -> Auth {
         // This shouldn't fail because the given slice is only 64 bytes long.
         // This shouldn't fail because the given slice is only 64 bytes long.
@@ -463,10 +455,6 @@ pub struct ExportedKeyPair<S> {
     tagged_ct: TaggedCiphertext<TpmBlobs, PublicWrapper>,
     tagged_ct: TaggedCiphertext<TpmBlobs, PublicWrapper>,
 }
 }
 
 
-impl<S: Scheme> ExportedKeyPair<S> {
-    const FIELDS: &'static [&'static str] = &["scheme", "tagged_ct"];
-}
-
 #[derive(Serialize, Deserialize)]
 #[derive(Serialize, Deserialize)]
 pub struct ExportedCreds {
 pub struct ExportedCreds {
     sign: ExportedKeyPair<Sign>,
     sign: ExportedKeyPair<Sign>,
@@ -682,11 +670,6 @@ impl<'a, S: Scheme> KeyBuilder<'a, S> {
         self
         self
     }
     }
 
 
-    fn with_name_hash(mut self, name_hash: HashingAlgorithm) -> Self {
-        self.name_hash = name_hash;
-        self
-    }
-
     fn with_policy_digest(mut self, policy_digest: Digest) -> Self {
     fn with_policy_digest(mut self, policy_digest: Digest) -> Self {
         self.policy_digest = policy_digest;
         self.policy_digest = policy_digest;
         self
         self
@@ -863,7 +846,7 @@ impl State {
     }
     }
 }
 }
 
 
-pub(crate) struct TpmCredStore {
+pub struct TpmCredStore {
     state: Arc<RwLock<State>>,
     state: Arc<RwLock<State>>,
     storage_path: PathBuf,
     storage_path: PathBuf,
     cookie: Cookie,
     cookie: Cookie,
@@ -874,7 +857,7 @@ impl TpmCredStore {
     const ENCRYPT_SCHEME: Encrypt = Encrypt::RSA_OAEP_2048_SHA_256;
     const ENCRYPT_SCHEME: Encrypt = Encrypt::RSA_OAEP_2048_SHA_256;
     const DEFAULT_WRITECAP_EXP: Duration = Duration::from_secs(60 * 60 * 24 * 365 * 10);
     const DEFAULT_WRITECAP_EXP: Duration = Duration::from_secs(60 * 60 * 24 * 365 * 10);
 
 
-    pub(crate) fn new<P: AsRef<Path>>(mut context: Context, state_path: P) -> Result<TpmCredStore> {
+    pub fn new<P: AsRef<Path>>(mut context: Context, state_path: P) -> Result<TpmCredStore> {
         let storage = Storage::load_or_init(state_path.as_ref())?;
         let storage = Storage::load_or_init(state_path.as_ref())?;
         let session = context.start_default_auth_session()?;
         let session = context.start_default_auth_session()?;
         context.set_sessions((Some(session), None, None));
         context.set_sessions((Some(session), None, None));
@@ -1327,7 +1310,7 @@ impl HashcheckTicketExt for HashcheckTicket {
 }
 }
 
 
 #[derive(Clone)]
 #[derive(Clone)]
-pub(crate) struct TpmCreds {
+pub struct TpmCreds {
     state: Arc<RwLock<State>>,
     state: Arc<RwLock<State>>,
     sign: KeyPair<Sign>,
     sign: KeyPair<Sign>,
     enc: KeyPair<Encrypt>,
     enc: KeyPair<Encrypt>,
@@ -1606,6 +1589,7 @@ mod test {
 
 
     /// Displays the numeric identifiers used by the supported hash algorithms.
     /// Displays the numeric identifiers used by the supported hash algorithms.
     //#[test]
     //#[test]
+    #[allow(dead_code)]
     fn show_nids() {
     fn show_nids() {
         fn show_nid(digest: MessageDigest) {
         fn show_nid(digest: MessageDigest) {
             let nid = digest.type_();
             let nid = digest.type_();
@@ -1756,10 +1740,7 @@ mod test {
             .gen_root_creds(&"TranslationInvariant")
             .gen_root_creds(&"TranslationInvariant")
             .expect("failed to gen root creds");
             .expect("failed to gen root creds");
         let writecap = root_creds.writecap().expect("no root writecap was present");
         let writecap = root_creds.writecap().expect("no root writecap was present");
-        let path = crate::Path {
-            root: root_creds.principal(),
-            components: Vec::new(),
-        };
+        let path = crate::BlockPath::new(root_creds.principal(), Vec::new());
         writecap
         writecap
             .assert_valid_for(&path)
             .assert_valid_for(&path)
             .expect("failed to verify root writecap");
             .expect("failed to verify root writecap");
@@ -1771,15 +1752,15 @@ mod test {
         let root_creds = store
         let root_creds = store
             .gen_root_creds(&"TranslationInvariant")
             .gen_root_creds(&"TranslationInvariant")
             .expect("failed to gen root creds");
             .expect("failed to gen root creds");
-        let path = crate::Path {
-            root: root_creds.principal(),
-            components: vec!["apps".to_string(), "comms".to_string()],
-        };
+        let path = crate::BlockPath::new(
+            root_creds.principal(),
+            vec!["apps".to_string(), "comms".to_string()],
+        );
         let node_creds = store.node_creds().expect("failed to gen node creds");
         let node_creds = store.node_creds().expect("failed to gen node creds");
         let writecap = root_creds
         let writecap = root_creds
             .issue_writecap(
             .issue_writecap(
                 node_creds.principal(),
                 node_creds.principal(),
-                path.components.clone(),
+                path.components().map(|e| e.to_string()).collect(),
                 Epoch::now() + Duration::from_secs(3600),
                 Epoch::now() + Duration::from_secs(3600),
             )
             )
             .expect("failed to issue writecap");
             .expect("failed to issue writecap");
@@ -1852,6 +1833,7 @@ mod test {
     /// To work-around this issue I've chosen to wrap the data returned by `duplicate` with a
     /// To work-around this issue I've chosen to wrap the data returned by `duplicate` with a
     /// symmetric key in software.
     /// symmetric key in software.
     //#[test]
     //#[test]
+    #[allow(dead_code)]
     fn key_export_import() -> Result<()> {
     fn key_export_import() -> Result<()> {
         let auth = Auth::try_from(vec![0u8; 32])?;
         let auth = Auth::try_from(vec![0u8; 32])?;
 
 

Tiedoston diff-näkymää rajattu, sillä se on liian suuri
+ 53 - 725
crates/btlib/src/lib.rs


+ 533 - 0
crates/btlib/src/sectored_buf.rs

@@ -0,0 +1,533 @@
+pub use private::SectoredBuf;
+
+mod private {
+    use log::{error, warn};
+    use std::io::{self, Read, Seek, SeekFrom, Write};
+
+    use btserde::{read_from, write_to};
+
+    use crate::{
+        crypto::{Encrypter, SymKey},
+        Block, BlockPath, BoxInIoErr, Decompose, Error, MetaAccess, Principal, ReadExt, Result,
+        Sectored, TryCompose,
+    };
+
+    /// A stream which buffers writes and read such that the inner stream only sees reads and writes
+    /// of sector length buffers.
+    pub struct SectoredBuf<T> {
+        inner: T,
+        buf: Vec<u8>,
+        /// The offset into the inner stream which the zero offset byte in `buf` corresponds to.
+        buf_start: usize,
+        /// Indicates if the contents of `buf` have been written to, and so whether `buf` needs to
+        /// be written back to `inner` before it is refilled.
+        dirty: bool,
+        /// The total number of bytes that have been written to the inner stream, including the
+        /// reserved bytes at the beginning.
+        len: usize,
+        /// The current position of this stream, expressed as an offset into the inner stream.
+        pos: usize,
+    }
+
+    impl SectoredBuf<()> {
+        pub fn new() -> SectoredBuf<()> {
+            SectoredBuf {
+                inner: (),
+                buf: Vec::new(),
+                buf_start: 0,
+                dirty: false,
+                len: 0,
+                pos: 0,
+            }
+        }
+    }
+
+    impl<T> SectoredBuf<T> {
+        /// The number of bytes at the beginning of the inner stream which are reserved to store the
+        /// length of data written. All offsets into the stored data must be shifted by this amount
+        /// to be translated to an offset in the inner stream.
+        pub(crate) const RESERVED: usize = std::mem::size_of::<usize>();
+
+        /// Returns the position in the inner stream which the given position in this stream
+        /// corresponds to.
+        fn inner_pos(self_pos: u64) -> u64 {
+            let offset: u64 = Self::RESERVED.try_into().unwrap();
+            self_pos + offset
+        }
+
+        /// Returns the position in this stream which the given position in the inner stream
+        /// corresponds to.
+        fn self_pos(inner_pos: u64) -> u64 {
+            let offset: u64 = Self::RESERVED.try_into().unwrap();
+            inner_pos - offset
+        }
+
+        /// Returns the offset into the internal buffer that corresponds to the current position.
+        fn buf_pos(&self) -> usize {
+            self.pos - self.buf_start
+        }
+
+        /// Returns one more than the last index in the internal buffer which can be read.
+        fn buf_end(&self) -> usize {
+            let limit = self.len.min(self.buf_start + self.sector_sz());
+            limit - self.buf_start
+        }
+
+        /// Returns the index of the sector which is currently loaded into the buffer.
+        fn buf_sector_index(&self) -> usize {
+            self.pos / self.sector_sz()
+        }
+    }
+
+    impl<T: Read + Seek> SectoredBuf<T> {
+        /// Fills the internal buffer by reading from the inner stream at the current position
+        /// and updates `self.buf_start` with the position read from.
+        fn fill_internal_buf(&mut self) -> Result<usize> {
+            self.buf_start = self.inner.stream_position()?.try_into().box_err()?;
+            let read_bytes = if self.buf_start < self.len {
+                let read_bytes = self.inner.fill_buf(&mut self.buf)?;
+                if read_bytes < self.buf.len() {
+                    return Err(Error::IncorrectSize {
+                        expected: self.buf.len(),
+                        actual: read_bytes,
+                    });
+                }
+                read_bytes
+            } else {
+                0
+            };
+            Ok(read_bytes)
+        }
+    }
+
+    impl Default for SectoredBuf<()> {
+        fn default() -> Self {
+            Self::new()
+        }
+    }
+
+    impl<T> Decompose<T> for SectoredBuf<T> {
+        fn into_inner(self) -> T {
+            self.inner
+        }
+    }
+
+    impl<T: Sectored + Read + Seek> TryCompose<T, SectoredBuf<T>> for SectoredBuf<()> {
+        type Error = Error;
+        fn try_compose(self, inner: T) -> Result<SectoredBuf<T>> {
+            let sect_sz = inner.sector_sz();
+            if sect_sz < Self::RESERVED {
+                return Err(Error::custom(format!(
+                    "a sector size of at least {} is required. Got {}",
+                    Self::RESERVED,
+                    sect_sz,
+                )));
+            }
+            let mut sectored = SectoredBuf {
+                inner,
+                buf: self.buf,
+                buf_start: 0,
+                dirty: false,
+                len: Self::RESERVED,
+                pos: Self::RESERVED,
+            };
+            sectored.inner.seek(SeekFrom::Start(0))?;
+            sectored.buf.resize(sect_sz, 0);
+            let len_stored = match sectored.fill_internal_buf() {
+                Ok(bytes_read) => bytes_read >= Self::RESERVED,
+                Err(Error::IncorrectSize { actual, expected }) => {
+                    if actual > 0 {
+                        return Err(Error::IncorrectSize { expected, actual });
+                    }
+                    // When the actual size was 0 that just means the inner stream was empty, which
+                    // is not an error.
+                    false
+                }
+                Err(err) => return Err(err),
+            };
+            if len_stored {
+                if let Ok(len) = read_from::<u64, _>(&mut sectored.buf.as_slice()) {
+                    sectored.len = len.try_into()?;
+                }
+            } else {
+                write_to(&Self::RESERVED, &mut sectored.buf.as_mut_slice())?;
+                sectored.dirty = true;
+            }
+            Ok(sectored)
+        }
+    }
+
+    impl<T> Sectored for SectoredBuf<T> {
+        fn sector_sz(&self) -> usize {
+            self.buf.len()
+        }
+    }
+
+    impl<T: Seek + Read + Write> Write for SectoredBuf<T> {
+        fn write(&mut self, mut src: &[u8]) -> io::Result<usize> {
+            let src_len_start = src.len();
+            let mut dest = {
+                let buf_pos = self.buf_pos();
+                &mut self.buf[buf_pos..]
+            };
+            while !src.is_empty() {
+                if dest.is_empty() {
+                    if let Err(err) = self.flush() {
+                        error!("A call to SectoredBuf::flush returned an error: {}", err);
+                        break;
+                    }
+                    dest = &mut self.buf[..];
+                }
+                let sz = src.len().min(dest.len());
+                (&mut dest[..sz]).copy_from_slice(&src[..sz]);
+                dest = &mut dest[sz..];
+                src = &src[sz..];
+                self.dirty = sz > 0;
+                self.pos += sz;
+            }
+            Ok(src_len_start - src.len())
+        }
+
+        fn flush(&mut self) -> io::Result<()> {
+            if !self.dirty {
+                return Ok(());
+            }
+
+            // Write out the contents of the buffer.
+            let sect_sz: u64 = self.sector_sz().try_into().box_err()?;
+            let inner_pos = self.inner.stream_position()?;
+            let inner_pos_usize: usize = inner_pos.try_into().box_err()?;
+            let is_new_sector = self.pos > inner_pos_usize;
+            let is_full = (self.buf.len() - self.buf_pos()) == 0;
+            let seek_to = if is_new_sector {
+                if is_full {
+                    inner_pos + sect_sz
+                } else {
+                    inner_pos
+                }
+            } else {
+                // The contents of the buffer were previously read from inner, so we write the
+                // updated contents to the same offset.
+                let sect_start: u64 = self.buf_start.try_into().box_err()?;
+                self.inner.seek(SeekFrom::Start(sect_start))?;
+                if is_full {
+                    inner_pos
+                } else {
+                    inner_pos - sect_sz
+                }
+            };
+            self.inner.write_all(&self.buf)?;
+
+            // Update the stored length.
+            self.len = self.len.max(self.pos);
+            self.inner.seek(SeekFrom::Start(0))?;
+            self.fill_internal_buf()?;
+            let len: u64 = self.len.try_into().box_err()?;
+            write_to(&len, &mut self.buf.as_mut_slice()).box_err()?;
+            self.inner.seek(SeekFrom::Start(0))?;
+            self.inner.write_all(&self.buf)?;
+            self.inner.flush()?;
+
+            // Seek to the next position.
+            self.inner.seek(SeekFrom::Start(seek_to))?;
+            self.fill_internal_buf()?;
+            self.dirty = false;
+
+            Ok(())
+        }
+    }
+
+    impl<T: Read + Seek> Read for SectoredBuf<T> {
+        fn read(&mut self, mut dest: &mut [u8]) -> io::Result<usize> {
+            if self.pos == self.len {
+                return Ok(0);
+            }
+
+            let dest_len_start = dest.len();
+            let mut src = {
+                let start = self.buf_pos();
+                let end = self.buf_end();
+                &self.buf[start..end]
+            };
+            while !dest.is_empty() {
+                if src.is_empty() {
+                    if self.pos >= self.len {
+                        break;
+                    }
+                    let byte_ct = match self.fill_internal_buf() {
+                        Ok(byte_ct) => byte_ct,
+                        Err(err) => {
+                            warn!("SectoredBuf::full_internal_buf returned an error: {}", err);
+                            break;
+                        }
+                    };
+                    if 0 == byte_ct {
+                        break;
+                    }
+                    src = &self.buf[..byte_ct];
+                }
+                let sz = src.len().min(dest.len());
+                (&mut dest[..sz]).copy_from_slice(&src[..sz]);
+                dest = &mut dest[sz..];
+                src = &src[sz..];
+                self.pos += sz;
+            }
+            Ok(dest_len_start - dest.len())
+        }
+    }
+
+    impl<T: Seek + Read + Write> Seek for SectoredBuf<T> {
+        fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
+            let inner_pos = self.inner.stream_position()?;
+            let inner_pos_new = match pos {
+                SeekFrom::Start(rel_start) => Self::inner_pos(rel_start),
+                SeekFrom::Current(rel_curr) => {
+                    if rel_curr > 0 {
+                        inner_pos + rel_curr as u64
+                    } else {
+                        inner_pos - rel_curr as u64
+                    }
+                }
+                SeekFrom::End(_) => {
+                    return Err(io::Error::new(
+                        io::ErrorKind::Unsupported,
+                        "seeking relative to the end of the stream is not supported",
+                    ))
+                }
+            };
+            let sect_sz = self.sector_sz();
+            let sect_index = self.buf_sector_index();
+            let sect_index_new = TryInto::<usize>::try_into(inner_pos_new).box_err()? / sect_sz;
+            let pos: u64 = self.pos.try_into().box_err()?;
+            if sect_index != sect_index_new || pos == inner_pos {
+                self.flush()?;
+                let seek_to: u64 = (sect_index_new * sect_sz).try_into().box_err()?;
+                self.inner.seek(SeekFrom::Start(seek_to))?;
+                self.fill_internal_buf()?;
+            }
+            self.pos = inner_pos_new.try_into().box_err()?;
+            Ok(Self::self_pos(inner_pos_new))
+        }
+    }
+
+    impl<T: MetaAccess> MetaAccess for SectoredBuf<T> {
+        fn block_key(&self) -> Result<SymKey> {
+            self.inner.block_key()
+        }
+
+        fn add_readcap_for(&mut self, owner: Principal, key: &dyn Encrypter) -> Result<()> {
+            self.inner.add_readcap_for(owner, key)
+        }
+
+        fn integrity(&self) -> Option<&[u8]> {
+            self.inner.integrity()
+        }
+
+        fn set_path(&mut self, path: BlockPath) {
+            self.inner.set_path(path)
+        }
+    }
+
+    impl<T: Read + Write + Seek + MetaAccess> Block for SectoredBuf<T> {}
+}
+
+#[cfg(test)]
+mod tests {
+    use std::io::{Read, Seek, SeekFrom, Write};
+
+    use crate::{
+        test_helpers::{read_check, write_fill, Randomizer, SectoredCursor},
+        Decompose, ReadExt, Sectored, TryCompose, SECTOR_SZ_DEFAULT,
+    };
+
+    use super::*;
+
+    fn make_sectored_buf(sect_sz: usize, sect_ct: usize) -> SectoredBuf<SectoredCursor<Vec<u8>>> {
+        SectoredBuf::new()
+            .try_compose(SectoredCursor::new(vec![0u8; sect_sz * sect_ct], sect_sz))
+            .expect("compose for sectored buffer failed")
+    }
+
+    #[test]
+    fn sectored_buf_fill_inner() {
+        const SECT_SZ: usize = SECTOR_SZ_DEFAULT;
+        const SECT_CT: usize = 16;
+        let mut sectored = make_sectored_buf(SECT_SZ, SECT_CT);
+        let sect_sz = sectored.sector_sz();
+        assert_eq!(0, sect_sz % 16);
+        let chunk_sz = sect_sz / 16;
+        let chunk_ct = SECT_CT * 16;
+        write_fill(&mut sectored, chunk_sz, chunk_ct);
+    }
+
+    #[test]
+    fn sectored_buf_write_read_sequential() {
+        const SECT_SZ: usize = SECTOR_SZ_DEFAULT;
+        const SECT_CT: usize = 16;
+        let mut sectored = make_sectored_buf(SECT_SZ, SECT_CT);
+        let sect_sz = sectored.sector_sz();
+        assert_eq!(0, sect_sz % 16);
+        let chunk_sz = sect_sz / 16;
+        // We subtract one here so that the underlying buffer is not completely filled. This
+        // exercises the length limiting capability of the sectored buffer.
+        let chunk_ct = SECT_CT * 16 - 1;
+        write_fill(&mut sectored, chunk_sz, chunk_ct);
+        sectored.seek(SeekFrom::Start(0)).expect("seek failed");
+        read_check(&mut sectored, chunk_sz, chunk_ct);
+    }
+
+    #[test]
+    fn sectored_buf_sect_sz_too_small_is_error() {
+        const MIN: usize = SectoredBuf::<()>::RESERVED;
+        let result = SectoredBuf::new().try_compose(SectoredCursor::new([0u8; MIN], MIN - 1));
+        assert!(result.is_err());
+    }
+
+    #[test]
+    fn sectored_buf_len_preserved() {
+        const SECT_SZ: usize = SECTOR_SZ_DEFAULT;
+        const SECT_CT: usize = 16;
+        let mut sectored = make_sectored_buf(SECT_SZ, SECT_CT);
+        let expected = vec![42u8; 12];
+        // We need to ensure that writing expected will not fill up the buffer in sectored.
+        assert!(expected.len() < sectored.sector_sz() - SectoredBuf::<()>::RESERVED);
+
+        sectored.write_all(&expected).expect("write failed");
+        sectored.flush().expect("flush failed");
+        let inner = sectored.into_inner();
+        let mut sectored = SectoredBuf::new()
+            .try_compose(inner)
+            .expect("failed to compose sectored buffer");
+        let mut actual = vec![0u8; expected.len()];
+        sectored
+            .fill_buf(actual.as_mut_slice())
+            .expect("failed to fill actual");
+
+        assert_eq!(expected, actual);
+    }
+
+    #[test]
+    fn sectored_buf_seek() {
+        let sect_sz = 16usize;
+        let sect_ct = 16usize;
+        let cap = sect_sz * sect_ct - std::mem::size_of::<usize>();
+        let source = {
+            let mut source = Vec::with_capacity(cap);
+            source.extend(
+                std::iter::successors(Some(0u8), |n| if *n <= 254 { Some(*n + 1) } else { None })
+                    .take(cap),
+            );
+            source
+        };
+        let mut sectored = make_sectored_buf(sect_sz, sect_ct);
+        sectored.write(&source).expect("write failed");
+        let mut buf = [0u8; 1];
+        let end = cap.try_into().expect("cap cannot fit into a u8");
+        for pos in (0..end).rev() {
+            sectored
+                .seek(SeekFrom::Start(pos as u64))
+                .expect("seek failed");
+            sectored.read(&mut buf).expect("read failed");
+            assert_eq!(pos, buf[0]);
+        }
+    }
+
+    #[test]
+    fn sectored_buf_write_read_random() {
+        const SECT_SZ: usize = 16;
+        const SECT_CT: usize = 16;
+        const CAP: usize = SECT_SZ * SECT_CT - std::mem::size_of::<usize>();
+        let source = {
+            let mut expected = Vec::with_capacity(CAP);
+            expected.extend(
+                std::iter::successors(Some(0u8), |n| if *n <= 254 { Some(*n + 1) } else { None })
+                    .take(CAP),
+            );
+            expected
+        };
+        let indices: Vec<(usize, usize)> = {
+            let rando = Randomizer::new([3u8; Randomizer::HASH.len()]);
+            let rando2 = Randomizer::new([5u8; Randomizer::HASH.len()]);
+            rando
+                .zip(rando2)
+                .take(SECT_CT)
+                .map(|(mut first, mut second)| {
+                    first %= source.len();
+                    second &= source.len();
+                    let low = first.min(second);
+                    let high = first.max(second);
+                    (low, high)
+                })
+                .collect()
+        };
+
+        let mut sectored = make_sectored_buf(SECT_SZ, SECT_CT);
+        sectored
+            .write_all(&[0u8; CAP])
+            .expect("failed to fill sectored");
+        sectored.flush().expect("flush failed");
+        for (_k, (low, high)) in indices.iter().enumerate() {
+            sectored
+                .seek(SeekFrom::Start(*low as u64))
+                .expect("seek failed");
+            let src = &source[*low..*high];
+            sectored.write(src).expect("write failed");
+        }
+        sectored.flush().expect("flush failed");
+        let mut buf = vec![0u8; CAP];
+        for (_k, (low, high)) in indices.iter().enumerate() {
+            sectored
+                .seek(SeekFrom::Start(*low as u64))
+                .expect("seek failed");
+            let actual = &mut buf[*low..*high];
+            sectored.fill_buf(actual).expect("read failed");
+            let expected = &source[*low..*high];
+            assert_eq!(expected, actual);
+        }
+    }
+
+    #[test]
+    fn sectored_buf_read_past_end() {
+        const LEN: usize = 32;
+        let mut sectored = SectoredBuf::new()
+            .try_compose(SectoredCursor::new([0u8; LEN], LEN))
+            .expect("compose failed");
+        const BUF_LEN: usize = LEN - SectoredBuf::<()>::RESERVED + 1;
+        sectored.write(&[1u8; BUF_LEN - 1]).expect("write failed");
+        sectored.seek(SeekFrom::Start(0)).expect("seek failed");
+        let mut buf = [0u8; BUF_LEN];
+        // Note that buf is one byte longer than the available capacity in the cursor.
+        sectored.read(&mut buf).expect("read failed");
+        assert_eq!(&[1u8; BUF_LEN - 1], &buf[..(BUF_LEN - 1)]);
+        assert_eq!(0u8, buf[BUF_LEN - 1]);
+    }
+
+    /// Tests that the data written in try_compose is actually written back to the underlying stream.
+    #[test]
+    fn sectored_buf_write_back() {
+        let mut sectored = SectoredBuf::new()
+            .try_compose(SectoredCursor::new(vec![0u8; 24], 16))
+            .expect("compose failed");
+        let expected = [1u8; 8];
+        sectored.write(&expected).expect("first write failed");
+        sectored.write(&[2u8; 8]).expect("second write failed");
+        sectored.seek(SeekFrom::Start(0)).expect("seek failed");
+        let mut actual = [0u8; 8];
+        sectored.read(&mut actual).expect("read failed");
+        assert_eq!(expected, actual);
+    }
+
+    #[test]
+    fn sectored_buf_write_past_end() {
+        const LEN: usize = 8;
+        let mut sectored = SectoredBuf::new()
+            .try_compose(SectoredCursor::new(vec![0u8; 0], LEN))
+            .expect("compos failed");
+        let expected = [1u8; LEN + 1];
+        sectored.write(&expected).expect("write failed");
+        sectored.seek(SeekFrom::Start(0)).expect("seek failed");
+        let mut actual = [0u8; LEN + 1];
+        sectored.read(&mut actual).expect("read failed");
+        assert_eq!(expected, actual);
+    }
+}

+ 23 - 34
crates/btlib/src/test_helpers.rs

@@ -35,17 +35,6 @@ pub const PRINCIPAL2: [u8; 32] = [
     0xB9, 0x17, 0x7C, 0xF6, 0xBB, 0xC8, 0xD5, 0x30, 0xBF, 0x2A, 0xB4, 0xDE, 0x1B, 0x38, 0xCC, 0xF6,
     0xB9, 0x17, 0x7C, 0xF6, 0xBB, 0xC8, 0xD5, 0x30, 0xBF, 0x2A, 0xB4, 0xDE, 0x1B, 0x38, 0xCC, 0xF6,
 ];
 ];
 
 
-pub const PAYLOAD: [u8; 128] = [
-    0x39, 0x79, 0x1A, 0x0D, 0x8E, 0x6C, 0xF5, 0x4B, 0xF3, 0xA4, 0x75, 0xC4, 0x44, 0x73, 0x58, 0x58,
-    0x97, 0x14, 0x64, 0xE0, 0xC6, 0xFE, 0xCB, 0xCF, 0xBE, 0x67, 0x49, 0x49, 0x40, 0xAE, 0x71, 0x5A,
-    0x94, 0x7E, 0x6C, 0x4B, 0xDE, 0x33, 0x22, 0x75, 0xD8, 0x54, 0x23, 0x37, 0xFD, 0x1A, 0x68, 0x4A,
-    0x5F, 0xB5, 0xB3, 0xC9, 0x9A, 0x72, 0x7C, 0xF4, 0x3C, 0xAB, 0xED, 0x97, 0x87, 0x63, 0xBB, 0xD9,
-    0x8B, 0x11, 0xD3, 0xC1, 0x4C, 0x9A, 0x09, 0x0E, 0x7C, 0x10, 0x65, 0x9B, 0x8F, 0x35, 0xEB, 0x51,
-    0x19, 0xD7, 0x6E, 0xA3, 0xC9, 0x64, 0xE2, 0x54, 0x84, 0x5F, 0xA1, 0x8B, 0x63, 0x0C, 0xC3, 0x9D,
-    0xBE, 0xBB, 0x7F, 0x31, 0x1D, 0x59, 0xE2, 0x68, 0xC4, 0x5B, 0x37, 0x77, 0x04, 0xAD, 0x44, 0x75,
-    0xEE, 0x1F, 0x84, 0x17, 0xA2, 0x74, 0xC3, 0xC3, 0xD5, 0x2F, 0x70, 0x74, 0xFE, 0xD8, 0x2C, 0x29,
-];
-
 pub const SIGNATURE: [u8; 384] = [
 pub const SIGNATURE: [u8; 384] = [
     0x12, 0xB1, 0x09, 0x2F, 0x2B, 0x3C, 0x53, 0xE8, 0x1B, 0x2B, 0x6A, 0xE7, 0x97, 0x42, 0x9D, 0x83,
     0x12, 0xB1, 0x09, 0x2F, 0x2B, 0x3C, 0x53, 0xE8, 0x1B, 0x2B, 0x6A, 0xE7, 0x97, 0x42, 0x9D, 0x83,
     0x71, 0x75, 0x65, 0x25, 0xFD, 0xB0, 0x0E, 0x2F, 0xAB, 0x53, 0xB7, 0x03, 0x03, 0x39, 0xEE, 0xE9,
     0x71, 0x75, 0x65, 0x25, 0xFD, 0xB0, 0x0E, 0x2F, 0xAB, 0x53, 0xB7, 0x03, 0x03, 0x39, 0xEE, 0xE9,
@@ -102,16 +91,16 @@ pub(crate) fn make_principal() -> Principal {
     Principal(Hash::Sha2_256(PRINCIPAL))
     Principal(Hash::Sha2_256(PRINCIPAL))
 }
 }
 
 
-pub(crate) fn make_path_with_root(root: Principal, rel_components: Vec<&str>) -> Path {
+pub(crate) fn make_path_with_root(root: Principal, rel_components: Vec<&str>) -> BlockPath {
     let mut components = Vec::with_capacity(rel_components.len() + 1);
     let mut components = Vec::with_capacity(rel_components.len() + 1);
     components.push(root.0.to_string());
     components.push(root.0.to_string());
     for component in rel_components {
     for component in rel_components {
         components.push(component.to_string());
         components.push(component.to_string());
     }
     }
-    Path { root, components }
+    BlockPath::new(root, components)
 }
 }
 
 
-pub(crate) fn make_path(rel_components: Vec<&str>) -> Path {
+pub(crate) fn make_path(rel_components: Vec<&str>) -> BlockPath {
     make_path_with_root(make_principal(), rel_components)
     make_path_with_root(make_principal(), rel_components)
 }
 }
 
 
@@ -137,14 +126,17 @@ pub(crate) fn make_writecap_trusted_by<C: Creds>(
 ) -> Writecap {
 ) -> Writecap {
     let hour_hence = Epoch::now() + Duration::from_secs(3600);
     let hour_hence = Epoch::now() + Duration::from_secs(3600);
     let mut writecap = Writecap {
     let mut writecap = Writecap {
-        issued_to,
-        path: make_path_with_root(next.path.root.clone(), path_components),
-        expires: hour_hence,
-        signing_key: trusting_creds.public_sign().clone(),
+        body: WritecapBody {
+            issued_to,
+            path: make_path_with_root(next.body.path.root().clone(), path_components),
+            expires: hour_hence,
+            signing_key: trusting_creds.public_sign().clone(),
+        },
         signature: Signature::empty(Sign::RSA_PSS_3072_SHA_256),
         signature: Signature::empty(Sign::RSA_PSS_3072_SHA_256),
         next: Some(Box::from(next)),
         next: Some(Box::from(next)),
     };
     };
-    crypto::sign_writecap(&mut writecap, trusting_creds)
+    trusting_creds
+        .sign_writecap(&mut writecap)
         .map_err(convert_err)
         .map_err(convert_err)
         .expect("failed to sign writecap");
         .expect("failed to sign writecap");
     writecap
     writecap
@@ -163,34 +155,30 @@ pub(crate) fn make_self_signed_writecap_with<C: Creds>(key: &C) -> Writecap {
     let root_principal = key.principal();
     let root_principal = key.principal();
     let hour_hence = Epoch::now() + Duration::from_secs(3600);
     let hour_hence = Epoch::now() + Duration::from_secs(3600);
     let mut writecap = Writecap {
     let mut writecap = Writecap {
-        issued_to: root_principal.clone(),
-        path: make_path_with_root(root_principal, vec![]),
-        expires: hour_hence,
-        signing_key: key.public_sign().clone(),
+        body: WritecapBody {
+            issued_to: root_principal.clone(),
+            path: make_path_with_root(root_principal, vec![]),
+            expires: hour_hence,
+            signing_key: key.public_sign().clone(),
+        },
         signature: Signature::empty(Sign::RSA_PSS_3072_SHA_256),
         signature: Signature::empty(Sign::RSA_PSS_3072_SHA_256),
         next: None,
         next: None,
     };
     };
-    crypto::sign_writecap(&mut writecap, key)
+    key.sign_writecap(&mut writecap)
         .map_err(convert_err)
         .map_err(convert_err)
         .expect("failed to sign writecap");
         .expect("failed to sign writecap");
     writecap
     writecap
 }
 }
 
 
-pub(crate) fn make_readcap() -> Readcap {
-    make_readcap_for(&*ROOT_CREDS)
-}
-
 pub(crate) fn make_readcap_for<C: Encrypter + Principaled>(creds: &C) -> Readcap {
 pub(crate) fn make_readcap_for<C: Encrypter + Principaled>(creds: &C) -> Readcap {
     Readcap {
     Readcap {
         issued_to: creds.principal(),
         issued_to: creds.principal(),
-        key: crypto::encrypt(&BLOCK_KEY, creds).expect("failed to encrypt block key"),
+        key: creds
+            .ser_encrypt(&BLOCK_KEY)
+            .expect("failed to encrypt block key"),
     }
     }
 }
 }
 
 
-pub(crate) fn make_block() -> Box<dyn Block> {
-    make_block_with(make_readcap())
-}
-
 pub(crate) fn make_block_with(readcap: Readcap) -> Box<dyn Block> {
 pub(crate) fn make_block_with(readcap: Readcap) -> Box<dyn Block> {
     let mut readcaps = BTreeMap::new();
     let mut readcaps = BTreeMap::new();
     readcaps.insert(readcap.issued_to, readcap.key);
     readcaps.insert(readcap.issued_to, readcap.key);
@@ -199,7 +187,7 @@ pub(crate) fn make_block_with(readcap: Readcap) -> Box<dyn Block> {
     let (writecap, creds) = make_writecap_and_creds(vec!["apps"]);
     let (writecap, creds) = make_writecap_and_creds(vec!["apps"]);
     let root_writecap = writecap.next.as_ref().unwrap();
     let root_writecap = writecap.next.as_ref().unwrap();
     let header = BlockMetaBody {
     let header = BlockMetaBody {
-        path: make_path_with_root(root_writecap.issued_to.clone(), vec!["apps", "verse"]),
+        path: make_path_with_root(root_writecap.body.issued_to.clone(), vec!["apps", "verse"]),
         readcaps,
         readcaps,
         writecap: Some(writecap),
         writecap: Some(writecap),
         integrity: Some(Hash::Sha2_256([0u8; HashKind::Sha2_256.len()])),
         integrity: Some(Hash::Sha2_256([0u8; HashKind::Sha2_256.len()])),
@@ -223,6 +211,7 @@ pub(crate) fn make_block_with(readcap: Readcap) -> Box<dyn Block> {
 
 
 /// This function can be run as a test to write a new RSA key pair, as two Rust arrays,
 /// This function can be run as a test to write a new RSA key pair, as two Rust arrays,
 /// out to a file.
 /// out to a file.
+#[allow(dead_code)]
 fn write_test_keys() -> Result<()> {
 fn write_test_keys() -> Result<()> {
     write_rsa_keys_to_file("test_keys.rs")
     write_rsa_keys_to_file("test_keys.rs")
 }
 }

+ 306 - 0
crates/btlib/src/trailered.rs

@@ -0,0 +1,306 @@
+pub use private::Trailered;
+
+mod private {
+    use std::{
+        io::{self, Read, Seek, SeekFrom, Write},
+        marker::PhantomData,
+    };
+
+    use btserde::{read_from, write_to};
+    use serde::{de::DeserializeOwned, Serialize};
+
+    use crate::{BoxInIoErr, Decompose, MetaAccess, Result, WriteInteg};
+
    /// A struct which wraps a stream and which writes a trailing data structure to it when
    /// flushed. The layout produced is `[body bytes][serialized D][8-byte i64 offset]`.
    pub struct Trailered<T, D> {
        // The underlying stream holding both the body and, after a flush, the trailer.
        inner: T,
        // Length in bytes of the body, i.e. everything before the serialized trailer. Reads
        // and seeks are clamped to this so the trailer is never observable through `Read`.
        body_len: u64,
        // `D` is only used at (de)serialization time; no value of it is stored.
        phantom: PhantomData<D>,
    }
+
    impl<T: Read + Seek, D: DeserializeOwned> Trailered<T, D> {
        /// Creates a `Trailered<T, D>` around a stream which is assumed to contain no data:
        /// the body length starts at zero and no trailer is read.
        pub fn empty(inner: T) -> Trailered<T, D> {
            Trailered {
                inner,
                body_len: 0,
                phantom: PhantomData,
            }
        }

        /// Creates a new `Trailered<T>` containing the given `T`. This method requires that the
        /// given stream is either empty, or contains a valid serialization of `D` and the offset
        /// at which `D` is stored.
        ///
        /// The final 8 bytes of a non-empty stream hold a negative `i64` giving the offset,
        /// relative to the end of the stream, of the trailer's first byte; that position is
        /// also the body length. Returns the wrapper and the trailer, if one existed, with the
        /// stream position restored to where the caller left it.
        pub fn new(mut inner: T) -> Result<(Trailered<T, D>, Option<D>)> {
            // Remember the caller's position so it can be restored after probing the stream.
            let pos = inner.stream_position()?;
            let end = inner.seek(SeekFrom::End(0))?;
            if 0 == end {
                // An empty stream has no trailer to read.
                return Ok((Self::empty(inner), None));
            }
            // Read the trailer offset stored in the last 8 bytes of the stream.
            inner.seek(SeekFrom::End(-8))?;
            let offset: i64 = read_from(&mut inner)?;
            // Seeking by that (negative) offset lands on the first byte of the trailer, which
            // doubles as the length of the body.
            let body_len = inner.seek(SeekFrom::End(offset))?;
            let trailer: D = read_from(&mut inner)?;
            inner.seek(SeekFrom::Start(pos))?;
            Ok((
                Trailered {
                    inner,
                    body_len,
                    phantom: PhantomData,
                },
                Some(trailer),
            ))
        }
    }
+
+    impl<T: Seek, D> Trailered<T, D> {
+        fn post_write(&mut self, written: usize) -> io::Result<usize> {
+            if 0 == written {
+                return Ok(0);
+            }
+            // I cannot return an error at this point because bytes have already been written to inner.
+            // So if I can't track the body len due to a failure, a panic is the only option.
+            let pos = self
+                .inner
+                .stream_position()
+                .expect("failed to get stream position");
+            self.body_len = self.body_len.max(pos);
+            Ok(written)
+        }
+    }
+
+    impl<T: Read + Seek, D> Read for Trailered<T, D> {
+        fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+            let pos = self.inner.stream_position()?;
+            let available_u64 = self.body_len - pos;
+            let available: usize = available_u64.try_into().box_err()?;
+            let limit = buf.len().min(available);
+            self.inner.read(&mut buf[..limit])
+        }
+    }
+
    impl<T: Write + Seek, D: Serialize> Trailered<T, D> {
        /// Writes the given bytes to the body of the stream, updating the tracked body length.
        pub fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
            let written = self.inner.write(buf)?;
            self.post_write(written)
        }

        /// Serializes `trailer` immediately after the body, followed by the 8-byte negative
        /// offset (relative to the end of the stream) of the trailer's first byte. Returns the
        /// stream position that was current before the write so the caller can restore it.
        fn write_trailer(&mut self, trailer: &D) -> io::Result<u64> {
            let pos = self.inner.stream_position()?;
            self.inner.seek(SeekFrom::Start(self.body_len))?;
            write_to(trailer, &mut self.inner).box_err()?;
            // The offset spans the serialized trailer plus the 8 bytes of the offset itself.
            let offset_u64 = 8 + self.inner.stream_position()? - self.body_len;
            let offset = -(offset_u64 as i64);
            write_to(&offset, &mut self.inner).box_err()?;
            Ok(pos)
        }

        /// Writes the trailer, flushes the inner stream, and restores the stream position
        /// that was current before the flush.
        pub fn flush(&mut self, trailer: &D) -> io::Result<()> {
            let prev_pos = self.write_trailer(trailer)?;
            self.inner.flush()?;
            self.inner.seek(SeekFrom::Start(prev_pos))?;
            Ok(())
        }
    }
+
    impl<T: WriteInteg + Seek, D: Serialize> Trailered<T, D> {
        /// Like `flush`, but forwards the given integrity (hash) value to the inner stream's
        /// `WriteInteg::flush_integ` implementation. Restores the prior stream position.
        pub(crate) fn flush_integ(&mut self, trailer: &D, integrity: &[u8]) -> io::Result<()> {
            let prev_pos = self.write_trailer(trailer)?;
            self.inner.flush_integ(integrity)?;
            self.inner.seek(SeekFrom::Start(prev_pos))?;
            Ok(())
        }
    }
+
+    impl<T: Seek, D> Seek for Trailered<T, D> {
+        fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
+            /// Adds a signed integer to an unsigned integer and returns the result.
+            fn add_signed(unsigned: u64, signed: i64) -> u64 {
+                if signed >= 0 {
+                    unsigned + signed as u64
+                } else {
+                    unsigned - (-signed as u64)
+                }
+            }
+
+            let from_start = match pos {
+                SeekFrom::Start(from_start) => from_start,
+                SeekFrom::Current(from_curr) => {
+                    add_signed(self.inner.stream_position()?, from_curr)
+                }
+                SeekFrom::End(from_end) => add_signed(self.body_len, from_end),
+            };
+            let from_start = from_start.min(self.body_len);
+            self.inner.seek(SeekFrom::Start(from_start))
+        }
+    }
+
    impl<T, D> Decompose<T> for Trailered<T, D> {
        /// Consumes the wrapper and returns the underlying stream. Note that any unflushed
        /// trailer is NOT written; callers should flush first if persistence is required.
        fn into_inner(self) -> T {
            self.inner
        }
    }
+
    impl<T: MetaAccess, D> MetaAccess for Trailered<T, D> {
        // Every `MetaAccess` method is a straight delegation to the wrapped stream; this
        // wrapper adds no metadata behavior of its own.

        fn add_readcap_for(
            &mut self,
            owner: crate::Principal,
            key: &dyn crate::crypto::Encrypter,
        ) -> Result<()> {
            self.inner.add_readcap_for(owner, key)
        }

        fn block_key(&self) -> Result<crate::crypto::SymKey> {
            self.inner.block_key()
        }

        fn integrity(&self) -> Option<&[u8]> {
            self.inner.integrity()
        }

        fn set_path(&mut self, path: crate::BlockPath) {
            self.inner.set_path(path)
        }
    }
+}
+
#[cfg(test)]
mod tests {
    use crate::Decompose;

    use super::*;
    use std::io::{Cursor, Read, Seek, SeekFrom};

    /// Tests that a new `Trailered<T>` can be created from an empty stream.
    #[test]
    fn trailered_new_empty() {
        let cursor = Cursor::new(Vec::new());

        let (_, trailer): (_, Option<String>) =
            Trailered::new(cursor).expect("Trailered::new failed");

        assert_eq!(None, trailer);
    }

    /// Tests that an error is returned when an attempt is made to create a `Trailered<T>` from a
    /// non-empty stream which is too short.
    #[test]
    fn trailered_new_inner_too_short_is_error() {
        // 5 bytes is too short to hold the 8-byte trailer offset, so SeekFrom::End(-8) fails.
        let cursor = Cursor::new([0u8; 5]);

        let result = Trailered::<_, u128>::new(cursor);

        assert!(result.is_err())
    }

    /// Checks that the trailer is persisted to the inner stream.
    #[test]
    fn trailered_trailer_persisted() {
        const EXPECTED: &str = "Everyone deserves to be remembered,";
        // Write the trailer through one Trailered, then recover the raw stream.
        let cursor = {
            let cursor = Cursor::new(Vec::new());
            let (mut trailered, trailer) =
                Trailered::<_, String>::new(cursor).expect("Trailered::new failed");
            assert!(trailer.is_none());
            trailered
                .flush(&EXPECTED.to_string())
                .expect("flush failed");
            trailered.into_inner()
        };

        // Re-open the stream and confirm the same trailer is read back.
        let (_, trailer) = Trailered::<_, String>::new(cursor).expect("Trailered::new failed");

        assert_eq!(EXPECTED, trailer.unwrap());
    }

    /// Checks that body bytes written before a flush can be read back after re-opening.
    #[test]
    fn trailered_written_data_persisted() {
        const EXPECTED: &[u8] = b"and every life has something to teach us.";
        let mut cursor = {
            let (mut trailered, _) = Trailered::<_, u8>::new(Cursor::new(Vec::new()))
                .expect("failed to create first trailered");
            trailered.write(EXPECTED).expect("write failed");
            trailered.flush(&1).expect("flush failed");
            trailered.into_inner()
        };
        cursor.seek(SeekFrom::Start(0)).expect("seek failed");
        let (mut trailered, _) =
            Trailered::<_, u8>::new(cursor).expect("failed to created second trailered");
        let mut actual = vec![0u8; EXPECTED.len()];

        trailered.read(&mut actual).expect("read failed");

        assert_eq!(EXPECTED, actual);
    }

    /// Builds a `Trailered` whose body holds the bytes 0 through 7, positioned at the start.
    fn trailered_for_seek_test() -> Trailered<impl Read + Seek, u8> {
        let (mut trailered, _) =
            Trailered::new(Cursor::new(Vec::new())).expect("failed to create trailered");
        trailered
            .write(&[0, 1, 2, 3, 4, 5, 6, 7])
            .expect("write failed");
        trailered.seek(SeekFrom::Start(0)).expect("seek failed");
        trailered
    }

    /// Checks that seeking from the start of the stream works.
    #[test]
    fn trailered_seek_from_start() {
        const EXPECTED: u8 = 2;
        let mut trailered = trailered_for_seek_test();

        trailered
            .seek(SeekFrom::Start(EXPECTED as u64))
            .expect("seek failed");

        // The test body stores byte value i at position i, so the read value is the position.
        let mut actual = [0u8; 1];
        trailered.read(&mut actual).expect("read failed");
        assert_eq!(EXPECTED, actual[0]);
    }

    /// Checks that a negative seek relative to the current position works.
    #[test]
    fn trailered_seek_from_curr() {
        const EXPECTED: u8 = 5;
        let mut trailered = trailered_for_seek_test();
        trailered
            .seek(SeekFrom::Start(6))
            .expect("seek from start failed");

        trailered
            .seek(SeekFrom::Current(-1))
            .expect("seek from current failed");

        let mut actual = [0u8; 1];
        trailered.read(&mut actual).expect("read failed");
        assert_eq!(EXPECTED, actual[0]);
    }

    /// Checks that seeking relative to the end targets the end of the BODY, not the trailer.
    #[test]
    fn trailered_seek_from_end() {
        const EXPECTED: u8 = 7;
        let mut trailered = trailered_for_seek_test();

        trailered.seek(SeekFrom::End(-1)).expect("seek failed");

        let mut actual = [0u8; 1];
        trailered.read(&mut actual).expect("read failed");
        assert_eq!(EXPECTED, actual[0]);
    }

    /// Tests that a read past the end of the body in a `Trailered<T>` is not allowed.
    #[test]
    fn trailered_read_limited_to_body_len() {
        let (mut trailered, trailer) =
            Trailered::new(Cursor::new(Vec::new())).expect("failed to create Trailered");
        assert!(trailer.is_none());
        const EXPECTED: &[u8] = &[1, 1, 1, 1, 1, 0, 0, 0];
        trailered.write(&[1u8; 5]).expect("write failed");
        trailered.flush(&1u8).expect("flush failed");
        trailered.seek(SeekFrom::Start(0)).expect("seek failed");
        let mut actual = vec![0u8; EXPECTED.len()];

        // If read goes past the end of the body then there will be a 1 in the sixth position of
        // actual.
        trailered.read(&mut actual).expect("read failed");

        assert_eq!(EXPECTED, actual);
    }
}

Kaikkia tiedostoja ei voida näyttää, sillä liian monta tiedostoa muuttui tässä diffissä