lib.rs 56 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734
  1. // The dead code warnings create too much noise during wire-framing.
  2. // TODO: Delete this prior to release.
  3. #![allow(dead_code)]
  4. mod crypto;
  5. #[cfg(test)]
  6. mod test_helpers;
  7. #[macro_use]
  8. extern crate static_assertions;
  9. #[cfg(test)]
  10. #[macro_use]
  11. extern crate lazy_static;
  12. use brotli::{CompressorWriter, Decompressor};
  13. use btserde::{self, read_from, write_to};
  14. use crypto::{
  15. AsymKeyPub, Ciphertext, Creds, Decrypter, Encrypter, EncrypterExt, Hash, HashKind,
  16. MerkleStream, SecretStream, Sign, Signature, Signer, SymKey, SymKeyKind,
  17. };
  18. use log::{error, warn};
  19. use serde::{de::DeserializeOwned, Deserialize, Serialize};
  20. use serde_big_array::BigArray;
  21. use std::{
  22. collections::HashMap,
  23. convert::{Infallible, TryFrom},
  24. fmt::{self, Display, Formatter},
  25. hash::Hash as Hashable,
  26. io::{self, Read, Seek, SeekFrom, Write},
  27. marker::PhantomData,
  28. ops::{Add, Sub},
  29. sync::PoisonError,
  30. time::{Duration, SystemTime},
  31. };
/// The error type for block storage operations.
#[derive(Debug)]
pub enum Error {
    /// The credentials in use did not carry a writecap.
    MissingWritecap,
    /// Wrapper for an I/O error.
    Io(std::io::Error),
    /// Wrapper for a serialization error from `btserde`.
    Serde(btserde::Error),
    /// Wrapper for an error from the `crypto` module.
    Crypto(crypto::Error),
    /// A buffer or read had a different size than required.
    IncorrectSize { expected: usize, actual: usize },
    /// Catch-all for errors from other sources; boxed so the variant stays small.
    Custom(Box<dyn std::fmt::Debug + Send + Sync>),
}
  41. impl Error {
  42. fn custom<E: std::fmt::Debug + Send + Sync + 'static>(err: E) -> Error {
  43. Error::Custom(Box::new(err))
  44. }
  45. }
  46. impl Display for Error {
  47. fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
  48. match self {
  49. Error::MissingWritecap => write!(f, "missing writecap"),
  50. Error::Io(err) => err.fmt(f),
  51. Error::Serde(err) => err.fmt(f),
  52. Error::Crypto(err) => err.fmt(f),
  53. Error::IncorrectSize { expected, actual } => {
  54. write!(f, "incorrect size {}, expected {}", actual, expected)
  55. }
  56. Error::Custom(err) => err.fmt(f),
  57. }
  58. }
  59. }
impl std::error::Error for Error {}

/// Allows `?` to convert `std::io::Error` into `Error`.
impl From<std::io::Error> for Error {
    fn from(err: std::io::Error) -> Self {
        Error::Io(err)
    }
}

/// Allows `Error` to be carried inside a `std::io::Error` (kind `Other`).
impl From<Error> for std::io::Error {
    fn from(err: Error) -> Self {
        io::Error::new(io::ErrorKind::Other, err)
    }
}

impl From<btserde::Error> for Error {
    fn from(err: btserde::Error) -> Self {
        Error::Serde(err)
    }
}

impl From<crypto::Error> for Error {
    fn from(err: crypto::Error) -> Self {
        Error::Crypto(err)
    }
}

impl From<std::num::TryFromIntError> for Error {
    fn from(err: std::num::TryFromIntError) -> Self {
        Error::custom(err)
    }
}

/// Poisoned-lock errors are captured by message only, since the error itself borrows the guard.
impl<T: std::fmt::Debug> From<PoisonError<T>> for Error {
    fn from(err: PoisonError<T>) -> Self {
        Error::custom(err.to_string())
    }
}

/// Crate-local alias which fixes the error type to [`Error`].
type Result<T> = std::result::Result<T, Error>;
  92. /// TODO: Remove this once the error_chain crate is integrated.
  93. trait BoxInIoErr<T> {
  94. fn box_err(self) -> std::result::Result<T, io::Error>;
  95. }
  96. impl<T, E: std::error::Error + Send + Sync + 'static> BoxInIoErr<T> for std::result::Result<T, E> {
  97. fn box_err(self) -> std::result::Result<T, io::Error> {
  98. self.map_err(|err| io::Error::new(io::ErrorKind::Other, err))
  99. }
  100. }
  101. /// TODO: Remove this once the error_chain crate is integrated.
  102. trait StrInIoErr<T> {
  103. fn str_err(self) -> std::result::Result<T, io::Error>;
  104. }
  105. impl<T, E: Display> StrInIoErr<T> for std::result::Result<T, E> {
  106. fn str_err(self) -> std::result::Result<T, io::Error> {
  107. self.map_err(|err| io::Error::new(io::ErrorKind::Other, err.to_string()))
  108. }
  109. }
/// The default sector size to use for new blocks.
const SECTOR_SZ_DEFAULT: usize = 4096;

/// Trait for types which provide read and write access to blocks.
pub trait Block: Read + Write + Seek + MetaAccess {}

/// A trait for streams which only allow reads and writes in fixed sized units called sectors.
trait Sectored {
    /// Returns the size of the sector for this stream.
    fn sector_sz(&self) -> usize;

    /// Returns `Err(Error::IncorrectSize)` unless `actual` equals this stream's sector size.
    fn assert_sector_sz(&self, actual: usize) -> Result<()> {
        let expected = self.sector_sz();
        if expected == actual {
            Ok(())
        } else {
            Err(Error::IncorrectSize { expected, actual })
        }
    }
}

/// A version of the `Write` trait, which allows integrity information to be supplied when flushing.
trait WriteInteg: Write {
    /// Flushes buffered data, recording `integrity` alongside it.
    fn flush_integ(&mut self, integrity: &[u8]) -> io::Result<()>;
}

/// Consumes a wrapper and returns the inner value it was composed around.
trait Decompose<T> {
    fn into_inner(self) -> T;
}

/// Fallibly wraps an inner value `T`, producing the composite type `U`.
trait TryCompose<T, U: Decompose<T>> {
    type Error;
    fn try_compose(self, inner: T) -> std::result::Result<U, Self::Error>;
}

/// Infallible counterpart of `TryCompose`.
trait Compose<T, U> {
    fn compose(self, inner: T) -> U;
}

/// Any `TryCompose` whose error type is `Infallible` is automatically a `Compose`.
impl<T, U: Decompose<T>, S: TryCompose<T, U, Error = Infallible>> Compose<T, U> for S {
    fn compose(self, inner: T) -> U {
        let result = self.try_compose(inner);
        // Safety: Infallible has no values, so `result` must be `Ok`.
        unsafe { result.unwrap_unchecked() }
    }
}

/// Read and write access to a block's metadata.
pub trait MetaAccess {
    /// Returns the symmetric key protecting the block's contents.
    fn block_key(&self) -> Result<SymKey>;
    /// Grants `owner` access by encrypting the block key under the given encrypter.
    fn add_readcap_for(&mut self, owner: Principal, key: &dyn Encrypter) -> Result<()>;
    /// Returns the integrity value used to protect the contents of the block.
    fn integrity(&self) -> Option<&[u8]>;
    /// Sets the path at which this block is stored.
    fn set_path(&mut self, path: Path);
}
/// Extensions to the `Read` trait.
trait ReadExt: Read {
    /// Reads repeatedly until one of the following occur:
    /// 1. The given buffer is full.
    /// 2. A call to `read` returns 0.
    /// 3. A call to `read` returns an error.
    /// The number of bytes read is returned. If an error is returned, then no bytes were read.
    ///
    /// NOTE(review): despite the name, this is unrelated to `BufRead::fill_buf`; it is closer
    /// to `read_exact`, except a short read is reported via the returned count, not an error.
    fn fill_buf(&mut self, mut dest: &mut [u8]) -> io::Result<usize> {
        let dest_len_start = dest.len();
        while !dest.is_empty() {
            let byte_ct = match self.read(dest) {
                Ok(byte_ct) => byte_ct,
                Err(err) => {
                    if dest_len_start == dest.len() {
                        // Nothing has been consumed yet, so the error can be surfaced.
                        return Err(err);
                    } else {
                        // We're not allowed to return an error if we've already read from self.
                        error!("an error occurred in fill_buf: {}", err);
                        break;
                    }
                }
            };
            if 0 == byte_ct {
                break;
            }
            // Advance the destination window past the bytes just read.
            dest = &mut dest[byte_ct..];
        }
        Ok(dest_len_start - dest.len())
    }
}

impl<T: Read> ReadExt for T {}
/// This struct contains all of the metadata fields associated with a block, except for its
/// signature. Since this struct implements `Serialize`, this allows for convenient signature
/// calculations.
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)]
pub struct BlockMetaBody {
    /// The path at which this block is stored.
    path: Path,
    /// Maps each authorized principal to the block key encrypted for that principal.
    readcaps: HashMap<Principal, Ciphertext<SymKey>>,
    /// The capability authorizing writes to this block, if the creating creds held one.
    writecap: Option<Writecap>,
    /// A hash which provides integrity for the contents of the block body.
    integrity: Option<Hash>,
    /// The public key that corresponds to the private key used to sign this header.
    signing_key: AsymKeyPub<Sign>,
}
  199. impl BlockMetaBody {
  200. fn new<C: Creds>(creds: &C) -> BlockMetaBody {
  201. BlockMetaBody {
  202. path: Path::default(),
  203. readcaps: HashMap::new(),
  204. writecap: creds.writecap().map(|e| e.to_owned()),
  205. integrity: None,
  206. signing_key: creds.public_sign().to_owned(),
  207. }
  208. }
  209. }
/// Signed metadata associated with a block.
#[derive(Serialize, Deserialize)]
struct BlockMeta {
    /// The fields covered by the signature.
    body: BlockMetaBody,
    /// Signature over the serialized `body`, made with the key identified by `body.signing_key`.
    sig: Signature,
}

impl BlockMeta {
    /// Creates metadata for the given credentials with an empty (not yet computed) signature.
    fn new<C: Creds>(creds: &C) -> BlockMeta {
        let body = BlockMetaBody::new(creds);
        let sig = Signature::empty(body.signing_key.scheme());
        BlockMeta { body, sig }
    }
}
/// A stream which stores signed `BlockMeta` as a trailer after the block's body.
struct BlockStream<T, C> {
    /// The underlying stream, wrapped so the metadata trailer is kept past the body.
    trailered: Trailered<T, BlockMeta>,
    /// The metadata for this block.
    meta: BlockMeta,
    /// Scratch buffer reused when serializing `meta.body` for signing.
    meta_body_buf: Vec<u8>,
    /// The credentials used to sign and decrypt this block's metadata.
    creds: C,
}
impl<T: Read + Seek, C: Creds> BlockStream<T, C> {
    /// Opens a block stream over `inner`. If `inner` already carries a metadata trailer, its
    /// signature is verified; otherwise fresh metadata is created with a newly generated
    /// block key, readable by `creds` via a new readcap.
    fn new(inner: T, creds: C) -> Result<BlockStream<T, C>> {
        let (trailered, trailer) = Trailered::<_, BlockMeta>::new(inner)?;
        let trailer = match trailer {
            Some(trailer) => {
                // Existing block: refuse metadata whose signature does not verify.
                crypto::verify_header(&trailer.body, &trailer.sig)?;
                trailer
            }
            None => {
                // New block: generate a block key and grant the creator access to it.
                let mut meta = BlockMeta::new(&creds);
                let block_key = SymKey::generate(SymKeyKind::default())?;
                meta.body
                    .readcaps
                    .insert(creds.principal(), creds.ser_encrypt(&block_key)?);
                meta.body.writecap = creds.writecap().map(|e| e.to_owned());
                meta
            }
        };
        Ok(BlockStream {
            trailered,
            meta: trailer,
            meta_body_buf: Vec::new(),
            creds,
        })
    }
}
  255. impl<T: Write + Seek, C: Signer> Write for BlockStream<T, C> {
  256. fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
  257. self.trailered.write(buf)
  258. }
  259. fn flush(&mut self) -> io::Result<()> {
  260. Err(io::Error::new(
  261. io::ErrorKind::Unsupported,
  262. "flush is not supported, use flush_integ instead",
  263. ))
  264. }
  265. }
impl<T: Write + Seek, C: Signer> WriteInteg for BlockStream<T, C> {
    /// Records `integrity` in the metadata, re-signs the metadata body, and writes the
    /// signed trailer to the underlying stream.
    fn flush_integ(&mut self, integrity: &[u8]) -> io::Result<()> {
        let meta_body = &mut self.meta.body;
        let integ = meta_body.integrity.get_or_insert_with(Hash::default);
        // NOTE(review): `copy_from_slice` panics if `integrity` is not exactly the length
        // of the default `Hash` — confirm callers always pass a matching digest.
        integ.as_mut().copy_from_slice(integrity);
        // Serialize the body into the reusable buffer and sign those exact bytes.
        self.meta_body_buf.clear();
        write_to(&meta_body, &mut self.meta_body_buf).box_err()?;
        self.meta.sig = self
            .creds
            .sign(std::iter::once(self.meta_body_buf.as_slice()))?;
        self.trailered.flush(&self.meta)?;
        Ok(())
    }
}
impl<T: Read + Seek, C> Read for BlockStream<T, C> {
    /// Reads from the body of the block; the metadata trailer is never exposed.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.trailered.read(buf)
    }
}

impl<T: Seek, C> Seek for BlockStream<T, C> {
    /// Seeks to the given position in the stream. If a position beyond the end of the stream is
    /// specified, the seek will be to the end of the stream.
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        self.trailered.seek(pos)
    }
}
impl<T, C: Decrypter + Principaled> MetaAccess for BlockStream<T, C> {
    /// Recovers the block key by decrypting this principal's readcap with `self.creds`.
    /// Fails with `crypto::Error::NoReadCap` when no readcap exists for this principal.
    fn block_key(&self) -> Result<SymKey> {
        let readcap = self
            .meta
            .body
            .readcaps
            .get(&self.creds.principal())
            .ok_or(Error::Crypto(crypto::Error::NoReadCap))?;
        Ok(crypto::decrypt(readcap, &self.creds)?)
    }

    /// Grants `owner` access by re-encrypting the block key under `key` and storing the
    /// resulting readcap. Requires that the current creds can recover the block key.
    fn add_readcap_for(&mut self, owner: Principal, key: &dyn Encrypter) -> Result<()> {
        let block_key = self.block_key()?;
        let readcap = key.ser_encrypt(&block_key)?;
        self.meta.body.readcaps.insert(owner, readcap);
        Ok(())
    }

    /// Returns the stored integrity hash, if one has been recorded.
    fn integrity(&self) -> Option<&[u8]> {
        self.meta.body.integrity.as_ref().map(|hash| hash.as_ref())
    }

    fn set_path(&mut self, path: Path) {
        self.meta.body.path = path;
    }
}
/// Builder-style options controlling how a block is opened.
struct BlockOpenOptions<T, C> {
    /// The underlying stream the block is stored in.
    inner: T,
    /// The credentials used to sign and decrypt the block.
    creds: C,
    /// Whether the block body is encrypted with the block key.
    encrypt: bool,
    /// Whether the block body should be compressed.
    // NOTE(review): `compress` is currently never consulted by `open` — confirm whether
    // compression wiring is still pending.
    compress: bool,
}
  321. impl BlockOpenOptions<(), ()> {
  322. fn new() -> BlockOpenOptions<(), ()> {
  323. BlockOpenOptions {
  324. inner: (),
  325. creds: (),
  326. encrypt: true,
  327. compress: true,
  328. }
  329. }
  330. }
  331. impl<T, C> BlockOpenOptions<T, C> {
  332. fn with_inner<U>(self, inner: U) -> BlockOpenOptions<U, C> {
  333. BlockOpenOptions {
  334. inner,
  335. creds: self.creds,
  336. encrypt: self.encrypt,
  337. compress: self.compress,
  338. }
  339. }
  340. fn with_creds<D>(self, creds: D) -> BlockOpenOptions<T, D> {
  341. BlockOpenOptions {
  342. inner: self.inner,
  343. creds,
  344. encrypt: self.encrypt,
  345. compress: self.compress,
  346. }
  347. }
  348. fn with_encrypt(mut self, encrypt: bool) -> Self {
  349. self.encrypt = encrypt;
  350. self
  351. }
  352. fn with_compress(mut self, compress: bool) -> Self {
  353. self.compress = compress;
  354. self
  355. }
  356. }
impl<T: Read + Write + Seek + 'static, C: Creds + 'static> BlockOpenOptions<T, C> {
    /// Opens the block, composing the stream stack:
    /// inner -> `BlockStream` (signed metadata) -> `MerkleStream` (integrity) ->
    /// optional `SecretStream` (encryption) -> `SectoredBuf` (sector-aligned buffering).
    ///
    /// NOTE(review): the `compress` option is not consulted here — TODO confirm whether
    /// compression support is still pending.
    fn open(self) -> Result<Box<dyn Block>> {
        let stream = BlockStream::new(self.inner, self.creds)?;
        // The block key must be recovered before the stream is moved into the Merkle layer.
        let block_key = stream.block_key()?;
        let mut stream = MerkleStream::new(stream)?;
        stream.assert_root_integrity()?;
        if self.encrypt {
            let stream = SecretStream::new(block_key).try_compose(stream)?;
            let stream = SectoredBuf::new().try_compose(stream)?;
            Ok(Box::new(stream))
        } else {
            let stream = SectoredBuf::new().try_compose(stream)?;
            Ok(Box::new(stream))
        }
    }
}
/// A struct which wraps a stream and which writes a trailing data structure to it when flushed.
struct Trailered<T, D> {
    /// The wrapped stream.
    inner: T,
    /// The length in bytes of the body, i.e. everything before the trailer.
    body_len: u64,
    /// Marks the trailer type `D` without storing one.
    phantom: PhantomData<D>,
}
impl<T: Read + Seek, D: DeserializeOwned> Trailered<T, D> {
    /// Wraps an empty stream: there is no trailer to read and the body length is zero.
    fn empty(inner: T) -> Trailered<T, D> {
        Trailered {
            inner,
            body_len: 0,
            phantom: PhantomData,
        }
    }

    /// Creates a new `Trailered<T>` containing the given `T`. This method requires that the given
    /// stream is either empty, or contains a valid serialization of `D` followed by the offset at
    /// which `D` is stored.
    fn new(mut inner: T) -> Result<(Trailered<T, D>, Option<D>)> {
        let pos = inner.stream_position()?;
        let end = inner.seek(SeekFrom::End(0))?;
        if 0 == end {
            return Ok((Self::empty(inner), None));
        }
        // The final 8 bytes hold a negative offset from the end of the stream to the start
        // of the trailer; the body ends where the trailer begins.
        inner.seek(SeekFrom::End(-8))?;
        let offset: i64 = read_from(&mut inner)?;
        let body_len = inner.seek(SeekFrom::End(offset))?;
        let trailer: D = read_from(&mut inner)?;
        // Restore the caller's original position.
        inner.seek(SeekFrom::Start(pos))?;
        Ok((
            Trailered {
                inner,
                body_len,
                phantom: PhantomData,
            },
            Some(trailer),
        ))
    }
}
impl<T: Seek, D> Trailered<T, D> {
    /// Called after every write to keep `body_len` equal to the furthest byte written.
    fn post_write(&mut self, written: usize) -> io::Result<usize> {
        if 0 == written {
            return Ok(0);
        }
        // I cannot return an error at this point because bytes have already been written to inner.
        // So if I can't track the body len due to a failure, a panic is the only option.
        let pos = self
            .inner
            .stream_position()
            .expect("failed to get stream position");
        self.body_len = self.body_len.max(pos);
        Ok(written)
    }
}
  426. impl<T: Read + Seek, D> Read for Trailered<T, D> {
  427. fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
  428. let pos = self.inner.stream_position()?;
  429. let available_u64 = self.body_len - pos;
  430. let available: usize = available_u64.try_into().box_err()?;
  431. let limit = buf.len().min(available);
  432. self.inner.read(&mut buf[..limit])
  433. }
  434. }
impl<T: Write + Seek, D: Serialize> Trailered<T, D> {
    /// Writes to the body, recording any growth of the body via `post_write`.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let written = self.inner.write(buf)?;
        self.post_write(written)
    }

    /// Serializes `trailer` at the end of the body, followed by the (negative) offset from
    /// the end of the stream back to the trailer's start. Returns the stream position from
    /// before the write so the caller can restore it.
    fn write_trailer(&mut self, trailer: &D) -> io::Result<u64> {
        let pos = self.inner.stream_position()?;
        self.inner.seek(SeekFrom::Start(self.body_len))?;
        write_to(trailer, &mut self.inner).box_err()?;
        // The +8 accounts for the offset field itself, which occupies the final 8 bytes.
        let offset_u64 = 8 + self.inner.stream_position()? - self.body_len;
        let offset = -(offset_u64 as i64);
        write_to(&offset, &mut self.inner).box_err()?;
        Ok(pos)
    }

    /// Persists `trailer`, flushes the inner stream, then restores the previous position.
    fn flush(&mut self, trailer: &D) -> io::Result<()> {
        let prev_pos = self.write_trailer(trailer)?;
        self.inner.flush()?;
        self.inner.seek(SeekFrom::Start(prev_pos))?;
        Ok(())
    }
}
impl<T: WriteInteg + Seek, D: Serialize> Trailered<T, D> {
    /// Like `flush`, but forwards `integrity` to the inner stream's `flush_integ`.
    fn flush_integ(&mut self, trailer: &D, integrity: &[u8]) -> io::Result<()> {
        let prev_pos = self.write_trailer(trailer)?;
        self.inner.flush_integ(integrity)?;
        self.inner.seek(SeekFrom::Start(prev_pos))?;
        Ok(())
    }
}
impl<T: Seek, D> Seek for Trailered<T, D> {
    /// Seeks within the body. Positions are clamped to `body_len` so the trailer is
    /// unreachable, and `SeekFrom::End` is relative to the body's end, not the stream's.
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        /// Adds a signed integer to an unsigned integer and returns the result.
        // NOTE(review): underflows (panics in debug) if the result would be negative, and
        // `-signed` overflows for `i64::MIN` — confirm callers never seek that far back.
        fn add_signed(unsigned: u64, signed: i64) -> u64 {
            if signed >= 0 {
                unsigned + signed as u64
            } else {
                unsigned - (-signed as u64)
            }
        }
        let from_start = match pos {
            SeekFrom::Start(from_start) => from_start,
            SeekFrom::Current(from_curr) => add_signed(self.inner.stream_position()?, from_curr),
            SeekFrom::End(from_end) => add_signed(self.body_len, from_end),
        };
        // Clamp to the end of the body so the trailer cannot be seeked into.
        let from_start = from_start.min(self.body_len);
        self.inner.seek(SeekFrom::Start(from_start))
    }
}
impl<T: Write> Decompose<T> for CompressorWriter<T> {
    fn into_inner(self) -> T {
        // Resolves to the inherent `CompressorWriter::into_inner` (inherent methods take
        // precedence over trait methods), so this is delegation, not recursion.
        self.into_inner()
    }
}

impl<T: Read> Decompose<T> for Decompressor<T> {
    fn into_inner(self) -> T {
        // Delegates to the inherent `Decompressor::into_inner`; inherent methods take
        // precedence over trait methods, so this does not recurse.
        self.into_inner()
    }
}

/// Parameters passed to the Brotli compressor/decompressor.
#[derive(Clone)]
struct BrotliParams {
    /// Internal buffer size used by the compressor/decompressor.
    buf_sz: usize,
    /// Compression quality parameter forwarded to Brotli.
    quality: u32,
    /// Window size parameter forwarded to Brotli.
    window_sz: u32,
}

impl BrotliParams {
    fn new(buf_sz: usize, quality: u32, window_sz: u32) -> BrotliParams {
        BrotliParams {
            buf_sz,
            quality,
            window_sz,
        }
    }
}

/// Composing over a writer yields a compressing writer.
impl<T: Write> TryCompose<T, CompressorWriter<T>> for BrotliParams {
    type Error = Error;
    fn try_compose(self, inner: T) -> Result<CompressorWriter<T>> {
        Ok(CompressorWriter::new(
            inner,
            self.buf_sz,
            self.quality,
            self.window_sz,
        ))
    }
}

/// Composing over a reader yields a decompressing reader.
impl<T: Read> TryCompose<T, Decompressor<T>> for BrotliParams {
    type Error = Error;
    fn try_compose(self, inner: T) -> Result<Decompressor<T>> {
        Ok(Decompressor::new(inner, self.buf_sz))
    }
}
/// A stream which buffers writes and reads such that the inner stream only sees reads and writes
/// of sector length buffers.
struct SectoredBuf<T> {
    /// The wrapped sectored stream.
    inner: T,
    /// The internal one-sector buffer; its length is the sector size once composed.
    buf: Vec<u8>,
    /// The offset into the inner stream which the zero offset byte in `buf` corresponds to.
    buf_start: usize,
    /// Indicates if the contents of `buf` have been written to, and so whether `buf` needs to be
    /// written back to `inner` before it is refilled.
    dirty: bool,
    /// The total number of bytes that have been written to the inner stream, including the reserved
    /// bytes at the beginning.
    len: usize,
    /// The current position of this stream, expressed as an offset into the inner stream.
    pos: usize,
}
  541. impl SectoredBuf<()> {
  542. fn new() -> SectoredBuf<()> {
  543. SectoredBuf {
  544. inner: (),
  545. buf: Vec::new(),
  546. buf_start: 0,
  547. dirty: false,
  548. len: 0,
  549. pos: 0,
  550. }
  551. }
  552. }
impl<T> SectoredBuf<T> {
    /// The number of bytes at the beginning of the inner stream which are reserved to store the
    /// length of data written. All offsets into the stored data must be shifted by this amount to
    /// be translated to an offset in the inner stream.
    const RESERVED: usize = std::mem::size_of::<usize>();

    /// Returns the position in the inner stream which the given position in this stream corresponds
    /// to.
    fn inner_pos(self_pos: u64) -> u64 {
        let offset: u64 = Self::RESERVED.try_into().unwrap();
        self_pos + offset
    }

    /// Returns the position in this stream which the given position in the inner stream corresponds
    /// to.
    // NOTE(review): underflows if `inner_pos < RESERVED`; callers appear to only pass
    // positions at or past the reserved prefix — confirm.
    fn self_pos(inner_pos: u64) -> u64 {
        let offset: u64 = Self::RESERVED.try_into().unwrap();
        inner_pos - offset
    }

    /// Returns the offset into the internal buffer that corresponds to the current position.
    fn buf_pos(&self) -> usize {
        self.pos - self.buf_start
    }

    /// Returns one more than the last index in the internal buffer which can be read.
    fn buf_end(&self) -> usize {
        // The readable portion of the loaded sector ends at the stored length, if the
        // stored length falls inside this sector.
        let limit = self.len.min(self.buf_start + self.sector_sz());
        limit - self.buf_start
    }

    /// Returns the index of the sector which is currently loaded into the buffer.
    fn buf_sector_index(&self) -> usize {
        self.pos / self.sector_sz()
    }
}
impl<T: Read + Seek> SectoredBuf<T> {
    /// Fills the internal buffer by reading from the inner stream at the current position
    /// and updates `self.buf_start` with the position read from.
    ///
    /// Returns the number of bytes read: a full sector when `buf_start` lies inside the
    /// stored data, and 0 at (or past) the end. A partial sector is reported as
    /// `Error::IncorrectSize`.
    fn fill_internal_buf(&mut self) -> Result<usize> {
        self.buf_start = self.inner.stream_position()?.try_into().box_err()?;
        let read_bytes = if self.buf_start < self.len {
            // `fill_buf` here is `ReadExt::fill_buf`, which loops until the buffer is full
            // or the inner stream is exhausted.
            let read_bytes = self.inner.fill_buf(&mut self.buf)?;
            if read_bytes < self.buf.len() {
                return Err(Error::IncorrectSize {
                    expected: self.buf.len(),
                    actual: read_bytes,
                });
            }
            read_bytes
        } else {
            0
        };
        Ok(read_bytes)
    }
}
impl<T> Decompose<T> for SectoredBuf<T> {
    fn into_inner(self) -> T {
        self.inner
    }
}

impl<T: Sectored + Read + Seek> TryCompose<T, SectoredBuf<T>> for SectoredBuf<()> {
    type Error = Error;
    /// Attaches this buffer to `inner`, sizing the buffer to one sector and loading the
    /// stored data length from the reserved bytes at the start of the stream (or writing
    /// the initial length when the stream is empty).
    fn try_compose(self, inner: T) -> Result<SectoredBuf<T>> {
        let sect_sz = inner.sector_sz();
        // The reserved length prefix must fit inside the first sector.
        if sect_sz < Self::RESERVED {
            return Err(Error::custom(format!(
                "a sector size of at least {} is required. Got {}",
                Self::RESERVED,
                sect_sz,
            )));
        }
        let mut sectored = SectoredBuf {
            inner,
            buf: self.buf,
            buf_start: 0,
            dirty: false,
            len: Self::RESERVED,
            pos: Self::RESERVED,
        };
        sectored.inner.seek(SeekFrom::Start(0))?;
        sectored.buf.resize(sect_sz, 0);
        // Try to load the first sector, which contains the stored length prefix.
        let len_stored = match sectored.fill_internal_buf() {
            Ok(bytes_read) => bytes_read >= Self::RESERVED,
            Err(Error::IncorrectSize { actual, expected }) => {
                if actual > 0 {
                    return Err(Error::IncorrectSize { expected, actual });
                }
                // When the actual size was 0 that just means the inner stream was empty, which is
                // not an error.
                false
            }
            Err(err) => return Err(err),
        };
        if len_stored {
            // NOTE(review): the length is read back as `u64` but written below as `usize` —
            // confirm the serialization format treats these identically.
            if let Ok(len) = read_from::<u64, _>(&mut sectored.buf.as_slice()) {
                sectored.len = len.try_into()?;
            }
        } else {
            // Empty stream: write the initial length (just the reserved prefix itself).
            write_to(&Self::RESERVED, &mut sectored.buf.as_mut_slice())?;
            sectored.dirty = true;
        }
        Ok(sectored)
    }
}

/// The sector size is simply the (fixed) size of the internal buffer.
impl<T> Sectored for SectoredBuf<T> {
    fn sector_sz(&self) -> usize {
        self.buf.len()
    }
}
impl<T: Seek + Read + Write> Write for SectoredBuf<T> {
    /// Copies `src` into the internal sector buffer, flushing each sector to the inner
    /// stream as it fills. Returns the number of bytes accepted, which may be short if a
    /// flush fails (the flush error itself is only logged).
    fn write(&mut self, mut src: &[u8]) -> io::Result<usize> {
        let src_len_start = src.len();
        // The writable remainder of the currently loaded sector.
        let mut dest = {
            let buf_pos = self.buf_pos();
            &mut self.buf[buf_pos..]
        };
        while !src.is_empty() {
            if dest.is_empty() {
                // Sector full: write it out, then start on a fresh sector.
                if let Err(err) = self.flush() {
                    error!("A call to SectoredBuf::flush returned an error: {}", err);
                    break;
                }
                dest = &mut self.buf[..];
            }
            let sz = src.len().min(dest.len());
            (&mut dest[..sz]).copy_from_slice(&src[..sz]);
            dest = &mut dest[sz..];
            src = &src[sz..];
            // `sz` is always > 0 here, since both `src` and `dest` are non-empty.
            self.dirty = sz > 0;
            self.pos += sz;
        }
        Ok(src_len_start - src.len())
    }

    /// Writes the current sector to the inner stream, persists the updated length into the
    /// reserved prefix sector, and reloads the buffer for the next position.
    fn flush(&mut self) -> io::Result<()> {
        if !self.dirty {
            return Ok(());
        }
        // Write out the contents of the buffer.
        let sect_sz: u64 = self.sector_sz().try_into().box_err()?;
        let inner_pos = self.inner.stream_position()?;
        let inner_pos_usize: usize = inner_pos.try_into().box_err()?;
        // If `pos` is ahead of the inner stream, this sector was never read from inner.
        let is_new_sector = self.pos > inner_pos_usize;
        let is_full = (self.buf.len() - self.buf_pos()) == 0;
        // Decide where the stream must be positioned after the flush so the subsequent
        // `fill_internal_buf` loads the correct sector.
        let seek_to = if is_new_sector {
            if is_full {
                inner_pos + sect_sz
            } else {
                inner_pos
            }
        } else {
            // The contents of the buffer were previously read from inner, so we write the
            // updated contents to the same offset.
            let sect_start: u64 = self.buf_start.try_into().box_err()?;
            self.inner.seek(SeekFrom::Start(sect_start))?;
            if is_full {
                inner_pos
            } else {
                inner_pos - sect_sz
            }
        };
        self.inner.write_all(&self.buf)?;
        // Update the stored length.
        self.len = self.len.max(self.pos);
        // Reload the first sector, patch the length prefix into it, and write it back.
        self.inner.seek(SeekFrom::Start(0))?;
        self.fill_internal_buf()?;
        let len: u64 = self.len.try_into().box_err()?;
        write_to(&len, &mut self.buf.as_mut_slice()).box_err()?;
        self.inner.seek(SeekFrom::Start(0))?;
        self.inner.write_all(&self.buf)?;
        self.inner.flush()?;
        // Seek to the next position.
        self.inner.seek(SeekFrom::Start(seek_to))?;
        self.fill_internal_buf()?;
        self.dirty = false;
        Ok(())
    }
}
  726. impl<T: Read + Seek> Read for SectoredBuf<T> {
  727. fn read(&mut self, mut dest: &mut [u8]) -> io::Result<usize> {
  728. if self.pos == self.len {
  729. return Ok(0);
  730. }
  731. let dest_len_start = dest.len();
  732. let mut src = {
  733. let start = self.buf_pos();
  734. let end = self.buf_end();
  735. &self.buf[start..end]
  736. };
  737. while !dest.is_empty() {
  738. if src.is_empty() {
  739. if self.pos >= self.len {
  740. break;
  741. }
  742. let byte_ct = match self.fill_internal_buf() {
  743. Ok(byte_ct) => byte_ct,
  744. Err(err) => {
  745. warn!("SectoredBuf::full_internal_buf returned an error: {}", err);
  746. break;
  747. }
  748. };
  749. if 0 == byte_ct {
  750. break;
  751. }
  752. src = &self.buf[..byte_ct];
  753. }
  754. let sz = src.len().min(dest.len());
  755. (&mut dest[..sz]).copy_from_slice(&src[..sz]);
  756. dest = &mut dest[sz..];
  757. src = &src[sz..];
  758. self.pos += sz;
  759. }
  760. Ok(dest_len_start - dest.len())
  761. }
  762. }
  763. impl<T: Seek + Read + Write> Seek for SectoredBuf<T> {
  764. fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
  765. let inner_pos = self.inner.stream_position()?;
  766. let inner_pos_new = match pos {
  767. SeekFrom::Start(rel_start) => Self::inner_pos(rel_start),
  768. SeekFrom::Current(rel_curr) => {
  769. if rel_curr > 0 {
  770. inner_pos + rel_curr as u64
  771. } else {
  772. inner_pos - rel_curr as u64
  773. }
  774. }
  775. SeekFrom::End(_) => {
  776. return Err(io::Error::new(
  777. io::ErrorKind::Unsupported,
  778. "seeking relative to the end of the stream is not supported",
  779. ))
  780. }
  781. };
  782. let sect_sz = self.sector_sz();
  783. let sect_index = self.buf_sector_index();
  784. let sect_index_new = TryInto::<usize>::try_into(inner_pos_new).box_err()? / sect_sz;
  785. let pos: u64 = self.pos.try_into().box_err()?;
  786. if sect_index != sect_index_new || pos == inner_pos {
  787. self.flush()?;
  788. let seek_to: u64 = (sect_index_new * sect_sz).try_into().box_err()?;
  789. self.inner.seek(SeekFrom::Start(seek_to))?;
  790. self.fill_internal_buf()?;
  791. }
  792. self.pos = inner_pos_new.try_into().box_err()?;
  793. Ok(Self::self_pos(inner_pos_new))
  794. }
  795. }
/// `SectoredBuf` provides metadata access by delegating every `MetaAccess` method to the
/// wrapped stream; the buffer itself adds no metadata behavior.
impl<T: MetaAccess> MetaAccess for SectoredBuf<T> {
    /// Delegates to the inner stream.
    fn block_key(&self) -> Result<SymKey> {
        self.inner.block_key()
    }
    /// Delegates to the inner stream.
    fn add_readcap_for(&mut self, owner: Principal, key: &dyn Encrypter) -> Result<()> {
        self.inner.add_readcap_for(owner, key)
    }
    /// Delegates to the inner stream.
    fn integrity(&self) -> Option<&[u8]> {
        self.inner.integrity()
    }
    /// Delegates to the inner stream.
    fn set_path(&mut self, path: Path) {
        self.inner.set_path(path)
    }
}
// `SectoredBuf` satisfies all of `Block`'s supertraits, so the marker impl is empty.
impl<T: Read + Write + Seek + MetaAccess> Block for SectoredBuf<T> {}
/// An envelopment of a key, which is tagged with the principal who the key is meant for.
/// See [`Readcap::new`] for construction from a raw `Hash`.
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
struct Readcap {
    /// The principal this `Readcap` was issued to.
    issued_to: Principal,
    /// An encipherment of a block key using the public key of the principal.
    key: Ciphertext<SymKey>,
}
  819. impl Readcap {
  820. fn new(issued_to: Hash, key: Ciphertext<SymKey>) -> Readcap {
  821. Readcap {
  822. issued_to: Principal(issued_to),
  823. key,
  824. }
  825. }
  826. }
/// Verifies that a principal is authorized to write blocks in a tree.
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
struct Writecap {
    /// The principal this `Writecap` was issued to.
    issued_to: Principal,
    /// The path where this write cap's validity begins.
    path: Path,
    /// The point in time after which this write cap is no longer valid.
    expires: Epoch,
    /// The public key used to sign this write cap.
    signing_key: AsymKeyPub<Sign>,
    /// A digital signature which covers all of the fields in the write cap except for `next`.
    signature: Signature,
    /// The next write cap in the chain leading back to the root.
    next: Option<Box<Writecap>>,
}
/// Fragments are created from blocks using Erasure Encoding and stored with other nodes in the
/// network to provide availability and redundancy of data.
/// See [`Fragment::new`] to construct one from a string path.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct Fragment {
    /// The path to the block this fragment is from.
    path: Path,
    /// The serial number of this fragment.
    serial: FragmentSerial,
    /// The actual data carried by this fragment.
    body: Vec<u8>,
}
  854. impl Fragment {
  855. /// Create a new fragment with the given fields. If `path_str` cannot be parsed then a failed
  856. /// `Result` is returned containing a `PathError`.
  857. fn new(
  858. path_str: &str,
  859. serial_num: u32,
  860. body: Vec<u8>,
  861. ) -> std::result::Result<Fragment, PathError> {
  862. let result = Path::try_from(path_str);
  863. Ok(Fragment {
  864. path: result?,
  865. serial: FragmentSerial(serial_num),
  866. body,
  867. })
  868. }
  869. }
/// The body of every non-leaf node in a tree contains this data structure.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct Directory {
    /// The nodes that are attached to the tree at this block.
    nodes: Vec<Principal>,
    /// This block's descendants.
    /// NOTE(review): the outer key appears to be a child name mapping to that child's fragment
    /// records indexed by serial — confirm against the code that populates this map.
    children: HashMap<String, HashMap<FragmentSerial, FragmentRecord>>,
}
/// Keeps track of which principal is storing a fragment.
/// See [`FragmentRecord::new`] for construction from raw values.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct FragmentRecord {
    /// The fragment serial number this record is for.
    serial: FragmentSerial,
    /// The principal who is storing this fragment.
    stored_by: Principal,
}
  886. impl FragmentRecord {
  887. /// Creates a new `FragmentRecord` whose `serial` and `stored_by` fields are set to
  888. /// the given values.
  889. fn new(serial: u32, stored_by: Hash) -> FragmentRecord {
  890. FragmentRecord {
  891. serial: FragmentSerial(serial),
  892. stored_by: Principal(stored_by),
  893. }
  894. }
  895. }
/// An identifier for a security principal, which is any entity that can be authenticated.
// The wrapped `Hash` both identifies the principal and records which algorithm produced it
// (see `Principal::kind`).
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Hashable, Clone, Default)]
pub struct Principal(Hash);
impl Principal {
    /// Returns the kind of hash algorithm that produced this principal's identifier.
    fn kind(&self) -> HashKind {
        HashKind::from(&self.0)
    }
}
/// Trait for types which are owned by a `Principal`.
trait Principaled {
    /// Returns the `Principal` that owns `self`, using the given hash algorithm.
    fn principal_of_kind(&self, kind: HashKind) -> Principal;
    /// Returns the `Principal` that owns `self`, using the default hash algorithm.
    /// Provided in terms of [`Principaled::principal_of_kind`].
    fn principal(&self) -> Principal {
        self.principal_of_kind(HashKind::default())
    }
}
/// An identifier for a block in a tree.
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone, Default)]
pub struct Path {
    /// The principal that owns the tree this path belongs to.
    root: Principal,
    /// The string components of the path. The first component is the textual form of `root`
    /// (see `TryFrom<&str> for Path`).
    components: Vec<String>,
}
  919. impl Path {
  920. /// The character that is used to separate path components.
  921. const SEP: char = '/';
  922. /// The limit, in bytes, of a `Path`'s length.
  923. const BYTE_LIMIT: usize = 4096;
  924. /// Returns a result which, when successful, contains the index after the last character in the
  925. /// current path component.
  926. fn component_end<I: Iterator<Item = (usize, char)>>(
  927. start: usize,
  928. first: char,
  929. pairs: &mut I,
  930. ) -> std::result::Result<usize, PathError> {
  931. if first == Path::SEP {
  932. return Err(PathError::EmptyComponent);
  933. }
  934. let end;
  935. let mut last = start;
  936. loop {
  937. match pairs.next() {
  938. Some((index, Path::SEP)) => {
  939. end = index;
  940. break;
  941. }
  942. Some((index, _)) => last = index,
  943. None => {
  944. end = last + 1;
  945. break;
  946. }
  947. }
  948. }
  949. if end == start {
  950. Err(PathError::EmptyComponent)
  951. } else {
  952. Ok(end)
  953. }
  954. }
  955. /// Asserts that the number of bytes in the given string is no more than `Path::BYTE_LIMIT`.
  956. fn assert_not_too_long(string: &str) -> std::result::Result<(), PathError> {
  957. let len = string.len();
  958. if len > Path::BYTE_LIMIT {
  959. return Err(PathError::PathTooLong(len));
  960. }
  961. Ok(())
  962. }
  963. /// Returns true if `other` is a subpath of this `Path`.
  964. fn contains(&self, other: &Path) -> bool {
  965. if self.root != other.root {
  966. return false;
  967. };
  968. // This path must be no longer than the other path.
  969. if self.components.len() > other.components.len() {
  970. return false;
  971. }
  972. // Skip the component containing the owner.
  973. let self_iter = self.components.iter().skip(1);
  974. let other_iter = other.components.iter().skip(1);
  975. for pair in self_iter.zip(other_iter) {
  976. if pair.0 != pair.1 {
  977. return false;
  978. }
  979. }
  980. true
  981. }
  982. }
  983. impl<'s> TryFrom<&'s str> for Path {
  984. type Error = PathError;
  985. fn try_from(string: &'s str) -> std::result::Result<Path, PathError> {
  986. Path::assert_not_too_long(string)?;
  987. let mut pairs = string.char_indices();
  988. let mut components = Vec::new();
  989. let mut last_end = 0;
  990. while let Some((start, c)) = pairs.next() {
  991. let end = Path::component_end(start, c, &mut pairs)?;
  992. last_end = end;
  993. let slice = &string[start..end];
  994. components.push(slice.to_string());
  995. }
  996. // An empty component is added to the end to indicate if there was a trailing slash.
  997. if string.len() - 1 == last_end {
  998. components.push("".to_string());
  999. }
  1000. let leading = components
  1001. .get(0)
  1002. .ok_or(PathError::InvalidLeadingComponent)?;
  1003. let hash =
  1004. Hash::try_from(leading.as_str()).map_err(|_| PathError::InvalidLeadingComponent)?;
  1005. Ok(Path {
  1006. root: Principal(hash),
  1007. components,
  1008. })
  1009. }
  1010. }
  1011. impl Display for Path {
  1012. fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
  1013. if self.components.is_empty() {
  1014. return write!(f, "");
  1015. };
  1016. let mut iter = self.components.iter();
  1017. let first = iter.next().unwrap();
  1018. let mut output = write!(f, "{}", first);
  1019. for component in iter {
  1020. output = write!(f, "{}{}", Path::SEP, component)
  1021. }
  1022. output
  1023. }
  1024. }
/// Errors which can occur when converting a string to a `Path`.
#[derive(Debug, PartialEq)]
pub enum PathError {
    /// Occurs when the number of bytes in a string is greater than `Path::BYTE_LIMIT`.
    /// Carries the offending length in bytes.
    PathTooLong(usize),
    /// Indicates that a path string was empty.
    Empty,
    /// Occurs when a component in a path string was empty.
    EmptyComponent,
    /// Occurs when the leading component of a path is not in the correct format.
    /// The leading component must parse as a `Hash` identifying the root principal.
    InvalidLeadingComponent,
}
  1037. impl Display for PathError {
  1038. fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
  1039. match self {
  1040. PathError::PathTooLong(length) => formatter.write_fmt(format_args!(
  1041. "path contained {} bytes, which is over the {} byte limit",
  1042. length,
  1043. Path::BYTE_LIMIT
  1044. )),
  1045. PathError::Empty => formatter.write_str("path was empty"),
  1046. PathError::EmptyComponent => formatter.write_str("component of path was empty"),
  1047. PathError::InvalidLeadingComponent => {
  1048. formatter.write_str("invalid leading path component")
  1049. }
  1050. }
  1051. }
  1052. }
/// An instant in time represented by the number of seconds since January 1st 1970, 00:00:00 UTC.
// Stored as whole seconds; sub-second precision is not represented.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, PartialOrd, Ord, Default)]
struct Epoch(u64);
  1056. impl Epoch {
  1057. /// Returns the current epoch time.
  1058. fn now() -> Epoch {
  1059. let now = SystemTime::now();
  1060. // If the system clock is before the unix epoch, just panic.
  1061. let epoch = now.duration_since(SystemTime::UNIX_EPOCH).unwrap();
  1062. Epoch(epoch.as_secs())
  1063. }
  1064. }
// `Epoch` is a single `u64` (and derives `Clone`), so it is trivially copyable.
impl Copy for Epoch {}
  1066. impl Add<Duration> for Epoch {
  1067. type Output = Self;
  1068. fn add(self, other: Duration) -> Self {
  1069. Epoch(self.0 + other.as_secs())
  1070. }
  1071. }
  1072. impl Sub<Duration> for Epoch {
  1073. type Output = Self;
  1074. fn sub(self, other: Duration) -> Self {
  1075. Epoch(self.0 - other.as_secs())
  1076. }
  1077. }
/// The serial number of a block fragment.
// A newtype wrapper so fragment serials are not confused with other `u32` values.
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Hashable)]
struct FragmentSerial(u32);
  1081. #[cfg(test)]
  1082. mod tests {
  1083. use std::{fs::OpenOptions, io::Cursor};
  1084. use crate::crypto::{tpm::TpmCredStore, CredStore, CredsPriv};
  1085. use super::*;
  1086. use tempdir::TempDir;
  1087. use test_helpers::*;
  1088. fn path_from_str_test_case(
  1089. expected: std::result::Result<Path, PathError>,
  1090. input: &str,
  1091. ) -> std::result::Result<(), PathError> {
  1092. let result = Path::try_from(input);
  1093. assert_eq!(expected, result);
  1094. Ok(())
  1095. }
  1096. #[test]
  1097. fn path_from_str_multiple_components_ok() -> std::result::Result<(), PathError> {
  1098. let expected = make_path(vec!["red", "green", "blue"]);
  1099. let input = format!("{}/red/green/blue", expected.root.0);
  1100. path_from_str_test_case(Ok(expected), input.as_str())?;
  1101. Ok(())
  1102. }
  1103. #[test]
  1104. fn path_from_str_one_component_ok() -> std::result::Result<(), PathError> {
  1105. let expected = make_path(vec![]);
  1106. let input = expected.root.0.to_string();
  1107. path_from_str_test_case(Ok(expected), input.as_str())?;
  1108. Ok(())
  1109. }
  1110. #[test]
  1111. fn path_from_str_trailing_slash_ok() -> std::result::Result<(), PathError> {
  1112. // Notice the empty component at the end of this path due to the trailing slash.
  1113. let expected = make_path(vec!["orange", "banana", "shotgun", ""]);
  1114. let input = format!("{}/orange/banana/shotgun/", expected.root.0);
  1115. path_from_str_test_case(Ok(expected), input.as_str())?;
  1116. Ok(())
  1117. }
  1118. #[test]
  1119. fn path_from_str_path_too_long_fail() -> std::result::Result<(), PathError> {
  1120. let principal = make_principal();
  1121. let input = format!("{}/{}", principal.0, "*".repeat(4097));
  1122. let expected = Err(PathError::PathTooLong(input.len()));
  1123. path_from_str_test_case(expected, input.as_str())?;
  1124. Ok(())
  1125. }
  1126. #[test]
  1127. fn path_from_str_multiple_slashes_fail() -> std::result::Result<(), PathError> {
  1128. let expected = Err(PathError::EmptyComponent);
  1129. let input = format!("{}//orange", make_principal().0);
  1130. path_from_str_test_case(expected, input.as_str())?;
  1131. Ok(())
  1132. }
  1133. #[test]
  1134. fn path_from_str_leading_slash_fail() -> std::result::Result<(), PathError> {
  1135. let expected = Err(PathError::EmptyComponent);
  1136. let input = format!("/{}/orange/banana/shotgun", make_principal().0);
  1137. path_from_str_test_case(expected, input.as_str())?;
  1138. Ok(())
  1139. }
  1140. #[test]
  1141. fn path_round_trip() -> std::result::Result<(), PathError> {
  1142. let expected = make_path(vec!["interstitial", "inter-related", "intersections"]);
  1143. let actual = Path::try_from(expected.to_string().as_str())?;
  1144. assert_eq!(expected, actual);
  1145. Ok(())
  1146. }
  1147. #[test]
  1148. fn path_contains_true() {
  1149. let larger = make_path(vec!["apps"]);
  1150. let smaller = make_path(vec!["apps", "bohdi"]);
  1151. assert!(larger.contains(&smaller));
  1152. }
  1153. #[test]
  1154. fn path_contains_true_only_owner() {
  1155. let larger = make_path(vec![]);
  1156. let smaller = make_path(vec![]);
  1157. assert!(larger.contains(&smaller));
  1158. }
  1159. #[test]
  1160. fn path_contains_false_self_is_longer() {
  1161. let first = make_path(vec!["apps", "bohdi"]);
  1162. let second = make_path(vec!["apps"]);
  1163. assert!(!first.contains(&second));
  1164. }
  1165. #[test]
  1166. fn path_contains_false_same_owners() {
  1167. let first = make_path(vec!["apps"]);
  1168. let second = make_path(vec!["nodes"]);
  1169. assert!(!first.contains(&second));
  1170. }
  1171. #[test]
  1172. fn path_contains_false_different_owners() {
  1173. let first = make_path(vec!["apps"]);
  1174. let mut second = make_path(vec!["apps"]);
  1175. second.root = Principal(Hash::Sha2_256(PRINCIPAL2));
  1176. assert!(!first.contains(&second));
  1177. }
  1178. #[test]
  1179. fn brotli_compress_decompress() {
  1180. const SECT_SZ: usize = SECTOR_SZ_DEFAULT;
  1181. const SECT_CT: usize = 16;
  1182. let params = BrotliParams::new(SECT_SZ, 8, 20);
  1183. let mut memory = Cursor::new([0u8; SECT_SZ * SECT_CT]);
  1184. {
  1185. let write: CompressorWriter<_> = params
  1186. .clone()
  1187. .try_compose(&mut memory)
  1188. .expect("compose for write failed");
  1189. write_fill(write, SECT_SZ, SECT_CT);
  1190. }
  1191. memory.seek(SeekFrom::Start(0)).expect("seek failed");
  1192. {
  1193. let read: Decompressor<_> = params
  1194. .try_compose(&mut memory)
  1195. .expect("compose for read failed");
  1196. read_check(read, SECT_SZ, SECT_CT);
  1197. }
  1198. }
  1199. fn make_sectored_buf(sect_sz: usize, sect_ct: usize) -> SectoredBuf<SectoredCursor<Vec<u8>>> {
  1200. SectoredBuf::new()
  1201. .try_compose(SectoredCursor::new(vec![0u8; sect_sz * sect_ct], sect_sz))
  1202. .expect("compose for sectored buffer failed")
  1203. }
  1204. #[test]
  1205. fn sectored_buf_fill_inner() {
  1206. const SECT_SZ: usize = SECTOR_SZ_DEFAULT;
  1207. const SECT_CT: usize = 16;
  1208. let mut sectored = make_sectored_buf(SECT_SZ, SECT_CT);
  1209. let sect_sz = sectored.sector_sz();
  1210. assert_eq!(0, sect_sz % 16);
  1211. let chunk_sz = sect_sz / 16;
  1212. let chunk_ct = SECT_CT * 16;
  1213. write_fill(&mut sectored, chunk_sz, chunk_ct);
  1214. }
  1215. #[test]
  1216. fn sectored_buf_write_read_sequential() {
  1217. const SECT_SZ: usize = SECTOR_SZ_DEFAULT;
  1218. const SECT_CT: usize = 16;
  1219. let mut sectored = make_sectored_buf(SECT_SZ, SECT_CT);
  1220. let sect_sz = sectored.sector_sz();
  1221. assert_eq!(0, sect_sz % 16);
  1222. let chunk_sz = sect_sz / 16;
  1223. // We subtract one here so that the underlying buffer is not completely filled. This
  1224. // exercises the length limiting capability of the sectored buffer.
  1225. let chunk_ct = SECT_CT * 16 - 1;
  1226. write_fill(&mut sectored, chunk_sz, chunk_ct);
  1227. sectored.seek(SeekFrom::Start(0)).expect("seek failed");
  1228. read_check(&mut sectored, chunk_sz, chunk_ct);
  1229. }
  1230. #[test]
  1231. fn sectored_buf_sect_sz_too_small_is_error() {
  1232. const MIN: usize = SectoredBuf::<()>::RESERVED;
  1233. let result = SectoredBuf::new().try_compose(SectoredCursor::new([0u8; MIN], MIN - 1));
  1234. assert!(result.is_err());
  1235. }
  1236. #[test]
  1237. fn sectored_buf_len_preserved() {
  1238. const SECT_SZ: usize = SECTOR_SZ_DEFAULT;
  1239. const SECT_CT: usize = 16;
  1240. let mut sectored = make_sectored_buf(SECT_SZ, SECT_CT);
  1241. let expected = vec![42u8; 12];
  1242. // We need to ensure that writing expected will not fill up the buffer in sectored.
  1243. assert!(expected.len() < sectored.sector_sz() - SectoredBuf::<()>::RESERVED);
  1244. sectored.write_all(&expected).expect("write failed");
  1245. sectored.flush().expect("flush failed");
  1246. let inner = sectored.into_inner();
  1247. let mut sectored = SectoredBuf::new()
  1248. .try_compose(inner)
  1249. .expect("failed to compose sectored buffer");
  1250. let mut actual = vec![0u8; expected.len()];
  1251. sectored
  1252. .fill_buf(actual.as_mut_slice())
  1253. .expect("failed to fill actual");
  1254. assert_eq!(expected, actual);
  1255. }
  1256. #[test]
  1257. fn sectored_buf_seek() {
  1258. let sect_sz = 16usize;
  1259. let sect_ct = 16usize;
  1260. let cap = sect_sz * sect_ct - std::mem::size_of::<usize>();
  1261. let source = {
  1262. let mut source = Vec::with_capacity(cap);
  1263. source.extend(
  1264. std::iter::successors(Some(0u8), |n| if *n <= 254 { Some(*n + 1) } else { None })
  1265. .take(cap),
  1266. );
  1267. source
  1268. };
  1269. let mut sectored = make_sectored_buf(sect_sz, sect_ct);
  1270. sectored.write(&source).expect("write failed");
  1271. let mut buf = [0u8; 1];
  1272. let end = cap.try_into().expect("cap cannot fit into a u8");
  1273. for pos in (0..end).rev() {
  1274. sectored
  1275. .seek(SeekFrom::Start(pos as u64))
  1276. .expect("seek failed");
  1277. sectored.read(&mut buf).expect("read failed");
  1278. assert_eq!(pos, buf[0]);
  1279. }
  1280. }
  1281. #[test]
  1282. fn sectored_buf_write_read_random() {
  1283. const SECT_SZ: usize = 16;
  1284. const SECT_CT: usize = 16;
  1285. const CAP: usize = SECT_SZ * SECT_CT - std::mem::size_of::<usize>();
  1286. let source = {
  1287. let mut expected = Vec::with_capacity(CAP);
  1288. expected.extend(
  1289. std::iter::successors(Some(0u8), |n| if *n <= 254 { Some(*n + 1) } else { None })
  1290. .take(CAP),
  1291. );
  1292. expected
  1293. };
  1294. let indices: Vec<(usize, usize)> = {
  1295. let rando = Randomizer::new([3u8; Randomizer::HASH.len()]);
  1296. let rando2 = Randomizer::new([5u8; Randomizer::HASH.len()]);
  1297. rando
  1298. .zip(rando2)
  1299. .take(SECT_CT)
  1300. .map(|(mut first, mut second)| {
  1301. first %= source.len();
  1302. second &= source.len();
  1303. let low = first.min(second);
  1304. let high = first.max(second);
  1305. (low, high)
  1306. })
  1307. .collect()
  1308. };
  1309. let mut sectored = make_sectored_buf(SECT_SZ, SECT_CT);
  1310. sectored
  1311. .write_all(&[0u8; CAP])
  1312. .expect("failed to fill sectored");
  1313. sectored.flush().expect("flush failed");
  1314. for (_k, (low, high)) in indices.iter().enumerate() {
  1315. sectored
  1316. .seek(SeekFrom::Start(*low as u64))
  1317. .expect("seek failed");
  1318. let src = &source[*low..*high];
  1319. sectored.write(src).expect("write failed");
  1320. }
  1321. sectored.flush().expect("flush failed");
  1322. let mut buf = vec![0u8; CAP];
  1323. for (_k, (low, high)) in indices.iter().enumerate() {
  1324. sectored
  1325. .seek(SeekFrom::Start(*low as u64))
  1326. .expect("seek failed");
  1327. let actual = &mut buf[*low..*high];
  1328. sectored.fill_buf(actual).expect("read failed");
  1329. let expected = &source[*low..*high];
  1330. assert_eq!(expected, actual);
  1331. }
  1332. }
  1333. #[test]
  1334. fn sectored_buf_read_past_end() {
  1335. const LEN: usize = 32;
  1336. let mut sectored = SectoredBuf::new()
  1337. .try_compose(SectoredCursor::new([0u8; LEN], LEN))
  1338. .expect("compose failed");
  1339. const BUF_LEN: usize = LEN - SectoredBuf::<()>::RESERVED + 1;
  1340. sectored.write(&[1u8; BUF_LEN - 1]).expect("write failed");
  1341. sectored.seek(SeekFrom::Start(0)).expect("seek failed");
  1342. let mut buf = [0u8; BUF_LEN];
  1343. // Note that buf is one byte longer than the available capacity in the cursor.
  1344. sectored.read(&mut buf).expect("read failed");
  1345. assert_eq!(&[1u8; BUF_LEN - 1], &buf[..(BUF_LEN - 1)]);
  1346. assert_eq!(0u8, buf[BUF_LEN - 1]);
  1347. }
  1348. /// Tests that the data written in try_compose is actually written back to the underlying stream.
  1349. #[test]
  1350. fn sectored_buf_write_back() {
  1351. let mut sectored = SectoredBuf::new()
  1352. .try_compose(SectoredCursor::new(vec![0u8; 24], 16))
  1353. .expect("compose failed");
  1354. let expected = [1u8; 8];
  1355. sectored.write(&expected).expect("first write failed");
  1356. sectored.write(&[2u8; 8]).expect("second write failed");
  1357. sectored.seek(SeekFrom::Start(0)).expect("seek failed");
  1358. let mut actual = [0u8; 8];
  1359. sectored.read(&mut actual).expect("read failed");
  1360. assert_eq!(expected, actual);
  1361. }
  1362. #[test]
  1363. fn sectored_buf_write_past_end() {
  1364. const LEN: usize = 8;
  1365. let mut sectored = SectoredBuf::new()
  1366. .try_compose(SectoredCursor::new(vec![0u8; 0], LEN))
  1367. .expect("compos failed");
  1368. let expected = [1u8; LEN + 1];
  1369. sectored.write(&expected).expect("write failed");
  1370. sectored.seek(SeekFrom::Start(0)).expect("seek failed");
  1371. let mut actual = [0u8; LEN + 1];
  1372. sectored.read(&mut actual).expect("read failed");
  1373. assert_eq!(expected, actual);
  1374. }
  1375. /// Tests that a new `Trailered<T>` can be created from an empty stream.
  1376. #[test]
  1377. fn trailered_new_empty() {
  1378. let cursor = Cursor::new(Vec::new());
  1379. let (_, trailer): (_, Option<String>) =
  1380. Trailered::new(cursor).expect("Trailered::new failed");
  1381. assert_eq!(None, trailer);
  1382. }
  1383. /// Tests that an error is returned when an attempt is made to create a `Trailered<T>` from a
  1384. /// non-empty stream which is too short.
  1385. #[test]
  1386. fn trailered_new_inner_too_short_is_error() {
  1387. let cursor = Cursor::new([0u8; 5]);
  1388. let result = Trailered::<_, u128>::new(cursor);
  1389. assert!(result.is_err())
  1390. }
  1391. /// Checks that the trailer is persisted to the inner stream.
  1392. #[test]
  1393. fn trailered_trailer_persisted() {
  1394. const EXPECTED: &str = "Everyone deserves to be remembered,";
  1395. let cursor = {
  1396. let cursor = Cursor::new(Vec::new());
  1397. let (mut trailered, trailer) =
  1398. Trailered::<_, String>::new(cursor).expect("Trailered::new failed");
  1399. assert!(trailer.is_none());
  1400. trailered
  1401. .flush(&EXPECTED.to_string())
  1402. .expect("flush failed");
  1403. trailered.inner
  1404. };
  1405. let (_, trailer) = Trailered::<_, String>::new(cursor).expect("Trailered::new failed");
  1406. assert_eq!(EXPECTED, trailer.unwrap());
  1407. }
  1408. #[test]
  1409. fn trailered_written_data_persisted() {
  1410. const EXPECTED: &[u8] = b"and every life has something to teach us.";
  1411. let mut cursor = {
  1412. let (mut trailered, _) = Trailered::<_, u8>::new(Cursor::new(Vec::new()))
  1413. .expect("failed to create first trailered");
  1414. trailered.write(EXPECTED).expect("write failed");
  1415. trailered.flush(&1).expect("flush failed");
  1416. trailered.inner
  1417. };
  1418. cursor.seek(SeekFrom::Start(0)).expect("seek failed");
  1419. let (mut trailered, _) =
  1420. Trailered::<_, u8>::new(cursor).expect("failed to created second trailered");
  1421. let mut actual = vec![0u8; EXPECTED.len()];
  1422. trailered.read(&mut actual).expect("read failed");
  1423. assert_eq!(EXPECTED, actual);
  1424. }
  1425. fn trailered_for_seek_test() -> Trailered<impl Read + Seek, u8> {
  1426. let (mut trailered, _) =
  1427. Trailered::new(Cursor::new(Vec::new())).expect("failed to create trailered");
  1428. trailered
  1429. .write(&[0, 1, 2, 3, 4, 5, 6, 7])
  1430. .expect("write failed");
  1431. trailered.seek(SeekFrom::Start(0)).expect("seek failed");
  1432. trailered
  1433. }
  1434. #[test]
  1435. fn trailered_seek_from_start() {
  1436. const EXPECTED: u8 = 2;
  1437. let mut trailered = trailered_for_seek_test();
  1438. trailered
  1439. .seek(SeekFrom::Start(EXPECTED as u64))
  1440. .expect("seek failed");
  1441. let mut actual = [0u8; 1];
  1442. trailered.read(&mut actual).expect("read failed");
  1443. assert_eq!(EXPECTED, actual[0]);
  1444. }
  1445. #[test]
  1446. fn trailered_seek_from_curr() {
  1447. const EXPECTED: u8 = 5;
  1448. let mut trailered = trailered_for_seek_test();
  1449. trailered
  1450. .seek(SeekFrom::Start(6))
  1451. .expect("seek from start failed");
  1452. trailered
  1453. .seek(SeekFrom::Current(-1))
  1454. .expect("seek from current failed");
  1455. let mut actual = [0u8; 1];
  1456. trailered.read(&mut actual).expect("read failed");
  1457. assert_eq!(EXPECTED, actual[0]);
  1458. }
  1459. #[test]
  1460. fn trailered_seek_from_end() {
  1461. const EXPECTED: u8 = 7;
  1462. let mut trailered = trailered_for_seek_test();
  1463. trailered.seek(SeekFrom::End(-1)).expect("seek failed");
  1464. let mut actual = [0u8; 1];
  1465. trailered.read(&mut actual).expect("read failed");
  1466. assert_eq!(EXPECTED, actual[0]);
  1467. }
  1468. /// Tests that a read past the end of the body in a `Trailered<T>` is not allowed.
  1469. #[test]
  1470. fn trailered_read_limited_to_body_len() {
  1471. let (mut trailered, trailer) =
  1472. Trailered::new(Cursor::new(Vec::new())).expect("failed to create Trailered");
  1473. assert!(trailer.is_none());
  1474. const EXPECTED: &[u8] = &[1, 1, 1, 1, 1, 0, 0, 0];
  1475. trailered.write(&[1u8; 5]).expect("write failed");
  1476. trailered.flush(&1u8).expect("flush failed");
  1477. trailered.seek(SeekFrom::Start(0)).expect("seek failed");
  1478. let mut actual = vec![0u8; EXPECTED.len()];
  1479. // If read goes past the end of the body then there will be a 1 in the sixth position of
  1480. // actual.
  1481. trailered.read(&mut actual).expect("read failed");
  1482. assert_eq!(EXPECTED, actual);
  1483. }
  1484. #[test]
  1485. fn block_can_create_empty() {
  1486. let harness = SwtpmHarness::new().expect("failed to start swtpm");
  1487. let context = harness.context().expect("failed to retrieve context");
  1488. let cred_store = TpmCredStore::new(context, harness.state_path())
  1489. .expect("failed to create TpmCredStore");
  1490. let creds = cred_store.node_creds().expect("failed to get node creds");
  1491. BlockOpenOptions::new()
  1492. .with_inner(Cursor::new(Vec::<u8>::new()))
  1493. .with_creds(creds)
  1494. .with_encrypt(true)
  1495. .open()
  1496. .expect("failed to open block");
  1497. }
  1498. #[test]
  1499. fn block_contents_persisted() {
  1500. const EXPECTED: &[u8] = b"Silly sordid sulking sultans.";
  1501. let temp_dir = TempDir::new("btlib").expect("failed to create temp dir");
  1502. let file_path = temp_dir.path().join("test.blk").to_owned();
  1503. let harness = SwtpmHarness::new().expect("failed to start swtpm");
  1504. let context = harness.context().expect("failed to retrieve context");
  1505. let cred_store = TpmCredStore::new(context, harness.state_path())
  1506. .expect("failed to create TpmCredStore");
  1507. let root_creds = cred_store
  1508. .gen_root_creds("(1337Prestidigitation7331)")
  1509. .expect("failed to get root creds");
  1510. let mut node_creds = cred_store.node_creds().expect("failed to get node creds");
  1511. let writecap = root_creds
  1512. .issue_writecap(
  1513. node_creds.principal(),
  1514. vec!["nodes".to_string(), "phone".to_string()],
  1515. Epoch::now() + Duration::from_secs(3600),
  1516. )
  1517. .expect("failed to issue writecap");
  1518. let path = writecap.path.clone();
  1519. node_creds.set_writecap(writecap);
  1520. {
  1521. let file = OpenOptions::new()
  1522. .create_new(true)
  1523. .write(true)
  1524. .read(true)
  1525. .open(&file_path)
  1526. .expect("failed to open file");
  1527. let mut block = BlockOpenOptions::new()
  1528. .with_inner(file)
  1529. .with_creds(node_creds.clone())
  1530. .with_encrypt(true)
  1531. .open()
  1532. .expect("failed to open block");
  1533. block.set_path(path);
  1534. block.write(EXPECTED).expect("failed to write");
  1535. block.flush().expect("flush failed");
  1536. }
  1537. let file = OpenOptions::new()
  1538. .read(true)
  1539. .open(&file_path)
  1540. .expect("failed to reopen file");
  1541. let mut block = BlockOpenOptions::new()
  1542. .with_inner(file)
  1543. .with_creds(node_creds)
  1544. .with_encrypt(true)
  1545. .open()
  1546. .expect("failed to reopen block");
  1547. let mut actual = [0u8; EXPECTED.len()];
  1548. block.read(&mut actual).expect("read failed");
  1549. assert_eq!(EXPECTED, actual);
  1550. }
  1551. }