local_fs.rs 71 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859
  1. // SPDX-License-Identifier: AGPL-3.0-or-later
  2. use crate::{msg::*, server::FsProvider};
  3. use btlib::{
  4. accessor::Accessor,
  5. bterr,
  6. crypto::{rand_vec, Creds, Decrypter, HashKind, Signer, SymKey},
  7. error::BtErr,
  8. AuthzAttrs, BlockAccessor, BlockError, BlockMeta, BlockMetaSecrets, BlockOpenOptions,
  9. BlockPath, BlockReader, DirEntry, Directory, Epoch, FileBlock, FlushMeta, IssuedProcRec,
  10. MetaAccess, MetaReader, Positioned, Principal, Principaled, ProcRec, Result, Split, TrySeek,
  11. ZeroExtendable,
  12. };
  13. use btserde::{read_from, write_to};
  14. use core::future::Ready;
  15. use log::{debug, error, warn};
  16. use positioned_io::{ReadAt, Size};
  17. use serde::{Deserialize, Serialize};
  18. use std::{
  19. collections::hash_map::{self, HashMap},
  20. fmt::{Display, Formatter},
  21. fs::File,
  22. future::Future,
  23. io::{self, Seek, SeekFrom, Write as IoWrite},
  24. net::{IpAddr, Ipv6Addr},
  25. ops::{Deref, DerefMut},
  26. path::{Path, PathBuf},
  27. sync::{
  28. atomic::{AtomicU64, Ordering},
  29. Arc,
  30. },
  31. time::Duration,
  32. };
  33. use tokio::sync::{
  34. Mutex, MutexGuard, OwnedMutexGuard, OwnedRwLockReadGuard, RwLock, RwLockReadGuard,
  35. RwLockWriteGuard,
  36. };
  37. use zeroize::ZeroizeOnDrop;
  38. pub use private::{Authorizer, AuthzContext, Error, LocalFs, ModeAuthorizer};
  39. mod private {
  40. use super::*;
/// Inode numbers identifying blocks within this filesystem tree.
type Inode = u64;
/// Identifier for an open handle on an inode.
type Handle = u64;
/// Errors specific to the local filesystem layer.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum Error {
    /// The given inode has no entry in the inode table.
    NotOpen(Inode),
    /// The given handle is not valid for the given inode.
    InvalidHandle { inode: Inode, handle: Handle },
    /// No more handles could be allocated for the given inode.
    NoHandlesAvailable(Inode),
    /// The given inode could not be found.
    InodeNotFound(Inode),
    /// A mutable operation was attempted through a handle opened read-only.
    ReadOnlyHandle(Handle),
    /// The handle is owned by a different block path than the requestor's.
    WrongOwner,
}
impl Display for Error {
    /// Writes a human-readable description of each variant, interpolating the offending
    /// inode and/or handle number where one is available.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        match self {
            Error::NotOpen(inode) => write!(f, "inode {inode} is not open"),
            Error::InvalidHandle { inode, handle } => {
                write!(f, "invalid handle {handle} for inode {inode}")
            }
            Error::NoHandlesAvailable(inode) => {
                write!(f, "no handles are available for inode {inode}")
            }
            Error::InodeNotFound(inode) => write!(f, "inode {inode} could not be found"),
            Error::ReadOnlyHandle(handle) => {
                write!(f, "cannot mutably access read-only handle {handle}")
            }
            Error::WrongOwner => write!(f, "handle is not owned by the requestor"),
        }
    }
}
  70. trait BlockPathExt {
  71. fn assert_eq(&self, other: &BlockPath) -> Result<()>;
  72. }
  73. impl BlockPathExt for BlockPath {
  74. fn assert_eq(&self, other: &BlockPath) -> Result<()> {
  75. if self != other {
  76. Err(Error::WrongOwner.into())
  77. } else {
  78. Ok(())
  79. }
  80. }
  81. }
// Marker impl so `Error` participates in standard error chains (boxing, `?` conversions).
impl std::error::Error for Error {}
/// This type provides context for an authorization decision as to whether a given process will
/// be allowed to access a block.
pub struct AuthzContext<'a> {
    /// The path from which this request was received.
    pub from: &'a BlockPath,
    /// The attributes of the principal whose access is being authorized.
    pub attrs: &'a AuthzAttrs,
    /// A reference to the metadata of a block, the access to which is being authorized.
    pub meta: &'a BlockMeta,
}
impl<'a> AuthzContext<'a> {
    /// Creates a context borrowing the request path, principal attributes, and block
    /// metadata for the lifetime of the decision.
    fn new(from: &'a BlockPath, attrs: &'a AuthzAttrs, meta: &'a BlockMeta) -> Self {
        Self { from, attrs, meta }
    }
}
/// A trait for types which can render authorization decisions.
///
/// Each method receives the full [AuthzContext] describing who is asking and for which
/// block; an [io::Result] lets implementations attach a denial reason.
pub trait Authorizer {
    /// Returns [Ok] if read authorization is granted, and [Err] otherwise.
    fn can_read(&self, ctx: &AuthzContext<'_>) -> io::Result<()>;
    /// Returns [Ok] if write authorization is granted, and [Err] otherwise.
    fn can_write(&self, ctx: &AuthzContext<'_>) -> io::Result<()>;
    /// Returns [Ok] if execute authorization is granted, and [Err] otherwise.
    fn can_exec(&self, ctx: &AuthzContext<'_>) -> io::Result<()>;
}
  107. /// A particularly simple authorizer that just looks at the mode bits in the block metadata
  108. /// to make authorization decisions.
  109. pub struct ModeAuthorizer;
  110. impl ModeAuthorizer {
  111. fn authorize(mode: u32, mask: u32, denied_msg: &str) -> io::Result<()> {
  112. if (mode & mask) != 0 {
  113. Ok(())
  114. } else {
  115. Err(io::Error::new(io::ErrorKind::PermissionDenied, denied_msg))
  116. }
  117. }
  118. fn user_is_root(ctx: &AuthzContext<'_>) -> bool {
  119. ctx.attrs.uid == 0
  120. }
  121. }
  122. impl Authorizer for ModeAuthorizer {
  123. fn can_read(&self, ctx: &AuthzContext<'_>) -> io::Result<()> {
  124. if Self::user_is_root(ctx) {
  125. return Ok(());
  126. }
  127. let secrets = ctx.meta.body().secrets()?;
  128. let mask = (libc::S_IRUSR * (secrets.uid == ctx.attrs.uid) as u32)
  129. | (libc::S_IRGRP * (secrets.gid == ctx.attrs.gid) as u32)
  130. | libc::S_IROTH;
  131. Self::authorize(secrets.mode, mask, "read access denied")
  132. }
  133. fn can_write(&self, ctx: &AuthzContext<'_>) -> io::Result<()> {
  134. if Self::user_is_root(ctx) {
  135. return Ok(());
  136. }
  137. let secrets = ctx.meta.body().secrets()?;
  138. let mask = (libc::S_IWUSR * (secrets.uid == ctx.attrs.uid) as u32)
  139. | (libc::S_IWGRP * (secrets.gid == ctx.attrs.gid) as u32)
  140. | libc::S_IWOTH;
  141. Self::authorize(secrets.mode, mask, "write access denied")
  142. }
  143. fn can_exec(&self, ctx: &AuthzContext<'_>) -> io::Result<()> {
  144. if Self::user_is_root(ctx) {
  145. return Ok(());
  146. }
  147. let secrets = ctx.meta.body().secrets()?;
  148. let mask = (libc::S_IXUSR * (secrets.uid == ctx.attrs.uid) as u32)
  149. | (libc::S_IXGRP * (secrets.gid == ctx.attrs.gid) as u32)
  150. | libc::S_IXOTH;
  151. Self::authorize(secrets.mode, mask, "exec access denied")
  152. }
  153. }
/// The state of an accessor while it is detached from its block (see `Accessor::combine`
/// and `split`); `None` while a guard has it checked out.
type EmptyAccessor = Option<Accessor<&'static [u8]>>;
/// The pieces shared by both [HandleValue] variants: accessor slot, owner path, and flags.
type HandleValueParts<'a> = (&'a Arc<Mutex<EmptyAccessor>>, &'a Arc<BlockPath>, Flags);
/// An RAII guard which temporarily re-joins a checked-out accessor with a block.
///
/// On construction the accessor is taken out of the mutex slot and combined with the
/// block; on drop the block is split back off and the bare accessor is returned to the
/// slot for subsequent guards.
struct HandleGuard<B: Size, G: DerefMut<Target = EmptyAccessor>> {
    /// Guard over the slot the accessor was taken from; refilled in [Drop].
    guard: G,
    /// The combined accessor. Expected to be `Some` for the guard's whole lifetime
    /// (the slot should hold the accessor at construction); `Deref` unwraps on that
    /// assumption.
    accessor: Option<Accessor<B>>,
    flags: Flags,
}
impl<B: Size, G: DerefMut<Target = EmptyAccessor>> HandleGuard<B, G> {
    /// Takes the accessor out of `guard`'s slot (when present) and combines it with
    /// `block`.
    fn new(flags: Flags, mut guard: G, block: B) -> Self {
        let accessor = guard
            .take()
            .map(move |accessor| Accessor::combine(accessor, block));
        Self {
            guard,
            accessor,
            flags,
        }
    }
}
impl<B: Size, G: DerefMut<Target = EmptyAccessor>> Drop for HandleGuard<B, G> {
    // Split the block back off the accessor and return the bare accessor to the slot.
    fn drop(&mut self) {
        *self.guard = self.accessor.take().map(|accessor| {
            let (accessor, _) = accessor.split();
            accessor
        });
    }
}
impl<B: Size, G: DerefMut<Target = EmptyAccessor>> Deref for HandleGuard<B, G> {
    type Target = Accessor<B>;
    fn deref(&self) -> &Self::Target {
        // `accessor` is only taken in `drop`, so this does not panic during the guard's
        // lifetime provided the slot was occupied at construction.
        self.accessor.as_ref().unwrap()
    }
}
impl<B: Size, G: DerefMut<Target = EmptyAccessor>> DerefMut for HandleGuard<B, G> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.accessor.as_mut().unwrap()
    }
}
/// The per-handle state stored in an [InodeTableValue].
enum HandleValue {
    /// A handle onto a regular file.
    File {
        /// Slot holding the detached accessor while no guard has it checked out.
        accessor: Arc<Mutex<EmptyAccessor>>,
        /// The block path that opened this handle; access is denied to other paths.
        owner: Arc<BlockPath>,
        flags: Flags,
    },
    /// A handle onto a directory, with its parsed contents cached.
    Directory {
        accessor: Arc<Mutex<EmptyAccessor>>,
        owner: Arc<BlockPath>,
        flags: Flags,
        /// Directory contents read when the handle was converted (see `convert_to_dir`).
        dir: Directory,
    },
}
impl HandleValue {
    /// Creates a new `File` handle value by detaching the block from the given accessor
    /// and storing the remaining accessor state behind a mutex.
    fn new<T: Size>(accessor: Accessor<T>, owner: Arc<BlockPath>, flags: Flags) -> HandleValue {
        let (accessor, ..) = accessor.split();
        HandleValue::File {
            accessor: Arc::new(Mutex::new(Some(accessor))),
            owner,
            flags,
        }
    }
    /// Returns the flags this handle was opened with.
    fn flags(&self) -> Flags {
        match self {
            Self::File { flags, .. } => *flags,
            Self::Directory { flags, .. } => *flags,
        }
    }
    /// Borrows the accessor slot, owner path, and flags common to both variants.
    fn parts(&self) -> HandleValueParts<'_> {
        match self {
            Self::File {
                accessor,
                owner,
                flags,
            } => (accessor, owner, *flags),
            Self::Directory {
                accessor,
                owner,
                flags,
                ..
            } => (accessor, owner, *flags),
        }
    }
    /// Converts this value into the `Directory` variant by reading the directory
    /// contents from the given block.
    ///
    /// Fails if the accessor is still shared (another `Arc` reference exists) or has not
    /// been returned to its slot — both are logic errors.
    fn convert_to_dir<C: Signer + Principaled + Decrypter>(
        self,
        block: &mut FileBlock<C>,
    ) -> Result<HandleValue> {
        let (accessor, owner, flags) = match self {
            Self::File {
                accessor,
                owner,
                flags,
            } => (accessor, owner, flags),
            Self::Directory {
                accessor,
                owner,
                flags,
                ..
            } => (accessor, owner, flags),
        };
        let accessor = Arc::try_unwrap(accessor).map_err(|_| {
            bterr!("LOGIC ERROR: accessor was still in use even though convert_to_dir owns it")
        })?;
        let accessor = accessor
            .into_inner()
            .ok_or_else(|| bterr!("LOGIC ERROR: accessor was not returned to mutex"))?;
        // Temporarily re-join the accessor with the block to read the directory, then
        // detach it again for storage.
        let mut accessor = Accessor::combine(accessor, block);
        let dir = accessor.read_dir()?;
        let (accessor, ..) = accessor.split();
        Ok(HandleValue::Directory {
            dir,
            accessor: Arc::new(Mutex::new(Some(accessor))),
            owner,
            flags,
        })
    }
    /// Returns the cached directory, or an error if this is not a directory handle.
    fn directory(&self) -> io::Result<&Directory> {
        match self {
            Self::Directory { dir, .. } => Ok(dir),
            _ => Err(io::Error::new(
                io::ErrorKind::Other,
                "handle is not for a directory",
            )),
        }
    }
    /// Verifies `from` owns this handle, then returns the flags and an owned lock on the
    /// accessor slot.
    async fn lock(&self, from: &BlockPath) -> Result<(Flags, OwnedMutexGuard<EmptyAccessor>)> {
        let (mutex, owner, flags) = self.parts();
        owner.assert_eq(from)?;
        Ok((flags, mutex.clone().lock_owned().await))
    }
    /// Verifies `from` owns this handle, then returns a [HandleGuard] which temporarily
    /// re-joins the accessor with `block`.
    async fn guard<'a, B: Size>(
        &'a self,
        from: &BlockPath,
        block: B,
    ) -> Result<HandleGuard<B, MutexGuard<'a, EmptyAccessor>>> {
        let (mutex, owner, flags) = self.parts();
        owner.assert_eq(from)?;
        let guard = mutex.lock().await;
        Ok(HandleGuard::new(flags, guard, block))
    }
    /// Replaces the flags stored on this handle.
    fn set_flags(&mut self, new_flags: Flags) {
        match self {
            Self::File { flags, .. } => *flags = new_flags,
            Self::Directory { flags, .. } => *flags = new_flags,
        }
    }
}
/// Adapts a guard over an [InodeTableValue] so it can be passed where a block is
/// expected, by forwarding [Size]/[ReadAt]/metadata access to the value's inner
/// [FileBlock].
struct BlockGuard<B> {
    inner: B,
}
impl<B> BlockGuard<B> {
    fn new(inner: B) -> Self {
        Self { inner }
    }
}
impl<C, B: Deref<Target = InodeTableValue<C>>> Deref for BlockGuard<B> {
    type Target = FileBlock<C>;
    fn deref(&self) -> &Self::Target {
        self.inner.block.get_ref()
    }
}
impl<C, B: DerefMut<Target = InodeTableValue<C>>> DerefMut for BlockGuard<B> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.inner.block.get_mut()
    }
}
impl<C, B: Deref<Target = InodeTableValue<C>>> Size for BlockGuard<B> {
    fn size(&self) -> io::Result<Option<u64>> {
        self.inner.block.size()
    }
}
impl<C, B: Deref<Target = InodeTableValue<C>>> ReadAt for BlockGuard<B> {
    fn read_at(&self, pos: u64, buf: &mut [u8]) -> io::Result<usize> {
        self.inner.block.get_ref().read_at(pos, buf)
    }
}
impl<C: 'static, B: Deref<Target = InodeTableValue<C>>> AsRef<BlockMeta> for BlockGuard<B> {
    fn as_ref(&self) -> &BlockMeta {
        self.inner.block.as_ref()
    }
}
/// The in-memory state kept for each open inode.
pub struct InodeTableValue<C> {
    /// Accessor over the block which backs this inode.
    block: Accessor<FileBlock<C>>,
    /// All currently open handles for this inode, keyed by handle number.
    handle_values: HashMap<Handle, HandleValue>,
    /// The next handle number to try when allocating a new handle.
    next_handle: Handle,
    /// Outstanding lookup counts, tracked per requesting block path.
    lookup_counts: HashMap<Arc<BlockPath>, u64>,
    // NOTE(review): presumably marks this inode for deletion once its lookup count
    // reaches zero — confirm against the code which reads this flag.
    delete: bool,
}
impl<C: Signer + Principaled + Decrypter> InodeTableValue<C> {
    /// Creates a new table value for the given block, recording an initial lookup count
    /// of 1 for the opener's path.
    fn new(block: Accessor<FileBlock<C>>, opener: Arc<BlockPath>) -> InodeTableValue<C> {
        let mut lookup_counts = HashMap::with_capacity(1);
        lookup_counts.insert(opener, 1);
        Self {
            block,
            handle_values: HashMap::new(),
            next_handle: 1,
            lookup_counts,
            delete: false,
        }
    }
    /// Builds the error returned when a handle number is not in `handle_values`.
    fn invalid_handle_err(handle: Handle) -> io::Error {
        io::Error::new(io::ErrorKind::Other, format!("invalid handle {handle}"))
    }
    /// Looks up the [HandleValue] for the given handle.
    fn value(&self, handle: Handle) -> io::Result<&HandleValue> {
        self.handle_values
            .get(&handle)
            .ok_or_else(|| Self::invalid_handle_err(handle))
    }
    /// Returns a shared reference to the block backing this inode.
    fn block(&self) -> &FileBlock<C> {
        self.block.get_ref()
    }
    /// Returns a mutable reference to the block backing this inode.
    fn block_mut(&mut self) -> &mut FileBlock<C> {
        self.block.get_mut()
    }
    /// Converts the given handle into a directory handle by reading the directory
    /// contents from this inode's block. See [HandleValue::convert_to_dir].
    fn convert_to_dir(&mut self, handle: Handle) -> io::Result<()> {
        let value = self
            .handle_values
            .remove(&handle)
            .ok_or_else(|| Self::invalid_handle_err(handle))?;
        let block = self.block_mut();
        let value = value.convert_to_dir(block)?;
        self.handle_values.insert(handle, value);
        Ok(())
    }
    /// Returns a guard giving shared access to the given handle, combined with this
    /// inode's block. Fails if `from` does not own the handle.
    async fn handle_guard<'a>(
        &'a self,
        from: &BlockPath,
        handle: Handle,
    ) -> Result<HandleGuard<&FileBlock<C>, MutexGuard<'a, EmptyAccessor>>> {
        let value = self.value(handle)?;
        let block = self.block();
        value.guard(from, block).await
    }
    /// Like [Self::handle_guard], but consumes an owned read guard over this value so
    /// the returned guard is not tied to a borrow of `self`.
    async fn handle_guard_owned(
        guard: OwnedRwLockReadGuard<Self>,
        from: &BlockPath,
        handle: Handle,
    ) -> Result<
        HandleGuard<BlockGuard<OwnedRwLockReadGuard<Self>>, OwnedMutexGuard<EmptyAccessor>>,
    > {
        let value = guard.value(handle)?;
        let (flags, mutex_guard) = value.lock(from).await?;
        let guard = BlockGuard::new(guard);
        Ok(HandleGuard::new(flags, mutex_guard, guard))
    }
    /// Returns a guard giving mutable access to the given handle. Fails if the handle
    /// was not opened writeable, or if `from` does not own it.
    async fn handle_guard_mut<'a>(
        &'a mut self,
        from: &BlockPath,
        handle: Handle,
    ) -> Result<HandleGuard<&mut FileBlock<C>, MutexGuard<'a, EmptyAccessor>>> {
        let value = self
            .handle_values
            .get(&handle)
            .ok_or_else(|| Self::invalid_handle_err(handle))?;
        if !value.flags().writeable() {
            return Err(Error::ReadOnlyHandle(handle).into());
        }
        let inner = self.block.get_mut();
        value.guard(from, inner).await
    }
    /// Allocates a new handle owned by `owner` with the given flags.
    fn new_handle(&mut self, owner: Arc<BlockPath>, flags: Flags) -> Result<Handle> {
        // Guard against the (practically unreachable) case where every handle number is
        // in use, which would make the loop below spin forever.
        if self.handle_values.len() as u64 >= u64::MAX {
            return Err(bterr!("no handles are available"));
        }
        let mut handle_value = HandleValue::new(Accessor::new(self.block())?, owner, flags);
        loop {
            let handle = self.next_handle;
            self.next_handle = self.next_handle.wrapping_add(1);
            match self.handle_values.insert(handle, handle_value) {
                Some(prev) => {
                    // We've wrapped around and this handle is already taken. Put the previous
                    // value back and try again.
                    handle_value = self.handle_values.insert(handle, prev).unwrap();
                }
                // We generated an unused handle. Return it.
                None => return Ok(handle),
            }
        }
    }
    /// Replaces the flags stored for the given handle.
    fn set_flags(&mut self, handle: Handle, flags: Flags) -> Result<()> {
        let handle_value = self
            .handle_values
            .get_mut(&handle)
            .ok_or_else(|| Self::invalid_handle_err(handle))?;
        handle_value.set_flags(flags);
        Ok(())
    }
    /// Removes the given handle; an unknown handle is silently ignored.
    fn forget_handle(&mut self, handle: Handle) {
        self.handle_values.remove(&handle);
    }
    /// Increments the lookup count from the given path by 1.
    fn incr_lookup_count(&mut self, from: &Arc<BlockPath>) {
        match self.lookup_counts.entry(from.clone()) {
            hash_map::Entry::Occupied(mut entry) => {
                // I don't want this to silently overflow.
                let new_count = entry.get().checked_add(1).unwrap();
                *entry.get_mut() = new_count;
            }
            hash_map::Entry::Vacant(entry) => {
                entry.insert(1);
            }
        }
    }
    /// Decrements the lookup count from the given path by the given amount.
    /// Saturates at zero and removes the entry once it reaches zero.
    fn decr_lookup_count(&mut self, from: Arc<BlockPath>, decr: u64) {
        match self.lookup_counts.entry(from) {
            hash_map::Entry::Occupied(mut entry) => {
                let new_count = entry.get().saturating_sub(decr);
                if new_count > 0 {
                    *entry.get_mut() = new_count;
                } else {
                    entry.remove();
                }
            }
            hash_map::Entry::Vacant(..) => (),
        }
    }
    /// Returns the sum of the lookup counts over all paths.
    fn total_lookup_count(&self) -> u64 {
        self.lookup_counts.values().sum()
    }
}
/// Maps open inode numbers to their shared, lock-protected table entries.
type InodeTable<C> = HashMap<Inode, Arc<RwLock<InodeTableValue<C>>>>;
/// An owned read guard over the entire inode table.
type OwnedTableLock<C> = OwnedRwLockReadGuard<InodeTable<C>>;
/// A borrowed read guard over the entire inode table.
type TableLock<'a, C> = RwLockReadGuard<'a, InodeTable<C>>;
/// Wraps a read guard over the inode table and provides access to individual entries.
struct TableGuard<G> {
    table_guard: G,
}
// NOTE(review): this impl is written over `TableGuard<OwnedRwLockReadGuard<C>>`, yet
// `new_owned` takes an `InodeTable<C>` and returns `TableGuard<OwnedTableLock<C>>`
// (i.e. `TableGuard<OwnedRwLockReadGuard<InodeTable<C>>>`), a different type. The header
// was likely intended to be `impl<C> TableGuard<OwnedTableLock<C>>`; confirm no call site
// names the current `Self` type before changing it.
impl<C> TableGuard<OwnedRwLockReadGuard<C>> {
    /// Acquires an owned read lock on the inode table and wraps it in a [TableGuard].
    async fn new_owned(table: Arc<RwLock<InodeTable<C>>>) -> TableGuard<OwnedTableLock<C>> {
        let table_guard = table.read_owned().await;
        TableGuard { table_guard }
    }
}
impl<'a, C> TableGuard<TableLock<'a, C>> {
    /// Acquires a borrowed read lock on the inode table and wraps it in a [TableGuard].
    async fn new(table: &'a RwLock<InodeTable<C>>) -> TableGuard<TableLock<'a, C>> {
        let table_guard = table.read().await;
        TableGuard { table_guard }
    }
}
impl<C, G: Deref<Target = InodeTable<C>>> TableGuard<G> {
    /// Looks up the table entry for the given inode, failing with [Error::NotOpen] when
    /// there is no entry.
    fn get_value(&self, inode: Inode) -> Result<&Arc<RwLock<InodeTableValue<C>>>> {
        self.table_guard
            .get(&inode)
            .ok_or_else(|| bterr!(Error::NotOpen(inode)))
    }
    /// Acquires a read lock on the table entry for the given inode.
    async fn read<'a>(&'a self, inode: Inode) -> Result<RwLockReadGuard<'a, InodeTableValue<C>>>
    where
        C: 'a,
    {
        let value = self.get_value(inode)?;
        Ok(value.read().await)
    }
    /// Acquires a write lock on the table entry for the given inode.
    async fn write<'a>(
        &'a self,
        inode: Inode,
    ) -> Result<RwLockWriteGuard<'a, InodeTableValue<C>>>
    where
        C: 'a,
    {
        let value = self.get_value(inode)?;
        Ok(value.write().await)
    }
}
/// Structure for metadata about a blocktree.
///
/// On drop, fields not marked `#[zeroize(skip)]` — notably the secret `inode_key` — are
/// zeroized.
#[derive(Debug, Serialize, Deserialize, ZeroizeOnDrop)]
struct Superblock {
    /// The generation number of the cluster this part of the blocktree is stored on.
    generation: u64,
    /// The next free inode available to the cluster.
    #[zeroize(skip)]
    next_inode: AtomicU64,
    /// The hash algorithm to use when computing inode paths.
    #[zeroize(skip)]
    inode_hash: HashKind,
    /// The key to use when hashing inodes to file paths.
    inode_key: Vec<u8>,
}
/// Structure for managing the part of a blocktree which is stored in the local filesystem.
pub struct LocalFs<C, A> {
    /// The path to the directory in the local filesystem where this blocktree is located.
    path: PathBuf,
    /// The table of open inodes, mapping inode numbers to their in-memory state.
    inodes: Arc<RwLock<InodeTable<C>>>,
    /// An in-memory copy of the superblock.
    sb: Superblock,
    /// The credentials this blocktree instance will use for all cryptographic operations.
    creds: C,
    /// The policy used to authorize access to blocks.
    authorizer: A,
    /// The root principal of this instance's writecap.
    root_principal: Principal,
}
// Items which do not depend on any trait bounds on `C` and `A`.
impl<C, A> LocalFs<C, A> {
    /// The maximum number of directory entries that can be returned in any given call to
    /// `read_dir`.
    const DIR_ENTRY_LIMIT: usize = 1024;
}
  547. impl<C: Creds + 'static, A: Authorizer> LocalFs<C, A> {
/// Creates a new empty blocktree at the given path.
///
/// A fresh superblock (with a random inode hashing key) and an empty root directory are
/// written and flushed. If the given credentials do not belong to the writecap's root
/// principal, an access record granting them uid/gid 0 on the root directory is also
/// issued via `grant_access_to`.
pub async fn new_empty(
    btdir: PathBuf,
    generation: u64,
    creds: C,
    authorizer: A,
) -> Result<LocalFs<C, A>> {
    let writecap = creds.writecap().ok_or(BlockError::MissingWritecap)?;
    let mut root_block_path = writecap.root_block_path();
    let root_principal = writecap.root_principal();
    // Initialize the superblock.
    let mut sb_block =
        Self::open_superblock(&btdir, creds.clone(), root_block_path.clone())?;
    let sb = Superblock {
        generation,
        next_inode: AtomicU64::new(SpecInodes::FirstFree.into()),
        inode_hash: HashKind::default(),
        inode_key: rand_vec(HashKind::default().len())?,
    };
    write_to(&sb, &mut sb_block)?;
    sb_block.mut_meta_body().access_secrets(|secrets| {
        secrets.block_id.generation = generation;
        secrets.block_id.inode = SpecInodes::Sb.into();
        secrets.mode = FileType::Reg.value() | 0o666;
        secrets.uid = 0;
        secrets.gid = 0;
        secrets.nlink = 1;
        Ok(())
    })?;
    sb_block.flush()?;
    // Initialize the root directory.
    let mut root_block = Self::open_block(
        &btdir,
        SpecInodes::RootDir.into(),
        creds.clone(),
        root_block_path.clone(),
        None,
        sb.inode_hash,
        &sb.inode_key,
    )?;
    write_to(&Directory::new(), &mut root_block)?;
    root_block.mut_meta_body().access_secrets(|secrets| {
        secrets.block_id.generation = generation;
        secrets.block_id.inode = SpecInodes::RootDir.into();
        secrets.mode = FileType::Dir.value() | 0o777;
        secrets.uid = 0;
        secrets.gid = 0;
        secrets.nlink = 1;
        Ok(())
    })?;
    root_block.flush()?;
    let fs = Self::new(
        btdir,
        sb,
        sb_block,
        root_block,
        creds,
        authorizer,
        root_principal,
    )?;
    let writecap = fs.creds.writecap().ok_or(BlockError::MissingWritecap)?;
    if fs.creds.principal() != fs.root_principal {
        // The creating principal is not root, so grant it access to the root directory
        // with root (uid/gid 0) authorization attributes.
        let proc_rec = IssuedProcRec {
            addr: IpAddr::V6(Ipv6Addr::LOCALHOST),
            pub_creds: fs.creds.concrete_pub(),
            writecap: writecap.to_owned(),
            authz_attrs: AuthzAttrs {
                uid: 0,
                gid: 0,
                supp_gids: Vec::new(),
            },
        };
        root_block_path.push_component(fs.root_principal.to_string());
        fs.grant_access_to(
            &Arc::new(root_block_path),
            SpecInodes::RootDir.into(),
            proc_rec,
        )
        .await?;
    }
    Ok(fs)
}
/// Opens an existing blocktree stored at the given path.
///
/// The superblock is deserialized from its block and used to locate and open the root
/// directory block.
pub fn new_existing(btdir: PathBuf, creds: C, authorizer: A) -> Result<LocalFs<C, A>> {
    let writecap = creds.writecap().ok_or(BlockError::MissingWritecap)?;
    let root_block_path = writecap.root_block_path();
    let root_principal = writecap.root_principal();
    let mut sb_block =
        Self::open_superblock(&btdir, creds.clone(), root_block_path.clone())?;
    let sb: Superblock = read_from(&mut sb_block)?;
    let root_block = Self::open_block(
        &btdir,
        SpecInodes::RootDir.into(),
        creds.clone(),
        root_block_path,
        None,
        sb.inode_hash,
        &sb.inode_key,
    )?;
    Self::new(
        btdir,
        sb,
        sb_block,
        root_block,
        creds,
        authorizer,
        root_principal,
    )
}
  657. fn new(
  658. btdir: PathBuf,
  659. sb: Superblock,
  660. sb_block: Accessor<FileBlock<C>>,
  661. root_block: Accessor<FileBlock<C>>,
  662. creds: C,
  663. authorizer: A,
  664. root_principal: Principal,
  665. ) -> Result<LocalFs<C, A>> {
  666. let mut inodes = HashMap::with_capacity(1);
  667. let empty_path = Arc::new(BlockPath::default());
  668. inodes.insert(
  669. SpecInodes::Sb.into(),
  670. Arc::new(RwLock::new(InodeTableValue::new(
  671. sb_block,
  672. empty_path.clone(),
  673. ))),
  674. );
  675. inodes.insert(
  676. SpecInodes::RootDir.into(),
  677. Arc::new(RwLock::new(InodeTableValue::new(root_block, empty_path))),
  678. );
  679. Ok(LocalFs {
  680. path: btdir,
  681. inodes: Arc::new(RwLock::new(inodes)),
  682. sb,
  683. creds,
  684. authorizer,
  685. root_principal,
  686. })
  687. }
/// Opens (creating if necessary) the file `super.blk` under `btdir` and wraps it in an
/// encrypted block accessor using the given credentials and block path.
fn open_superblock<P: AsRef<Path>>(
    btdir: P,
    creds: C,
    block_path: BlockPath,
) -> Result<Accessor<FileBlock<C>>> {
    let path = btdir.as_ref().join("super.blk");
    let file = std::fs::OpenOptions::new()
        .read(true)
        .write(true)
        .create(true)
        .open(path)?;
    let block = BlockOpenOptions::new()
        .with_creds(creds)
        .with_encrypt(true)
        .with_inner(file)
        .with_block_path(block_path)
        .open()?;
    Ok(block)
}
  707. fn hex_encode(src: &[u8]) -> Result<String> {
  708. use std::fmt::Write;
  709. let mut string = String::with_capacity(2 * src.len());
  710. for byte in src.iter() {
  711. write!(string, "{byte:02x}")?;
  712. }
  713. Ok(string)
  714. }
/// Returns the path to the file storing the given inode's data.
///
/// The file name is the keyed hash (`inode_hash` keyed with `inode_key`) of the inode's
/// little-endian bytes, hex-encoded. The first two hex digits name a subdirectory so
/// that block files fan out over up to 256 directories.
fn block_path<P: AsRef<Path>>(
    parent: P,
    inode: Inode,
    inode_hash: HashKind,
    inode_key: &[u8],
) -> Result<PathBuf> {
    let mut buf = vec![0u8; inode_hash.len()];
    inode_hash.digest(
        &mut buf,
        [inode.to_le_bytes().as_slice(), inode_key].into_iter(),
    )?;
    let hex_str = Self::hex_encode(&buf)?;
    // Reserve space for the two path separators pushed below.
    let mut path =
        PathBuf::with_capacity(parent.as_ref().as_os_str().len() + 1 + hex_str.len() + 1);
    path.push(parent);
    path.push(&hex_str[..2]);
    path.push(&hex_str[2..]);
    Ok(path)
}
/// Opens (creating if necessary) the file which backs the given inode and wraps it in an
/// encrypted block accessor. See [Self::block_path] for how the file's path is derived.
fn open_block<P: AsRef<Path>>(
    btdir: P,
    inode: Inode,
    creds: C,
    block_path: BlockPath,
    parent_key: Option<SymKey>,
    inode_hash: HashKind,
    inode_key: &[u8],
) -> Result<Accessor<FileBlock<C>>> {
    let path = Self::block_path(&btdir, inode, inode_hash, inode_key)?;
    // Ensure the fan-out subdirectory exists; a concurrent creator is tolerated.
    let dir = path.ancestors().nth(1).unwrap();
    if let Err(err) = std::fs::create_dir(dir) {
        match err.kind() {
            io::ErrorKind::AlreadyExists => (),
            _ => return Err(err.into()),
        }
    }
    let file = std::fs::OpenOptions::new()
        .read(true)
        .write(true)
        .create(true)
        .open(path)?;
    Self::open_block_file(file, creds, block_path, parent_key)
}
  759. fn open_block_file(
  760. file: File,
  761. creds: C,
  762. block_path: BlockPath,
  763. parent_key: Option<SymKey>,
  764. ) -> Result<Accessor<FileBlock<C>>> {
  765. let block = BlockOpenOptions::new()
  766. .with_creds(creds)
  767. .with_encrypt(true)
  768. .with_inner(file)
  769. .with_parent_key(parent_key)
  770. .with_block_path(block_path)
  771. .open()?;
  772. Ok(block)
  773. }
    /// Acquires a guard over the inode table.
    ///
    /// NOTE(review): per the warning on `ensure_open`, creating a table guard while
    /// another is alive can deadlock — callers must not nest them.
    async fn table_guard(&self) -> TableGuard<TableLock<'_, C>> {
        TableGuard::new(&self.inodes).await
    }
  777. async fn open_value(
  778. &self,
  779. from: Arc<BlockPath>,
  780. inode: Inode,
  781. block_path: BlockPath,
  782. parent_key: Option<SymKey>,
  783. ) -> Result<()> {
  784. let block = Self::open_block(
  785. &self.path,
  786. inode,
  787. self.creds.clone(),
  788. block_path,
  789. parent_key,
  790. self.sb.inode_hash,
  791. &self.sb.inode_key,
  792. )?;
  793. let value = Arc::new(RwLock::new(InodeTableValue::new(block, from)));
  794. let mut inodes = self.inodes.write().await;
  795. if inodes.insert(inode, value).is_some() {
  796. error!(
  797. "LOGIC ERROR: open_value was called with inode {inode}, which is already open"
  798. );
  799. }
  800. Ok(())
  801. }
    /// Ensures that the given inode is open. If the inode is already open, then this method
    /// does nothing and returns the table guard which was used to check the status of the
    /// inode.
    /// ## Warning
    /// Because this method creates new table guards, no table guard must be alive when it's
    /// called. Otherwise a deadlock will occur.
    async fn ensure_open<'a>(
        &'a self,
        from: &Arc<BlockPath>,
        inode: Inode,
        block_path: BlockPath,
        parent_key: Option<SymKey>,
    ) -> Result<TableGuard<OwnedTableLock<C>>> {
        {
            // Fast path: the inode is already in the table, so the guard used for the
            // check can be returned directly.
            let table_guard = self.inodes.clone().read_owned().await;
            if table_guard.contains_key(&inode) {
                return Ok(TableGuard { table_guard });
            }
        }
        // NOTE(review): the read guard is dropped before open_value runs, so two tasks
        // can race through the check above for the same inode and both call open_value —
        // confirm callers serialize, otherwise open_value reports a LOGIC ERROR for the
        // loser.
        self.open_value(from.clone(), inode, block_path, parent_key)
            .await?;
        Ok(TableGuard::new_owned(self.inodes.clone()).await)
    }
  825. fn delete_block_file(&self, inode: Inode) -> Result<()> {
  826. let mut path =
  827. Self::block_path(&self.path, inode, self.sb.inode_hash, &self.sb.inode_key)?;
  828. std::fs::remove_file(&path)?;
  829. path.pop();
  830. let mut contents = std::fs::read_dir(&path)?;
  831. if contents.next().is_none() {
  832. std::fs::remove_dir(&path)?;
  833. }
  834. Ok(())
  835. }
    /// Decrements the lookup count for `inode` by `count` on behalf of the peer at
    /// `from`. When the total lookup count reaches zero, the inode is evicted from the
    /// table, and its block file is deleted if the entry was marked for deletion
    /// (see `unlink`).
    async fn inode_forget<'a>(
        &self,
        from: Arc<BlockPath>,
        inode: Inode,
        count: u64,
    ) -> io::Result<()> {
        let mut inodes = self.inodes.write().await;
        let lookup_count = {
            let inode_lock = match inodes.get_mut(&inode) {
                Some(inode_lock) => inode_lock,
                None => {
                    // Forgetting an unknown inode is treated as benign: log and return.
                    warn!("an attempt was made to forget non-existent inode {inode}");
                    return Ok(());
                }
            };
            let mut value = inode_lock.write().await;
            value.decr_lookup_count(from, count);
            value.total_lookup_count()
        };
        if 0 == lookup_count {
            // No peer references the inode any more; drop the table entry.
            // try_unwrap fails only if another clone of the Arc is still held, which
            // would mean the entry is in use while being forgotten.
            let entry = Arc::try_unwrap(inodes.remove(&inode).unwrap())
                .map_err(|_| bterr!("LOGIC ERROR: entry for inode {inode} was still in use while it was being forgotten"))?;
            let delete = entry.into_inner().delete;
            if delete {
                self.delete_block_file(inode)?;
            }
        }
        Ok(())
    }
  865. /// Returns the next available inode and updates the superblock in one atomic operation.
  866. /// TODO: Obviously this strategy won't work when there are multiple servers in this
  867. /// generation.
  868. async fn next_inode(&self) -> Result<Inode> {
  869. let table_guard = self.table_guard().await;
  870. let mut value_guard = table_guard.write(SpecInodes::Sb.into()).await?;
  871. let mut block = &mut value_guard.block;
  872. // We don't need strict ordering because the lock on the inode table value is already
  873. // serializing access.
  874. let inode = self.sb.next_inode.fetch_add(1, Ordering::Relaxed);
  875. block.rewind()?;
  876. write_to(&self.sb, &mut block)?;
  877. block.flush()?;
  878. Ok(inode)
  879. }
    /// How long clients may cache file attributes returned by this server.
    fn attr_timeout(&self) -> Duration {
        Duration::from_secs(5)
    }
    /// How long clients may cache directory entries returned by this server.
    fn entry_timeout(&self) -> Duration {
        Duration::from_secs(5)
    }
    /// Builds the error returned when a client passes an open flag this server refuses
    /// to honor (see `open`).
    fn unsupported_flag_err(flag: &str) -> btlib::Error {
        bterr!("unsupported flag: {flag}")
    }
  889. fn bt_entry(&self, attr: BlockMetaSecrets) -> crate::msg::Entry {
  890. crate::msg::Entry {
  891. attr,
  892. attr_timeout: self.attr_timeout(),
  893. entry_timeout: self.entry_timeout(),
  894. }
  895. }
    /// Grants the given credentials access to the directory this instance is responsible for.
    ///
    /// ## Warning
    /// This method calls `self.authz_attrs`, so the same consideration for avoiding deadlock
    /// apply to this method as well. See the documentation of `self.authz_attrs` for details.
    async fn grant_access_to(
        &self,
        from: &Arc<BlockPath>,
        inode: Inode,
        proc_rec: IssuedProcRec,
    ) -> Result<()> {
        let authz_attrs = self.authz_attrs(from).await?;
        let principal = proc_rec.pub_creds.principal();
        let (next_inode, parent_key) = {
            let table_guard = self.table_guard().await;
            let next_inode = if inode == SpecInodes::RootDir.value() {
                // If the inode is for the root directory we need to add a readcap for the
                // superblock.
                let mut value_guard = table_guard.write(SpecInodes::Sb.into()).await?;
                let mut block = &mut value_guard.block;
                // Reserve an inode for the new process record file. Relaxed ordering is
                // fine because the superblock value lock serializes access (see
                // `next_inode`).
                let next_inode = self.sb.next_inode.fetch_add(1, Ordering::Relaxed);
                block.rewind()?;
                write_to(&self.sb, &mut block)?;
                block
                    .mut_meta_body()
                    .add_readcap_for(principal.clone(), &proc_rec.pub_creds.enc)?;
                block.flush()?;
                Ok(next_inode)
            } else {
                // NOTE(review): next_inode() creates its own table guard while
                // `table_guard` above is still alive — see the nested-guard warning on
                // `ensure_open`; confirm this cannot deadlock.
                self.next_inode().await
            }?;
            let parent_key = {
                // Add a readcap and a directory entry for the new process record to the
                // target directory.
                let mut value_guard = table_guard.write(inode).await?;
                let block = &mut value_guard.block;
                self.authorizer.can_write(&AuthzContext::new(
                    from,
                    &authz_attrs,
                    block.meta(),
                ))?;
                block
                    .mut_meta_body()
                    .add_readcap_for(principal.clone(), &proc_rec.pub_creds.enc)?;
                let mut dir = block.read_dir()?;
                // The process record file is named after the principal it belongs to.
                let proc_rec_name = principal.to_string();
                dir.add_file(proc_rec_name, next_inode)?;
                // Note that write_dir calls flush, which also ensures metadata is written to
                // disk.
                block.write_dir(&dir)?;
                block.meta_body().block_key()?.clone()
            };
            (next_inode, parent_key)
        };
        let self_writecap = self.creds.writecap().ok_or(BlockError::MissingWritecap)?;
        let self_bind_path = Arc::new(self_writecap.bind_path());
        let bind_path = proc_rec.writecap.bind_path();
        // Open the new block (no table guard may be alive here) and write the record.
        self.open_value(
            self_bind_path.clone(),
            next_inode,
            bind_path,
            Some(parent_key),
        )
        .await?;
        {
            let table_guard = self.table_guard().await;
            let mut value_guard = table_guard.write(next_inode).await?;
            let block = &mut value_guard.block;
            block.write_proc_rec(&ProcRec::Valid(proc_rec))?;
        };
        // We must ensure the reference count for the inode is decremented, otherwise the table
        // entry will never be freed.
        self.inode_forget(self_bind_path, next_inode, 1).await?;
        Ok(())
    }
  969. async fn lookup_inode_in<'a>(
  970. table_guard: &'a TableGuard<TableLock<'a, C>>,
  971. parent: Inode,
  972. name: &str,
  973. ) -> Result<Inode> {
  974. let mut value_guard = table_guard.write(parent).await?;
  975. let dir = value_guard.block.read_dir()?;
  976. dir.entry(name)
  977. .ok_or_else(|| io::Error::from_raw_os_error(libc::ENOENT).into())
  978. .map(|e| e.inode())
  979. }
  980. /// Returns a pair of inodes, where the first inode is the inode referred to by the given
  981. /// path, and the second is the parent inode.
  982. async fn lookup_inode<'a, 'b, I: Iterator<Item = &'a str>>(
  983. table_guard: &'b TableGuard<TableLock<'b, C>>,
  984. components: I,
  985. ) -> Result<(Inode, Option<Inode>)> {
  986. const ROOT: Inode = SpecInodes::RootDir as Inode;
  987. let mut parent = None;
  988. let mut inode = ROOT;
  989. for component in components {
  990. parent = Some(inode);
  991. inode = Self::lookup_inode_in(table_guard, inode, component).await?;
  992. }
  993. Ok((inode, parent))
  994. }
    /// Retrieves the authorization attributes for the principal identified by the given path.
    /// If the principal is not associated with a valid process record, then an [Err] is
    /// returned.
    /// ## Warning
    /// If this method is called while a lock for any component on the given path is held, then
    /// a deadlock may occur. It's safest to call this method when _no_ locks are held.
    async fn authz_attrs(&self, from: &Arc<BlockPath>) -> Result<AuthzAttrs> {
        let writecap = self.creds.writecap().ok_or(BlockError::MissingWritecap)?;
        let root_principal = writecap.root_principal();
        // The caller's principal is the last component of the path the message came from.
        let from_principal = from.components().last().map_or_else(
            || Err(bterr!("path from which message was received was empty")),
            Principal::try_from,
        )?;
        if root_principal == from_principal {
            // Now I am become root, the destroyer of files.
            return Ok(AuthzAttrs {
                uid: 0,
                gid: 0,
                supp_gids: Vec::new(),
            });
        }
        // Everyone else must have a process record file reachable under this server's
        // root; locate its inode by walking `from` relative to our writecap path.
        let local_root = writecap.path();
        let relative = from.relative_to(local_root)?;
        let (inode, parent_key) = {
            let table_guard = self.table_guard().await;
            let (inode, parent) =
                Self::lookup_inode(&table_guard, relative.components()).await?;
            let parent_key = if let Some(parent) = parent {
                let value_guard = table_guard.read(parent).await?;
                Some(value_guard.block.meta_body().block_key()?.clone())
            } else {
                None
            };
            (inode, parent_key)
            // The table guard is dropped here; ensure_open requires none to be alive.
        };
        let proc_rec = {
            let table_guard = self
                .ensure_open(from, inode, from.as_ref().to_owned(), parent_key)
                .await?;
            let mut value_guard = table_guard.write(inode).await?;
            value_guard.block.read_proc_rec()?
        };
        // A record that fails validation propagates as an error, denying access.
        let proc_rec = proc_rec.validate()?;
        Ok(proc_rec.authz_attrs)
    }
  1040. }
// SAFETY(review): asserts LocalFs is Sync whenever C and A are. The fields visible in
// this file are shared through Arc/RwLock or only read, but that is not proven here —
// confirm no field permits unsynchronized interior mutability before relying on this.
unsafe impl<C: Sync, A: Sync> Sync for LocalFs<C, A> {}
/// An owned guard which allows read access to file data.
pub struct BufGuard<C> {
    // Offset into the file at which the readable region begins.
    offset: u64,
    // Length, in bytes, of the readable region.
    size: u64,
    // Note that handle must come before _table to ensure the guards are dropped in the correct
    // order.
    handle: HandleGuard<
        BlockGuard<OwnedRwLockReadGuard<InodeTableValue<C>>>,
        OwnedMutexGuard<EmptyAccessor>,
    >,
    // Keeps the inode table read-locked for as long as the buffer is alive.
    _table: OwnedTableLock<C>,
}
impl<C: 'static + Principaled + Signer + Decrypter> BufGuard<C> {
    /// Positions `handle` at `offset` and returns a guard exposing `size` bytes of file
    /// data.
    ///
    /// The seek is first attempted while holding only a shared (read) lock on the inode
    /// table value. If that seek reports `Unsupported` — the buffer has unflushed data
    /// and needs exclusive access to write it — the seek is retried under a write lock
    /// before the shared lock is re-acquired.
    async fn new(
        table: Arc<RwLock<InodeTable<C>>>,
        from: &BlockPath,
        inode: Inode,
        handle: Handle,
        offset: u64,
        size: u64,
    ) -> Result<BufGuard<C>> {
        let table = table.read_owned().await;
        let entry = table.get(&inode).ok_or(Error::NotOpen(inode))?;
        // `Some(..)` means the handle is positioned (or was seeked) under the shared
        // lock; `None` means an exclusive seek is still required.
        let inode_guard = {
            let inode_guard = entry.clone().read_owned().await;
            let mut handle_guard = inode_guard.handle_guard(from, handle).await?;
            handle_guard.flags.assert_readable()?;
            let pos = handle_guard.pos() as u64;
            if offset != pos {
                if let Err(err) = handle_guard.try_seek(SeekFrom::Start(offset)) {
                    // An error with `ErrorKind::Unsupported` means that the `SectoredBuf`
                    // has unflushed data and it needs exclusive access to the block to
                    // perform this seek because this data needs to be written.
                    if let io::ErrorKind::Unsupported = err.kind() {
                        None
                    } else {
                        return Err(err.into());
                    }
                } else {
                    drop(handle_guard);
                    Some(inode_guard)
                }
            } else {
                // Already at the requested offset; no seek needed.
                drop(handle_guard);
                Some(inode_guard)
            }
        };
        let inode_guard = match inode_guard {
            Some(inode_guard) => inode_guard,
            None => {
                {
                    // Retry the seek with exclusive access so buffered data can be
                    // written out.
                    let mut inode_guard = entry.write().await;
                    let mut handle_guard = inode_guard.handle_guard_mut(from, handle).await?;
                    handle_guard.seek(SeekFrom::Start(offset))?;
                }
                entry.clone().read_owned().await
            }
        };
        let handle = InodeTableValue::handle_guard_owned(inode_guard, from, handle).await?;
        Ok(BufGuard {
            handle,
            _table: table,
            offset,
            size,
        })
    }
}
impl<C: 'static + Principaled + Decrypter + Signer> Deref for BufGuard<C> {
    type Target = [u8];
    fn deref(&self) -> &Self::Target {
        // Panics if `get_buf` fails for (offset, size). `new` seeks the handle to
        // `offset` before constructing the guard — presumably that makes this
        // infallible; TODO confirm.
        self.handle.get_buf(self.offset, self.size).unwrap()
    }
}
  1115. impl<C: 'static + Creds + Clone + Send + Sync, A: 'static + Authorizer + Send + Sync> FsProvider
  1116. for LocalFs<C, A>
  1117. {
    type LookupFut<'c> = impl 'c + Send + Future<Output = Result<LookupReply>>;
    /// Resolves `name` in the directory `parent`, opening the child's block if needed
    /// and incrementing its lookup count on behalf of the caller.
    fn lookup<'c>(&'c self, from: &'c Arc<BlockPath>, msg: Lookup<'c>) -> Self::LookupFut<'c> {
        async move {
            let Lookup { parent, name, .. } = msg;
            debug!("lookup: parent {parent}, {:?}", name);
            // Runs before any table guard exists (see the warning on authz_attrs).
            let authz_attrs = self.authz_attrs(from).await?;
            let (dir, block_path, parent_key) = {
                let table_guard = self.table_guard().await;
                let mut value_guard = table_guard.write(parent).await?;
                let parent_block = &mut value_guard.block;
                // Resolving a name requires execute permission on the parent directory.
                self.authorizer.can_exec(&AuthzContext::new(
                    from,
                    &authz_attrs,
                    parent_block.meta(),
                ))?;
                let dir = parent_block.read_dir()?;
                let meta_body = parent_block.meta_body();
                let block_path = meta_body.path().to_owned();
                let parent_key = meta_body.block_key()?.clone();
                (dir, block_path, parent_key)
                // Guard dropped here so ensure_open below may create its own.
            };
            let entry = dir
                .entry(name)
                .ok_or_else(|| io::Error::from_raw_os_error(libc::ENOENT))?;
            let inode = entry.inode();
            let stat = {
                let table_guard = self
                    .ensure_open(from, inode, block_path, Some(parent_key))
                    .await?;
                let mut value_guard = table_guard.write(inode).await?;
                let stat = value_guard.block.meta_body().secrets()?.to_owned();
                // Balanced later by a forget message (see inode_forget).
                value_guard.incr_lookup_count(from);
                stat
            };
            let entry = self.bt_entry(stat);
            let reply = LookupReply {
                inode,
                generation: self.sb.generation,
                entry,
            };
            Ok(reply)
        }
    }
    type CreateFut<'c> = impl 'c + Send + Future<Output = Result<CreateReply>>;
    /// Creates a new file or directory named `name` under `parent`, returning the new
    /// inode, an open read/write handle, and the initial attributes.
    fn create<'c>(&'c self, from: &'c Arc<BlockPath>, msg: Create<'c>) -> Self::CreateFut<'c> {
        async move {
            let Create {
                parent,
                name,
                flags,
                mode,
                umask,
            } = msg;
            debug!("create: parent {parent}, name {:?}", name);
            // Runs before any table guard exists (see the warning on authz_attrs).
            let authz_attrs = self.authz_attrs(from).await?;
            // Owned copy of the name, shadowing the borrowed `name` from the destructure.
            let name = msg.name.to_owned();
            // Add a directory entry to the parent for the new inode.
            let (inode, mut block_path, parent_key) = {
                let table_guard = self.table_guard().await;
                let mut value_guard = table_guard.write(parent).await?;
                let block = &mut value_guard.block;
                self.authorizer.can_write(&AuthzContext::new(
                    from,
                    &authz_attrs,
                    block.meta(),
                ))?;
                let mut dir = block.read_dir()?;
                if dir.contains_entry(&name) {
                    return Err(io::Error::from_raw_os_error(libc::EEXIST).into());
                }
                // Reserve a free inode.
                // NOTE(review): next_inode() creates its own table guard while
                // `table_guard` is still alive — see the nested-guard warning on
                // `ensure_open`; confirm this cannot deadlock.
                let inode = self.next_inode().await?;
                dir.add_file(name.clone(), inode)?;
                block.write_dir(&dir)?;
                let meta_body = block.meta_body();
                let block_path = meta_body.path().clone();
                let parent_key = meta_body.block_key()?.clone();
                (inode, block_path, parent_key)
            };
            // The new block's path is the parent's path plus the new name.
            block_path.push_component(name);
            let (handle, stat) = {
                let table_guard = self
                    .ensure_open(from, inode, block_path, Some(parent_key))
                    .await?;
                let mut value_guard = table_guard.write(inode).await?;
                let handle =
                    value_guard.new_handle(from.clone(), FlagValue::ReadWrite.into())?;
                let stat = {
                    let mut block = value_guard.handle_guard_mut(from, handle).await?;
                    // Initialize the metadata of the freshly created block.
                    let stat = block.mut_meta_body().access_secrets(|secrets| {
                        secrets.block_id.generation = self.sb.generation;
                        secrets.block_id.inode = inode;
                        secrets.mode = mode & !umask;
                        if flags.directory() {
                            secrets.mode |= FileType::Dir.value();
                        } else {
                            secrets.mode |= FileType::Reg.value();
                        }
                        secrets.uid = authz_attrs.uid;
                        secrets.gid = authz_attrs.gid;
                        let now = Epoch::now();
                        secrets.atime = now;
                        secrets.ctime = now;
                        secrets.mtime = now;
                        secrets.nlink = 1;
                        Ok(secrets.to_owned())
                    })?;
                    if flags.directory() {
                        // Note that write_dir flushes data after writing, including
                        // metadata.
                        block.write_dir(&Directory::new())?;
                    } else {
                        block.flush_meta()?;
                    }
                    stat
                };
                if flags.directory() {
                    value_guard.convert_to_dir(handle)?;
                }
                value_guard.set_flags(handle, flags)?;
                (handle, stat)
            };
            Ok(CreateReply {
                inode,
                handle,
                entry: self.bt_entry(stat),
            })
        }
    }
    type OpenFut<'c> = impl 'c + Send + Future<Output = Result<OpenReply>>;
    /// Opens a handle to `inode` with the given flags, enforcing read/write
    /// authorization.
    ///
    /// Every failure path after `new_handle` succeeds must call `forget_handle` so the
    /// allocated handle is not leaked.
    fn open<'c>(&'c self, from: &'c Arc<BlockPath>, msg: Open) -> Self::OpenFut<'c> {
        async move {
            let Open { inode, flags } = msg;
            debug!("open: inode {inode}, flags {flags}");
            // These flags are rejected outright rather than silently ignored.
            if flags.value() & libc::O_APPEND != 0 {
                return Err(Self::unsupported_flag_err("O_APPEND"));
            }
            if flags.value() & libc::O_CLOEXEC != 0 {
                return Err(Self::unsupported_flag_err("O_CLOEXEC"));
            }
            let authz_attrs = self.authz_attrs(from).await?;
            let handle = {
                let table_guard = self.table_guard().await;
                let mut value = table_guard.write(inode).await?;
                let handle = value.new_handle(from.clone(), flags)?;
                {
                    let block = {
                        let result = value.handle_guard(from, handle).await;
                        match result {
                            Ok(block) => block,
                            Err(ref err) => {
                                // The error borrows from `result`, so its message is
                                // copied out before the guard is dropped and the handle
                                // reclaimed.
                                let message = err.to_string();
                                drop(result);
                                value.forget_handle(handle);
                                return Err(bterr!(message));
                            }
                        }
                    };
                    let ctx = AuthzContext::new(from, &authz_attrs, block.meta());
                    if flags.readable() {
                        if let Err(err) = self.authorizer.can_read(&ctx) {
                            drop(block);
                            value.forget_handle(handle);
                            return Err(err.into());
                        }
                    }
                    if flags.writeable() {
                        if let Err(err) = self.authorizer.can_write(&ctx) {
                            drop(block);
                            value.forget_handle(handle);
                            return Err(err.into());
                        }
                    }
                }
                if flags.directory() {
                    if let Err(err) = value.convert_to_dir(handle) {
                        value.forget_handle(handle);
                        return Err(err.into());
                    }
                }
                if let Err(err) = value.set_flags(handle, flags) {
                    value.forget_handle(handle);
                    return Err(err);
                }
                handle
            };
            Ok(OpenReply { handle })
        }
    }
  1307. type ReadGuard = BufGuard<C>;
  1308. type ReadFut<'c> = impl 'c + Send + Future<Output = Result<Self::ReadGuard>>;
  1309. fn read<'c>(&'c self, from: &'c Arc<BlockPath>, msg: Read) -> Self::ReadFut<'c> {
  1310. async move {
  1311. let Read {
  1312. inode,
  1313. handle,
  1314. offset,
  1315. size,
  1316. } = msg;
  1317. debug!("read: inode {inode}, handle {handle}, offset {offset}, size {size}");
  1318. BufGuard::new(self.inodes.clone(), from, inode, handle, offset, size).await
  1319. }
  1320. }
  1321. type WriteFut<'r> = impl 'r + Send + Future<Output = Result<WriteReply>>;
  1322. fn write<'c>(
  1323. &'c self,
  1324. from: &'c Arc<BlockPath>,
  1325. write: Write<&'c [u8]>,
  1326. ) -> Self::WriteFut<'c> {
  1327. async move {
  1328. let Write {
  1329. inode,
  1330. handle,
  1331. offset,
  1332. mut data,
  1333. } = write;
  1334. debug!("write: inode {inode}, handle {handle}, offset {offset}");
  1335. let table_guard = self.table_guard().await;
  1336. let mut value_guard = table_guard.write(inode).await?;
  1337. let mut block = value_guard.handle_guard_mut(from, handle).await?;
  1338. block.flags.assert_writeable()?;
  1339. let pos = block.pos() as u64;
  1340. if offset != pos {
  1341. block.seek(SeekFrom::Start(offset))?;
  1342. }
  1343. let written = std::io::copy(&mut data, block.deref_mut())?;
  1344. Ok(WriteReply { written })
  1345. }
  1346. }
  1347. type FlushFut<'c> = impl 'c + Send + Future<Output = Result<()>>;
  1348. fn flush<'c>(&'c self, from: &'c Arc<BlockPath>, msg: Flush) -> Self::FlushFut<'c> {
  1349. async move {
  1350. let Flush { inode, handle } = msg;
  1351. debug!("flush: inode {inode}, handle {handle}");
  1352. let table_guard = self.table_guard().await;
  1353. let mut value_guard = table_guard.write(inode).await?;
  1354. let mut handle_guard = match value_guard.handle_guard_mut(from, handle).await {
  1355. Ok(guard) => guard,
  1356. Err(err) => match err.downcast_ref::<Error>() {
  1357. Some(Error::ReadOnlyHandle(..)) => {
  1358. // We ignore attempts to flush read-only handles.
  1359. return Ok(());
  1360. }
  1361. _ => return Err(err),
  1362. },
  1363. };
  1364. handle_guard.flush()?;
  1365. Ok(())
  1366. }
  1367. }
  1368. type ReadDirFut<'c> = impl 'c + Send + Future<Output = Result<ReadDirReply>>;
  1369. fn read_dir<'c>(&'c self, from: &'c Arc<BlockPath>, msg: ReadDir) -> Self::ReadDirFut<'c> {
  1370. async move {
  1371. let ReadDir {
  1372. inode,
  1373. handle,
  1374. limit,
  1375. state,
  1376. } = msg;
  1377. debug!("read_dir: inode {inode}, handle {handle}, state {state}");
  1378. let table_guard = self.table_guard().await;
  1379. let value = table_guard.read(inode).await?;
  1380. let handle_value = value
  1381. .value(handle)
  1382. .map_err(|_| bterr!(Error::InvalidHandle { handle, inode }))?;
  1383. let (_, owner, flags) = handle_value.parts();
  1384. flags.assert_readable()?;
  1385. owner.assert_eq(from)?;
  1386. let dir = handle_value.directory()?;
  1387. let state: usize = state.try_into()?;
  1388. let server_limit = Self::DIR_ENTRY_LIMIT.min(dir.num_entries() - state);
  1389. let entries_len = if limit > 0 {
  1390. server_limit.min(limit as usize)
  1391. } else {
  1392. server_limit
  1393. };
  1394. let pairs = dir
  1395. .entries()
  1396. .skip(state)
  1397. .take(entries_len)
  1398. .map(|(name, entry)| (name.to_owned(), entry.to_owned()));
  1399. let mut entries = Vec::with_capacity(entries_len);
  1400. entries.extend(pairs);
  1401. Ok(ReadDirReply {
  1402. entries,
  1403. new_state: (state + entries_len) as u64,
  1404. })
  1405. }
  1406. }
    type LinkFut<'c> = impl 'c + Send + Future<Output = Result<LinkReply>>;
    /// Creates a hard link named `name` in `new_parent` pointing at `inode`,
    /// incrementing the target's link and lookup counts.
    fn link<'c>(&'c self, from: &'c Arc<BlockPath>, msg: Link<'c>) -> Self::LinkFut<'c> {
        async move {
            let Link {
                inode,
                new_parent,
                name,
            } = msg;
            debug!("link: inode {inode}, new_parent {new_parent}, name {name}");
            let authz_attrs = self.authz_attrs(from).await?;
            let table_guard = self.table_guard().await;
            let mut value_guard = table_guard.write(new_parent).await?;
            let parent_block = &mut value_guard.block;
            self.authorizer.can_write(&AuthzContext::new(
                from,
                &authz_attrs,
                parent_block.meta(),
            ))?;
            let mut dir = parent_block.read_dir()?;
            if dir.contains_entry(name) {
                return Err(io::Error::from_raw_os_error(libc::EEXIST).into());
            }
            let attr = {
                // NOTE(review): a second table guard is created while the outer one is
                // still alive, and `inode` is write-locked while `new_parent` is held —
                // see the nested-guard warning on `ensure_open`. Also note that
                // `inode == new_parent` would self-deadlock; confirm callers exclude
                // these cases.
                let table_guard = self.table_guard().await;
                let mut value = table_guard.write(inode).await?;
                let block = value.block_mut();
                let meta = block.mut_meta_body();
                let attr = meta.access_secrets(|secrets| {
                    secrets.nlink += 1;
                    Ok(secrets.to_owned())
                })?;
                block.flush_meta()?;
                // The new directory entry counts as a lookup by this peer.
                value.incr_lookup_count(from);
                attr
            };
            let file_type = FileType::from_value(attr.mode)?;
            let entry = match file_type {
                FileType::Reg => DirEntry::File(inode),
                FileType::Dir => DirEntry::Directory(inode),
            };
            dir.insert_entry(name.to_owned(), entry);
            // write_dir flushes, which also persists the parent's metadata.
            parent_block.write_dir(&dir)?;
            let entry = self.bt_entry(attr);
            Ok(LinkReply { entry })
        }
    }
    type UnlinkFut<'c> = impl 'c + Send + Future<Output = Result<()>>;
    /// Removes the entry `name` from the directory `parent`, decrementing the target's
    /// link count and deleting its block file once no links remain and no client has
    /// the inode open.
    fn unlink<'c>(&'c self, from: &'c Arc<BlockPath>, msg: Unlink<'c>) -> Self::UnlinkFut<'c> {
        // Decrements the link count and returns the new value.
        // NOTE(review): `nlink -= 1` underflows (panicking in debug builds) if nlink is
        // already 0 — confirm that cannot occur for an inode that still has a directory
        // entry.
        fn decr_nlink(secrets: &mut BlockMetaSecrets) -> Result<u32> {
            secrets.nlink -= 1;
            Ok(secrets.nlink)
        }
        async move {
            let Unlink { parent, name } = msg;
            debug!("unlink: parent {parent}, name {name}");
            let authz_attrs = self.authz_attrs(from).await?;
            // Remove the entry from the parent, capturing what is needed to open the
            // target block if it isn't already in the table.
            let (block_path, inode, parent_key) = {
                let table_guard = self.table_guard().await;
                let mut value_guard = table_guard.write(parent).await?;
                let parent_block = &mut value_guard.block;
                self.authorizer.can_write(&AuthzContext::new(
                    from,
                    &authz_attrs,
                    parent_block.meta(),
                ))?;
                let mut dir = parent_block.read_dir()?;
                let entry = match dir.remove_entry(name) {
                    None => return Err(io::Error::from_raw_os_error(libc::ENOENT).into()),
                    Some(entry) => entry,
                };
                let inode = entry.inode();
                parent_block.write_dir(&dir)?;
                let meta_body = parent_block.meta_body();
                let mut block_path = meta_body.path().clone();
                block_path.push_component(name.to_owned());
                let parent_key = meta_body.block_key()?.clone();
                (block_path, inode, parent_key)
            };
            let table_guard = self.inodes.read().await;
            let delete = if let Some(entry) = table_guard.get(&inode) {
                // The inode is open in the table: update its nlink in place.
                let mut value = entry.write().await;
                let nlink = value
                    .block_mut()
                    .mut_meta_body()
                    .access_secrets(decr_nlink)?;
                value.delete = 0 == nlink;
                // If the block is about to be deleted then there's no point in flushing its
                // metadata.
                if !value.delete {
                    value.block_mut().flush_meta()?;
                }
                // Since this block was already open, a client is keeping it alive. When they
                // choose to forget this inode it will be deleted. Thus we return false here.
                false
            } else {
                // It may be tempting to drop the table_guard here, but if this were done
                // then this block file could be opened concurrently by another task.
                let mut block = Self::open_block(
                    &self.path,
                    inode,
                    self.creds.clone(),
                    block_path,
                    Some(parent_key),
                    self.sb.inode_hash,
                    &self.sb.inode_key,
                )?;
                let nlink = block.mut_meta_body().access_secrets(decr_nlink)?;
                if nlink > 0 {
                    block.flush_meta()?;
                    false
                } else {
                    true
                }
            };
            if delete {
                self.delete_block_file(inode)?;
            }
            Ok(())
        }
    }
  1527. type ReadMetaFut<'c> = impl 'c + Send + Future<Output = Result<ReadMetaReply>>;
  1528. fn read_meta<'c>(
  1529. &'c self,
  1530. from: &'c Arc<BlockPath>,
  1531. msg: ReadMeta,
  1532. ) -> Self::ReadMetaFut<'c> {
  1533. async move {
  1534. let ReadMeta { inode, handle } = msg;
  1535. debug!("read_meta: inode {inode}, handle {:?}", handle);
  1536. let table_guard = self.table_guard().await;
  1537. let value_guard = table_guard.read(inode).await?;
  1538. let attrs = if let Some(handle) = handle {
  1539. let block = value_guard.handle_guard(from, handle).await?;
  1540. block.meta_body().secrets()?.to_owned()
  1541. } else {
  1542. value_guard.block().meta_body().secrets()?.to_owned()
  1543. };
  1544. debug!("read_meta attrs: {:?}", attrs);
  1545. let reply = ReadMetaReply {
  1546. attrs,
  1547. valid_for: self.attr_timeout(),
  1548. };
  1549. Ok(reply)
  1550. }
  1551. }
    type WriteMetaFut<'c> = impl 'c + Send + Future<Output = Result<WriteMetaReply>>;
    /// Applies the attribute changes selected by `attrs_set` to `inode` and returns the
    /// resulting attributes.
    fn write_meta<'c>(
        &'c self,
        from: &'c Arc<BlockPath>,
        msg: WriteMeta,
    ) -> Self::WriteMetaFut<'c> {
        async move {
            let WriteMeta {
                inode,
                handle,
                attrs,
                attrs_set,
            } = msg;
            debug!("write_meta: inode {inode}, handle {:?}", handle);
            let authz_attrs = self.authz_attrs(from).await?;
            // Shared by the handle and no-handle paths below: checks write
            // authorization, copies over exactly the fields selected in `attrs_set`,
            // merges tags, and flushes the metadata.
            let cb = |block: &mut FileBlock<C>| {
                self.authorizer.can_write(&AuthzContext::new(
                    from,
                    &authz_attrs,
                    block.meta(),
                ))?;
                let attrs = block.mut_meta_body().access_secrets(|secrets| {
                    if attrs_set.mode() {
                        secrets.mode = attrs.mode;
                    }
                    if attrs_set.uid() {
                        secrets.uid = attrs.uid;
                    }
                    if attrs_set.gid() {
                        secrets.gid = attrs.gid;
                    }
                    if attrs_set.atime() {
                        secrets.atime = attrs.atime;
                    }
                    if attrs_set.mtime() {
                        secrets.mtime = attrs.mtime;
                    }
                    if attrs_set.ctime() {
                        secrets.ctime = attrs.ctime;
                    }
                    // Tags are merged into the existing set, not replaced wholesale.
                    for (key, value) in attrs.tags.into_iter() {
                        secrets.tags.insert(key, value);
                    }
                    Ok(secrets.to_owned())
                })?;
                block.flush_meta()?;
                Ok::<_, btlib::Error>(attrs)
            };
            let attrs = {
                let table_guard = self.table_guard().await;
                let mut value_guard = table_guard.write(inode).await?;
                if let Some(handle) = handle {
                    // Writing through a handle requires the handle to be writeable.
                    let mut block = value_guard.handle_guard_mut(from, handle).await?;
                    block.flags.assert_writeable()?;
                    cb(block.get_mut())
                } else {
                    cb(value_guard.block.get_mut())
                }
            }?;
            Ok(WriteMetaReply {
                attrs,
                valid_for: self.attr_timeout(),
            })
        }
    }
  1617. type AllocateFut<'c> = impl 'c + Send + Future<Output = Result<()>>;
  1618. fn allocate<'c>(
  1619. &'c self,
  1620. from: &'c Arc<BlockPath>,
  1621. msg: Allocate,
  1622. ) -> Self::AllocateFut<'c> {
  1623. async move {
  1624. let Allocate {
  1625. inode,
  1626. handle,
  1627. offset,
  1628. size,
  1629. } = msg;
  1630. debug!(
  1631. "allocate: inode {inode}, handle {handle}, offset {:?}, size {size}",
  1632. offset
  1633. );
  1634. let table_guard = self.table_guard().await;
  1635. let mut value_guard = table_guard.write(inode).await?;
  1636. let mut block = value_guard.handle_guard_mut(from, handle).await?;
  1637. let curr_size = block.meta_body().secrets()?.size;
  1638. if let Some(offset) = offset {
  1639. if curr_size != offset {
  1640. return Err(bterr!("only allocations at the end of files are supported"));
  1641. }
  1642. }
  1643. let new_size = curr_size.max(size);
  1644. if new_size > curr_size {
  1645. block.zero_extend(new_size - curr_size)?;
  1646. }
  1647. Ok(())
  1648. }
  1649. }
  1650. type CloseFut<'c> = impl 'c + Send + Future<Output = Result<()>>;
  1651. fn close<'c>(&'c self, from: &'c Arc<BlockPath>, msg: Close) -> Self::CloseFut<'c> {
  1652. async move {
  1653. let Close { inode, handle } = msg;
  1654. debug!("close: inode {inode}, handle {handle}");
  1655. let table_guard = self.table_guard().await;
  1656. let mut value = table_guard.write(inode).await?;
  1657. match value.handle_guard_mut(from, handle).await {
  1658. Ok(mut block) => {
  1659. block.flush()?;
  1660. block.flush_meta()?;
  1661. }
  1662. Err(err) => match err.downcast_ref::<Error>() {
  1663. // If the cause of the error is that the handle is read-only, then it is
  1664. // not actually an error.
  1665. Some(Error::ReadOnlyHandle(_)) => (),
  1666. _ => return Err(err),
  1667. },
  1668. };
  1669. value.forget_handle(handle);
  1670. Ok(())
  1671. }
  1672. }
  1673. type ForgetFut<'c> = impl 'c + Send + Future<Output = Result<()>>;
  1674. fn forget<'c>(&'c self, from: &'c Arc<BlockPath>, msg: Forget) -> Self::ForgetFut<'c> {
  1675. async move {
  1676. let Forget { inode, count } = msg;
  1677. debug!("forget: inode {inode}, count {count}");
  1678. self.inode_forget(from.clone(), inode, count).await.bterr()
  1679. }
  1680. }
    type LockFut<'c> = Ready<Result<()>>;
    /// Handles a `Lock` message.
    ///
    /// Not yet implemented: calling this panics via `todo!`. The `Ready` future type
    /// is a placeholder until a real implementation defines the returned future.
    fn lock<'c>(&'c self, _from: &'c Arc<BlockPath>, _msg: Lock) -> Self::LockFut<'c> {
        todo!();
    }
    type UnlockFut<'c> = Ready<Result<()>>;
    /// Handles an `Unlock` message.
    ///
    /// Not yet implemented: calling this panics via `todo!`, mirroring the `lock`
    /// stub above.
    fn unlock<'c>(&'c self, _from: &'c Arc<BlockPath>, _msg: Unlock) -> Self::UnlockFut<'c> {
        todo!();
    }
  1689. type AddReacapFut<'c> = impl 'c + Send + Future<Output = Result<()>>;
  1690. fn add_readcap<'c>(
  1691. &'c self,
  1692. from: &'c Arc<BlockPath>,
  1693. msg: AddReadcap,
  1694. ) -> Self::AddReacapFut<'c> {
  1695. async move {
  1696. let AddReadcap {
  1697. inode,
  1698. handle,
  1699. principal,
  1700. enc_key,
  1701. } = msg;
  1702. debug!("add_readcap: inode {inode}, handle {handle}, principal {principal}");
  1703. let table_guard = self.table_guard().await;
  1704. let mut value_guard = table_guard.write(inode).await?;
  1705. let mut block = value_guard.handle_guard_mut(from, handle).await?;
  1706. block.mut_meta_body().add_readcap_for(principal, &enc_key)
  1707. }
  1708. }
    type GrantAccessFut<'c> = impl 'c + Send + Future<Output = Result<()>>;
    /// Handles a `GrantAccess` message by delegating to `grant_access_to`.
    ///
    /// Unlike the other handlers, the body is not wrapped in an `async` block: the
    /// future returned by `grant_access_to` is itself the defining use of the
    /// opaque `GrantAccessFut` type, so the `debug!` line runs eagerly at call time
    /// rather than on first poll.
    fn grant_access<'c>(
        &'c self,
        from: &'c Arc<BlockPath>,
        msg: GrantAccess,
    ) -> Self::GrantAccessFut<'c> {
        let GrantAccess { inode, record } = msg;
        debug!("grant_access: inode {inode}, record {:?}", record);
        self.grant_access_to(from, inode, record)
    }
  1719. }
  1720. }