rev_persistence.rs

use crate::cache::{
    disk::{RevisionChangeset, RevisionDiskCache, SQLiteTextBlockRevisionPersistence},
    memory::RevisionMemoryCacheDelegate,
};
use crate::disk::{RevisionRecord, RevisionState};
use crate::memory::RevisionMemoryCache;
use crate::RevisionCompactor;
use flowy_database::ConnectionPool;
use flowy_error::{internal_error, FlowyError, FlowyResult};
use flowy_sync::entities::revision::{Revision, RevisionRange};
use std::collections::VecDeque;
use std::{borrow::Cow, sync::Arc};
use tokio::sync::RwLock;
use tokio::task::spawn_blocking;

pub const REVISION_WRITE_INTERVAL_IN_MILLIS: u64 = 600;
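
/// Persists revisions in a two-layer cache: an in-memory cache backed by a disk
/// cache, plus a `RevisionSyncSequence` tracking the rev_ids that are still
/// waiting to be synchronized with the remote.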
pub struct RevisionPersistence {
    user_id: String,
    object_id: String,
    disk_cache: Arc<dyn RevisionDiskCache<Error = FlowyError>>,
    memory_cache: Arc<RevisionMemoryCache>,
    sync_seq: RwLock<RevisionSyncSequence>,
}

impl RevisionPersistence {
    pub fn new(
        user_id: &str,
        object_id: &str,
        disk_cache: Arc<dyn RevisionDiskCache<Error = FlowyError>>,
    ) -> RevisionPersistence {
        let object_id = object_id.to_owned();
        let user_id = user_id.to_owned();
        let sync_seq = RwLock::new(RevisionSyncSequence::new());
        let memory_cache = Arc::new(RevisionMemoryCache::new(&object_id, Arc::new(disk_cache.clone())));
        Self {
            user_id,
            object_id,
            disk_cache,
            memory_cache,
            sync_seq,
        }
    }

    /// Save a revision that came from the remote to disk.
    #[tracing::instrument(level = "trace", skip(self, revision), fields(rev_id, object_id=%self.object_id), err)]
    pub(crate) async fn add_ack_revision(&self, revision: &Revision) -> FlowyResult<()> {
        tracing::Span::current().record("rev_id", &revision.rev_id);
        self.add(revision.clone(), RevisionState::Ack, true).await
    }

    /// Append a revision that already exists in the local database to the sync sequence.
    #[tracing::instrument(level = "trace", skip(self), fields(rev_id, object_id=%self.object_id), err)]
    pub(crate) async fn sync_revision(&self, revision: &Revision) -> FlowyResult<()> {
        tracing::Span::current().record("rev_id", &revision.rev_id);
        self.add(revision.clone(), RevisionState::Sync, false).await?;
        self.sync_seq.write().await.add(revision.rev_id)?;
        Ok(())
    }

    /// Save the revision to disk and append it to the end of the sync sequence.
    ///
    /// If other revisions are already waiting in the sync sequence, they are
    /// compacted together with the new revision into a single revision before
    /// being saved.
    #[tracing::instrument(level = "trace", skip_all, fields(rev_id, compact_range, object_id=%self.object_id), err)]
    pub(crate) async fn add_sync_revision<'a>(
        &'a self,
        revision: &'a Revision,
        compactor: Box<dyn RevisionCompactor + 'a>,
    ) -> FlowyResult<i64> {
        let result = self.sync_seq.read().await.compact();
        match result {
            None => {
                tracing::Span::current().record("rev_id", &revision.rev_id);
                self.add(revision.clone(), RevisionState::Sync, true).await?;
                self.sync_seq.write().await.add(revision.rev_id)?;
                Ok(revision.rev_id)
            }
            Some((range, mut compact_seq)) => {
                tracing::Span::current().record("compact_range", &format!("{}", range).as_str());
                let mut revisions = self.revisions_in_range(&range).await?;
                debug_assert_eq!(range.to_rev_ids().len(), revisions.len());
                // Append the new revision.
                revisions.push(revision.clone());
                // Compact multiple revisions into one.
                let compact_revision = compactor.compact(&self.user_id, &self.object_id, revisions)?;
                let rev_id = compact_revision.rev_id;
                tracing::Span::current().record("rev_id", &rev_id);
                // Record the new rev_id in the sync sequence.
                compact_seq.push_back(rev_id);
                // Replace the revisions in the range with the compacted revision.
                self.compact(&range, compact_revision).await?;
                debug_assert_eq!(self.sync_seq.read().await.len(), compact_seq.len());
                self.sync_seq.write().await.reset(compact_seq);
                Ok(rev_id)
            }
        }
    }

    /// Remove the revision with rev_id from the sync sequence.
    pub(crate) async fn ack_revision(&self, rev_id: i64) -> FlowyResult<()> {
        if self.sync_seq.write().await.ack(&rev_id).is_ok() {
            self.memory_cache.ack(&rev_id).await;
        }
        Ok(())
    }
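
    /// Return the revision at the front of the sync sequence, i.e. the next one
    /// waiting to be synchronized, if any.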
    pub(crate) async fn next_sync_revision(&self) -> FlowyResult<Option<Revision>> {
        match self.sync_seq.read().await.next_rev_id() {
            None => Ok(None),
            Some(rev_id) => Ok(self.get(rev_id).await.map(|record| record.revision)),
        }
    }

    /// Reset the cache when the local revisions conflict with the remote revisions.
    #[tracing::instrument(level = "trace", skip(self, revisions), err)]
    pub(crate) async fn reset(&self, revisions: Vec<Revision>) -> FlowyResult<()> {
        let records = revisions
            .into_iter()
            .map(|revision| RevisionRecord {
                revision,
                state: RevisionState::Sync,
                write_to_disk: false,
            })
            .collect::<Vec<_>>();
        let _ = self
            .disk_cache
            .delete_and_insert_records(&self.object_id, None, records.clone())?;
        let _ = self.memory_cache.reset_with_revisions(records).await;
        self.sync_seq.write().await.clear();
        Ok(())
    }
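
    /// Add a revision record to the memory cache, skipping revisions that are
    /// already cached. Records added with `write_to_disk` set are persisted to
    /// the disk cache by the memory cache's checkpoint.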
    async fn add(&self, revision: Revision, state: RevisionState, write_to_disk: bool) -> FlowyResult<()> {
        if self.memory_cache.contains(&revision.rev_id) {
            tracing::warn!("Duplicate revision: {}:{}-{:?}", self.object_id, revision.rev_id, state);
            return Ok(());
        }
        let record = RevisionRecord {
            revision,
            state,
            write_to_disk,
        };
        self.memory_cache.add(Cow::Owned(record)).await;
        Ok(())
    }
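
    /// Remove the revisions covered by `range` from both caches and store the
    /// compacted revision in their place.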
    async fn compact(&self, range: &RevisionRange, new_revision: Revision) -> FlowyResult<()> {
        self.memory_cache.remove_with_range(range);
        let rev_ids = range.to_rev_ids();
        let _ = self
            .disk_cache
            .delete_revision_records(&self.object_id, Some(rev_ids))?;
        self.add(new_revision, RevisionState::Sync, true).await?;
        Ok(())
    }
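
    /// Read a single revision record, checking the memory cache first and falling
    /// back to the disk cache on a miss.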
    pub async fn get(&self, rev_id: i64) -> Option<RevisionRecord> {
        match self.memory_cache.get(&rev_id).await {
            None => match self
                .disk_cache
                .read_revision_records(&self.object_id, Some(vec![rev_id]))
            {
                Ok(mut records) => {
                    let record = records.pop()?;
                    assert!(records.is_empty());
                    Some(record)
                }
                Err(e) => {
                    tracing::error!("{}", e);
                    None
                }
            },
            Some(revision) => Some(revision),
        }
    }
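
    /// Read every revision record stored on disk for the given document.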
    pub fn batch_get(&self, doc_id: &str) -> FlowyResult<Vec<RevisionRecord>> {
        self.disk_cache.read_revision_records(doc_id, None)
    }

    /// Read the revisions whose rev_id satisfies range.start <= rev_id <= range.end.
    pub async fn revisions_in_range(&self, range: &RevisionRange) -> FlowyResult<Vec<Revision>> {
        let range = range.clone();
        let mut records = self.memory_cache.get_with_range(&range).await?;
        let range_len = range.len() as usize;
        if records.len() != range_len {
            let disk_cache = self.disk_cache.clone();
            let object_id = self.object_id.clone();
            records = spawn_blocking(move || disk_cache.read_revision_records_with_range(&object_id, &range))
                .await
                .map_err(internal_error)??;
            if records.len() != range_len {
                // #[cfg(debug_assertions)]
                // records.iter().for_each(|record| {
                //     let delta = PlainDelta::from_bytes(&record.revision.delta_data).unwrap();
                //     tracing::trace!("{}", delta.to_string());
                // });
                tracing::error!("Expected {} revisions, but received {}", range_len, records.len());
            }
        }
        Ok(records
            .into_iter()
            .map(|record| record.revision)
            .collect::<Vec<Revision>>())
    }
}
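
/// Build the SQLite-backed revision disk cache for the given user.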
pub fn mk_revision_disk_cache(
    user_id: &str,
    pool: Arc<ConnectionPool>,
) -> Arc<dyn RevisionDiskCache<Error = FlowyError>> {
    Arc::new(SQLiteTextBlockRevisionPersistence::new(user_id, pool))
}
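
/// Lets the in-memory cache write through to the disk cache: `checkpoint_tick`
/// persists the records flagged `write_to_disk`, and `receive_ack` marks a record
/// as acknowledged on disk.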
impl RevisionMemoryCacheDelegate for Arc<dyn RevisionDiskCache<Error = FlowyError>> {
    fn checkpoint_tick(&self, mut records: Vec<RevisionRecord>) -> FlowyResult<()> {
        records.retain(|record| record.write_to_disk);
        if !records.is_empty() {
            tracing::Span::current().record(
                "checkpoint_result",
                &format!("{} records were saved", records.len()).as_str(),
            );
            let _ = self.create_revision_records(records)?;
        }
        Ok(())
    }

    fn receive_ack(&self, object_id: &str, rev_id: i64) {
        let changeset = RevisionChangeset {
            object_id: object_id.to_string(),
            rev_id: rev_id.into(),
            state: RevisionState::Ack,
        };
        match self.update_revision_record(vec![changeset]) {
            Ok(_) => {}
            Err(e) => tracing::error!("{}", e),
        }
    }
}
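
/// FIFO queue of rev_ids waiting to be acknowledged, kept in strictly ascending order.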
#[derive(Default)]
struct RevisionSyncSequence(VecDeque<i64>);

impl RevisionSyncSequence {
    fn new() -> Self {
        RevisionSyncSequence::default()
    }

    fn add(&mut self, new_rev_id: i64) -> FlowyResult<()> {
        // The new revision's rev_id must be greater than the last one in the sequence.
        if let Some(rev_id) = self.0.back() {
            if *rev_id >= new_rev_id {
                return Err(
                    FlowyError::internal().context(format!("The new revision's id must be greater than {}", rev_id))
                );
            }
        }
        self.0.push_back(new_rev_id);
        Ok(())
    }

    fn ack(&mut self, rev_id: &i64) -> FlowyResult<()> {
        let cur_rev_id = self.0.front().cloned();
        if let Some(pop_rev_id) = cur_rev_id {
            if &pop_rev_id != rev_id {
                let desc = format!(
                    "The ack rev_id:{} is not equal to the current rev_id:{}",
                    rev_id, pop_rev_id
                );
                return Err(FlowyError::internal().context(desc));
            }
            let _ = self.0.pop_front();
        }
        Ok(())
    }

    fn next_rev_id(&self) -> Option<i64> {
        self.0.front().cloned()
    }

    fn reset(&mut self, new_seq: VecDeque<i64>) {
        self.0 = new_seq;
    }

    fn clear(&mut self) {
        self.0.clear();
    }

    fn len(&self) -> usize {
        self.0.len()
    }

    // Compact all pending rev_ids into a single range, excluding the rev_id that is
    // currently being synchronized (the front of the queue). Returns the range to
    // compact together with the sequence that should remain after compaction.
    fn compact(&self) -> Option<(RevisionRange, VecDeque<i64>)> {
        self.next_rev_id()?;
        let mut new_seq = self.0.clone();
        let mut drained = new_seq.drain(1..).collect::<VecDeque<_>>();
        let start = drained.pop_front()?;
        let end = drained.pop_back().unwrap_or(start);
        Some((RevisionRange { start, end }, new_seq))
    }
}
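
// A minimal illustrative sketch of how `RevisionSyncSequence` behaves, mirroring
// the logic implemented above: rev_ids are strictly increasing, only the front
// rev_id can be acknowledged, and `compact` ranges over everything behind it.
// The test name and sample rev_ids are illustrative only.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn sync_sequence_add_ack_and_compact() {
        let mut seq = RevisionSyncSequence::new();
        assert!(seq.add(1).is_ok());
        assert!(seq.add(2).is_ok());
        assert!(seq.add(3).is_ok());
        // rev_ids must be strictly increasing.
        assert!(seq.add(3).is_err());

        // `compact` keeps the currently-synchronizing rev_id (the front) in the
        // returned sequence and ranges over the remaining rev_ids.
        let (range, remaining) = seq.compact().unwrap();
        assert_eq!((range.start, range.end), (2, 3));
        assert_eq!(remaining.len(), 1);
        assert_eq!(remaining[0], 1);

        // Only the front rev_id can be acknowledged.
        assert!(seq.ack(&2).is_err());
        assert!(seq.ack(&1).is_ok());
        assert_eq!(seq.next_rev_id(), Some(2));
    }
}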