use std::collections::HashMap;
use std::sync::{Arc, Weak};

use appflowy_integrate::collab_builder::AppFlowyCollabBuilder;
use appflowy_integrate::{CollabPersistenceConfig, CollabType, RocksCollabDB};
use collab::core::collab::{CollabRawData, MutexCollab};
use collab_database::blocks::BlockEvent;
use collab_database::database::{DatabaseData, YrsDocAction};
use collab_database::error::DatabaseError;
use collab_database::user::{
  CollabFuture, CollabObjectUpdate, CollabObjectUpdateByOid, DatabaseCollabService,
  WorkspaceDatabase,
};
use collab_database::views::{CreateDatabaseParams, CreateViewParams, DatabaseLayout};
use tokio::sync::RwLock;

use flowy_database_deps::cloud::DatabaseCloudService;
use flowy_error::{internal_error, FlowyError, FlowyResult};
use flowy_task::TaskDispatcher;

use crate::entities::{
  DatabaseDescriptionPB, DatabaseLayoutPB, DatabaseSnapshotPB, DidFetchRowPB,
  RepeatedDatabaseDescriptionPB,
};
use crate::notification::{send_notification, DatabaseNotification};
use crate::services::database::DatabaseEditor;
use crate::services::database_view::DatabaseLayoutDepsResolver;
use crate::services::share::csv::{CSVFormat, CSVImporter, ImportResult};

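/// Provides the user context the database layer needs: the user id, an
/// optional auth token, and a handle to the user's local collab database.
///
/// A minimal sketch of an implementer (the `FakeDatabaseUser` type and its
/// fields are illustrative, not part of this crate):
///
/// ```rust,ignore
/// struct FakeDatabaseUser {
///   uid: i64,
///   collab_db: Arc<RocksCollabDB>,
/// }
///
/// impl DatabaseUser for FakeDatabaseUser {
///   fn user_id(&self) -> Result<i64, FlowyError> {
///     Ok(self.uid)
///   }
///
///   fn token(&self) -> Result<Option<String>, FlowyError> {
///     // No token in a local-only setup.
///     Ok(None)
///   }
///
///   fn collab_db(&self, _uid: i64) -> Result<Weak<RocksCollabDB>, FlowyError> {
///     Ok(Arc::downgrade(&self.collab_db))
///   }
/// }
/// ```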
pub trait DatabaseUser: Send + Sync {
  fn user_id(&self) -> Result<i64, FlowyError>;
  fn token(&self) -> Result<Option<String>, FlowyError>;
  fn collab_db(&self, uid: i64) -> Result<Weak<RocksCollabDB>, FlowyError>;
}

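/// Owns the opened [WorkspaceDatabase] and the per-database [DatabaseEditor]s,
/// and creates, opens, imports, and exports databases on behalf of the user.
///
/// A rough usage sketch (the `user`, `task_scheduler`, `collab_builder`,
/// `cloud_service`, and id values are assumed to come from the surrounding
/// application setup):
///
/// ```rust,ignore
/// let manager = DatabaseManager::new(user, task_scheduler, collab_builder, cloud_service);
///
/// // Open (or fetch and open) the user's workspace database first.
/// manager
///   .initialize(uid, workspace_id, workspace_database_id)
///   .await?;
///
/// // Afterwards, editors can be resolved by view id.
/// let editor = manager.get_database_with_view_id("view-id").await?;
/// ```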
pub struct DatabaseManager {
  user: Arc<dyn DatabaseUser>,
  workspace_database: Arc<RwLock<Option<Arc<WorkspaceDatabase>>>>,
  task_scheduler: Arc<RwLock<TaskDispatcher>>,
  editors: RwLock<HashMap<String, Arc<DatabaseEditor>>>,
  collab_builder: Arc<AppFlowyCollabBuilder>,
  cloud_service: Arc<dyn DatabaseCloudService>,
}

impl DatabaseManager {
  pub fn new(
    database_user: Arc<dyn DatabaseUser>,
    task_scheduler: Arc<RwLock<TaskDispatcher>>,
    collab_builder: Arc<AppFlowyCollabBuilder>,
    cloud_service: Arc<dyn DatabaseCloudService>,
  ) -> Self {
    Self {
      user: database_user,
      workspace_database: Default::default(),
      task_scheduler,
      editors: Default::default(),
      collab_builder,
      cloud_service,
    }
  }

  /// Returns `true` if the collab object with the given `object_id` already
  /// exists in the user's local collab database.
  fn is_collab_exist(&self, uid: i64, collab_db: &Weak<RocksCollabDB>, object_id: &str) -> bool {
    match collab_db.upgrade() {
      None => false,
      Some(collab_db) => {
        let read_txn = collab_db.read_txn();
        read_txn.is_exist(uid, object_id)
      },
    }
  }

  pub async fn initialize(
    &self,
    uid: i64,
    _workspace_id: String,
    workspace_database_id: String,
  ) -> FlowyResult<()> {
    let collab_db = self.user.collab_db(uid)?;
    let collab_builder = UserDatabaseCollabServiceImpl {
      collab_builder: self.collab_builder.clone(),
      cloud_service: self.cloud_service.clone(),
    };
    let config = CollabPersistenceConfig::new().snapshot_per_update(10);
    let mut collab_raw_data = CollabRawData::default();

    // If the workspace database does not exist on disk, try to fetch it from the remote.
    if !self.is_collab_exist(uid, &collab_db, &workspace_database_id) {
      tracing::trace!("workspace database does not exist, fetching it from remote");
      match self
        .cloud_service
        .get_collab_update(&workspace_database_id, CollabType::WorkspaceDatabase)
        .await
      {
        Ok(updates) => collab_raw_data = updates,
        Err(err) => {
          return Err(FlowyError::record_not_found().context(format!(
            "get workspace database: {} failed: {}",
            workspace_database_id, err,
          )));
        },
      }
    }

    // Construct the workspace database.
    tracing::trace!("open workspace database: {}", &workspace_database_id);
    let collab = collab_builder.build_collab_with_config(
      uid,
      &workspace_database_id,
      CollabType::WorkspaceDatabase,
      collab_db.clone(),
      collab_raw_data,
      &config,
    );
    let workspace_database =
      WorkspaceDatabase::open(uid, collab, collab_db, config, collab_builder);
    subscribe_block_event(&workspace_database);
    *self.workspace_database.write().await = Some(Arc::new(workspace_database));

    // Remove all existing editors.
    self.editors.write().await.clear();
    Ok(())
  }

  pub async fn initialize_with_new_user(
    &self,
    user_id: i64,
    workspace_id: String,
    database_storage_id: String,
  ) -> FlowyResult<()> {
    self
      .initialize(user_id, workspace_id, database_storage_id)
      .await?;
    Ok(())
  }

  pub async fn get_all_databases_description(&self) -> RepeatedDatabaseDescriptionPB {
    let mut items = vec![];
    if let Ok(wdb) = self.get_workspace_database().await {
      items = wdb
        .get_all_databases()
        .into_iter()
        .map(DatabaseDescriptionPB::from)
        .collect();
    }
    RepeatedDatabaseDescriptionPB { items }
  }

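  /// Returns the [DatabaseEditor] that backs the given view, opening the
  /// database first if it is not already open.
  ///
  /// A hypothetical call site (the view id is illustrative):
  ///
  /// ```rust,ignore
  /// let editor = manager.get_database_with_view_id("grid-view-id").await?;
  /// let csv = editor.export_csv(CSVFormat::Original).await?;
  /// ```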
  pub async fn get_database_with_view_id(&self, view_id: &str) -> FlowyResult<Arc<DatabaseEditor>> {
    let database_id = self.get_database_id_with_view_id(view_id).await?;
    self.get_database(&database_id).await
  }

  pub async fn get_database_id_with_view_id(&self, view_id: &str) -> FlowyResult<String> {
    let wdb = self.get_workspace_database().await?;
    wdb.get_database_id_with_view_id(view_id).ok_or_else(|| {
      FlowyError::record_not_found()
        .context(format!("The database for view id: {} not found", view_id))
    })
  }

  pub async fn get_database(&self, database_id: &str) -> FlowyResult<Arc<DatabaseEditor>> {
    if let Some(editor) = self.editors.read().await.get(database_id) {
      return Ok(editor.clone());
    }
    self.open_database(database_id).await
  }

  pub async fn open_database(&self, database_id: &str) -> FlowyResult<Arc<DatabaseEditor>> {
    tracing::trace!("create new editor for database {}", database_id);
    let mut editors = self.editors.write().await;
    let wdb = self.get_workspace_database().await?;
    let database = wdb
      .get_database(database_id)
      .await
      .ok_or_else(FlowyError::record_not_found)?;
    let editor = Arc::new(DatabaseEditor::new(database, self.task_scheduler.clone()).await?);
    editors.insert(database_id.to_string(), editor.clone());
    Ok(editor)
  }

  #[tracing::instrument(level = "debug", skip_all)]
  pub async fn close_database_view<T: AsRef<str>>(&self, view_id: T) -> FlowyResult<()> {
    // TODO(natan): defer closing the database if the sync is not finished
    let view_id = view_id.as_ref();
    let wdb = self.get_workspace_database().await?;
    if let Some(database_id) = wdb.get_database_id_with_view_id(view_id) {
      wdb.close_database(&database_id);
      let mut editors = self.editors.write().await;
      if let Some(editor) = editors.get(&database_id) {
        if editor.close_view_editor(view_id).await {
          editor.close().await;
          editors.remove(&database_id);
        }
      }
    }
    Ok(())
  }

  pub async fn delete_database_view(&self, view_id: &str) -> FlowyResult<()> {
    let database = self.get_database_with_view_id(view_id).await?;
    let _ = database.delete_database_view(view_id).await?;
    Ok(())
  }

  pub async fn duplicate_database(&self, view_id: &str) -> FlowyResult<Vec<u8>> {
    let wdb = self.get_workspace_database().await?;
    let data = wdb.get_database_duplicated_data(view_id).await?;
    let json_bytes = data.to_json_bytes()?;
    Ok(json_bytes)
  }

  /// Create a new database with the given data that can be deserialized to [DatabaseData].
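  ///
  /// For example, a duplicated database can be re-created under a new view id
  /// (the view ids below are illustrative):
  ///
  /// ```rust,ignore
  /// let json_bytes = manager.duplicate_database("source-view-id").await?;
  /// manager
  ///   .create_database_with_database_data("new-view-id", json_bytes)
  ///   .await?;
  /// ```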
  #[tracing::instrument(level = "trace", skip_all, err)]
  pub async fn create_database_with_database_data(
    &self,
    view_id: &str,
    data: Vec<u8>,
  ) -> FlowyResult<()> {
    let mut database_data = DatabaseData::from_json_bytes(data)?;
    database_data.view.id = view_id.to_string();
    let wdb = self.get_workspace_database().await?;
    let _ = wdb.create_database_with_data(database_data)?;
    Ok(())
  }

  pub async fn create_database_with_params(&self, params: CreateDatabaseParams) -> FlowyResult<()> {
    let wdb = self.get_workspace_database().await?;
    let _ = wdb.create_database(params)?;
    Ok(())
  }

  /// A linked view is a view that is linked to an existing database.
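  ///
  /// For example, adding a board view on top of an existing grid's database
  /// (the name and ids below are illustrative):
  ///
  /// ```rust,ignore
  /// manager
  ///   .create_linked_view(
  ///     "Board".to_string(),
  ///     DatabaseLayout::Board,
  ///     database_id,
  ///     new_view_id,
  ///   )
  ///   .await?;
  /// ```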
  #[tracing::instrument(level = "trace", skip(self), err)]
  pub async fn create_linked_view(
    &self,
    name: String,
    layout: DatabaseLayout,
    database_id: String,
    database_view_id: String,
  ) -> FlowyResult<()> {
    let wdb = self.get_workspace_database().await?;
    let mut params = CreateViewParams::new(database_id.clone(), database_view_id, name, layout);
    if let Some(database) = wdb.get_database(&database_id).await {
      let (field, layout_setting) = DatabaseLayoutDepsResolver::new(database, layout)
        .resolve_deps_when_create_database_linked_view();
      if let Some(field) = field {
        params = params.with_deps_fields(vec![field]);
      }
      if let Some(layout_setting) = layout_setting {
        params = params.with_layout_setting(layout_setting);
      }
    }
    wdb.create_database_linked_view(params).await?;
    Ok(())
  }

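  /// Imports CSV content into a brand-new database and returns the ids of the
  /// created database and view.
  ///
  /// A rough call-site sketch (the view id and CSV content are illustrative):
  ///
  /// ```rust,ignore
  /// let csv = "Name,Status\nWrite docs,Done\nShip release,To Do".to_string();
  /// let result = manager
  ///   .import_csv("new-view-id".to_string(), csv, CSVFormat::Original)
  ///   .await?;
  /// println!("imported into database {}", result.database_id);
  /// ```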
  pub async fn import_csv(
    &self,
    view_id: String,
    content: String,
    format: CSVFormat,
  ) -> FlowyResult<ImportResult> {
    let params = tokio::task::spawn_blocking(move || {
      CSVImporter.import_csv_from_string(view_id, content, format)
    })
    .await
    .map_err(internal_error)??;
    let result = ImportResult {
      database_id: params.database_id.clone(),
      view_id: params.view_id.clone(),
    };
    self.create_database_with_params(params).await?;
    Ok(result)
  }

  // TODO: import a CSV file from the given file path. Not implemented yet.
  pub async fn import_csv_from_file(
    &self,
    _file_path: String,
    _format: CSVFormat,
  ) -> FlowyResult<()> {
    Ok(())
  }

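  /// Exports the database behind the given view as a CSV string in the
  /// requested format.
  ///
  /// For example (the view id and output path are illustrative):
  ///
  /// ```rust,ignore
  /// let csv = manager.export_csv("grid-view-id", CSVFormat::Original).await?;
  /// std::fs::write("/tmp/export.csv", csv)?;
  /// ```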
  pub async fn export_csv(&self, view_id: &str, style: CSVFormat) -> FlowyResult<String> {
    let database = self.get_database_with_view_id(view_id).await?;
    database.export_csv(style).await
  }

  pub async fn update_database_layout(
    &self,
    view_id: &str,
    layout: DatabaseLayoutPB,
  ) -> FlowyResult<()> {
    let database = self.get_database_with_view_id(view_id).await?;
    database.update_view_layout(view_id, layout.into()).await
  }

  pub async fn get_database_snapshots(
    &self,
    view_id: &str,
  ) -> FlowyResult<Vec<DatabaseSnapshotPB>> {
    let database_id = self.get_database_id_with_view_id(view_id).await?;
    let mut snapshots = vec![];
    if let Some(snapshot) = self
      .cloud_service
      .get_collab_latest_snapshot(&database_id)
      .await?
      .map(|snapshot| DatabaseSnapshotPB {
        snapshot_id: snapshot.snapshot_id,
        snapshot_desc: "".to_string(),
        created_at: snapshot.created_at,
        data: snapshot.data,
      })
    {
      snapshots.push(snapshot);
    }
    Ok(snapshots)
  }

  async fn get_workspace_database(&self) -> FlowyResult<Arc<WorkspaceDatabase>> {
    let database = self.workspace_database.read().await;
    match &*database {
      None => Err(FlowyError::internal().context("Workspace database not initialized")),
      Some(user_database) => Ok(user_database.clone()),
    }
  }

  /// Only expose this method for testing
  #[cfg(debug_assertions)]
  pub fn get_cloud_service(&self) -> &Arc<dyn DatabaseCloudService> {
    &self.cloud_service
  }
}

/// Subscribes to block events from the workspace database and notifies the
/// clients that are listening to the affected rows.
fn subscribe_block_event(workspace_database: &WorkspaceDatabase) {
  let mut block_event_rx = workspace_database.subscribe_block_event();
  tokio::spawn(async move {
    while let Ok(event) = block_event_rx.recv().await {
      match event {
        BlockEvent::DidFetchRow(row_details) => {
          for row_detail in row_details {
            tracing::trace!("Did fetch row: {:?}", row_detail.row.id);
            let row_id = row_detail.row.id.clone();
            let pb = DidFetchRowPB::from(row_detail);
            send_notification(&row_id, DatabaseNotification::DidFetchRow)
              .payload(pb)
              .send();
          }
        },
      }
    }
  });
}

/// Implements [DatabaseCollabService] for the workspace database: builds
/// collabs via the shared [AppFlowyCollabBuilder] and fetches collab updates
/// from the cloud service.
struct UserDatabaseCollabServiceImpl {
  collab_builder: Arc<AppFlowyCollabBuilder>,
  cloud_service: Arc<dyn DatabaseCloudService>,
}

impl DatabaseCollabService for UserDatabaseCollabServiceImpl {
  fn get_collab_update(
    &self,
    object_id: &str,
    object_ty: CollabType,
  ) -> CollabFuture<Result<CollabObjectUpdate, DatabaseError>> {
    let object_id = object_id.to_string();
    let weak_cloud_service = Arc::downgrade(&self.cloud_service);
    Box::pin(async move {
      match weak_cloud_service.upgrade() {
        None => {
          tracing::warn!("Cloud service is dropped");
          Ok(vec![])
        },
        Some(cloud_service) => {
          let updates = cloud_service
            .get_collab_update(&object_id, object_ty)
            .await?;
          Ok(updates)
        },
      }
    })
  }

  fn batch_get_collab_update(
    &self,
    object_ids: Vec<String>,
    object_ty: CollabType,
  ) -> CollabFuture<Result<CollabObjectUpdateByOid, DatabaseError>> {
    let weak_cloud_service = Arc::downgrade(&self.cloud_service);
    Box::pin(async move {
      match weak_cloud_service.upgrade() {
        None => {
          tracing::warn!("Cloud service is dropped");
          Ok(CollabObjectUpdateByOid::default())
        },
        Some(cloud_service) => {
          let updates = cloud_service
            .batch_get_collab_updates(object_ids, object_ty)
            .await?;
          Ok(updates)
        },
      }
    })
  }

  fn build_collab_with_config(
    &self,
    uid: i64,
    object_id: &str,
    object_type: CollabType,
    collab_db: Weak<RocksCollabDB>,
    collab_raw_data: CollabRawData,
    config: &CollabPersistenceConfig,
  ) -> Arc<MutexCollab> {
    self
      .collab_builder
      .build_with_config(
        uid,
        object_id,
        object_type,
        collab_db,
        collab_raw_data,
        config,
      )
      .unwrap()
  }
}