From 77ca6d030444d28efbf97fdc543621e4ce2e1457 Mon Sep 17 00:00:00 2001 From: "Elf M. Sternberg" Date: Mon, 2 Nov 2020 18:32:01 -0800 Subject: [PATCH] A reset for the single table case. --- .gitignore | 2 + Cargo.toml | 2 + server/nm-store/src/errors.rs | 1 + server/nm-store/src/lib.rs | 130 ++--- server/nm-store/src/reference_parser.rs | 134 ----- .../nm-store/src/sql/initialize_database.sql | 59 +-- server/nm-store/src/sql/insert_one_note.sql | 8 - server/nm-store/src/sql/insert_one_page.sql | 8 - .../reverse_select_references_from_note.sql | 72 --- .../src/sql/select_kasten_by_parameter.sql | 85 ++++ .../sql/select_note_collection_from_root.sql | 91 ---- server/nm-store/src/sql/select_one_note.sql | 1 - server/nm-store/src/sql/select_one_page.sql | 1 - server/nm-store/src/sql/update_one_note.sql | 3 - server/nm-store/src/store.rs | 230 +-------- server/nm-store/src/store_private.rs | 463 +----------------- server/nm-store/src/structs.rs | 127 +---- server/nm-trees/src/lib.rs | 2 +- 18 files changed, 212 insertions(+), 1207 deletions(-) create mode 100644 .gitignore create mode 100644 Cargo.toml delete mode 100644 server/nm-store/src/reference_parser.rs delete mode 100644 server/nm-store/src/sql/insert_one_note.sql delete mode 100644 server/nm-store/src/sql/insert_one_page.sql delete mode 100644 server/nm-store/src/sql/reverse_select_references_from_note.sql create mode 100644 server/nm-store/src/sql/select_kasten_by_parameter.sql delete mode 100644 server/nm-store/src/sql/select_note_collection_from_root.sql delete mode 100644 server/nm-store/src/sql/select_one_note.sql delete mode 100644 server/nm-store/src/sql/select_one_page.sql delete mode 100644 server/nm-store/src/sql/update_one_note.sql diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..96ef6c0 --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +/target +Cargo.lock diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000..527128a --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,2 @@ +[workspace] +members = ["server/*"] diff --git a/server/nm-store/src/errors.rs b/server/nm-store/src/errors.rs index f42d6da..26031f2 100644 --- a/server/nm-store/src/errors.rs +++ b/server/nm-store/src/errors.rs @@ -1,3 +1,4 @@ +use sqlx; use thiserror::Error; /// All the ways looking up objects can fail diff --git a/server/nm-store/src/lib.rs b/server/nm-store/src/lib.rs index f526296..38448ae 100644 --- a/server/nm-store/src/lib.rs +++ b/server/nm-store/src/lib.rs @@ -1,12 +1,12 @@ mod errors; -mod reference_parser; +// mod reference_parser; mod store; mod store_private; mod structs; pub use crate::errors::NoteStoreError; pub use crate::store::NoteStore; -pub use crate::structs::{RawPage, RawNote, NewPage, NewNote}; +pub use crate::structs::{RawZettle}; #[cfg(test)] mod tests { @@ -22,16 +22,17 @@ mod tests { assert!(reset.is_ok(), "{:?}", reset); storagepool } + + // Request for the page by slug. If the page exists, return it. + // If the page doesn't, return NotFound + // + #[tokio::test(threaded_scheduler)] + async fn fetching_unfound_page_by_slug_works() { + let storagepool = fresh_inmemory_database().await; + let foundkasten = storagepool.get_kasten_by_slug("nonexistent-kasten").await.unwrap(); + assert_eq!(foundkasten.len(), 0, "{:?}", foundkasten); + } - // Request for the page by slug. - // If the page exists, return it. 
If the page doesn't, return NotFound - - #[tokio::test(threaded_scheduler)] - async fn fetching_unfound_page_by_slug_works() { - let storagepool = fresh_inmemory_database().await; - let unfoundpage = storagepool.get_page_by_slug("nonexistent-page").await; - assert!(unfoundpage.is_err()); - } // Request for the page by title. If the page exists, return it. // If the page doesn't exist, create it then return it anyway. @@ -42,68 +43,67 @@ mod tests { let title = "Nonexistent Page"; let now = chrono::Utc::now(); let storagepool = fresh_inmemory_database().await; - let newpageresult = storagepool.get_page_by_title(&title).await; + let newpageresult = storagepool.get_kasten_by_title(&title).await; assert!(newpageresult.is_ok(), "{:?}", newpageresult); - let (newpage, newnotes) = newpageresult.unwrap(); + let newpage = newpageresult.unwrap(); - assert_eq!(newpage.title, title, "{:?}", newpage.title); - assert_eq!(newpage.slug, "nonexistent-page"); + assert_eq!(newpage.content, title, "{:?}", newpage.content); + assert_eq!(newpage.id, "nonexistent-page"); - assert_eq!(newnotes.len(), 1); - assert_eq!(newnotes[0].notetype, "root"); - assert_eq!(newpage.note_id, newnotes[0].id); + assert_eq!(newpage.children.len(), 0); + assert_eq!(newpage.kind, "page"); assert!((newpage.creation_date - now).num_minutes() < 1); assert!((newpage.updated_date - now).num_minutes() < 1); assert!((newpage.lastview_date - now).num_minutes() < 1); assert!(newpage.deleted_date.is_none()); } - - fn make_new_note(content: &str) -> structs::NewNote { - structs::NewNoteBuilder::default() - .content(content.to_string()) - .build() - .unwrap() - } - - #[tokio::test(threaded_scheduler)] - async fn can_nest_notes() { - let title = "Nonexistent Page"; - let storagepool = fresh_inmemory_database().await; - let newpageresult = storagepool.get_page_by_title(&title).await; - let (_newpage, newnotes) = newpageresult.unwrap(); - - let root = &newnotes[0]; - - let note1 = make_new_note("1"); - let note1_uuid = storagepool.insert_nested_note(¬e1, &root.uuid, 0).await; - assert!(note1_uuid.is_ok(), "{:?}", note1_uuid); - let note1_uuid = note1_uuid.unwrap(); - - let note2 = make_new_note("2"); - let note2_uuid = storagepool.insert_nested_note(¬e2, &root.uuid, 0).await; - assert!(note2_uuid.is_ok(), "{:?}", note2_uuid); - let note2_uuid = note2_uuid.unwrap(); - - let note3 = make_new_note("3"); - let note3_uuid = storagepool.insert_nested_note(¬e3, ¬e1_uuid, 0).await; - assert!(note3_uuid.is_ok(), "{:?}", note3_uuid); - let _note3_uuid = note3_uuid.unwrap(); - - let note4 = make_new_note("4"); - let note4_uuid = storagepool.insert_nested_note(¬e4, ¬e2_uuid, 0).await; - assert!(note4_uuid.is_ok(), "{:?}", note4_uuid); - let _note4_uuid = note4_uuid.unwrap(); - - let newpageresult = storagepool.get_page_by_title(&title).await; - let (newpage, newnotes) = newpageresult.unwrap(); - - assert_eq!(newpage.title, title, "{:?}", newpage.title); - assert_eq!(newpage.slug, "nonexistent-page"); - - assert_eq!(newnotes.len(), 5); - assert_eq!(newnotes[0].notetype, "root"); - assert_eq!(newpage.note_id, newnotes[0].id); - } +// +// fn make_new_note(content: &str) -> structs::NewNote { +// structs::NewNoteBuilder::default() +// .content(content.to_string()) +// .build() +// .unwrap() +// } +// +// #[tokio::test(threaded_scheduler)] +// async fn can_nest_notes() { +// let title = "Nonexistent Page"; +// let storagepool = fresh_inmemory_database().await; +// let newpageresult = storagepool.get_page_by_title(&title).await; +// let newpage = 
newpageresult.unwrap(); +// +// let root = &newnotes[0]; +// +// let note1 = make_new_note("1"); +// let note1_uuid = storagepool.insert_nested_note(¬e1, &root.uuid, 0).await; +// assert!(note1_uuid.is_ok(), "{:?}", note1_uuid); +// let note1_uuid = note1_uuid.unwrap(); +// +// let note2 = make_new_note("2"); +// let note2_uuid = storagepool.insert_nested_note(¬e2, &root.uuid, 0).await; +// assert!(note2_uuid.is_ok(), "{:?}", note2_uuid); +// let note2_uuid = note2_uuid.unwrap(); +// +// let note3 = make_new_note("3"); +// let note3_uuid = storagepool.insert_nested_note(¬e3, ¬e1_uuid, 0).await; +// assert!(note3_uuid.is_ok(), "{:?}", note3_uuid); +// let _note3_uuid = note3_uuid.unwrap(); +// +// let note4 = make_new_note("4"); +// let note4_uuid = storagepool.insert_nested_note(¬e4, ¬e2_uuid, 0).await; +// assert!(note4_uuid.is_ok(), "{:?}", note4_uuid); +// let _note4_uuid = note4_uuid.unwrap(); +// +// let newpageresult = storagepool.get_page_by_title(&title).await; +// let (newpage, newnotes) = newpageresult.unwrap(); +// +// assert_eq!(newpage.title, title, "{:?}", newpage.title); +// assert_eq!(newpage.slug, "nonexistent-page"); +// +// assert_eq!(newnotes.len(), 5); +// assert_eq!(newnotes[0].notetype, "root"); +// assert_eq!(newpage.note_id, newnotes[0].id); +// } } diff --git a/server/nm-store/src/reference_parser.rs b/server/nm-store/src/reference_parser.rs deleted file mode 100644 index d7d65f6..0000000 --- a/server/nm-store/src/reference_parser.rs +++ /dev/null @@ -1,134 +0,0 @@ -use comrak::nodes::{AstNode, NodeValue}; -use comrak::{parse_document, Arena, ComrakOptions}; -use lazy_static::lazy_static; -use regex::bytes::Regex as BytesRegex; -use regex::Regex; - -pub struct Finder(pub Vec); - -impl Finder { - pub fn new() -> Self { - Finder(Vec::new()) - } - - fn iter_nodes<'a, F>(&mut self, node: &'a AstNode<'a>, f: &F) - where - F: Fn(&'a AstNode<'a>) -> Option>, - { - if let Some(mut v) = f(node) { - self.0.append(&mut v); - } - for c in node.children() { - self.iter_nodes(c, f); - } - } -} - -fn find_links(document: &str) -> Vec { - let arena = Arena::new(); - let mut finder = Finder::new(); - let root = parse_document(&arena, document, &ComrakOptions::default()); - - finder.iter_nodes(root, &|node| { - lazy_static! { - static ref RE_REFERENCES: BytesRegex = BytesRegex::new(r"(\[\[([^\]]+)\]\]|(\#[:\w\-]+))").unwrap(); - } - - match &node.data.borrow().value { - NodeValue::Text(ref text) => Some( - RE_REFERENCES - .captures_iter(text) - .filter_map(|t| t.get(1)) - .map(|t| String::from_utf8_lossy(t.as_bytes()).to_string()) - .filter(|s| !s.is_empty()) - .collect(), - ), - _ => None, - } - }); - - finder.0 -} - -fn recase(title: &str) -> String { - lazy_static! { - static ref RE_PASS1: Regex = Regex::new(r"(?P.)(?P[A-Z][a-z]+)").unwrap(); - static ref RE_PASS2: Regex = Regex::new(r"(?P[[:lower:]]|\d)(?P[[:upper:]])").unwrap(); - static ref RE_PASS4: Regex = Regex::new(r"(?P[a-z])(?P\d)").unwrap(); - static ref RE_PASS3: Regex = Regex::new(r"(:|_|-| )+").unwrap(); - } - - // This should panic if misused, so... 
:-) - let pass = title.to_string(); - let pass = pass.strip_prefix("#").unwrap(); - - let pass = RE_PASS1.replace_all(&pass, "$s $n"); - let pass = RE_PASS4.replace_all(&pass, "$s $n"); - let pass = RE_PASS2.replace_all(&pass, "$s $n"); - RE_PASS3.replace_all(&pass, " ").trim().to_string() -} - -fn build_page_titles(references: &[String]) -> Vec { - references - .iter() - .filter_map(|s| match s.chars().next() { - Some('#') => Some(recase(s)), - Some('[') => Some(s.strip_prefix("[[").unwrap().strip_suffix("]]").unwrap().to_string()), - Some(_) => Some(s.clone()), - _ => None, - }) - .filter(|s| !s.is_empty()) - .collect() -} - -pub(crate) fn build_references(content: &str) -> Vec { - build_page_titles(&find_links(content)) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn finds_expected() { - let sample = r###" -# Header -- NotATest 1 -- [[Test 2]] -- #Test3 -- #TestFourAndSo -- #Test-six-is-six -- #recipe:entree -- # -- #-_- -- #--Prefixed -- [[]] - -But *[[Test Seven]]* isn't. And *#Test_Eight____is_Messed-up* -And [[Test Bite Me]] is the worst. -Right? [[ -]] -"###; - let res = build_page_titles(&find_links(sample)); - let expected = vec![ - "Test 2", - "Test 3", - "Test Four And So", - "Test six is six", - "recipe entree", - "Prefixed", - "Test Seven", - "Test Eight is Messed up", - "Test Bite Me", - ]; - assert!(res.iter().eq(expected.iter()), "{:?}", res); - } - - #[test] - fn doesnt_crash_on_empty() { - let sample = ""; - let res = build_page_titles(&find_links(sample)); - let expected: Vec = vec![]; - assert!(res.iter().eq(expected.iter()), "{:?}", res); - } -} diff --git a/server/nm-store/src/sql/initialize_database.sql b/server/nm-store/src/sql/initialize_database.sql index 9d4b6e1..a855b2e 100644 --- a/server/nm-store/src/sql/initialize_database.sql +++ b/server/nm-store/src/sql/initialize_database.sql @@ -1,53 +1,34 @@ -DROP TABLE IF EXISTS notes; -DROP TABLE IF EXISTS note_relationships; -DROP TABLE IF EXISTS pages; -DROP TABLE IF EXISTS page_relationships; +DROP TABLE IF EXISTS zetteln; +DROP TABLE IF EXISTS zettle_relationships; +DROP INDEX IF EXISTS zetteln_ids; DROP TABLE IF EXISTS favorites; -CREATE TABLE notes ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - uuid TEXT NOT NULL UNIQUE, - content TEXT NULL, - notetype TEXT, +CREATE TABLE zetteln ( + id TEXT NOT NULL PRIMARY KEY, + content TEXT NOT NULL, + kind TEXT NOT NULL, + location INTEGER NOT NULL, creation_date DATETIME NOT NULL, updated_date DATETIME NOT NULL, lastview_date DATETIME NOT NULL, deleted_date DATETIME NULL ); -CREATE INDEX notes_uuids ON notes (uuid); - -CREATE TABLE pages ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - title text NOT NULL UNIQUE, - slug text NOT NULL UNIQUE, - note_id INTEGER, - creation_date DATETIME NOT NULL, - updated_date DATETIME NOT NULL, - lastview_date DATETIME NOT NULL, - deleted_date DATETIME NULL, - FOREIGN KEY (note_id) REFERENCES notes (id) ON DELETE NO ACTION ON UPDATE NO ACTION -); - -CREATE INDEX pages_slugs ON pages (slug); +CREATE INDEX zettle_ids ON zetteln (id); CREATE TABLE favorites ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - position INTEGER NOT NULL + id TEXT NOT NULL, + location INTEGER NOT NULL, + FOREIGN KEY (id) REFERENCES zetteln (id) ON DELETE CASCADE ); -CREATE TABLE note_relationships ( - note_id INTEGER NOT NULL, - parent_id INTEGER NOT NULL, - position INTEGER NOT NULL, - nature TEXT NOT NULL, - FOREIGN KEY (note_id) REFERENCES notes (id) ON DELETE NO ACTION ON UPDATE NO ACTION, - FOREIGN KEY (parent_id) REFERENCES notes (id) ON DELETE NO ACTION 
ON UPDATE NO ACTION +CREATE TABLE zettle_relationships ( + zettle_id TEXT NOT NULL, + parent_id TEXT NOT NULL, + location INTEGER NOT NULL, + kind TEXT NOT NULL, + -- If either zettle disappears, we want all the edges to disappear as well. + FOREIGN KEY (zettle_id) REFERENCES zetteln (id) ON DELETE CASCADE, + FOREIGN KEY (parent_id) REFERENCES zetteln (id) ON DELETE CASCADE ); -CREATE TABLE page_relationships ( - note_id INTEGER NOT NULL, - page_id INTEGER NOT NULL, - FOREIGN KEY (note_id) references notes (id) ON DELETE NO ACTION ON UPDATE NO ACTION, - FOREIGN KEY (page_id) references pages (id) ON DELETE NO ACTION ON UPDATE NO ACTION -); diff --git a/server/nm-store/src/sql/insert_one_note.sql b/server/nm-store/src/sql/insert_one_note.sql deleted file mode 100644 index fdefb2c..0000000 --- a/server/nm-store/src/sql/insert_one_note.sql +++ /dev/null @@ -1,8 +0,0 @@ -INSERT INTO notes ( - uuid, - content, - notetype, - creation_date, - updated_date, - lastview_date) -VALUES (?, ?, ?, ?, ?, ?); diff --git a/server/nm-store/src/sql/insert_one_page.sql b/server/nm-store/src/sql/insert_one_page.sql deleted file mode 100644 index 6142c5e..0000000 --- a/server/nm-store/src/sql/insert_one_page.sql +++ /dev/null @@ -1,8 +0,0 @@ -INSERT INTO pages ( - slug, - title, - note_id, - creation_date, - updated_date, - lastview_date) -VALUES (?, ?, ?, ?, ?, ?); diff --git a/server/nm-store/src/sql/reverse_select_references_from_note.sql b/server/nm-store/src/sql/reverse_select_references_from_note.sql deleted file mode 100644 index 18a00c9..0000000 --- a/server/nm-store/src/sql/reverse_select_references_from_note.sql +++ /dev/null @@ -1,72 +0,0 @@ -SELECT - id, - uuid, - parent_id, - parent_uuid, - content, - notetype, - creation_date, - updated_date, - lastview_date, - deleted_date - -FROM ( - - WITH RECURSIVE parents ( - id, - uuid, - parent_id, - parent_uuid, - content, - notetype, - creation_date, - updated_date, - lastview_date, - deleted_date, - cycle - ) - - AS ( - - SELECT - notes.id, - notes.uuid, - note_parents.id, - note_parents.uuid, - notes.content, - notes.notetype, - notes.creation_date, - notes.updated_date, - notes.lastview_date, - notes.deleted_date, - ','||notes.id||',' - FROM notes - INNER JOIN note_relationships - ON notes.id = note_relationships.note_id - AND notes.notetype = 'note' - INNER JOIN notes as note_parents - ON note_parents.id = note_relationships.parent_id - WHERE notes.id = ? -- IMPORTANT: THIS IS THE PARAMETER - - UNION - SELECT DISTINCT - notes.id, - notes.uuid, - next_parent.id, - next_parent.uuid, - notes.content, - notes.creation_date, - notes.updated_date, - notes.lastview_date, - notes.deleted_date, - parents.cycle||notes.id||',' - FROM notes - INNER JOIN parents - ON parents.parent_id = notes.id - LEFT JOIN note_relationships - ON note_relationships.note_id = notes.id - LEFT JOIN notes as next_parent - ON next_parent.id = note_relationships.parent_id - WHERE parents.cycle NOT LIKE '%,'||notes.id||',%' - ) - SELECT * from parents); diff --git a/server/nm-store/src/sql/select_kasten_by_parameter.sql b/server/nm-store/src/sql/select_kasten_by_parameter.sql new file mode 100644 index 0000000..73cd70d --- /dev/null +++ b/server/nm-store/src/sql/select_kasten_by_parameter.sql @@ -0,0 +1,85 @@ +-- This is a cut-and-paste of the select_note_collection_from_title.sql +-- file with one line changed. 
This is necessary because Larry +-- Ellison didn't trust programmers to understand recursion and +-- composition in 1983 (https://www.holistics.io/blog/quel-vs-sql/) +-- and that still makes me angry to this day. + +SELECT + id, + parent_id, + content, + location, + kind, + creation_date, + updated_date, + lastview_date, + deleted_date + +FROM ( + + WITH RECURSIVE zettelntree ( + id, + parent_id, + content, + location, + kind, + creation_date, + updated_date, + lastview_date, + deleted_date, + cycle + ) + + AS ( + + -- The seed query. Finds the root node of any tree of zetteln, + -- which by definition has a location of zero and a type of + -- 'page'. + SELECT + zetteln.id, + zetteln.id AS parent_id, + zetteln.content, + zetteln.location, + zetteln.kind, + zetteln.creation_date, + zetteln.updated_date, + zetteln.lastview_date, + zetteln.deleted_date, + ','||zetteln.id||',' -- Cycle monitor + FROM zetteln + WHERE zetteln.kind = "page" + AND zetteln.location = 0 + AND QUERYPARAMETER = ? -- The Query Parameter + + -- RECURSIVE expression + -- + -- Here, for each recursion down the tree, we collect the child + -- nodes for a given node, eliding any cycles. + -- + -- TODO: Figure out what to do when a cycle DOES occur. + UNION SELECT + zetteln.id, + zettelntree.id AS parent_id, + zetteln.content, + zettle_relationships.location, + zetteln.kind, + zetteln.creation_date, + zetteln.updated_date, + zetteln.lastview_date, + zetteln.deleted_date, + zettelntree.cycle||zetteln.id||',' + FROM zetteln + INNER JOIN zettle_relationships + ON zetteln.id = zettle_relationships.zettle_id + -- For a given ID in the level of zettelntree in *this* recursion, + -- we want each note's branches one level down. + INNER JOIN zettelntree + ON zettle_relationships.parent_id = zettelntree.id + -- And we want to make sure there are no cycles. There shouldn't + -- be; we're supposed to prevent those. But you never know. + WHERE zettelntree.cycle NOT LIKE '%,'||zetteln.id||',%' + ORDER BY zettle_relationships.location + ) + + SELECT * from zettelntree); + diff --git a/server/nm-store/src/sql/select_note_collection_from_root.sql b/server/nm-store/src/sql/select_note_collection_from_root.sql deleted file mode 100644 index 9370e66..0000000 --- a/server/nm-store/src/sql/select_note_collection_from_root.sql +++ /dev/null @@ -1,91 +0,0 @@ --- This is undoubtedly one of the more complex bits of code I've --- written recently, and I do wish there had been macros because --- there's a lot of hand-written, copy-pasted code here around the --- basic content of a note; it would have been nice to be able to DRY --- that out. - --- This expression creates a table, 'notetree', that contains all of --- the notes nested under a page. Each entry in the table includes --- the note's parent's internal and external ids so that applications --- can build an actual tree out of a vec of these things. - --- TODO: Extensive testing to validate that the nodes are delivered --- *in nesting order* to the client. 
- -SELECT - id, - uuid, - parent_id, - parent_uuid, - content, - position, - notetype, - creation_date, - updated_date, - lastview_date, - deleted_date - -FROM ( - - WITH RECURSIVE notetree ( - id, - uuid, - parent_id, - parent_uuid, - content, - position, - notetype, - creation_date, - updated_date, - lastview_date, - deleted_date, - cycle - ) - - AS ( - - SELECT - notes.id, - notes.uuid, - notes.id AS parent_id, - notes.uuid AS parent_uuid, - notes.content, - 0, -- Root notes are always in position 0 - notes.notetype, - notes.creation_date, - notes.updated_date, - notes.lastview_date, - notes.deleted_date, - ','||notes.id||',' -- Cycle monitor - FROM notes - WHERE notes.id = ? AND notes.notetype = "root" - --- RECURSIVE expression - UNION SELECT - notes.id, - notes.uuid, - notetree.id AS parent_id, - notetree.uuid AS parent_uuid, - notes.content, - note_relationships.position, - notes.notetype, - notes.creation_date, - notes.updated_date, - notes.lastview_date, - notes.deleted_date, - notetree.cycle||notes.id||',' - FROM notes - INNER JOIN note_relationships - ON notes.id = note_relationships.note_id - -- For a given ID in the level of notetree in *this* recursion, - -- we want each note's branches one level down. - INNER JOIN notetree - ON note_relationships.parent_id = notetree.id - -- And we want to make sure there are no cycles. There shouldn't - -- be; we're supposed to prevent those. But you never know. - WHERE notetree.cycle NOT LIKE '%,'||notes.id||',%' - ORDER BY note_relationships.position - ) - - SELECT * from notetree); - diff --git a/server/nm-store/src/sql/select_one_note.sql b/server/nm-store/src/sql/select_one_note.sql deleted file mode 100644 index 8e7b16a..0000000 --- a/server/nm-store/src/sql/select_one_note.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT id, uuid, content, notetype, creation_date, updated_date, lastview_date, deleted_date FROM notes WHERE uuid=?; diff --git a/server/nm-store/src/sql/select_one_page.sql b/server/nm-store/src/sql/select_one_page.sql deleted file mode 100644 index c7599ce..0000000 --- a/server/nm-store/src/sql/select_one_page.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT id, title, slug, note_id, creation_date, updated_date, lastview_date, deleted_date FROM pages WHERE slug=?; diff --git a/server/nm-store/src/sql/update_one_note.sql b/server/nm-store/src/sql/update_one_note.sql deleted file mode 100644 index 2492905..0000000 --- a/server/nm-store/src/sql/update_one_note.sql +++ /dev/null @@ -1,3 +0,0 @@ -UPDATE notes - SET content = ?, updated_date = ?, lastview_date = ? - WHERE uuid = ?; diff --git a/server/nm-store/src/store.rs b/server/nm-store/src/store.rs index 5c3e4da..521a4ab 100644 --- a/server/nm-store/src/store.rs +++ b/server/nm-store/src/store.rs @@ -6,12 +6,13 @@ //! //! This library implements the core functionality of Notesmachine and //! describes that functionality to a storage layer. There's a bit of -//! intermingling in here which can't be helped, although it may make sense -//! in the future to separate the decomposition of the note content into a -//! higher layer. +//! intermingling in here which can't be helped, although it may make +//! sense in the future to separate the decomposition of the note +//! content into a higher layer. //! -//! Notesmachine storage notes consist of two items: Zettle and Kasten, -//! which are German for "Note" and "Box". Here are the basic rules: +//! Notesmachine storage notes consist of two items: Zettle and Kasten. +//! This distinction is somewhat arbitrary, as structurally these two +//! 
items are stored in the same table. //! //! - Boxes have titles (and date metadata) //! - Notes have content and a type (and date metadata) @@ -51,7 +52,6 @@ //! use crate::errors::NoteStoreError; -use crate::reference_parser::build_references; use crate::store_private::*; use crate::structs::*; use sqlx::sqlite::SqlitePool; @@ -65,13 +65,6 @@ pub struct NoteStore(Arc); type NoteResult = core::result::Result; -// One thing that's pretty terrible about this code is that the -// Executor type in Sqlx is move-only, so it can only be used once per -// outgoing function call. That means that a lot of this code is -// internally duplicated, which sucks. I tried using the Acquire() -// trait, but its interaction with Executor was not very -// deterministic. - impl NoteStore { /// Initializes a new instance of the note store. Note that the /// note store holds an Arc internally; this code is (I think) @@ -80,8 +73,8 @@ impl NoteStore { let pool = SqlitePool::connect(url).await?; Ok(NoteStore(Arc::new(pool))) } - - /// Erase all the data in the database and restore it + + /// Erase all the data in the database and restore it /// to its original empty form. Do not use unless you /// really, really want that to happen. pub async fn reset_database(&self) -> NoteResult<()> { @@ -95,208 +88,9 @@ impl NoteStore { /// the slug, the slug is insufficient to generate a new page, so /// this use case says that in the event of a failure to find the /// requested page, return a basic NotFound. - pub async fn get_page_by_slug(&self, slug: &str) -> NoteResult<(RawPage, Vec)> { - // let select_note_collection_for_root = include_str!("sql/select_note_collection_for_root.sql"); - let mut tx = self.0.begin().await?; - let page = select_page_by_slug(&mut tx, slug).await?; - let note_id = page.note_id; - let notes = select_note_collection_from_root(&mut tx, note_id).await?; - tx.commit().await?; - Ok((page, notes)) - } - - /// Fetch page by title - /// - /// Supports the use case of the user navigating to a page via - /// the page's formal title. Since the title is the key reference - /// of the system, if no page with that title is found, a page with - /// that title is generated automatically. - pub async fn get_page_by_title(&self, title: &str) -> NoteResult<(RawPage, Vec)> { - let mut tx = self.0.begin().await?; - let (page, notes) = match select_page_by_title(&mut tx, title).await { - Ok(page) => { - let note_id = page.note_id; - (page, select_note_collection_from_root(&mut tx, note_id).await?) - } - Err(sqlx::Error::RowNotFound) => { - let page = { - let new_root_note = create_unique_root_note(); - let new_root_note_id = insert_one_new_note(&mut tx, &new_root_note).await?; - let new_page_slug = generate_slug(&mut tx, title).await?; - let new_page = create_new_page_for(&title, &new_page_slug, new_root_note_id); - let _ = insert_one_new_page(&mut tx, &new_page).await?; - select_page_by_title(&mut tx, &title).await? - }; - let note_id = page.note_id; - (page, select_note_collection_from_root(&mut tx, note_id).await?) - } - Err(e) => return Err(NoteStoreError::DBError(e)), - }; - tx.commit().await?; - Ok((page, notes)) - } - - /// Insert a note as the child of an existing note, at a set position. 
- pub async fn insert_nested_note( - &self, - note: &NewNote, - parent_note_uuid: &str, - position: i64, - ) -> NoteResult { - let mut new_note = note.clone(); - new_note.uuid = friendly_id::create(); - let references = build_references(¬e.content); - let mut tx = self.0.begin().await?; - - // Start by building the note and putting it into its relationship. - println!("Select_note_id_for_uuid"); - let parent_id: ParentId = select_note_id_for_uuid(&mut tx, parent_note_uuid).await?; - - // Ensure new position is sane - println!("Assert Max Child Position"); - let parent_max_position = assert_max_child_position_for_note(&mut tx, parent_id).await?; - let position = cmp::min(parent_max_position + 1, position); - - println!("Insert_one_new_note"); - let new_note_id = insert_one_new_note(&mut tx, &new_note).await?; - println!("make_room_for_new_note"); - let _ = make_room_for_new_note(&mut tx, parent_id, position).await?; - println!("Insert_note_to_note_relationship"); - let _ = insert_note_to_note_relationship(&mut tx, parent_id, new_note_id, position, "note").await?; - - // From the references, make lists of pages that exist, and pages - // that do not. - println!("Find_all_page_references"); - let found_references = find_all_page_references_for(&mut tx, &references).await?; - let new_references = diff_references(&references, &found_references); - let mut known_reference_ids: Vec = Vec::new(); - - // Create the pages that don't exist - for one_reference in new_references.iter() { - let new_root_note = create_unique_root_note(); - println!("Insert_one_new_root_note"); - let new_root_note_id = insert_one_new_note(&mut tx, &new_root_note).await?; - println!("Generate_slug"); - let new_page_slug = generate_slug(&mut tx, &one_reference).await?; - let new_page = create_new_page_for(&one_reference, &new_page_slug, new_root_note_id); - println!("insert_one_new_page"); - known_reference_ids.push(insert_one_new_page(&mut tx, &new_page).await?) - } - - // And associate the note with all the pages. - known_reference_ids.append(&mut found_references.iter().map(|r| PageId(r.id)).collect()); - println!("insert_note_to_page_relationships"); - let _ = insert_note_to_page_relationships(&mut tx, new_note_id, &known_reference_ids).await?; - - tx.commit().await?; - Ok(new_note.uuid) - } - - // This doesn't do anything with the references, as those are - // dependent entirely on the *content*, and not the *position*, of - // the note and the referenced page. - // - /// Move a note from one location to another. - pub async fn move_note( - &self, - note_uuid: &str, - old_parent_uuid: &str, - new_parent_uuid: &str, - new_position: i64, - ) -> NoteResult<()> { - let all_uuids = vec![note_uuid, old_parent_uuid, new_parent_uuid]; - let mut tx = self.0.begin().await?; - - // This is one of the few cases where we we're getting IDs for - // notes, but the nature of the ID isn't known at this time. - // This has to be handled manually, in the next paragraph - // below. 
- let found_id_vec = bulk_select_ids_for_note_uuids(&mut tx, &all_uuids).await?; - let found_ids: HashMap = found_id_vec.into_iter().collect(); - if found_ids.len() != 3 { - return Err(NoteStoreError::NotFound); - } - - let old_parent_id = ParentId(*found_ids.get(old_parent_uuid).unwrap()); - let new_parent_id = ParentId(*found_ids.get(new_parent_uuid).unwrap()); - let note_id = NoteId(*found_ids.get(note_uuid).unwrap()); - - let old_note = get_note_to_note_relationship(&mut tx, old_parent_id, note_id).await?; - let old_note_position = old_note.position; - let old_note_nature = &old_note.nature; - - let _ = delete_note_to_note_relationship(&mut tx, old_parent_id, note_id).await?; - let _ = close_hole_for_deleted_note(&mut tx, old_parent_id, old_note_position).await?; - let parent_max_position = assert_max_child_position_for_note(&mut tx, new_parent_id).await?; - let new_position = cmp::min(parent_max_position + 1, new_position); - let _ = make_room_for_new_note(&mut tx, new_parent_id, new_position).await?; - let _ = - insert_note_to_note_relationship(&mut tx, new_parent_id, note_id, new_position, old_note_nature).await?; - tx.commit().await?; - Ok(()) - } - - /// Embed or reference a note from a different location. - pub async fn reference_or_embed_note( - &self, - note_uuid: &str, - new_parent_uuid: &str, - new_position: i64, - new_nature: &str, - ) -> NoteResult<()> { - let mut tx = self.0.begin().await?; - let existing_note_id: NoteId = NoteId(select_note_id_for_uuid(&mut tx, note_uuid).await?.0); - let new_parent_id: ParentId = select_note_id_for_uuid(&mut tx, new_parent_uuid).await?; - let _ = make_room_for_new_note(&mut tx, new_parent_id, new_position).await?; - let _ = insert_note_to_note_relationship(&mut tx, new_parent_id, existing_note_id, new_position, new_nature) - .await?; - tx.commit().await?; - Ok(()) - } - - /// Deletes a note. If the note's relationship drops to zero, all - /// references from that note to pages are also deleted. - pub async fn delete_note(&self, note_uuid: &str, note_parent_uuid: &str) -> NoteResult<()> { - let mut tx = self.0.begin().await?; - let condemned_note_id: NoteId = NoteId(select_note_id_for_uuid(&mut tx, note_uuid).await?.0); - let note_parent_id: ParentId = select_note_id_for_uuid(&mut tx, note_parent_uuid).await?; - let _ = delete_note_to_note_relationship(&mut tx, note_parent_id, condemned_note_id); - if count_existing_note_relationships(&mut tx, condemned_note_id).await? == 0 { - let _ = delete_note_to_page_relationships(&mut tx, condemned_note_id).await?; - let _ = delete_note(&mut tx, condemned_note_id).await?; - } - tx.commit().await?; - Ok(()) - } - - /// Updates a note's content. Completely rebuilds the note's - /// outgoing edge reference list every time. 
- pub async fn update_note_content(&self, note_uuid: &str, content: &str) -> NoteResult<()> { - let references = build_references(&content); - - let mut tx = self.0.begin().await?; - - let note_id: NoteId = NoteId(select_note_id_for_uuid(&mut tx, note_uuid).await?.0); - let _ = update_note_content(&mut tx, note_id, &content).await?; - - let found_references = find_all_page_references_for(&mut tx, &references).await?; - let new_references = diff_references(&references, &found_references); - let mut known_reference_ids: Vec = Vec::new(); - - // Create the pages that don't exist - for one_reference in new_references.iter() { - let new_root_note = create_unique_root_note(); - let new_root_note_id = insert_one_new_note(&mut tx, &new_root_note).await?; - let new_page_slug = generate_slug(&mut tx, &one_reference).await?; - let new_page = create_new_page_for(&one_reference, &new_page_slug, new_root_note_id); - known_reference_ids.push(insert_one_new_page(&mut tx, &new_page).await?) - } - - // And associate the note with all the pages. - known_reference_ids.append(&mut found_references.iter().map(|r| PageId(r.id)).collect()); - let _ = insert_note_to_page_relationships(&mut tx, note_id, &known_reference_ids).await?; - - tx.commit().await?; - Ok(()) + pub async fn get_kasten_by_slug(&self, slug: &str) -> NoteResult> { + let page = select_kasten_by_slug(&*self.0, slug).await?; + Ok(page) } + } diff --git a/server/nm-store/src/store_private.rs b/server/nm-store/src/store_private.rs index d921477..843af44 100644 --- a/server/nm-store/src/store_private.rs +++ b/server/nm-store/src/store_private.rs @@ -21,6 +21,20 @@ type SqlResult = sqlx::Result; // coherent and easily readable, and hides away the gnarliness of some // of the SQL queries. +lazy_static! { + static ref select_kasten_by_title_sql: String = str::replace( + include_str!("sql/select_kasten_by_parameter.sql"), + "QUERYPARAMETER", + "zetteln.title"); +} + +lazy_static! { + static ref select_kasten_by_id_sql: String = str::replace( + include_str!("sql/select_kasten_by_parameter.sql"), + "QUERYPARAMETER", + "zetteln.id"); +} + pub(crate) async fn reset_database<'a, E>(executor: E) -> SqlResult<()> where E: Executor<'a, Database = Sqlite>, @@ -29,452 +43,13 @@ where sqlx::query(initialize_sql).execute(executor).await.map(|_| ()) } -pub(crate) async fn select_page_by_slug<'a, E>(executor: E, slug: &str) -> SqlResult +pub(crate) async fn select_kasten_by_slug<'a, E>(executor: E, slug: &str) -> SqlResult> where E: Executor<'a, Database = Sqlite>, { - let select_one_page_by_slug_sql = concat!( - "SELECT id, title, slug, note_id, creation_date, updated_date, ", - "lastview_date, deleted_date FROM pages WHERE slug=?;" - ); - Ok(sqlx::query_as(&select_one_page_by_slug_sql) - .bind(&slug) - .fetch_one(executor) - .await?) + Ok(sqlx::query_as(&select_kasten_by_id_sql) + .bind(&slug) + .fetch_all(executor) + .await?) } -pub(crate) async fn select_page_by_title<'a, E>(executor: E, title: &str) -> SqlResult -where - E: Executor<'a, Database = Sqlite>, -{ - let select_one_page_by_title_sql = concat!( - "SELECT id, title, slug, note_id, creation_date, updated_date, ", - "lastview_date, deleted_date FROM pages WHERE title=?;" - ); - Ok(sqlx::query_as(&select_one_page_by_title_sql) - .bind(&title) - .fetch_one(executor) - .await?) 
-} - -pub(crate) async fn select_note_id_for_uuid<'a, E>(executor: E, uuid: &str) -> SqlResult -where - E: Executor<'a, Database = Sqlite>, -{ - let select_note_id_for_uuid_sql = "SELECT id FROM notes WHERE uuid = ?;"; - let id: JustId = sqlx::query_as(&select_note_id_for_uuid_sql) - .bind(&uuid) - .fetch_one(executor) - .await?; - Ok(ParentId(id.id)) -} - -pub(crate) async fn make_room_for_new_note<'a, E>(executor: E, parent_id: ParentId, position: i64) -> SqlResult<()> -where - E: Executor<'a, Database = Sqlite>, -{ - let make_room_for_new_note_sql = concat!( - "UPDATE note_relationships ", - "SET position = position + 1 ", - "WHERE position >= ? and parent_id = ?;" - ); - - sqlx::query(make_room_for_new_note_sql) - .bind(&position) - .bind(&*parent_id) - .execute(executor) - .await - .map(|_| ()) -} - -pub(crate) async fn insert_note_to_note_relationship<'a, E>( - executor: E, - parent_id: ParentId, - note_id: NoteId, - position: i64, - nature: &str, -) -> SqlResult<()> -where - E: Executor<'a, Database = Sqlite>, -{ - let insert_note_to_note_relationship_sql = concat!( - "INSERT INTO note_relationships (parent_id, note_id, position, nature) ", - "values (?, ?, ?, ?)" - ); - - sqlx::query(insert_note_to_note_relationship_sql) - .bind(&*parent_id) - .bind(&*note_id) - .bind(&position) - .bind(&nature) - .execute(executor) - .await - .map(|_| ()) -} - -pub(crate) async fn select_note_collection_from_root<'a, E>(executor: E, root: i64) -> SqlResult> -where - E: Executor<'a, Database = Sqlite>, -{ - let select_note_collection_from_root_sql = include_str!("sql/select_note_collection_from_root.sql"); - Ok(sqlx::query_as(&select_note_collection_from_root_sql) - .bind(&root) - .fetch_all(executor) - .await?) -} - -pub(crate) async fn insert_one_new_note<'a, E>(executor: E, note: &NewNote) -> SqlResult -where - E: Executor<'a, Database = Sqlite>, -{ - let insert_one_note_sql = concat!( - "INSERT INTO notes ( ", - " uuid, ", - " content, ", - " notetype, ", - " creation_date, ", - " updated_date, ", - " lastview_date) ", - "VALUES (?, ?, ?, ?, ?, ?);" - ); - - Ok(NoteId( - sqlx::query(insert_one_note_sql) - .bind(¬e.uuid) - .bind(¬e.content) - .bind(¬e.notetype) - .bind(¬e.creation_date) - .bind(¬e.updated_date) - .bind(¬e.lastview_date) - .execute(executor) - .await? - .last_insert_rowid(), - )) -} - -// Given a possible slug, find the slug with the highest -// uniquification number, and return that number, if any. - -pub(crate) fn find_maximal_slug(slugs: &[JustSlugs]) -> Option { - lazy_static! { - static ref RE_CAP_NUM: Regex = Regex::new(r"-(\d+)$").unwrap(); - } - - if slugs.is_empty() { - return None; - } - - let mut slug_counters: Vec = slugs - .iter() - .filter_map(|slug| RE_CAP_NUM.captures(&slug.slug)) - .map(|cap| cap.get(1).unwrap().as_str().parse::().unwrap()) - .collect(); - slug_counters.sort_unstable(); - slug_counters.pop() -} - -// Given an initial string and an existing collection of slugs, -// generate a new slug that does not conflict with the current -// collection. -pub(crate) async fn generate_slug<'a, E>(executor: E, title: &str) -> SqlResult -where - E: Executor<'a, Database = Sqlite>, -{ - lazy_static! 
{ - static ref RE_STRIP_NUM: Regex = Regex::new(r"-\d+$").unwrap(); - } - - let initial_slug = slugify(title); - let sample_slug = RE_STRIP_NUM.replace_all(&initial_slug, ""); - let slug_finder_sql = "SELECT slug FROM pages WHERE slug LIKE '?%';"; - let similar_slugs: Vec = sqlx::query_as(&slug_finder_sql) - .bind(&*sample_slug) - .fetch_all(executor) - .await?; - let maximal_slug = find_maximal_slug(&similar_slugs); - match maximal_slug { - None => Ok(initial_slug), - Some(max_slug) => Ok(format!("{}-{}", initial_slug, max_slug + 1)), - } -} - -pub(crate) async fn insert_one_new_page<'a, E>(executor: E, page: &NewPage) -> SqlResult -where - E: Executor<'a, Database = Sqlite>, -{ - let insert_one_page_sql = concat!( - "INSERT INTO pages ( ", - " slug, ", - " title, ", - " note_id, ", - " creation_date, ", - " updated_date, ", - " lastview_date) ", - "VALUES (?, ?, ?, ?, ?, ?);" - ); - - Ok(PageId( - sqlx::query(insert_one_page_sql) - .bind(&page.slug) - .bind(&page.title) - .bind(&page.note_id) - .bind(&page.creation_date) - .bind(&page.updated_date) - .bind(&page.lastview_date) - .execute(executor) - .await? - .last_insert_rowid(), - )) -} - -pub(crate) async fn insert_note_to_page_relationships<'a, E>( - executor: E, - note_id: NoteId, - references: &[PageId], -) -> SqlResult<()> -where - E: Executor<'a, Database = Sqlite>, -{ - if references.is_empty() { - return Ok(()); - } - - let insert_note_page_references_sql = "INSERT INTO page_relationships (note_id, page_id) VALUES ".to_string() - + &["(?, ?)"].repeat(references.len()).join(", ") - + &";".to_string(); - - let mut request = sqlx::query(&insert_note_page_references_sql); - for reference in references { - request = request.bind(*note_id).bind(**reference); - } - - request.execute(executor).await.map(|_| ()) -} - -// For a given collection of uuids, retrieve the internal ID used by -// the database. -pub(crate) async fn bulk_select_ids_for_note_uuids<'a, E>(executor: E, ids: &[&str]) -> SqlResult> -where - E: Executor<'a, Database = Sqlite>, -{ - if ids.is_empty() { - return Ok(vec![]); - } - - let bulk_select_ids_for_note_uuids_sql = "SELECT uuid, id FROM notes WHERE uuid IN (".to_string() - + &["?"].repeat(ids.len()).join(",") - + &");".to_string(); - - let mut request = sqlx::query(&bulk_select_ids_for_note_uuids_sql); - for id in ids.iter() { - request = request.bind(id); - } - Ok(request - .try_map(|row: SqliteRow| { - let l = row.try_get::(0)?; - let r = row.try_get::(1)?; - Ok((l, r)) - }) - .fetch_all(executor) - .await? - .into_iter() - .collect()) -} - -// Used by move_note to identify the single note to note relationship -// by the original parent and child pair. Used mostly to find the -// position for recalculation, to create a new gap or close an old -// one. -pub(crate) async fn get_note_to_note_relationship<'a, E>( - executor: E, - parent_id: ParentId, - note_id: NoteId, -) -> SqlResult -where - E: Executor<'a, Database = Sqlite>, -{ - let get_note_to_note_relationship_sql = concat!( - "SELECT parent_id, note_id, position, nature ", - "FROM note_relationships ", - "WHERE parent_id = ? and note_id = ? 
", - "LIMIT 1" - ); - sqlx::query_as(get_note_to_note_relationship_sql) - .bind(&*parent_id) - .bind(&*note_id) - .fetch_one(executor) - .await -} - -pub(crate) async fn delete_note_to_note_relationship<'a, E>( - executor: E, - parent_id: ParentId, - note_id: NoteId, -) -> SqlResult<()> -where - E: Executor<'a, Database = Sqlite>, -{ - let delete_note_to_note_relationship_sql = concat!( - "DELETE FROM note_relationships ", - "WHERE parent_id = ? and note_id = ? " - ); - - let count = sqlx::query(delete_note_to_note_relationship_sql) - .bind(&*parent_id) - .bind(&*note_id) - .execute(executor) - .await? - .rows_affected(); - - match count { - 1 => Ok(()), - _ => Err(sqlx::Error::RowNotFound), - } -} - -pub(crate) async fn delete_note_to_page_relationships<'a, E>(executor: E, note_id: NoteId) -> SqlResult<()> -where - E: Executor<'a, Database = Sqlite>, -{ - let delete_note_to_page_relationships_sql = "DELETE FROM page_relationships WHERE note_id = ?;"; - - let _ = sqlx::query(delete_note_to_page_relationships_sql) - .bind(&*note_id) - .execute(executor) - .await?; - Ok(()) -} - -pub(crate) async fn delete_note<'a, E>(executor: E, note_id: NoteId) -> SqlResult<()> -where - E: Executor<'a, Database = Sqlite>, -{ - let delete_note_sql = "DELETE FROM notes WHERE note_id = ?"; - - let count = sqlx::query(delete_note_sql) - .bind(&*note_id) - .execute(executor) - .await? - .rows_affected(); - - match count { - 1 => Ok(()), - _ => Err(sqlx::Error::RowNotFound), - } -} - -pub(crate) async fn count_existing_note_relationships<'a, E>(executor: E, note_id: NoteId) -> SqlResult -where - E: Executor<'a, Database = Sqlite>, -{ - let count_existing_note_relationships_sql = "SELECT COUNT(*) as count FROM page_relationships WHERE note_id = ?;"; - - let count: RowCount = sqlx::query_as(count_existing_note_relationships_sql) - .bind(&*note_id) - .fetch_one(executor) - .await?; - - Ok(count.count) -} - -pub(crate) async fn assert_max_child_position_for_note<'a, E>(executor: E, note_id: ParentId) -> SqlResult -where - E: Executor<'a, Database = Sqlite>, -{ - let assert_max_child_position_for_note_sql = - "SELECT MAX(position) AS count FROM note_relationships WHERE parent_id = ?;"; - - let count: RowCount = sqlx::query_as(assert_max_child_position_for_note_sql) - .bind(&*note_id) - .fetch_one(executor) - .await?; - - Ok(count.count) -} - -// After removing a note, recalculate the position of all notes under -// the parent note, such that there order is now completely -// sequential. -pub(crate) async fn close_hole_for_deleted_note<'a, E>(executor: E, parent_id: ParentId, position: i64) -> SqlResult<()> -where - E: Executor<'a, Database = Sqlite>, -{ - let close_hole_for_deleted_note_sql = concat!( - "UPDATE note_relationships ", - "SET position = position - 1 ", - "WHERE position > ? 
and parent_id = ?;" - ); - - sqlx::query(close_hole_for_deleted_note_sql) - .bind(&position) - .bind(&*parent_id) - .execute(executor) - .await - .map(|_| ()) -} - -pub(crate) async fn find_all_page_references_for<'a, E>( - executor: E, - references: &[String], -) -> SqlResult> -where - E: Executor<'a, Database = Sqlite>, -{ - if references.is_empty() { - return Ok(vec![]); - } - - let find_all_references_for_sql = "SELECT id, title FROM pages WHERE title IN (".to_string() - + &["?"].repeat(references.len()).join(",") - + &");".to_string(); - - let mut request = sqlx::query_as(&find_all_references_for_sql); - for id in references.iter() { - request = request.bind(id); - } - request.fetch_all(executor).await -} - -pub(crate) async fn update_note_content<'a, E>(executor: E, note_id: NoteId, content: &str) -> SqlResult<()> -where - E: Executor<'a, Database = Sqlite>, -{ - let update_note_content_sql = "UPDATE notes SET content = ? WHERE note_id = ?"; - let count = sqlx::query(update_note_content_sql) - .bind(content) - .bind(&*note_id) - .execute(executor) - .await? - .rows_affected(); - - match count { - 1 => Ok(()), - _ => Err(sqlx::Error::RowNotFound), - } -} - -pub(crate) fn create_unique_root_note() -> NewNote { - NewNoteBuilder::default() - .uuid(friendly_id::create()) - .content("".to_string()) - .notetype("root".to_string()) - .build() - .unwrap() -} - -pub(crate) fn create_new_page_for(title: &str, slug: &str, note_id: NoteId) -> NewPage { - NewPageBuilder::default() - .slug(slug.to_string()) - .title(title.to_string()) - .note_id(*note_id) - .build() - .unwrap() -} - -// Given the references supplied, and the references found in the datastore, -// return a list of the references not found in the datastore. -pub(crate) fn diff_references(references: &[String], found_references: &[PageTitles]) -> Vec { - let all: HashSet = references.iter().cloned().collect(); - let found: HashSet = found_references.iter().map(|r| r.title.clone()).collect(); - all.difference(&found).cloned().collect() -} diff --git a/server/nm-store/src/structs.rs b/server/nm-store/src/structs.rs index 2d5c69c..6d08631 100644 --- a/server/nm-store/src/structs.rs +++ b/server/nm-store/src/structs.rs @@ -1,134 +1,17 @@ use chrono::{DateTime, Utc}; -use derive_builder::Builder; +// use derive_builder::Builder; use serde::{Deserialize, Serialize}; -use shrinkwraprs::Shrinkwrap; +// use shrinkwraprs::Shrinkwrap; use sqlx::{self, FromRow}; -#[derive(Shrinkwrap, Copy, Clone)] -pub(crate) struct PageId(pub i64); - -#[derive(Shrinkwrap, Copy, Clone)] -pub(crate) struct NoteId(pub i64); - -#[derive(Shrinkwrap, Copy, Clone)] -pub(crate) struct ParentId(pub i64); - -/// A RawPage is what this layer of the API returns when requesting a -/// page. Note that usually what you'll get back in the RawPage and a -/// Vec. It's the next level's responsibility to turn that -/// into a proper tree. #[derive(Clone, Serialize, Deserialize, Debug, FromRow)] -pub struct RawPage { - pub id: i64, - pub slug: String, - pub title: String, - pub note_id: i64, - pub creation_date: DateTime, - pub updated_date: DateTime, - pub lastview_date: DateTime, - pub deleted_date: Option>, -} - -/// A RawNote is what this layer of the API returns -/// when requesting a note. 
-#[derive(Clone, Serialize, Deserialize, Debug, FromRow)] -pub struct RawNote { - pub id: i64, - pub uuid: String, - pub parent_id: i64, - pub parent_uuid: String, +pub struct RawZettle { + pub id: String, pub content: String, + pub kind: String, pub position: i64, - pub notetype: String, pub creation_date: DateTime, pub updated_date: DateTime, pub lastview_date: DateTime, pub deleted_date: Option>, } - -/// The interface for passing a new page to the store. -#[derive(Clone, Serialize, Deserialize, Debug, Builder)] -pub struct NewPage { - pub slug: String, - pub title: String, - pub note_id: i64, - #[builder(default = r#"chrono::Utc::now()"#)] - pub creation_date: DateTime, - #[builder(default = r#"chrono::Utc::now()"#)] - pub updated_date: DateTime, - #[builder(default = r#"chrono::Utc::now()"#)] - pub lastview_date: DateTime, - #[builder(default = r#"None"#)] - pub deleted_date: Option>, -} - -/// The interface for passing a new note to the store. -#[derive(Clone, Serialize, Deserialize, Debug, Builder)] -pub struct NewNote { - #[builder(default = r#""".to_string()"#)] - pub uuid: String, - pub content: String, - #[builder(default = r#""note".to_string()"#)] - pub notetype: String, - #[builder(default = r#"chrono::Utc::now()"#)] - pub creation_date: DateTime, - #[builder(default = r#"chrono::Utc::now()"#)] - pub updated_date: DateTime, - #[builder(default = r#"chrono::Utc::now()"#)] - pub lastview_date: DateTime, - #[builder(default = r#"None"#)] - pub deleted_date: Option>, -} - -#[derive(Clone, Serialize, Deserialize, Debug, FromRow)] -pub(crate) struct JustSlugs { - pub slug: String, -} - -#[derive(Clone, Serialize, Deserialize, Debug, FromRow)] -pub(crate) struct JustTitles { - title: String, -} - -#[derive(Clone, Serialize, Deserialize, Debug, FromRow)] -pub(crate) struct JustId { - pub id: i64, -} - -#[derive(Clone, Serialize, Deserialize, Debug, FromRow)] -pub(crate) struct PageTitles { - pub id: i64, - pub title: String, -} - -#[derive(Clone, Serialize, Deserialize, Debug, FromRow)] -pub(crate) struct NoteRelationship { - pub parent_id: i64, - pub note_id: i64, - pub position: i64, - pub nature: String, -} - -#[derive(Clone, Serialize, Deserialize, Debug, FromRow)] -pub(crate) struct RowCount { - pub count: i64, -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn can_build_new_note() { - let now = chrono::Utc::now(); - let newnote = NewNoteBuilder::default() - .uuid("foo".to_string()) - .content("bar".to_string()) - .build() - .unwrap(); - assert!((newnote.creation_date - now).num_minutes() < 1); - assert!((newnote.updated_date - now).num_minutes() < 1); - assert!((newnote.lastview_date - now).num_minutes() < 1); - assert!(newnote.deleted_date.is_none()); - } -} diff --git a/server/nm-trees/src/lib.rs b/server/nm-trees/src/lib.rs index fccf560..b23ec50 100644 --- a/server/nm-trees/src/lib.rs +++ b/server/nm-trees/src/lib.rs @@ -27,7 +27,7 @@ impl Notesmachine { Ok(Notesmachine(notestore)) } - pub async fn navigate_via_slug(&self, slug: &str) -> Result { + pub async fn get_box_via_slug(&self, slug: &str) -> Result { let (rawpage, rawnotes) = self.0.get_page_by_slug(slug).await?; Ok(make_tree(&rawpage, &rawnotes)) }
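Usage note (illustrative sketch, not part of the patch): the single-table API this commit settles on is NoteStore::get_kasten_by_slug, which returns the flat Vec<RawZettle> produced by the recursive CTE in select_kasten_by_parameter.sql, with an empty Vec standing in for "not found" (see the fetching_unfound_page_by_slug_works test above). A minimal caller might look like the sketch below; NoteStore, NoteStoreError, RawZettle, and get_kasten_by_slug come from the diff above, while the function print_kasten and its printing behavior are hypothetical.

use nm_store::{NoteStore, NoteStoreError, RawZettle};

// Fetch a kasten (a page and its nested zetteln) by slug and walk the rows
// in the order the recursive query returns them. An empty result means the
// page does not exist; this API does not auto-create pages from slugs.
async fn print_kasten(store: &NoteStore, slug: &str) -> Result<(), NoteStoreError> {
    let zetteln: Vec<RawZettle> = store.get_kasten_by_slug(slug).await?;
    if zetteln.is_empty() {
        // No seed row matched (kind = 'page', location = 0 for this slug).
        println!("no kasten found for slug {:?}", slug);
        return Ok(());
    }
    for zettle in &zetteln {
        // Each row carries its own content, kind, and date metadata.
        println!("{} [{}]: {}", zettle.id, zettle.kind, zettle.content);
    }
    Ok(())
}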