Compare commits


No commits in common. "bad0de9bc04412c5c2de91be5584a4e4a665a9cf" and "da44610098e4670bd54eb0f27ac968124ce1a2a6" have entirely different histories.

23 changed files with 841 additions and 1117 deletions

.gitignore vendored

@ -1,6 +0,0 @@
/target
Cargo.lock
*#
.#*
*~


@ -1,2 +0,0 @@
[workspace]
members = ["server/*"]


@ -1,11 +0,0 @@
[[source]]
url = "https://pypi.python.org/simple"
verify_ssl = true
name = "pypi"
[packages]
[dev-packages]
[requires]
python_version = "2.7"


@ -1,4 +0,0 @@
[ ] Add RelationshipKind to Notes passed out
[ ] Add KastenKind to Backreferences passed out
[ ] Provide the array of notes references (the 'cycle' manager) to make
mapping from Vec->Tree easier.


@ -1,10 +0,0 @@
The thing of it is, we have two kinds of notes:
1. This layer of the system will handle broken/missing position issues.
2. The client layer of the system will ensure that a parent is provided.
3. The notes retrieved via the CTE have information and parenting and
location.
4. Notes put *into* the system have parent and location provided
separately.
5. Clients do not specify the ids of notes put into the system.
6. Retrieval by slug must test for is-a-box.
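A minimal sketch of what a client call looks like under rules 4 and 5 above, using the make_new_note test helper and the add_note API exercised later in this comparison; the content string, parent, and position are illustrative assumptions, not code from the repository:

    // The client supplies only content; the parent id and the location travel
    // separately (rule 4), and the store generates the note's id (rule 5).
    let note = make_new_note("An example thought");
    let new_id = storagepool.add_note(&note, &root.id, Some(0)).await?;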


@ -1,4 +1,3 @@
-use sqlx;
 use thiserror::Error;
 /// All the ways looking up objects can fail
@ -9,9 +8,6 @@ pub enum NoteStoreError {
 #[error("Invalid Note Structure")]
 InvalidNoteStructure(String),
-/// The requested kasten or note was not found. As much as
-/// possible, this should be preferred to a
-/// sqlx::Error::RowNotFound.
 #[error("Not found")]
 NotFound,
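As a sketch of the preference described in the deleted comment above, a caller would translate sqlx's RowNotFound into the store's own NotFound rather than letting the sqlx error escape. The helper name and signature here are illustrative, not part of this diff; select_page_by_slug, RawPage, and the NoteStoreError variants are taken from the new code shown later:

    // Hypothetical wrapper: callers see NoteStoreError, never sqlx::Error::RowNotFound.
    async fn lookup_page(pool: &sqlx::SqlitePool, slug: &str) -> Result<RawPage, NoteStoreError> {
        match select_page_by_slug(pool, slug).await {
            Ok(page) => Ok(page),
            Err(sqlx::Error::RowNotFound) => Err(NoteStoreError::NotFound),
            Err(e) => Err(NoteStoreError::DBError(e)),
        }
    }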


@ -6,8 +6,7 @@ mod structs;
pub use crate::errors::NoteStoreError; pub use crate::errors::NoteStoreError;
pub use crate::store::NoteStore; pub use crate::store::NoteStore;
pub use crate::structs::{Note, NoteKind, NoteRelationship, KastenRelationship}; pub use crate::structs::{RawPage, RawNote, NewPage, NewNote};
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
@ -24,14 +23,14 @@ mod tests {
storagepool storagepool
} }
// Request for the page by slug. If the page exists, return it. // Request for the page by slug.
// If the page doesn't, return NotFound // If the page exists, return it. If the page doesn't, return NotFound
//
#[tokio::test(threaded_scheduler)] #[tokio::test(threaded_scheduler)]
async fn fetching_unfound_page_by_slug_works() { async fn fetching_unfound_page_by_slug_works() {
let storagepool = fresh_inmemory_database().await; let storagepool = fresh_inmemory_database().await;
let foundkasten = storagepool.get_kasten_by_slug("nonexistent-kasten").await; let unfoundpage = storagepool.get_page_by_slug("nonexistent-page").await;
assert!(foundkasten.is_err()); assert!(unfoundpage.is_err());
} }
// Request for the page by title. If the page exists, return it. // Request for the page by title. If the page exists, return it.
@ -43,17 +42,18 @@ mod tests {
let title = "Nonexistent Page"; let title = "Nonexistent Page";
let now = chrono::Utc::now(); let now = chrono::Utc::now();
let storagepool = fresh_inmemory_database().await; let storagepool = fresh_inmemory_database().await;
let newpageresult = storagepool.get_kasten_by_title(&title).await; let newpageresult = storagepool.get_page_by_title(&title).await;
assert!(newpageresult.is_ok(), "{:?}", newpageresult); assert!(newpageresult.is_ok(), "{:?}", newpageresult);
let (newpages, _) = newpageresult.unwrap(); let (newpage, newnotes) = newpageresult.unwrap();
assert_eq!(newpages.len(), 1); assert_eq!(newpage.title, title, "{:?}", newpage.title);
let newpage = newpages.iter().next().unwrap(); assert_eq!(newpage.slug, "nonexistent-page");
assert_eq!(newnotes.len(), 1);
assert_eq!(newnotes[0].notetype, "root");
assert_eq!(newpage.note_id, newnotes[0].id);
assert_eq!(newpage.content, title, "{:?}", newpage.content);
assert_eq!(newpage.id, "nonexistent-page");
assert_eq!(newpage.kind, NoteKind::Kasten);
assert!((newpage.creation_date - now).num_minutes() < 1); assert!((newpage.creation_date - now).num_minutes() < 1);
assert!((newpage.updated_date - now).num_minutes() < 1); assert!((newpage.updated_date - now).num_minutes() < 1);
assert!((newpage.lastview_date - now).num_minutes() < 1); assert!((newpage.lastview_date - now).num_minutes() < 1);
@ -71,49 +71,39 @@ mod tests {
async fn can_nest_notes() { async fn can_nest_notes() {
let title = "Nonexistent Page"; let title = "Nonexistent Page";
let storagepool = fresh_inmemory_database().await; let storagepool = fresh_inmemory_database().await;
let newpageresult = storagepool.get_kasten_by_title(&title).await; let newpageresult = storagepool.get_page_by_title(&title).await;
let (_newpage, newnotes) = newpageresult.unwrap();
assert!(newpageresult.is_ok(), "{:?}", newpageresult); let root = &newnotes[0];
let (newpages, _) = newpageresult.unwrap();
assert_eq!(newpages.len(), 1);
let root = &newpages[0];
// root <- 1 <- 3
// <- 2 <- 4
let note1 = make_new_note("1"); let note1 = make_new_note("1");
let note1_id = storagepool.add_note(&note1, &root.id, Some(0)).await; let note1_uuid = storagepool.insert_nested_note(&note1, &root.uuid, 0).await;
assert!(note1_id.is_ok(), "{:?}", note1_id); assert!(note1_uuid.is_ok(), "{:?}", note1_uuid);
let note1_id = note1_id.unwrap(); let note1_uuid = note1_uuid.unwrap();
let note2 = make_new_note("2"); let note2 = make_new_note("2");
let note2_id = storagepool.add_note(&note2, &root.id, Some(0)).await; let note2_uuid = storagepool.insert_nested_note(&note2, &root.uuid, 0).await;
assert!(note2_id.is_ok(), "{:?}", note2_id); assert!(note2_uuid.is_ok(), "{:?}", note2_uuid);
let note2_id = note2_id.unwrap(); let note2_uuid = note2_uuid.unwrap();
let note3 = make_new_note("3"); let note3 = make_new_note("3");
let note3_id = storagepool.add_note(&note3, &note1_id, Some(0)).await; let note3_uuid = storagepool.insert_nested_note(&note3, &note1_uuid, 0).await;
assert!(note3_id.is_ok(), "{:?}", note3_id); assert!(note3_uuid.is_ok(), "{:?}", note3_uuid);
let _note3_id = note3_id.unwrap(); let _note3_uuid = note3_uuid.unwrap();
let note4 = make_new_note("4"); let note4 = make_new_note("4");
let note4_id = storagepool.add_note(&note4, &note2_id, Some(0)).await; let note4_uuid = storagepool.insert_nested_note(&note4, &note2_uuid, 0).await;
assert!(note4_id.is_ok(), "{:?}", note4_id); assert!(note4_uuid.is_ok(), "{:?}", note4_uuid);
let _note4_id = note4_id.unwrap(); let _note4_uuid = note4_uuid.unwrap();
let newpageresult = storagepool.get_kasten_by_title(&title).await; let newpageresult = storagepool.get_page_by_title(&title).await;
assert!(newpageresult.is_ok(), "{:?}", newpageresult); let (newpage, newnotes) = newpageresult.unwrap();
let (newpages, _) = newpageresult.unwrap();
assert_eq!(newpages.len(), 5); assert_eq!(newpage.title, title, "{:?}", newpage.title);
let newroot = newpages.iter().next().unwrap(); assert_eq!(newpage.slug, "nonexistent-page");
assert_eq!(newroot.content, title, "{:?}", newroot.content); assert_eq!(newnotes.len(), 5);
assert_eq!(newroot.id, "nonexistent-page"); assert_eq!(newnotes[0].notetype, "root");
assert_eq!(newpage.note_id, newnotes[0].id);
assert_eq!(newpages[1].parent_id, Some(newroot.id.clone()));
assert_eq!(newpages[2].parent_id, Some(newpages[1].id.clone()));
} }
} }


@ -1,58 +1,53 @@
DROP TABLE IF EXISTS notes; DROP TABLE IF EXISTS notes;
DROP TABLE IF EXISTS note_relationships; DROP TABLE IF EXISTS note_relationships;
DROP TABLE IF EXISTS note_kasten_relationships; DROP TABLE IF EXISTS pages;
DROP TABLE IF EXISTS page_relationships;
DROP TABLE IF EXISTS favorites; DROP TABLE IF EXISTS favorites;
CREATE TABLE notes ( CREATE TABLE notes (
id TEXT NOT NULL PRIMARY KEY, id INTEGER PRIMARY KEY AUTOINCREMENT,
content TEXT NOT NULL, uuid TEXT NOT NULL UNIQUE,
kind TEXT NOT NULL, content TEXT NULL,
notetype TEXT,
creation_date DATETIME NOT NULL, creation_date DATETIME NOT NULL,
updated_date DATETIME NOT NULL, updated_date DATETIME NOT NULL,
lastview_date DATETIME NOT NULL, lastview_date DATETIME NOT NULL,
deleted_date DATETIME NULL deleted_date DATETIME NULL
); );
CREATE INDEX note_ids ON notes (id); CREATE INDEX notes_uuids ON notes (uuid);
CREATE TABLE pages (
id INTEGER PRIMARY KEY AUTOINCREMENT,
title text NOT NULL UNIQUE,
slug text NOT NULL UNIQUE,
note_id INTEGER,
creation_date DATETIME NOT NULL,
updated_date DATETIME NOT NULL,
lastview_date DATETIME NOT NULL,
deleted_date DATETIME NULL,
FOREIGN KEY (note_id) REFERENCES notes (id) ON DELETE NO ACTION ON UPDATE NO ACTION
);
CREATE INDEX pages_slugs ON pages (slug);
CREATE TABLE favorites ( CREATE TABLE favorites (
id TEXT NOT NULL UNIQUE, id INTEGER PRIMARY KEY AUTOINCREMENT,
location INTEGER NOT NULL, position INTEGER NOT NULL
FOREIGN KEY (id) REFERENCES notes (id) ON DELETE CASCADE
); );
-- This table represents the forest of data relating a kasten to its
-- collections of notes. The root is itself "a note," but the content
-- of that note will always be just the title of the kasten.
--
CREATE TABLE note_relationships ( CREATE TABLE note_relationships (
note_id TEXT NOT NULL, note_id INTEGER NOT NULL,
parent_id TEXT NOT NULL, parent_id INTEGER NOT NULL,
location INTEGER NOT NULL, position INTEGER NOT NULL,
kind TEXT NOT NULL, nature TEXT NOT NULL,
-- If either note disappears, we want all the edges to disappear as well. FOREIGN KEY (note_id) REFERENCES notes (id) ON DELETE NO ACTION ON UPDATE NO ACTION,
FOREIGN KEY (note_id) REFERENCES notes (id) ON DELETE CASCADE, FOREIGN KEY (parent_id) REFERENCES notes (id) ON DELETE NO ACTION ON UPDATE NO ACTION
FOREIGN KEY (parent_id) REFERENCES notes (id) ON DELETE CASCADE,
UNIQUE (note_id, parent_id),
CHECK (note_id <> parent_id)
); );
-- This table represents the graph of data relating notes to kastens. CREATE TABLE page_relationships (
-- note_id INTEGER NOT NULL,
CREATE TABLE note_kasten_relationships ( page_id INTEGER NOT NULL,
note_id TEXT NOT NULL, FOREIGN KEY (note_id) references notes (id) ON DELETE NO ACTION ON UPDATE NO ACTION,
kasten_id TEXT NOT NULL, FOREIGN KEY (page_id) references pages (id) ON DELETE NO ACTION ON UPDATE NO ACTION
kind TEXT NOT NULL,
-- If either note disappears, we want all the edges to disappear as well.
FOREIGN KEY (note_id) REFERENCES notes (id) ON DELETE CASCADE,
FOREIGN KEY (kasten_id) REFERENCES notes (id) ON DELETE CASCADE,
UNIQUE (note_id, kasten_id),
CHECK (note_id <> kasten_id)
); );
-- A fabulous constraint. This index ensures there is only one edge per
-- unordered pair: if a note points to a kasten, the kasten may not also
-- point back at that note. Now, it's absolutely required that a kasten_id
-- point to a KastenType note; the content should be a title only.
CREATE UNIQUE INDEX note_kasten_unique_idx
ON note_kasten_relationships (MIN(note_id, kasten_id), MAX(note_id, kasten_id));
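For orientation, here are the row structs the new notes/pages split presumably maps onto. RawPage and RawNote are referenced throughout the new code, but their definitions (structs.rs) are not part of this comparison, so the fields and types below are assumptions read off the CREATE TABLE statements above and the recursive query's column list:

    // Assumed shape of a row from the new `pages` table.
    #[derive(Clone, Debug, sqlx::FromRow)]
    pub struct RawPage {
        pub id: i64,                      // INTEGER PRIMARY KEY AUTOINCREMENT
        pub title: String,
        pub slug: String,
        pub note_id: i64,                 // the page's root note
        pub creation_date: chrono::DateTime<chrono::Utc>,
        pub updated_date: chrono::DateTime<chrono::Utc>,
        pub lastview_date: chrono::DateTime<chrono::Utc>,
        pub deleted_date: Option<chrono::DateTime<chrono::Utc>>,
    }

    // Assumed shape of a note row; parent_id, parent_uuid, and position only
    // come back from the recursive tree query, not from the bare `notes` table.
    #[derive(Clone, Debug, sqlx::FromRow)]
    pub struct RawNote {
        pub id: i64,
        pub uuid: String,
        pub parent_id: i64,
        pub parent_uuid: String,
        pub content: String,              // declared TEXT NULL, so possibly Option<String>
        pub position: i64,
        pub notetype: String,             // "root" or "note"
        pub creation_date: chrono::DateTime<chrono::Utc>,
        pub updated_date: chrono::DateTime<chrono::Utc>,
        pub lastview_date: chrono::DateTime<chrono::Utc>,
        pub deleted_date: Option<chrono::DateTime<chrono::Utc>>,
    }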


@ -0,0 +1,8 @@
INSERT INTO notes (
uuid,
content,
notetype,
creation_date,
updated_date,
lastview_date)
VALUES (?, ?, ?, ?, ?, ?);


@ -0,0 +1,8 @@
INSERT INTO pages (
slug,
title,
note_id,
creation_date,
updated_date,
lastview_date)
VALUES (?, ?, ?, ?, ?, ?);


@ -1,9 +1,10 @@
SELECT SELECT
id, id,
uuid,
parent_id, parent_id,
parent_uuid,
content, content,
location, notetype,
kind,
creation_date, creation_date,
updated_date, updated_date,
lastview_date, lastview_date,
@ -13,10 +14,11 @@ FROM (
WITH RECURSIVE parents ( WITH RECURSIVE parents (
id, id,
uuid,
parent_id, parent_id,
parent_uuid,
content, content,
location, notetype,
kind,
creation_date, creation_date,
updated_date, updated_date,
lastview_date, lastview_date,
@ -28,10 +30,11 @@ FROM (
SELECT SELECT
notes.id, notes.id,
notes.uuid,
note_parents.id, note_parents.id,
note_parents.uuid,
notes.content, notes.content,
note_relationships.location, notes.notetype,
notes.kind,
notes.creation_date, notes.creation_date,
notes.updated_date, notes.updated_date,
notes.lastview_date, notes.lastview_date,
@ -40,21 +43,18 @@ FROM (
FROM notes FROM notes
INNER JOIN note_relationships INNER JOIN note_relationships
ON notes.id = note_relationships.note_id ON notes.id = note_relationships.note_id
AND notes.kind = 'note' AND notes.notetype = 'note'
INNER JOIN notes as note_parents INNER JOIN notes as note_parents
ON note_parents.id = note_relationships.parent_id ON note_parents.id = note_relationships.parent_id
WHERE notes.id WHERE notes.id = ? -- IMPORTANT: THIS IS THE PARAMETER
IN (SELECT note_id
FROM note_kasten_relationships
WHERE kasten_id = ?) -- IMPORTANT: THIS IS THE PARAMETER
UNION UNION
SELECT DISTINCT SELECT DISTINCT
notes.id, notes.id,
notes.uuid,
next_parent.id, next_parent.id,
next_parent.uuid,
notes.content, notes.content,
note_relationships.location,
notes.kind,
notes.creation_date, notes.creation_date,
notes.updated_date, notes.updated_date,
notes.lastview_date, notes.lastview_date,


@ -0,0 +1,91 @@
-- This is undoubtedly one of the more complex bits of code I've
-- written recently, and I do wish there had been macros because
-- there's a lot of hand-written, copy-pasted code here around the
-- basic content of a note; it would have been nice to be able to DRY
-- that out.
-- This expression creates a table, 'notetree', that contains all of
-- the notes nested under a page. Each entry in the table includes
-- the note's parent's internal and external ids so that applications
-- can build an actual tree out of a vec of these things.
-- TODO: Extensive testing to validate that the nodes are delivered
-- *in nesting order* to the client.
SELECT
id,
uuid,
parent_id,
parent_uuid,
content,
position,
notetype,
creation_date,
updated_date,
lastview_date,
deleted_date
FROM (
WITH RECURSIVE notetree (
id,
uuid,
parent_id,
parent_uuid,
content,
position,
notetype,
creation_date,
updated_date,
lastview_date,
deleted_date,
cycle
)
AS (
SELECT
notes.id,
notes.uuid,
notes.id AS parent_id,
notes.uuid AS parent_uuid,
notes.content,
0, -- Root notes are always in position 0
notes.notetype,
notes.creation_date,
notes.updated_date,
notes.lastview_date,
notes.deleted_date,
','||notes.id||',' -- Cycle monitor
FROM notes
WHERE notes.id = ? AND notes.notetype = "root"
-- RECURSIVE expression
UNION SELECT
notes.id,
notes.uuid,
notetree.id AS parent_id,
notetree.uuid AS parent_uuid,
notes.content,
note_relationships.position,
notes.notetype,
notes.creation_date,
notes.updated_date,
notes.lastview_date,
notes.deleted_date,
notetree.cycle||notes.id||','
FROM notes
INNER JOIN note_relationships
ON notes.id = note_relationships.note_id
-- For a given ID in the level of notetree in *this* recursion,
-- we want each note's branches one level down.
INNER JOIN notetree
ON note_relationships.parent_id = notetree.id
-- And we want to make sure there are no cycles. There shouldn't
-- be; we're supposed to prevent those. But you never know.
WHERE notetree.cycle NOT LIKE '%,'||notes.id||',%'
ORDER BY note_relationships.position
)
SELECT * from notetree);
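A sketch of the client-side step the comment above alludes to: folding the flat, nesting-ordered rows this query returns into an actual tree, grouped by parent_uuid. TreeNote and the RawNote field names are assumptions (see the struct sketch after the schema earlier); only the column list comes from the SQL above:

    use std::collections::HashMap;

    // Hypothetical client-side node; only uuid, content, and children are kept.
    #[derive(Debug)]
    pub struct TreeNote {
        pub uuid: String,
        pub content: String,
        pub children: Vec<TreeNote>,
    }

    // The first row is the root (the seed query makes it its own parent), so it
    // is skipped when grouping children; rows arrive ordered by position, which
    // keeps each child list in order.
    pub fn into_tree(rows: &[RawNote]) -> Option<TreeNote> {
        let mut by_parent: HashMap<&str, Vec<&RawNote>> = HashMap::new();
        for row in rows.iter().skip(1) {
            by_parent.entry(row.parent_uuid.as_str()).or_default().push(row);
        }
        fn build(uuid: &str, content: &str, by_parent: &HashMap<&str, Vec<&RawNote>>) -> TreeNote {
            TreeNote {
                uuid: uuid.to_string(),
                content: content.to_string(),
                children: by_parent
                    .get(uuid)
                    .map(|kids| kids.iter().map(|k| build(&k.uuid, &k.content, by_parent)).collect())
                    .unwrap_or_default(),
            }
        }
        rows.first().map(|root| build(&root.uuid, &root.content, &by_parent))
    }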


@ -1,98 +0,0 @@
-- This is undoubtedly one of the more complex bits of code I've
-- written recently, and I do wish there had been macros because
-- there's a lot of hand-written, copy-pasted code here around the
-- basic content of a note; it would have been nice to be able to DRY
-- that out.
-- This expression creates a table, 'notetree', that contains all of
-- the notes nested under a page. Each entry in the table includes
-- the note's parent's internal and external ids so that applications
-- can build an actual tree out of a vec of these things.
-- TODO: Extensive testing to validate that the nodes are delivered
-- *in nesting order* to the client.
-- Search in here for the term QUERYPARAMETER. That string will be
-- substituted with the correct parameter (id or title) depending on
-- the use case, by the level 1 client (the private parts of
-- store.rs).
SELECT
id,
parent_id,
content,
location,
kind,
creation_date,
updated_date,
lastview_date,
deleted_date
FROM (
WITH RECURSIVE notestree (
id,
parent_id,
content,
location,
kind,
creation_date,
updated_date,
lastview_date,
deleted_date,
cycle
)
AS (
-- The seed query. Finds the root node of any tree of notes,
-- which by definition has a location of zero and a type of
-- 'page'.
SELECT
notes.id,
NULL as parent_id,
notes.content,
0, -- All boxes are at position zero. They are the root of the tree.
notes.kind,
notes.creation_date,
notes.updated_date,
notes.lastview_date,
notes.deleted_date,
','||notes.id||',' -- Cycle monitor
FROM notes
WHERE notes.kind = "box"
AND QUERYPARAMETER = ? -- The Query Parameter
-- RECURSIVE expression
--
-- Here, for each recursion down the tree, we collect the child
-- nodes for a given node, eliding any cycles.
--
-- TODO: Figure out what to do when a cycle DOES occur.
UNION SELECT
notes.id,
notestree.id AS parent_id,
notes.content,
note_relationships.location,
notes.kind,
notes.creation_date,
notes.updated_date,
notes.lastview_date,
notes.deleted_date,
notestree.cycle||notes.id||','
FROM notes
INNER JOIN note_relationships
ON notes.id = note_relationships.note_id
-- For a given ID in the level of notestree in *this* recursion,
-- we want each note's branches one level down.
INNER JOIN notestree
ON note_relationships.parent_id = notestree.id
-- And we want to make sure there are no cycles. There shouldn't
-- be; we're supposed to prevent those. But you never know.
WHERE notestree.cycle NOT LIKE '%,'||notes.id||',%'
ORDER BY note_relationships.location
)
SELECT * from notestree);


@ -0,0 +1 @@
SELECT id, uuid, content, notetype, creation_date, updated_date, lastview_date, deleted_date FROM notes WHERE uuid=?;


@ -0,0 +1 @@
SELECT id, title, slug, note_id, creation_date, updated_date, lastview_date, deleted_date FROM pages WHERE slug=?;


@ -0,0 +1,3 @@
UPDATE notes
SET content = ?, updated_date = ?, lastview_date = ?
WHERE uuid = ?;


@ -6,13 +6,12 @@
 //!
 //! This library implements the core functionality of Notesmachine and
 //! describes that functionality to a storage layer. There's a bit of
-//! intermingling in here which can't be helped, although it may make
-//! sense in the future to separate the decomposition of the note
-//! content into a higher layer.
+//! intermingling in here which can't be helped, although it may make sense
+//! in the future to separate the decomposition of the note content into a
+//! higher layer.
 //!
-//! Notesmachine storage notes consist of two items: Note and Kasten.
-//! This distinction is somewhat arbitrary, as structurally these two
-//! items are stored in the same table.
+//! Notesmachine storage notes consist of two items: Zettel and Kasten,
+//! which are German for "Note" and "Box". Here are the basic rules:
 //!
 //! - Boxes have titles (and date metadata)
 //! - Notes have content and a type (and date metadata)
@ -57,7 +56,7 @@ use crate::store_private::*;
use crate::structs::*; use crate::structs::*;
use sqlx::sqlite::SqlitePool; use sqlx::sqlite::SqlitePool;
use std::cmp; use std::cmp;
// use std::collections::HashMap; use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
/// A handle to our Sqlite database. /// A handle to our Sqlite database.
@ -66,9 +65,12 @@ pub struct NoteStore(Arc<SqlitePool>);
type NoteResult<T> = core::result::Result<T, NoteStoreError>; type NoteResult<T> = core::result::Result<T, NoteStoreError>;
// After wrestling for a while with the fact that 'box' is a reserved // One thing that's pretty terrible about this code is that the
// word in Rust, I decided to just go with Note (note) and Kasten // Executor type in Sqlx is move-only, so it can only be used once per
// (box). // outgoing function call. That means that a lot of this code is
// internally duplicated, which sucks. I tried using the Acquire()
// trait, but its interaction with Executor was not very
// deterministic.
impl NoteStore { impl NoteStore {
/// Initializes a new instance of the note store. Note that the /// Initializes a new instance of the note store. Note that the
@ -78,6 +80,7 @@ impl NoteStore {
let pool = SqlitePool::connect(url).await?; let pool = SqlitePool::connect(url).await?;
Ok(NoteStore(Arc::new(pool))) Ok(NoteStore(Arc::new(pool)))
} }
/// Erase all the data in the database and restore it /// Erase all the data in the database and restore it
/// to its original empty form. Do not use unless you /// to its original empty form. Do not use unless you
/// really, really want that to happen. /// really, really want that to happen.
@ -92,204 +95,208 @@ impl NoteStore {
/// the slug, the slug is insufficient to generate a new page, so /// the slug, the slug is insufficient to generate a new page, so
/// this use case says that in the event of a failure to find the /// this use case says that in the event of a failure to find the
/// requested page, return a basic NotFound. /// requested page, return a basic NotFound.
pub async fn get_kasten_by_slug(&self, slug: &str) -> NoteResult<(Vec<Note>, Vec<Note>)> { pub async fn get_page_by_slug(&self, slug: &str) -> NoteResult<(RawPage, Vec<RawNote>)> {
let kasten = select_kasten_by_slug(&*self.0, &NoteId(slug.to_string())).await?; // let select_note_collection_for_root = include_str!("sql/select_note_collection_for_root.sql");
if kasten.is_empty() { let mut tx = self.0.begin().await?;
return Err(NoteStoreError::NotFound) let page = select_page_by_slug(&mut tx, slug).await?;
} let note_id = page.note_id;
let notes = select_note_collection_from_root(&mut tx, note_id).await?;
let note_id = NoteId(kasten[0].id.clone()); tx.commit().await?;
Ok((kasten, select_backreferences_for_kasten(&*self.0, &note_id).await?)) Ok((page, notes))
} }
/// Fetch page by title /// Fetch page by title
///
/// The most common use case: the user is navigating by requesting /// Supports the use case of the user navigating to a page via
/// a page. The page either exists or it doesn't. If it /// the page's formal title. Since the title is the key reference
/// doesn't, we go out and make it. Since we know it doesn't exist, /// of the system, if no page with that title is found, a page with
/// we also know no backreferences to it exist, so in that case you /// that title is generated automatically.
/// get back two empty vecs. pub async fn get_page_by_title(&self, title: &str) -> NoteResult<(RawPage, Vec<RawNote>)> {
pub async fn get_kasten_by_title(&self, title: &str) -> NoteResult<(Vec<Note>, Vec<Note>)> {
if title.len() == 0 {
return Err(NoteStoreError::NotFound);
}
let kasten = select_kasten_by_title(&*self.0, title).await?;
if kasten.len() > 0 {
let note_id = NoteId(kasten[0].id.clone());
return Ok((kasten, select_backreferences_for_kasten(&*self.0, &note_id).await?));
}
// Sanity check!
let references = build_references(&title);
if references.len() > 0 {
return Err(NoteStoreError::InvalidNoteStructure(
"Titles may not contain nested references.".to_string(),
));
}
let mut tx = self.0.begin().await?; let mut tx = self.0.begin().await?;
let slug = generate_slug(&mut tx, title).await?; let (page, notes) = match select_page_by_title(&mut tx, title).await {
let zettlekasten = create_zettlekasten(&title, &slug); Ok(page) => {
let _ = insert_note(&mut tx, &zettlekasten).await?; let note_id = page.note_id;
(page, select_note_collection_from_root(&mut tx, note_id).await?)
}
Err(sqlx::Error::RowNotFound) => {
let page = {
let new_root_note = create_unique_root_note();
let new_root_note_id = insert_one_new_note(&mut tx, &new_root_note).await?;
let new_page_slug = generate_slug(&mut tx, title).await?;
let new_page = create_new_page_for(&title, &new_page_slug, new_root_note_id);
let _ = insert_one_new_page(&mut tx, &new_page).await?;
select_page_by_title(&mut tx, &title).await?
};
let note_id = page.note_id;
(page, select_note_collection_from_root(&mut tx, note_id).await?)
}
Err(e) => return Err(NoteStoreError::DBError(e)),
};
tx.commit().await?; tx.commit().await?;
Ok((page, notes))
Ok((vec![Note::from(zettlekasten)], vec![]))
} }
pub async fn add_note(&self, note: &NewNote, parent_id: &str, location: Option<i64>) -> NoteResult<String> { /// Insert a note as the child of an existing note, at a set position.
let new_id = self.insert_note( pub async fn insert_nested_note(
note, &self,
&ParentId(parent_id.to_string()), note: &NewNote,
location, parent_note_uuid: &str,
RelationshipKind::Direct).await?; position: i64,
Ok(new_id) ) -> NoteResult<String> {
let mut new_note = note.clone();
new_note.uuid = friendly_id::create();
let references = build_references(&note.content);
let mut tx = self.0.begin().await?;
// Start by building the note and putting it into its relationship.
println!("Select_note_id_for_uuid");
let parent_id: ParentId = select_note_id_for_uuid(&mut tx, parent_note_uuid).await?;
// Ensure new position is sane
println!("Assert Max Child Position");
let parent_max_position = assert_max_child_position_for_note(&mut tx, parent_id).await?;
let position = cmp::min(parent_max_position + 1, position);
println!("Insert_one_new_note");
let new_note_id = insert_one_new_note(&mut tx, &new_note).await?;
println!("make_room_for_new_note");
let _ = make_room_for_new_note(&mut tx, parent_id, position).await?;
println!("Insert_note_to_note_relationship");
let _ = insert_note_to_note_relationship(&mut tx, parent_id, new_note_id, position, "note").await?;
// From the references, make lists of pages that exist, and pages
// that do not.
println!("Find_all_page_references");
let found_references = find_all_page_references_for(&mut tx, &references).await?;
let new_references = diff_references(&references, &found_references);
let mut known_reference_ids: Vec<PageId> = Vec::new();
// Create the pages that don't exist
for one_reference in new_references.iter() {
let new_root_note = create_unique_root_note();
println!("Insert_one_new_root_note");
let new_root_note_id = insert_one_new_note(&mut tx, &new_root_note).await?;
println!("Generate_slug");
let new_page_slug = generate_slug(&mut tx, &one_reference).await?;
let new_page = create_new_page_for(&one_reference, &new_page_slug, new_root_note_id);
println!("insert_one_new_page");
known_reference_ids.push(insert_one_new_page(&mut tx, &new_page).await?)
} }
// And associate the note with all the pages.
known_reference_ids.append(&mut found_references.iter().map(|r| PageId(r.id)).collect());
println!("insert_note_to_page_relationships");
let _ = insert_note_to_page_relationships(&mut tx, new_note_id, &known_reference_ids).await?;
tx.commit().await?;
Ok(new_note.uuid)
}
// This doesn't do anything with the references, as those are
// dependent entirely on the *content*, and not the *position*, of
// the note and the referenced page.
//
/// Move a note from one location to another. /// Move a note from one location to another.
pub async fn move_note( pub async fn move_note(
&self, &self,
note_id: &str, note_uuid: &str,
old_parent_id: &str, old_parent_uuid: &str,
new_parent_id: &str, new_parent_uuid: &str,
new_location: i64, new_position: i64,
) -> NoteResult<()> { ) -> NoteResult<()> {
let all_uuids = vec![note_uuid, old_parent_uuid, new_parent_uuid];
let mut tx = self.0.begin().await?; let mut tx = self.0.begin().await?;
let old_parent_id = ParentId(old_parent_id.to_string()); // This is one of the few cases where we we're getting IDs for
let new_parent_id = ParentId(new_parent_id.to_string()); // notes, but the nature of the ID isn't known at this time.
let note_id = NoteId(note_id.to_string()); // This has to be handled manually, in the next paragraph
// below.
let found_id_vec = bulk_select_ids_for_note_uuids(&mut tx, &all_uuids).await?;
let found_ids: HashMap<String, i64> = found_id_vec.into_iter().collect();
if found_ids.len() != 3 {
return Err(NoteStoreError::NotFound);
}
let old_note = select_note_to_note_relationship(&mut tx, &old_parent_id, &note_id).await?; let old_parent_id = ParentId(*found_ids.get(old_parent_uuid).unwrap());
let old_note_location = old_note.location; let new_parent_id = ParentId(*found_ids.get(new_parent_uuid).unwrap());
let old_note_kind = old_note.kind; let note_id = NoteId(*found_ids.get(note_uuid).unwrap());
let _ = delete_note_to_note_relationship(&mut tx, &old_parent_id, &note_id).await?; let old_note = get_note_to_note_relationship(&mut tx, old_parent_id, note_id).await?;
let _ = close_hole_for_deleted_note(&mut tx, &old_parent_id, old_note_location).await?; let old_note_position = old_note.position;
let parent_max_location = assert_max_child_location_for_note(&mut tx, &new_parent_id).await?; let old_note_nature = &old_note.nature;
let new_location = cmp::min(parent_max_location + 1, new_location);
let _ = make_room_for_new_note(&mut tx, &new_parent_id, new_location).await?; let _ = delete_note_to_note_relationship(&mut tx, old_parent_id, note_id).await?;
let _ = close_hole_for_deleted_note(&mut tx, old_parent_id, old_note_position).await?;
let parent_max_position = assert_max_child_position_for_note(&mut tx, new_parent_id).await?;
let new_position = cmp::min(parent_max_position + 1, new_position);
let _ = make_room_for_new_note(&mut tx, new_parent_id, new_position).await?;
let _ = let _ =
insert_note_to_note_relationship(&mut tx, &new_parent_id, &note_id, new_location, &old_note_kind).await?; insert_note_to_note_relationship(&mut tx, new_parent_id, note_id, new_position, old_note_nature).await?;
tx.commit().await?; tx.commit().await?;
Ok(()) Ok(())
} }
/// Updates a note's content. Completely rebuilds the note's /// Embed or reference a note from a different location.
/// outgoing edge reference list every time. pub async fn reference_or_embed_note(
pub async fn update_note_content(&self, note_id: &str, content: &str) -> NoteResult<()> { &self,
let references = build_references(&content); note_uuid: &str,
let note_id = NoteId(note_id.to_string()); new_parent_uuid: &str,
new_position: i64,
new_nature: &str,
) -> NoteResult<()> {
let mut tx = self.0.begin().await?; let mut tx = self.0.begin().await?;
let _ = update_note_content(&mut tx, &note_id, &content).await?; let existing_note_id: NoteId = NoteId(select_note_id_for_uuid(&mut tx, note_uuid).await?.0);
let _ = delete_bulk_note_to_kasten_relationships(&mut tx, &note_id).await?; let new_parent_id: ParentId = select_note_id_for_uuid(&mut tx, new_parent_uuid).await?;
let found_references = find_all_kasten_from_list_of_references(&mut tx, &references).await?; let _ = make_room_for_new_note(&mut tx, new_parent_id, new_position).await?;
let new_references = diff_references(&references, &found_references); let _ = insert_note_to_note_relationship(&mut tx, new_parent_id, existing_note_id, new_position, new_nature)
let mut known_reference_ids: Vec<NoteId> = Vec::new(); .await?;
for one_reference in new_references.iter() {
let slug = generate_slug(&mut tx, one_reference).await?;
let zettlekasten = create_zettlekasten(&one_reference, &slug);
let _ = insert_note(&mut tx, &zettlekasten).await?;
known_reference_ids.push(NoteId(slug));
}
known_reference_ids.append(&mut found_references.iter().map(|r| NoteId(r.id.clone())).collect());
let _ = insert_bulk_note_to_kasten_relationships(&mut tx, &note_id, &known_reference_ids).await?;
tx.commit().await?; tx.commit().await?;
Ok(()) Ok(())
} }
/// Deletes a note. If the note's relationship drops to zero, all /// Deletes a note. If the note's relationship drops to zero, all
/// references from that note to pages are also deleted. /// references from that note to pages are also deleted.
pub async fn delete_note(&self, note_id: &str, note_parent_id: &str) -> NoteResult<()> { pub async fn delete_note(&self, note_uuid: &str, note_parent_uuid: &str) -> NoteResult<()> {
let mut tx = self.0.begin().await?; let mut tx = self.0.begin().await?;
let note_id = NoteId(note_id.to_string()); let condemned_note_id: NoteId = NoteId(select_note_id_for_uuid(&mut tx, note_uuid).await?.0);
let parent_id = ParentId(note_parent_id.to_string()); let note_parent_id: ParentId = select_note_id_for_uuid(&mut tx, note_parent_uuid).await?;
let _ = delete_note_to_note_relationship(&mut tx, note_parent_id, condemned_note_id);
if *parent_id != *note_id { if count_existing_note_relationships(&mut tx, condemned_note_id).await? == 0 {
let _ = delete_note_to_note_relationship(&mut tx, &parent_id, &note_id); let _ = delete_note_to_page_relationships(&mut tx, condemned_note_id).await?;
} let _ = delete_note(&mut tx, condemned_note_id).await?;
// The big one: if zero parents report having an interest in this note, then it,
// *and any sub-relationships*, go away.
if count_existing_note_relationships(&mut tx, &note_id).await? == 0 {
let _ = delete_note_to_kasten_relationships(&mut tx, &note_id).await?;
let _ = delete_note(&mut tx, &note_id).await?;
} }
tx.commit().await?; tx.commit().await?;
Ok(()) Ok(())
} }
} /// Updates a note's content. Completely rebuilds the note's
/// outgoing edge reference list every time.
// The Private stuff pub async fn update_note_content(&self, note_uuid: &str, content: &str) -> NoteResult<()> {
let references = build_references(&content);
impl NoteStore {
// Pretty much the most dangerous function in our system. Has to
// have ALL the error checking.
async fn insert_note(
&self,
note: &NewNote,
parent_id: &ParentId,
location: Option<i64>,
kind: RelationshipKind,
) -> NoteResult<String> {
if let Some(location) = location {
if location < 0 {
return Err(NoteStoreError::InvalidNoteStructure(
"Add note: A negative location is not valid.".to_string(),
));
}
}
if parent_id.is_empty() {
return Err(NoteStoreError::InvalidNoteStructure(
"Add note: A parent note ID is required.".to_string(),
));
}
if note.id.is_empty() {
return Err(NoteStoreError::InvalidNoteStructure(
"Add note: Your note should have an id already".to_string(),
));
}
if note.content.is_empty() {
return Err(NoteStoreError::InvalidNoteStructure(
"Add note: Empty notes are not supported.".to_string(),
));
}
let references = build_references(&note.content);
let mut tx = self.0.begin().await?; let mut tx = self.0.begin().await?;
let location = {
let max_child = assert_max_child_location_for_note(&mut tx, parent_id).await? + 1;
if let Some(location) = location {
cmp::min(max_child, location)
} else {
max_child
}
};
let note_id = NoteId(note.id.clone()); let note_id: NoteId = NoteId(select_note_id_for_uuid(&mut tx, note_uuid).await?.0);
insert_note(&mut tx, &note).await?; let _ = update_note_content(&mut tx, note_id, &content).await?;
make_room_for_new_note(&mut tx, &parent_id, location).await?;
insert_note_to_note_relationship(&mut tx, &parent_id, &note_id, location, &kind).await?;
let found_references = find_all_kasten_from_list_of_references(&mut tx, &references).await?; let found_references = find_all_page_references_for(&mut tx, &references).await?;
let new_references = diff_references(&references, &found_references); let new_references = diff_references(&references, &found_references);
let mut known_reference_ids: Vec<NoteId> = Vec::new(); let mut known_reference_ids: Vec<PageId> = Vec::new();
// Create the pages that don't exist
for one_reference in new_references.iter() { for one_reference in new_references.iter() {
let slug = generate_slug(&mut tx, one_reference).await?; let new_root_note = create_unique_root_note();
let zettlekasten = create_zettlekasten(&one_reference, &slug); let new_root_note_id = insert_one_new_note(&mut tx, &new_root_note).await?;
let _ = insert_note(&mut tx, &zettlekasten).await?; let new_page_slug = generate_slug(&mut tx, &one_reference).await?;
known_reference_ids.push(NoteId(slug)); let new_page = create_new_page_for(&one_reference, &new_page_slug, new_root_note_id);
known_reference_ids.push(insert_one_new_page(&mut tx, &new_page).await?)
} }
known_reference_ids.append(&mut found_references.iter().map(|r| NoteId(r.id.clone())).collect()); // And associate the note with all the pages.
let _ = insert_bulk_note_to_kasten_relationships(&mut tx, &note_id, &known_reference_ids).await?; known_reference_ids.append(&mut found_references.iter().map(|r| PageId(r.id)).collect());
let _ = insert_note_to_page_relationships(&mut tx, note_id, &known_reference_ids).await?;
tx.commit().await?; tx.commit().await?;
Ok(note_id.to_string()) Ok(())
} }
} }


@ -2,7 +2,10 @@ use crate::structs::*;
use lazy_static::lazy_static; use lazy_static::lazy_static;
use regex::Regex; use regex::Regex;
use slug::slugify; use slug::slugify;
use sqlx::{sqlite::Sqlite, Done, Executor}; use sqlx::{
sqlite::{Sqlite, SqliteRow},
Done, Executor, Row,
};
use std::collections::HashSet; use std::collections::HashSet;
type SqlResult<T> = sqlx::Result<T>; type SqlResult<T> = sqlx::Result<T>;
@ -18,33 +21,6 @@ type SqlResult<T> = sqlx::Result<T>;
// coherent and easily readable, and hides away the gnarliness of some // coherent and easily readable, and hides away the gnarliness of some
// of the SQL queries. // of the SQL queries.
lazy_static! {
static ref SELECT_KASTEN_BY_TITLE_SQL: String = str::replace(
include_str!("sql/select_notes_by_parameter.sql"),
"QUERYPARAMETER",
"notes.content"
);
}
lazy_static! {
static ref SELECT_KASTEN_BY_ID_SQL: String = str::replace(
include_str!("sql/select_notes_by_parameter.sql"),
"QUERYPARAMETER",
"notes.id"
);
}
lazy_static! {
static ref SELECT_NOTES_BACKREFENCING_KASTEN_SQL: &'static str =
include_str!("sql/select_notes_backreferencing_kasten.sql");
}
// ___ _
// | _ \___ ___ ___| |_
// | / -_|_-</ -_) _|
// |_|_\___/__/\___|\__|
//
pub(crate) async fn reset_database<'a, E>(executor: E) -> SqlResult<()> pub(crate) async fn reset_database<'a, E>(executor: E) -> SqlResult<()>
where where
E: Executor<'a, Database = Sqlite>, E: Executor<'a, Database = Sqlite>,
@ -53,99 +29,133 @@ where
sqlx::query(initialize_sql).execute(executor).await.map(|_| ()) sqlx::query(initialize_sql).execute(executor).await.map(|_| ())
} }
// ___ _ _ _ __ _ pub(crate) async fn select_page_by_slug<'a, E>(executor: E, slug: &str) -> SqlResult<RawPage>
// | __|__| |_ __| |_ | |/ /__ _ __| |_ ___ _ _
// | _/ -_) _/ _| ' \ | ' </ _` (_-< _/ -_) ' \
// |_|\___|\__\__|_||_| |_|\_\__,_/__/\__\___|_||_|
//
// Select the requested kasten via its id. This is fairly rare;
// kastens should usually be picked up via their title, but if you're
// navigating to an instance, this is how you specify the kasten in a
// URL. The return value is an array of Note objects; it is the
// responsibility of client code to restructure these into a tree-like
// object.
//
// Recommended: Clients should update the URL whenever changing
// kasten.
pub(crate) async fn select_kasten_by_slug<'a, E>(executor: E, slug: &NoteId) -> SqlResult<Vec<Note>>
where where
E: Executor<'a, Database = Sqlite>, E: Executor<'a, Database = Sqlite>,
{ {
let r: Vec<RowNote> = sqlx::query_as(&SELECT_KASTEN_BY_ID_SQL) let select_one_page_by_slug_sql = concat!(
.bind(&**slug) "SELECT id, title, slug, note_id, creation_date, updated_date, ",
.fetch_all(executor) "lastview_date, deleted_date FROM pages WHERE slug=?;"
.await?; );
Ok(r.into_iter().map(|z| Note::from(z)).collect()) Ok(sqlx::query_as(&select_one_page_by_slug_sql)
.bind(&slug)
.fetch_one(executor)
.await?)
} }
// Fetch the kasten by title. The return value is an array of Note pub(crate) async fn select_page_by_title<'a, E>(executor: E, title: &str) -> SqlResult<RawPage>
// objects; it is the responsibility of client code to restructure
// these into a tree-like object.
pub(crate) async fn select_kasten_by_title<'a, E>(executor: E, title: &str) -> SqlResult<Vec<Note>>
where where
E: Executor<'a, Database = Sqlite>, E: Executor<'a, Database = Sqlite>,
{ {
let r: Vec<RowNote> = sqlx::query_as(&SELECT_KASTEN_BY_TITLE_SQL) let select_one_page_by_title_sql = concat!(
"SELECT id, title, slug, note_id, creation_date, updated_date, ",
"lastview_date, deleted_date FROM pages WHERE title=?;"
);
Ok(sqlx::query_as(&select_one_page_by_title_sql)
.bind(&title) .bind(&title)
.fetch_all(executor) .fetch_one(executor)
.await?; .await?)
Ok(r.into_iter().map(|z| Note::from(z)).collect())
} }
// Fetch all backreferences to a kasten. The return value is an array pub(crate) async fn select_note_id_for_uuid<'a, E>(executor: E, uuid: &str) -> SqlResult<ParentId>
// of arrays, and inside each array is a list from a root kasten to
// the note that references the given kasten. Clients may choose how
// they want to display that collection.
pub(crate) async fn select_backreferences_for_kasten<'a, E>(executor: E, kasten_id: &NoteId) -> SqlResult<Vec<Note>>
where where
E: Executor<'a, Database = Sqlite>, E: Executor<'a, Database = Sqlite>,
{ {
let r: Vec<RowNote> = sqlx::query_as(&SELECT_NOTES_BACKREFENCING_KASTEN_SQL) let select_note_id_for_uuid_sql = "SELECT id FROM notes WHERE uuid = ?;";
.bind(&**kasten_id) let id: JustId = sqlx::query_as(&select_note_id_for_uuid_sql)
.fetch_all(executor) .bind(&uuid)
.fetch_one(executor)
.await?; .await?;
Ok(r.into_iter().map(|z| Note::from(z)).collect()) Ok(ParentId(id.id))
} }
// ___ _ ___ _ _ _ pub(crate) async fn make_room_for_new_note<'a, E>(executor: E, parent_id: ParentId, position: i64) -> SqlResult<()>
// |_ _|_ _ ___ ___ _ _| |_ / _ \ _ _ ___ | \| |___| |_ ___
// | || ' \(_-</ -_) '_| _| | (_) | ' \/ -_) | .` / _ \ _/ -_)
// |___|_||_/__/\___|_| \__| \___/|_||_\___| |_|\_\___/\__\___|
//
// Inserts a single note into the notes table. That is all.
pub(crate) async fn insert_note<'a, E>(executor: E, zettle: &NewNote) -> SqlResult<String>
where where
E: Executor<'a, Database = Sqlite>, E: Executor<'a, Database = Sqlite>,
{ {
let insert_one_page_sql = concat!( let make_room_for_new_note_sql = concat!(
"INSERT INTO notes (id, content, kind, ", "UPDATE note_relationships ",
" creation_date, updated_date, lastview_date) ", "SET position = position + 1 ",
"WHERE position >= ? and parent_id = ?;"
);
sqlx::query(make_room_for_new_note_sql)
.bind(&position)
.bind(&*parent_id)
.execute(executor)
.await
.map(|_| ())
}
pub(crate) async fn insert_note_to_note_relationship<'a, E>(
executor: E,
parent_id: ParentId,
note_id: NoteId,
position: i64,
nature: &str,
) -> SqlResult<()>
where
E: Executor<'a, Database = Sqlite>,
{
let insert_note_to_note_relationship_sql = concat!(
"INSERT INTO note_relationships (parent_id, note_id, position, nature) ",
"values (?, ?, ?, ?)"
);
sqlx::query(insert_note_to_note_relationship_sql)
.bind(&*parent_id)
.bind(&*note_id)
.bind(&position)
.bind(&nature)
.execute(executor)
.await
.map(|_| ())
}
pub(crate) async fn select_note_collection_from_root<'a, E>(executor: E, root: i64) -> SqlResult<Vec<RawNote>>
where
E: Executor<'a, Database = Sqlite>,
{
let select_note_collection_from_root_sql = include_str!("sql/select_note_collection_from_root.sql");
Ok(sqlx::query_as(&select_note_collection_from_root_sql)
.bind(&root)
.fetch_all(executor)
.await?)
}
pub(crate) async fn insert_one_new_note<'a, E>(executor: E, note: &NewNote) -> SqlResult<NoteId>
where
E: Executor<'a, Database = Sqlite>,
{
let insert_one_note_sql = concat!(
"INSERT INTO notes ( ",
" uuid, ",
" content, ",
" notetype, ",
" creation_date, ",
" updated_date, ",
" lastview_date) ",
"VALUES (?, ?, ?, ?, ?, ?);" "VALUES (?, ?, ?, ?, ?, ?);"
); );
let _ = sqlx::query(insert_one_page_sql) Ok(NoteId(
.bind(&zettle.id) sqlx::query(insert_one_note_sql)
.bind(&zettle.content) .bind(&note.uuid)
.bind(zettle.kind.to_string()) .bind(&note.content)
.bind(&zettle.creation_date) .bind(&note.notetype)
.bind(&zettle.updated_date) .bind(&note.creation_date)
.bind(&zettle.lastview_date) .bind(&note.updated_date)
.bind(&note.lastview_date)
.execute(executor) .execute(executor)
.await?; .await?
Ok(zettle.id.clone()) .last_insert_rowid(),
))
} }
// ___ _ _ _ _ __ _
// | _ )_ _(_) |__| | | |/ /__ _ __| |_ ___ _ _
// | _ \ || | | / _` | | ' </ _` (_-< _/ -_) ' \
// |___/\_,_|_|_\__,_| |_|\_\__,_/__/\__\___|_||_|
//
// Given a possible slug, find the slug with the highest // Given a possible slug, find the slug with the highest
// uniquification number, and return that number, if any. // uniquification number, and return that number, if any.
pub(crate) fn find_maximal_slug_number(slugs: &[JustId]) -> Option<u32> {
pub(crate) fn find_maximal_slug(slugs: &[JustSlugs]) -> Option<u32> {
lazy_static! { lazy_static! {
static ref RE_CAP_NUM: Regex = Regex::new(r"-(\d+)$").unwrap(); static ref RE_CAP_NUM: Regex = Regex::new(r"-(\d+)$").unwrap();
} }
@ -156,7 +166,7 @@ pub(crate) fn find_maximal_slug_number(slugs: &[JustId]) -> Option<u32> {
let mut slug_counters: Vec<u32> = slugs let mut slug_counters: Vec<u32> = slugs
.iter() .iter()
.filter_map(|slug| RE_CAP_NUM.captures(&slug.id)) .filter_map(|slug| RE_CAP_NUM.captures(&slug.slug))
.map(|cap| cap.get(1).unwrap().as_str().parse::<u32>().unwrap()) .map(|cap| cap.get(1).unwrap().as_str().parse::<u32>().unwrap())
.collect(); .collect();
slug_counters.sort_unstable(); slug_counters.sort_unstable();
@ -165,170 +175,62 @@ pub(crate) fn find_maximal_slug_number(slugs: &[JustId]) -> Option<u32> {
// Given an initial string and an existing collection of slugs, // Given an initial string and an existing collection of slugs,
// generate a new slug that does not conflict with the current // generate a new slug that does not conflict with the current
// collection. Right now we're using the slugify operation, which... // collection.
// isn't all that.
pub(crate) async fn generate_slug<'a, E>(executor: E, title: &str) -> SqlResult<String> pub(crate) async fn generate_slug<'a, E>(executor: E, title: &str) -> SqlResult<String>
where where
E: Executor<'a, Database = Sqlite>, E: Executor<'a, Database = Sqlite>,
{ {
lazy_static! { lazy_static! {
static ref RE_STRIP_NUM: Regex = Regex::new(r"-\d+$").unwrap(); static ref RE_STRIP_NUM: Regex = Regex::new(r"-\d+$").unwrap();
static ref SLUG_FINDER_SQL: String = format!(
"SELECT id FROM notes WHERE kind = '{}' AND id LIKE '?%';",
NoteKind::Kasten.to_string()
);
} }
let initial_slug = slugify(title); let initial_slug = slugify(title);
let sample_slug = RE_STRIP_NUM.replace_all(&initial_slug, ""); let sample_slug = RE_STRIP_NUM.replace_all(&initial_slug, "");
let similar_slugs: Vec<JustId> = sqlx::query_as(&SLUG_FINDER_SQL) let slug_finder_sql = "SELECT slug FROM pages WHERE slug LIKE '?%';";
let similar_slugs: Vec<JustSlugs> = sqlx::query_as(&slug_finder_sql)
.bind(&*sample_slug) .bind(&*sample_slug)
.fetch_all(executor) .fetch_all(executor)
.await?; .await?;
let maximal_slug_number = find_maximal_slug_number(&similar_slugs); let maximal_slug = find_maximal_slug(&similar_slugs);
Ok(match maximal_slug_number { match maximal_slug {
None => initial_slug, None => Ok(initial_slug),
Some(slug_number) => format!("{}-{}", initial_slug, slug_number + 1), Some(max_slug) => Ok(format!("{}-{}", initial_slug, max_slug + 1)),
}) }
} }
// A helper function: given a title and a slug, create a KastenType pub(crate) async fn insert_one_new_page<'a, E>(executor: E, page: &NewPage) -> SqlResult<PageId>
// note.
pub(crate) fn create_zettlekasten(title: &str, slug: &str) -> NewNote {
NewNoteBuilder::default()
.id(slug.to_string())
.content(title.to_string())
.kind(NoteKind::Kasten)
.build()
.unwrap()
}
// _ _ _ _ ___ _ _ _
// | | | |_ __ __| |__ _| |_ ___ / _ \ _ _ ___ | \| |___| |_ ___
// | |_| | '_ \/ _` / _` | _/ -_) | (_) | ' \/ -_) | .` / _ \ _/ -_)
// \___/| .__/\__,_\__,_|\__\___| \___/|_||_\___| |_|\_\___/\__\___|
// |_|
pub(crate) async fn update_note_content<'a, E>(executor: E, note_id: &NoteId, content: &str) -> SqlResult<()>
where where
E: Executor<'a, Database = Sqlite>, E: Executor<'a, Database = Sqlite>,
{ {
let update_note_content_sql = "UPDATE notes SET content = ? WHERE note_id = ?"; let insert_one_page_sql = concat!(
let count = sqlx::query(update_note_content_sql) "INSERT INTO pages ( ",
.bind(content) " slug, ",
.bind(&**note_id) " title, ",
" note_id, ",
" creation_date, ",
" updated_date, ",
" lastview_date) ",
"VALUES (?, ?, ?, ?, ?, ?);"
);
Ok(PageId(
sqlx::query(insert_one_page_sql)
.bind(&page.slug)
.bind(&page.title)
.bind(&page.note_id)
.bind(&page.creation_date)
.bind(&page.updated_date)
.bind(&page.lastview_date)
.execute(executor) .execute(executor)
.await? .await?
.rows_affected(); .last_insert_rowid(),
))
match count {
1 => Ok(()),
_ => Err(sqlx::Error::RowNotFound),
}
} }
// ___ _ _ ___ _ _ _ ___ _ _ _ _ _ pub(crate) async fn insert_note_to_page_relationships<'a, E>(
// | __|__| |_ __| |_ / _ \ _ _ ___ | \| |___| |_ ___ | _ \___| |__ _| |_(_)___ _ _ __| |_ (_)_ __
// | _/ -_) _/ _| ' \ | (_) | ' \/ -_) | .` / _ \ _/ -_) | / -_) / _` | _| / _ \ ' \(_-< ' \| | '_ \
// |_|\___|\__\__|_||_| \___/|_||_\___| |_|\_\___/\__\___| |_|_\___|_\__,_|\__|_\___/_||_/__/_||_|_| .__/
// |_|
pub(crate) async fn select_note_to_note_relationship<'a, E>(
executor: E, executor: E,
parent_id: &ParentId, note_id: NoteId,
note_id: &NoteId, references: &[PageId],
) -> SqlResult<NoteRelationship>
where
E: Executor<'a, Database = Sqlite>,
{
let get_note_to_note_relationship_sql = concat!(
"SELECT parent_id, note_id, location, kind ",
"FROM note_relationships ",
"WHERE parent_id = ? and note_id = ? ",
"LIMIT 1"
);
let s: NoteRelationshipRow = sqlx::query_as(get_note_to_note_relationship_sql)
.bind(&**parent_id)
.bind(&**note_id)
.fetch_one(executor)
.await?;
Ok(NoteRelationship::from(s))
}
// _ _ _ _ _ _ _ ___ _ _ _ _ _
// | \| |___| |_ ___ | |_ ___ | \| |___| |_ ___ | _ \___| |__ _| |_(_)___ _ _ __| |_ (_)_ __ ___
// | .` / _ \ _/ -_) | _/ _ \ | .` / _ \ _/ -_) | / -_) / _` | _| / _ \ ' \(_-< ' \| | '_ (_-<
// |_|\_\___/\__\___| \__\___/ |_|\_\___/\__\___| |_|_\___|_\__,_|\__|_\___/_||_/__/_||_|_| .__/__/
// |_|
pub(crate) async fn insert_note_to_note_relationship<'a, E>(
executor: E,
parent_id: &ParentId,
note_id: &NoteId,
location: i64,
kind: &RelationshipKind,
) -> SqlResult<()>
where
E: Executor<'a, Database = Sqlite>,
{
let insert_note_to_note_relationship_sql = concat!(
"INSERT INTO note_relationships (parent_id, note_id, location, kind) ",
"values (?, ?, ?, ?)"
);
let _ = sqlx::query(insert_note_to_note_relationship_sql)
.bind(&**parent_id)
.bind(&**note_id)
.bind(&location)
.bind(&kind.to_string())
.execute(executor)
.await?;
Ok(())
}
pub(crate) async fn make_room_for_new_note<'a, E>(executor: E, parent_id: &ParentId, location: i64) -> SqlResult<()>
where
E: Executor<'a, Database = Sqlite>,
{
let make_room_for_new_note_sql = concat!(
"UPDATE note_relationships ",
"SET location = location + 1 ",
"WHERE location >= ? and parent_id = ?;"
);
let _ = sqlx::query(make_room_for_new_note_sql)
.bind(&location)
.bind(&**parent_id)
.execute(executor)
.await?;
Ok(())
}
pub(crate) async fn assert_max_child_location_for_note<'a, E>(executor: E, note_id: &ParentId) -> SqlResult<i64>
where
E: Executor<'a, Database = Sqlite>,
{
let assert_max_child_location_for_note_sql =
"SELECT MAX(location) AS count FROM note_relationships WHERE parent_id = ?;";
let count: RowCount = sqlx::query_as(assert_max_child_location_for_note_sql)
.bind(&**note_id)
.fetch_one(executor)
.await?;
Ok(count.count)
}
// _ _ _ _ _ __ _ ___ _ _ _ _ _
// | \| |___| |_ ___ | |_ ___ | |/ /__ _ __| |_ ___ _ _ | _ \___| |__ _| |_(_)___ _ _ __| |_ (_)_ __ ___
// | .` / _ \ _/ -_) | _/ _ \ | ' </ _` (_-< _/ -_) ' \ | / -_) / _` | _| / _ \ ' \(_-< ' \| | '_ (_-<
// |_|\_\___/\__\___| \__\___/ |_|\_\__,_/__/\__\___|_||_| |_|_\___|_\__,_|\__|_\___/_||_/__/_||_|_| .__/__/
// |_|
pub(crate) async fn insert_bulk_note_to_kasten_relationships<'a, E>(
executor: E,
note_id: &NoteId,
references: &[NoteId],
) -> SqlResult<()> ) -> SqlResult<()>
where where
E: Executor<'a, Database = Sqlite>, E: Executor<'a, Database = Sqlite>,
@ -337,87 +239,77 @@ where
return Ok(()); return Ok(());
} }
let insert_pattern = format!("(?, ?, '{}')", KastenRelationshipKind::Kasten.to_string()); let insert_note_page_references_sql = "INSERT INTO page_relationships (note_id, page_id) VALUES ".to_string()
let insert_note_page_references_sql = "INSERT INTO note_kasten_relationships (note_id, kasten_id, kind) VALUES " + &["(?, ?)"].repeat(references.len()).join(", ")
.to_string()
+ &[insert_pattern.as_str()].repeat(references.len()).join(", ")
+ &";".to_string(); + &";".to_string();
let mut request = sqlx::query(&insert_note_page_references_sql); let mut request = sqlx::query(&insert_note_page_references_sql);
for reference in references { for reference in references {
request = request.bind(&**note_id).bind(&**reference); request = request.bind(*note_id).bind(**reference);
} }
request.execute(executor).await.map(|_| ()) request.execute(executor).await.map(|_| ())
} }
pub(crate) async fn delete_bulk_note_to_kasten_relationships<'a, E>(executor: E, note_id: &NoteId) -> SqlResult<()> // For a given collection of uuids, retrieve the internal ID used by
// the database.
pub(crate) async fn bulk_select_ids_for_note_uuids<'a, E>(executor: E, ids: &[&str]) -> SqlResult<Vec<(String, i64)>>
where where
E: Executor<'a, Database = Sqlite>, E: Executor<'a, Database = Sqlite>,
{ {
let delete_note_to_kasten_relationship_sql = "DELETE FROM note_kasten_relationships WHERE and note_id = ?;"; if ids.is_empty() {
let _ = sqlx::query(delete_note_to_kasten_relationship_sql)
.bind(&**note_id)
.execute(executor)
.await?;
Ok(())
}
// Given the references supplied, and the references found in the datastore,
// return a list of the references not found in the datastore.
pub(crate) fn diff_references(references: &[String], found_references: &[PageTitle]) -> Vec<String> {
let all: HashSet<String> = references.iter().cloned().collect();
let found: HashSet<String> = found_references.iter().map(|r| r.content.clone()).collect();
all.difference(&found).cloned().collect()
}
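A small usage sketch of the set difference this helper performs. Assume found_alpha_rows is a Vec<PageTitle> previously fetched for the one reference ("Alpha") that already has a page; the titles are invented:

    // The note's content references "Alpha" and "Beta", but only "Alpha"
    // already exists, so the difference is the list of pages still to create.
    let references = vec!["Alpha".to_string(), "Beta".to_string()];
    let missing = diff_references(&references, &found_alpha_rows);
    assert_eq!(missing, vec!["Beta".to_string()]);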
// ___ _ _ _ _ __ _ ___ _ _ _ _ _
// / __|___ _ _| |_ ___ _ _| |_ | |_ ___ | |/ /__ _ __| |_ ___ _ _ | _ \___| |__ _| |_(_)___ _ _ __| |_ (_)_ __ ___
// | (__/ _ \ ' \ _/ -_) ' \ _| | _/ _ \ | ' </ _` (_-< _/ -_) ' \ | / -_) / _` | _| / _ \ ' \(_-< ' \| | '_ (_-<
// \___\___/_||_\__\___|_||_\__| \__\___/ |_|\_\__,_/__/\__\___|_||_| |_|_\___|_\__,_|\__|_\___/_||_/__/_||_|_| .__/__/
// |_|
// Returns all the (Id, title) pairs found in the database out of a
// list of titles. Used by insert_note and update_note_content to
// find the ids of all the references in a given document.
pub(crate) async fn find_all_kasten_from_list_of_references<'a, E>(
executor: E,
references: &[String],
) -> SqlResult<Vec<PageTitle>>
where
E: Executor<'a, Database = Sqlite>,
{
if references.is_empty() {
return Ok(vec![]); return Ok(vec![]);
} }
lazy_static! { let bulk_select_ids_for_note_uuids_sql = "SELECT uuid, id FROM notes WHERE uuid IN (".to_string()
static ref SELECT_ALL_REFERENCES_FOR_SQL_BASE: String = format!( + &["?"].repeat(ids.len()).join(",")
"SELECT id, content FROM notes WHERE kind = '{}' AND content IN (", + &");".to_string();
NoteKind::Kasten.to_string()
);
}
let find_all_references_for_sql = let mut request = sqlx::query(&bulk_select_ids_for_note_uuids_sql);
SELECT_ALL_REFERENCES_FOR_SQL_BASE.to_string() + &["?"].repeat(references.len()).join(",") + &");".to_string(); for id in ids.iter() {
let mut request = sqlx::query_as(&find_all_references_for_sql);
for id in references.iter() {
request = request.bind(id); request = request.bind(id);
} }
request.fetch_all(executor).await Ok(request
.try_map(|row: SqliteRow| {
let l = row.try_get::<String, _>(0)?;
let r = row.try_get::<i64, _>(1)?;
Ok((l, r))
})
.fetch_all(executor)
.await?
.into_iter()
.collect())
} }
// ___ _ _ // Used by move_note to identify the single note to note relationship
// | \ ___| |___| |_ ___ // by the original parent and child pair. Used mostly to find the
// | |) / -_) / -_) _/ -_) // position for recalculation, to create a new gap or close an old
// |___/\___|_\___|\__\___| // one.
// pub(crate) async fn get_note_to_note_relationship<'a, E>(
executor: E,
parent_id: ParentId,
note_id: NoteId,
) -> SqlResult<NoteRelationship>
where
E: Executor<'a, Database = Sqlite>,
{
let get_note_to_note_relationship_sql = concat!(
"SELECT parent_id, note_id, position, nature ",
"FROM note_relationships ",
"WHERE parent_id = ? and note_id = ? ",
"LIMIT 1"
);
sqlx::query_as(get_note_to_note_relationship_sql)
.bind(&*parent_id)
.bind(&*note_id)
.fetch_one(executor)
.await
}
pub(crate) async fn delete_note_to_note_relationship<'a, E>( pub(crate) async fn delete_note_to_note_relationship<'a, E>(
executor: E, executor: E,
parent_id: &ParentId, parent_id: ParentId,
note_id: &NoteId, note_id: NoteId,
) -> SqlResult<()> ) -> SqlResult<()>
where where
E: Executor<'a, Database = Sqlite>, E: Executor<'a, Database = Sqlite>,
@ -428,8 +320,8 @@ where
    );
    let count = sqlx::query(delete_note_to_note_relationship_sql)
        .bind(&**parent_id)    .bind(&*parent_id)
        .bind(&**note_id)    .bind(&*note_id)
        .execute(executor)
        .await?
        .rows_affected();
@ -440,33 +332,27 @@ where
    }
}
pub(crate) async fn delete_note_to_kasten_relationships<'a, E>(executor: E, note_id: &NoteId) -> SqlResult<()>
where
    E: Executor<'a, Database = Sqlite>,
{
    lazy_static! {
        static ref DELETE_NOTE_TO_KASTEN_RELATIONSHIPS_SQL: String = format!(
            "DELETE FROM note_relationships WHERE kind in ('{}', '{}') AND parent_id = ?;",
            KastenRelationshipKind::Kasten.to_string(),
            KastenRelationshipKind::Unacked.to_string()
        );
    }

    let _ = sqlx::query(&DELETE_NOTE_TO_KASTEN_RELATIONSHIPS_SQL)
        .bind(&**note_id)
        .execute(executor)
        .await?;
    Ok(())
}

pub(crate) async fn delete_note_to_page_relationships<'a, E>(executor: E, note_id: NoteId) -> SqlResult<()>
where
    E: Executor<'a, Database = Sqlite>,
{
    let delete_note_to_page_relationships_sql = "DELETE FROM page_relationships WHERE note_id = ?;";

    let _ = sqlx::query(delete_note_to_page_relationships_sql)
        .bind(&*note_id)
        .execute(executor)
        .await?;
    Ok(())
}
pub(crate) async fn delete_note<'a, E>(executor: E, note_id: &NoteId) -> SqlResult<()>    pub(crate) async fn delete_note<'a, E>(executor: E, note_id: NoteId) -> SqlResult<()>
where
    E: Executor<'a, Database = Sqlite>,
{
    let delete_note_sql = "DELETE FROM notes WHERE note_id = ?";
    let count = sqlx::query(delete_note_sql)
        .bind(&**note_id)    .bind(&*note_id)
        .execute(executor)
        .await?
        .rows_affected();
@ -477,50 +363,118 @@ where
    }
}
// After removing a note, recalculate the position of all notes under
// the parent note, such that their order is now completely
// sequential.
pub(crate) async fn close_hole_for_deleted_note<'a, E>(
    executor: E,
    parent_id: &ParentId,
    location: i64,
) -> SqlResult<()>
where
    E: Executor<'a, Database = Sqlite>,
{
    let close_hole_for_deleted_note_sql = concat!(
        "UPDATE note_relationships ",
        "SET location = location - 1 ",
        "WHERE location > ? and parent_id = ?;"
    );

    let _ = sqlx::query(close_hole_for_deleted_note_sql)
        .bind(&location)
        .bind(&**parent_id)
        .execute(executor)
        .await?;
    Ok(())
}

// __ __ _
// | \/ (_)___ __
// | |\/| | (_-</ _|
// |_| |_|_/__/\__|
//
// The dreaded miscellaneous!
pub(crate) async fn count_existing_note_relationships<'a, E>(executor: E, note_id: &NoteId) -> SqlResult<i64>
where
    E: Executor<'a, Database = Sqlite>,
{
    let count_existing_note_relationships_sql =
        "SELECT COUNT(*) as count FROM note_relationships WHERE note_id = ?;";
    let count: RowCount = sqlx::query_as(&count_existing_note_relationships_sql)
        .bind(&**note_id)
        .fetch_one(executor)
        .await?;
    Ok(count.count)
}

pub(crate) async fn count_existing_note_relationships<'a, E>(executor: E, note_id: NoteId) -> SqlResult<i64>
where
    E: Executor<'a, Database = Sqlite>,
{
    let count_existing_note_relationships_sql = "SELECT COUNT(*) as count FROM page_relationships WHERE note_id = ?;";

    let count: RowCount = sqlx::query_as(count_existing_note_relationships_sql)
        .bind(&*note_id)
        .fetch_one(executor)
        .await?;
    Ok(count.count)
}
pub(crate) async fn assert_max_child_position_for_note<'a, E>(executor: E, note_id: ParentId) -> SqlResult<i64>
where
E: Executor<'a, Database = Sqlite>,
{
let assert_max_child_position_for_note_sql =
"SELECT MAX(position) AS count FROM note_relationships WHERE parent_id = ?;";
let count: RowCount = sqlx::query_as(assert_max_child_position_for_note_sql)
.bind(&*note_id)
.fetch_one(executor)
.await?;
Ok(count.count)
}
// After removing a note, recalculate the position of all notes under
// the parent note, such that their order is now completely
// sequential.
pub(crate) async fn close_hole_for_deleted_note<'a, E>(executor: E, parent_id: ParentId, position: i64) -> SqlResult<()>
where
E: Executor<'a, Database = Sqlite>,
{
let close_hole_for_deleted_note_sql = concat!(
"UPDATE note_relationships ",
"SET position = position - 1 ",
"WHERE position > ? and parent_id = ?;"
);
sqlx::query(close_hole_for_deleted_note_sql)
.bind(&position)
.bind(&*parent_id)
.execute(executor)
.await
.map(|_| ())
}
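
As an in-memory sketch of what that UPDATE does to sibling positions (illustrative only; neither the helper nor the test below appears in either commit):

fn close_hole(positions: &mut Vec<i64>, removed: i64) {
    // Drop the deleted note's slot, then shift everything after it down by one,
    // mirroring "SET position = position - 1 WHERE position > ?".
    positions.retain(|&p| p != removed);
    for p in positions.iter_mut() {
        if *p > removed {
            *p -= 1;
        }
    }
}

#[test]
fn closing_a_hole_keeps_sibling_positions_sequential() {
    let mut positions = vec![1, 2, 3, 4];
    close_hole(&mut positions, 2);
    assert_eq!(positions, vec![1, 2, 3]);
}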
pub(crate) async fn find_all_page_references_for<'a, E>(
executor: E,
references: &[String],
) -> SqlResult<Vec<PageTitles>>
where
E: Executor<'a, Database = Sqlite>,
{
if references.is_empty() {
return Ok(vec![]);
}
let find_all_references_for_sql = "SELECT id, title FROM pages WHERE title IN (".to_string()
+ &["?"].repeat(references.len()).join(",")
+ &");".to_string();
let mut request = sqlx::query_as(&find_all_references_for_sql);
for id in references.iter() {
request = request.bind(id);
}
request.fetch_all(executor).await
}
pub(crate) async fn update_note_content<'a, E>(executor: E, note_id: NoteId, content: &str) -> SqlResult<()>
where
E: Executor<'a, Database = Sqlite>,
{
let update_note_content_sql = "UPDATE notes SET content = ? WHERE note_id = ?";
let count = sqlx::query(update_note_content_sql)
.bind(content)
.bind(&*note_id)
.execute(executor)
.await?
.rows_affected();
match count {
1 => Ok(()),
_ => Err(sqlx::Error::RowNotFound),
}
}
pub(crate) fn create_unique_root_note() -> NewNote {
NewNoteBuilder::default()
.uuid(friendly_id::create())
.content("".to_string())
.notetype("root".to_string())
.build()
.unwrap()
}
pub(crate) fn create_new_page_for(title: &str, slug: &str, note_id: NoteId) -> NewPage {
NewPageBuilder::default()
.slug(slug.to_string())
.title(title.to_string())
.note_id(*note_id)
.build()
.unwrap()
}
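
A small, hypothetical test of the helper above (it assumes the i64-backed NoteId from this side of the diff; the test itself is not in either commit):

#[test]
fn new_page_builder_carries_the_root_note_id() {
    let page = create_new_page_for("Today's Plans", "todays-plans", NoteId(42));
    assert_eq!(page.slug, "todays-plans");
    assert_eq!(page.title, "Today's Plans");
    assert_eq!(page.note_id, 42);
}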
// Given the references supplied, and the references found in the datastore,
// return a list of the references not found in the datastore.
pub(crate) fn diff_references(references: &[String], found_references: &[PageTitles]) -> Vec<String> {
let all: HashSet<String> = references.iter().cloned().collect();
let found: HashSet<String> = found_references.iter().map(|r| r.title.clone()).collect();
all.difference(&found).cloned().collect()
}
View File
@ -1,149 +1,57 @@
use chrono::{DateTime, Utc};
use derive_builder::Builder;
use friendly_id;    use serde::{Deserialize, Serialize};
use shrinkwraprs::Shrinkwrap;
use sqlx::{self, FromRow};
#[derive(Shrinkwrap, Copy, Clone)]
pub(crate) struct PageId(pub i64);

#[derive(Shrinkwrap, Copy, Clone)]
pub(crate) struct NoteId(pub i64);

#[derive(Shrinkwrap, Copy, Clone)]
pub(crate) struct ParentId(pub i64);

/// A RawPage is what this layer of the API returns when requesting a
/// page. Note that usually what you'll get back is the RawPage and a
/// Vec<RawNote>. It's the next level's responsibility to turn that
/// into a proper tree.
#[derive(Clone, Serialize, Deserialize, Debug, FromRow)]
pub struct RawPage {
    pub id: i64,
    pub slug: String,
    pub title: String,
    pub note_id: i64,

// Kasten is German for "Box," and is used both because this is
// supposed to be a Zettelkasten, and because "Box" is a heavily
// reserved word in Rust. So, for that matter, are "crate" and
// "cargo," "cell," and so forth. If I'd wanted to go the Full
// Noguchi, I guess I could have used "envelope."

// In order to prevent arbitrary enumeration tokens from getting into
// the database, the private layer takes a very hard line on insisting
// that everything sent TO the datastore come in the enumerated
// format, and everything coming OUT of the database be converted back
// into an enumeration. These macros instantiate those objects
// and their conversions to/from strings.
macro_rules! build_conversion_enums {
    ( $ty:ident, $( $s:literal => $x:ident, )*) => {
        #[derive(Clone, Debug, PartialEq, Eq)]
        pub enum $ty {
            $( $x ), *
        }

        impl From<String> for $ty {
            fn from(kind: String) -> Self {
                match &kind[..] {
                    $( $s => $ty::$x, )*
                    _ => panic!("Illegal value in $ty database: {}", kind),
                }
            }
        }

        impl From<$ty> for String {
            fn from(kind: $ty) -> Self {
                match kind {
                    $( $ty::$x => $s ),*
                }
                .to_string()
            }
        }

        impl $ty {
            pub fn to_string(&self) -> String {
                String::from(self.clone())
            }
        }
    };
}

#[derive(Shrinkwrap, Clone)]
pub(crate) struct NoteId(pub String);

#[derive(Shrinkwrap, Clone)]
pub(crate) struct ParentId(pub String);
// The different kinds of objects we support.
build_conversion_enums!(
NoteKind,
"box" => Kasten,
"note" => Note,
"resource" => Resource,
);
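
A hypothetical round-trip check of what the macro generates for this enum (not part of the commit):

#[test]
fn notekind_round_trips_through_its_string_form() {
    assert_eq!(NoteKind::from("box".to_string()), NoteKind::Kasten);
    assert_eq!(NoteKind::Kasten.to_string(), "box");
    assert_eq!(String::from(NoteKind::Note), "note");
}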
// The different kinds of relationships we support. I do not yet
// know how to ensure that there is a maximum of one (a ->
// b)::Direct, and that for any (a -> b) there is no (b <- a), that
// is, nor, for that matter, do I know how to prevent cycles.
build_conversion_enums!(
RelationshipKind,
"direct" => Direct,
"reference" => Reference,
"embed" => Embed,
);
build_conversion_enums!(
KastenRelationshipKind,
"kasten" => Kasten,
"unacked" => Unacked,
"cancelled" => Cancelled,
);
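
The comment above the relationship kinds notes that cycle prevention is still an open question. One possible shape for such a guard, sketched purely as an illustration (none of these names exist in the commit): walk the ancestor chain of the proposed parent and refuse the edge if it ever reaches the proposed child.

use std::collections::HashMap;

// `parents` maps a note id to its single, direct parent id.
fn would_create_cycle(parents: &HashMap<String, String>, parent: &str, child: &str) -> bool {
    let mut cursor = Some(parent.to_string());
    while let Some(current) = cursor {
        if current == child {
            // The proposed child is already an ancestor of the proposed parent,
            // so adding (parent -> child) would close a loop.
            return true;
        }
        cursor = parents.get(&current).cloned();
    }
    false
}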
// A Note is the base construct of our system. It represents a
// single note and contains information about its parent and location.
// This is the object *retrieved* from the database.
#[derive(Clone, Debug, FromRow)]
pub(crate) struct RowNote {
pub id: String,
pub parent_id: Option<String>,
pub content: String,
pub kind: String,
pub location: i64,
pub creation_date: DateTime<Utc>, pub creation_date: DateTime<Utc>,
pub updated_date: DateTime<Utc>, pub updated_date: DateTime<Utc>,
pub lastview_date: DateTime<Utc>, pub lastview_date: DateTime<Utc>,
pub deleted_date: Option<DateTime<Utc>>, pub deleted_date: Option<DateTime<Utc>>,
} }
/// A Note as it's returned from the private layer. This is /// A RawNote is what this layer of the API returns
/// provided to ensure that the NoteKind is an enum, and that we /// when requesting a note.
/// control the list of possible values stored in the database. #[derive(Clone, Serialize, Deserialize, Debug, FromRow)]
#[derive(Clone, Debug)] pub struct RawNote {
pub struct Note { pub id: i64,
pub id: String, pub uuid: String,
pub parent_id: Option<String>, pub parent_id: i64,
pub parent_uuid: String,
pub content: String, pub content: String,
pub kind: NoteKind, pub position: i64,
pub location: i64, pub notetype: String,
pub creation_date: DateTime<Utc>, pub creation_date: DateTime<Utc>,
pub updated_date: DateTime<Utc>, pub updated_date: DateTime<Utc>,
pub lastview_date: DateTime<Utc>, pub lastview_date: DateTime<Utc>,
pub deleted_date: Option<DateTime<Utc>>, pub deleted_date: Option<DateTime<Utc>>,
} }
impl From<RowNote> for Note { /// The interface for passing a new page to the store.
fn from(note: RowNote) -> Self { #[derive(Clone, Serialize, Deserialize, Debug, Builder)]
Self { pub struct NewPage {
id: note.id, pub slug: String,
parent_id: note.parent_id, pub title: String,
content: note.content, pub note_id: i64,
kind: NoteKind::from(note.kind),
location: note.location,
creation_date: note.creation_date,
updated_date: note.updated_date,
lastview_date: note.lastview_date,
deleted_date: note.deleted_date,
}
}
}
/// A new Note object as it's inserted into the system. It has no
/// parent or location information; those are data relative to the
/// parent, and must be provided by the client. In the case of a
/// Kasten, no location or parent is necessary.
#[derive(Clone, Debug, Builder)]
pub struct NewNote {
#[builder(default = r#"friendly_id::create()"#)]
pub id: String,
pub content: String,
#[builder(default = r#"NoteKind::Note"#)]
pub kind: NoteKind,
#[builder(default = r#"chrono::Utc::now()"#)] #[builder(default = r#"chrono::Utc::now()"#)]
pub creation_date: DateTime<Utc>, pub creation_date: DateTime<Utc>,
#[builder(default = r#"chrono::Utc::now()"#)] #[builder(default = r#"chrono::Utc::now()"#)]
@ -154,92 +62,58 @@ pub struct NewNote {
pub deleted_date: Option<DateTime<Utc>>, pub deleted_date: Option<DateTime<Utc>>,
} }
impl From<NewNote> for Note { /// The interface for passing a new note to the store.
/// Only used for building new kastens, so the decision- making is #[derive(Clone, Serialize, Deserialize, Debug, Builder)]
/// limited to kasten-level things, like pointing to self and pub struct NewNote {
/// having a location of zero. #[builder(default = r#""".to_string()"#)]
fn from(note: NewNote) -> Self { pub uuid: String,
Self {
id: note.id,
parent_id: None,
content: note.content,
kind: note.kind,
location: 0,
creation_date: note.creation_date,
updated_date: note.updated_date,
lastview_date: note.lastview_date,
deleted_date: note.deleted_date,
}
}
}
#[derive(Clone, Debug, FromRow)]
pub(crate) struct JustId {
pub id: String,
}
#[derive(Clone, Debug, FromRow)]
pub(crate) struct PageTitle {
pub id: String,
pub content: String, pub content: String,
#[builder(default = r#""note".to_string()"#)]
pub notetype: String,
#[builder(default = r#"chrono::Utc::now()"#)]
pub creation_date: DateTime<Utc>,
#[builder(default = r#"chrono::Utc::now()"#)]
pub updated_date: DateTime<Utc>,
#[builder(default = r#"chrono::Utc::now()"#)]
pub lastview_date: DateTime<Utc>,
#[builder(default = r#"None"#)]
pub deleted_date: Option<DateTime<Utc>>,
} }
#[derive(Clone, Debug, FromRow)] #[derive(Clone, Serialize, Deserialize, Debug, FromRow)]
pub(crate) struct JustSlugs {
pub slug: String,
}
#[derive(Clone, Serialize, Deserialize, Debug, FromRow)]
pub(crate) struct JustTitles {
title: String,
}
#[derive(Clone, Serialize, Deserialize, Debug, FromRow)]
pub(crate) struct JustId {
pub id: i64,
}
#[derive(Clone, Serialize, Deserialize, Debug, FromRow)]
pub(crate) struct PageTitles {
pub id: i64,
pub title: String,
}
#[derive(Clone, Serialize, Deserialize, Debug, FromRow)]
pub(crate) struct NoteRelationship {
pub parent_id: i64,
pub note_id: i64,
pub position: i64,
pub nature: String,
}
#[derive(Clone, Serialize, Deserialize, Debug, FromRow)]
pub(crate) struct RowCount { pub(crate) struct RowCount {
pub count: i64, pub count: i64,
} }
#[derive(Clone, Debug, FromRow)]
pub(crate) struct NoteRelationshipRow {
pub parent_id: String,
pub note_id: String,
pub location: i64,
pub kind: String,
}
#[derive(Clone, Debug)]
pub struct NoteRelationship {
pub parent_id: String,
pub note_id: String,
pub location: i64,
pub kind: RelationshipKind,
}
impl From<NoteRelationshipRow> for NoteRelationship {
fn from(rel: NoteRelationshipRow) -> Self {
Self {
parent_id: rel.parent_id,
note_id: rel.note_id,
location: rel.location,
kind: RelationshipKind::from(rel.kind),
}
}
}
#[derive(Clone, Debug, FromRow)]
pub(crate) struct KastenRelationshipRow {
pub note_id: String,
pub kasten_id: String,
pub kind: String,
}
#[derive(Clone, Debug)]
pub struct KastenRelationship {
pub note_id: String,
pub kasten_id: String,
pub kind: KastenRelationshipKind,
}
impl From<KastenRelationshipRow> for KastenRelationship {
fn from(rel: KastenRelationshipRow) -> Self {
Self {
kasten_id: rel.kasten_id,
note_id: rel.note_id,
kind: KastenRelationshipKind::from(rel.kind),
}
}
}
#[cfg(test)]
mod tests {
    use super::*;
@ -247,8 +121,11 @@ mod tests {
    #[test]
    fn can_build_new_note() {
        let now = chrono::Utc::now();

        let newnote = NewNoteBuilder::default().content("bar".to_string()).build().unwrap();
        assert!(newnote.id.len() > 4);

        let newnote = NewNoteBuilder::default()
            .uuid("foo".to_string())
            .content("bar".to_string())
            .build()
            .unwrap();

        assert!((newnote.creation_date - now).num_minutes() < 1);
        assert!((newnote.updated_date - now).num_minutes() < 1);
        assert!((newnote.lastview_date - now).num_minutes() < 1);
View File
@ -1,15 +0,0 @@
.PHONY: all
all: help
.PHONY: help
help:
@M=$$(perl -ne 'm/((\w|-)*):.*##/ && print length($$1)."\n"' Makefile | \
sort -nr | head -1) && \
perl -ne "m/^((\w|-)*):.*##\s*(.*)/ && print(sprintf(\"%s: %s\t%s\n\", \$$1, \" \"x($$M-length(\$$1)), \$$3))" Makefile
# This is necessary because I'm trying hard not to use
# any `nightly` features. But rustfmt is likely to be
# a `nightly-only` feature for a long time to come, so
# this is my hack.
fmt: ## Format the code, using the most modern version of rustfmt
rustup run nightly cargo fmt
View File
@ -12,99 +12,67 @@
mod make_tree;
mod structs;

use crate::make_tree::{make_backreferences, make_note_tree};    use nm_store::{NoteStore, NoteStoreError, NewNote};
use crate::structs::{Note, Page};    use crate::structs::Page;
use chrono::{DateTime, Utc};    use crate::make_tree::make_tree;
use nm_store::{NewNote, NoteStore, NoteStoreError};
#[derive(Debug)]
pub struct Notesmachine(pub(crate) NoteStore);

type Result<T> = core::result::Result<T, NoteStoreError>;
pub fn make_page(foundtree: &Note, backreferences: Vec<Vec<Note>>) -> Page {
Page {
slug: foundtree.id,
title: foundtree.content,
creation_date: foundtree.creation_date,
updated_date: foundtree.updated_date,
lastview_date: foundtree.lastview_date,
deleted_date: foundtree.deleted_date,
notes: foundtree.children,
backreferences: backreferences,
}
}
impl Notesmachine {
    pub async fn new(url: &str) -> Result<Self> {
        let notestore = NoteStore::new(url).await?;
        Ok(Notesmachine(notestore))
    }
    pub async fn get_page_via_slug(&self, slug: &str) -> Result<Page> {
        let (rawtree, rawbackreferences) = self.0.get_kasten_by_slug(slug).await?;
        Ok(make_page(
            &make_note_tree(&rawtree),
            make_backreferences(&rawbackreferences),
        ))
    }

    pub async fn navigate_via_slug(&self, slug: &str) -> Result<Page> {
        let (rawpage, rawnotes) = self.0.get_page_by_slug(slug).await?;
        Ok(make_tree(&rawpage, &rawnotes))
    }

    pub async fn get_page(&self, title: &str) -> Result<Page> {
        let (rawtree, rawbackreferences) = self.0.get_kasten_by_title(title).await?;
        Ok(make_page(
            &make_note_tree(&rawtree),
            make_backreferences(&rawbackreferences),
        ))
    }

    pub async fn get_box(&self, title: &str) -> Result<Page> {
        let (rawpage, rawnotes) = self.0.get_page_by_title(title).await?;
        Ok(make_tree(&rawpage, &rawnotes))
    }
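
A hypothetical calling sketch for the two lookup paths above; the page title is made up, and the in-memory SQLite URL is an assumption rather than something shown in this hunk:

async fn demo() -> Result<()> {
    // Assumed URL; any sqlx-compatible SQLite connection string would do here.
    let nm = Notesmachine::new("sqlite://:memory:").await?;
    let page = nm.get_page("Inbox").await?;
    println!("{} has {} top-level notes", page.title, page.notes.len());
    Ok(())
}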
    // TODO:
    // You should be able to:
    // Add a note that has no parent (gets added to "today")
    // Add a note that specifies only the page (gets added to page/root)
    // Add a note that has no location/position (gets tacked onto the end of the above)
    // Add a note that specifies the date of creation.
pub async fn add_note(&self, note: &NewNote) -> Result<String> { pub async fn add_note(&self, note: &NewNote) -> Result<()> {
let mut note = note.clone(); todo!();
if note.parent_id.is_none() {
note.parent_id = self.get_today_page().await?;
}
Ok(self.0.add_note(&note))
} }
// pub async fn reference_note(&self, note_id: &str, new_parent_id: &str, new_location: i64) -> Result<()> { pub async fn add_note_to_page(&self, note: &NewNote) -> Result<()> {
// todo!(); todo!();
// } }
//
// pub async fn embed_note(&self, note_id: &str, new_parent_id: &str, new_location: i64) -> Result<()> {
// todo!();
// }
pub async fn move_note( pub async fn add_note_to_today(&self, note: &NewNote) -> Result<()> {
&self, todo!();
note_id: &str, }
old_parent_id: &str,
new_parent_id: &str, pub async fn reference_note(&self, note_id: &str, new_parent_id: &str, new_position: i64) -> Result<()> {
location: i64, todo!();
) -> Result<()> { }
self.0.move_note(note_id, old_parent_id, new_parent_id, location).await
pub async fn embed_note(&self, note_id: &str, new_parent_id: &str, new_position: i64) -> Result<()> {
todo!();
}
pub async fn move_note(&self, note_id: &str, old_parent_id: &str, new_parent_id: &str, position: i64) -> Result<()> {
todo!();
} }
pub async fn update_note(&self, note_id: &str, content: &str) -> Result<()> { pub async fn update_note(&self, note_id: &str, content: &str) -> Result<()> {
self.0.update_note_content(note_id, content).await todo!();
} }
pub async fn delete_note(&self, note_id: &str, parent_note_id: &str) -> Result<()> { pub async fn delete_note(&self, note_id: &str) -> Result<()> {
self.0.delete_note(note_id, parent_note_id).await todo!();
}
}
// Private stuff
impl Notesmachine {
async fn get_today_page(&self) -> Result<String> {
let title = chrono::Utc::now().format("%F").to_string();
let (rawtree, _) = self.0.get_kasten_by_title(title).await?;
Ok(rawtree.id)
} }
} }
@ -125,7 +93,7 @@ mod tests {
#[tokio::test(threaded_scheduler)] #[tokio::test(threaded_scheduler)]
async fn fetching_unfound_page_by_slug_works() { async fn fetching_unfound_page_by_slug_works() {
let notesmachine = fresh_inmemory_database().await; let notesmachine = fresh_inmemory_database().await;
let unfoundpage = notesmachine.get_page_via_slug("nonexistent-slug").await; let unfoundpage = notesmachine.navigate_via_slug("nonexistent-slug").await;
assert!(unfoundpage.is_err()); assert!(unfoundpage.is_err());
} }
@ -133,7 +101,7 @@ mod tests {
async fn fetching_unfound_page_by_title_works() { async fn fetching_unfound_page_by_title_works() {
let title = "Nonexistent Page"; let title = "Nonexistent Page";
let notesmachine = fresh_inmemory_database().await; let notesmachine = fresh_inmemory_database().await;
let newpageresult = notesmachine.get_page(&title).await; let newpageresult = notesmachine.get_box(&title).await;
assert!(newpageresult.is_ok(), "{:?}", newpageresult); assert!(newpageresult.is_ok(), "{:?}", newpageresult);
let newpage = newpageresult.unwrap(); let newpage = newpageresult.unwrap();
@ -144,3 +112,4 @@ mod tests {
assert_eq!(newpage.root_note.children.len(), 0, "{:?}", newpage.root_note.children); assert_eq!(newpage.root_note.children.len(), 0, "{:?}", newpage.root_note.children);
} }
} }
View File
@ -1,12 +1,8 @@
use crate::structs::{Note, Page};
use nm_store::NoteKind; use nm_store::{RawNote, RawPage};
fn make_note_tree_from(rawnotes: &[nm_store::Note], root_id: &str) -> Note { fn make_note_tree(rawnotes: &[RawNote], root: i64) -> Note {
let the_note = { let the_note = rawnotes.iter().find(|note| note.id == root).unwrap().clone();
let foundroots: Vec<&nm_store::Note> = rawnotes.iter().filter(|note| note.id == root_id).collect();
debug_assert!(foundroots.len() == 1);
foundroots.iter().next().unwrap().clone()
};
// The special case of the root node must be filtered out here to // The special case of the root node must be filtered out here to
// prevent the first pass from smashing the stack in an infinite // prevent the first pass from smashing the stack in an infinite
@ -15,61 +11,35 @@ fn make_note_tree_from(rawnotes: &[nm_store::Note], root_id: &str) -> Note {
// are faster. // are faster.
let mut children = rawnotes let mut children = rawnotes
.iter() .iter()
.filter(|note| note.parent_id.is_some() && note.parent_id.unwrap() == root_id && note.id != the_note.id) .filter(|note| note.parent_id == root && note.id != root)
.map(|note| make_note_tree_from(rawnotes, &note.id)) .map(|note| make_note_tree(rawnotes, note.id))
.collect::<Vec<Note>>(); .collect::<Vec<Note>>();
children.sort_unstable_by(|a, b| a.location.cmp(&b.location)); children.sort_unstable_by(|a, b| a.position.cmp(&b.position));
Note { Note {
id: the_note.id, uuid: the_note.uuid,
parent_id: the_note.parent_id, parent_uuid: the_note.parent_uuid,
content: the_note.content, content: the_note.content,
kind: the_note.kind.to_string(), notetype: the_note.notetype,
location: the_note.location, position: the_note.position,
creation_date: the_note.creation_date, creation_date: the_note.creation_date,
updated_date: the_note.updated_date, updated_date: the_note.updated_date,
lastview_date: the_note.updated_date, lastview_date: the_note.updated_date,
deleted_date: the_note.deleted_date, deleted_date: the_note.deleted_date,
children: children,
}
}
pub(crate) fn make_note_tree(rawnotes: &[nm_store::Note]) -> Note {
let the_root = {
let foundroots: Vec<&nm_store::Note> = rawnotes.iter().filter(|note| note.kind == NoteKind::Kasten).collect();
debug_assert!(foundroots.len() == 1);
foundroots.iter().next().unwrap().clone()
};
make_note_tree_from(&rawnotes, &the_root.id)
}
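
To show the recursion's effect without dragging in the full Note type, here is a self-contained toy version (all names hypothetical, not part of either commit):

#[derive(Debug)]
struct Flat { id: i64, parent_id: i64, position: i64 }

#[derive(Debug)]
struct TreeNode { id: i64, position: i64, children: Vec<TreeNode> }

fn build(flat: &[Flat], root: i64) -> TreeNode {
    let me = flat.iter().find(|n| n.id == root).unwrap();
    let mut children: Vec<TreeNode> = flat
        .iter()
        // Exclude the root itself so the recursion terminates (mirrors the guard above).
        .filter(|n| n.parent_id == root && n.id != root)
        .map(|n| build(flat, n.id))
        .collect();
    children.sort_unstable_by(|a, b| a.position.cmp(&b.position));
    TreeNode { id: me.id, position: me.position, children }
}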
fn add_child(rawnotes: &[nm_store::Note], acc: &mut Vec<Note>, note_id: &str) -> Vec<Note> {
let child = rawnotes
.iter()
.find(|note| note.parent_id.is_some() && note.parent_id.unwrap() == note_id);
if let Some(c) = child {
acc.push(Note {
id: c.id,
parent_id: Some(note_id.to_string()),
content: c.content,
kind: c.kind.to_string(),
location: c.location,
creation_date: c.creation_date,
updated_date: c.updated_date,
lastview_date: c.updated_date,
deleted_date: c.deleted_date,
children: vec![], children: vec![],
});
add_child(rawnotes, acc, &c.id)
} else {
acc.to_vec()
} }
} }
pub(crate) fn make_backreferences(rawnotes: &[nm_store::Note]) -> Vec<Vec<Note>> {
    rawnotes
        .iter()
        .filter(|note| note.parent_id.is_none() && note.kind == NoteKind::Kasten)
        .map(|root| add_child(rawnotes, &mut Vec::<Note>::new(), &root.id))
        .collect()
}

pub(crate) fn make_tree(rawpage: &RawPage, rawnotes: &[RawNote]) -> Page {
    let the_page = rawpage.clone();

    Page {
        slug: the_page.slug,
        title: the_page.title,
        creation_date: the_page.creation_date,
        updated_date: the_page.updated_date,
        lastview_date: the_page.updated_date,
        deleted_date: the_page.deleted_date,
        root_note: make_note_tree(rawnotes, rawpage.note_id),
    }
}
View File
@ -1,12 +1,13 @@
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};

#[derive(Clone, Debug)]    #[derive(Clone, Serialize, Deserialize, Debug)]
pub struct Note {
    pub id: String,    pub uuid: String,
    pub parent_id: Option<String>,    pub parent_uuid: String,
    pub content: String,
    pub location: i64,    pub position: i64,
    pub kind: String,    pub notetype: String,
    pub creation_date: DateTime<Utc>,
    pub updated_date: DateTime<Utc>,
    pub lastview_date: DateTime<Utc>,
@ -14,7 +15,7 @@ pub struct Note {
    pub children: Vec<Note>,
}

#[derive(Clone, Debug)]    #[derive(Clone, Serialize, Deserialize, Debug)]
pub struct Page {
    pub slug: String,
    pub title: String,
@ -22,6 +23,5 @@ pub struct Page {
    pub updated_date: DateTime<Utc>,
    pub lastview_date: DateTime<Utc>,
    pub deleted_date: Option<DateTime<Utc>>,
    pub notes: Vec<Note>,    pub root_note: Note,
    pub backreferences: Vec<Vec<Note>>,
}