Compare commits

...

2 Commits

Author SHA1 Message Date
Elf M. Sternberg 9337b98ad3 REFACTOR Again! note->note and note->kasten are now separate tables
This was getting semantically confusing, so I decided to short
circuit the whole mess by separating the two.  The results are
promising.  It does mean that deleting a note means traversing
two tables to clean out all the cruft, which is *sigh*, but it
also means that the tree is stored in one table and the graph in
another, giving us a much better separation of concerns down at
the SQL layer.
2020-11-04 17:53:25 -08:00
Elf M. Sternberg 1bbe8c1ee8 Completely revamped the internal structures.
This removes the page/note dichotomy, since it wasn't working
as well as I'd hoped.  The discipline required now is higher
where the data store layer is concerned, but the actual structures
are smaller and more efficient.
2020-11-04 12:54:17 -08:00
10 changed files with 1340 additions and 257 deletions

View File

@@ -0,0 +1,10 @@
The thing of it is, we have two kinds of notes:
1. This layer of the system will handle broken/missing position issues.
2. The client layer of the system will ensure that a parent is provided.
3. The notes retrieved via the CTE carry their parenting and location
information.
4. Notes put *into* the system have parent and location provided
separately (see the sketch after this list).
5. Clients do not specify the ids of notes put into the system.
6. Retrieval by slug must test for is-a-box.
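
A minimal sketch of the call shape this discipline implies, assuming the
add_note / NewNoteBuilder API introduced elsewhere in this changeset (the
parent id and content here are made up):

    let note = NewNoteBuilder::default()
        .content("Buy milk".to_string())
        .build()
        .unwrap(); // the id is generated by the builder, never supplied by the client
    let note_id = store.add_note(&note, "groceries-box-id", 0).await?; // parent and location passed separately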

View File

@@ -1,12 +1,12 @@
mod errors;
// mod reference_parser;
mod reference_parser;
mod store;
mod store_private;
mod structs;
pub use crate::errors::NoteStoreError;
pub use crate::store::NoteStore;
pub use crate::structs::{RawZettle};
pub use crate::structs::{Note, NoteKind};
#[cfg(test)]
mod tests {
@@ -22,17 +22,16 @@ mod tests {
assert!(reset.is_ok(), "{:?}", reset);
storagepool
}
// Request for the page by slug. If the page exists, return it.
// If the page doesn't, return NotFound
//
#[tokio::test(threaded_scheduler)]
async fn fetching_unfound_page_by_slug_works() {
let storagepool = fresh_inmemory_database().await;
let foundkasten = storagepool.get_kasten_by_slug("nonexistent-kasten").await.unwrap();
assert_eq!(foundkasten.len(), 0, "{:?}", foundkasten);
}
// Request for the page by slug. If the page exists, return it.
// If the page doesn't, return NotFound
//
#[tokio::test(threaded_scheduler)]
async fn fetching_unfound_page_by_slug_works() {
let storagepool = fresh_inmemory_database().await;
let foundkasten = storagepool.get_kasten_by_slug("nonexistent-kasten").await;
assert!(foundkasten.is_err());
}
// Request for the page by title. If the page exists, return it.
// If the page doesn't exist, create it then return it anyway.
@@ -46,64 +45,74 @@ mod tests {
let newpageresult = storagepool.get_kasten_by_title(&title).await;
assert!(newpageresult.is_ok(), "{:?}", newpageresult);
let newpage = newpageresult.unwrap();
let (newpages, _) = newpageresult.unwrap();
assert_eq!(newpages.len(), 1);
let newpage = newpages.iter().next().unwrap();
assert_eq!(newpage.content, title, "{:?}", newpage.content);
assert_eq!(newpage.id, "nonexistent-page");
assert_eq!(newpage.children.len(), 0);
assert_eq!(newpage.kind, "page");
assert_eq!(newpage.kind, NoteKind::Kasten);
assert!((newpage.creation_date - now).num_minutes() < 1);
assert!((newpage.updated_date - now).num_minutes() < 1);
assert!((newpage.lastview_date - now).num_minutes() < 1);
assert!(newpage.deleted_date.is_none());
}
//
// fn make_new_note(content: &str) -> structs::NewNote {
// structs::NewNoteBuilder::default()
// .content(content.to_string())
// .build()
// .unwrap()
// }
//
// #[tokio::test(threaded_scheduler)]
// async fn can_nest_notes() {
// let title = "Nonexistent Page";
// let storagepool = fresh_inmemory_database().await;
// let newpageresult = storagepool.get_page_by_title(&title).await;
// let newpage = newpageresult.unwrap();
//
// let root = &newnotes[0];
//
// let note1 = make_new_note("1");
// let note1_uuid = storagepool.insert_nested_note(&note1, &root.uuid, 0).await;
// assert!(note1_uuid.is_ok(), "{:?}", note1_uuid);
// let note1_uuid = note1_uuid.unwrap();
//
// let note2 = make_new_note("2");
// let note2_uuid = storagepool.insert_nested_note(&note2, &root.uuid, 0).await;
// assert!(note2_uuid.is_ok(), "{:?}", note2_uuid);
// let note2_uuid = note2_uuid.unwrap();
//
// let note3 = make_new_note("3");
// let note3_uuid = storagepool.insert_nested_note(&note3, &note1_uuid, 0).await;
// assert!(note3_uuid.is_ok(), "{:?}", note3_uuid);
// let _note3_uuid = note3_uuid.unwrap();
//
// let note4 = make_new_note("4");
// let note4_uuid = storagepool.insert_nested_note(&note4, &note2_uuid, 0).await;
// assert!(note4_uuid.is_ok(), "{:?}", note4_uuid);
// let _note4_uuid = note4_uuid.unwrap();
//
// let newpageresult = storagepool.get_page_by_title(&title).await;
// let (newpage, newnotes) = newpageresult.unwrap();
//
// assert_eq!(newpage.title, title, "{:?}", newpage.title);
// assert_eq!(newpage.slug, "nonexistent-page");
//
// assert_eq!(newnotes.len(), 5);
// assert_eq!(newnotes[0].notetype, "root");
// assert_eq!(newpage.note_id, newnotes[0].id);
// }
fn make_new_note(content: &str) -> structs::NewNote {
structs::NewNoteBuilder::default()
.content(content.to_string())
.build()
.unwrap()
}
#[tokio::test(threaded_scheduler)]
async fn can_nest_notes() {
let title = "Nonexistent Page";
let storagepool = fresh_inmemory_database().await;
let newpageresult = storagepool.get_kasten_by_title(&title).await;
assert!(newpageresult.is_ok(), "{:?}", newpageresult);
let (newpages, _) = newpageresult.unwrap();
assert_eq!(newpages.len(), 1);
let root = &newpages[0];
// root <- 1 <- 3
// <- 2 <- 4
let note1 = make_new_note("1");
let note1_id = storagepool.add_note(&note1, &root.id, 0).await;
assert!(note1_id.is_ok(), "{:?}", note1_id);
let note1_id = note1_id.unwrap();
let note2 = make_new_note("2");
let note2_id = storagepool.add_note(&note2, &root.id, 0).await;
assert!(note2_id.is_ok(), "{:?}", note2_id);
let note2_id = note2_id.unwrap();
let note3 = make_new_note("3");
let note3_id = storagepool.add_note(&note3, &note1_id, 0).await;
assert!(note3_id.is_ok(), "{:?}", note3_id);
let _note3_id = note3_id.unwrap();
let note4 = make_new_note("4");
let note4_id = storagepool.add_note(&note4, &note2_id, 0).await;
assert!(note4_id.is_ok(), "{:?}", note4_id);
let _note4_id = note4_id.unwrap();
let newpageresult = storagepool.get_kasten_by_title(&title).await;
assert!(newpageresult.is_ok(), "{:?}", newpageresult);
let (newpages, _) = newpageresult.unwrap();
assert_eq!(newpages.len(), 5);
let newroot = newpages.iter().next().unwrap();
assert_eq!(newroot.content, title, "{:?}", newroot.content);
assert_eq!(newroot.id, "nonexistent-page");
assert_eq!(newpages[1].parent_id, Some(newroot.id.clone()));
assert_eq!(newpages[2].parent_id, Some(newpages[1].id.clone()));
}
}

View File

@@ -0,0 +1,134 @@
use comrak::nodes::{AstNode, NodeValue};
use comrak::{parse_document, Arena, ComrakOptions};
use lazy_static::lazy_static;
use regex::bytes::Regex as BytesRegex;
use regex::Regex;
pub struct Finder(pub Vec<String>);
impl Finder {
pub fn new() -> Self {
Finder(Vec::new())
}
fn iter_nodes<'a, F>(&mut self, node: &'a AstNode<'a>, f: &F)
where
F: Fn(&'a AstNode<'a>) -> Option<Vec<String>>,
{
if let Some(mut v) = f(node) {
self.0.append(&mut v);
}
for c in node.children() {
self.iter_nodes(c, f);
}
}
}
fn find_links(document: &str) -> Vec<String> {
let arena = Arena::new();
let mut finder = Finder::new();
let root = parse_document(&arena, document, &ComrakOptions::default());
finder.iter_nodes(root, &|node| {
lazy_static! {
static ref RE_REFERENCES: BytesRegex = BytesRegex::new(r"(\[\[([^\]]+)\]\]|(\#[:\w\-]+))").unwrap();
}
match &node.data.borrow().value {
NodeValue::Text(ref text) => Some(
RE_REFERENCES
.captures_iter(text)
.filter_map(|t| t.get(1))
.map(|t| String::from_utf8_lossy(t.as_bytes()).to_string())
.filter(|s| !s.is_empty())
.collect(),
),
_ => None,
}
});
finder.0
}
fn recase(title: &str) -> String {
lazy_static! {
static ref RE_PASS1: Regex = Regex::new(r"(?P<s>.)(?P<n>[A-Z][a-z]+)").unwrap();
static ref RE_PASS2: Regex = Regex::new(r"(?P<s>[[:lower:]]|\d)(?P<n>[[:upper:]])").unwrap();
static ref RE_PASS4: Regex = Regex::new(r"(?P<s>[a-z])(?P<n>\d)").unwrap();
static ref RE_PASS3: Regex = Regex::new(r"(:|_|-| )+").unwrap();
}
// This should panic if misused, so... :-)
let pass = title.to_string();
let pass = pass.strip_prefix("#").unwrap();
let pass = RE_PASS1.replace_all(&pass, "$s $n");
let pass = RE_PASS4.replace_all(&pass, "$s $n");
let pass = RE_PASS2.replace_all(&pass, "$s $n");
RE_PASS3.replace_all(&pass, " ").trim().to_string()
}
fn build_page_titles(references: &[String]) -> Vec<String> {
references
.iter()
.filter_map(|s| match s.chars().next() {
Some('#') => Some(recase(s)),
Some('[') => Some(s.strip_prefix("[[").unwrap().strip_suffix("]]").unwrap().to_string()),
Some(_) => Some(s.clone()),
_ => None,
})
.filter(|s| !s.is_empty())
.collect()
}
pub(crate) fn build_references(content: &str) -> Vec<String> {
build_page_titles(&find_links(content))
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn finds_expected() {
let sample = r###"
# Header
- NotATest 1
- [[Test 2]]
- #Test3
- #TestFourAndSo
- #Test-six-is-six
- #recipe:entree
- #
- #-_-
- #--Prefixed
- [[]]
But *[[Test Seven]]* isn't. And *#Test_Eight____is_Messed-up*
And [[Test Bite Me]] is the worst.
Right? [[
]]
"###;
let res = build_page_titles(&find_links(sample));
let expected = vec![
"Test 2",
"Test 3",
"Test Four And So",
"Test six is six",
"recipe entree",
"Prefixed",
"Test Seven",
"Test Eight is Messed up",
"Test Bite Me",
];
assert!(res.iter().eq(expected.iter()), "{:?}", res);
}
#[test]
fn doesnt_crash_on_empty() {
let sample = "";
let res = build_page_titles(&find_links(sample));
let expected: Vec<String> = vec![];
assert!(res.iter().eq(expected.iter()), "{:?}", res);
}
}

View File

@@ -1,34 +1,48 @@
DROP TABLE IF EXISTS zetteln;
DROP TABLE IF EXISTS zettle_relationships;
DROP INDEX IF EXISTS zetteln_ids;
DROP TABLE IF EXISTS notes;
DROP TABLE IF EXISTS note_relationships;
DROP TABLE IF EXISTS note_kasten_relationships;
DROP TABLE IF EXISTS favorites;
CREATE TABLE zetteln (
CREATE TABLE notes (
id TEXT NOT NULL PRIMARY KEY,
content TEXT NOT NULL,
kind TEXT NOT NULL,
location INTEGER NOT NULL,
creation_date DATETIME NOT NULL,
updated_date DATETIME NOT NULL,
lastview_date DATETIME NOT NULL,
deleted_date DATETIME NULL
);
CREATE INDEX zettle_ids ON zetteln (id);
CREATE INDEX note_ids ON notes (id);
CREATE TABLE favorites (
id TEXT NOT NULL,
location INTEGER NOT NULL,
FOREIGN KEY (id) REFERENCES zetteln (id) ON DELETE CASCADE
FOREIGN KEY (id) REFERENCES notes (id) ON DELETE CASCADE
);
CREATE TABLE zettle_relationships (
zettle_id TEXT NOT NULL,
-- This table represents the forest of data relating a kasten to its
-- collections of notes. The root is itself "a note," but the content
-- of that note will always be just the title of the kasten.
--
CREATE TABLE note_relationships (
note_id TEXT NOT NULL,
parent_id TEXT NOT NULL,
location INTEGER NOT NULL,
kind TEXT NOT NULL,
-- If either zettle disappears, we want all the edges to disappear as well.
FOREIGN KEY (zettle_id) REFERENCES zetteln (id) ON DELETE CASCADE,
FOREIGN KEY (parent_id) REFERENCES zetteln (id) ON DELETE CASCADE
-- If either note disappears, we want all the edges to disappear as well.
FOREIGN KEY (note_id) REFERENCES notes (id) ON DELETE CASCADE,
FOREIGN KEY (parent_id) REFERENCES notes (id) ON DELETE CASCADE
);
-- This table represents the graph of data relating notes to kastens.
--
CREATE TABLE note_kasten_relationships (
note_id TEXT NOT NULL,
kasten_id TEXT NOT NULL,
kind TEXT NOT NULL,
-- If either note disappears, we want all the edges to disappear as well.
FOREIGN KEY (note_id) REFERENCES notes (id) ON DELETE CASCADE,
FOREIGN KEY (kasten_id) REFERENCES notes (id) ON DELETE CASCADE
);
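-- Purely illustrative rows (not part of the migration; the ids, contents,
-- and the 'errands' kasten are made up) showing how the tree and the graph
-- split across the two relationship tables:
--
--   INSERT INTO notes (id, content, kind, location,
--                      creation_date, updated_date, lastview_date)
--   VALUES ('groceries', 'Groceries', 'box', 0,
--           datetime('now'), datetime('now'), datetime('now')),
--          ('note-1', 'Buy milk for [[Errands]]', 'note', 0,
--           datetime('now'), datetime('now'), datetime('now'));
--
--   -- Tree: note-1 sits directly under the groceries root.
--   INSERT INTO note_relationships (note_id, parent_id, location, kind)
--   VALUES ('note-1', 'groceries', 0, 'direct');
--
--   -- Graph: note-1 also references the 'errands' kasten, which must itself
--   -- exist as a row in notes for the foreign keys to hold.
--   INSERT INTO note_kasten_relationships (note_id, kasten_id, kind)
--   VALUES ('note-1', 'errands', 'kasten');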

View File

@@ -1,85 +0,0 @@
-- This is a cut-and-paste of the select_note_collection_from_title.sql
-- file with one line changed. This is necessary because Larry
-- Ellison didn't trust programmers to understand recursion and
-- composition in 1983 (https://www.holistics.io/blog/quel-vs-sql/)
-- and that still makes me angry to this day.
SELECT
id,
parent_id,
content,
location,
kind,
creation_date,
updated_date,
lastview_date,
deleted_date
FROM (
WITH RECURSIVE zettelntree (
id,
parent_id,
content,
location,
kind,
creation_date,
updated_date,
lastview_date,
deleted_date,
cycle
)
AS (
-- The seed query. Finds the root node of any tree of zetteln,
-- which by definition has a location of zero and a type of
-- 'page'.
SELECT
zetteln.id,
zetteln.id AS parent_id,
zetteln.content,
zetteln.location,
zetteln.kind,
zetteln.creation_date,
zetteln.updated_date,
zetteln.lastview_date,
zetteln.deleted_date,
','||zetteln.id||',' -- Cycle monitor
FROM zetteln
WHERE zetteln.kind = "page"
AND zetteln.location = 0
AND QUERYPARAMETER = ? -- The Query Parameter
-- RECURSIVE expression
--
-- Here, for each recursion down the tree, we collect the child
-- nodes for a given node, eliding any cycles.
--
-- TODO: Figure out what to do when a cycle DOES occur.
UNION SELECT
zetteln.id,
zettelntree.id AS parent_id,
zetteln.content,
zettle_relationships.location,
zetteln.kind,
zetteln.creation_date,
zetteln.updated_date,
zetteln.lastview_date,
zetteln.deleted_date,
zettelntree.cycle||zetteln.id||','
FROM zetteln
INNER JOIN zettle_relationships
ON zetteln.id = zettle_relationships.zettle_id
-- For a given ID in the level of zettelntree in *this* recursion,
-- we want each note's branches one level down.
INNER JOIN zettelntree
ON zettle_relationships.parent_id = zettelntree.id
-- And we want to make sure there are no cycles. There shouldn't
-- be; we're supposed to prevent those. But you never know.
WHERE zettelntree.cycle NOT LIKE '%,'||zetteln.id||',%'
ORDER BY zettle_relationships.location
)
SELECT * from zettelntree);

View File

@@ -0,0 +1,72 @@
SELECT
id,
parent_id,
content,
location,
kind,
creation_date,
updated_date,
lastview_date,
deleted_date
FROM (
WITH RECURSIVE parents (
id,
parent_id,
content,
location,
kind,
creation_date,
updated_date,
lastview_date,
deleted_date,
cycle
)
AS (
SELECT
notes.id,
note_parents.id,
notes.content,
note_relationships.location,
notes.kind,
notes.creation_date,
notes.updated_date,
notes.lastview_date,
notes.deleted_date,
','||notes.id||','
FROM notes
INNER JOIN note_relationships
ON notes.id = note_relationships.note_id
AND notes.kind = 'note'
INNER JOIN notes as note_parents
ON note_parents.id = note_relationships.parent_id
WHERE notes.id
IN (SELECT note_id
FROM note_kasten_relationships
WHERE kasten_id = ?) -- IMPORTANT: THIS IS THE PARAMETER
UNION
SELECT DISTINCT
notes.id,
next_parent.id,
notes.content,
note_relationships.location,
notes.kind,
notes.creation_date,
notes.updated_date,
notes.lastview_date,
notes.deleted_date,
parents.cycle||notes.id||','
FROM notes
INNER JOIN parents
ON parents.parent_id = notes.id
LEFT JOIN note_relationships
ON note_relationships.note_id = notes.id
LEFT JOIN notes as next_parent
ON next_parent.id = note_relationships.parent_id
WHERE parents.cycle NOT LIKE '%,'||notes.id||',%'
)
SELECT * from parents);

View File

@@ -0,0 +1,98 @@
-- This is undoubtedly one of the more complex bits of code I've
-- written recently, and I do wish there had been macros because
-- there's a lot of hand-written, copy-pasted code here around the
-- basic content of a note; it would have been nice to be able to DRY
-- that out.
-- This expression creates a table, 'notestree', that contains all of
-- the notes nested under a box (kasten). Each entry in the table includes
-- the note's parent's internal and external ids so that applications
-- can build an actual tree out of a vec of these things.
-- TODO: Extensive testing to validate that the nodes are delivered
-- *in nesting order* to the client.
-- Search in here for the term QUERYPARAMETER. That string will be
-- substituted with the correct column (notes.id or notes.content)
-- depending on the use case, by the level 1 client
-- (store_private.rs).
SELECT
id,
parent_id,
content,
location,
kind,
creation_date,
updated_date,
lastview_date,
deleted_date
FROM (
WITH RECURSIVE notestree (
id,
parent_id,
content,
location,
kind,
creation_date,
updated_date,
lastview_date,
deleted_date,
cycle
)
AS (
-- The seed query. Finds the root node of any tree of notes,
-- which by definition is of kind 'box' and is treated as being
-- at location zero.
SELECT
notes.id,
NULL as parent_id,
notes.content,
0, -- All boxes are at position zero. They are the root of the tree.
notes.kind,
notes.creation_date,
notes.updated_date,
notes.lastview_date,
notes.deleted_date,
','||notes.id||',' -- Cycle monitor
FROM notes
WHERE notes.kind = "box"
AND QUERYPARAMETER = ? -- The Query Parameter
-- RECURSIVE expression
--
-- Here, for each recursion down the tree, we collect the child
-- nodes for a given node, eliding any cycles.
--
-- TODO: Figure out what to do when a cycle DOES occur.
UNION SELECT
notes.id,
notestree.id AS parent_id,
notes.content,
note_relationships.location,
notes.kind,
notes.creation_date,
notes.updated_date,
notes.lastview_date,
notes.deleted_date,
notestree.cycle||notes.id||','
FROM notes
INNER JOIN note_relationships
ON notes.id = note_relationships.note_id
-- For a given ID in the level of notestree in *this* recursion,
-- we want each note's branches one level down.
INNER JOIN notestree
ON note_relationships.parent_id = notestree.id
-- And we want to make sure there are no cycles. There shouldn't
-- be; we're supposed to prevent those. But you never know.
WHERE notestree.cycle NOT LIKE '%,'||notes.id||',%'
ORDER BY note_relationships.location
)
SELECT * from notestree);

View File

@@ -10,7 +10,7 @@
//! sense in the future to separate the decomposition of the note
//! content into a higher layer.
//!
//! Notesmachine storage notes consist of two items: Zettle and Kasten.
//! Notesmachine storage notes consist of two items: Note and Kasten.
//! This distinction is somewhat arbitrary, as structurally these two
//! items are stored in the same table.
//!
@@ -52,11 +52,12 @@
//!
use crate::errors::NoteStoreError;
use crate::reference_parser::build_references;
use crate::store_private::*;
use crate::structs::*;
use sqlx::sqlite::SqlitePool;
use std::cmp;
use std::collections::HashMap;
// use std::collections::HashMap;
use std::sync::Arc;
/// A handle to our Sqlite database.
@@ -66,88 +67,221 @@ pub struct NoteStore(Arc<SqlitePool>);
type NoteResult<T> = core::result::Result<T, NoteStoreError>;
// After wrestling for a while with the fact that 'box' is a reserved
// word in Rust, I decided to just go with Zettle (note) and Kasten
// word in Rust, I decided to just go with Note (note) and Kasten
// (box).
impl NoteStore {
/// Initializes a new instance of the note store. Note that the
/// note store holds an Arc internally; this code is (I think)
/// safe to Send.
pub async fn new(url: &str) -> NoteResult<Self> {
let pool = SqlitePool::connect(url).await?;
Ok(NoteStore(Arc::new(pool)))
}
/// Erase all the data in the database and restore it
/// to its original empty form. Do not use unless you
/// really, really want that to happen.
pub async fn reset_database(&self) -> NoteResult<()> {
reset_database(&*self.0)
.await
.map_err(NoteStoreError::DBError)
}
/// Initializes a new instance of the note store. Note that the
/// note store holds an Arc internally; this code is (I think)
/// safe to Send.
pub async fn new(url: &str) -> NoteResult<Self> {
let pool = SqlitePool::connect(url).await?;
Ok(NoteStore(Arc::new(pool)))
}
/// Erase all the data in the database and restore it
/// to its original empty form. Do not use unless you
/// really, really want that to happen.
pub async fn reset_database(&self) -> NoteResult<()> {
reset_database(&*self.0).await.map_err(NoteStoreError::DBError)
}
/// Fetch page by slug
///
/// Supports the use case of the user navigating to a known place
/// via a bookmark or other URL. Since the title isn't clear from
/// the slug, the slug is insufficient to generate a new page, so
/// this use case says that in the event of a failure to find the
/// requested page, return a basic NotFound.
pub async fn get_kasten_by_slug(&self, slug: &str) -> NoteResult<Vec<RawZettle>> {
Ok(select_kasten_by_slug(&*self.0, slug).await?)
}
pub async fn get_kasten_by_title(&self, title: &str) -> NoteResult<Vec<RawZettle>> {
let kasten = select_page_by_title(&mut tx, title).await?;
if kasten.len() > 0 {
return kasten
/// Fetch page by slug
///
/// Supports the use case of the user navigating to a known place
/// via a bookmark or other URL. Since the title isn't clear from
/// the slug, the slug is insufficient to generate a new page, so
/// this use case says that in the event of a failure to find the
/// requested page, return a basic NotFound.
pub async fn get_kasten_by_slug(&self, slug: &str) -> NoteResult<(Vec<Note>, Vec<Note>)> {
let kasten = select_kasten_by_slug(&*self.0, &NoteId(slug.to_string())).await?;
if kasten.is_empty() {
return Err(NoteStoreError::NotFound)
}
let note_id = NoteId(kasten[0].id.clone());
Ok((kasten, select_backreferences_for_kasten(&*self.0, &note_id).await?))
}
/// Fetch page by title
///
/// The most common use case: the user is navigating by requesting
/// a page. The page either exists or it doesn't. If it doesn't,
/// we go out and make it. Since we know it didn't exist, we also
/// know it has no backreferences yet, so in that case the second
/// vec (the backreferences) comes back empty.
pub async fn get_kasten_by_title(&self, title: &str) -> NoteResult<(Vec<Note>, Vec<Note>)> {
if title.len() == 0 {
return Err(NoteStoreError::NotFound);
}
let kasten = select_kasten_by_title(&*self.0, title).await?;
if kasten.len() > 0 {
let note_id = NoteId(kasten[0].id.clone());
return Ok((kasten, select_backreferences_for_kasten(&*self.0, &note_id).await?));
}
// Sanity check!
let references = build_references(&title);
if references.len() > 0 {
return Err(NoteStoreError::InvalidNoteStructure(
"Titles may not contain nested references.".to_string(),
));
}
let mut tx = self.0.begin().await?;
let new slug = generate_slug(&mut tx, title).await?;
let new zettlekasten = create_unique_zettlekasten(&title, &slug);
let _ = insert_zettle(&zettlekasten).await?;
tx.commit().await?;
let slug = generate_slug(&mut tx, title).await?;
let zettlekasten = create_zettlekasten(&title, &slug);
let _ = insert_note(&mut tx, &zettlekasten).await?;
tx.commit().await?;
Ok(vec![zettlekasten])
}
Ok((vec![Note::from(zettlekasten)], vec![]))
}
pub async fn insert_zettle(
&self,
note: &NewNote,
parent_note_id: &str,
location: i64
) -> NoteResult<String> {
let note = {
let mut new_note = note.clone();
new_note.id = friendly_id::create();
new_note
};
let references = build_references(&note.content);
pub async fn add_note(&self, note: &NewNote, parent_id: &str, location: i64) -> NoteResult<String> {
self.insert_note(
note,
&ParentId(parent_id.to_string()),
location,
RelationshipKind::Direct,
)
.await
}
let mut tx = self.0.begin().await?;
let location = cmp::min(
assert_max_child_position_for_note(&mut tx, parent_note_id).await? + 1,
location);
/// Move a note from one location to another.
pub async fn move_note(
&self,
note_id: &str,
old_parent_id: &str,
new_parent_id: &str,
new_location: i64,
) -> NoteResult<()> {
let mut tx = self.0.begin().await?;
insert_one_new_note(&mut tx, &note).await?;
make_room_for_new_note(&mut tx, parent_id, location).await?;
insert_note_to_note_relationship(&mut tx, parent_id, note.id, location, "note");
let old_parent_id = ParentId(old_parent_id.to_string());
let new_parent_id = ParentId(new_parent_id.to_string());
let note_id = NoteId(note_id.to_string());
let found_references = find_all_page_references_for(&mut tx, &references).await?;
let mut known_reference_ids: Vec<PageId> = Vec::new();
let old_note = select_note_to_note_relationship(&mut tx, &old_parent_id, &note_id).await?;
let old_note_location = old_note.location;
let old_note_kind = old_note.kind;
let _ = delete_note_to_note_relationship(&mut tx, &old_parent_id, &note_id).await?;
let _ = close_hole_for_deleted_note(&mut tx, &old_parent_id, old_note_location).await?;
let parent_max_location = assert_max_child_location_for_note(&mut tx, &new_parent_id).await?;
let new_location = cmp::min(parent_max_location + 1, new_location);
let _ = make_room_for_new_note(&mut tx, &new_parent_id, new_location).await?;
let _ =
insert_note_to_note_relationship(&mut tx, &new_parent_id, &note_id, new_location, &old_note_kind).await?;
tx.commit().await?;
Ok(())
}
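// A hedged usage sketch (the ids here are hypothetical): reparent note "n1"
// from "old-parent" to position 0 under "new-parent"; the old sibling list
// is compacted and the new one is shifted to make room before re-insertion.
//
//   store.move_note("n1", "old-parent", "new-parent", 0).await?;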
/// Deletes a note. If the note's relationship drops to zero, all
/// references from that note to pages are also deleted.
pub async fn delete_note(&self, note_id: &str, note_parent_id: &str) -> NoteResult<()> {
let mut tx = self.0.begin().await?;
let note_id = NoteId(note_id.to_string());
let parent_id = ParentId(note_parent_id.to_string());
let _ = delete_note_to_note_relationship(&mut tx, &parent_id, &note_id).await?;
// The big one: if zero parents report having an interest in this note, then it,
// *and any sub-relationships*, go away.
if count_existing_note_relationships(&mut tx, &note_id).await? == 0 {
let _ = delete_note_to_kasten_relationships(&mut tx, &note_id).await?;
let _ = delete_note(&mut tx, &note_id).await?;
}
tx.commit().await?;
Ok(())
}
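// Likewise for deletion (hypothetical ids): detach "n1" from "old-parent";
// if that was its last remaining parent, the note row and its note->kasten
// edges are removed as well.
//
//   store.delete_note("n1", "old-parent").await?;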
/// Updates a note's content. Completely rebuilds the note's
/// outgoing edge reference list every time.
pub async fn update_note_content(&self, note_id: &str, content: &str) -> NoteResult<()> {
let references = build_references(&content);
let note_id = NoteId(note_id.to_string());
let mut tx = self.0.begin().await?;
let _ = update_note_content(&mut tx, &note_id, &content).await?;
let _ = delete_bulk_note_to_kasten_relationships(&mut tx, &note_id).await?;
let found_references = find_all_kasten_from_list_of_references(&mut tx, &references).await?;
let new_references = diff_references(&references, &found_references);
let mut known_reference_ids: Vec<NoteId> = Vec::new();
for one_reference in new_references.iter() {
let new slug = generate_slug(&mut tx, one_reference).await?;
let new zettlekasten = create_unique_zettlekasten(&one_reference, &slug);
let _ = insert_zettle(&zettlekasten).await?;
known_reference_ids.push(slug);
}
let slug = generate_slug(&mut tx, one_reference).await?;
let zettlekasten = create_zettlekasten(&one_reference, &slug);
let _ = insert_note(&mut tx, &zettlekasten).await?;
known_reference_ids.push(NoteId(slug));
}
known_reference_ids.append(&mut found_references.iter().map(|r| PageId(r.id)).collect());
let _ = insert_note_to_page_relationships(&mut tx, new_note_id, &known_reference_ids).await?;
tx.commit().await?;
Ok(note.id);
}
known_reference_ids.append(&mut found_references.iter().map(|r| NoteId(r.id.clone())).collect());
let _ = insert_bulk_note_to_kasten_relationships(&mut tx, &note_id, &known_reference_ids).await?;
tx.commit().await?;
Ok(())
}
}
// The Private stuff
impl NoteStore {
// Pretty much the most dangerous function in our system. Has to
// have ALL the error checking.
async fn insert_note(
&self,
note: &NewNote,
parent_id: &ParentId,
location: i64,
kind: RelationshipKind,
) -> NoteResult<String> {
if location < 0 {
return Err(NoteStoreError::InvalidNoteStructure(
"Add note: A negative position is not valid.".to_string(),
));
}
if parent_id.is_empty() {
return Err(NoteStoreError::InvalidNoteStructure(
"Add note: A parent note ID is required.".to_string(),
));
}
if note.id.is_empty() {
return Err(NoteStoreError::InvalidNoteStructure(
"Add note: Your note should have an id already".to_string(),
));
}
if note.content.is_empty() {
return Err(NoteStoreError::InvalidNoteStructure(
"Add note: Empty notes are not supported.".to_string(),
));
}
let references = build_references(&note.content);
let mut tx = self.0.begin().await?;
let location = cmp::min(
assert_max_child_location_for_note(&mut tx, parent_id).await? + 1,
location,
);
let note_id = NoteId(note.id.clone());
insert_note(&mut tx, &note).await?;
make_room_for_new_note(&mut tx, &parent_id, location).await?;
insert_note_to_note_relationship(&mut tx, &parent_id, &note_id, location, &kind).await?;
let found_references = find_all_kasten_from_list_of_references(&mut tx, &references).await?;
let new_references = diff_references(&references, &found_references);
let mut known_reference_ids: Vec<NoteId> = Vec::new();
for one_reference in new_references.iter() {
let slug = generate_slug(&mut tx, one_reference).await?;
let zettlekasten = create_zettlekasten(&one_reference, &slug);
let _ = insert_note(&mut tx, &zettlekasten).await?;
known_reference_ids.push(NoteId(slug));
}
known_reference_ids.append(&mut found_references.iter().map(|r| NoteId(r.id.clone())).collect());
let _ = insert_bulk_note_to_kasten_relationships(&mut tx, &note_id, &known_reference_ids).await?;
tx.commit().await?;
Ok(note_id.to_string())
}
}

View File

@@ -2,10 +2,7 @@ use crate::structs::*;
use lazy_static::lazy_static;
use regex::Regex;
use slug::slugify;
use sqlx::{
sqlite::{Sqlite, SqliteRow},
Done, Executor, Row,
};
use sqlx::{sqlite::Sqlite, Done, Executor};
use std::collections::HashSet;
type SqlResult<T> = sqlx::Result<T>;
@@ -21,20 +18,39 @@ type SqlResult<T> = sqlx::Result<T>;
// coherent and easily readable, and hides away the gnarliness of some
// of the SQL queries.
// Important!!! Note_relationships are usually (parent_note -> note),
// but note-to-kasten relationships are always (note-as-parent ->
// kasten_note), so when looking for "all the notes referring to this
// kasten", you use the kasten's id as the TARGET note_id, and the id
// of the note referring to the kasten goes in the parent_id.
lazy_static! {
static ref select_kasten_by_title_sql: String = str::replace(
include_str!("sql/select_kasten_by_parameter.sql"),
"QUERYPARAMETER",
"zetteln.title");
static ref SELECT_KASTEN_BY_TITLE_SQL: String = str::replace(
include_str!("sql/select_notes_by_parameter.sql"),
"QUERYPARAMETER",
"notes.content"
);
}
lazy_static! {
static ref select_kasten_by_id_sql: String = str::replace(
include_str!("sql/select_kasten_by_parameter.sql"),
"QUERYPARAMETER",
"zetteln.id");
static ref SELECT_KASTEN_BY_ID_SQL: String = str::replace(
include_str!("sql/select_notes_by_parameter.sql"),
"QUERYPARAMETER",
"notes.id"
);
}
lazy_static! {
static ref SELECT_NOTES_BACKREFENCING_KASTEN_SQL: &'static str =
include_str!("sql/select_notes_backreferencing_kasten.sql");
}
// ___ _
// | _ \___ ___ ___| |_
// | / -_|_-</ -_) _|
// |_|_\___/__/\___|\__|
//
pub(crate) async fn reset_database<'a, E>(executor: E) -> SqlResult<()>
where
E: Executor<'a, Database = Sqlite>,
@@ -43,13 +59,454 @@ where
sqlx::query(initialize_sql).execute(executor).await.map(|_| ())
}
pub(crate) async fn select_kasten_by_slug<'a, E>(executor: E, slug: &str) -> SqlResult<Vec<RawZettle>>
// ___ _ _ _ __ _
// | __|__| |_ __| |_ | |/ /__ _ __| |_ ___ _ _
// | _/ -_) _/ _| ' \ | ' </ _` (_-< _/ -_) ' \
// |_|\___|\__\__|_||_| |_|\_\__,_/__/\__\___|_||_|
//
pub(crate) async fn select_kasten_by_slug<'a, E>(executor: E, slug: &NoteId) -> SqlResult<Vec<Note>>
where
E: Executor<'a, Database = Sqlite>,
{
Ok(sqlx::query_as(&select_kasten_by_id_sql)
.bind(&slug)
.fetch_all(executor)
.await?)
let r: Vec<RowNote> = sqlx::query_as(&SELECT_KASTEN_BY_ID_SQL)
.bind(&**slug)
.fetch_all(executor)
.await?;
Ok(r.into_iter().map(|z| Note::from(z)).collect())
}
pub(crate) async fn select_kasten_by_title<'a, E>(executor: E, title: &str) -> SqlResult<Vec<Note>>
where
E: Executor<'a, Database = Sqlite>,
{
let r: Vec<RowNote> = sqlx::query_as(&SELECT_KASTEN_BY_TITLE_SQL)
.bind(&title)
.fetch_all(executor)
.await?;
Ok(r.into_iter().map(|z| Note::from(z)).collect())
}
pub(crate) async fn select_backreferences_for_kasten<'a, E>(executor: E, kasten_id: &NoteId) -> SqlResult<Vec<Note>>
where
E: Executor<'a, Database = Sqlite>,
{
let r: Vec<RowNote> = sqlx::query_as(&SELECT_NOTES_BACKREFENCING_KASTEN_SQL)
.bind(&**kasten_id)
.fetch_all(executor)
.await?;
Ok(r.into_iter().map(|z| Note::from(z)).collect())
}
// ___ _ ___ _ _ _
// |_ _|_ _ ___ ___ _ _| |_ / _ \ _ _ ___ | \| |___| |_ ___
// | || ' \(_-</ -_) '_| _| | (_) | ' \/ -_) | .` / _ \ _/ -_)
// |___|_||_/__/\___|_| \__| \___/|_||_\___| |_|\_\___/\__\___|
//
pub(crate) async fn insert_note<'a, E>(executor: E, zettle: &NewNote) -> SqlResult<String>
where
E: Executor<'a, Database = Sqlite>,
{
let insert_one_page_sql = concat!(
"INSERT INTO notes (id, content, kind, ",
" creation_date, updated_date, lastview_date) ",
"VALUES (?, ?, ?, ?, ?, ?);"
);
let _ = sqlx::query(insert_one_page_sql)
.bind(&zettle.id)
.bind(&zettle.content)
.bind(zettle.kind.to_string())
.bind(&zettle.creation_date)
.bind(&zettle.updated_date)
.bind(&zettle.lastview_date)
.execute(executor)
.await?;
Ok(zettle.id.clone())
}
// ___ _ _ _ _ __ _
// | _ )_ _(_) |__| | | |/ /__ _ __| |_ ___ _ _
// | _ \ || | | / _` | | ' </ _` (_-< _/ -_) ' \
// |___/\_,_|_|_\__,_| |_|\_\__,_/__/\__\___|_||_|
//
// Given a possible slug, find the slug with the highest
// uniquification number, and return that number, if any.
pub(crate) fn find_maximal_slug_number(slugs: &[JustId]) -> Option<u32> {
lazy_static! {
static ref RE_CAP_NUM: Regex = Regex::new(r"-(\d+)$").unwrap();
}
if slugs.is_empty() {
return None;
}
let mut slug_counters: Vec<u32> = slugs
.iter()
.filter_map(|slug| RE_CAP_NUM.captures(&slug.id))
.map(|cap| cap.get(1).unwrap().as_str().parse::<u32>().unwrap())
.collect();
slug_counters.sort_unstable();
slug_counters.pop()
}
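// An illustrative sketch (the slug ids are made up) of what
// find_maximal_slug_number is expected to return for a set of similar
// slugs, and for the empty case.
#[cfg(test)]
#[test]
fn find_maximal_slug_number_sketch() {
    let slugs = vec![
        JustId { id: "my-note".to_string() },
        JustId { id: "my-note-2".to_string() },
        JustId { id: "my-note-7".to_string() },
    ];
    assert_eq!(find_maximal_slug_number(&slugs), Some(7));
    // generate_slug would then produce "my-note-8" for the same title.
    let empty: Vec<JustId> = vec![];
    assert_eq!(find_maximal_slug_number(&empty), None);
}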
// Given an initial string and an existing collection of slugs,
// generate a new slug that does not conflict with the current
// collection.
pub(crate) async fn generate_slug<'a, E>(executor: E, title: &str) -> SqlResult<String>
where
E: Executor<'a, Database = Sqlite>,
{
lazy_static! {
static ref RE_STRIP_NUM: Regex = Regex::new(r"-\d+$").unwrap();
static ref SLUG_FINDER_SQL: String = format!(
"SELECT id FROM notes WHERE kind = '{}' AND id LIKE '?%';",
NoteKind::Kasten.to_string()
);
}
let initial_slug = slugify(title);
let sample_slug = RE_STRIP_NUM.replace_all(&initial_slug, "");
let similar_slugs: Vec<JustId> = sqlx::query_as(&SLUG_FINDER_SQL)
.bind(&*sample_slug)
.fetch_all(executor)
.await?;
let maximal_slug_number = find_maximal_slug_number(&similar_slugs);
Ok(match maximal_slug_number {
None => initial_slug,
Some(slug_number) => format!("{}-{}", initial_slug, slug_number + 1),
})
}
pub(crate) fn create_zettlekasten(title: &str, slug: &str) -> NewNote {
NewNoteBuilder::default()
.id(slug.to_string())
.content(title.to_string())
.kind(NoteKind::Kasten)
.build()
.unwrap()
}
// _ _ _ _ ___ _ _ _
// | | | |_ __ __| |__ _| |_ ___ / _ \ _ _ ___ | \| |___| |_ ___
// | |_| | '_ \/ _` / _` | _/ -_) | (_) | ' \/ -_) | .` / _ \ _/ -_)
// \___/| .__/\__,_\__,_|\__\___| \___/|_||_\___| |_|\_\___/\__\___|
// |_|
pub(crate) async fn update_note_content<'a, E>(executor: E, note_id: &NoteId, content: &str) -> SqlResult<()>
where
E: Executor<'a, Database = Sqlite>,
{
let update_note_content_sql = "UPDATE notes SET content = ? WHERE id = ?";
let count = sqlx::query(update_note_content_sql)
.bind(content)
.bind(&**note_id)
.execute(executor)
.await?
.rows_affected();
match count {
1 => Ok(()),
_ => Err(sqlx::Error::RowNotFound),
}
}
// ___ _ _ ___ _ _ _ ___ _ _ _ _ _
// | __|__| |_ __| |_ / _ \ _ _ ___ | \| |___| |_ ___ | _ \___| |__ _| |_(_)___ _ _ __| |_ (_)_ __
// | _/ -_) _/ _| ' \ | (_) | ' \/ -_) | .` / _ \ _/ -_) | / -_) / _` | _| / _ \ ' \(_-< ' \| | '_ \
// |_|\___|\__\__|_||_| \___/|_||_\___| |_|\_\___/\__\___| |_|_\___|_\__,_|\__|_\___/_||_/__/_||_|_| .__/
// |_|
pub(crate) async fn select_note_to_note_relationship<'a, E>(
executor: E,
parent_id: &ParentId,
note_id: &NoteId,
) -> SqlResult<NoteRelationship>
where
E: Executor<'a, Database = Sqlite>,
{
let get_note_to_note_relationship_sql = concat!(
"SELECT parent_id, note_id, location, kind ",
"FROM note_relationships ",
"WHERE parent_id = ? and note_id = ? ",
"LIMIT 1"
);
let s: NoteRelationshipRow = sqlx::query_as(get_note_to_note_relationship_sql)
.bind(&**parent_id)
.bind(&**note_id)
.fetch_one(executor)
.await?;
Ok(NoteRelationship::from(s))
}
// _ _ _ _ _ _ _ ___ _ _ _ _ _
// | \| |___| |_ ___ | |_ ___ | \| |___| |_ ___ | _ \___| |__ _| |_(_)___ _ _ __| |_ (_)_ __ ___
// | .` / _ \ _/ -_) | _/ _ \ | .` / _ \ _/ -_) | / -_) / _` | _| / _ \ ' \(_-< ' \| | '_ (_-<
// |_|\_\___/\__\___| \__\___/ |_|\_\___/\__\___| |_|_\___|_\__,_|\__|_\___/_||_/__/_||_|_| .__/__/
// |_|
pub(crate) async fn insert_note_to_note_relationship<'a, E>(
executor: E,
parent_id: &ParentId,
note_id: &NoteId,
location: i64,
kind: &RelationshipKind,
) -> SqlResult<()>
where
E: Executor<'a, Database = Sqlite>,
{
let insert_note_to_note_relationship_sql = concat!(
"INSERT INTO note_relationships (parent_id, note_id, location, kind) ",
"values (?, ?, ?, ?)"
);
let _ = sqlx::query(insert_note_to_note_relationship_sql)
.bind(&**parent_id)
.bind(&**note_id)
.bind(&location)
.bind(&kind.to_string())
.execute(executor)
.await?;
Ok(())
}
pub(crate) async fn make_room_for_new_note<'a, E>(executor: E, parent_id: &ParentId, location: i64) -> SqlResult<()>
where
E: Executor<'a, Database = Sqlite>,
{
let make_room_for_new_note_sql = concat!(
"UPDATE note_relationships ",
"SET location = location + 1 ",
"WHERE location >= ? and parent_id = ?;"
);
let _ = sqlx::query(make_room_for_new_note_sql)
.bind(&location)
.bind(&**parent_id)
.execute(executor)
.await?;
Ok(())
}
pub(crate) async fn assert_max_child_location_for_note<'a, E>(executor: E, note_id: &ParentId) -> SqlResult<i64>
where
E: Executor<'a, Database = Sqlite>,
{
let assert_max_child_location_for_note_sql =
"SELECT MAX(location) AS count FROM note_relationships WHERE parent_id = ?;";
let count: RowCount = sqlx::query_as(assert_max_child_location_for_note_sql)
.bind(&**note_id)
.fetch_one(executor)
.await?;
Ok(count.count)
}
// _ _ _ _ _ __ _ ___ _ _ _ _ _
// | \| |___| |_ ___ | |_ ___ | |/ /__ _ __| |_ ___ _ _ | _ \___| |__ _| |_(_)___ _ _ __| |_ (_)_ __ ___
// | .` / _ \ _/ -_) | _/ _ \ | ' </ _` (_-< _/ -_) ' \ | / -_) / _` | _| / _ \ ' \(_-< ' \| | '_ (_-<
// |_|\_\___/\__\___| \__\___/ |_|\_\__,_/__/\__\___|_||_| |_|_\___|_\__,_|\__|_\___/_||_/__/_||_|_| .__/__/
// |_|
pub(crate) async fn insert_bulk_note_to_kasten_relationships<'a, E>(
executor: E,
note_id: &NoteId,
references: &[NoteId],
) -> SqlResult<()>
where
E: Executor<'a, Database = Sqlite>,
{
if references.is_empty() {
return Ok(());
}
let insert_pattern = format!("(?, ?, '{}')", KastenRelationshipKind::Kasten.to_string());
let insert_note_page_references_sql = "INSERT INTO note_kasten_relationships (note_id, kasten_id, kind) VALUES "
.to_string()
+ &[insert_pattern.as_str()].repeat(references.len()).join(", ")
+ &";".to_string();
let mut request = sqlx::query(&insert_note_page_references_sql);
for reference in references {
request = request.bind(&**note_id).bind(&**reference);
}
request.execute(executor).await.map(|_| ())
}
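// For two references, the generated statement reads (the 'kasten' literal
// comes from KastenRelationshipKind::Kasten):
//
//   INSERT INTO note_kasten_relationships (note_id, kasten_id, kind)
//   VALUES (?, ?, 'kasten'), (?, ?, 'kasten');
//
// and each (?, ?) pair is bound to the same note_id plus one reference.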
pub(crate) async fn delete_bulk_note_to_kasten_relationships<'a, E>(executor: E, note_id: &NoteId) -> SqlResult<()>
where
E: Executor<'a, Database = Sqlite>,
{
let delete_note_to_kasten_relationship_sql = "DELETE FROM note_kasten_relationships WHERE note_id = ?;";
let _ = sqlx::query(delete_note_to_kasten_relationship_sql)
.bind(&**note_id)
.execute(executor)
.await?;
Ok(())
}
// Given the references supplied, and the references found in the datastore,
// return a list of the references not found in the datastore.
pub(crate) fn diff_references(references: &[String], found_references: &[PageTitle]) -> Vec<String> {
let all: HashSet<String> = references.iter().cloned().collect();
let found: HashSet<String> = found_references.iter().map(|r| r.content.clone()).collect();
all.difference(&found).cloned().collect()
}
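// An illustrative sketch (the titles are made up): "Alpha" already exists
// as a kasten in the datastore, "Beta" does not, so only "Beta" still
// needs a kasten row created.
#[cfg(test)]
#[test]
fn diff_references_sketch() {
    let wanted = vec!["Alpha".to_string(), "Beta".to_string()];
    let found = vec![PageTitle {
        id: "alpha".to_string(),
        content: "Alpha".to_string(),
    }];
    assert_eq!(diff_references(&wanted, &found), vec!["Beta".to_string()]);
}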
// ___ _ _ _ _ __ _ ___ _ _ _ _ _
// / __|___ _ _| |_ ___ _ _| |_ | |_ ___ | |/ /__ _ __| |_ ___ _ _ | _ \___| |__ _| |_(_)___ _ _ __| |_ (_)_ __ ___
// | (__/ _ \ ' \ _/ -_) ' \ _| | _/ _ \ | ' </ _` (_-< _/ -_) ' \ | / -_) / _` | _| / _ \ ' \(_-< ' \| | '_ (_-<
// \___\___/_||_\__\___|_||_\__| \__\___/ |_|\_\__,_/__/\__\___|_||_| |_|_\___|_\__,_|\__|_\___/_||_/__/_||_|_| .__/__/
// |_|
// Returns all the (Id, title) pairs found in the database out of a
// list of titles. Used by insert_note and update_note_content to
// find the ids of all the references in a given document.
pub(crate) async fn find_all_kasten_from_list_of_references<'a, E>(
executor: E,
references: &[String],
) -> SqlResult<Vec<PageTitle>>
where
E: Executor<'a, Database = Sqlite>,
{
if references.is_empty() {
return Ok(vec![]);
}
lazy_static! {
static ref SELECT_ALL_REFERENCES_FOR_SQL_BASE: String = format!(
"SELECT id, content FROM notes WHERE kind = '{}' AND content IN (",
NoteKind::Kasten.to_string()
);
}
let find_all_references_for_sql =
SELECT_ALL_REFERENCES_FOR_SQL_BASE.to_string() + &["?"].repeat(references.len()).join(",") + &");".to_string();
let mut request = sqlx::query_as(&find_all_references_for_sql);
for id in references.iter() {
request = request.bind(id);
}
request.fetch_all(executor).await
}
// ___ _ _
// | \ ___| |___| |_ ___
// | |) / -_) / -_) _/ -_)
// |___/\___|_\___|\__\___|
//
pub(crate) async fn delete_note_to_note_relationship<'a, E>(
executor: E,
parent_id: &ParentId,
note_id: &NoteId,
) -> SqlResult<()>
where
E: Executor<'a, Database = Sqlite>,
{
let delete_note_to_note_relationship_sql = concat!(
"DELETE FROM note_relationships ",
"WHERE parent_id = ? and note_id = ? "
);
let count = sqlx::query(delete_note_to_note_relationship_sql)
.bind(&**parent_id)
.bind(&**note_id)
.execute(executor)
.await?
.rows_affected();
match count {
1 => Ok(()),
_ => Err(sqlx::Error::RowNotFound),
}
}
pub(crate) async fn delete_note_to_kasten_relationships<'a, E>(executor: E, note_id: &NoteId) -> SqlResult<()>
where
E: Executor<'a, Database = Sqlite>,
{
lazy_static! {
static ref DELETE_NOTE_TO_KASTEN_RELATIONSHIPS_SQL: String = format!(
"DELETE FROM note_relationships WHERE kind in ('{}', '{}') AND parent_id = ?;",
KastenRelationshipKind::Kasten.to_string(),
KastenRelationshipKind::Unacked.to_string()
);
}
let _ = sqlx::query(&DELETE_NOTE_TO_KASTEN_RELATIONSHIPS_SQL)
.bind(&**note_id)
.execute(executor)
.await?;
Ok(())
}
pub(crate) async fn delete_note<'a, E>(executor: E, note_id: &NoteId) -> SqlResult<()>
where
E: Executor<'a, Database = Sqlite>,
{
let delete_note_sql = "DELETE FROM notes WHERE id = ?";
let count = sqlx::query(delete_note_sql)
.bind(&**note_id)
.execute(executor)
.await?
.rows_affected();
match count {
1 => Ok(()),
_ => Err(sqlx::Error::RowNotFound),
}
}
// After removing a note, recalculate the location of all notes under
// the parent note, such that their order is now completely
// sequential.
pub(crate) async fn close_hole_for_deleted_note<'a, E>(
executor: E,
parent_id: &ParentId,
location: i64,
) -> SqlResult<()>
where
E: Executor<'a, Database = Sqlite>,
{
let close_hole_for_deleted_note_sql = concat!(
"UPDATE note_relationships ",
"SET location = location - 1 ",
"WHERE location > ? and parent_id = ?;"
);
let _ = sqlx::query(close_hole_for_deleted_note_sql)
.bind(&location)
.bind(&**parent_id)
.execute(executor)
.await?;
Ok(())
}
// __ __ _
// | \/ (_)___ __
// | |\/| | (_-</ _|
// |_| |_|_/__/\__|
//
// The dreaded miscellaneous!
pub(crate) async fn count_existing_note_relationships<'a, E>(executor: E, note_id: &NoteId) -> SqlResult<i64>
where
E: Executor<'a, Database = Sqlite>,
{
let count_existing_note_relationships_sql =
"SELECT COUNT(*) as count FROM note_relationships WHERE note_id = ?;";
let count: RowCount = sqlx::query_as(&count_existing_note_relationships_sql)
.bind(&**note_id)
.fetch_one(executor)
.await?;
Ok(count.count)
}

View File

@@ -1,17 +1,257 @@
use chrono::{DateTime, Utc};
// use derive_builder::Builder;
use serde::{Deserialize, Serialize};
// use shrinkwraprs::Shrinkwrap;
use derive_builder::Builder;
use friendly_id;
use shrinkwraprs::Shrinkwrap;
use sqlx::{self, FromRow};
#[derive(Clone, Serialize, Deserialize, Debug, FromRow)]
pub struct RawZettle {
// Kasten is German for "Box," and is used both because this is
// supposed to be a Zettlekasten, and because "Box" is a heavily
// reserved word in Rust. So, for that matter, are "crate" and
// "cargo," "cell," and so forth. If I'd wanted to go the Full
// Noguchi, I guess I could have used "envelope."
// In order to prevent arbitrary enumeration tokens from getting into
// the database, the private layer takes a very hard line on insisting
// that everything sent TO the datastore come in the enumerated
// format, and everything coming OUT of the database be converted back
// into an enumeration. These macros instantiate those objects
// and their conversions to/from strings.
macro_rules! build_conversion_enums {
( $ty:ident, $( $s:literal => $x:ident, )*) => {
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum $ty {
$( $x ), *
}
impl From<String> for $ty {
fn from(kind: String) -> Self {
match &kind[..] {
$( $s => $ty::$x, )*
_ => panic!("Illegal value in {} database: {}", stringify!($ty), kind),
}
}
}
impl From<$ty> for String {
fn from(kind: $ty) -> Self {
match kind {
$( $ty::$x => $s ),*
}
.to_string()
}
}
impl $ty {
pub fn to_string(&self) -> String {
String::from(self.clone())
}
}
};
}
#[derive(Shrinkwrap, Clone)]
pub(crate) struct NoteId(pub String);
#[derive(Shrinkwrap, Clone)]
pub(crate) struct ParentId(pub String);
// The different kinds of objects we support.
build_conversion_enums!(
NoteKind,
"box" => Kasten,
"note" => Note,
"resource" => Resource,
);
// The different kinds of relationships we support. I do not yet
// know how to ensure that there is at most one (a -> b)::Direct,
// that for any (a -> b) there is no reverse (b -> a), nor, for
// that matter, how to prevent cycles.
build_conversion_enums!(
RelationshipKind,
"direct" => Direct,
"reference" => Reference,
"embed" => Embed,
);
build_conversion_enums!(
KastenRelationshipKind,
"kasten" => Kasten,
"unacked" => Unacked,
"cancelled" => Cancelled,
);
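// A brief sketch of the round-trip the generated impls provide; the string
// values come straight from the mappings above.
#[cfg(test)]
#[test]
fn conversion_enum_round_trip_sketch() {
    // String as stored in the database -> enum, and back again.
    assert_eq!(NoteKind::from("box".to_string()), NoteKind::Kasten);
    assert_eq!(NoteKind::Kasten.to_string(), "box");
    assert_eq!(RelationshipKind::from("direct".to_string()), RelationshipKind::Direct);
}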
// A Note is the base construct of our system. It represents a
// single note and contains information about its parent and location.
// This is the object *retrieved* from the database.
#[derive(Clone, Debug, FromRow)]
pub(crate) struct RowNote {
pub id: String,
pub parent_id: Option<String>,
pub content: String,
pub kind: String,
pub position: i64,
pub location: i64,
pub creation_date: DateTime<Utc>,
pub updated_date: DateTime<Utc>,
pub lastview_date: DateTime<Utc>,
pub deleted_date: Option<DateTime<Utc>>,
}
/// A Note as it's returned from the private layer. This is
/// provided to ensure that the NoteKind is an enum, and that we
/// control the list of possible values stored in the database.
#[derive(Clone, Debug)]
pub struct Note {
pub id: String,
pub parent_id: Option<String>,
pub content: String,
pub kind: NoteKind,
pub location: i64,
pub creation_date: DateTime<Utc>,
pub updated_date: DateTime<Utc>,
pub lastview_date: DateTime<Utc>,
pub deleted_date: Option<DateTime<Utc>>,
}
impl From<RowNote> for Note {
fn from(note: RowNote) -> Self {
Self {
id: note.id,
parent_id: note.parent_id,
content: note.content,
kind: NoteKind::from(note.kind),
location: note.location,
creation_date: note.creation_date,
updated_date: note.updated_date,
lastview_date: note.lastview_date,
deleted_date: note.deleted_date,
}
}
}
/// A new Note object as it's inserted into the system. It has no
/// parent or location information; those are data relative to the
/// parent, and must be provided by the client. In the case of a
/// Kasten, no location or parent is necessary.
#[derive(Clone, Debug, Builder)]
pub struct NewNote {
#[builder(default = r#"friendly_id::create()"#)]
pub id: String,
pub content: String,
#[builder(default = r#"NoteKind::Note"#)]
pub kind: NoteKind,
#[builder(default = r#"chrono::Utc::now()"#)]
pub creation_date: DateTime<Utc>,
#[builder(default = r#"chrono::Utc::now()"#)]
pub updated_date: DateTime<Utc>,
#[builder(default = r#"chrono::Utc::now()"#)]
pub lastview_date: DateTime<Utc>,
#[builder(default = r#"None"#)]
pub deleted_date: Option<DateTime<Utc>>,
}
impl From<NewNote> for Note {
/// Only used for building new kastens, so the decision-making is
/// limited to kasten-level things, like having no parent and
/// a location of zero.
fn from(note: NewNote) -> Self {
Self {
id: note.id,
parent_id: None,
content: note.content,
kind: note.kind,
location: 0,
creation_date: note.creation_date,
updated_date: note.updated_date,
lastview_date: note.lastview_date,
deleted_date: note.deleted_date,
}
}
}
#[derive(Clone, Debug, FromRow)]
pub(crate) struct JustId {
pub id: String,
}
#[derive(Clone, Debug, FromRow)]
pub(crate) struct PageTitle {
pub id: String,
pub content: String,
}
#[derive(Clone, Debug, FromRow)]
pub(crate) struct RowCount {
pub count: i64,
}
#[derive(Clone, Debug, FromRow)]
pub(crate) struct NoteRelationshipRow {
pub parent_id: String,
pub note_id: String,
pub location: i64,
pub kind: String,
}
#[derive(Clone, Debug)]
pub(crate) struct NoteRelationship {
pub parent_id: String,
pub note_id: String,
pub location: i64,
pub kind: RelationshipKind,
}
impl From<NoteRelationshipRow> for NoteRelationship {
fn from(rel: NoteRelationshipRow) -> Self {
Self {
parent_id: rel.parent_id,
note_id: rel.note_id,
location: rel.location,
kind: RelationshipKind::from(rel.kind),
}
}
}
#[derive(Clone, Debug, FromRow)]
pub(crate) struct KastenRelationshipRow {
pub note_id: String,
pub kasten_id: String,
pub kind: String,
}
#[derive(Clone, Debug)]
pub(crate) struct KastenRelationship {
pub note_id: String,
pub kasten_id: String,
pub kind: KastenRelationshipKind,
}
impl From<KastenRelationshipRow> for KastenRelationship {
fn from(rel: KastenRelationshipRow) -> Self {
Self {
kasten_id: rel.kasten_id,
note_id: rel.note_id,
kind: KastenRelationshipKind::from(rel.kind),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn can_build_new_note() {
let now = chrono::Utc::now();
let newnote = NewNoteBuilder::default().content("bar".to_string()).build().unwrap();
assert!(newnote.id.len() > 4);
assert!((newnote.creation_date - now).num_minutes() < 1);
assert!((newnote.updated_date - now).num_minutes() < 1);
assert!((newnote.lastview_date - now).num_minutes() < 1);
assert!(newnote.deleted_date.is_none());
}
}