Compare commits
14 Commits
0f5d15ad14
...
72fb3b11ee
Author | SHA1 | Date |
---|---|---|
Elf M. Sternberg | 72fb3b11ee | |
Elf M. Sternberg | 739ff93427 | |
Elf M. Sternberg | 0f98dc4523 | |
Elf M. Sternberg | 417320b27c | |
Elf M. Sternberg | 4e04bb47d5 | |
Elf M. Sternberg | 7639d1a6f2 | |
Elf M. Sternberg | bb841c7769 | |
Elf M. Sternberg | 1c0f3abd6c | |
Elf M. Sternberg | 380d3f4a7c | |
Elf M. Sternberg | 1b36183edb | |
Elf M. Sternberg | e429eaf93c | |
Elf M. Sternberg | fd4f39b5b8 | |
Elf M. Sternberg | 1b8e1c067d | |
Elf M. Sternberg | e0c463f9fc |
|
@ -3,7 +3,10 @@ name = "nm-store-cli"
|
|||
version = "0.1.0"
|
||||
authors = ["Elf M. Sternberg <elf.sternberg@gmail.com>"]
|
||||
edition = "2018"
|
||||
description = "Command-line direct access to the notesmachine store."
|
||||
readme = "./README.org"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
|
||||
|
|
|
@ -13,6 +13,12 @@ readme = "./README.org"
|
|||
[dependencies]
|
||||
friendly_id = "0.3.0"
|
||||
thiserror = "1.0.20"
|
||||
derive_builder = "0.9.0"
|
||||
lazy_static = "1.4.0"
|
||||
comrak = "0.8.2"
|
||||
shrinkwraprs = "0.3.0"
|
||||
regex = "1.3.9"
|
||||
slug = "0.1.4"
|
||||
tokio = { version = "0.2.22", features = ["rt-threaded", "blocking"] }
|
||||
serde = { version = "1.0.116", features = ["derive"] }
|
||||
serde_json = "1.0.56"
|
||||
|
|
|
@ -14,14 +14,10 @@ representations:
|
|||
|
||||
** Plans
|
||||
|
||||
*** TODO Make it possible to save a note
|
||||
*** TODO Make it possible to retrieve a note
|
||||
*** TODO Read how others use SQLX to initialize the database
|
||||
*** TODO Implement CLI features
|
||||
*** TODO Make it possible to connect two notes
|
||||
*** TODO Make it possible to save a page
|
||||
*** TODO Make it possible to connect a note to a page
|
||||
*** TODO Make it possible to retrieve a collection of notes
|
||||
*** TODO Make it possible to retrieve a page
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -0,0 +1,190 @@
|
|||
/async fn insert_note<'e, E>(executor: E, id: &str, content: &str, notetype: &str) -> SqlResult<i64>
|
||||
where
|
||||
E: 'e + Executor<'e, Database = Sqlite>,
|
||||
{
|
||||
lazy_static! {
|
||||
static ref INSERT_ONE_NOTE_SQL: String = include_str!("sql/insert_one_note.sql");
|
||||
}
|
||||
let now = chrono::Utc::now();
|
||||
Ok(sqlx::query(INSERT_ONE_NOTE_SQL)
|
||||
.bind(&id)
|
||||
.bind(&content)
|
||||
.bind(¬etype)
|
||||
.bind(&now)
|
||||
.bind(&now)
|
||||
.bind(&now)
|
||||
.execute(executor)
|
||||
.await?
|
||||
.last_insert_rowid())
|
||||
}
|
||||
|
||||
|
||||
#[derive(Clone, FromRow)]
|
||||
struct JustSlugs {
|
||||
slug: String
|
||||
}
|
||||
|
||||
|
||||
// Given an initial string and an existing collection of slugs,
|
||||
// generate a new slug that does not conflict with the current
|
||||
// collection.
|
||||
async fn generate_slug<'e, E>(executor: E, title: &str) -> SqlResult<String>
|
||||
where
|
||||
E: 'e + Executor<'e, Database = Sqlite>,
|
||||
{
|
||||
lazy_static! {
|
||||
static ref RE_JUSTNUM: Regex = Regex::new(r"-\d+$").unwrap();
|
||||
}
|
||||
lazy_static! {
|
||||
static ref RE_CAPNUM: Regex = Regex::new(r"-(\d+)$").unwrap();
|
||||
}
|
||||
|
||||
let initial_slug = slugify::slugify(title);
|
||||
let sample_slug = RE_JUSTNUM.replace_all(slug, "");
|
||||
let similar_slugs: Vec<JustSlugs> = sqlx::query("SELECT slug FROM pages WHERE slug LIKE '?%';")
|
||||
.bind(&sample_slug)
|
||||
.execute(executor)
|
||||
.await?;
|
||||
let slug_counters = similar_slugs
|
||||
.iter()
|
||||
.map(|slug| RE_CAPNUM.captures(slug.slug))
|
||||
.filter_map(|cap| cap.get(1).unwrap().parse::<u32>().unwrap())
|
||||
.collect();
|
||||
match slug_counters.len() {
|
||||
0 => Ok(initial_slug),
|
||||
_ => {
|
||||
slug_counters.sort_unstable();
|
||||
return Ok(format!("{}-{}", initial_slug, slug_counters.pop() + 1))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn insert_page<'e, E>(executor: E, page: &RawPage) -> SqlResult<i64>
|
||||
where
|
||||
E: 'e + Executor<'e, Database = Sqlite>,
|
||||
{
|
||||
let insert_one_page_sql = include_str!("sql/insert_one_page.sql");
|
||||
Ok(sqlx::query(insert_one_page_sql)
|
||||
.bind(&page.id)
|
||||
.bind(&page.title)
|
||||
.bind(&page.note_id)
|
||||
.bind(&page.creation_date)
|
||||
.bind(&page.updated_date)
|
||||
.bind(&page.lastview_date)
|
||||
.execute(&mut tx)
|
||||
.await?
|
||||
.last_insert_rowid())
|
||||
}
|
||||
|
||||
/// Given a title, insert a new page. All dates are today, and the slug is
|
||||
/// generated as above:
|
||||
async fn insert_new_page_for_title<'e, E>(executor: E, title: &str) -> SqlResult<Page> {
|
||||
|
||||
|
||||
|
||||
|
||||
// /// Fetch page by title
|
||||
// ///
|
||||
// /// This is the most common use case, in which a specific title
|
||||
// /// has been requested of the server via POST. The page always
|
||||
// /// exists; if it doesn't, it will be automatically generated.
|
||||
// pub async fn get_page_by_title(&slug, slug: &title) -> NoteResult<(Page, Notes)> {
|
||||
// let mut tx = self.0.begin().await?;
|
||||
// let maybe_page = sqlx::query_as(select_one_page_by_title)
|
||||
// .bind(&title)
|
||||
// .fetch_one(&tx)
|
||||
// .await;
|
||||
// let page = match maybe_page {
|
||||
// Ok(page) => page,
|
||||
// Err(sqlx::Error::NotFound) => insert_new_page_for_title(tx, title),
|
||||
// Err(a) => return Err(a)
|
||||
// };
|
||||
// let notes = sqlx::query_as(select_note_collection_for_root)
|
||||
// .bind(page.note_id)
|
||||
// .fetch(&tx)
|
||||
// .await?;
|
||||
// tx.commit().await?;
|
||||
// Ok((page, notes))
|
||||
// }
|
||||
//
|
||||
//
|
||||
//
|
||||
//
|
||||
//
|
||||
//
|
||||
//
|
||||
//
|
||||
//
|
||||
//
|
||||
// /// This will erase all the data in the database. Only use this
|
||||
// /// if you're sure that's what you want.
|
||||
// pub async fn reset_database(&self) -> NoteResult<()> {
|
||||
// let initialize_sql = include_str!("sql/initialize_database.sql");
|
||||
// sqlx::query(initialize_sql).execute(&*self.0).await?;
|
||||
// Ok(())
|
||||
// }
|
||||
//
|
||||
// async fn create_new_page(&self, title: &str) -> SqlResult<Page, Vec<Notes>> {
|
||||
// let now = chrono::Utc::now();
|
||||
// let new_note_id = friendly_id::create();
|
||||
//
|
||||
// let mut tx = self.0.begin().await?;
|
||||
// let new_slug = generate_slug(&mut tx, title);
|
||||
// let note_id = insert_note(&mut tx, &new_note_id, &"", &"page").await?;
|
||||
// insert_page(&mut tx, NewPage {
|
||||
// slug,
|
||||
// title,
|
||||
// note_id,
|
||||
// creation_date: now,
|
||||
// updated_date: now,
|
||||
// lastview_date: now
|
||||
// }).await;
|
||||
// tx.commit();
|
||||
// self.fetch_one_page(title)
|
||||
// }
|
||||
//
|
||||
// async fn fetch_one_page(&self, title: &str) ->
|
||||
//
|
||||
// pub async fn fetch_page(&self, title: &str) -> SqlResult<(Page, Vec<Notes>)> {
|
||||
// match self.fetch_one_page(title) {
|
||||
// Ok((page, notes)) => Ok((page, notes)),
|
||||
// Err(NotFound) => self.create_new_page(title),
|
||||
// Err(e) => Err(e)
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// pub async fn fetch_raw_page(&self, id: &str) -> SqlResult<RawPage> {
|
||||
// let select_one_page_sql = include_str!("sql/select_one_page.sql");
|
||||
// sqlx::query_as(select_one_page_sql).bind(&id).fetch_one(&*self.0).await
|
||||
// }
|
||||
//
|
||||
// pub async fn fetch_raw_note(&self, id: &str) -> SqlResult<RawNote> {
|
||||
// let select_one_note_sql = include_str!("sql/select_one_note.sql");
|
||||
// sqlx::query_as(select_one_note_sql).bind(&id).fetch_one(&*self.0).await
|
||||
// }
|
||||
//
|
||||
// pub async fn insert_note(&self, id: &str, content: &str, notetype: &str) -> SqlResult<i64> {
|
||||
// insert_note(&*self.0, id, content, notetype).await
|
||||
// }
|
||||
//
|
||||
// pub async fn update_raw_note(&self, id: &str, content: &str) -> NoteResult<()> {
|
||||
// let update_one_note_sql = include_str!("sql/update_one_note.sql");
|
||||
// let now = chrono::Utc::now();
|
||||
// let rows_updated = sqlx::query(update_one_note_sql)
|
||||
// .bind(&content)
|
||||
// .bind(&now)
|
||||
// .bind(&now)
|
||||
// .bind(&id)
|
||||
// .execute(&*self.0).await?
|
||||
// .rows_affected();
|
||||
// match rows_updated {
|
||||
// 1 => Ok(()),
|
||||
// _ => Err(NoteStoreError::NotFound)
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// // TODO: We're returning the raw page with the raw note id, note
|
||||
// // the friendly ID. Is there a disconnect there? It's making me
|
||||
// // furiously to think.
|
||||
//
|
||||
|
|
@ -0,0 +1,46 @@
|
|||
# Storage layer for Notesmachine
|
||||
|
||||
This library implements the core functionality of Notesmachine and
|
||||
describes that functionality to a storage layer. There's a bit of
|
||||
intermingling in here which can't be helped, although it may make sense
|
||||
in the future to separate the decomposition of the note content into a
|
||||
higher layer.
|
||||
|
||||
Notesmachine storage notes consist of two items: Zettle and Kasten,
|
||||
which are German for "Note" and "Box". Here are the basic rules:
|
||||
|
||||
- Boxes have titles (and date metadata)
|
||||
- Notes have content and a type (and date metadata)
|
||||
- Notes are stored in boxes
|
||||
- Notes are positioned with respect to other notes.
|
||||
- There are two positions:
|
||||
- Siblings, creating lists
|
||||
- Children, creating trees like this one
|
||||
- Notes may have references (pointers) to other boxes
|
||||
- Notes may be moved around
|
||||
- Notes may be deleted
|
||||
- Boxes may be deleted
|
||||
- When a box is renamed, every reference to that box is auto-edited to
|
||||
reflect the change. If a box is renamed to match an existing box, the
|
||||
notes in both boxes are merged.
|
||||
|
||||
Note-to-note relationships form trees, and are kept in a SQL database of
|
||||
(`parent_id`, `child_id`, `position`, `relationship_type`). The
|
||||
`position` is a monotonic index on the parent (that is, every pair
|
||||
(`parent_id`, `position`) must be unique). The `relationship_type` is
|
||||
an enum and can specify that the relationship is *original*,
|
||||
*embedding*, or *referencing*. An embedded or referenced note may be
|
||||
read/write or read-only with respect to the original, but there is only
|
||||
one original note at any time.
|
||||
|
||||
Note-to-box relationships form a graph, and are kept in the SQL database
|
||||
as a collection of *edges* from the note to the box (and naturally
|
||||
vice-versa).
|
||||
|
||||
- Decision: When an original note is deleted, do all references and
|
||||
embeddings also get deleted, or is the oldest one elevated to be a new
|
||||
"original"? Or is that something the user may choose?
|
||||
|
||||
- Decision: Should the merging issue be handled at this layer, or would
|
||||
it make sense to move this to a higher layer, and only provide the
|
||||
hooks for it here?
|
|
@ -1,4 +1,3 @@
|
|||
use sqlx;
|
||||
use thiserror::Error;
|
||||
|
||||
/// All the ways looking up objects can fail
|
||||
|
@ -13,6 +12,6 @@ pub enum NoteStoreError {
|
|||
NotFound,
|
||||
|
||||
/// All other errors from the database.
|
||||
#[error(transparent)]
|
||||
#[error("Sqlx")]
|
||||
DBError(#[from] sqlx::Error),
|
||||
}
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
mod errors;
|
||||
mod reference_parser;
|
||||
mod store;
|
||||
mod structs;
|
||||
|
||||
|
@ -8,6 +9,7 @@ pub use crate::store::NoteStore;
|
|||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use chrono;
|
||||
use tokio;
|
||||
|
||||
async fn fresh_inmemory_database() -> NoteStore {
|
||||
|
@ -19,81 +21,87 @@ mod tests {
|
|||
storagepool
|
||||
}
|
||||
|
||||
// Request for the page by slug.
|
||||
// If the page exists, return it. If the page doesn't, return NotFound
|
||||
|
||||
#[tokio::test(threaded_scheduler)]
|
||||
async fn fetching_unfound_page_works() {
|
||||
async fn fetching_unfound_page_by_slug_works() {
|
||||
let storagepool = fresh_inmemory_database().await;
|
||||
let unfoundpage = storagepool.fetch_raw_page("nonexistent-page").await;
|
||||
let unfoundpage = storagepool.get_page_by_slug("nonexistent-page").await;
|
||||
assert!(unfoundpage.is_err());
|
||||
}
|
||||
|
||||
// Request for the page by title. If the page exists, return it.
|
||||
// If the page doesn't exist, create it then return it anyway.
|
||||
// There should be at least one note, the root note.
|
||||
|
||||
#[tokio::test(threaded_scheduler)]
|
||||
async fn fetching_unfound_note_works() {
|
||||
async fn fetching_unfound_page_by_title_works() {
|
||||
let title = "Nonexistent Page";
|
||||
let now = chrono::Utc::now();
|
||||
let storagepool = fresh_inmemory_database().await;
|
||||
let unfoundnote = storagepool.fetch_raw_note("nonexistent-note").await;
|
||||
assert!(unfoundnote.is_err());
|
||||
let newpageresult = storagepool.get_page_by_title(&title).await;
|
||||
|
||||
assert!(newpageresult.is_ok(), "{:?}", newpageresult);
|
||||
let (newpage, newnotes) = newpageresult.unwrap();
|
||||
|
||||
assert_eq!(newpage.title, title, "{:?}", newpage.title);
|
||||
assert_eq!(newpage.slug, "nonexistent-page");
|
||||
|
||||
assert_eq!(newnotes.len(), 1);
|
||||
assert_eq!(newnotes[0].notetype, "root");
|
||||
assert_eq!(newpage.note_id, newnotes[0].id);
|
||||
|
||||
assert!((newpage.creation_date - now).num_minutes() < 1);
|
||||
assert!((newpage.updated_date - now).num_minutes() < 1);
|
||||
assert!((newpage.lastview_date - now).num_minutes() < 1);
|
||||
assert!(newpage.deleted_date.is_none());
|
||||
}
|
||||
|
||||
fn make_new_note(content: &str) -> row_structs::NewNote {
|
||||
row_structs::NewNoteBuilder::default()
|
||||
.content(content.to_string())
|
||||
.build()
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
#[tokio::test(threaded_scheduler)]
|
||||
async fn cloning_storagepool_is_ok() {
|
||||
async fn can_nest_notes() {
|
||||
let title = "Nonexistent Page";
|
||||
let storagepool = fresh_inmemory_database().await;
|
||||
let storagepool2 = storagepool.clone();
|
||||
let unfoundnote = storagepool2.fetch_raw_note("nonexistent-note").await;
|
||||
assert!(unfoundnote.is_err());
|
||||
let unfoundnote = storagepool.fetch_raw_note("nonexistent-note").await;
|
||||
assert!(unfoundnote.is_err());
|
||||
}
|
||||
let newpageresult = storagepool.get_page_by_title(&title).await;
|
||||
let (_newpage, newnotes) = newpageresult.unwrap();
|
||||
|
||||
#[tokio::test(threaded_scheduler)]
|
||||
async fn can_save_a_note() {
|
||||
let storagepool = fresh_inmemory_database().await;
|
||||
let note_id = storagepool.insert_note("noteid", "notecontent", "note").await;
|
||||
assert!(note_id.is_ok(), "{:?}", note_id);
|
||||
let note_id = note_id.unwrap();
|
||||
assert!(note_id > 0);
|
||||
let root = &newnotes[0];
|
||||
|
||||
let foundnote = storagepool.fetch_raw_note("noteid").await;
|
||||
assert!(foundnote.is_ok(), "{:?}", foundnote);
|
||||
let foundnote = foundnote.unwrap();
|
||||
assert_eq!(foundnote.content, "notecontent");
|
||||
assert_eq!(foundnote.notetype, "note");
|
||||
}
|
||||
let note1 = make_new_note("1");
|
||||
let note1_uuid = storagepool.insert_nested_note(¬e1, &root.uuid, 0).await;
|
||||
assert!(note1_uuid.is_ok(), "{:?}", note1_uuid);
|
||||
let note1_uuid = note1_uuid.unwrap();
|
||||
|
||||
#[tokio::test(threaded_scheduler)]
|
||||
async fn can_save_a_page() {
|
||||
let storagepool = fresh_inmemory_database().await;
|
||||
let page_id = storagepool.insert_page("pageid", "Test page").await;
|
||||
assert!(page_id.is_ok(), "{:?}", page_id);
|
||||
let note2 = make_new_note("2");
|
||||
let note2_uuid = storagepool.insert_nested_note(¬e2, &root.uuid, 0).await;
|
||||
assert!(note2_uuid.is_ok(), "{:?}", note2_uuid);
|
||||
let note2_uuid = note2_uuid.unwrap();
|
||||
|
||||
let page = storagepool.fetch_raw_page("pageid").await;
|
||||
assert!(page.is_ok(), "{:?}", page);
|
||||
let page = page.unwrap();
|
||||
assert_eq!(page.title, "Test page");
|
||||
assert!(page.note_id > 0);
|
||||
}
|
||||
let note3 = make_new_note("3");
|
||||
let note3_uuid = storagepool.insert_nested_note(¬e3, ¬e1_uuid, 0).await;
|
||||
assert!(note3_uuid.is_ok(), "{:?}", note3_uuid);
|
||||
let note3_uuid = note3_uuid.unwrap();
|
||||
|
||||
#[tokio::test(threaded_scheduler)]
|
||||
async fn reports_note_update_failure() {
|
||||
let storagepool = fresh_inmemory_database().await;
|
||||
let note_id = storagepool.insert_note("noteid", "notecontent", "note").await;
|
||||
assert!(note_id.is_ok(), "{:?}", note_id);
|
||||
let note4 = make_new_note("4");
|
||||
let note4_uuid = storagepool.insert_nested_note(¬e4, ¬e2_uuid, 0).await;
|
||||
assert!(note4_uuid.is_ok(), "{:?}", note4_uuid);
|
||||
let note4_uuid = note4_uuid.unwrap();
|
||||
|
||||
let update = storagepool.update_raw_note("badnote", "Bad Note Content").await;
|
||||
assert!(update.is_err());
|
||||
}
|
||||
let newpageresult = storagepool.get_page_by_title(&title).await;
|
||||
let (newpage, newnotes) = newpageresult.unwrap();
|
||||
|
||||
assert_eq!(newpage.title, title, "{:?}", newpage.title);
|
||||
assert_eq!(newpage.slug, "nonexistent-page");
|
||||
|
||||
#[tokio::test(threaded_scheduler)]
|
||||
async fn can_update_a_note() {
|
||||
let storagepool = fresh_inmemory_database().await;
|
||||
let note_id = storagepool.insert_note("noteid", "notecontent", "note").await;
|
||||
assert!(note_id.is_ok(), "{:?}", note_id);
|
||||
|
||||
let update = storagepool.update_raw_note("noteid", "Good Note Content").await;
|
||||
assert!(update.is_ok(), "{:?}", update);
|
||||
|
||||
let note = storagepool.fetch_raw_note("noteid").await;
|
||||
assert!(note.is_ok(), "{:?}", note);
|
||||
let note = note.unwrap();
|
||||
assert_eq!(note.content, "Good Note Content");
|
||||
assert_eq!(newnotes.len(), 5);
|
||||
assert_eq!(newnotes[0].notetype, "root");
|
||||
assert_eq!(newpage.note_id, newnotes[0].id);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,134 @@
|
|||
use comrak::nodes::{AstNode, NodeValue};
|
||||
use comrak::{parse_document, Arena, ComrakOptions};
|
||||
use lazy_static::lazy_static;
|
||||
use regex::bytes::Regex as BytesRegex;
|
||||
use regex::Regex;
|
||||
|
||||
pub struct Finder(pub Vec<String>);
|
||||
|
||||
impl Finder {
|
||||
pub fn new() -> Self {
|
||||
Finder(Vec::new())
|
||||
}
|
||||
|
||||
fn iter_nodes<'a, F>(&mut self, node: &'a AstNode<'a>, f: &F)
|
||||
where
|
||||
F: Fn(&'a AstNode<'a>) -> Option<Vec<String>>,
|
||||
{
|
||||
if let Some(mut v) = f(node) {
|
||||
self.0.append(&mut v);
|
||||
}
|
||||
for c in node.children() {
|
||||
self.iter_nodes(c, f);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Given a content block, return a list of all the page references found
|
||||
/// within the block. The references may need further massaging.
|
||||
pub(crate) fn find_links(document: &str) -> Vec<String> {
|
||||
let arena = Arena::new();
|
||||
let mut finder = Finder::new();
|
||||
let root = parse_document(&arena, document, &ComrakOptions::default());
|
||||
|
||||
finder.iter_nodes(root, &|node| {
|
||||
lazy_static! {
|
||||
static ref RE_REFERENCES: BytesRegex = BytesRegex::new(r"(\[\[([^\]]+)\]\]|(\#[:\w\-]+))").unwrap();
|
||||
}
|
||||
|
||||
match &node.data.borrow().value {
|
||||
NodeValue::Text(ref text) => Some(
|
||||
RE_REFERENCES
|
||||
.captures_iter(text)
|
||||
.map(|t| String::from_utf8_lossy(&t.get(1).unwrap().as_bytes()).to_string())
|
||||
.collect(),
|
||||
),
|
||||
_ => None,
|
||||
}
|
||||
});
|
||||
|
||||
finder.0
|
||||
}
|
||||
|
||||
fn recase(title: &str) -> String {
|
||||
lazy_static! {
|
||||
static ref RE_PASS1: Regex = Regex::new(r"(?P<s>.)(?P<n>[A-Z][a-z]+)").unwrap();
|
||||
static ref RE_PASS2: Regex = Regex::new(r"(?P<s>[[:lower:]]|\d)(?P<n>[[:upper:]])").unwrap();
|
||||
static ref RE_PASS4: Regex = Regex::new(r"(?P<s>[a-z])(?P<n>\d)").unwrap();
|
||||
static ref RE_PASS3: Regex = Regex::new(r"(:|_|-| )+").unwrap();
|
||||
}
|
||||
|
||||
// This should panic if misused, so... :-)
|
||||
let pass = title.to_string();
|
||||
let pass = pass.strip_prefix("#").unwrap();
|
||||
|
||||
let pass = RE_PASS1.replace_all(&pass, "$s $n");
|
||||
let pass = RE_PASS4.replace_all(&pass, "$s $n");
|
||||
let pass = RE_PASS2.replace_all(&pass, "$s $n");
|
||||
RE_PASS3.replace_all(&pass, " ").trim().to_string()
|
||||
}
|
||||
|
||||
fn build_page_titles(references: &[String]) -> Vec<String> {
|
||||
references
|
||||
.iter()
|
||||
.map(|s| match s.chars().next() {
|
||||
Some('#') => recase(s),
|
||||
Some('[') => s.strip_prefix("[[").unwrap().strip_suffix("]]").unwrap().to_string(),
|
||||
Some(_) => s.clone(),
|
||||
_ => "".to_string(),
|
||||
})
|
||||
.filter(|s| s.is_empty())
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub(crate) fn build_references(content: &str) -> Vec<String> {
|
||||
build_page_titles(&find_links(content))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn finds_expected() {
|
||||
let sample = r###"
|
||||
# Header
|
||||
- NotATest 1
|
||||
- [[Test 2]]
|
||||
- #Test3
|
||||
- #TestFourAndSo
|
||||
- #Test-six-is-six
|
||||
- #recipe:entree
|
||||
- #
|
||||
- #-_-
|
||||
- #--Prefixed
|
||||
- [[]]
|
||||
|
||||
But *[[Test Seven]]* isn't. And *#Test_Eight____is_Messed-up*
|
||||
And [[Test Bite Me]] is the worst.
|
||||
Right? [[
|
||||
]]
|
||||
"###;
|
||||
let res = build_page_titles(&find_links(sample));
|
||||
let expected = vec![
|
||||
"Test 2",
|
||||
"Test 3",
|
||||
"Test Four And So",
|
||||
"Test six is six",
|
||||
"recipe entree",
|
||||
"Prefixed",
|
||||
"Test Seven",
|
||||
"Test Eight is Messed up",
|
||||
"Test Bite Me",
|
||||
];
|
||||
assert!(res.iter().eq(expected.iter()), "{:?}", res);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn doesnt_crash_on_empty() {
|
||||
let sample = "";
|
||||
let res = build_page_titles(&find_links(sample));
|
||||
let expected: Vec<String> = vec![];
|
||||
assert!(res.iter().eq(expected.iter()), "{:?}", res);
|
||||
}
|
||||
}
|
|
@ -1,8 +1,8 @@
|
|||
DROP TABLE IF EXISTS notes;
|
||||
DROP TABLE IF EXISTS note_relationships;
|
||||
DROP TABLE IF EXISTS pages;
|
||||
DROP TABLE IF EXISTS favorites;
|
||||
DROP TABLE IF EXISTS page_relationships;
|
||||
DROP TABLE IF EXISTS favorites;
|
||||
|
||||
CREATE TABLE notes (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
|
|
|
@ -0,0 +1,86 @@
|
|||
-- This is undoubtedly one of the more complex bits of code I've
|
||||
-- written recently, and I do wish there had been macros because
|
||||
-- there's a lot of hand-written, copy-pasted code here around the
|
||||
-- basic content of a note; it would have been nice to be able to DRY
|
||||
-- that out.
|
||||
|
||||
-- This expression creates a table, 'notetree', that contains all of
|
||||
-- the notes nested under a page. Each entry in the table includes
|
||||
-- the note's parent's internal and external ids so that applications
|
||||
-- can build an actual tree out of a vec of these things.
|
||||
|
||||
-- TODO: Extensive testing to validate that the nodes are delivered
|
||||
-- *in nesting order* to the client.
|
||||
|
||||
SELECT
|
||||
id,
|
||||
uuid,
|
||||
parent_id,
|
||||
parent_uuid,
|
||||
content,
|
||||
position,
|
||||
notetype,
|
||||
creation_date,
|
||||
updated_date,
|
||||
lastview_date,
|
||||
deleted_date
|
||||
|
||||
FROM (
|
||||
|
||||
WITH RECURSIVE notetree(
|
||||
id,
|
||||
uuid,
|
||||
parent_id,
|
||||
parent_uuid,
|
||||
content,
|
||||
position,
|
||||
notetype,
|
||||
creation_date,
|
||||
updated_date,
|
||||
lastview_date,
|
||||
deleted_date,
|
||||
cycle) AS
|
||||
|
||||
-- ROOT expression
|
||||
(SELECT
|
||||
notes.id,
|
||||
notes.uuid,
|
||||
notes.id AS parent_id,
|
||||
notes.uuid AS parent_uuid,
|
||||
notes.content,
|
||||
0, -- Root notes are always in position 0
|
||||
notes.notetype,
|
||||
notes.creation_date,
|
||||
notes.updated_date,
|
||||
notes.lastview_date,
|
||||
notes.deleted_date,
|
||||
','||notes.id||',' -- Cycle monitor
|
||||
FROM notes
|
||||
WHERE notes.id = ? AND notes.notetype = "root"
|
||||
|
||||
-- RECURSIVE expression
|
||||
UNION SELECT
|
||||
notes.id,
|
||||
notes.uuid,
|
||||
notetree.id AS parent_id,
|
||||
notetree.uuid AS parent_uuid,
|
||||
notes.content,
|
||||
note_relationships.position,
|
||||
notes.notetype,
|
||||
notes.creation_date,
|
||||
notes.updated_date,
|
||||
notes.lastview_date,
|
||||
notes.deleted_date,
|
||||
notetree.cycle||notes.id||','
|
||||
FROM notes
|
||||
INNER JOIN note_relationships ON notes.id = note_relationships.note_id
|
||||
-- For a given ID in the level of notetree in *this* recursion,
|
||||
-- we want each note's branches one level down.
|
||||
INNER JOIN notetree ON note_relationships.parent_id = notetree.id
|
||||
-- And we want to make sure there are no cycles. There shouldn't
|
||||
-- be; we're supposed to prevent those. But you never know.
|
||||
WHERE
|
||||
notetree.cycle NOT LIKE '%,'||notes.id||',%'
|
||||
ORDER BY note_relationships.position)
|
||||
SELECT * from notetree);
|
||||
|
|
@ -1,11 +1,30 @@
|
|||
use crate::errors::NoteStoreError;
|
||||
use crate::structs::{RawNote, RawPage};
|
||||
use chrono;
|
||||
use friendly_id;
|
||||
use sqlx;
|
||||
use sqlx::{sqlite::{Sqlite, SqlitePool}, Executor, Done};
|
||||
use crate::reference_parser::build_references;
|
||||
use crate::structs::{
|
||||
JustId, JustSlugs, NewNote, NewNoteBuilder, NewPage, NewPageBuilder, NoteRelationship, PageTitles, RawNote,
|
||||
RawPage, RowCount,
|
||||
};
|
||||
use lazy_static::lazy_static;
|
||||
use regex::Regex;
|
||||
use shrinkwraprs::Shrinkwrap;
|
||||
use slug::slugify;
|
||||
use sqlx::{
|
||||
sqlite::{Sqlite, SqlitePool, SqliteRow},
|
||||
Done, Executor, Row,
|
||||
};
|
||||
use std::collections::HashMap;
|
||||
use std::collections::HashSet;
|
||||
use std::sync::Arc;
|
||||
|
||||
#[derive(Shrinkwrap, Copy, Clone)]
|
||||
struct PageId(i64);
|
||||
|
||||
#[derive(Shrinkwrap, Copy, Clone)]
|
||||
struct NoteId(i64);
|
||||
|
||||
#[derive(Shrinkwrap, Copy, Clone)]
|
||||
struct ParentId(i64);
|
||||
|
||||
/// A handle to our Sqlite database.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct NoteStore(Arc<SqlitePool>);
|
||||
|
@ -13,92 +32,671 @@ pub struct NoteStore(Arc<SqlitePool>);
|
|||
type NoteResult<T> = core::result::Result<T, NoteStoreError>;
|
||||
type SqlResult<T> = sqlx::Result<T>;
|
||||
|
||||
async fn insert_note<'e, E>(executor: E, id: &str, content: &str, notetype: &str) -> SqlResult<i64>
|
||||
where
|
||||
E: 'e + Executor<'e, Database = Sqlite>,
|
||||
{
|
||||
let insert_one_note_sql = include_str!("sql/insert_one_note.sql");
|
||||
let now = chrono::Utc::now();
|
||||
Ok(sqlx::query(insert_one_note_sql)
|
||||
.bind(&id)
|
||||
.bind(&content)
|
||||
.bind(¬etype)
|
||||
.bind(&now)
|
||||
.bind(&now)
|
||||
.bind(&now)
|
||||
.execute(executor)
|
||||
.await?
|
||||
.last_insert_rowid())
|
||||
}
|
||||
|
||||
impl NoteStore {
|
||||
pub async fn new(url: &str) -> NoteResult<Self> {
|
||||
let pool = SqlitePool::connect(url).await?;
|
||||
Ok(NoteStore(Arc::new(pool)))
|
||||
}
|
||||
|
||||
/// This will erase all the data in the database. Only use this
|
||||
/// if you're sure that's what you want.
|
||||
// Erase all the data in the database and restore it
|
||||
// to its original empty form. Do not use unless you
|
||||
// really, really want that to happen.
|
||||
pub async fn reset_database(&self) -> NoteResult<()> {
|
||||
let initialize_sql = include_str!("sql/initialize_database.sql");
|
||||
sqlx::query(initialize_sql).execute(&*self.0).await?;
|
||||
reset_database(&*self.0).await.map_err(NoteStoreError::DBError)
|
||||
}
|
||||
|
||||
/// Fetch page by slug
|
||||
///
|
||||
/// Supports the use case of the user navigating to a known place
|
||||
/// via a bookmark or other URL. Since the title isn't clear from
|
||||
/// the slug, the slug is insufficient to generate a new page, so
|
||||
/// this use case says that in the event of a failure to find the
|
||||
/// requested page, return a basic NotFound.
|
||||
pub async fn get_page_by_slug(&self, slug: &str) -> NoteResult<(RawPage, Vec<RawNote>)> {
|
||||
// let select_note_collection_for_root = include_str!("sql/select_note_collection_for_root.sql");
|
||||
let mut tx = self.0.begin().await?;
|
||||
let page = select_page_by_slug(&mut tx, slug).await?;
|
||||
let note_id = page.note_id;
|
||||
let notes = select_note_collection_from_root(&mut tx, note_id).await?;
|
||||
tx.commit().await?;
|
||||
Ok((page, notes))
|
||||
}
|
||||
|
||||
/// Fetch page by title
|
||||
///
|
||||
/// Supports the use case of the user navigating to a page via
|
||||
/// the page's formal title. Since the title is the key reference
|
||||
/// of the system, if no page with that title is found, a page with
|
||||
/// that title is generated automatically.
|
||||
pub async fn get_page_by_title(&self, title: &str) -> NoteResult<(RawPage, Vec<RawNote>)> {
|
||||
let mut tx = self.0.begin().await?;
|
||||
let (page, notes) = match select_page_by_title(&mut tx, title).await {
|
||||
Ok(page) => {
|
||||
let note_id = page.note_id;
|
||||
(page, select_note_collection_from_root(&mut tx, note_id).await?)
|
||||
}
|
||||
Err(sqlx::Error::RowNotFound) => {
|
||||
let page = {
|
||||
let new_root_note = create_unique_root_note();
|
||||
let new_root_note_id = insert_one_new_note(&mut tx, &new_root_note).await?;
|
||||
let new_page_slug = generate_slug(&mut tx, title).await?;
|
||||
let new_page = create_new_page_for(&title, &new_page_slug, new_root_note_id);
|
||||
let _ = insert_one_new_page(&mut tx, &new_page).await?;
|
||||
select_page_by_title(&mut tx, &title).await?
|
||||
};
|
||||
let note_id = page.note_id;
|
||||
(page, select_note_collection_from_root(&mut tx, note_id).await?)
|
||||
}
|
||||
Err(e) => return Err(NoteStoreError::DBError(e)),
|
||||
};
|
||||
tx.commit().await?;
|
||||
Ok((page, notes))
|
||||
}
|
||||
|
||||
// TODO: Make sure the position is sane.
|
||||
/// Insert a note as the child of an existing note, at a set position.
|
||||
pub async fn insert_nested_note(
|
||||
&self,
|
||||
note: &NewNote,
|
||||
parent_note_uuid: &str,
|
||||
position: i64,
|
||||
) -> NoteResult<String> {
|
||||
let mut new_note = note.clone();
|
||||
new_note.uuid = friendly_id::create();
|
||||
let references = build_references(¬e.content);
|
||||
let mut tx = self.0.begin().await?;
|
||||
|
||||
// Start by building the note and putting it into its relationship.
|
||||
let parent_id: ParentId = select_note_id_for_uuid(&mut tx, parent_note_uuid).await?;
|
||||
let parent_max_position = assert_max_child_position_for_note(&mut tx, parent_id).await?;
|
||||
let position = if position > parent_max_position {
|
||||
parent_max_position + 1
|
||||
} else {
|
||||
position
|
||||
};
|
||||
let new_note_id = insert_one_new_note(&mut tx, &new_note).await?;
|
||||
let _ = make_room_for_new_note(&mut tx, parent_id, position).await?;
|
||||
let _ = insert_note_to_note_relationship(&mut tx, parent_id, new_note_id, position, "note").await?;
|
||||
|
||||
// From the references, make lists of pages that exist, and pages
|
||||
// that do not.
|
||||
let found_references = find_all_page_references_for(&mut tx, &references).await?;
|
||||
let new_references = diff_references(&references, &found_references);
|
||||
let mut known_reference_ids: Vec<PageId> = Vec::new();
|
||||
|
||||
// Create the pages that don't exist
|
||||
for one_reference in new_references.iter() {
|
||||
let new_root_note = create_unique_root_note();
|
||||
let new_root_note_id = insert_one_new_note(&mut tx, &new_root_note).await?;
|
||||
let new_page_slug = generate_slug(&mut tx, &one_reference).await?;
|
||||
let new_page = create_new_page_for(&one_reference, &new_page_slug, new_root_note_id);
|
||||
known_reference_ids.push(insert_one_new_page(&mut tx, &new_page).await?)
|
||||
}
|
||||
|
||||
// And associate the note with all the pages.
|
||||
known_reference_ids.append(&mut found_references.iter().map(|r| PageId(r.id)).collect());
|
||||
let _ = insert_note_to_page_relationships(&mut tx, new_note_id, &known_reference_ids).await?;
|
||||
|
||||
tx.commit().await?;
|
||||
Ok(new_note.uuid)
|
||||
}
|
||||
|
||||
// This doesn't do anything with the references, as those are
|
||||
// dependent entirely on the *content*, and not the *position*, of
|
||||
// the note and the referenced page.
|
||||
//
|
||||
// TODO: Ensure the position is sane.
|
||||
/// Move a note from one location to another.
|
||||
pub async fn move_note(
|
||||
&self,
|
||||
note_uuid: &str,
|
||||
old_parent_uuid: &str,
|
||||
new_parent_uuid: &str,
|
||||
new_position: i64,
|
||||
) -> NoteResult<()> {
|
||||
let all_uuids = vec![note_uuid, old_parent_uuid, new_parent_uuid];
|
||||
let mut tx = self.0.begin().await?;
|
||||
|
||||
// This is one of the few cases where we we're getting IDs for
|
||||
// notes, but the nature of the ID isn't known at this time.
|
||||
// This has to be handled manually, in the next paragraph
|
||||
// below.
|
||||
let found_id_vec = bulk_select_ids_for_note_uuids(&mut tx, &all_uuids).await?;
|
||||
let found_ids: HashMap<String, i64> = found_id_vec.into_iter().collect();
|
||||
if found_ids.len() != 3 {
|
||||
return Err(NoteStoreError::NotFound);
|
||||
}
|
||||
|
||||
let old_parent_id = ParentId(*found_ids.get(old_parent_uuid).unwrap());
|
||||
let new_parent_id = ParentId(*found_ids.get(new_parent_uuid).unwrap());
|
||||
let note_id = NoteId(*found_ids.get(note_uuid).unwrap());
|
||||
|
||||
let old_note = get_note_to_note_relationship(&mut tx, old_parent_id, note_id).await?;
|
||||
let old_note_position = old_note.position;
|
||||
let old_note_nature = &old_note.nature;
|
||||
|
||||
let _ = delete_note_to_note_relationship(&mut tx, old_parent_id, note_id).await?;
|
||||
let _ = close_hole_for_deleted_note(&mut tx, old_parent_id, old_note_position).await?;
|
||||
let parent_max_position = assert_max_child_position_for_note(&mut tx, new_parent_id).await?;
|
||||
let new_position = if new_position > parent_max_position {
|
||||
parent_max_position + 1
|
||||
} else {
|
||||
new_position
|
||||
};
|
||||
let _ = make_room_for_new_note(&mut tx, new_parent_id, new_position).await?;
|
||||
let _ =
|
||||
insert_note_to_note_relationship(&mut tx, new_parent_id, note_id, new_position, old_note_nature).await?;
|
||||
tx.commit().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn fetch_raw_page(&self, id: &str) -> SqlResult<RawPage> {
|
||||
let select_one_page_sql = include_str!("sql/select_one_page.sql");
|
||||
sqlx::query_as(select_one_page_sql).bind(&id).fetch_one(&*self.0).await
|
||||
/// Embed or reference a note from a different location.
|
||||
pub async fn reference_or_embed_note(
|
||||
&self,
|
||||
note_uuid: &str,
|
||||
new_parent_uuid: &str,
|
||||
new_position: i64,
|
||||
new_nature: &str,
|
||||
) -> NoteResult<()> {
|
||||
let mut tx = self.0.begin().await?;
|
||||
let existing_note_id: NoteId = NoteId(select_note_id_for_uuid(&mut tx, note_uuid).await?.0);
|
||||
let new_parent_id: ParentId = select_note_id_for_uuid(&mut tx, new_parent_uuid).await?;
|
||||
let _ = make_room_for_new_note(&mut tx, new_parent_id, new_position).await?;
|
||||
let _ = insert_note_to_note_relationship(&mut tx, new_parent_id, existing_note_id, new_position, new_nature)
|
||||
.await?;
|
||||
tx.commit().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn fetch_raw_note(&self, id: &str) -> SqlResult<RawNote> {
|
||||
let select_one_note_sql = include_str!("sql/select_one_note.sql");
|
||||
sqlx::query_as(select_one_note_sql).bind(&id).fetch_one(&*self.0).await
|
||||
/// Delete a note
|
||||
pub async fn delete_note(&self, note_uuid: &str, note_parent_uuid: &str) -> NoteResult<()> {
|
||||
let mut tx = self.0.begin().await?;
|
||||
let condemned_note_id: NoteId = NoteId(select_note_id_for_uuid(&mut tx, note_uuid).await?.0);
|
||||
let note_parent_id: ParentId = select_note_id_for_uuid(&mut tx, note_parent_uuid).await?;
|
||||
let _ = delete_note_to_note_relationship(&mut tx, note_parent_id, condemned_note_id);
|
||||
if count_existing_note_relationships(&mut tx, condemned_note_id).await? == 0 {
|
||||
let _ = delete_note_to_page_relationships(&mut tx, condemned_note_id).await?;
|
||||
let _ = delete_note(&mut tx, condemned_note_id).await?;
|
||||
}
|
||||
tx.commit().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Insert a bare note row, returning its internal row id.
///
/// Thin wrapper delegating to the module-level `insert_note` helper,
/// run directly against the pool (no transaction).
pub async fn insert_note(&self, id: &str, content: &str, notetype: &str) -> SqlResult<i64> {
    insert_note(&*self.0, id, content, notetype).await
}
|
||||
|
||||
pub async fn update_raw_note(&self, id: &str, content: &str) -> NoteResult<()> {
|
||||
let update_one_note_sql = include_str!("sql/update_one_note.sql");
|
||||
let now = chrono::Utc::now();
|
||||
let rows_updated = sqlx::query(update_one_note_sql)
|
||||
.bind(&content)
|
||||
.bind(&now)
|
||||
.bind(&now)
|
||||
.bind(&id)
|
||||
.execute(&*self.0).await?
|
||||
.rows_affected();
|
||||
match rows_updated {
|
||||
1 => Ok(()),
|
||||
_ => Err(NoteStoreError::NotFound)
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: We're returning the raw page with the raw note id, note
|
||||
// the friendly ID. Is there a disconnect there? It's making me
|
||||
// furiously to think.
|
||||
pub async fn insert_page(&self, id: &str, title: &str) -> SqlResult<i64> {
|
||||
let insert_one_page_sql = include_str!("sql/insert_one_page.sql");
|
||||
let new_note_id = friendly_id::create();
|
||||
let now = chrono::Utc::now();
|
||||
/// Update a note's content
|
||||
pub async fn update_note_content(&self, note_uuid: &str, content: &str) -> NoteResult<()> {
|
||||
let references = build_references(&content);
|
||||
|
||||
let mut tx = self.0.begin().await?;
|
||||
|
||||
let note_id = insert_note(&mut tx, &new_note_id, &"", &"page").await?;
|
||||
let note_id: NoteId = NoteId(select_note_id_for_uuid(&mut tx, note_uuid).await?.0);
|
||||
let _ = update_note_content(&mut tx, note_id, &content).await?;
|
||||
|
||||
let page_id = sqlx::query(insert_one_page_sql)
|
||||
.bind(&id)
|
||||
.bind(&title)
|
||||
.bind(¬e_id)
|
||||
.bind(&now)
|
||||
.bind(&now)
|
||||
.bind(&now)
|
||||
.execute(&mut tx)
|
||||
.await?
|
||||
.last_insert_rowid();
|
||||
let found_references = find_all_page_references_for(&mut tx, &references).await?;
|
||||
let new_references = diff_references(&references, &found_references);
|
||||
let mut known_reference_ids: Vec<PageId> = Vec::new();
|
||||
|
||||
// Create the pages that don't exist
|
||||
for one_reference in new_references.iter() {
|
||||
let new_root_note = create_unique_root_note();
|
||||
let new_root_note_id = insert_one_new_note(&mut tx, &new_root_note).await?;
|
||||
let new_page_slug = generate_slug(&mut tx, &one_reference).await?;
|
||||
let new_page = create_new_page_for(&one_reference, &new_page_slug, new_root_note_id);
|
||||
known_reference_ids.push(insert_one_new_page(&mut tx, &new_page).await?)
|
||||
}
|
||||
|
||||
// And associate the note with all the pages.
|
||||
known_reference_ids.append(&mut found_references.iter().map(|r| PageId(r.id)).collect());
|
||||
let _ = insert_note_to_page_relationships(&mut tx, note_id, &known_reference_ids).await?;
|
||||
|
||||
tx.commit().await?;
|
||||
Ok(page_id)
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
// ___ _ _
|
||||
// | _ \_ _(_)_ ____ _| |_ ___
|
||||
// | _/ '_| \ V / _` | _/ -_)
|
||||
// |_| |_| |_|\_/\__,_|\__\___|
|
||||
//
|
||||
|
||||
// I'm putting a lot of faith in Rust's ability to inline stuff. I'm
|
||||
// sure this is okay. But really, this lets the API be clean and
|
||||
// coherent and easily readable, and hides away the gnarliness of some
|
||||
// of the SQL queries.
|
||||
|
||||
async fn reset_database<'a, E>(executor: E) -> SqlResult<()>
|
||||
where
|
||||
E: Executor<'a, Database = Sqlite>,
|
||||
{
|
||||
let initialize_sql = include_str!("sql/initialize_database.sql");
|
||||
sqlx::query(initialize_sql).execute(executor).await.map(|_| ())
|
||||
}
|
||||
|
||||
async fn select_page_by_slug<'a, E>(executor: E, slug: &str) -> SqlResult<RawPage>
|
||||
where
|
||||
E: Executor<'a, Database = Sqlite>,
|
||||
{
|
||||
let select_one_page_by_slug_sql = concat!(
|
||||
"SELECT id, title, slug, note_id, creation_date, updated_date, ",
|
||||
"lastview_date, deleted_date FROM pages WHERE slug=?;"
|
||||
);
|
||||
Ok(sqlx::query_as(&select_one_page_by_slug_sql)
|
||||
.bind(&slug)
|
||||
.fetch_one(executor)
|
||||
.await?)
|
||||
}
|
||||
|
||||
async fn select_page_by_title<'a, E>(executor: E, title: &str) -> SqlResult<RawPage>
|
||||
where
|
||||
E: Executor<'a, Database = Sqlite>,
|
||||
{
|
||||
let select_one_page_by_title_sql = concat!(
|
||||
"SELECT id, title, slug, note_id, creation_date, updated_date, ",
|
||||
"lastview_date, deleted_date FROM pages WHERE title=?;"
|
||||
);
|
||||
Ok(sqlx::query_as(&select_one_page_by_title_sql)
|
||||
.bind(&title)
|
||||
.fetch_one(executor)
|
||||
.await?)
|
||||
}
|
||||
|
||||
async fn select_note_id_for_uuid<'a, E>(executor: E, uuid: &str) -> SqlResult<ParentId>
|
||||
where
|
||||
E: Executor<'a, Database = Sqlite>,
|
||||
{
|
||||
let select_note_id_for_uuid_sql = "SELECT id FROM notes WHERE uuid = ?";
|
||||
let id: JustId = sqlx::query_as(&select_note_id_for_uuid_sql)
|
||||
.bind(&uuid)
|
||||
.fetch_one(executor)
|
||||
.await?;
|
||||
Ok(ParentId(id.id))
|
||||
}
|
||||
|
||||
async fn make_room_for_new_note<'a, E>(executor: E, parent_id: ParentId, position: i64) -> SqlResult<()>
|
||||
where
|
||||
E: Executor<'a, Database = Sqlite>,
|
||||
{
|
||||
let make_room_for_new_note_sql = concat!(
|
||||
"UPDATE note_relationships ",
|
||||
"SET position = position + 1 ",
|
||||
"WHERE position >= ? and parent_id = ?;"
|
||||
);
|
||||
|
||||
sqlx::query(make_room_for_new_note_sql)
|
||||
.bind(&position)
|
||||
.bind(&*parent_id)
|
||||
.execute(executor)
|
||||
.await
|
||||
.map(|_| ())
|
||||
}
|
||||
|
||||
async fn insert_note_to_note_relationship<'a, E>(
|
||||
executor: E,
|
||||
parent_id: ParentId,
|
||||
note_id: NoteId,
|
||||
position: i64,
|
||||
nature: &str,
|
||||
) -> SqlResult<()>
|
||||
where
|
||||
E: Executor<'a, Database = Sqlite>,
|
||||
{
|
||||
let insert_note_to_note_relationship_sql = concat!(
|
||||
"INSERT INTO note_relationships (parent_id, note_id, position, nature) ",
|
||||
"values (?, ?, ?, ?)"
|
||||
);
|
||||
|
||||
sqlx::query(insert_note_to_note_relationship_sql)
|
||||
.bind(&*parent_id)
|
||||
.bind(&*note_id)
|
||||
.bind(&position)
|
||||
.bind(&nature)
|
||||
.execute(executor)
|
||||
.await
|
||||
.map(|_| ())
|
||||
}
|
||||
|
||||
async fn select_note_collection_from_root<'a, E>(executor: E, root: i64) -> SqlResult<Vec<RawNote>>
|
||||
where
|
||||
E: Executor<'a, Database = Sqlite>,
|
||||
{
|
||||
let select_note_collection_from_root_sql = include_str!("sql/select_note_collection_from_root.sql");
|
||||
Ok(sqlx::query_as(&select_note_collection_from_root_sql)
|
||||
.bind(&root)
|
||||
.fetch_all(executor)
|
||||
.await?)
|
||||
}
|
||||
|
||||
async fn insert_one_new_note<'a, E>(executor: E, note: &NewNote) -> SqlResult<NoteId>
|
||||
where
|
||||
E: Executor<'a, Database = Sqlite>,
|
||||
{
|
||||
let insert_one_note_sql = concat!(
|
||||
"INSERT INTO notes ( ",
|
||||
" uuid, ",
|
||||
" content, ",
|
||||
" notetype, ",
|
||||
" creation_date, ",
|
||||
" updated_date, ",
|
||||
" lastview_date) ",
|
||||
"VALUES (?, ?, ?, ?, ?, ?);"
|
||||
);
|
||||
|
||||
Ok(NoteId(
|
||||
sqlx::query(insert_one_note_sql)
|
||||
.bind(¬e.uuid)
|
||||
.bind(¬e.content)
|
||||
.bind(¬e.notetype)
|
||||
.bind(¬e.creation_date)
|
||||
.bind(¬e.updated_date)
|
||||
.bind(¬e.lastview_date)
|
||||
.execute(executor)
|
||||
.await?
|
||||
.last_insert_rowid(),
|
||||
))
|
||||
}
|
||||
|
||||
// Given a possible slug, find the slug with the highest
|
||||
// uniquification number, and return that number, if any.
|
||||
fn find_maximal_slug(slugs: &[JustSlugs]) -> Option<u32> {
|
||||
lazy_static! {
|
||||
static ref RE_CAP_NUM: Regex = Regex::new(r"-(\d+)$").unwrap();
|
||||
}
|
||||
|
||||
if slugs.is_empty() {
|
||||
return None;
|
||||
}
|
||||
|
||||
let mut slug_counters: Vec<u32> = slugs
|
||||
.iter()
|
||||
.filter_map(|slug| RE_CAP_NUM.captures(&slug.slug))
|
||||
.map(|cap| cap.get(1).unwrap().as_str().parse::<u32>().unwrap())
|
||||
.collect();
|
||||
slug_counters.sort_unstable();
|
||||
slug_counters.pop()
|
||||
}
|
||||
|
||||
// Given an initial string and an existing collection of slugs,
|
||||
// generate a new slug that does not conflict with the current
|
||||
// collection.
|
||||
async fn generate_slug<'a, E>(executor: E, title: &str) -> SqlResult<String>
|
||||
where
|
||||
E: Executor<'a, Database = Sqlite>,
|
||||
{
|
||||
lazy_static! {
|
||||
static ref RE_STRIP_NUM: Regex = Regex::new(r"-\d+$").unwrap();
|
||||
}
|
||||
|
||||
let initial_slug = slugify(title);
|
||||
let sample_slug = RE_STRIP_NUM.replace_all(&initial_slug, "");
|
||||
let slug_finder_sql = "SELECT slug FROM pages WHERE slug LIKE '?%';";
|
||||
let similar_slugs: Vec<JustSlugs> = sqlx::query_as(&slug_finder_sql)
|
||||
.bind(&*sample_slug)
|
||||
.fetch_all(executor)
|
||||
.await?;
|
||||
let maximal_slug = find_maximal_slug(&similar_slugs);
|
||||
match maximal_slug {
|
||||
None => Ok(initial_slug),
|
||||
Some(max_slug) => Ok(format!("{}-{}", initial_slug, max_slug + 1)),
|
||||
}
|
||||
}
|
||||
|
||||
async fn insert_one_new_page<'a, E>(executor: E, page: &NewPage) -> SqlResult<PageId>
|
||||
where
|
||||
E: Executor<'a, Database = Sqlite>,
|
||||
{
|
||||
let insert_one_page_sql = concat!(
|
||||
"INSERT INTO pages ( ",
|
||||
" slug, ",
|
||||
" title, ",
|
||||
" note_id, ",
|
||||
" creation_date, ",
|
||||
" updated_date, ",
|
||||
" lastview_date) ",
|
||||
"VALUES (?, ?, ?, ?, ?, ?);"
|
||||
);
|
||||
|
||||
Ok(PageId(
|
||||
sqlx::query(insert_one_page_sql)
|
||||
.bind(&page.slug)
|
||||
.bind(&page.title)
|
||||
.bind(&page.note_id)
|
||||
.bind(&page.creation_date)
|
||||
.bind(&page.updated_date)
|
||||
.bind(&page.lastview_date)
|
||||
.execute(executor)
|
||||
.await?
|
||||
.last_insert_rowid(),
|
||||
))
|
||||
}
|
||||
|
||||
async fn insert_note_to_page_relationships<'a, E>(
|
||||
executor: E,
|
||||
note_id: NoteId,
|
||||
references: &[PageId],
|
||||
) -> SqlResult<()>
|
||||
where
|
||||
E: Executor<'a, Database = Sqlite>,
|
||||
{
|
||||
let insert_note_page_references_sql = "INSERT INTO page_relationships (note_id, page_id) VALUES ".to_string()
|
||||
+ &["(?, ?)"].repeat(references.len()).join(", ")
|
||||
+ &";".to_string();
|
||||
|
||||
let mut request = sqlx::query(&insert_note_page_references_sql);
|
||||
for reference in references {
|
||||
request = request.bind(*note_id).bind(**reference);
|
||||
}
|
||||
|
||||
request.execute(executor).await.map(|_| ())
|
||||
}
|
||||
|
||||
// For a given collection of uuids, retrieve the internal ID used by
|
||||
// the database.
|
||||
async fn bulk_select_ids_for_note_uuids<'a, E>(executor: E, ids: &[&str]) -> SqlResult<Vec<(String, i64)>>
|
||||
where
|
||||
E: Executor<'a, Database = Sqlite>,
|
||||
{
|
||||
let bulk_select_ids_for_note_uuids_sql = "SELECT uuid, id FROM notes WHERE uuid IN (".to_string()
|
||||
+ &["?"].repeat(ids.len()).join(",")
|
||||
+ &");".to_string();
|
||||
|
||||
let mut request = sqlx::query(&bulk_select_ids_for_note_uuids_sql);
|
||||
for id in ids.iter() {
|
||||
request = request.bind(id);
|
||||
}
|
||||
Ok(request
|
||||
.try_map(|row: SqliteRow| {
|
||||
let l = row.try_get::<String, _>(0)?;
|
||||
let r = row.try_get::<i64, _>(1)?;
|
||||
Ok((l, r))
|
||||
})
|
||||
.fetch_all(executor)
|
||||
.await?
|
||||
.into_iter()
|
||||
.collect())
|
||||
}
|
||||
|
||||
// Used by move_note to identify the single note to note relationship
|
||||
// by the original parent and child pair. Used mostly to find the
|
||||
// position for recalculation, to create a new gap or close an old
|
||||
// one.
|
||||
async fn get_note_to_note_relationship<'a, E>(
|
||||
executor: E,
|
||||
parent_id: ParentId,
|
||||
note_id: NoteId,
|
||||
) -> SqlResult<NoteRelationship>
|
||||
where
|
||||
E: Executor<'a, Database = Sqlite>,
|
||||
{
|
||||
let get_note_to_note_relationship_sql = concat!(
|
||||
"SELECT parent_id, note_id, position, nature ",
|
||||
"FROM note_relationships ",
|
||||
"WHERE parent_id = ? and note_id = ? ",
|
||||
"LIMIT 1"
|
||||
);
|
||||
sqlx::query_as(get_note_to_note_relationship_sql)
|
||||
.bind(&*parent_id)
|
||||
.bind(&*note_id)
|
||||
.fetch_one(executor)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn delete_note_to_note_relationship<'a, E>(executor: E, parent_id: ParentId, note_id: NoteId) -> SqlResult<()>
|
||||
where
|
||||
E: Executor<'a, Database = Sqlite>,
|
||||
{
|
||||
let delete_note_to_note_relationship_sql = concat!(
|
||||
"DELETE FROM note_relationships ",
|
||||
"WHERE parent_id = ? and note_id = ? "
|
||||
);
|
||||
|
||||
let count = sqlx::query(delete_note_to_note_relationship_sql)
|
||||
.bind(&*parent_id)
|
||||
.bind(&*note_id)
|
||||
.execute(executor)
|
||||
.await?
|
||||
.rows_affected();
|
||||
|
||||
match count {
|
||||
1 => Ok(()),
|
||||
_ => Err(sqlx::Error::RowNotFound),
|
||||
}
|
||||
}
|
||||
|
||||
async fn delete_note_to_page_relationships<'a, E>(executor: E, note_id: NoteId) -> SqlResult<()>
|
||||
where
|
||||
E: Executor<'a, Database = Sqlite>,
|
||||
{
|
||||
let delete_note_to_page_relationships_sql = concat!("DELETE FROM page_relationships ", "WHERE note_id = ? ");
|
||||
|
||||
let _ = sqlx::query(delete_note_to_page_relationships_sql)
|
||||
.bind(&*note_id)
|
||||
.execute(executor)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn delete_note<'a, E>(executor: E, note_id: NoteId) -> SqlResult<()>
|
||||
where
|
||||
E: Executor<'a, Database = Sqlite>,
|
||||
{
|
||||
let delete_note_sql = concat!("DELETE FROM notes WHERE note_id = ?");
|
||||
|
||||
let count = sqlx::query(delete_note_sql)
|
||||
.bind(&*note_id)
|
||||
.execute(executor)
|
||||
.await?
|
||||
.rows_affected();
|
||||
|
||||
match count {
|
||||
1 => Ok(()),
|
||||
_ => Err(sqlx::Error::RowNotFound),
|
||||
}
|
||||
}
|
||||
|
||||
async fn count_existing_note_relationships<'a, E>(executor: E, note_id: NoteId) -> SqlResult<i64>
|
||||
where
|
||||
E: Executor<'a, Database = Sqlite>,
|
||||
{
|
||||
let count_existing_note_relationships_sql = "SELECT COUNT(*) as count FROM page_relationships WHERE note_id = ?";
|
||||
|
||||
let count: RowCount = sqlx::query_as(count_existing_note_relationships_sql)
|
||||
.bind(&*note_id)
|
||||
.fetch_one(executor)
|
||||
.await?;
|
||||
|
||||
Ok(count.count)
|
||||
}
|
||||
|
||||
async fn assert_max_child_position_for_note<'a, E>(executor: E, note_id: ParentId) -> SqlResult<i64>
|
||||
where
|
||||
E: Executor<'a, Database = Sqlite>,
|
||||
{
|
||||
let assert_max_child_position_for_note_sql =
|
||||
"SELECT MAX(position) as count FROM note_relationships WHERE parent_id = ?";
|
||||
|
||||
let count: RowCount = sqlx::query_as(assert_max_child_position_for_note_sql)
|
||||
.bind(&*note_id)
|
||||
.fetch_one(executor)
|
||||
.await?;
|
||||
|
||||
Ok(count.count)
|
||||
}
|
||||
|
||||
// After removing a note, recalculate the position of all notes under
|
||||
// the parent note, such that there order is now completely
|
||||
// sequential.
|
||||
async fn close_hole_for_deleted_note<'a, E>(executor: E, parent_id: ParentId, position: i64) -> SqlResult<()>
|
||||
where
|
||||
E: Executor<'a, Database = Sqlite>,
|
||||
{
|
||||
let close_hole_for_deleted_note_sql = concat!(
|
||||
"UPDATE note_relationships ",
|
||||
"SET position = position - 1 ",
|
||||
"WHERE position > ? and parent_id = ?;"
|
||||
);
|
||||
|
||||
sqlx::query(close_hole_for_deleted_note_sql)
|
||||
.bind(&position)
|
||||
.bind(&*parent_id)
|
||||
.execute(executor)
|
||||
.await
|
||||
.map(|_| ())
|
||||
}
|
||||
|
||||
async fn find_all_page_references_for<'a, E>(executor: E, references: &[String]) -> SqlResult<Vec<PageTitles>>
|
||||
where
|
||||
E: Executor<'a, Database = Sqlite>,
|
||||
{
|
||||
let find_all_references_for_sql = "SELECT id, title FROM pages WHERE title IN (".to_string()
|
||||
+ &["?"].repeat(references.len()).join(",")
|
||||
+ &");".to_string();
|
||||
|
||||
let mut request = sqlx::query_as(&find_all_references_for_sql);
|
||||
for id in references.iter() {
|
||||
request = request.bind(id);
|
||||
}
|
||||
request.fetch_all(executor).await
|
||||
}
|
||||
|
||||
async fn update_note_content<'a, E>(executor: E, note_id: NoteId, content: &str) -> SqlResult<()>
|
||||
where
|
||||
E: Executor<'a, Database = Sqlite>,
|
||||
{
|
||||
let update_note_content_sql = "UPDATE notes SET content = ? WHERE note_id = ?";
|
||||
let count = sqlx::query(update_note_content_sql)
|
||||
.bind(content)
|
||||
.bind(&*note_id)
|
||||
.execute(executor)
|
||||
.await?
|
||||
.rows_affected();
|
||||
|
||||
match count {
|
||||
1 => Ok(()),
|
||||
_ => Err(sqlx::Error::RowNotFound),
|
||||
}
|
||||
}
|
||||
|
||||
fn create_unique_root_note() -> NewNote {
|
||||
NewNoteBuilder::default()
|
||||
.uuid(friendly_id::create())
|
||||
.content("".to_string())
|
||||
.notetype("root".to_string())
|
||||
.build()
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
fn create_new_page_for(title: &str, slug: &str, note_id: NoteId) -> NewPage {
|
||||
NewPageBuilder::default()
|
||||
.slug(slug.to_string())
|
||||
.title(title.to_string())
|
||||
.note_id(*note_id)
|
||||
.build()
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
// Given the references supplied, and the references found in the datastore,
|
||||
// return a list of the references not found in the datastore.
|
||||
fn diff_references(references: &[String], found_references: &[PageTitles]) -> Vec<String> {
|
||||
let all: HashSet<String> = references.iter().cloned().collect();
|
||||
let found: HashSet<String> = found_references.iter().map(|r| r.title.clone()).collect();
|
||||
all.difference(&found).cloned().collect()
|
||||
}
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
use chrono::{DateTime, Utc};
|
||||
use derive_builder::Builder;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sqlx::{self, FromRow};
|
||||
|
||||
|
@ -18,10 +19,98 @@ pub struct RawPage {
|
|||
/// A note row as selected from the database, including the tree
/// context (parent id/uuid, position) from the recursive collection
/// query.
pub struct RawNote {
    // Internal row id.
    pub id: i64,
    // Public identifier (generated via friendly_id).
    pub uuid: String,
    // Row id of this note's parent in the selected tree.
    pub parent_id: i64,
    // Uuid of that parent note.
    pub parent_uuid: String,
    pub content: String,
    // Ordinal position among the parent's children.
    pub position: i64,
    // Kind of note, e.g. "note" or "root" — TODO confirm full set.
    pub notetype: String,
    pub creation_date: DateTime<Utc>,
    pub updated_date: DateTime<Utc>,
    pub lastview_date: DateTime<Utc>,
    // Set when the note has been soft-deleted; None otherwise.
    pub deleted_date: Option<DateTime<Utc>>,
}
|
||||
|
||||
/// A page to be inserted, constructed via `NewPageBuilder`
/// (derive_builder). Unset date fields default to "now".
#[derive(Clone, Serialize, Deserialize, Debug, Builder)]
pub struct NewPage {
    pub slug: String,
    pub title: String,
    // Row id of the page's root note.
    pub note_id: i64,
    #[builder(default = r#"chrono::Utc::now()"#)]
    pub creation_date: DateTime<Utc>,
    #[builder(default = r#"chrono::Utc::now()"#)]
    pub updated_date: DateTime<Utc>,
    #[builder(default = r#"chrono::Utc::now()"#)]
    pub lastview_date: DateTime<Utc>,
    // Defaults to None: a new page is not deleted.
    #[builder(default = r#"None"#)]
    pub deleted_date: Option<DateTime<Utc>>,
}
|
||||
|
||||
/// A note to be inserted, constructed via `NewNoteBuilder`
/// (derive_builder). The uuid defaults to empty — callers such as
/// insert_nested_note overwrite it with a fresh friendly_id — and
/// unset date fields default to "now".
#[derive(Clone, Serialize, Deserialize, Debug, Builder)]
pub struct NewNote {
    #[builder(default = r#""".to_string()"#)]
    pub uuid: String,
    pub content: String,
    // Defaults to the ordinary "note" kind.
    #[builder(default = r#""note".to_string()"#)]
    pub notetype: String,
    #[builder(default = r#"chrono::Utc::now()"#)]
    pub creation_date: DateTime<Utc>,
    #[builder(default = r#"chrono::Utc::now()"#)]
    pub updated_date: DateTime<Utc>,
    #[builder(default = r#"chrono::Utc::now()"#)]
    pub lastview_date: DateTime<Utc>,
    // Defaults to None: a new note is not deleted.
    #[builder(default = r#"None"#)]
    pub deleted_date: Option<DateTime<Utc>>,
}
|
||||
|
||||
/// Single-column projection used when only a page's slug is needed
/// (see generate_slug / find_maximal_slug).
#[derive(Clone, Serialize, Deserialize, Debug, FromRow)]
pub(crate) struct JustSlugs {
    pub slug: String,
}
|
||||
|
||||
/// Single-column projection used when only a page's title is needed.
#[derive(Clone, Serialize, Deserialize, Debug, FromRow)]
pub(crate) struct JustTitles {
    title: String,
}
|
||||
|
||||
/// Single-column projection for queries that return only an internal
/// row id (see select_note_id_for_uuid).
#[derive(Clone, Serialize, Deserialize, Debug, FromRow)]
pub(crate) struct JustId {
    pub id: i64,
}
|
||||
|
||||
/// Id/title pair projection used when resolving page references by
/// title (see find_all_page_references_for).
#[derive(Clone, Serialize, Deserialize, Debug, FromRow)]
pub(crate) struct PageTitles {
    pub id: i64,
    pub title: String,
}
|
||||
|
||||
/// A row of the note_relationships table: one parent-to-child link,
/// with its ordinal position and nature (e.g. "note").
#[derive(Clone, Serialize, Deserialize, Debug, FromRow)]
pub(crate) struct NoteRelationship {
    pub parent_id: i64,
    pub note_id: i64,
    pub position: i64,
    pub nature: String,
}
|
||||
|
||||
/// Projection for aggregate queries that alias their single numeric
/// result as "count" (COUNT(*), MAX(position), ...).
#[derive(Clone, Serialize, Deserialize, Debug, FromRow)]
pub(crate) struct RowCount {
    pub count: i64,
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // The derive_builder defaults must fill every unset date field
    // with "now" and leave the soft-delete marker empty.
    #[test]
    fn can_build_new_note() {
        let now = chrono::Utc::now();
        let newnote = NewNoteBuilder::default()
            .uuid("foo".to_string())
            .content("bar".to_string())
            .build()
            .unwrap();
        // All three defaulted timestamps should be within a minute of
        // the time captured just before the build.
        assert!((newnote.creation_date - now).num_minutes() < 1);
        assert!((newnote.updated_date - now).num_minutes() < 1);
        assert!((newnote.lastview_date - now).num_minutes() < 1);
        assert!(newnote.deleted_date.is_none());
    }
}
|
||||
|
|
Loading…
Reference in New Issue