Not working.

This commit is contained in:
Elf M. Sternberg 2020-10-06 08:01:25 -07:00
parent 0f5d15ad14
commit e0c463f9fc
7 changed files with 575 additions and 145 deletions

View File

@ -13,6 +13,7 @@ readme = "./README.org"
[dependencies] [dependencies]
friendly_id = "0.3.0" friendly_id = "0.3.0"
thiserror = "1.0.20" thiserror = "1.0.20"
derive_builder = "0.9.0"
tokio = { version = "0.2.22", features = ["rt-threaded", "blocking"] } tokio = { version = "0.2.22", features = ["rt-threaded", "blocking"] }
serde = { version = "1.0.116", features = ["derive"] } serde = { version = "1.0.116", features = ["derive"] }
serde_json = "1.0.56" serde_json = "1.0.56"

View File

@ -0,0 +1,190 @@
/async fn insert_note<'e, E>(executor: E, id: &str, content: &str, notetype: &str) -> SqlResult<i64>
where
E: 'e + Executor<'e, Database = Sqlite>,
{
lazy_static! {
static ref INSERT_ONE_NOTE_SQL: String = include_str!("sql/insert_one_note.sql");
}
let now = chrono::Utc::now();
Ok(sqlx::query(INSERT_ONE_NOTE_SQL)
.bind(&id)
.bind(&content)
.bind(&notetype)
.bind(&now)
.bind(&now)
.bind(&now)
.execute(executor)
.await?
.last_insert_rowid())
}
// Private projection row used to deserialize the result of
// "SELECT slug FROM pages ..." queries; only the slug column is needed.
#[derive(Clone, FromRow)]
struct JustSlugs {
    slug: String
}
// Given an initial string and an existing collection of slugs,
// generate a new slug that does not conflict with the current
// collection.
// Given an initial string and an existing collection of slugs,
// generate a new slug that does not conflict with the current
// collection. Conflicts are resolved by appending "-<n+1>", where <n>
// is the highest numeric suffix found among similar slugs.
async fn generate_slug<'e, E>(executor: E, title: &str) -> SqlResult<String>
where
    E: 'e + Executor<'e, Database = Sqlite>,
{
    lazy_static! {
        // Strips a trailing "-<number>" so we search on the base slug.
        static ref RE_JUSTNUM: Regex = Regex::new(r"-\d+$").unwrap();
        // Captures the trailing number so existing counters can be parsed.
        static ref RE_CAPNUM: Regex = Regex::new(r"-(\d+)$").unwrap();
    }
    let initial_slug = slugify::slugify(title);
    // Bug fix: the original referenced an undefined binding `slug` here.
    let sample_slug = RE_JUSTNUM.replace_all(&initial_slug, "");
    // Bug fix: `?` placeholders are not expanded inside string literals
    // ('?%'), so the wildcard is appended to the bound value instead.
    // Also, `query` + `execute` cannot return rows; `query_as` + `fetch_all`
    // are required to produce Vec<JustSlugs>.
    let similar_slugs: Vec<JustSlugs> = sqlx::query_as("SELECT slug FROM pages WHERE slug LIKE ?;")
        .bind(format!("{}%", sample_slug))
        .fetch_all(executor)
        .await?;
    // Collect the numeric suffixes of all similar slugs, skipping slugs
    // that carry no numeric suffix or whose suffix does not parse.
    let mut slug_counters: Vec<u32> = similar_slugs
        .iter()
        .filter_map(|row| RE_CAPNUM.captures(&row.slug))
        .filter_map(|caps| caps.get(1).and_then(|m| m.as_str().parse::<u32>().ok()))
        .collect();
    match slug_counters.len() {
        // NOTE(review): if a page already uses `initial_slug` itself (with
        // no numeric suffix), this branch still returns a colliding slug —
        // confirm whether that case is handled by a DB constraint upstream.
        0 => Ok(initial_slug),
        _ => {
            slug_counters.sort_unstable();
            // `pop()` cannot be None here: len() > 0 in this arm.
            Ok(format!("{}-{}", initial_slug, slug_counters.pop().unwrap() + 1))
        }
    }
}
/// Insert a single page row and return its `last_insert_rowid`.
async fn insert_page<'e, E>(executor: E, page: &RawPage) -> SqlResult<i64>
where
    E: 'e + Executor<'e, Database = Sqlite>,
{
    let insert_one_page_sql = include_str!("sql/insert_one_page.sql");
    Ok(sqlx::query(insert_one_page_sql)
        .bind(&page.id)
        .bind(&page.title)
        .bind(&page.note_id)
        .bind(&page.creation_date)
        .bind(&page.updated_date)
        .bind(&page.lastview_date)
        // Bug fix: the original executed against `&mut tx`, a binding that
        // does not exist in this function; the passed-in executor is used.
        .execute(executor)
        .await?
        .last_insert_rowid())
}
/// Given a title, insert a new page. All dates are today, and the slug is
/// generated as above:
async fn insert_new_page_for_title<'e, E>(executor: E, title: &str) -> SqlResult<Page> {
// /// Fetch page by title
// ///
// /// This is the most common use case, in which a specific title
// /// has been requested of the server via POST. The page always
// /// exists; if it doesn't, it will be automatically generated.
// pub async fn get_page_by_title(&slug, slug: &title) -> NoteResult<(Page, Notes)> {
// let mut tx = self.0.begin().await?;
// let maybe_page = sqlx::query_as(select_one_page_by_title)
// .bind(&title)
// .fetch_one(&tx)
// .await;
// let page = match maybe_page {
// Ok(page) => page,
// Err(sqlx::Error::NotFound) => insert_new_page_for_title(tx, title),
// Err(a) => return Err(a)
// };
// let notes = sqlx::query_as(select_note_collection_for_root)
// .bind(page.note_id)
// .fetch(&tx)
// .await?;
// tx.commit().await?;
// Ok((page, notes))
// }
//
//
//
//
//
//
//
//
//
//
// /// This will erase all the data in the database. Only use this
// /// if you're sure that's what you want.
// pub async fn reset_database(&self) -> NoteResult<()> {
// let initialize_sql = include_str!("sql/initialize_database.sql");
// sqlx::query(initialize_sql).execute(&*self.0).await?;
// Ok(())
// }
//
// async fn create_new_page(&self, title: &str) -> SqlResult<Page, Vec<Notes>> {
// let now = chrono::Utc::now();
// let new_note_id = friendly_id::create();
//
// let mut tx = self.0.begin().await?;
// let new_slug = generate_slug(&mut tx, title);
// let note_id = insert_note(&mut tx, &new_note_id, &"", &"page").await?;
// insert_page(&mut tx, NewPage {
// slug,
// title,
// note_id,
// creation_date: now,
// updated_date: now,
// lastview_date: now
// }).await;
// tx.commit();
// self.fetch_one_page(title)
// }
//
// async fn fetch_one_page(&self, title: &str) ->
//
// pub async fn fetch_page(&self, title: &str) -> SqlResult<(Page, Vec<Notes>)> {
// match self.fetch_one_page(title) {
// Ok((page, notes)) => Ok((page, notes)),
// Err(NotFound) => self.create_new_page(title),
// Err(e) => Err(e)
// }
// }
//
// pub async fn fetch_raw_page(&self, id: &str) -> SqlResult<RawPage> {
// let select_one_page_sql = include_str!("sql/select_one_page.sql");
// sqlx::query_as(select_one_page_sql).bind(&id).fetch_one(&*self.0).await
// }
//
// pub async fn fetch_raw_note(&self, id: &str) -> SqlResult<RawNote> {
// let select_one_note_sql = include_str!("sql/select_one_note.sql");
// sqlx::query_as(select_one_note_sql).bind(&id).fetch_one(&*self.0).await
// }
//
// pub async fn insert_note(&self, id: &str, content: &str, notetype: &str) -> SqlResult<i64> {
// insert_note(&*self.0, id, content, notetype).await
// }
//
// pub async fn update_raw_note(&self, id: &str, content: &str) -> NoteResult<()> {
// let update_one_note_sql = include_str!("sql/update_one_note.sql");
// let now = chrono::Utc::now();
// let rows_updated = sqlx::query(update_one_note_sql)
// .bind(&content)
// .bind(&now)
// .bind(&now)
// .bind(&id)
// .execute(&*self.0).await?
// .rows_affected();
// match rows_updated {
// 1 => Ok(()),
// _ => Err(NoteStoreError::NotFound)
// }
// }
//
// // TODO: We're returning the raw page with the raw note id, note
// // the friendly ID. Is there a disconnect there? It's making me
// // furiously to think.
//

View File

@ -1,4 +1,6 @@
use chrono::{DateTime, Utc};
mod errors; mod errors;
mod row_structs;
mod store; mod store;
mod structs; mod structs;
@ -19,81 +21,42 @@ mod tests {
storagepool storagepool
} }
// Request for the page by slug.
// If the page exists, return it. If the page doesn't, return NotFound
#[tokio::test(threaded_scheduler)] #[tokio::test(threaded_scheduler)]
async fn fetching_unfound_page_works() { async fn fetching_unfound_page_by_slug_works() {
let storagepool = fresh_inmemory_database().await; let storagepool = fresh_inmemory_database().await;
let unfoundpage = storagepool.fetch_raw_page("nonexistent-page").await; let unfoundpage = storagepool.get_page_by_slug("nonexistent-page").await;
assert!(unfoundpage.is_err()); assert!(unfoundpage.is_err());
} }
// Request for the page by title. If the page exists, return it.
// If the page doesn't exist, create it then return it anyway.
// There should be at least one note, the root note.
#[tokio::test(threaded_scheduler)] #[tokio::test(threaded_scheduler)]
async fn fetching_unfound_note_works() { async fn fetching_unfound_page_by_title_works() {
let title = "Nonexistent Page";
let now = chrono::Utc::now();
let storagepool = fresh_inmemory_database().await; let storagepool = fresh_inmemory_database().await;
let unfoundnote = storagepool.fetch_raw_note("nonexistent-note").await; let newpageresult = storagepool.get_page_by_title(&title).await;
assert!(unfoundnote.is_err());
assert!(newpageresult.is_ok(), "{:?}", newpage);
let (newpage, newnotes) = newpageresult.unwrap();
assert_eq!(newpage.title, title, "{:?}", newpage.title);
assert_eq!(newpage.slug, "nonexistent-page");
} }
#[tokio::test(threaded_scheduler)] // // TODO: This should be 1, not 0
async fn cloning_storagepool_is_ok() { // assert_eq!(newnotes.len(), 0);
let storagepool = fresh_inmemory_database().await; // // assert_eq!(newnotes[0].notetype, "root");
let storagepool2 = storagepool.clone(); // // assert_eq!(newpage.note_id, newnotes[0].id);
let unfoundnote = storagepool2.fetch_raw_note("nonexistent-note").await; //
assert!(unfoundnote.is_err()); // assert!((newpage.creation_date - now).num_minutes() < 1.0);
let unfoundnote = storagepool.fetch_raw_note("nonexistent-note").await; // assert!((newpage.updated_date - now).num_minutes() < 1.0);
assert!(unfoundnote.is_err()); // assert!((newpage.lastview_date - now).num_minutes() < 1.0);
} // assert!(newpage.deleted_date.is_none());
// }
#[tokio::test(threaded_scheduler)]
async fn can_save_a_note() {
let storagepool = fresh_inmemory_database().await;
let note_id = storagepool.insert_note("noteid", "notecontent", "note").await;
assert!(note_id.is_ok(), "{:?}", note_id);
let note_id = note_id.unwrap();
assert!(note_id > 0);
let foundnote = storagepool.fetch_raw_note("noteid").await;
assert!(foundnote.is_ok(), "{:?}", foundnote);
let foundnote = foundnote.unwrap();
assert_eq!(foundnote.content, "notecontent");
assert_eq!(foundnote.notetype, "note");
}
#[tokio::test(threaded_scheduler)]
async fn can_save_a_page() {
let storagepool = fresh_inmemory_database().await;
let page_id = storagepool.insert_page("pageid", "Test page").await;
assert!(page_id.is_ok(), "{:?}", page_id);
let page = storagepool.fetch_raw_page("pageid").await;
assert!(page.is_ok(), "{:?}", page);
let page = page.unwrap();
assert_eq!(page.title, "Test page");
assert!(page.note_id > 0);
}
#[tokio::test(threaded_scheduler)]
async fn reports_note_update_failure() {
let storagepool = fresh_inmemory_database().await;
let note_id = storagepool.insert_note("noteid", "notecontent", "note").await;
assert!(note_id.is_ok(), "{:?}", note_id);
let update = storagepool.update_raw_note("badnote", "Bad Note Content").await;
assert!(update.is_err());
}
#[tokio::test(threaded_scheduler)]
async fn can_update_a_note() {
let storagepool = fresh_inmemory_database().await;
let note_id = storagepool.insert_note("noteid", "notecontent", "note").await;
assert!(note_id.is_ok(), "{:?}", note_id);
let update = storagepool.update_raw_note("noteid", "Good Note Content").await;
assert!(update.is_ok(), "{:?}", update);
let note = storagepool.fetch_raw_note("noteid").await;
assert!(note.is_ok(), "{:?}", note);
let note = note.unwrap();
assert_eq!(note.content, "Good Note Content");
}
} }

View File

@ -0,0 +1,72 @@
use chrono::{DateTime, Utc};
use derive_builder;
use derive_builder::Builder;
use serde::{Deserialize, Serialize};
use sqlx::{self, FromRow};
/// A page record exactly as stored in the `pages` table.
#[derive(Clone, Serialize, Deserialize, Debug, FromRow)]
pub struct RawPage {
    pub id: i64,
    // URL-friendly identifier for the page.
    pub slug: String,
    pub title: String,
    // Foreign key to the page's root note.
    pub note_id: i64,
    pub creation_date: DateTime<Utc>,
    pub updated_date: DateTime<Utc>,
    pub lastview_date: DateTime<Utc>,
    // None while the page is live; set when the page is soft-deleted.
    pub deleted_date: Option<DateTime<Utc>>,
}
/// A note record exactly as stored in the `notes` table.
#[derive(Clone, Serialize, Deserialize, Debug, FromRow)]
pub struct RawNote {
    pub id: i64,
    // Public identifier exposed outside the database (the row id stays internal).
    pub uuid: String,
    pub content: String,
    // Discriminator string, e.g. "page" or "note" — see callers for the full set.
    pub notetype: String,
    pub creation_date: DateTime<Utc>,
    pub updated_date: DateTime<Utc>,
    pub lastview_date: DateTime<Utc>,
    // None while the note is live; set when the note is soft-deleted.
    pub deleted_date: Option<DateTime<Utc>>,
}
/// Insertable page record. All date fields default to "now" via the
/// generated `NewPageBuilder`; only `slug`, `title`, and `note_id` are
/// required.
#[derive(Clone, Serialize, Deserialize, Debug, Builder)]
pub struct NewPage {
    pub slug: String,
    pub title: String,
    pub note_id: i64,
    #[builder(default = "chrono::Utc::now()")]
    pub creation_date: DateTime<Utc>,
    #[builder(default = "chrono::Utc::now()")]
    pub updated_date: DateTime<Utc>,
    #[builder(default = "chrono::Utc::now()")]
    pub lastview_date: DateTime<Utc>,
    // Bug fix: without a default the builder errors unless the caller
    // explicitly sets deleted_date; new pages are never pre-deleted.
    #[builder(default)]
    pub deleted_date: Option<DateTime<Utc>>,
}
/// Insertable note record. Date fields default to "now" and `notetype`
/// defaults to "note" via the generated `NewNoteBuilder`; `uuid` and
/// `content` are required.
#[derive(Clone, Serialize, Deserialize, Debug, Builder)]
pub struct NewNote {
    pub uuid: String,
    pub content: String,
    // Bug fix: derive_builder treats the default string as a Rust
    // expression, so the original `default = "note"` referenced an
    // undefined identifier `note` instead of producing the string "note".
    #[builder(default = "\"note\".to_string()")]
    pub notetype: String,
    #[builder(default = "chrono::Utc::now()")]
    pub creation_date: DateTime<Utc>,
    #[builder(default = "chrono::Utc::now()")]
    pub updated_date: DateTime<Utc>,
    #[builder(default = "chrono::Utc::now()")]
    pub lastview_date: DateTime<Utc>,
    // Bug fix: default to None so the builder works without setting it.
    #[builder(default)]
    pub deleted_date: Option<DateTime<Utc>>,
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Verify that NewNoteBuilder fills in sensible defaults: dates close
    /// to "now" and no deletion date.
    #[test]
    fn can_build_new_note() {
        let now = chrono::Utc::now();
        // Bug fixes vs. the original test: the builder was never `.build()`,
        // the required fields (uuid, content) were never set, and
        // `num_minutes()` returns i64, which cannot be compared to 1.0.
        let newnote = NewNoteBuilder::default()
            .uuid("test-uuid".to_string())
            .content("test content".to_string())
            .build()
            .unwrap();
        assert!((newnote.creation_date - now).num_minutes() < 1);
        assert!((newnote.updated_date - now).num_minutes() < 1);
        assert!((newnote.lastview_date - now).num_minutes() < 1);
        assert!(newnote.deleted_date.is_none());
    }
}

View File

@ -0,0 +1,38 @@
-- Walk the note tree from a page's root note, returning every descendant
-- note with its parent linkage, nature, and position. The `cycle` column
-- accumulates ','-delimited ids so the recursive arm can refuse to revisit
-- a note already on the current path (cycle protection).
--
-- TODO(review): the CTE column list, the base SELECT, and the recursive
-- SELECT do not agree on column count/order (e.g. `nature` and `position`
-- are selected in the outer query but not declared in the CTE header) —
-- this query needs reconciling before it can run.
SELECT parent_uuid, uuid, content, notetype, nature, position FROM (
WITH RECURSIVE children(
   parent_id,
   parent_uuid, id,
   uuid,
   content,
   notetype,
   creation_date,
   updated_date,
   lastview_date,
   deleted_date,
   cycle
) AS (
    -- Base case: the single root note attached to the requested page.
	SELECT
	   notes.id,
       notes.uuid,
       notes.id,
	   notes.uuid,
	   notes.content,
	   notes.notetype, 'page', 0, ','||notes.id||','
	FROM notes INNER JOIN pages
	    ON pages.note_id = notes.id
	WHERE pages.id = ?
	      AND notes.notetype="page"
	UNION
    -- Recursive case: all children of notes already collected, skipping
    -- any note whose id is already present in the accumulated cycle path.
	SELECT note_relationships.parent_id, notes.id,
	    notes.content, notes.notetype, note_relationships.nature,
		note_relationships.position,
		children.cycle||notes.id||','
	FROM notes
	INNER JOIN note_relationships ON notes.id = note_relationships.note_id
	INNER JOIN children ON note_relationships.parent_id = children.id
	WHERE children.cycle NOT LIKE '%,'||notes.id||',%'
	ORDER BY note_relationships.position)
SELECT * from children);

View File

@ -1,9 +1,12 @@
use crate::errors::NoteStoreError; use crate::errors::NoteStoreError;
use crate::structs::{RawNote, RawPage}; use crate::row_structs::{RawNote, RawPage};
use chrono; use chrono;
use friendly_id; use friendly_id;
use sqlx; use sqlx;
use sqlx::{sqlite::{Sqlite, SqlitePool}, Executor, Done}; use sqlx::{
sqlite::{Sqlite, SqlitePool},
Done, Executor,
};
use std::sync::Arc; use std::sync::Arc;
/// A handle to our Sqlite database. /// A handle to our Sqlite database.
@ -13,92 +16,199 @@ pub struct NoteStore(Arc<SqlitePool>);
type NoteResult<T> = core::result::Result<T, NoteStoreError>; type NoteResult<T> = core::result::Result<T, NoteStoreError>;
type SqlResult<T> = sqlx::Result<T>; type SqlResult<T> = sqlx::Result<T>;
async fn insert_note<'e, E>(executor: E, id: &str, content: &str, notetype: &str) -> SqlResult<i64>
where
E: 'e + Executor<'e, Database = Sqlite>,
{
let insert_one_note_sql = include_str!("sql/insert_one_note.sql");
let now = chrono::Utc::now();
Ok(sqlx::query(insert_one_note_sql)
.bind(&id)
.bind(&content)
.bind(&notetype)
.bind(&now)
.bind(&now)
.bind(&now)
.execute(executor)
.await?
.last_insert_rowid())
}
impl NoteStore { impl NoteStore {
pub async fn new(url: &str) -> NoteResult<Self> { pub async fn new(url: &str) -> NoteResult<Self> {
let pool = SqlitePool::connect(url).await?; let pool = SqlitePool::connect(url).await?;
Ok(NoteStore(Arc::new(pool))) Ok(NoteStore(Arc::new(pool)))
} }
/// This will erase all the data in the database. Only use this // Erase all the data in the database and restore it
/// if you're sure that's what you want. // to its original empty form. Do not use unless you
// really, really want that to happen.
pub async fn reset_database(&self) -> NoteResult<()> { pub async fn reset_database(&self) -> NoteResult<()> {
let initialize_sql = include_str!("sql/initialize_database.sql"); reset_databate(&*self.0).await
sqlx::query(initialize_sql).execute(&*self.0).await?;
Ok(())
} }
pub async fn fetch_raw_page(&self, id: &str) -> SqlResult<RawPage> { /// Fetch page by slug
let select_one_page_sql = include_str!("sql/select_one_page.sql"); ///
sqlx::query_as(select_one_page_sql).bind(&id).fetch_one(&*self.0).await /// Supports the use case of the user navigating to a known place
/// via a bookmark or other URL. Since the title isn't clear from
/// the slug, the slug is insufficient to generate a new page, so
/// this use case says that in the event of a failure to find the
/// requested page, return a basic NotFound.
pub async fn get_page_by_slug(&self, slug: &str) -> NoteResult<(RawPage, Vec<RawNote>)> {
// let select_note_collection_for_root = include_str!("sql/select_note_collection_for_root.sql");
let mut tx = self.0.begin().await?;
// let notes = sqlx::query_as(select_note_collection_for_root)
// .bind(page.note_id)
// .fetch(&tx)
// .await?;
tx.commit().await?;
Ok((page, vec![]))
} }
pub async fn fetch_raw_note(&self, id: &str) -> SqlResult<RawNote> { pub async fn get_page_by_title(&self, title: &str) -> NoteResult<(RawPage, Vec<RawNote>)> {
let select_one_note_sql = include_str!("sql/select_one_note.sql"); let mut tx = self.0.begin().await?;
sqlx::query_as(select_one_note_sql).bind(&id).fetch_one(&*self.0).await let page = match select_page_by_title(&mut tx, title) {
} Ok(page) => page,
Err(sqlx::Error::NotFound) => {
match create_page_for_title(&mut tx, title) {
Ok(page) => page,
Err(e) => return Err(e)
}
},
Err(e) => return Err(e),
};
// Todo: Replace vec with the results of the CTE
return Ok((page, vec![]))
}
}
pub async fn insert_note(&self, id: &str, content: &str, notetype: &str) -> SqlResult<i64> { // ___ _ _
insert_note(&*self.0, id, content, notetype).await // | _ \_ _(_)_ ____ _| |_ ___
} // | _/ '_| \ V / _` | _/ -_)
// |_| |_| |_|\_/\__,_|\__\___|
//
pub async fn update_raw_note(&self, id: &str, content: &str) -> NoteResult<()> { // I'm putting a lot of faith in Rust's ability to inline stuff. I'm
let update_one_note_sql = include_str!("sql/update_one_note.sql"); // sure this is okay. But really, this lets the API be clean and
let now = chrono::Utc::now(); // coherent and easily readable, and hides away the gnarliness of some
let rows_updated = sqlx::query(update_one_note_sql) // of the SQL queries.
.bind(&content)
.bind(&now) async fn select_page_by_slug<'e, E>(executor: E, slug: &str) -> SqlResult<RawPage>
.bind(&now) where
.bind(&id) E: 'e + Executor<'e, Database = Sqlite>,
.execute(&*self.0).await? {
.rows_affected(); let select_one_page_by_title_sql = concat!(
match rows_updated { "SELECT id, title, slug, note_id, creation_date, updated_date, ",
1 => Ok(()), "lastview_date, deleted_date FROM pages WHERE slug=?;"
_ => Err(NoteStoreError::NotFound) );
} sqlx::query_as(select_one_page_by_slug_sql)
.bind(&slug)
.fetch_one(&mut executor)
.await?
}
async fn select_page_by_title<'e, E>(executor: E, title: &str) -> SqlResult<RawPage>
where
E: 'e + Executor<'e, Database = Sqlite>,
{
let select_one_page_by_title_sql = concat!(
"SELECT id, title, slug, note_id, creation_date, updated_date, ",
"lastview_date, deleted_date FROM pages WHERE title=?;"
);
sqlx::query_as(select_one_page_by_title_sql)
.bind(&title)
.fetch_one(&mut executor)
.await?
}
async fn reset_database<'e, E>(executor: E) -> SqlResult<()>
where
E: 'e + Executor<'e, Database = Sqlite>,
{
let initialize_sql = include_str!("sql/initialize_database.sql");
sqlx::query(initialize_sql).execute(&*self.0).await?
}
async fn get_note_collection_for_root<'e, E>(executor: E, root: i64) -> SqlResult<Vec<RawNotes>>
where
E: 'e + Executor<'e, Database = Sqlite>,
{
let select_note_collection_for_root = include_str!("sql/select_note_collection_for_root.sql");
sqlx::query_as(select_note_collection_for_root)
.fetch(&*self.0)
.await?
}
async fn insert_one_new_note<'e, E>(executor: E, note: &NewNote) -> SqlResult<i64> where
E: 'e + Executor<'e, Database = Sqlite>,
{
let insert_one_note_sql = concat!(
"INSERT INTO notes ( ",
" uuid, ",
" content, ",
" notetype, ",
" creation_date, ",
" updated_date, ",
" lastview_date) ",
"VALUES (?, ?, ?, ?, ?, ?);");
Ok(sqlx::query(insert_one_note_sql)
.bind(&note.uuid)
.bind(&note.content)
.bind(&note.note_type)
.bind(&note.creation_date)
.bind(&note.updated_date)
.bind(&note.lastview_date)
.execute(&mut tx)
.await?
.last_insert_rowid())
}
// Given an initial string and an existing collection of slugs,
// generate a new slug that does not conflict with the current
// collection.
async fn generate_slug<'e, E>(executor: E, title: &str) -> SqlResult<String>
where
E: 'e + Executor<'e, Database = Sqlite>,
{
lazy_static! {
static ref RE_STRIP_NUM: Regex = Regex::new(r"-\d+$").unwrap();
}
lazy_static! {
static ref RE_CAP_NUM: Regex = Regex::new(r"-(\d+)$").unwrap();
} }
// TODO: We're returning the raw page with the raw note id, note let initial_slug = slugify::slugify(title);
// the friendly ID. Is there a disconnect there? It's making me let sample_slug = RE_STRIP_NUM.replace_all(slug, "");
// furiously to think. let similar_slugs: Vec<JustSlugs> = sqlx::query("SELECT slug FROM pages WHERE slug LIKE '?%';")
pub async fn insert_page(&self, id: &str, title: &str) -> SqlResult<i64> { .bind(&sample_slug)
let insert_one_page_sql = include_str!("sql/insert_one_page.sql"); .execute(executor)
let new_note_id = friendly_id::create(); .await?;
let now = chrono::Utc::now(); let slug_counters = similar_slugs
.iter()
let mut tx = self.0.begin().await?; .map(|slug| RE_CAPNUM.captures(slug.slug))
.filter_map(|cap| cap.get(1).unwrap().parse::<u32>().unwrap())
let note_id = insert_note(&mut tx, &new_note_id, &"", &"page").await?; .collect();
match slug_counters.len() {
let page_id = sqlx::query(insert_one_page_sql) 0 => Ok(initial_slug),
.bind(&id) _ => {
.bind(&title) slug_counters.sort_unstable();
.bind(&note_id) return Ok(format!("{}-{}", initial_slug, slug_counters.pop() + 1))
.bind(&now) }
.bind(&now) }
.bind(&now) }
.execute(&mut tx)
.await? async fn insert_one_new_page<'e, E>(executor: E, page: &NewPage) -> SqlResult<i64>
.last_insert_rowid(); where
E: 'e + Executor<'e, Database = Sqlite>,
tx.commit().await?; {
Ok(page_id) let insert_one_page_sql = concat!(
} "INSERT INTO pages ( ",
" slug, ",
" title, ",
" note_id, ",
" creation_date, ",
" updated_date, ",
" lastview_date) ",
"VALUES (?, ?, ?, ?, ?, ?);");
Ok(sqlx::query(insert_one_page_sql)
.bind(&page.slug)
.bind(&page.title)
.bind(&page.note_id)
.bind(&page.creation_date)
.bind(&page.updated_date)
.bind(&page.lastview_date)
.execute(&mut tx)
.await?
.last_insert_rowid())
}
async fn create_page_for_title<'e, E>(executor: E, title: &str) -> SqlResult<RawPage> where
E: 'e + Executor<'e, Database = Sqlite>,
{
todo!()
} }

View File

@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize};
use sqlx::{self, FromRow}; use sqlx::{self, FromRow};
#[derive(Clone, Serialize, Deserialize, Debug, FromRow)] #[derive(Clone, Serialize, Deserialize, Debug, FromRow)]
pub struct RawPage { pub(crate) struct RawPage {
pub id: i64, pub id: i64,
pub slug: String, pub slug: String,
pub title: String, pub title: String,
@ -15,7 +15,7 @@ pub struct RawPage {
} }
#[derive(Clone, Serialize, Deserialize, Debug, FromRow)] #[derive(Clone, Serialize, Deserialize, Debug, FromRow)]
pub struct RawNote { pub(crate) struct RawNote {
pub id: i64, pub id: i64,
pub uuid: String, pub uuid: String,
pub content: String, pub content: String,
@ -25,3 +25,59 @@ pub struct RawNote {
pub lastview_date: DateTime<Utc>, pub lastview_date: DateTime<Utc>,
pub deleted_date: Option<DateTime<Utc>>, pub deleted_date: Option<DateTime<Utc>>,
} }
// // A Resource is either content or a URL to content that the
// // user embeds in a note. TODO: I have no idea how to do this yet,
// // but I'll figure it out.
// #[derive(Clone, Serialize, Deserialize, Debug)]
// pub struct Resource {
// pub id: String,
// pub content: String,
// }
//
// // A Breadcrumb is a component of a reference. Every element should
// // be clickable, although in practice what's going to happen is that
// // the user will be sent to the *page* with that note, then *scrolled*
// // to that note via anchor.
// #[derive(Clone, Debug)]
// pub struct Breadcrumb {
// pub note_id: String,
// pub summary: String,
// }
//
// // A Note is the heart of our system. It is a single object that has
// // a place in our system; it has a parent, but it also has embedded
// // references that allow it to navigate through a web of related
// // objects. It may have children. *AT THIS LAYER*, though, it is
// // returned as an array. It is up to the
// #[derive(Clone, Debug)]
// pub struct Note {
// pub id: String,
// pub parent_id: String,
// pub content: String,
// pub resources: Vec<Resource>,
// pub note_type: String, // Describes the relationship to the parent note.
// pub created: DateTime<Utc>,
// pub updated: DateTime<Utc>,
// pub viewed: DateTime<Utc>,
// pub deleted: Option<DateTime<Utc>>,
// }
//
// pub struct Reference {
// pub page_id: String,
// pub page_title: String,
// pub reference_summary_titles: Vec<Breadcrumbs>,
// pub reference_summary: String,
// }
// The user-facing page object assembled from raw rows. Unlike RawPage it
// carries no database row id — pages are addressed by slug at this layer.
pub struct Page {
    pub slug: String,
    pub title: String,
    // pub notes: Vec<Notes>, // The actual notes on this page.
    // pub references: Vec<Reference>, // All other notes that reference this page.
    // pub unlinked_references: Vec<Reference>,
    pub created: DateTime<Utc>,
    pub updated: DateTime<Utc>,
    pub viewed: DateTime<Utc>,
    // None while the page is live; set when the page is soft-deleted.
    pub deleted: Option<DateTime<Utc>>,
}