pinakes-core: update remaining modules and tests

Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: I9e0ff5ea33a5cf697473423e88f167ce6a6a6964
This commit is contained in:
raf 2026-03-08 00:42:29 +03:00
commit 3d9f8933d2
Signed by: NotAShelf
GPG key ID: 29D95B64378DB4BF
44 changed files with 1207 additions and 578 deletions

View file

@ -21,22 +21,32 @@ pub struct ChunkedUploadManager {
impl ChunkedUploadManager {
/// Create a new chunked upload manager.
pub fn new(temp_dir: PathBuf) -> Self {
#[must_use]
pub const fn new(temp_dir: PathBuf) -> Self {
Self { temp_dir }
}
/// Initialize the temp directory.
///
/// # Errors
///
/// Returns an error if the directory cannot be created.
pub async fn init(&self) -> Result<()> {
    let dir = self.temp_dir.as_path();
    fs::create_dir_all(dir).await?;
    Ok(())
}
/// Get the temp file path for an upload session.
#[must_use]
pub fn temp_path(&self, session_id: Uuid) -> PathBuf {
    // One temp file per session, named after the session's UUID.
    self.temp_dir.join(format!("{session_id}.upload"))
}
/// Create the temp file for a new upload session.
///
/// # Errors
///
/// Returns an error if the file cannot be created or sized.
pub async fn create_temp_file(&self, session: &UploadSession) -> Result<()> {
let path = self.temp_path(session.id);
@ -54,6 +64,11 @@ impl ChunkedUploadManager {
}
/// Write a chunk to the temp file.
///
/// # Errors
///
/// Returns an error if the session file is not found, the chunk index is out
/// of range, the chunk size is wrong, or the write fails.
pub async fn write_chunk(
&self,
session: &UploadSession,
@ -128,6 +143,11 @@ impl ChunkedUploadManager {
/// 1. All chunks are received
/// 2. File size matches expected
/// 3. Content hash matches expected
///
/// # Errors
///
/// Returns an error if chunks are missing, the file size does not match, the
/// hash does not match, or the file metadata cannot be read.
pub async fn finalize(
&self,
session: &UploadSession,
@ -147,12 +167,11 @@ impl ChunkedUploadManager {
// Verify chunk indices
let mut indices: Vec<u64> =
received_chunks.iter().map(|c| c.chunk_index).collect();
indices.sort();
indices.sort_unstable();
for (i, idx) in indices.iter().enumerate() {
if *idx != i as u64 {
return Err(PinakesError::InvalidData(format!(
"chunk {} missing or out of order",
i
"chunk {i} missing or out of order"
)));
}
}
@ -187,6 +206,10 @@ impl ChunkedUploadManager {
}
/// Cancel an upload and clean up temp file.
///
/// # Errors
///
/// Returns an error if the temp file cannot be removed.
pub async fn cancel(&self, session_id: Uuid) -> Result<()> {
let path = self.temp_path(session_id);
if path.exists() {
@ -197,6 +220,10 @@ impl ChunkedUploadManager {
}
/// Clean up expired temp files.
///
/// # Errors
///
/// Returns an error if the temp directory cannot be read.
pub async fn cleanup_expired(&self, max_age_hours: u64) -> Result<u64> {
let mut count = 0u64;
let max_age = std::time::Duration::from_secs(max_age_hours * 3600);
@ -204,7 +231,7 @@ impl ChunkedUploadManager {
let mut entries = fs::read_dir(&self.temp_dir).await?;
while let Some(entry) = entries.next_entry().await? {
let path = entry.path();
if path.extension().map(|e| e == "upload").unwrap_or(false)
if path.extension().is_some_and(|e| e == "upload")
&& let Ok(metadata) = fs::metadata(&path).await
&& let Ok(modified) = metadata.modified()
{
@ -267,7 +294,7 @@ mod tests {
expected_hash: ContentHash::new(hash.clone()),
expected_size: data.len() as u64,
chunk_size,
chunk_count: (data.len() as u64 + chunk_size - 1) / chunk_size,
chunk_count: (data.len() as u64).div_ceil(chunk_size),
status: UploadStatus::InProgress,
created_at: Utc::now(),
expires_at: Utc::now() + chrono::Duration::hours(24),

View file

@ -4,6 +4,7 @@ use super::DeviceSyncState;
use crate::config::ConflictResolution;
/// Detect if there's a conflict between local and server state.
#[must_use]
pub fn detect_conflict(state: &DeviceSyncState) -> Option<ConflictInfo> {
// If either side has no hash, no conflict possible
let local_hash = state.local_hash.as_ref()?;
@ -48,6 +49,7 @@ pub enum ConflictOutcome {
}
/// Resolve a conflict based on the configured strategy.
#[must_use]
pub fn resolve_conflict(
conflict: &ConflictInfo,
resolution: ConflictResolution,
@ -67,20 +69,21 @@ pub fn resolve_conflict(
}
/// Generate a new path for the conflicting local file.
/// Format: `filename.conflict-<short_hash>.ext`
fn generate_conflict_path(original_path: &str, local_hash: &str) -> String {
    // Take at most the first 8 characters of the hash. Using a char-based
    // truncation (rather than byte slicing) cannot panic mid-codepoint even
    // if the hash ever contains non-ASCII text; for hex hashes the result
    // is identical.
    let short_hash: String = local_hash.chars().take(8).collect();
    if let Some((base, ext)) = original_path.rsplit_once('.') {
        // Keep the extension last so the file type stays recognizable.
        format!("{base}.conflict-{short_hash}.{ext}")
    } else {
        // No extension: just append the conflict suffix.
        format!("{original_path}.conflict-{short_hash}")
    }
}
/// Automatic conflict resolution based on modification times.
/// Useful when ConflictResolution is set to a time-based strategy.
pub fn resolve_by_mtime(conflict: &ConflictInfo) -> ConflictOutcome {
/// Useful when `ConflictResolution` is set to a time-based strategy.
#[must_use]
pub const fn resolve_by_mtime(conflict: &ConflictInfo) -> ConflictOutcome {
match (conflict.local_mtime, conflict.server_mtime) {
(Some(local), Some(server)) => {
if local > server {

View file

@ -17,6 +17,7 @@ use crate::{
pub struct DeviceId(pub Uuid);
impl DeviceId {
/// Create a new device id from a time-ordered UUID (version 7).
#[must_use]
pub fn new() -> Self {
Self(Uuid::now_v7())
}
@ -70,7 +71,7 @@ impl std::str::FromStr for DeviceType {
"tablet" => Ok(Self::Tablet),
"server" => Ok(Self::Server),
"other" => Ok(Self::Other),
_ => Err(format!("unknown device type: {}", s)),
_ => Err(format!("unknown device type: {s}")),
}
}
}
@ -93,6 +94,7 @@ pub struct SyncDevice {
}
impl SyncDevice {
#[must_use]
pub fn new(
user_id: UserId,
name: String,
@ -150,7 +152,7 @@ impl std::str::FromStr for SyncChangeType {
"deleted" => Ok(Self::Deleted),
"moved" => Ok(Self::Moved),
"metadata_updated" => Ok(Self::MetadataUpdated),
_ => Err(format!("unknown sync change type: {}", s)),
_ => Err(format!("unknown sync change type: {s}")),
}
}
}
@ -171,6 +173,7 @@ pub struct SyncLogEntry {
}
impl SyncLogEntry {
#[must_use]
pub fn new(
change_type: SyncChangeType,
path: String,
@ -225,7 +228,7 @@ impl std::str::FromStr for FileSyncStatus {
"pending_download" => Ok(Self::PendingDownload),
"conflict" => Ok(Self::Conflict),
"deleted" => Ok(Self::Deleted),
_ => Err(format!("unknown file sync status: {}", s)),
_ => Err(format!("unknown file sync status: {s}")),
}
}
}
@ -260,6 +263,7 @@ pub struct SyncConflict {
}
impl SyncConflict {
#[must_use]
pub fn new(
device_id: DeviceId,
path: String,
@ -319,7 +323,7 @@ impl std::str::FromStr for UploadStatus {
"failed" => Ok(Self::Failed),
"expired" => Ok(Self::Expired),
"cancelled" => Ok(Self::Cancelled),
_ => Err(format!("unknown upload status: {}", s)),
_ => Err(format!("unknown upload status: {s}")),
}
}
}
@ -341,6 +345,7 @@ pub struct UploadSession {
}
impl UploadSession {
#[must_use]
pub fn new(
device_id: DeviceId,
target_path: String,

View file

@ -90,6 +90,10 @@ pub struct AckRequest {
}
/// Get changes since a cursor position.
///
/// # Errors
///
/// Returns an error if the storage query fails.
pub async fn get_changes(
storage: &DynStorageBackend,
cursor: i64,
@ -101,7 +105,7 @@ pub async fn get_changes(
let has_more = changes.len() > limit as usize;
let changes: Vec<_> = changes.into_iter().take(limit as usize).collect();
let new_cursor = changes.last().map(|c| c.sequence).unwrap_or(cursor);
let new_cursor = changes.last().map_or(cursor, |c| c.sequence);
Ok(ChangesResponse {
changes,
@ -111,6 +115,10 @@ pub async fn get_changes(
}
/// Record a change in the sync log.
///
/// # Errors
///
/// Returns an error if the storage record operation fails.
pub async fn record_change(
storage: &DynStorageBackend,
change_type: SyncChangeType,
@ -138,6 +146,10 @@ pub async fn record_change(
}
/// Update device cursor after processing changes.
///
/// # Errors
///
/// Returns an error if the device lookup or update fails.
pub async fn update_device_cursor(
storage: &DynStorageBackend,
device_id: DeviceId,
@ -152,6 +164,10 @@ pub async fn update_device_cursor(
}
/// Mark a file as synced for a device.
///
/// # Errors
///
/// Returns an error if the storage upsert operation fails.
pub async fn mark_synced(
storage: &DynStorageBackend,
device_id: DeviceId,
@ -176,6 +192,10 @@ pub async fn mark_synced(
}
/// Mark a file as pending download for a device.
///
/// # Errors
///
/// Returns an error if the storage lookup or upsert operation fails.
pub async fn mark_pending_download(
storage: &DynStorageBackend,
device_id: DeviceId,
@ -211,6 +231,7 @@ pub async fn mark_pending_download(
}
/// Generate a device token using UUIDs for randomness.
#[must_use]
pub fn generate_device_token() -> String {
// Concatenate two UUIDs for 256 bits of randomness
let uuid1 = uuid::Uuid::new_v4();
@ -219,6 +240,7 @@ pub fn generate_device_token() -> String {
}
/// Hash a device token for storage.
///
/// The stored value is the BLAKE3 digest of the token, hex-encoded.
#[must_use]
pub fn hash_device_token(token: &str) -> String {
    let digest = blake3::hash(token.as_bytes());
    digest.to_hex().to_string()
}