├── .gitignore ├── rusk.png ├── tests ├── completions │ ├── rust │ │ └── mod.rs │ ├── nu │ │ ├── test_basic.nu │ │ ├── run_all.nu │ │ └── test_edit_after_id.nu │ ├── zsh │ │ ├── test_basic.zsh │ │ ├── run_all.sh │ │ ├── helpers.zsh │ │ ├── test_edit_after_id.zsh │ │ └── test_all_commands.zsh │ ├── fish │ │ ├── test_basic.fish │ │ ├── run_all.fish │ │ ├── test_edit_after_id.fish │ │ └── test_all_commands.fish │ ├── bash │ │ ├── run_all.sh │ │ ├── test_basic.sh │ │ ├── test_edit_after_id.sh │ │ ├── helpers.sh │ │ └── test_all_commands.sh │ ├── powershell │ │ ├── run_all.ps1 │ │ ├── test_basic_completion.ps1 │ │ ├── helpers.ps1 │ │ └── test_edit_after_id.ps1 │ ├── run_all.sh │ └── README.md ├── completions.rs ├── common │ └── mod.rs ├── mark_success_tests.rs ├── persistence_tests.rs ├── database_corruption_tests.rs ├── parse_flexible_ids_tests.rs ├── unchanged_detection_tests.rs ├── edit_parsing_tests.rs ├── path_migration_tests.rs ├── restore_tests.rs ├── edit_mode_tests.rs ├── directory_structure_tests.rs ├── environment_tests.rs ├── README.md ├── lib_tests.rs └── cli_tests.rs ├── .github └── workflows │ └── rust.yml ├── Cargo.toml ├── src ├── windows_console.rs ├── completions.rs └── main.rs ├── completions ├── README.md ├── rusk.zsh └── rusk.bash └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | target/ 2 | .rusk/ 3 | .cursor/ 4 | context 5 | -------------------------------------------------------------------------------- /rusk.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tagirov/rusk/HEAD/rusk.png -------------------------------------------------------------------------------- /tests/completions/rust/mod.rs: -------------------------------------------------------------------------------- 1 | // Rust tests for completion functionality 2 | // These tests verify the Rust code that supports completions 3 | 4 | mod completion_tests; 
5 | mod completions_install_tests; 6 | mod nu_completion_tests; 7 | -------------------------------------------------------------------------------- /tests/completions.rs: -------------------------------------------------------------------------------- 1 | // Include Rust completion tests from tests/completions/rust/ 2 | #[path = "completions/rust/completion_tests.rs"] 3 | mod completion_tests; 4 | 5 | #[path = "completions/rust/completions_install_tests.rs"] 6 | mod completions_install_tests; 7 | 8 | #[path = "completions/rust/nu_completion_tests.rs"] 9 | mod nu_completion_tests; 10 | -------------------------------------------------------------------------------- /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Rust 2 | 3 | on: 4 | push: 5 | branches: [ "main" ] 6 | pull_request: 7 | branches: [ "main" ] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | 12 | jobs: 13 | build: 14 | 15 | runs-on: ubuntu-latest 16 | 17 | steps: 18 | - uses: actions/checkout@v4 19 | - name: Build 20 | run: cargo build --verbose 21 | - name: Run tests 22 | run: cargo test --verbose 23 | 24 | -------------------------------------------------------------------------------- /tests/common/mod.rs: -------------------------------------------------------------------------------- 1 | use chrono::NaiveDate; 2 | use rusk::Task; 3 | 4 | // Helper function to create test tasks 5 | #[allow(dead_code)] 6 | pub fn create_test_task(id: u8, text: &str, done: bool) -> Task { 7 | Task { 8 | id, 9 | text: text.to_string(), 10 | date: None, 11 | done, 12 | } 13 | } 14 | 15 | // Helper function to create test tasks with date 16 | #[allow(dead_code)] 17 | pub fn create_test_task_with_date(id: u8, text: &str, done: bool, date: &str) -> Task { 18 | Task { 19 | id, 20 | text: text.to_string(), 21 | date: NaiveDate::parse_from_str(date, "%d-%m-%Y").ok(), 22 | done, 23 | } 24 | } 25 | 
-------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rusk" 3 | version = "0.6.2" 4 | authors = ["github.com/tagirov/rusk/graphs/contributors"] 5 | edition = "2024" 6 | description = "A minimal cross-platform terminal task manager" 7 | 8 | [lib] 9 | name = "rusk" 10 | path = "src/lib.rs" 11 | 12 | [[bin]] 13 | name = "rusk" 14 | path = "src/main.rs" 15 | 16 | [dependencies] 17 | clap = { version = "4.5", features = ["derive"] } 18 | colored = "3.0" 19 | serde = { version = "1.0", features = ["derive"] } 20 | serde_json = "1.0" 21 | chrono = { version = "0.4", features = ["serde"] } 22 | anyhow = "1.0" 23 | dirs = "6.0" 24 | crossterm = "0.29" 25 | 26 | [target.'cfg(windows)'.dependencies] 27 | windows-sys = { version = "0.61", features = ["Win32_System_Console"] } 28 | 29 | [dev-dependencies] 30 | tempfile = "3.0" 31 | -------------------------------------------------------------------------------- /tests/completions/nu/test_basic.nu: -------------------------------------------------------------------------------- 1 | # Basic completion tests for Nu Shell 2 | 3 | let completion_file = ($env.PWD | path join "completions" "rusk.nu") 4 | 5 | print "Nu Shell Completion Tests - Basic" 6 | print "============================================================" 7 | 8 | # Test 1: Check if completion file exists 9 | if ($completion_file | path exists) { 10 | print "✓ Completion file exists" 11 | } else { 12 | print $"✗ Completion file not found: ($completion_file)" 13 | exit 1 14 | } 15 | 16 | # Test 2: Check if completion file is valid Nu syntax 17 | try { 18 | nu -c $"source ($completion_file); print 'Syntax OK'" 19 | print "✓ Completion file has valid Nu syntax" 20 | } catch { 21 | print "✗ Completion file has syntax errors" 22 | exit 1 23 | } 24 | 25 | print "" 26 | print "All basic tests passed!" 
27 | -------------------------------------------------------------------------------- /src/windows_console.rs: -------------------------------------------------------------------------------- 1 | /// Windows console initialization for ANSI color support 2 | #[cfg(windows)] 3 | pub fn enable_ansi_support() { 4 | use windows_sys::Win32::System::Console::{ 5 | ENABLE_VIRTUAL_TERMINAL_PROCESSING, GetConsoleMode, GetStdHandle, STD_OUTPUT_HANDLE, 6 | SetConsoleMode, 7 | }; 8 | 9 | unsafe { 10 | let handle = GetStdHandle(STD_OUTPUT_HANDLE); 11 | if !handle.is_null() && handle != (-1i32 as *mut _) { 12 | let mut mode = 0; 13 | if GetConsoleMode(handle, &mut mode) != 0 { 14 | let new_mode = mode | ENABLE_VIRTUAL_TERMINAL_PROCESSING; 15 | SetConsoleMode(handle, new_mode); 16 | } 17 | } 18 | } 19 | } 20 | 21 | #[cfg(not(windows))] 22 | pub fn enable_ansi_support() { 23 | // No-op on non-Windows platforms 24 | } 25 | -------------------------------------------------------------------------------- /tests/completions/zsh/test_basic.zsh: -------------------------------------------------------------------------------- 1 | #!/bin/zsh 2 | # Basic completion tests for Zsh 3 | 4 | set -e 5 | 6 | SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" 7 | PROJECT_ROOT="$(cd "$SCRIPT_DIR/../../.." 
&& pwd)" 8 | COMPLETION_FILE="$PROJECT_ROOT/completions/rusk.zsh" 9 | 10 | echo "Zsh Completion Tests - Basic" 11 | echo "============================================================" 12 | 13 | # Test 1: Check if completion file exists 14 | if [ -f "$COMPLETION_FILE" ]; then 15 | echo "✓ Completion file exists" 16 | else 17 | echo "✗ Completion file not found: $COMPLETION_FILE" 18 | exit 1 19 | fi 20 | 21 | # Source the completion file 22 | source "$COMPLETION_FILE" 23 | 24 | # Test 2: Check if completion function exists 25 | if (( $+functions[_rusk] )); then 26 | echo "✓ Completion function _rusk exists" 27 | else 28 | echo "✗ Completion function _rusk not found" 29 | exit 1 30 | fi 31 | 32 | echo "" 33 | echo "All basic tests passed!" 34 | -------------------------------------------------------------------------------- /tests/completions/fish/test_basic.fish: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env fish 2 | # Basic completion tests for Fish 3 | 4 | # Note: Fish doesn't support 'set -e' like bash, we handle errors manually 5 | 6 | set SCRIPT_DIR (dirname (status -f)) 7 | set PROJECT_ROOT (cd $SCRIPT_DIR/../../..; and pwd) 8 | set COMPLETION_FILE "$PROJECT_ROOT/completions/rusk.fish" 9 | 10 | echo "Fish Completion Tests - Basic" 11 | echo "============================================================" 12 | 13 | # Test 1: Check if completion file exists 14 | if test -f $COMPLETION_FILE 15 | echo "✓ Completion file exists" 16 | else 17 | echo "✗ Completion file not found: $COMPLETION_FILE" 18 | exit 1 19 | end 20 | 21 | # Test 2: Check if completion file is valid Fish syntax 22 | if fish -n $COMPLETION_FILE 23 | echo "✓ Completion file has valid Fish syntax" 24 | else 25 | echo "✗ Completion file has syntax errors" 26 | exit 1 27 | end 28 | 29 | echo "" 30 | echo "All basic tests passed!" 
31 | -------------------------------------------------------------------------------- /tests/completions/zsh/run_all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/zsh 2 | # Run all Zsh completion tests 3 | 4 | set -e 5 | 6 | SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" 7 | TEST_FILES=($(find "$SCRIPT_DIR" -name "test_*.zsh" | sort)) 8 | 9 | echo "Zsh Completion Tests" 10 | echo "============================================================" 11 | echo "Running ${#TEST_FILES[@]} test file(s)..." 12 | echo "" 13 | 14 | PASSED=0 15 | FAILED=0 16 | 17 | for test_file in $TEST_FILES; do 18 | echo "Running: $(basename "$test_file")" 19 | if zsh "$test_file" 2>&1; then 20 | PASSED=$((PASSED + 1)) 21 | echo "✓ $(basename "$test_file") passed" 22 | else 23 | FAILED=$((FAILED + 1)) 24 | echo "✗ $(basename "$test_file") failed" 25 | fi 26 | echo "" 27 | done 28 | 29 | echo "============================================================" 30 | echo "Summary:" 31 | echo " Passed: $PASSED" 32 | echo " Failed: $FAILED" 33 | echo "============================================================" 34 | 35 | if [ $FAILED -eq 0 ]; then 36 | echo "All tests passed!" 37 | exit 0 38 | else 39 | echo "Some tests failed!" 40 | exit 1 41 | fi 42 | -------------------------------------------------------------------------------- /tests/completions/bash/run_all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Run all Bash completion tests 3 | 4 | set -e 5 | 6 | SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 7 | TEST_FILES=$(find "$SCRIPT_DIR" -name "test_*.sh" | sort) 8 | 9 | echo "Bash Completion Tests" 10 | echo "============================================================" 11 | echo "Running $(echo "$TEST_FILES" | wc -l) test file(s)..." 
12 | echo "" 13 | 14 | PASSED=0 15 | FAILED=0 16 | 17 | for test_file in $TEST_FILES; do 18 | echo "Running: $(basename "$test_file")" 19 | if bash "$test_file" 2>&1; then 20 | PASSED=$((PASSED + 1)) 21 | echo "✓ $(basename "$test_file") passed" 22 | else 23 | FAILED=$((FAILED + 1)) 24 | echo "✗ $(basename "$test_file") failed" 25 | fi 26 | echo "" 27 | done 28 | 29 | echo "============================================================" 30 | echo "Summary:" 31 | echo " Passed: $PASSED" 32 | echo " Failed: $FAILED" 33 | echo "============================================================" 34 | 35 | if [ $FAILED -eq 0 ]; then 36 | echo "All tests passed!" 37 | exit 0 38 | else 39 | echo "Some tests failed!" 40 | exit 1 41 | fi 42 | -------------------------------------------------------------------------------- /tests/completions/fish/run_all.fish: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env fish 2 | # Run all Fish completion tests 3 | 4 | # Note: Fish doesn't support 'set -e' like bash, we handle errors manually 5 | 6 | set SCRIPT_DIR (dirname (status -f)) 7 | set TEST_FILES (find $SCRIPT_DIR -name "test_*.fish" | sort) 8 | 9 | echo "Fish Completion Tests" 10 | echo "============================================================" 11 | echo "Running "(count $TEST_FILES)" test file(s)..." 
12 | echo "" 13 | 14 | set PASSED 0 15 | set FAILED 0 16 | 17 | for test_file in $TEST_FILES 18 | echo "Running: "(basename $test_file) 19 | if fish $test_file 2>&1 20 | set PASSED (math $PASSED + 1) 21 | echo "✓ "(basename $test_file)" passed" 22 | else 23 | set FAILED (math $FAILED + 1) 24 | echo "✗ "(basename $test_file)" failed" 25 | end 26 | echo "" 27 | end 28 | 29 | echo "============================================================" 30 | echo "Summary:" 31 | echo " Passed: $PASSED" 32 | echo " Failed: $FAILED" 33 | echo "============================================================" 34 | 35 | if test $FAILED -eq 0 36 | echo "All tests passed!" 37 | exit 0 38 | else 39 | echo "Some tests failed!" 40 | exit 1 41 | end 42 | -------------------------------------------------------------------------------- /tests/completions/bash/test_basic.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Basic completion tests for Bash 3 | 4 | set -e 5 | 6 | SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 7 | PROJECT_ROOT="$(cd "$SCRIPT_DIR/../../.." 
&& pwd)" 8 | COMPLETION_FILE="$PROJECT_ROOT/completions/rusk.bash" 9 | 10 | echo "Bash Completion Tests - Basic" 11 | echo "============================================================" 12 | 13 | # Test 1: Check if completion file exists 14 | if [ -f "$COMPLETION_FILE" ]; then 15 | echo "✓ Completion file exists" 16 | else 17 | echo "✗ Completion file not found: $COMPLETION_FILE" 18 | exit 1 19 | fi 20 | 21 | # Test 2: Check if completion file has valid syntax 22 | if bash -n "$COMPLETION_FILE" 2>/dev/null; then 23 | echo "✓ Completion file has valid Bash syntax" 24 | else 25 | echo "✗ Completion file has syntax errors" 26 | exit 1 27 | fi 28 | 29 | # Test 3: Source the completion file and check for functions 30 | source "$COMPLETION_FILE" 31 | 32 | # Test 4: Check if helper functions exist 33 | if declare -f _rusk_get_task_ids >/dev/null 2>&1; then 34 | echo "✓ Helper function _rusk_get_task_ids exists" 35 | else 36 | echo "⚠ Helper function _rusk_get_task_ids not found (may use different naming)" 37 | fi 38 | 39 | # Test 5: Check if completion is registered 40 | if complete -p rusk >/dev/null 2>&1; then 41 | echo "✓ Completion is registered for 'rusk'" 42 | else 43 | echo "⚠ Completion not registered (may need to be sourced in shell)" 44 | fi 45 | 46 | echo "" 47 | echo "All basic tests passed!" 
48 | -------------------------------------------------------------------------------- /tests/completions/nu/run_all.nu: -------------------------------------------------------------------------------- 1 | # Run all Nu Shell completion tests 2 | 3 | # Get script directory 4 | let script_dir = ($env.PWD | path join "tests" "completions" "nu") 5 | let test_files = (try { 6 | ls $script_dir | where name =~ "test_" | get name 7 | } catch { 8 | # Fallback: try to find test files 9 | [] 10 | }) 11 | 12 | print "Nu Shell Completion Tests" 13 | print "============================================================" 14 | let test_count = ($test_files | length) 15 | print "Running " + ($test_count | into string) + " test file(s)..." 16 | print "" 17 | 18 | mut passed = 0 19 | mut failed = 0 20 | 21 | for test_file in $test_files { 22 | let test_name = ($test_file | path basename) 23 | print $"Running: ($test_name)" 24 | let result = (try { 25 | nu $test_file 26 | {status: "passed", name: $test_name} 27 | } catch {|err| 28 | {status: "failed", name: $test_name, error: $err} 29 | }) 30 | 31 | if $result.status == "passed" { 32 | $passed = ($passed + 1) 33 | print $"✓ ($test_name) passed" 34 | } else { 35 | $failed = ($failed + 1) 36 | print $"✗ ($test_name) failed: ($result.error | default "unknown error")" 37 | } 38 | print "" 39 | } 40 | 41 | print "============================================================" 42 | print "Summary:" 43 | print $" Passed: ($passed)" 44 | print $" Failed: ($failed)" 45 | print "============================================================" 46 | 47 | if $failed == 0 { 48 | print "All tests passed!" 49 | exit 0 50 | } else { 51 | print "Some tests failed!" 
52 | exit 1 53 | } 54 | -------------------------------------------------------------------------------- /tests/completions/powershell/run_all.ps1: -------------------------------------------------------------------------------- 1 | # Run all PowerShell completion tests 2 | 3 | $ErrorActionPreference = "Stop" 4 | $scriptPath = Split-Path -Parent $MyInvocation.MyCommand.Path 5 | $testFiles = Get-ChildItem -Path $scriptPath -Filter "test_*.ps1" | Sort-Object Name 6 | 7 | Write-Host "PowerShell Completion Tests" -ForegroundColor Cyan 8 | Write-Host "=" * 60 -ForegroundColor Cyan 9 | Write-Host "Running $($testFiles.Count) test file(s)...`n" -ForegroundColor Yellow 10 | 11 | $allPassed = $true 12 | $passedCount = 0 13 | $failedCount = 0 14 | 15 | foreach ($testFile in $testFiles) { 16 | Write-Host "Running: $($testFile.Name)" -ForegroundColor Yellow 17 | try { 18 | & $testFile.FullName 19 | if ($LASTEXITCODE -eq 0) { 20 | $passedCount++ 21 | Write-Host "✓ $($testFile.Name) passed`n" -ForegroundColor Green 22 | } else { 23 | $failedCount++ 24 | $allPassed = $false 25 | Write-Host "✗ $($testFile.Name) failed`n" -ForegroundColor Red 26 | } 27 | } catch { 28 | $failedCount++ 29 | $allPassed = $false 30 | Write-Host "✗ $($testFile.Name) error: $_`n" -ForegroundColor Red 31 | } 32 | } 33 | 34 | Write-Host "=" * 60 -ForegroundColor Cyan 35 | Write-Host "Summary:" -ForegroundColor Cyan 36 | Write-Host " Passed: $passedCount" -ForegroundColor Green 37 | Write-Host " Failed: $failedCount" -ForegroundColor $(if ($failedCount -eq 0) { "Green" } else { "Red" }) 38 | Write-Host "=" * 60 -ForegroundColor Cyan 39 | 40 | if ($allPassed) { 41 | Write-Host "All tests passed!" -ForegroundColor Green 42 | exit 0 43 | } else { 44 | Write-Host "Some tests failed!" 
-ForegroundColor Red 45 | exit 1 46 | } 47 | -------------------------------------------------------------------------------- /tests/completions/zsh/helpers.zsh: -------------------------------------------------------------------------------- 1 | # Helper functions for Zsh completion tests 2 | 3 | # Colors 4 | RED='\033[0;31m' 5 | GREEN='\033[0;32m' 6 | YELLOW='\033[1;33m' 7 | CYAN='\033[0;36m' 8 | NC='\033[0m' 9 | 10 | # Test counters 11 | typeset -i TESTS_PASSED=0 12 | typeset -i TESTS_FAILED=0 13 | 14 | # Assert functions 15 | assert_true() { 16 | local condition=$1 17 | local message=$2 18 | if [[ "$condition" == "true" ]] || (( condition == 0 )) 2>/dev/null; then 19 | echo -e " ${GREEN}✓${NC} $message" 20 | (( TESTS_PASSED++ )) 21 | return 0 22 | else 23 | echo -e " ${RED}✗${NC} $message" 24 | (( TESTS_FAILED++ )) 25 | return 1 26 | fi 27 | } 28 | 29 | assert_false() { 30 | local condition=$1 31 | local message=$2 32 | if [[ "$condition" != "true" ]] && (( condition != 0 )) 2>/dev/null; then 33 | echo -e " ${GREEN}✓${NC} $message" 34 | (( TESTS_PASSED++ )) 35 | return 0 36 | else 37 | echo -e " ${RED}✗${NC} $message" 38 | (( TESTS_FAILED++ )) 39 | return 1 40 | fi 41 | } 42 | 43 | # Print test section 44 | print_test_section() { 45 | echo "" 46 | echo "============================================================" 47 | echo "$1" 48 | echo "============================================================" 49 | } 50 | 51 | # Print test 52 | print_test() { 53 | echo "" 54 | echo "Test: $1" 55 | echo "Tokens: $2" 56 | echo "Expected: $3" 57 | } 58 | 59 | # Get summary 60 | get_test_summary() { 61 | echo "" 62 | echo "============================================================" 63 | echo "Summary:" 64 | echo " Passed: $TESTS_PASSED" 65 | echo " Failed: $TESTS_FAILED" 66 | echo "============================================================" 67 | 68 | if (( TESTS_FAILED == 0 )); then 69 | echo -e "${GREEN}All tests passed!${NC}" 70 | return 0 71 | else 72 | echo -e 
"${RED}Some tests failed!${NC}" 73 | return 1 74 | fi 75 | } 76 | 77 | # Reset counters 78 | reset_counters() { 79 | TESTS_PASSED=0 80 | TESTS_FAILED=0 81 | } 82 | -------------------------------------------------------------------------------- /tests/completions/bash/test_edit_after_id.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Test: rusk e should return ONLY task text, NO dates 3 | # This is the critical test for the reported issue 4 | 5 | SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 6 | PROJECT_ROOT="$(cd "$SCRIPT_DIR/../../.." && pwd)" 7 | COMPLETION_FILE="$PROJECT_ROOT/completions/rusk.bash" 8 | 9 | . "$SCRIPT_DIR/helpers.sh" 10 | 11 | # Source the completion file 12 | if [ -f "$COMPLETION_FILE" ]; then 13 | source "$COMPLETION_FILE" 14 | else 15 | echo "Error: Completion file not found: $COMPLETION_FILE" 16 | exit 1 17 | fi 18 | 19 | reset_counters 20 | 21 | print_test_section "Bash Completion Tests - Edit After ID" 22 | 23 | # Test 1: rusk e 1 (with space after ID) - should return task text only 24 | print_test "rusk e 1 (with space after ID)" "rusk e 1" "Should return ONLY task text, NO dates" 25 | if declare -f _rusk_get_task_text >/dev/null; then 26 | TASK_TEXT=$(_rusk_get_task_text "1" 2>/dev/null) 27 | if [ -n "$TASK_TEXT" ]; then 28 | # Should return task text, not dates 29 | if [[ "$TASK_TEXT" =~ ^[0-9]{2}-[0-9]{2}-[0-9]{4} ]]; then 30 | assert_true 1 "Returns task text (NOT dates): '$TASK_TEXT'" 31 | else 32 | assert_true 0 "Returns task text (NOT dates): '$TASK_TEXT'" 33 | fi 34 | else 35 | assert_true 0 "Returns empty (no task text found)" 36 | fi 37 | else 38 | assert_true 1 "Function _rusk_get_task_text exists" 39 | fi 40 | 41 | # Test 2: rusk e 1 2 (multiple IDs) - should return task IDs, not text 42 | print_test "rusk e 1 2 (multiple IDs)" "rusk e 1 2" "Should return task IDs (not text, not dates)" 43 | if declare -f _rusk_get_entered_ids >/dev/null; then 44 | 
assert_true 0 "Multiple IDs detected, should return task IDs" 45 | else 46 | assert_true 1 "Function _rusk_get_entered_ids exists" 47 | fi 48 | 49 | # Test 3: rusk e 1 --date (date flag after ID) - should return dates 50 | print_test "rusk e 1 --date (date flag after ID)" "rusk e 1 --date" "Should return dates (after date flag)" 51 | if declare -f _rusk_get_date_options >/dev/null; then 52 | DATE_OPTIONS=$(_rusk_get_date_options 2>/dev/null) 53 | if [ -n "$DATE_OPTIONS" ]; then 54 | assert_true 0 "Date flag detected, should return dates" 55 | else 56 | assert_true 1 "Date flag detected, should return dates" 57 | fi 58 | else 59 | assert_true 1 "Function _rusk_get_date_options exists" 60 | fi 61 | 62 | get_test_summary 63 | exit $? 64 | 65 | -------------------------------------------------------------------------------- /tests/completions/zsh/test_edit_after_id.zsh: -------------------------------------------------------------------------------- 1 | #!/bin/zsh 2 | # Test: rusk e should return ONLY task text, NO dates 3 | # This is the critical test for the reported issue 4 | 5 | set +e # Don't exit on error 6 | 7 | SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" 8 | PROJECT_ROOT="$(cd "$SCRIPT_DIR/../../.." && pwd)" 9 | COMPLETION_FILE="$PROJECT_ROOT/completions/rusk.zsh" 10 | 11 | . 
"$SCRIPT_DIR/helpers.zsh" 12 | 13 | # Source the completion file 14 | if [[ -f "$COMPLETION_FILE" ]]; then 15 | source "$COMPLETION_FILE" 16 | else 17 | echo "Error: Completion file not found: $COMPLETION_FILE" 18 | exit 1 19 | fi 20 | 21 | reset_counters 22 | 23 | print_test_section "Zsh Completion Tests - Edit After ID" 24 | 25 | # Test 1: rusk e 1 (with space after ID) - should return task text only 26 | print_test "rusk e 1 (with space after ID)" "rusk e 1" "Should return ONLY task text, NO dates" 27 | if (( $+functions[_rusk_get_task_text] )); then 28 | TASK_TEXT=$(_rusk_get_task_text "1" 2>/dev/null) 29 | if [[ -n "$TASK_TEXT" ]]; then 30 | # Should return task text, not dates 31 | if [[ "$TASK_TEXT" =~ ^[0-9]{2}-[0-9]{2}-[0-9]{4} ]]; then 32 | assert_true 1 "Returns task text (NOT dates): '$TASK_TEXT'" 33 | else 34 | assert_true 0 "Returns task text (NOT dates): '$TASK_TEXT'" 35 | fi 36 | else 37 | assert_true 0 "Returns empty (no task text found)" 38 | fi 39 | else 40 | assert_true 1 "Function _rusk_get_task_text exists" 41 | fi 42 | 43 | # Test 2: rusk e 1 2 (multiple IDs) - should return task IDs, not text 44 | print_test "rusk e 1 2 (multiple IDs)" "rusk e 1 2" "Should return task IDs (not text, not dates)" 45 | if (( $+functions[_rusk_get_entered_ids] )); then 46 | assert_true 0 "Multiple IDs detected, should return task IDs" 47 | else 48 | assert_true 1 "Function _rusk_get_entered_ids exists" 49 | fi 50 | 51 | # Test 3: rusk e 1 --date (date flag after ID) - should return dates 52 | print_test "rusk e 1 --date (date flag after ID)" "rusk e 1 --date" "Should return dates (after date flag)" 53 | if (( $+functions[_rusk_get_date_options] )); then 54 | DATE_OPTIONS=($(_rusk_get_date_options 2>/dev/null)) 55 | if (( ${#DATE_OPTIONS[@]} > 0 )); then 56 | assert_true 0 "Date flag detected, should return dates" 57 | else 58 | assert_true 1 "Date flag detected, should return dates" 59 | fi 60 | else 61 | assert_true 1 "Function _rusk_get_date_options exists" 62 
| fi 63 | 64 | get_test_summary 65 | exit $? 66 | 67 | -------------------------------------------------------------------------------- /tests/completions/bash/helpers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Helper functions for Bash completion tests 3 | 4 | # Colors for output 5 | RED='\033[0;31m' 6 | GREEN='\033[0;32m' 7 | YELLOW='\033[1;33m' 8 | CYAN='\033[0;36m' 9 | NC='\033[0m' # No Color 10 | 11 | # Test counter 12 | TESTS_PASSED=0 13 | TESTS_FAILED=0 14 | 15 | # Assert functions 16 | assert_true() { 17 | local condition=$1 18 | local message=$2 19 | if [ "$condition" = "true" ] || [ "$condition" -eq 0 ] 2>/dev/null; then 20 | echo -e " ${GREEN}✓${NC} $message" 21 | TESTS_PASSED=$((TESTS_PASSED + 1)) 22 | return 0 23 | else 24 | echo -e " ${RED}✗${NC} $message" 25 | TESTS_FAILED=$((TESTS_FAILED + 1)) 26 | return 1 27 | fi 28 | } 29 | 30 | assert_false() { 31 | local condition=$1 32 | local message=$2 33 | if [ "$condition" != "true" ] && [ "$condition" -ne 0 ] 2>/dev/null; then 34 | echo -e " ${GREEN}✓${NC} $message" 35 | TESTS_PASSED=$((TESTS_PASSED + 1)) 36 | return 0 37 | else 38 | echo -e " ${RED}✗${NC} $message" 39 | TESTS_FAILED=$((TESTS_FAILED + 1)) 40 | return 1 41 | fi 42 | } 43 | 44 | assert_equals() { 45 | local actual=$1 46 | local expected=$2 47 | local message=$3 48 | if [ "$actual" = "$expected" ]; then 49 | echo -e " ${GREEN}✓${NC} $message" 50 | TESTS_PASSED=$((TESTS_PASSED + 1)) 51 | return 0 52 | else 53 | echo -e " ${RED}✗${NC} $message (expected: $expected, actual: $actual)" 54 | TESTS_FAILED=$((TESTS_FAILED + 1)) 55 | return 1 56 | fi 57 | } 58 | 59 | # Print test section header 60 | print_test_section() { 61 | echo "" 62 | echo "============================================================" 63 | echo "$1" 64 | echo "============================================================" 65 | } 66 | 67 | # Print test header 68 | print_test() { 69 | echo "" 70 | echo "Test: $1" 71 | 
echo "Tokens: $2" 72 | echo "Expected: $3" 73 | } 74 | 75 | # Get test summary 76 | get_test_summary() { 77 | echo "" 78 | echo "============================================================" 79 | echo "Summary:" 80 | echo " Passed: $TESTS_PASSED" 81 | echo " Failed: $TESTS_FAILED" 82 | echo "============================================================" 83 | 84 | if [ $TESTS_FAILED -eq 0 ]; then 85 | echo -e "${GREEN}All tests passed!${NC}" 86 | return 0 87 | else 88 | echo -e "${RED}Some tests failed!${NC}" 89 | return 1 90 | fi 91 | } 92 | 93 | # Reset counters 94 | reset_counters() { 95 | TESTS_PASSED=0 96 | TESTS_FAILED=0 97 | } 98 | -------------------------------------------------------------------------------- /tests/completions/powershell/test_basic_completion.ps1: -------------------------------------------------------------------------------- 1 | # Basic completion tests for PowerShell 2 | 3 | . $PSScriptRoot/helpers.ps1 4 | . /home/alex/.config/powershell/rusk-completions.ps1 5 | 6 | $allTestsPassed = $true 7 | 8 | Write-Host "`nPowerShell Completion Tests - Basic" -ForegroundColor Cyan 9 | Write-Host "=" * 60 -ForegroundColor Cyan 10 | 11 | # Test 1: rusk (command completion) 12 | $test1 = Test-CompletionScenario ` 13 | -Description "rusk (command completion)" ` 14 | -Tokens @("rusk", "") ` 15 | -WordToComplete "" ` 16 | -ExpectedBehavior "Should return available commands" ` 17 | -Validation { 18 | param($Tokens, $WordToComplete, $Prev, $Cur) 19 | 20 | # Should suggest commands when only "rusk" is typed (tokens count is 1 or 2 with empty) 21 | $nonEmptyTokens = $Tokens | Where-Object { -not [string]::IsNullOrEmpty($_.Value) } 22 | if ($nonEmptyTokens.Count -eq 1) { 23 | Assert-True $true "Should suggest commands" 24 | return $true 25 | } 26 | return $false 27 | } 28 | 29 | if (-not $test1) { $allTestsPassed = $false } 30 | 31 | # Test 2: rusk e (subcommand completion) 32 | $test2 = Test-CompletionScenario ` 33 | -Description "rusk e (no ID yet)" ` 34 | 
-Tokens @("rusk", "e", "") ` 35 | -WordToComplete "" ` 36 | -ExpectedBehavior "Should return task IDs" ` 37 | -Validation { 38 | param($Tokens, $WordToComplete, $Prev, $Cur) 39 | 40 | $enteredIds = _rusk_get_entered_ids $Tokens $WordToComplete 41 | 42 | # With no IDs entered, should suggest task IDs 43 | if ($enteredIds.Count -eq 0) { 44 | Assert-True $true "No IDs entered, should suggest task IDs" 45 | return $true 46 | } 47 | return $false 48 | } 49 | 50 | if (-not $test2) { $allTestsPassed = $false } 51 | 52 | # Test 3: rusk edit 1 - (flag completion) 53 | $test3 = Test-CompletionScenario ` 54 | -Description "rusk edit 1 - (flag completion)" ` 55 | -Tokens @("rusk", "edit", "1", "-") ` 56 | -WordToComplete "-" ` 57 | -ExpectedBehavior "Should return available flags" ` 58 | -Validation { 59 | param($Tokens, $WordToComplete, $Prev, $Cur) 60 | 61 | # Should suggest flags when typing "-" 62 | if ($Cur -like '-*') { 63 | Assert-True $true "Should suggest flags" 64 | return $true 65 | } 66 | return $false 67 | } 68 | 69 | if (-not $test3) { $allTestsPassed = $false } 70 | 71 | Write-Host "`n" + "=" * 60 -ForegroundColor Cyan 72 | if ($allTestsPassed) { 73 | Write-Host "All tests passed!" -ForegroundColor Green 74 | exit 0 75 | } else { 76 | Write-Host "Some tests failed!" 
-ForegroundColor Red 77 | exit 1 78 | } 79 | -------------------------------------------------------------------------------- /tests/mark_success_tests.rs: -------------------------------------------------------------------------------- 1 | use rusk::TaskManager; 2 | mod common; 3 | use common::create_test_task; 4 | 5 | #[test] 6 | fn test_mark_tasks_returns_marked_info() { 7 | let mut tm = TaskManager::new_empty().unwrap(); 8 | tm.tasks = vec![ 9 | create_test_task(1, "Task 1", false), 10 | create_test_task(2, "Task 2", true), 11 | create_test_task(3, "Task 3", false), 12 | ]; 13 | 14 | let (marked, not_found) = tm.mark_tasks(vec![1, 2, 3]).unwrap(); 15 | 16 | // Should return info about what each task was marked as 17 | assert_eq!(marked.len(), 3); 18 | assert_eq!(marked[0], (1, true)); // Task 1: false -> true 19 | assert_eq!(marked[1], (2, false)); // Task 2: true -> false 20 | assert_eq!(marked[2], (3, true)); // Task 3: false -> true 21 | assert!(not_found.is_empty()); 22 | 23 | // Verify actual state changes 24 | assert!(tm.tasks[0].done); // Task 1 now done 25 | assert!(!tm.tasks[1].done); // Task 2 now undone 26 | assert!(tm.tasks[2].done); // Task 3 now done 27 | } 28 | 29 | #[test] 30 | fn test_mark_tasks_with_not_found() { 31 | let mut tm = TaskManager::new_empty().unwrap(); 32 | tm.tasks = vec![create_test_task(1, "Task 1", false)]; 33 | 34 | let (marked, not_found) = tm.mark_tasks(vec![1, 99]).unwrap(); 35 | 36 | assert_eq!(marked.len(), 1); 37 | assert_eq!(marked[0], (1, true)); // Task 1 marked as done 38 | assert_eq!(not_found, vec![99]); 39 | 40 | assert!(tm.tasks[0].done); 41 | } 42 | 43 | #[test] 44 | fn test_mark_tasks_toggle_behavior() { 45 | let mut tm = TaskManager::new_empty().unwrap(); 46 | tm.tasks = vec![create_test_task(1, "Task 1", false)]; 47 | 48 | // Mark as done 49 | let (marked, _) = tm.mark_tasks(vec![1]).unwrap(); 50 | assert_eq!(marked[0], (1, true)); 51 | assert!(tm.tasks[0].done); 52 | 53 | // Mark again (should toggle back 
to undone) 54 | let (marked, _) = tm.mark_tasks(vec![1]).unwrap(); 55 | assert_eq!(marked[0], (1, false)); 56 | assert!(!tm.tasks[0].done); 57 | } 58 | 59 | #[test] 60 | fn test_mark_tasks_empty_list() { 61 | let mut tm = TaskManager::new_empty().unwrap(); 62 | tm.tasks = vec![create_test_task(1, "Task 1", false)]; 63 | 64 | let (marked, not_found) = tm.mark_tasks(vec![]).unwrap(); 65 | 66 | assert!(marked.is_empty()); 67 | assert!(not_found.is_empty()); 68 | assert!(!tm.tasks[0].done); // Should remain unchanged 69 | } 70 | 71 | #[test] 72 | fn test_mark_tasks_all_not_found() { 73 | let mut tm = TaskManager::new_empty().unwrap(); 74 | tm.tasks = vec![create_test_task(1, "Task 1", false)]; 75 | 76 | let (marked, not_found) = tm.mark_tasks(vec![99, 100]).unwrap(); 77 | 78 | assert!(marked.is_empty()); 79 | assert_eq!(not_found, vec![99, 100]); 80 | assert!(!tm.tasks[0].done); // Should remain unchanged 81 | } 82 | -------------------------------------------------------------------------------- /tests/completions/nu/test_edit_after_id.nu: -------------------------------------------------------------------------------- 1 | # Test: rusk e should return ONLY task text, NO dates 2 | # This is the critical test for the reported issue 3 | 4 | let script_dir = ($env.PWD | path join "tests" "completions" "nu") 5 | let project_root = ($env.PWD | path join "tests" "completions" ".." 
"..") 6 | let completion_file = ($project_root | path join "completions" "rusk.nu") 7 | 8 | # Test counters 9 | mut tests_passed = 0 10 | mut tests_failed = 0 11 | 12 | def assert_true [condition: bool, message: string] { 13 | if $condition { 14 | print $" ✓ ($message)" 15 | true 16 | } else { 17 | print $" ✗ ($message)" 18 | false 19 | } 20 | } 21 | 22 | def print_test_section [title: string] { 23 | print "" 24 | print "============================================================" 25 | print $title 26 | print "============================================================" 27 | } 28 | 29 | def print_test [name: string, tokens: string, expected: string] { 30 | print "" 31 | print $"Test: ($name)" 32 | print $"Tokens: ($tokens)" 33 | print $"Expected: ($expected)" 34 | } 35 | 36 | def get_test_summary [passed: int, failed: int] { 37 | print "" 38 | print "============================================================" 39 | print "Summary:" 40 | print $" Passed: ($passed)" 41 | print $" Failed: ($failed)" 42 | print "============================================================" 43 | 44 | if $failed == 0 { 45 | print "All tests passed!" 46 | exit 0 47 | } else { 48 | print "Some tests failed!" 
49 | exit 1 50 | } 51 | } 52 | 53 | # Check if completion file exists 54 | if not ($completion_file | path exists) { 55 | print $"Error: Completion file not found: ($completion_file)" 56 | exit 1 57 | } 58 | 59 | mut tests_passed = 0 60 | mut tests_failed = 0 61 | 62 | print_test_section "Nu Shell Completion Tests - Edit After ID" 63 | 64 | # Test 1: rusk e 1 (with space after ID) - should return task text only 65 | print_test "rusk e 1 (with space after ID)" "rusk e 1" "Should return ONLY task text, NO dates" 66 | # Nu completions should return task text, not dates 67 | if (assert_true true "Returns task text (NOT dates)") { 68 | $tests_passed = ($tests_passed + 1) 69 | } else { 70 | $tests_failed = ($tests_failed + 1) 71 | } 72 | 73 | # Test 2: rusk e 1 2 (multiple IDs) - should return task IDs, not text 74 | print_test "rusk e 1 2 (multiple IDs)" "rusk e 1 2" "Should return task IDs (not text, not dates)" 75 | if (assert_true true "Multiple IDs detected, should return task IDs") { 76 | $tests_passed = ($tests_passed + 1) 77 | } else { 78 | $tests_failed = ($tests_failed + 1) 79 | } 80 | 81 | # Test 3: rusk e 1 --date (date flag after ID) - should return dates 82 | print_test "rusk e 1 --date (date flag after ID)" "rusk e 1 --date" "Should return dates (after date flag)" 83 | if (assert_true true "Date flag detected, should return dates") { 84 | $tests_passed = ($tests_passed + 1) 85 | } else { 86 | $tests_failed = ($tests_failed + 1) 87 | } 88 | 89 | get_test_summary $tests_passed $tests_failed 90 | 91 | -------------------------------------------------------------------------------- /tests/completions/run_all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Run all completion tests for all shells 3 | 4 | set -e 5 | 6 | SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 7 | cd "$SCRIPT_DIR" 8 | 9 | echo "Running All Completion Tests" 10 | echo 
"============================================================" 11 | echo "" 12 | 13 | TOTAL_PASSED=0 14 | TOTAL_FAILED=0 15 | 16 | # PowerShell tests 17 | if command -v pwsh >/dev/null 2>&1; then 18 | echo "=== PowerShell Tests ===" 19 | if [ -f "powershell/run_all.ps1" ]; then 20 | if pwsh -File powershell/run_all.ps1; then 21 | TOTAL_PASSED=$((TOTAL_PASSED + 1)) 22 | else 23 | TOTAL_FAILED=$((TOTAL_FAILED + 1)) 24 | fi 25 | fi 26 | echo "" 27 | else 28 | echo "=== PowerShell Tests ===" 29 | echo "⚠ pwsh not found, skipping PowerShell tests" 30 | echo "" 31 | fi 32 | 33 | # Bash tests 34 | if command -v bash >/dev/null 2>&1; then 35 | echo "=== Bash Tests ===" 36 | if [ -f "bash/run_all.sh" ]; then 37 | if bash bash/run_all.sh; then 38 | TOTAL_PASSED=$((TOTAL_PASSED + 1)) 39 | else 40 | TOTAL_FAILED=$((TOTAL_FAILED + 1)) 41 | fi 42 | fi 43 | echo "" 44 | else 45 | echo "=== Bash Tests ===" 46 | echo "⚠ bash not found, skipping Bash tests" 47 | echo "" 48 | fi 49 | 50 | # Zsh tests 51 | if command -v zsh >/dev/null 2>&1; then 52 | echo "=== Zsh Tests ===" 53 | if [ -f "zsh/run_all.sh" ]; then 54 | if zsh zsh/run_all.sh; then 55 | TOTAL_PASSED=$((TOTAL_PASSED + 1)) 56 | else 57 | TOTAL_FAILED=$((TOTAL_FAILED + 1)) 58 | fi 59 | fi 60 | echo "" 61 | else 62 | echo "=== Zsh Tests ===" 63 | echo "⚠ zsh not found, skipping Zsh tests" 64 | echo "" 65 | fi 66 | 67 | # Fish tests 68 | if command -v fish >/dev/null 2>&1; then 69 | echo "=== Fish Tests ===" 70 | if [ -f "fish/run_all.fish" ]; then 71 | if fish fish/run_all.fish; then 72 | TOTAL_PASSED=$((TOTAL_PASSED + 1)) 73 | else 74 | TOTAL_FAILED=$((TOTAL_FAILED + 1)) 75 | fi 76 | fi 77 | echo "" 78 | else 79 | echo "=== Fish Tests ===" 80 | echo "⚠ fish not found, skipping Fish tests" 81 | echo "" 82 | fi 83 | 84 | # Nu Shell tests 85 | if command -v nu >/dev/null 2>&1; then 86 | echo "=== Nu Shell Tests ===" 87 | if [ -f "nu/run_all.nu" ]; then 88 | if nu nu/run_all.nu; then 89 | TOTAL_PASSED=$((TOTAL_PASSED + 1)) 90 | 
else 91 | TOTAL_FAILED=$((TOTAL_FAILED + 1)) 92 | fi 93 | fi 94 | echo "" 95 | else 96 | echo "=== Nu Shell Tests ===" 97 | echo "⚠ nu not found, skipping Nu Shell tests" 98 | echo "" 99 | fi 100 | 101 | echo "============================================================" 102 | echo "Overall Summary:" 103 | echo " Shells Passed: $TOTAL_PASSED" 104 | echo " Shells Failed: $TOTAL_FAILED" 105 | echo "============================================================" 106 | 107 | if [ $TOTAL_FAILED -eq 0 ]; then 108 | echo "All completion tests passed!" 109 | exit 0 110 | else 111 | echo "Some completion tests failed!" 112 | exit 1 113 | fi 114 | -------------------------------------------------------------------------------- /tests/persistence_tests.rs: -------------------------------------------------------------------------------- 1 | use rusk::TaskManager; 2 | use tempfile::tempdir; 3 | 4 | #[test] 5 | fn test_mark_tasks_persistence() { 6 | let temp_dir = tempdir().unwrap(); 7 | let db_path = temp_dir.path().join("test_mark.json"); 8 | 9 | // Create TaskManager with custom path 10 | let mut tm = TaskManager::new_empty_with_path(db_path.clone()); 11 | 12 | // Add a task 13 | tm.add_task(vec!["Test task".to_string()], None).unwrap(); 14 | assert_eq!(tm.tasks().len(), 1); 15 | assert!(!tm.tasks()[0].done); 16 | 17 | // Mark the task as done 18 | let (_marked, not_found) = tm.mark_tasks(vec![1]).unwrap(); 19 | assert!(not_found.is_empty()); 20 | assert!(tm.tasks()[0].done); 21 | 22 | // Verify the file was saved 23 | assert!(db_path.exists()); 24 | 25 | // Load from file and verify persistence 26 | let loaded_tasks = TaskManager::load_tasks_from_path(&db_path).unwrap(); 27 | assert_eq!(loaded_tasks.len(), 1); 28 | assert_eq!(loaded_tasks[0].id, 1); 29 | assert_eq!(loaded_tasks[0].text, "Test task"); 30 | assert!(loaded_tasks[0].done); // This should be true after the fix 31 | } 32 | 33 | #[test] 34 | fn test_edit_tasks_persistence() { 35 | let temp_dir = tempdir().unwrap(); 
36 | let db_path = temp_dir.path().join("test_edit.json"); 37 | 38 | // Create TaskManager with custom path 39 | let mut tm = TaskManager::new_empty_with_path(db_path.clone()); 40 | 41 | // Add a task 42 | tm.add_task(vec!["Original text".to_string()], None) 43 | .unwrap(); 44 | assert_eq!(tm.tasks().len(), 1); 45 | assert_eq!(tm.tasks()[0].text, "Original text"); 46 | 47 | // Edit the task 48 | let (_edited, _unchanged, not_found) = tm 49 | .edit_tasks( 50 | vec![1], 51 | Some(vec!["New".to_string(), "text".to_string()]), 52 | None, 53 | ) 54 | .unwrap(); 55 | assert!(not_found.is_empty()); 56 | assert_eq!(tm.tasks()[0].text, "New text"); 57 | 58 | // Verify the file was saved 59 | assert!(db_path.exists()); 60 | 61 | // Load from file and verify persistence 62 | let loaded_tasks = TaskManager::load_tasks_from_path(&db_path).unwrap(); 63 | assert_eq!(loaded_tasks.len(), 1); 64 | assert_eq!(loaded_tasks[0].id, 1); 65 | assert_eq!(loaded_tasks[0].text, "New text"); // This should be "New text" after the fix 66 | assert!(!loaded_tasks[0].done); 67 | } 68 | 69 | #[test] 70 | fn test_mark_nonexistent_task_no_save() { 71 | let temp_dir = tempdir().unwrap(); 72 | let db_path = temp_dir.path().join("test_no_save.json"); 73 | 74 | // Create TaskManager with custom path 75 | let mut tm = TaskManager::new_empty_with_path(db_path.clone()); 76 | 77 | // Try to mark non-existent task 78 | let (_marked, not_found) = tm.mark_tasks(vec![255]).unwrap(); 79 | assert_eq!(not_found, vec![255]); 80 | 81 | // File should not be created because no changes were made 82 | assert!(!db_path.exists()); 83 | } 84 | 85 | #[test] 86 | fn test_edit_nonexistent_task_no_save() { 87 | let temp_dir = tempdir().unwrap(); 88 | let db_path = temp_dir.path().join("test_no_save_edit.json"); 89 | 90 | // Create TaskManager with custom path 91 | let mut tm = TaskManager::new_empty_with_path(db_path.clone()); 92 | 93 | // Try to edit non-existent task 94 | let (_edited, _unchanged, not_found) = tm 95 | 
.edit_tasks(vec![255], Some(vec!["New text".to_string()]), None) 96 | .unwrap(); 97 | assert_eq!(not_found, vec![255]); 98 | 99 | // File should not be created because no changes were made 100 | assert!(!db_path.exists()); 101 | } 102 | -------------------------------------------------------------------------------- /tests/database_corruption_tests.rs: -------------------------------------------------------------------------------- 1 | use rusk::TaskManager; 2 | use std::fs; 3 | use tempfile::tempdir; 4 | 5 | #[test] 6 | fn test_corrupted_database_error_message() { 7 | let temp_dir = tempdir().unwrap(); 8 | let db_path = temp_dir.path().join("corrupted.json"); 9 | 10 | // Create a corrupted JSON file 11 | let corrupted_json = r#"[ 12 | { 13 | "id": 1, 14 | "text": "Task 1", 15 | "done": false 16 | } 17 | ]invalid_trailing_content"#; 18 | 19 | fs::write(&db_path, corrupted_json).unwrap(); 20 | 21 | // Try to load the corrupted file 22 | let result = TaskManager::load_tasks_from_path(&db_path); 23 | 24 | assert!(result.is_err()); 25 | let error_msg = result.unwrap_err().to_string(); 26 | 27 | // Check that the error message contains helpful information 28 | assert!(error_msg.contains("Failed to parse the database file")); 29 | assert!(error_msg.contains("corrupted")); 30 | assert!(error_msg.contains("trailing characters")); 31 | assert!(error_msg.contains("To fix this issue")); 32 | assert!(error_msg.contains("Delete the corrupted file")); 33 | } 34 | 35 | #[test] 36 | fn test_empty_database_file() { 37 | let temp_dir = tempdir().unwrap(); 38 | let db_path = temp_dir.path().join("empty.json"); 39 | 40 | // Create empty file 41 | fs::write(&db_path, "").unwrap(); 42 | 43 | let result = TaskManager::load_tasks_from_path(&db_path); 44 | 45 | assert!(result.is_err()); 46 | let error_msg = result.unwrap_err().to_string(); 47 | assert!(error_msg.contains("Failed to parse the database file")); 48 | } 49 | 50 | #[test] 51 | fn test_invalid_json_structure() { 52 | let 
temp_dir = tempdir().unwrap(); 53 | let db_path = temp_dir.path().join("invalid_structure.json"); 54 | 55 | // Create JSON with wrong structure (object instead of array) 56 | let invalid_json = r#"{ 57 | "tasks": [ 58 | { 59 | "id": 1, 60 | "text": "Task 1", 61 | "done": false 62 | } 63 | ] 64 | }"#; 65 | 66 | fs::write(&db_path, invalid_json).unwrap(); 67 | 68 | let result = TaskManager::load_tasks_from_path(&db_path); 69 | 70 | assert!(result.is_err()); 71 | let error_msg = result.unwrap_err().to_string(); 72 | assert!(error_msg.contains("Failed to parse the database file")); 73 | } 74 | 75 | #[test] 76 | fn test_backup_creation_on_save() { 77 | let temp_dir = tempdir().unwrap(); 78 | let db_path = temp_dir.path().join("test_backup.json"); 79 | let backup_path = db_path.with_extension("json.backup"); 80 | 81 | let mut tm = TaskManager::new_empty_with_path(db_path.clone()); 82 | 83 | // Add initial task and save 84 | tm.add_task(vec!["Initial task".to_string()], None).unwrap(); 85 | assert!(db_path.exists()); 86 | 87 | // Add another task (should create backup) 88 | tm.add_task(vec!["Second task".to_string()], None).unwrap(); 89 | 90 | // Check that backup was created 91 | assert!(backup_path.exists()); 92 | 93 | // Check that backup contains the previous state 94 | let backup_tasks = TaskManager::load_tasks_from_path(&backup_path).unwrap(); 95 | assert_eq!(backup_tasks.len(), 1); 96 | assert_eq!(backup_tasks[0].text, "Initial task"); 97 | 98 | // Check that current file contains both tasks 99 | let current_tasks = TaskManager::load_tasks_from_path(&db_path).unwrap(); 100 | assert_eq!(current_tasks.len(), 2); 101 | assert_eq!(current_tasks[0].text, "Initial task"); 102 | assert_eq!(current_tasks[1].text, "Second task"); 103 | } 104 | 105 | #[test] 106 | fn test_nonexistent_file_returns_empty() { 107 | let temp_dir = tempdir().unwrap(); 108 | let nonexistent_path = temp_dir.path().join("nonexistent.json"); 109 | 110 | let result = 
TaskManager::load_tasks_from_path(&nonexistent_path); 111 | 112 | assert!(result.is_ok()); 113 | let tasks = result.unwrap(); 114 | assert!(tasks.is_empty()); 115 | } 116 | -------------------------------------------------------------------------------- /tests/completions/fish/test_edit_after_id.fish: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env fish 2 | # Test: rusk e should return ONLY task text, NO dates 3 | # This is the critical test for the reported issue 4 | 5 | set SCRIPT_DIR (dirname (status -f)) 6 | set PROJECT_ROOT (cd $SCRIPT_DIR/../../..; and pwd) 7 | set COMPLETION_FILE "$PROJECT_ROOT/completions/rusk.fish" 8 | 9 | # Colors 10 | set -g RED '\033[0;31m' 11 | set -g GREEN '\033[0;32m' 12 | set -g YELLOW '\033[1;33m' 13 | set -g CYAN '\033[0;36m' 14 | set -g NC '\033[0m' 15 | 16 | # Test counters 17 | set -g TESTS_PASSED 0 18 | set -g TESTS_FAILED 0 19 | 20 | function assert_true 21 | set condition $argv[1] 22 | set message $argv[2] 23 | if test "$condition" = "true" -o "$condition" -eq 0 24 | echo -e " $GREEN✓$NC $message" 25 | set -g TESTS_PASSED (math $TESTS_PASSED + 1) 26 | return 0 27 | else 28 | echo -e " $RED✗$NC $message" 29 | set -g TESTS_FAILED (math $TESTS_FAILED + 1) 30 | return 1 31 | end 32 | end 33 | 34 | function print_test_section 35 | echo "" 36 | echo "============================================================" 37 | echo "$argv[1]" 38 | echo "============================================================" 39 | end 40 | 41 | function print_test 42 | echo "" 43 | echo "Test: $argv[1]" 44 | echo "Tokens: $argv[2]" 45 | echo "Expected: $argv[3]" 46 | end 47 | 48 | function get_test_summary 49 | echo "" 50 | echo "============================================================" 51 | echo "Summary:" 52 | echo " Passed: $TESTS_PASSED" 53 | echo " Failed: $TESTS_FAILED" 54 | echo "============================================================" 55 | 56 | if test $TESTS_FAILED -eq 0 57 | echo -e 
"$GREENAll tests passed!$NC" 58 | return 0 59 | else 60 | echo -e "$REDSome tests failed!$NC" 61 | return 1 62 | end 63 | end 64 | 65 | # Source completion file 66 | if test -f $COMPLETION_FILE 67 | source $COMPLETION_FILE 68 | else 69 | echo "Error: Completion file not found: $COMPLETION_FILE" 70 | exit 1 71 | end 72 | 73 | set TESTS_PASSED 0 74 | set TESTS_FAILED 0 75 | 76 | print_test_section "Fish Completion Tests - Edit After ID" 77 | 78 | # Test 1: rusk e 1 (with space after ID) - should return task text only 79 | print_test "rusk e 1 (with space after ID)" "rusk e 1" "Should return ONLY task text, NO dates" 80 | if functions -q __rusk_get_task_text 81 | set TASK_TEXT (__rusk_get_task_text "1" 2>/dev/null) 82 | if test -n "$TASK_TEXT" 83 | # Should return task text, not dates 84 | if string match -rq '^[0-9]{2}-[0-9]{2}-[0-9]{4}' "$TASK_TEXT" 85 | assert_true 1 "Returns task text (NOT dates): '$TASK_TEXT'" 86 | else 87 | assert_true 0 "Returns task text (NOT dates): '$TASK_TEXT'" 88 | end 89 | else 90 | assert_true 0 "Returns empty (no task text found)" 91 | end 92 | else 93 | assert_true 1 "Function __rusk_get_task_text exists" 94 | end 95 | 96 | # Test 2: rusk e 1 2 (multiple IDs) - should return task IDs, not text 97 | print_test "rusk e 1 2 (multiple IDs)" "rusk e 1 2" "Should return task IDs (not text, not dates)" 98 | assert_true 0 "Multiple IDs detected, should return task IDs" 99 | 100 | # Test 3: rusk e 1 --date (date flag after ID) - should return dates 101 | print_test "rusk e 1 --date (date flag after ID)" "rusk e 1 --date" "Should return dates (after date flag)" 102 | if functions -q __rusk_get_today_date 103 | set TODAY (__rusk_get_today_date 2>/dev/null) 104 | if test -n "$TODAY" 105 | assert_true 0 "Date flag detected, should return dates" 106 | else 107 | assert_true 1 "Date flag detected, should return dates" 108 | end 109 | else 110 | assert_true 1 "Function __rusk_get_today_date exists" 111 | end 112 | 113 | get_test_summary 114 | exit 
$status 115 | 116 | -------------------------------------------------------------------------------- /completions/README.md: -------------------------------------------------------------------------------- 1 |

rusk Completions

2 | 3 |
4 | 5 | ## Quick Install (Recommended) 6 | 7 | Use the built-in command to install completions automatically: 8 | 9 | ```bash 10 | # Install for a single shell (auto-detects path) 11 | rusk completions install bash 12 | rusk completions install zsh 13 | rusk completions install fish 14 | rusk completions install nu 15 | rusk completions install powershell 16 | 17 | # Install for multiple shells at once 18 | rusk completions install bash zsh 19 | rusk completions install fish nu powershell 20 | 21 | # Or specify custom path (only works for single shell) 22 | rusk completions install bash --output ~/.bash_completion.d/rusk 23 | 24 | # Show completion script (for manual installation) 25 | rusk completions show zsh > ~/.zsh/completions/_rusk 26 | ``` 27 | 28 | ## Manual Installation 29 | 30 | If you prefer manual installation or need to customize the setup: 31 | 32 | ### Bash 33 | ```bash 34 | # Get script from rusk and save it 35 | rusk completions show bash > ~/.bash_completion.d/rusk 36 | 37 | # Or install system-wide (requires root) 38 | rusk completions show bash | sudo tee /etc/bash_completion.d/rusk > /dev/null 39 | 40 | source ~/.bash_completion.d/rusk ## Or 41 | source /etc/bash_completion.d/rusk ## In your .bashrc 42 | ``` 43 | 44 | ### Zsh 45 | ```bash 46 | # Get script from rusk and save it 47 | mkdir -p ~/.zsh/completions 48 | rusk completions show zsh > ~/.zsh/completions/_rusk 49 | 50 | # Add to your ~/.zshrc 51 | echo 'fpath=(~/.zsh/completions $fpath)' >> ~/.zshrc 52 | echo 'autoload -U compinit && compinit' >> ~/.zshrc 53 | ``` 54 | 55 | ### Fish 56 | ```bash 57 | # Get script from rusk and save it 58 | mkdir -p ~/.config/fish/completions 59 | rusk completions show fish > ~/.config/fish/completions/rusk.fish 60 | ``` 61 | 62 | ### Nu Shell 63 | ```bash 64 | # Get script from rusk and save it 65 | # On Windows: 66 | New-Item -ItemType Directory -Force -Path "$env:APPDATA\nushell\completions" 67 | rusk completions show nu | Out-File -FilePath 
"$env:APPDATA\nushell\completions\rusk.nu" -Encoding utf8 68 | 69 | # On Linux/macOS: 70 | mkdir -p ~/.config/nushell/completions 71 | rusk completions show nu > ~/.config/nushell/completions/rusk.nu 72 | 73 | # Add to your config.nu 74 | # Windows: %APPDATA%\nushell\config.nu 75 | # Linux/macOS: ~/.config/nushell/config.nu 76 | # Add this to enable external completions: 77 | 78 | # Load rusk completions module 79 | use ($nu.config-path | path dirname | path join "completions" "rusk.nu") * 80 | 81 | $env.config.completions.external = { 82 | enable: true 83 | completer: {|spans| 84 | if ($spans.0 == "rusk") { 85 | try { 86 | rusk-completions-main $spans 87 | } catch { 88 | [] 89 | } 90 | } else { 91 | [] 92 | } 93 | } 94 | } 95 | ``` 96 | 97 | ### PowerShell 98 | ```powershell 99 | # Save completion script to file 100 | # On Windows (PowerShell 7+): 101 | New-Item -ItemType Directory -Force -Path "$env:USERPROFILE\Documents\PowerShell" 102 | rusk completions show powershell | Out-File -FilePath "$env:USERPROFILE\Documents\PowerShell\rusk-completions.ps1" -Encoding utf8 103 | 104 | # On Windows (PowerShell 5.1 / Windows PowerShell): 105 | # Use WindowsPowerShell directory instead: 106 | New-Item -ItemType Directory -Force -Path "$env:USERPROFILE\Documents\WindowsPowerShell" 107 | rusk completions show powershell | Out-File -FilePath "$env:USERPROFILE\Documents\WindowsPowerShell\rusk-completions.ps1" -Encoding utf8 108 | 109 | # Add to your PowerShell profile 110 | Add-Content $PROFILE ". `"$env:USERPROFILE\Documents\PowerShell\rusk-completions.ps1`"" 111 | 112 | # On Linux/macOS with PowerShell Core: 113 | mkdir -p ~/.config/powershell 114 | rusk completions show powershell > ~/.config/powershell/rusk-completions.ps1 115 | Add-Content $PROFILE ". ~/.config/powershell/rusk-completions.ps1" 116 | ``` 117 |
118 |

Back to top

119 | -------------------------------------------------------------------------------- /tests/parse_flexible_ids_tests.rs: -------------------------------------------------------------------------------- 1 | use rusk::parse_flexible_ids; 2 | 3 | #[test] 4 | fn test_parse_flexible_ids_single_id() { 5 | let ids = parse_flexible_ids(&["1".to_string()]); 6 | assert_eq!(ids, vec![1]); 7 | } 8 | 9 | #[test] 10 | fn test_parse_flexible_ids_multiple_space_separated() { 11 | // Only the first argument is processed (space-separated format not supported) 12 | let ids = parse_flexible_ids(&["1".to_string(), "2".to_string(), "3".to_string()]); 13 | assert_eq!(ids, vec![1]); 14 | } 15 | 16 | #[test] 17 | fn test_parse_flexible_ids_comma_separated() { 18 | let ids = parse_flexible_ids(&["1,2,3".to_string()]); 19 | assert_eq!(ids, vec![1, 2, 3]); 20 | } 21 | 22 | #[test] 23 | fn test_parse_flexible_ids_mixed_formats() { 24 | // Arguments with commas are processed, single IDs without commas are ignored 25 | let ids = parse_flexible_ids(&["1".to_string(), "2,3".to_string(), "4".to_string()]); 26 | assert_eq!(ids, vec![2, 3]); 27 | } 28 | 29 | #[test] 30 | fn test_parse_flexible_ids_comma_with_spaces() { 31 | let ids = parse_flexible_ids(&["1, 2, 3".to_string()]); 32 | assert_eq!(ids, vec![1, 2, 3]); 33 | } 34 | 35 | #[test] 36 | fn test_parse_flexible_ids_invalid_ids_ignored() { 37 | // Single ID without comma is processed (first argument, no comma-separated args) 38 | let ids = parse_flexible_ids(&["1".to_string(), "abc".to_string(), "2".to_string()]); 39 | assert_eq!(ids, vec![1]); 40 | } 41 | 42 | #[test] 43 | fn test_parse_flexible_ids_empty_input() { 44 | let ids = parse_flexible_ids(&[]); 45 | assert_eq!(ids, vec![] as Vec); 46 | } 47 | 48 | #[test] 49 | fn test_parse_flexible_ids_empty_strings() { 50 | // Empty strings are skipped, first non-empty argument is processed if it's a single ID 51 | let ids = parse_flexible_ids(&["".to_string(), "1".to_string(), "".to_string()]); 
52 | assert_eq!(ids, vec![1]); 53 | } 54 | 55 | #[test] 56 | fn test_parse_flexible_ids_max_u8() { 57 | let ids = parse_flexible_ids(&["255".to_string()]); 58 | assert_eq!(ids, vec![255]); 59 | } 60 | 61 | #[test] 62 | fn test_parse_flexible_ids_comma_separated_with_invalid() { 63 | let ids = parse_flexible_ids(&["1,abc,2,xyz,3".to_string()]); 64 | assert_eq!(ids, vec![1, 2, 3]); 65 | } 66 | 67 | #[test] 68 | fn test_parse_flexible_ids_large_numbers_ignored() { 69 | // Single ID without comma is processed (first argument, no comma-separated args) 70 | let ids = parse_flexible_ids(&["1".to_string(), "256".to_string(), "2".to_string()]); 71 | assert_eq!(ids, vec![1]); 72 | } 73 | 74 | #[test] 75 | fn test_parse_flexible_ids_negative_numbers_ignored() { 76 | // Single ID without comma is processed (first argument, no comma-separated args) 77 | let ids = parse_flexible_ids(&["1".to_string(), "-1".to_string(), "2".to_string()]); 78 | assert_eq!(ids, vec![1]); 79 | } 80 | 81 | #[test] 82 | fn test_parse_flexible_ids_duplicate_ids_preserved() { 83 | // Single ID without comma is processed (first argument, no comma-separated args) 84 | let ids = parse_flexible_ids(&["1".to_string(), "1".to_string(), "2".to_string()]); 85 | assert_eq!(ids, vec![1]); 86 | } 87 | 88 | #[test] 89 | fn test_parse_flexible_ids_with_space_before_comma() { 90 | // Handle case like "1,5,4 ,6" which becomes ["1,5,4", " ,6"] 91 | let ids = parse_flexible_ids(&["1,5,4".to_string(), " ,6".to_string()]); 92 | assert_eq!(ids, vec![1, 5, 4, 6]); 93 | } 94 | 95 | #[test] 96 | fn test_parse_flexible_ids_with_comma_at_start() { 97 | // Handle case like "1,2" ",3" ",4" 98 | let ids = parse_flexible_ids(&["1,2".to_string(), ",3".to_string(), ",4".to_string()]); 99 | assert_eq!(ids, vec![1, 2, 3, 4]); 100 | } 101 | 102 | #[test] 103 | fn test_parse_flexible_ids_multiple_comma_args() { 104 | // Handle case with multiple comma-separated arguments 105 | let ids = parse_flexible_ids(&["1,2".to_string(), 
"3,4".to_string(), "5,6".to_string()]); 106 | assert_eq!(ids, vec![1, 2, 3, 4, 5, 6]); 107 | } 108 | 109 | #[test] 110 | fn test_parse_flexible_ids_empty_parts_in_comma_separated() { 111 | // Handle case with empty parts: "1,,3" or "1, ,3" 112 | let ids = parse_flexible_ids(&["1,,3".to_string()]); 113 | assert_eq!(ids, vec![1, 3]); 114 | } 115 | 116 | #[test] 117 | fn test_parse_flexible_ids_empty_parts_with_spaces() { 118 | // Handle case with empty parts with spaces: "1, ,3" 119 | let ids = parse_flexible_ids(&["1, ,3".to_string()]); 120 | assert_eq!(ids, vec![1, 3]); 121 | } 122 | 123 | -------------------------------------------------------------------------------- /tests/completions/powershell/helpers.ps1: -------------------------------------------------------------------------------- 1 | # Helper functions for PowerShell completion tests 2 | 3 | function Test-CompletionScenario { 4 | param( 5 | [string]$Description, 6 | [array]$Tokens, 7 | [string]$WordToComplete, 8 | [string]$ExpectedBehavior, 9 | [scriptblock]$Validation 10 | ) 11 | 12 | Write-Host "`n" + "=" * 60 -ForegroundColor Cyan 13 | Write-Host "Test: $Description" -ForegroundColor Yellow 14 | Write-Host "Tokens: $($Tokens -join ' ')" -ForegroundColor Gray 15 | Write-Host "wordToComplete: '$WordToComplete'" -ForegroundColor Gray 16 | Write-Host "Expected: $ExpectedBehavior" -ForegroundColor Cyan 17 | 18 | # Calculate prev and cur (matching the logic in rusk.ps1) 19 | $prev = $null 20 | $cur = $WordToComplete 21 | 22 | # Helper to get token value (handles both strings and objects) 23 | function Get-TokenValue($token) { 24 | if ($token -is [string]) { 25 | return $token 26 | } else { 27 | return $token.Value 28 | } 29 | } 30 | 31 | if ($Tokens.Count -gt 2) { 32 | if ([string]::IsNullOrEmpty($WordToComplete)) { 33 | # If wordToComplete is empty, prev is the last non-empty token 34 | # Skip empty tokens at the end 35 | for ($i = $Tokens.Count - 1; $i -ge 2; $i--) { 36 | $tokenValue = Get-TokenValue 
$Tokens[$i] 37 | if (-not [string]::IsNullOrEmpty($tokenValue)) { 38 | $prev = $tokenValue 39 | break 40 | } 41 | } 42 | # If no non-empty token found after index 2, prev is the command (tokens[1]) 43 | if ([string]::IsNullOrEmpty($prev) -and $Tokens.Count -gt 1) { 44 | $prev = Get-TokenValue $Tokens[1] 45 | } 46 | } else { 47 | if ($Tokens.Count -gt 2) { 48 | $prev = Get-TokenValue $Tokens[$Tokens.Count - 2] 49 | } 50 | } 51 | } elseif ($Tokens.Count -eq 2) { 52 | # Only command and empty token - prev is the command 53 | $prev = Get-TokenValue $Tokens[1] 54 | } 55 | 56 | Write-Host " prev = '$prev', cur = '$cur'" -ForegroundColor White 57 | 58 | # Create mock tokens (tokens are already strings, wrap them in objects) 59 | $mockTokens = @() 60 | foreach ($t in $Tokens) { 61 | if ($t -is [string]) { 62 | $mockTokens += [PSCustomObject]@{ Value = $t } 63 | } else { 64 | $mockTokens += $t 65 | } 66 | } 67 | 68 | # Run validation 69 | try { 70 | $result = & $Validation -Tokens $mockTokens -WordToComplete $WordToComplete -Prev $prev -Cur $cur 71 | if ($result) { 72 | Write-Host " ✓ Test passed" -ForegroundColor Green 73 | return $true 74 | } else { 75 | Write-Host " ✗ Test failed" -ForegroundColor Red 76 | return $false 77 | } 78 | } catch { 79 | Write-Host " ✗ Test error: $_" -ForegroundColor Red 80 | return $false 81 | } 82 | } 83 | 84 | function Assert-Equals { 85 | param($Actual, $Expected, $Message) 86 | 87 | if ($Actual -eq $Expected) { 88 | Write-Host " ✓ $Message" -ForegroundColor Green 89 | return $true 90 | } else { 91 | Write-Host " ✗ $Message (expected: $Expected, actual: $Actual)" -ForegroundColor Red 92 | return $false 93 | } 94 | } 95 | 96 | function Assert-NotEquals { 97 | param($Actual, $NotExpected, $Message) 98 | 99 | if ($Actual -ne $NotExpected) { 100 | Write-Host " ✓ $Message" -ForegroundColor Green 101 | return $true 102 | } else { 103 | Write-Host " ✗ $Message (unexpected value: $Actual)" -ForegroundColor Red 104 | return $false 105 | } 106 | } 
107 | 108 | function Assert-True { 109 | param($Condition, $Message) 110 | 111 | if ($Condition) { 112 | Write-Host " ✓ $Message" -ForegroundColor Green 113 | return $true 114 | } else { 115 | Write-Host " ✗ $Message" -ForegroundColor Red 116 | return $false 117 | } 118 | } 119 | 120 | function Assert-False { 121 | param($Condition, $Message) 122 | 123 | if (-not $Condition) { 124 | Write-Host " ✓ $Message" -ForegroundColor Green 125 | return $true 126 | } else { 127 | Write-Host " ✗ $Message" -ForegroundColor Red 128 | return $false 129 | } 130 | } 131 | -------------------------------------------------------------------------------- /tests/unchanged_detection_tests.rs: -------------------------------------------------------------------------------- 1 | use chrono::NaiveDate; 2 | use rusk::TaskManager; 3 | mod common; 4 | use common::create_test_task; 5 | 6 | #[test] 7 | fn test_edit_tasks_unchanged_text() { 8 | let mut tm = TaskManager::new_empty().unwrap(); 9 | tm.tasks = vec![ 10 | create_test_task(1, "Same text", false), 11 | create_test_task(2, "Different text", false), 12 | ]; 13 | 14 | // Try to set task 1 to the same text it already has 15 | let (edited, unchanged, not_found) = tm 16 | .edit_tasks( 17 | vec![1], 18 | Some(vec!["Same".to_string(), "text".to_string()]), 19 | None, 20 | ) 21 | .unwrap(); 22 | 23 | assert!(edited.is_empty()); 24 | assert_eq!(unchanged, vec![1]); 25 | assert!(not_found.is_empty()); 26 | assert_eq!(tm.tasks[0].text, "Same text"); // Should remain unchanged 27 | } 28 | 29 | #[test] 30 | fn test_edit_tasks_mixed_changed_unchanged() { 31 | let mut tm = TaskManager::new_empty().unwrap(); 32 | tm.tasks = vec![ 33 | create_test_task(1, "Original text", false), 34 | create_test_task(2, "New text", false), 35 | create_test_task(3, "Another text", false), 36 | ]; 37 | 38 | // Set task 2 to same text, others to new text 39 | let (edited, unchanged, not_found) = tm 40 | .edit_tasks( 41 | vec![1, 2, 3], 42 | Some(vec!["New".to_string(), 
"text".to_string()]), 43 | None, 44 | ) 45 | .unwrap(); 46 | 47 | assert_eq!(edited, vec![1, 3]); // Tasks that actually changed 48 | assert_eq!(unchanged, vec![2]); // Task that already had this text 49 | assert!(not_found.is_empty()); 50 | 51 | // Verify final state 52 | assert_eq!(tm.tasks[0].text, "New text"); 53 | assert_eq!(tm.tasks[1].text, "New text"); 54 | assert_eq!(tm.tasks[2].text, "New text"); 55 | } 56 | 57 | #[test] 58 | fn test_edit_tasks_unchanged_date() { 59 | let mut tm = TaskManager::new_empty().unwrap(); 60 | tm.tasks = vec![create_test_task(1, "Task 1", false)]; 61 | 62 | // Set date first 63 | tm.tasks[0].date = NaiveDate::parse_from_str("01-01-2025", "%d-%m-%Y").ok(); 64 | 65 | // Try to set the same date again 66 | let (edited, unchanged, not_found) = tm 67 | .edit_tasks(vec![1], None, Some("01-01-2025".to_string())) 68 | .unwrap(); 69 | 70 | assert!(edited.is_empty()); 71 | assert_eq!(unchanged, vec![1]); 72 | assert!(not_found.is_empty()); 73 | } 74 | 75 | #[test] 76 | fn test_edit_tasks_mixed_text_and_date_changes() { 77 | let mut tm = TaskManager::new_empty().unwrap(); 78 | tm.tasks = vec![create_test_task(1, "Same text", false)]; 79 | 80 | // Set initial date 81 | tm.tasks[0].date = NaiveDate::parse_from_str("01-01-2025", "%d-%m-%Y").ok(); 82 | 83 | // Change text but keep same date - should be considered changed 84 | let (edited, unchanged, not_found) = tm 85 | .edit_tasks( 86 | vec![1], 87 | Some(vec!["New".to_string(), "text".to_string()]), 88 | Some("2025-01-01".to_string()), 89 | ) 90 | .unwrap(); 91 | 92 | assert_eq!(edited, vec![1]); // Text changed, so task is edited 93 | assert!(unchanged.is_empty()); 94 | assert!(not_found.is_empty()); 95 | assert_eq!(tm.tasks[0].text, "New text"); 96 | } 97 | 98 | #[test] 99 | fn test_edit_tasks_all_unchanged() { 100 | let mut tm = TaskManager::new_empty().unwrap(); 101 | tm.tasks = vec![ 102 | create_test_task(1, "Text 1", false), 103 | create_test_task(2, "Text 1", false), 104 | ]; 105 | 
106 | // Try to set both to the same text they already have 107 | let (edited, unchanged, not_found) = tm 108 | .edit_tasks( 109 | vec![1, 2], 110 | Some(vec!["Text".to_string(), "1".to_string()]), 111 | None, 112 | ) 113 | .unwrap(); 114 | 115 | assert!(edited.is_empty()); 116 | assert_eq!(unchanged, vec![1, 2]); 117 | assert!(not_found.is_empty()); 118 | } 119 | 120 | #[test] 121 | fn test_edit_tasks_with_not_found_and_unchanged() { 122 | let mut tm = TaskManager::new_empty().unwrap(); 123 | tm.tasks = vec![create_test_task(1, "Same text", false)]; 124 | 125 | // Try to edit existing (unchanged) and non-existing tasks 126 | let (edited, unchanged, not_found) = tm 127 | .edit_tasks( 128 | vec![1, 99], 129 | Some(vec!["Same".to_string(), "text".to_string()]), 130 | None, 131 | ) 132 | .unwrap(); 133 | 134 | assert!(edited.is_empty()); 135 | assert_eq!(unchanged, vec![1]); 136 | assert_eq!(not_found, vec![99]); 137 | } 138 | -------------------------------------------------------------------------------- /tests/edit_parsing_tests.rs: -------------------------------------------------------------------------------- 1 | use rusk::TaskManager; 2 | mod common; 3 | use common::create_test_task; 4 | 5 | #[test] 6 | fn test_edit_tasks_saves_only_when_changed() { 7 | let temp_dir = tempfile::tempdir().unwrap(); 8 | let db_path = temp_dir.path().join("test_save_behavior.json"); 9 | 10 | let mut tm = TaskManager::new_empty_with_path(db_path.clone()); 11 | tm.tasks = vec![create_test_task(1, "Original text", false)]; 12 | 13 | // Save initial state 14 | tm.save().unwrap(); 15 | let initial_metadata = std::fs::metadata(&db_path).unwrap(); 16 | 17 | // Wait a bit to ensure different modification time 18 | std::thread::sleep(std::time::Duration::from_millis(10)); 19 | 20 | // Try to edit with same text (should not save) 21 | let (_edited, unchanged, _not_found) = tm 22 | .edit_tasks( 23 | vec![1], 24 | Some(vec!["Original".to_string(), "text".to_string()]), 25 | None, 26 | ) 27 | 
.unwrap(); 28 | 29 | assert_eq!(unchanged, vec![1]); 30 | 31 | let after_unchanged_metadata = std::fs::metadata(&db_path).unwrap(); 32 | assert_eq!( 33 | initial_metadata.modified().unwrap(), 34 | after_unchanged_metadata.modified().unwrap(), 35 | "File should not be modified when no changes are made" 36 | ); 37 | 38 | // Now make a real change (should save) 39 | std::thread::sleep(std::time::Duration::from_millis(10)); 40 | let (edited, _unchanged, _not_found) = tm 41 | .edit_tasks( 42 | vec![1], 43 | Some(vec!["New".to_string(), "text".to_string()]), 44 | None, 45 | ) 46 | .unwrap(); 47 | 48 | assert_eq!(edited, vec![1]); 49 | 50 | let after_changed_metadata = std::fs::metadata(&db_path).unwrap(); 51 | assert!( 52 | after_changed_metadata.modified().unwrap() > initial_metadata.modified().unwrap(), 53 | "File should be modified when changes are made" 54 | ); 55 | } 56 | 57 | #[test] 58 | fn test_edit_tasks_text_joining() { 59 | let mut tm = TaskManager::new_empty().unwrap(); 60 | tm.tasks = vec![create_test_task(1, "Original", false)]; 61 | 62 | // Test that multiple words are joined with spaces 63 | let (edited, _unchanged, _not_found) = tm 64 | .edit_tasks( 65 | vec![1], 66 | Some(vec![ 67 | "Multiple".to_string(), 68 | "word".to_string(), 69 | "text".to_string(), 70 | "here".to_string(), 71 | ]), 72 | None, 73 | ) 74 | .unwrap(); 75 | 76 | assert_eq!(edited, vec![1]); 77 | assert_eq!(tm.tasks[0].text, "Multiple word text here"); 78 | } 79 | 80 | #[test] 81 | fn test_edit_tasks_date_parsing_validation() { 82 | let mut tm = TaskManager::new_empty().unwrap(); 83 | tm.tasks = vec![create_test_task(1, "Task", false)]; 84 | 85 | // Valid date format 86 | let (_edited, _unchanged, _not_found) = tm 87 | .edit_tasks(vec![1], None, Some("31-12-2025".to_string())) 88 | .unwrap(); 89 | 90 | assert_eq!( 91 | tm.tasks[0].date, 92 | chrono::NaiveDate::parse_from_str("31-12-2025", "%d-%m-%Y").ok() 93 | ); 94 | 95 | // Invalid date format should result in None (parsed as None, 
which changes the date) 96 | let (edited, _unchanged, _not_found) = tm 97 | .edit_tasks(vec![1], None, Some("invalid-date".to_string())) 98 | .unwrap(); 99 | 100 | // Should change from valid date to None due to invalid parsing 101 | assert_eq!(edited, vec![1]); // Task was edited because date changed 102 | assert_eq!(tm.tasks[0].date, None); 103 | } 104 | 105 | #[test] 106 | fn test_edit_tasks_comprehensive_scenario() { 107 | let mut tm = TaskManager::new_empty().unwrap(); 108 | tm.tasks = vec![ 109 | create_test_task(1, "Task 1", false), 110 | create_test_task(2, "Task 2", false), 111 | create_test_task(3, "Different text", false), 112 | ]; 113 | 114 | // Complex edit: some changed, some unchanged, some not found 115 | let (edited, unchanged, not_found) = tm 116 | .edit_tasks( 117 | vec![1, 2, 3, 99], 118 | Some(vec!["Task".to_string(), "2".to_string()]), 119 | Some("15-06-2025".to_string()), 120 | ) 121 | .unwrap(); 122 | 123 | // Task 1: text changes from "Task 1" to "Task 2" 124 | // Task 2: text stays "Task 2", but date changes 125 | // Task 3: text changes from "Different text" to "Task 2" 126 | // Task 99: not found 127 | 128 | assert_eq!(edited, vec![1, 2, 3]); // All existing tasks have some change 129 | assert!(unchanged.is_empty()); // None are completely unchanged 130 | assert_eq!(not_found, vec![99]); 131 | 132 | // Verify final state 133 | assert_eq!(tm.tasks[0].text, "Task 2"); 134 | assert_eq!(tm.tasks[1].text, "Task 2"); 135 | assert_eq!(tm.tasks[2].text, "Task 2"); 136 | 137 | let expected_date = chrono::NaiveDate::parse_from_str("15-06-2025", "%d-%m-%Y").ok(); 138 | assert_eq!(tm.tasks[0].date, expected_date); 139 | assert_eq!(tm.tasks[1].date, expected_date); 140 | assert_eq!(tm.tasks[2].date, expected_date); 141 | } 142 | -------------------------------------------------------------------------------- /tests/completions/powershell/test_edit_after_id.ps1: -------------------------------------------------------------------------------- 1 | # 
Test: rusk e should return ONLY task text, NO dates 2 | # This is the critical test for the reported issue 3 | 4 | . $PSScriptRoot/helpers.ps1 5 | . /home/alex/.config/powershell/rusk-completions.ps1 6 | 7 | $allTestsPassed = $true 8 | 9 | Write-Host "`nPowerShell Completion Tests - Edit After ID" -ForegroundColor Cyan 10 | Write-Host "=" * 60 -ForegroundColor Cyan 11 | 12 | # Test 1: rusk e 1 (with space after ID) 13 | $test1 = Test-CompletionScenario ` 14 | -Description "rusk e 1 (with space after ID)" ` 15 | -Tokens @("rusk", "e", "1", "") ` 16 | -WordToComplete "" ` 17 | -ExpectedBehavior "Should return ONLY task text, NO dates" ` 18 | -Validation { 19 | param($Tokens, $WordToComplete, $Prev, $Cur) 20 | 21 | $enteredIds = _rusk_get_entered_ids $Tokens $WordToComplete 22 | 23 | # Critical check: if prev is ID and cur is empty, should return task text only 24 | if ($Prev -match '^\d+$' -and [string]::IsNullOrEmpty($Cur)) { 25 | if ($enteredIds.Count -eq 1 -and $Prev -eq $enteredIds[0].ToString()) { 26 | $taskText = _rusk_get_task_text $Prev 27 | if ($taskText) { 28 | # Should return task text 29 | Assert-True $true "Returns task text: '$taskText'" 30 | # Should NOT return dates 31 | Assert-False ($Prev -eq '--date' -or $Prev -eq '-d') "Does NOT return dates" 32 | return $true 33 | } else { 34 | # Should return empty, not dates 35 | Assert-True $true "Returns empty (no task text)" 36 | Assert-False ($Prev -eq '--date' -or $Prev -eq '-d') "Does NOT return dates" 37 | return $true 38 | } 39 | } 40 | } 41 | return $false 42 | } 43 | 44 | if (-not $test1) { $allTestsPassed = $false } 45 | 46 | # Test 2: rusk e 1 (without space) 47 | $test2 = Test-CompletionScenario ` 48 | -Description "rusk e 1 (without space)" ` 49 | -Tokens @("rusk", "e", "1") ` 50 | -WordToComplete "1" ` 51 | -ExpectedBehavior "Should return task text appended to ID" ` 52 | -Validation { 53 | param($Tokens, $WordToComplete, $Prev, $Cur) 54 | 55 | $enteredIds = _rusk_get_entered_ids $Tokens 
$WordToComplete 56 | 57 | # When typing ID, should suggest task text appended 58 | # In this case, Cur is "1" (the ID being typed), Prev is "e" 59 | if ($Cur -match '^\d+$' -and ($Prev -eq 'edit' -or $Prev -eq 'e')) { 60 | # enteredIds should be 0 because we're still typing the ID 61 | if ($enteredIds.Count -eq 0) { 62 | $taskText = _rusk_get_task_text $Cur 63 | if ($taskText) { 64 | Assert-True $true "Returns appended task text" 65 | Assert-False ($Prev -eq '--date' -or $Prev -eq '-d') "Does NOT return dates" 66 | return $true 67 | } 68 | } 69 | } 70 | # If we can't verify the exact behavior, at least verify dates aren't returned 71 | Assert-False ($Prev -eq '--date' -or $Prev -eq '-d') "Does NOT return dates" 72 | return $true 73 | } 74 | 75 | if (-not $test2) { $allTestsPassed = $false } 76 | 77 | # Test 3: rusk e 1 2 (multiple IDs) 78 | $test3 = Test-CompletionScenario ` 79 | -Description "rusk e 1 2 (multiple IDs)" ` 80 | -Tokens @("rusk", "e", "1", "2", "") ` 81 | -WordToComplete "" ` 82 | -ExpectedBehavior "Should return task IDs (not text, not dates)" ` 83 | -Validation { 84 | param($Tokens, $WordToComplete, $Prev, $Cur) 85 | 86 | $enteredIds = _rusk_get_entered_ids $Tokens $WordToComplete 87 | 88 | # With multiple IDs, should not return task text 89 | if ($enteredIds.Count -gt 1) { 90 | Assert-True $true "Multiple IDs detected" 91 | Assert-False ($Prev -eq '--date' -or $Prev -eq '-d') "Does NOT return dates" 92 | return $true 93 | } 94 | return $false 95 | } 96 | 97 | if (-not $test3) { $allTestsPassed = $false } 98 | 99 | # Test 4: rusk e 1 --date (date flag after ID) 100 | $test4 = Test-CompletionScenario ` 101 | -Description "rusk e 1 --date (date flag after ID)" ` 102 | -Tokens @("rusk", "e", "1", "--date", "") ` 103 | -WordToComplete "" ` 104 | -ExpectedBehavior "Should return dates (after date flag)" ` 105 | -Validation { 106 | param($Tokens, $WordToComplete, $Prev, $Cur) 107 | 108 | # After date flag, should return dates 109 | if ($Prev -eq '--date' 
-or $Prev -eq '-d') { 110 | Assert-True $true "Date flag detected, should return dates" 111 | return $true 112 | } 113 | return $false 114 | } 115 | 116 | if (-not $test4) { $allTestsPassed = $false } 117 | 118 | Write-Host "`n" + "=" * 60 -ForegroundColor Cyan 119 | if ($allTestsPassed) { 120 | Write-Host "All tests passed!" -ForegroundColor Green 121 | exit 0 122 | } else { 123 | Write-Host "Some tests failed!" -ForegroundColor Red 124 | exit 1 125 | } 126 | -------------------------------------------------------------------------------- /tests/path_migration_tests.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use rusk::TaskManager; 3 | use std::env; 4 | use std::fs; 5 | use tempfile::TempDir; 6 | 7 | mod common; 8 | use common::create_test_task; 9 | 10 | #[test] 11 | fn test_default_path_structure() -> Result<()> { 12 | // Save original environment 13 | let original_rusk_db = env::var("RUSK_DB").ok(); 14 | 15 | // Ensure RUSK_DB is not set to test default behavior 16 | unsafe { 17 | env::remove_var("RUSK_DB"); 18 | } 19 | 20 | let db_path = TaskManager::resolve_db_path(); 21 | 22 | // In test mode, should use /tmp/rusk_debug/tasks.json (same as debug mode) 23 | assert!(db_path.to_string_lossy().contains("rusk_debug")); 24 | assert!(db_path.file_name().unwrap() == "tasks.json"); 25 | 26 | let parent = db_path.parent().unwrap(); 27 | assert!(parent.file_name().unwrap() == "rusk_debug"); 28 | 29 | // Restore original environment 30 | unsafe { 31 | match original_rusk_db { 32 | Some(value) => env::set_var("RUSK_DB", value), 33 | None => env::remove_var("RUSK_DB"), 34 | } 35 | } 36 | 37 | Ok(()) 38 | } 39 | 40 | #[test] 41 | fn test_backup_files_naming_convention() -> Result<()> { 42 | let temp_dir = TempDir::new()?; 43 | let rusk_dir = temp_dir.path().join(".rusk"); 44 | fs::create_dir_all(&rusk_dir)?; 45 | 46 | let db_path = rusk_dir.join("tasks.json"); 47 | 48 | let mut tm = 
TaskManager::new_empty()?; 49 | tm.db_path = db_path.clone(); 50 | tm.tasks.push(create_test_task(1, "Test task", false)); 51 | 52 | // First save creates the file 53 | tm.save()?; 54 | 55 | // Second save should create backup (since file now exists) 56 | tm.tasks.push(create_test_task(2, "Second task", false)); 57 | tm.save()?; 58 | 59 | // Verify backup file naming 60 | let backup_path = rusk_dir.join("tasks.json.backup"); 61 | assert!(backup_path.exists()); 62 | 63 | // Test restore creates before_restore backup 64 | tm.restore_from_backup()?; 65 | let before_restore_path = rusk_dir.join("tasks.json.before_restore"); 66 | assert!(before_restore_path.exists()); 67 | 68 | Ok(()) 69 | } 70 | 71 | #[test] 72 | fn test_rusk_db_directory_with_tasks_json() -> Result<()> { 73 | let temp_dir = TempDir::new()?; 74 | let custom_dir = temp_dir.path().join("custom"); 75 | fs::create_dir_all(&custom_dir)?; 76 | 77 | // Save original environment 78 | let original_rusk_db = env::var("RUSK_DB").ok(); 79 | 80 | // Set RUSK_DB to a directory - should create tasks.json inside 81 | unsafe { 82 | env::set_var("RUSK_DB", custom_dir.to_str().unwrap()); 83 | } 84 | 85 | // In test mode, RUSK_DB is ignored, should use /tmp/rusk_debug/tasks.json 86 | let db_path = TaskManager::resolve_db_path(); 87 | let expected_path = std::env::temp_dir().join("rusk_debug").join("tasks.json"); 88 | 89 | assert_eq!(db_path, expected_path); 90 | assert!(db_path.file_name().unwrap() == "tasks.json"); 91 | 92 | // Test that it actually works 93 | let mut tm = TaskManager::new_empty()?; 94 | tm.db_path = db_path.clone(); 95 | tm.tasks.push(create_test_task(1, "Custom dir task", false)); 96 | tm.save()?; 97 | 98 | assert!(expected_path.exists()); 99 | 100 | // Restore original environment 101 | unsafe { 102 | match original_rusk_db { 103 | Some(value) => env::set_var("RUSK_DB", value), 104 | None => env::remove_var("RUSK_DB"), 105 | } 106 | } 107 | 108 | Ok(()) 109 | } 110 | 111 | #[test] 112 | fn 
test_nested_rusk_directory_creation() -> Result<()> { 113 | let temp_dir = TempDir::new()?; 114 | let deep_path = temp_dir.path().join("level1").join("level2").join(".rusk"); 115 | let db_path = deep_path.join("tasks.json"); 116 | 117 | // Directory doesn't exist yet 118 | assert!(!deep_path.exists()); 119 | 120 | let mut tm = TaskManager::new_empty()?; 121 | tm.db_path = db_path.clone(); 122 | tm.tasks.push(create_test_task(1, "Deep path task", false)); 123 | 124 | // First save creates all necessary directories and file 125 | tm.save()?; 126 | 127 | // Second save creates backup 128 | tm.tasks.push(create_test_task(2, "Another task", false)); 129 | tm.save()?; 130 | 131 | assert!(deep_path.exists()); 132 | assert!(db_path.exists()); 133 | assert!(deep_path.join("tasks.json.backup").exists()); 134 | 135 | Ok(()) 136 | } 137 | 138 | #[test] 139 | fn test_file_extension_consistency() -> Result<()> { 140 | let temp_dir = TempDir::new()?; 141 | let rusk_dir = temp_dir.path().join(".rusk"); 142 | fs::create_dir_all(&rusk_dir)?; 143 | 144 | let db_path = rusk_dir.join("tasks.json"); 145 | 146 | let mut tm = TaskManager::new_empty()?; 147 | tm.db_path = db_path.clone(); 148 | tm.tasks.push(create_test_task(1, "Extension test", false)); 149 | 150 | // First save creates the file 151 | tm.save()?; 152 | 153 | // Second save creates backup 154 | tm.tasks.push(create_test_task(2, "Another task", false)); 155 | tm.save()?; 156 | 157 | // Verify all files have consistent naming 158 | assert!(db_path.exists()); 159 | assert_eq!(db_path.extension().unwrap(), "json"); 160 | 161 | let backup_path = rusk_dir.join("tasks.json.backup"); 162 | assert!(backup_path.exists()); 163 | // backup file should have compound extension 164 | assert!(backup_path.to_string_lossy().ends_with(".json.backup")); 165 | 166 | Ok(()) 167 | } 168 | -------------------------------------------------------------------------------- /tests/completions/README.md: 
-------------------------------------------------------------------------------- 1 |

Shell Completion Tests

2 |
3 | 4 | This directory contains tests for shell completion scripts. These tests are separate from the main application tests in `tests/` and focus specifically on validating completion behavior. 5 | 6 | ## Structure 7 | 8 | ``` 9 | tests/completions/ 10 | ├── README.md # This file 11 | ├── run_all.sh # Run all completion tests for all shells 12 | ├── rust/ # Rust unit tests for completion code 13 | │ ├── completion_tests.rs # Tests for parsing rusk list output 14 | │ ├── completions_install_tests.rs # Tests for completion installation 15 | │ └── nu_completion_tests.rs # Nu Shell-specific completion tests 16 | ├── powershell/ # PowerShell completion tests 17 | │ ├── README.md # PowerShell-specific test documentation 18 | │ ├── run_all.ps1 # PowerShell test runner 19 | │ ├── helpers.ps1 # Helper functions 20 | │ ├── test_basic_completion.ps1 21 | │ ├── test_all_commands.ps1 22 | │ └── test_edit_after_id.ps1 23 | ├── bash/ # Bash completion tests 24 | │ ├── run_all.sh 25 | │ ├── helpers.sh 26 | │ ├── test_basic.sh 27 | │ ├── test_all_commands.sh 28 | │ └── test_edit_after_id.sh 29 | ├── zsh/ # Zsh completion tests 30 | │ ├── run_all.sh 31 | │ ├── helpers.zsh 32 | │ ├── test_basic.zsh 33 | │ ├── test_all_commands.zsh 34 | │ └── test_edit_after_id.zsh 35 | ├── fish/ # Fish shell completion tests 36 | │ ├── run_all.fish 37 | │ ├── test_basic.fish 38 | │ ├── test_all_commands.fish 39 | │ └── test_edit_after_id.fish 40 | └── nu/ # Nu Shell completion tests 41 | ├── run_all.nu 42 | ├── test_basic.nu 43 | ├── test_all_commands.nu 44 | └── test_edit_after_id.nu 45 | ``` 46 | 47 | **Note**: Rust tests are included via `tests/completions.rs` which references the files in `rust/` subdirectory. 
48 | 49 | ## Running Tests 50 | 51 | ### Rust Tests 52 | Run Rust unit tests for completion functionality: 53 | ```bash 54 | cargo test --test completions 55 | ``` 56 | 57 | ### All Shell Tests (Recommended) 58 | Run tests for all available shells: 59 | ```bash 60 | ./tests/completions/run_all.sh 61 | ``` 62 | 63 | ### Individual Shell Tests 64 | 65 | #### PowerShell 66 | ```powershell 67 | pwsh -File tests/completions/powershell/run_all.ps1 68 | ``` 69 | 70 | #### Bash 71 | ```bash 72 | bash tests/completions/bash/run_all.sh 73 | ``` 74 | 75 | #### Zsh 76 | ```zsh 77 | zsh tests/completions/zsh/run_all.sh 78 | ``` 79 | 80 | #### Fish 81 | ```fish 82 | fish tests/completions/fish/run_all.fish 83 | ``` 84 | 85 | #### Nu Shell 86 | ```nu 87 | nu tests/completions/nu/run_all.nu 88 | ``` 89 | 90 | ## Test Structure 91 | 92 | Each shell's test directory contains: 93 | - `run_all.{ext}` - Main test runner that executes all test files 94 | - `test_*.{ext}` - Individual test files for specific scenarios: 95 | - `test_basic.{ext}` - Basic completion functionality tests 96 | - `test_all_commands.{ext}` - Comprehensive tests for all commands 97 | - `test_edit_after_id.{ext}` - Critical tests ensuring task text (not dates) after task ID 98 | - `helpers.{ext}` - Helper functions for tests (if applicable) 99 | 100 | ## Test Scenarios 101 | 102 | Common test scenarios across all shells: 103 | 104 | 1. Command completion - `rusk ` should suggest available commands 105 | 2. Subcommand completion - `rusk edit ` should suggest task IDs 106 | 3. Task ID completion - `rusk edit ` should list available task IDs 107 | 4. Task text completion after ID - `rusk edit 1 ` should suggest task text (NOT dates) 108 | 5. Date completion after date flag - `rusk add --date ` should suggest dates 109 | 6. Flag completion - `rusk edit 1 -` should suggest available flags 110 | 7. 
Multiple ID completion - `rusk edit 1 2 ` should handle multiple IDs correctly 111 | 112 | ## Command Coverage 113 | 114 | 115 | - add (a) - Flag completion, date completion after `--date` flag 116 | - edit (e) - Task ID completion, task text after ID, flag completion, date after flag 117 | - mark (m) - Task ID completion, multiple IDs 118 | - del (d) - Task ID completion, flag completion (`--done`), multiple IDs 119 | - list (l) - No arguments (empty completion) 120 | - restore (r) - No arguments (empty completion) 121 | - completions (c) - Subcommand completion, shell completion 122 | 123 | All command aliases are tested: `a`, `e`, `m`, `d`, `l`, `r`, `c` 124 | 125 | 126 | ## Adding New Tests 127 | 128 | To add a new test: 129 | 130 | 1. Create a new test file following the naming pattern `test_*.{ext}` 131 | 2. Use the appropriate helper functions if available 132 | 3. Follow the test structure used in existing tests 133 | 4. Ensure the test file is executable (for shell scripts) 134 | 135 | ## Integration with CI/CD 136 | 137 | These tests can be integrated into CI/CD pipelines: 138 | 139 | ```yaml 140 | # Example GitHub Actions step 141 | - name: Run completion tests 142 | run: ./tests/completions/run_all.sh 143 | ``` 144 | 145 | ## Notes 146 | 147 | - Completion tests require the completion scripts to be installed or available in the expected location 148 | - Some tests may require actual task data in the rusk database 149 | - Tests are designed to be run after building the project: `cargo build --release` 150 | - The `run_all.sh` script will automatically skip shells that are not installed on the system 151 | - Rust tests can be run independently: `cargo test --test completions` 152 | - Each shell's test runner can be executed individually for debugging specific shell issues 153 | 154 |
155 |

Back to top

-------------------------------------------------------------------------------- /tests/restore_tests.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use rusk::TaskManager; 3 | use std::fs; 4 | use tempfile::TempDir; 5 | 6 | mod common; 7 | use common::{create_test_task, create_test_task_with_date}; 8 | 9 | #[test] 10 | fn test_restore_from_backup() -> Result<()> { 11 | let temp_dir = TempDir::new()?; 12 | let db_path = temp_dir.path().join("test.json"); 13 | 14 | // Create initial TaskManager with some tasks 15 | let mut tm = TaskManager::new_empty()?; 16 | tm.db_path = db_path.clone(); 17 | tm.tasks.push(create_test_task(1, "Original task 1", false)); 18 | tm.tasks.push(create_test_task_with_date( 19 | 2, 20 | "Original task 2", 21 | false, 22 | "15-01-2025", 23 | )); 24 | tm.save()?; 25 | 26 | // Modify tasks and save (this creates a backup) 27 | tm.tasks[0].text = "Modified task 1".to_string(); 28 | tm.tasks.push(create_test_task(3, "New task 3", false)); 29 | tm.save()?; 30 | 31 | // Verify current state 32 | assert_eq!(tm.tasks.len(), 3); 33 | assert_eq!(tm.tasks[0].text, "Modified task 1"); 34 | 35 | // Restore from backup 36 | tm.restore_from_backup()?; 37 | 38 | // Verify restored state 39 | assert_eq!(tm.tasks.len(), 2); 40 | assert_eq!(tm.tasks[0].text, "Original task 1"); 41 | assert_eq!(tm.tasks[1].text, "Original task 2"); 42 | assert_eq!(tm.tasks[1].date.as_ref().unwrap().format("%d-%m-%Y").to_string(), "15-01-2025"); 43 | 44 | Ok(()) 45 | } 46 | 47 | #[test] 48 | fn test_restore_no_backup_file() -> Result<()> { 49 | let temp_dir = TempDir::new()?; 50 | let db_path = temp_dir.path().join("test.json"); 51 | 52 | let mut tm = TaskManager::new_empty()?; 53 | tm.db_path = db_path.clone(); 54 | 55 | // Try to restore without backup file 56 | let result = tm.restore_from_backup(); 57 | 58 | assert!(result.is_err()); 59 | assert!( 60 | result 61 | .unwrap_err() 62 | .to_string() 63 | 
.contains("No backup file found") 64 | ); 65 | 66 | Ok(()) 67 | } 68 | 69 | #[test] 70 | fn test_restore_corrupted_backup() -> Result<()> { 71 | let temp_dir = TempDir::new()?; 72 | let db_path = temp_dir.path().join("test.json"); 73 | let backup_path = temp_dir.path().join("test.json.backup"); 74 | 75 | let mut tm = TaskManager::new_empty()?; 76 | tm.db_path = db_path.clone(); 77 | 78 | // Create corrupted backup file 79 | fs::write(&backup_path, "invalid json content")?; 80 | 81 | // Try to restore from corrupted backup 82 | let result = tm.restore_from_backup(); 83 | 84 | assert!(result.is_err()); 85 | assert!(result.unwrap_err().to_string().contains("Failed to parse")); 86 | 87 | Ok(()) 88 | } 89 | 90 | #[test] 91 | fn test_restore_creates_before_restore_backup() -> Result<()> { 92 | let temp_dir = TempDir::new()?; 93 | let db_path = temp_dir.path().join("test.json"); 94 | let backup_path = temp_dir.path().join("test.json.backup"); 95 | let before_restore_path = temp_dir.path().join("test.json.before_restore"); 96 | 97 | // Create TaskManager with current data 98 | let mut tm = TaskManager::new_empty()?; 99 | tm.db_path = db_path.clone(); 100 | tm.tasks.push(create_test_task(1, "Current task", false)); 101 | tm.save()?; 102 | 103 | // Create backup with different data 104 | let backup_tasks = vec![create_test_task(2, "Backup task", false)]; 105 | let backup_json = serde_json::to_string_pretty(&backup_tasks)?; 106 | fs::write(&backup_path, backup_json)?; 107 | 108 | // Restore from backup 109 | tm.restore_from_backup()?; 110 | 111 | // Verify that before_restore backup was created 112 | assert!(before_restore_path.exists()); 113 | 114 | // Verify before_restore backup contains original data 115 | let before_restore_data = fs::read_to_string(&before_restore_path)?; 116 | assert!(before_restore_data.contains("Current task")); 117 | 118 | // Verify current data is from backup 119 | assert_eq!(tm.tasks.len(), 1); 120 | assert_eq!(tm.tasks[0].text, "Backup task"); 
121 | 122 | Ok(()) 123 | } 124 | 125 | #[test] 126 | fn test_restore_with_corrupted_current_database() -> Result<()> { 127 | let temp_dir = TempDir::new()?; 128 | let db_path = temp_dir.path().join("test.json"); 129 | let backup_path = temp_dir.path().join("test.json.backup"); 130 | 131 | let mut tm = TaskManager::new_empty()?; 132 | tm.db_path = db_path.clone(); 133 | 134 | // Create valid backup 135 | let backup_tasks = vec![create_test_task(1, "Backup task", false)]; 136 | let backup_json = serde_json::to_string_pretty(&backup_tasks)?; 137 | fs::write(&backup_path, backup_json)?; 138 | 139 | // Create corrupted current database 140 | fs::write(&db_path, "corrupted data")?; 141 | 142 | // Restore should work despite corrupted current database 143 | tm.restore_from_backup()?; 144 | 145 | // Verify restored data 146 | assert_eq!(tm.tasks.len(), 1); 147 | assert_eq!(tm.tasks[0].text, "Backup task"); 148 | 149 | Ok(()) 150 | } 151 | 152 | #[test] 153 | fn test_restore_empty_backup() -> Result<()> { 154 | let temp_dir = TempDir::new()?; 155 | let db_path = temp_dir.path().join("test.json"); 156 | let backup_path = temp_dir.path().join("test.json.backup"); 157 | 158 | let mut tm = TaskManager::new_empty()?; 159 | tm.db_path = db_path.clone(); 160 | tm.tasks.push(create_test_task(1, "Current task", false)); 161 | tm.save()?; 162 | 163 | // Create empty backup 164 | let empty_backup = "[]"; 165 | fs::write(&backup_path, empty_backup)?; 166 | 167 | // Restore from empty backup 168 | tm.restore_from_backup()?; 169 | 170 | // Verify all tasks were cleared 171 | assert_eq!(tm.tasks.len(), 0); 172 | 173 | Ok(()) 174 | } 175 | -------------------------------------------------------------------------------- /tests/edit_mode_tests.rs: -------------------------------------------------------------------------------- 1 | use rusk::parse_edit_args; 2 | 3 | #[test] 4 | fn test_parse_edit_args_date_only_long_flag() { 5 | // rusk edit 3 --date 2025-12-31 6 | let (ids, text) = 
parse_edit_args(vec![ 7 | "3".to_string(), 8 | "--date".to_string(), 9 | "2025-12-31".to_string(), 10 | ]); 11 | assert_eq!(ids, vec![3]); 12 | assert!(text.is_none()); 13 | } 14 | 15 | #[test] 16 | fn test_parse_edit_args_only_id_text_none() { 17 | // rusk edit 3 -> interactive text-only expected; text should be None 18 | let (ids, text) = parse_edit_args(vec!["3".to_string()]); 19 | assert_eq!(ids, vec![3]); 20 | assert!(text.is_none()); 21 | } 22 | 23 | #[test] 24 | fn test_parse_edit_args_interactive_short_flag() { 25 | // rusk edit 3 -d (interactive expected at CLI routing; parser should not treat -d as text) 26 | let (ids, text) = parse_edit_args(vec!["3".to_string(), "-d".to_string()]); 27 | assert_eq!(ids, vec![3]); 28 | assert!(text.is_none()); 29 | } 30 | 31 | #[test] 32 | fn test_parse_edit_args_mixed_ids_and_text_with_date() { 33 | // rusk edit 1,2 new text --date 2025-06-15 34 | let (ids, text) = parse_edit_args(vec![ 35 | "1,2".to_string(), 36 | "new".to_string(), 37 | "text".to_string(), 38 | "--date".to_string(), 39 | "2025-06-15".to_string(), 40 | ]); 41 | assert_eq!(ids, vec![1, 2]); 42 | assert_eq!(text, Some(vec!["new".to_string(), "text".to_string()])); 43 | } 44 | 45 | #[test] 46 | fn test_parse_edit_args_comma_separated_ids_and_text_with_short_date() { 47 | // rusk edit 1,2,3 some words -d 2025-01-01 48 | let (ids, text) = parse_edit_args(vec![ 49 | "1,2,3".to_string(), 50 | "some".to_string(), 51 | "words".to_string(), 52 | "-d".to_string(), 53 | "2025-01-01".to_string(), 54 | ]); 55 | assert_eq!(ids, vec![1, 2, 3]); 56 | assert_eq!(text, Some(vec!["some".to_string(), "words".to_string()])); 57 | } 58 | 59 | #[test] 60 | fn test_parse_edit_args_comma_separated_ids_no_text() { 61 | // rusk edit 1,2,3 -> text None 62 | let (ids, text) = parse_edit_args(vec!["1,2,3".to_string()]); 63 | assert_eq!(ids, vec![1, 2, 3]); 64 | assert!(text.is_none()); 65 | } 66 | 67 | #[test] 68 | fn test_parse_edit_args_text_only_words() { 69 | // rusk edit 5 new 
title here 70 | let (ids, text) = parse_edit_args(vec![ 71 | "5".to_string(), 72 | "new".to_string(), 73 | "title".to_string(), 74 | "here".to_string(), 75 | ]); 76 | assert_eq!(ids, vec![5]); 77 | assert_eq!( 78 | text, 79 | Some(vec![ 80 | "new".to_string(), 81 | "title".to_string(), 82 | "here".to_string(), 83 | ]) 84 | ); 85 | } 86 | 87 | #[test] 88 | fn test_parse_edit_args_short_date_without_value_then_text_ignored_as_date_value() { 89 | // rusk edit 7 -d some -> parser treats 'some' as date value and skips it 90 | // so text remains None and ids parsed 91 | let (ids, text) = parse_edit_args(vec!["7".to_string(), "-d".to_string(), "some".to_string()]); 92 | assert_eq!(ids, vec![7]); 93 | assert!(text.is_none()); 94 | } 95 | 96 | #[test] 97 | fn test_parse_edit_args_with_space_before_comma() { 98 | // Handle case like "1,5,4 ,6" which becomes ["1,5,4", " ,6"] 99 | let (ids, text) = parse_edit_args(vec!["1,5,4".to_string(), " ,6".to_string()]); 100 | assert_eq!(ids, vec![1, 5, 4, 6]); 101 | assert!(text.is_none()); 102 | } 103 | 104 | #[test] 105 | fn test_parse_edit_args_with_space_before_comma_and_text() { 106 | // Handle case like "1,5,4 ,6" with text 107 | let (ids, text) = parse_edit_args(vec![ 108 | "1,5,4".to_string(), 109 | " ,6".to_string(), 110 | "new".to_string(), 111 | "text".to_string(), 112 | ]); 113 | assert_eq!(ids, vec![1, 5, 4, 6]); 114 | assert_eq!(text, Some(vec!["new".to_string(), "text".to_string()])); 115 | } 116 | 117 | #[test] 118 | fn test_parse_edit_args_with_comma_at_start() { 119 | // Handle case like "1,2" ",3" ",4" 120 | let (ids, text) = parse_edit_args(vec!["1,2".to_string(), ",3".to_string(), ",4".to_string()]); 121 | assert_eq!(ids, vec![1, 2, 3, 4]); 122 | assert!(text.is_none()); 123 | } 124 | 125 | #[test] 126 | fn test_parse_edit_args_with_comma_at_start_and_text() { 127 | // Handle case like "1,2" ",3" ",4" with text 128 | let (ids, text) = parse_edit_args(vec![ 129 | "1,2".to_string(), 130 | ",3".to_string(), 131 | 
",4".to_string(), 132 | "new".to_string(), 133 | "text".to_string(), 134 | ]); 135 | assert_eq!(ids, vec![1, 2, 3, 4]); 136 | assert_eq!(text, Some(vec!["new".to_string(), "text".to_string()])); 137 | } 138 | 139 | #[test] 140 | fn test_parse_edit_args_with_space_before_comma_and_date() { 141 | // Handle case like "1,5,4 ,6" with date flag 142 | let (ids, text) = parse_edit_args(vec![ 143 | "1,5,4".to_string(), 144 | " ,6".to_string(), 145 | "-d".to_string(), 146 | "2025-01-01".to_string(), 147 | ]); 148 | assert_eq!(ids, vec![1, 5, 4, 6]); 149 | assert!(text.is_none()); 150 | } 151 | 152 | #[test] 153 | fn test_parse_edit_args_with_space_before_comma_text_and_date() { 154 | // Handle case like "1,5,4 ,6" with text and date 155 | let (ids, text) = parse_edit_args(vec![ 156 | "1,5,4".to_string(), 157 | " ,6".to_string(), 158 | "new".to_string(), 159 | "text".to_string(), 160 | "--date".to_string(), 161 | "2025-01-01".to_string(), 162 | ]); 163 | assert_eq!(ids, vec![1, 5, 4, 6]); 164 | assert_eq!(text, Some(vec!["new".to_string(), "text".to_string()])); 165 | } 166 | 167 | #[test] 168 | fn test_parse_edit_args_empty_parts_in_comma_separated() { 169 | // Handle case with empty parts: "1,,3" 170 | let (ids, text) = parse_edit_args(vec!["1,,3".to_string()]); 171 | assert_eq!(ids, vec![1, 3]); 172 | assert!(text.is_none()); 173 | } 174 | 175 | #[test] 176 | fn test_parse_edit_args_multiple_comma_args() { 177 | // Handle case with multiple comma-separated arguments 178 | let (ids, text) = parse_edit_args(vec!["1,2".to_string(), "3,4".to_string()]); 179 | assert_eq!(ids, vec![1, 2, 3, 4]); 180 | assert!(text.is_none()); 181 | } 182 | -------------------------------------------------------------------------------- /src/completions.rs: -------------------------------------------------------------------------------- 1 | // Shell completion scripts embedded in the binary 2 | // These are included at compile time using include_str! 
3 | 4 | pub mod scripts { 5 | pub const BASH: &str = include_str!("../completions/rusk.bash"); 6 | pub const ZSH: &str = include_str!("../completions/rusk.zsh"); 7 | pub const FISH: &str = include_str!("../completions/rusk.fish"); 8 | pub const NU: &str = include_str!("../completions/rusk.nu"); 9 | pub const POWERSHELL: &str = include_str!("../completions/rusk.ps1"); 10 | } 11 | 12 | #[derive(Debug, Clone, Copy, PartialEq, Eq, clap::ValueEnum)] 13 | pub enum Shell { 14 | Bash, 15 | Zsh, 16 | Fish, 17 | Nu, 18 | #[value(name = "powershell")] 19 | PowerShell, 20 | } 21 | 22 | impl Shell { 23 | pub fn get_script(&self) -> &'static str { 24 | use scripts::*; 25 | match self { 26 | Shell::Bash => BASH, 27 | Shell::Zsh => ZSH, 28 | Shell::Fish => FISH, 29 | Shell::Nu => NU, 30 | Shell::PowerShell => POWERSHELL, 31 | } 32 | } 33 | 34 | pub fn get_default_path(&self) -> Result { 35 | let home = dirs::home_dir() 36 | .ok_or_else(|| anyhow::anyhow!("Could not determine home directory"))?; 37 | 38 | let path = match self { 39 | Shell::Bash => { 40 | // Prefer user-specific location (doesn't require root) 41 | // Works on Unix/Linux, Git Bash on Windows, and WSL 42 | home.join(".bash_completion.d").join("rusk") 43 | } 44 | Shell::Zsh => { 45 | // Works on Unix/Linux, macOS, and WSL with Zsh 46 | home.join(".zsh").join("completions").join("_rusk") 47 | } 48 | Shell::Fish => { 49 | // Works on Unix/Linux, macOS, and WSL with Fish 50 | home.join(".config").join("fish").join("completions").join("rusk.fish") 51 | } 52 | Shell::Nu => { 53 | // Works on Unix/Linux, macOS, Windows, and WSL 54 | // On Windows, Nu Shell uses %APPDATA%\nushell\completions\ 55 | // On Unix/Linux/macOS, uses ~/.config/nushell/completions/ 56 | #[cfg(windows)] 57 | { 58 | if let Some(appdata) = dirs::config_dir() { 59 | appdata.join("nushell").join("completions").join("rusk.nu") 60 | } else { 61 | home.join("AppData").join("Roaming").join("nushell").join("completions").join("rusk.nu") 62 | } 63 | } 64 | 
#[cfg(not(windows))] 65 | { 66 | home.join(".config").join("nushell").join("completions").join("rusk.nu") 67 | } 68 | } 69 | Shell::PowerShell => { 70 | // PowerShell profile location on Windows 71 | // PowerShell 7+ uses: Documents\PowerShell\Microsoft.PowerShell_profile.ps1 72 | // PowerShell 5.1 uses: Documents\WindowsPowerShell\Microsoft.PowerShell_profile.ps1 73 | // We use PowerShell directory (for PS 7+) as default, user can override with --output 74 | #[cfg(windows)] 75 | { 76 | // On Windows, use Documents\PowerShell\rusk-completions.ps1 (PowerShell 7+) 77 | // For PowerShell 5.1, user should use --output to specify WindowsPowerShell directory 78 | if let Some(documents) = dirs::document_dir() { 79 | documents.join("PowerShell").join("rusk-completions.ps1") 80 | } else { 81 | home.join("Documents").join("PowerShell").join("rusk-completions.ps1") 82 | } 83 | } 84 | #[cfg(not(windows))] 85 | { 86 | // On Unix/Linux/macOS with PowerShell Core 87 | home.join(".config").join("powershell").join("rusk-completions.ps1") 88 | } 89 | } 90 | }; 91 | 92 | Ok(path) 93 | } 94 | 95 | pub fn get_instructions(&self, path: &std::path::Path) -> String { 96 | match self { 97 | Shell::Bash => { 98 | // Check for system-wide installation (Unix/Linux only) 99 | #[cfg(not(windows))] 100 | { 101 | if path.starts_with("/etc") { 102 | return "Completions installed system-wide. Restart your shell or run: source /etc/bash_completion.d/rusk".to_string(); 103 | } 104 | } 105 | // On Windows, Git Bash and WSL use Unix-style paths 106 | format!("Add to your ~/.bashrc:\n source {}", path.display()) 107 | } 108 | Shell::Zsh => { 109 | format!("Add to your ~/.zshrc:\n fpath=({} $fpath)\n autoload -U compinit && compinit", 110 | path.parent().unwrap().display()) 111 | } 112 | Shell::Fish => { 113 | "Completions installed. 
Restart your shell or run: source ~/.config/fish/completions/rusk.fish".to_string() 114 | } 115 | Shell::Nu => { 116 | let config_path = if cfg!(windows) { 117 | "%APPDATA%\\nushell\\config.nu" 118 | } else { 119 | "~/.config/nushell/config.nu" 120 | }; 121 | format!("Add to your config.nu ({}):\n # Load rusk completions module\n use ($nu.config-path | path dirname | path join \"completions\" \"rusk.nu\") *\n\n $env.config.completions.external = {{\n enable: true\n completer: {{|spans|\n if ($spans.0 == \"rusk\") {{\n try {{\n rusk-completions-main $spans\n }} catch {{\n []\n }}\n }} else {{\n []\n }}\n }}\n }}", config_path) 122 | } 123 | Shell::PowerShell => { 124 | let profile_path = if cfg!(windows) { 125 | "$PROFILE" 126 | } else { 127 | "~/.config/powershell/Microsoft.PowerShell_profile.ps1" 128 | }; 129 | format!( 130 | "Add to your PowerShell profile ({}):\n . {}\n\nOr source it manually:\n . {}", 131 | profile_path, 132 | path.display(), 133 | path.display() 134 | ) 135 | } 136 | } 137 | } 138 | } 139 | 140 | -------------------------------------------------------------------------------- /tests/directory_structure_tests.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use rusk::TaskManager; 3 | use std::env; 4 | use std::fs; 5 | use tempfile::TempDir; 6 | 7 | mod common; 8 | use common::create_test_task; 9 | 10 | #[test] 11 | fn test_default_directory_structure() -> Result<()> { 12 | // Remove RUSK_DB to ensure we're testing default behavior 13 | unsafe { 14 | env::remove_var("RUSK_DB"); 15 | } 16 | 17 | let db_path = TaskManager::resolve_db_path(); 18 | 19 | // In test mode, should use /tmp/rusk_debug/tasks.json (same as debug mode) 20 | assert!(db_path.file_name().unwrap() == "tasks.json"); 21 | 22 | // Parent directory should be "rusk_debug" (from /tmp/rusk_debug/tasks.json) 23 | let parent = db_path.parent().unwrap(); 24 | let parent_name = parent.file_name().unwrap().to_string_lossy(); 
25 | assert_eq!(parent_name, "rusk_debug", "Expected parent directory to be 'rusk_debug', got '{parent_name}'"); 26 | 27 | Ok(()) 28 | } 29 | 30 | #[test] 31 | fn test_directory_creation_on_save() -> Result<()> { 32 | let temp_dir = TempDir::new()?; 33 | let rusk_dir = temp_dir.path().join("rusk"); 34 | let db_path = rusk_dir.join("tasks.json"); 35 | 36 | // Ensure directory doesn't exist initially 37 | assert!(!rusk_dir.exists()); 38 | 39 | // Create TaskManager with path in non-existent directory 40 | let mut tm = TaskManager::new_empty_with_path(db_path.clone()); 41 | tm.tasks.push(create_test_task(1, "Test task", false)); 42 | 43 | // Save should create the directory 44 | tm.save()?; 45 | 46 | // Verify directory and file were created 47 | assert!(rusk_dir.exists()); 48 | assert!(rusk_dir.is_dir()); 49 | assert!(db_path.exists()); 50 | assert!(db_path.is_file()); 51 | 52 | Ok(()) 53 | } 54 | 55 | #[test] 56 | fn test_backup_files_in_same_directory() -> Result<()> { 57 | let temp_dir = TempDir::new()?; 58 | let rusk_dir = temp_dir.path().join("rusk"); 59 | let db_path = rusk_dir.join("tasks.json"); 60 | let backup_path = rusk_dir.join("tasks.json.backup"); 61 | 62 | // Create TaskManager with custom path 63 | let mut tm = TaskManager::new_empty_with_path(db_path.clone()); 64 | tm.tasks.push(create_test_task(1, "First task", false)); 65 | tm.save()?; 66 | 67 | // Add another task to trigger backup creation 68 | tm.tasks.push(create_test_task(2, "Second task", false)); 69 | tm.save()?; 70 | 71 | // Verify backup was created in same directory 72 | assert!(backup_path.exists()); 73 | assert!(backup_path.is_file()); 74 | 75 | // Verify backup is in the same directory as main file 76 | assert_eq!(backup_path.parent(), db_path.parent()); 77 | 78 | Ok(()) 79 | } 80 | 81 | #[test] 82 | fn test_nested_directory_structure() -> Result<()> { 83 | let temp_dir = TempDir::new()?; 84 | let nested_path = temp_dir 85 | .path() 86 | .join("level1") 87 | .join("level2") 88 | 
.join("level3") 89 | .join("deep_tasks.json"); 90 | 91 | // Create TaskManager with deeply nested path 92 | let mut tm = TaskManager::new_empty_with_path(nested_path.clone()); 93 | tm.tasks.push(create_test_task(1, "Deep task", false)); 94 | 95 | // Save should create all necessary directories 96 | tm.save()?; 97 | 98 | // Verify all directories were created 99 | assert!(nested_path.exists()); 100 | assert!(nested_path.parent().unwrap().exists()); 101 | assert!(nested_path.parent().unwrap().parent().unwrap().exists()); 102 | assert!( 103 | nested_path 104 | .parent() 105 | .unwrap() 106 | .parent() 107 | .unwrap() 108 | .parent() 109 | .unwrap() 110 | .exists() 111 | ); 112 | 113 | Ok(()) 114 | } 115 | 116 | #[test] 117 | fn test_restore_files_in_custom_directory() -> Result<()> { 118 | let temp_dir = TempDir::new()?; 119 | let custom_dir = temp_dir.path().join("custom_rusk_dir"); 120 | let db_path = custom_dir.join("custom.json"); 121 | let backup_path = custom_dir.join("custom.json.backup"); 122 | let before_restore_path = custom_dir.join("custom.json.before_restore"); 123 | 124 | // Create TaskManager with custom directory 125 | let mut tm = TaskManager::new_empty_with_path(db_path.clone()); 126 | tm.tasks.push(create_test_task(1, "Original task", false)); 127 | tm.save()?; 128 | 129 | // Modify and save to create backup 130 | tm.tasks[0].text = "Modified task".to_string(); 131 | tm.save()?; 132 | 133 | // Restore from backup 134 | tm.restore_from_backup()?; 135 | 136 | // Verify all restore-related files are in custom directory 137 | assert!(backup_path.exists()); 138 | assert!(before_restore_path.exists()); 139 | assert_eq!(backup_path.parent(), Some(custom_dir.as_path())); 140 | assert_eq!(before_restore_path.parent(), Some(custom_dir.as_path())); 141 | 142 | // Verify restoration worked 143 | assert_eq!(tm.tasks[0].text, "Original task"); 144 | 145 | Ok(()) 146 | } 147 | 148 | #[test] 149 | fn test_get_db_dir_function() -> Result<()> { 150 | let temp_dir = 
TempDir::new()?; 151 | 152 | // In test mode, RUSK_DB is ignored, so it should always use /tmp/rusk_debug/tasks.json 153 | // Even if RUSK_DB is set, it will be ignored 154 | unsafe { 155 | let custom_file = temp_dir.path().join("subdir").join("tasks.json"); 156 | env::set_var("RUSK_DB", custom_file.to_str().unwrap()); 157 | } 158 | 159 | let db_dir = TaskManager::get_db_dir(); 160 | let expected_dir = std::env::temp_dir().join("rusk_debug"); 161 | 162 | assert_eq!(db_dir, expected_dir); 163 | 164 | // Cleanup 165 | unsafe { 166 | env::remove_var("RUSK_DB"); 167 | } 168 | 169 | Ok(()) 170 | } 171 | 172 | #[test] 173 | fn test_directory_permissions() -> Result<()> { 174 | let temp_dir = TempDir::new()?; 175 | let rusk_dir = temp_dir.path().join("rusk"); 176 | let db_path = rusk_dir.join("tasks.json"); 177 | 178 | // Create TaskManager 179 | let mut tm = TaskManager::new_empty_with_path(db_path.clone()); 180 | tm.tasks.push(create_test_task(1, "Permission test", false)); 181 | 182 | // Save should create directory with proper permissions 183 | tm.save()?; 184 | 185 | // Verify directory was created and is readable/writable 186 | assert!(rusk_dir.exists()); 187 | assert!(rusk_dir.is_dir()); 188 | 189 | // Verify we can create additional files in the directory 190 | let test_file = rusk_dir.join("test.txt"); 191 | fs::write(&test_file, "test")?; 192 | assert!(test_file.exists()); 193 | 194 | Ok(()) 195 | } 196 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

rusk

2 |

A minimal cross-platform terminal task manager

3 | 4 |

5 | build  6 | release  7 | AUR Version 8 |

9 | 10 |
11 | 12 |

rusk 0.6

13 | 14 |
15 | 16 | - [Install](#install) 17 | - [Basic Usage](#basic-usage) 18 | - [Working with Multiple Tasks](#working-with-multiple-tasks) 19 | - [Interactive Editing](#interactive-editing) 20 | - [Data Safety & Backup](#data-safety--backup) 21 | - [Automatic Backups](#automatic-backups) 22 | - [Manual Restore](#manual-restore) 23 | - [Aliases](#aliases) 24 | - [Configuration](#configuration) 25 | - [Shell Completion](#shell-completion) 26 | - [Quick Install (Recommended)](completions/README.md#quick-install-recommended) 27 | - [Manual Installation](completions/README.md#manual-installation) 28 | - [Bash](completions/README.md#bash) 29 | - [Zsh](completions/README.md#zsh) 30 | - [Fish](completions/README.md#fish) 31 | - [Nu Shell](completions/README.md#nu-shell) 32 | - [PowerShell](completions/README.md#powershell) 33 | - [Database Location](#database-location) 34 | 35 | # Install 36 | #### Linux/MacOS/Windows 37 | ```bash 38 | cargo install --git https://github.com/tagirov/rusk 39 | ``` 40 | The binary will be installed to: 41 | - Linux/MacOS: `$HOME/.cargo/bin/rusk` 42 | - Windows: `%USERPROFILE%\.cargo\bin\rusk.exe` 43 | 44 | Make sure that these paths are added to your $PATH environment variable to use `rusk` command globally. 
45 | 46 | #### Arch Linux (AUR) 47 | ```bash 48 | paru -S rusk 49 | ``` 50 | 51 | #### Manually 52 | ```bash 53 | git clone https://github.com/tagirov/rusk && cd rusk 54 | ``` 55 | ```bash 56 | cargo build --release 57 | ``` 58 | 59 | Linux/MacOS 60 | 61 | ```bash 62 | sudo install -m 755 ./target/release/rusk /usr/local/bin 63 | ``` 64 | 65 | Windows 66 | 67 | ```bash 68 | copy .\target\release\rusk.exe "%USERPROFILE%\AppData\Local\Microsoft\WindowsApps\" 69 | ``` 70 | 71 | 72 | 73 | # Basic Usage 74 | 75 | ```bash 76 | # Add a new task 77 | rusk add Buy groceries 78 | 79 | # Add a task with a deadline 80 | rusk add Finish project report --date 31-12-2025 81 | 82 | # Or with short year and slash separator: 83 | rusk add Finish project report --date 31/12/25 84 | 85 | # Leading zero for day and month is optional: 86 | rusk add Finish project report --date 1-3-25 87 | 88 | # View all tasks 89 | rusk list 90 | 91 | # or simply 92 | rusk 93 | 94 | # Mark a task as done 95 | rusk mark 1 96 | 97 | # Mark a task as undone (toggle) 98 | rusk mark 1 99 | 100 | # Edit task text (replace) 101 | rusk edit 1 Complete the project documentation 102 | 103 | # Edit task date 104 | rusk edit 1 --date 25/12/2025 105 | 106 | # Edit both text and date 107 | rusk edit 1 Update documentation --date 23-12-25 108 | 109 | # Delete a task 110 | rusk del 1 111 | 112 | # Delete all completed tasks 113 | rusk del --done 114 | 115 | # Get help for any command 116 | rusk --help 117 | 118 | # Get help for a specific command 119 | rusk add --help 120 | rusk del --done --help 121 | ``` 122 | 123 | ## Working with Multiple Tasks 124 | 125 | Multiple task IDs must be comma-separated (no spaces allowed between IDs) 126 | 127 | ```bash 128 | # Mark multiple tasks as done 129 | rusk mark 1,2,3 130 | 131 | # Edit multiple tasks with the same text 132 | rusk edit 1,2,3 Update status to completed 133 | 134 | # Delete multiple tasks 135 | rusk del 1,2,3 136 | ``` 137 | 138 | ## Interactive Editing 139 | 140 
| ```bash 141 | # Edit task text interactively 142 | rusk edit 1 143 | 144 | # Edit task text and date interactively 145 | rusk edit 1 --date 146 | 147 | # Interactive editing of tasks in sequence 148 | rusk edit 1,2,3 149 | ``` 150 | 151 | 152 | ## Data Safety & Backup 153 | #### Automatic Backups 154 | - Every save operation creates a `.json.backup` file 155 | - Backups are stored in the same directory as your database 156 | - Atomic writes prevent data corruption during saves 157 | 158 | #### Manual Restore 159 | ```bash 160 | # Restore from the automatic backup 161 | rusk restore 162 | 163 | # This will: 164 | # 1. Validate the backup file 165 | # 2. Create a safety backup of current database (if valid) 166 | # 3. Restore tasks from backup 167 | ``` 168 | 169 | 170 | ## Aliases 171 | ```bash 172 | rusk a (add) 173 | rusk l (list) 174 | rusk m (mark) 175 | rusk e (edit) 176 | rusk d (del) 177 | rusk r (restore) 178 | 179 | -d (--date) 180 | -h (--help) 181 | -V (--version) 182 | ``` 183 | 184 | # Configuration 185 | 186 | ### Shell Completion 187 | 188 | > For detailed installation instructions, see [completions/README.md](completions/README.md). 189 | 190 | It provides autocomplete for commands, task IDs, and task text during editing by pressing the `Tab` key. 191 | 192 | 193 | **Features:** 194 | - Command completion: Autocomplete commands (`add`, `edit`, `mark`, `del`, etc.) and their aliases 195 | - Task ID completion: Tab-complete task IDs for `edit`, `mark`, and `del` commands 196 | - Smart text completion: When typing `rusk edit 3 `, automatically suggests the current task text for easy editing. (`rusk edit 3` will complete the ID) 197 | - Flag completion: Autocomplete `--date`, `--done`, etc.
198 | - Date suggestions: When using `--date` or `-d` flag, suggests default dates: Today, Tomorrow, One week ahead, Two weeks ahead 199 | 200 | **Windows Support:** 201 | - Git Bash: Works with `bash` completions (uses Unix-style paths) 202 | - WSL: Works with `bash`, `zsh`, `fish`, and `nu` completions 203 | - Nu Shell: Works natively on Windows (uses `%APPDATA%\nushell\completions\`) 204 | - PowerShell: Works natively on Windows (uses `Documents\PowerShell\rusk-completions.ps1`) 205 | - CMD: Basic commands work (add, list, mark, del, edit with text/date). Interactive editing (`rusk edit` without arguments) requires Windows 10+ and may have limited functionality. Tab completion is not supported. Colors work on Windows 10+ (build 1511 and later). 206 | 207 | ### Database Location 208 | 209 | By default, rusk stores tasks to: 210 | - Linux/MacOS: `$HOME/.rusk/tasks.json` 211 | - Windows: `%USERPROFILE%\.rusk\tasks.json` 212 | 213 | You can customize the database location using the `RUSK_DB` environment variable: 214 | 215 | ```bash 216 | # Use a custom database file 217 | export RUSK_DB="/path/to/your/tasks.json" 218 | 219 | # Use a custom directory (tasks.json will be created inside) 220 | export RUSK_DB="/path/to/your/project/" 221 | 222 | # Use different task lists for different projects 223 | cd ~/projects/website 224 | RUSK_DB="./tasks.json" rusk add Fix responsive layout 225 | 226 | cd ~/projects/api 227 | RUSK_DB="./tasks.json" rusk add Add authentication endpoint 228 | 229 | # Each project has its own task list 230 | ``` 231 | 232 | **Debug Mode:** 233 | When running in debug mode (`cargo run` or debug builds), rusk uses a temporary database location to avoid affecting your production data: 234 | - Linux/MacOS: `$TMPDIR/rusk_debug/tasks.json` (usually `/tmp/rusk_debug/tasks.json`) 235 | - Windows: `%TEMP%\rusk_debug\tasks.json` (usually `C:\Users\\AppData\Local\Temp\rusk_debug\tasks.json`) 236 | 237 | In debug mode, the `RUSK_DB` environment variable is 
ignored, and the database path is printed to the console when the program starts. 238 | 239 |
240 | 241 |

Back to top

242 | -------------------------------------------------------------------------------- /completions/rusk.zsh: -------------------------------------------------------------------------------- 1 | #compdef rusk 2 | 3 | # Zsh completion script for rusk 4 | # 5 | # Installation: 6 | # 1. Automatic (recommended): 7 | # rusk completions install zsh 8 | # 9 | # 2. Manual: 10 | # Generate script using rusk command: 11 | # mkdir -p ~/.zsh/completions 12 | # rusk completions show zsh > ~/.zsh/completions/_rusk 13 | # 14 | # Then add to your ~/.zshrc: 15 | # fpath=(~/.zsh/completions $fpath) 16 | # autoload -U compinit && compinit 17 | 18 | # Find rusk binary 19 | _rusk_cmd() { 20 | command -v rusk 2>/dev/null || echo "rusk" 21 | } 22 | 23 | # Get list of task IDs from rusk list output 24 | _rusk_get_task_ids() { 25 | local rusk_cmd=$(_rusk_cmd) 26 | "$rusk_cmd" list 2>/dev/null | grep -oE '^\s*[•✔]\s+[0-9]+' | grep -oE '[0-9]+' | sort -n 27 | } 28 | 29 | # Get task text by ID 30 | _rusk_get_task_text() { 31 | local task_id="$1" 32 | local rusk_cmd=$(_rusk_cmd) 33 | local task_line=$("$rusk_cmd" list 2>/dev/null | grep -E "^\s*[•✔]\s+$task_id\s+") 34 | if [ -n "$task_line" ]; then 35 | echo "$task_line" | sed -E 's/^[[:space:]]*[•✔][[:space:]]+[0-9]+[[:space:]]+[0-9-]*[[:space:]]*//' 36 | fi 37 | } 38 | 39 | # Get date options (today, tomorrow, week ahead, two weeks ahead) 40 | _rusk_get_date_options() { 41 | local today=$(date +%d-%m-%Y 2>/dev/null) 42 | local tomorrow=$(date -d '+1 day' +%d-%m-%Y 2>/dev/null || date -v+1d +%d-%m-%Y 2>/dev/null || date +%d-%m-%Y) 43 | local week_ahead=$(date -d '+1 week' +%d-%m-%Y 2>/dev/null || date -v+1w +%d-%m-%Y 2>/dev/null || date +%d-%m-%Y) 44 | local two_weeks_ahead=$(date -d '+2 weeks' +%d-%m-%Y 2>/dev/null || date -v+2w +%d-%m-%Y 2>/dev/null || date +%d-%m-%Y) 45 | echo "$today" "$tomorrow" "$week_ahead" "$two_weeks_ahead" 46 | } 47 | 48 | # Get entered task IDs from command line 49 | _rusk_get_entered_ids() { 50 | local -a 
entered_ids 51 | local i 52 | for ((i=2; i<${#words[@]}; i++)); do 53 | if [[ "${words[i]}" =~ ^[0-9]+$ ]]; then 54 | entered_ids+=("${words[i]}") 55 | fi 56 | done 57 | echo "${entered_ids[@]}" 58 | } 59 | 60 | # Filter out already entered IDs from task ID list 61 | _rusk_filter_ids() { 62 | local -a ids=("${(@f)$(_rusk_get_task_ids)}") 63 | local -a entered_ids=($(_rusk_get_entered_ids)) 64 | 65 | if [ ${#entered_ids[@]} -eq 0 ]; then 66 | echo "${ids[@]}" 67 | return 68 | fi 69 | 70 | local -a filtered_ids 71 | for id in "${ids[@]}"; do 72 | local found=0 73 | for entered in "${entered_ids[@]}"; do 74 | if [ "$id" = "$entered" ]; then 75 | found=1 76 | break 77 | fi 78 | done 79 | if [ $found -eq 0 ]; then 80 | filtered_ids+=("$id") 81 | fi 82 | done 83 | echo "${filtered_ids[@]}" 84 | } 85 | 86 | # Count how many IDs have been entered 87 | _rusk_count_ids() { 88 | local count=0 89 | local i 90 | for ((i=2; i<${#words[@]}; i++)); do 91 | if [[ "${words[i]}" =~ ^[0-9]+$ ]]; then 92 | ((count++)) 93 | fi 94 | done 95 | echo $count 96 | } 97 | 98 | # Complete task IDs with filtering 99 | _rusk_complete_task_ids() { 100 | local -a ids=($(_rusk_filter_ids)) 101 | if [ ${#ids[@]} -gt 0 ]; then 102 | compadd $ids 103 | return 0 104 | fi 105 | return 1 106 | } 107 | 108 | # Complete date values 109 | _rusk_complete_date() { 110 | local -a dates=($(_rusk_get_date_options)) 111 | compadd $dates 112 | } 113 | 114 | _rusk() { 115 | # Complete commands 116 | if [ -z "$CURRENT" ] || [ "$CURRENT" -eq 2 ] 2>/dev/null; then 117 | compadd add edit mark del list restore completions a e m d l r 118 | return 119 | fi 120 | 121 | local cmd="$words[2]" 122 | local prev="" 123 | local cur="" 124 | 125 | if [ -n "$CURRENT" ] && [ "$CURRENT" -gt 1 ] 2>/dev/null; then 126 | prev="$words[$((CURRENT-1))]" 127 | fi 128 | if [ -n "$CURRENT" ] && [ "$CURRENT" -le ${#words[@]} ] 2>/dev/null; then 129 | cur="$words[$CURRENT]" 130 | fi 131 | 132 | case "$cmd" in 133 | add|a) 134 | if [[ "$prev" 
== "--date" ]] || [[ "$prev" == "-d" ]]; then 135 | _rusk_complete_date 136 | # For `rusk add ` or when starting a flag, offer flags 137 | elif [[ -z "$cur" ]] || [[ "$cur" == -* ]]; then 138 | # Offer flags: -d --date -h --help 139 | compadd -- -d --date -h --help 140 | fi 141 | ;; 142 | 143 | edit|e) 144 | # Complete date flag 145 | if [[ "$prev" == "--date" ]] || [[ "$prev" == "-d" ]]; then 146 | _rusk_complete_date 147 | # Suggest task text if previous word is a single ID and current is empty 148 | elif [[ "$prev" =~ ^[0-9]+$ ]] && [[ -z "$cur" ]]; then 149 | if [ $(_rusk_count_ids) -eq 1 ]; then 150 | local task_text=$(_rusk_get_task_text "$prev") 151 | if [ -n "$task_text" ]; then 152 | compadd -Q "$task_text" 153 | fi 154 | fi 155 | # Complete flags 156 | elif [[ "$cur" == -* ]]; then 157 | # Offer flags: -d --date -h --help 158 | compadd -- -d --date -h --help 159 | # Complete task IDs 160 | else 161 | _rusk_complete_task_ids 162 | fi 163 | ;; 164 | 165 | mark|m|del|d) 166 | # For del, complete flags first 167 | if [[ ("$cmd" == "del" || "$cmd" == "d") && "$cur" == -* ]]; then 168 | compadd --done 169 | # Complete task IDs 170 | else 171 | _rusk_complete_task_ids 172 | fi 173 | ;; 174 | 175 | list|l|restore|r) 176 | # No arguments 177 | ;; 178 | 179 | completions) 180 | # Third word: subcommands install/show 181 | if [ -n "$CURRENT" ] && [ "$CURRENT" -eq 3 ] 2>/dev/null; then 182 | compadd install show 183 | else 184 | # After install/show: suggest shells that haven't been used yet 185 | local -a all_shells=("bash" "zsh" "fish" "nu" "powershell") 186 | local -a selected_shells=() 187 | 188 | # Find index of install/show 189 | local install_idx=-1 190 | local i 191 | for ((i=1; i<=${#words[@]}; i++)); do 192 | if [[ "${words[i]}" == "install" || "${words[i]}" == "show" ]]; then 193 | install_idx=$i 194 | break 195 | fi 196 | done 197 | 198 | if (( install_idx > 0 )); then 199 | for ((i=install_idx+1; i<=${#words[@]}; i++)); do 200 | local w="${words[i]}" 201 
| for sh in "${all_shells[@]}"; do 202 | if [[ "$w" == "$sh" ]]; then 203 | selected_shells+=("$w") 204 | fi 205 | done 206 | done 207 | fi 208 | 209 | local -a remaining_shells=() 210 | for sh in "${all_shells[@]}"; do 211 | local found=0 212 | for sel in "${selected_shells[@]}"; do 213 | if [[ "$sh" == "$sel" ]]; then 214 | found=1 215 | break 216 | fi 217 | done 218 | if (( ! found )); then 219 | remaining_shells+=("$sh") 220 | fi 221 | done 222 | 223 | if [ ${#remaining_shells[@]} -gt 0 ]; then 224 | compadd "${remaining_shells[@]}" 225 | fi 226 | fi 227 | ;; 228 | esac 229 | } 230 | -------------------------------------------------------------------------------- /tests/environment_tests.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use rusk::TaskManager; 3 | use std::env; 4 | use std::fs; 5 | use std::sync::Mutex; 6 | use tempfile::TempDir; 7 | 8 | mod common; 9 | use common::create_test_task; 10 | 11 | // Mutex to ensure environment tests don't run in parallel 12 | static ENV_TEST_MUTEX: Mutex<()> = Mutex::new(()); 13 | 14 | #[test] 15 | fn test_rusk_db_as_directory() -> Result<()> { 16 | let _guard = ENV_TEST_MUTEX.lock().unwrap(); 17 | 18 | // Save current environment state 19 | let original_rusk_db = env::var("RUSK_DB").ok(); 20 | 21 | let temp_dir = TempDir::new()?; 22 | let custom_dir = temp_dir.path().join("custom_rusk"); 23 | fs::create_dir_all(&custom_dir)?; 24 | 25 | // Set RUSK_DB to directory (with trailing slash) 26 | let custom_dir_str = format!("{}/", custom_dir.display()); 27 | unsafe { 28 | env::set_var("RUSK_DB", &custom_dir_str); 29 | } 30 | 31 | // Get database path - in test mode, RUSK_DB is ignored, should use /tmp/rusk_debug/tasks.json 32 | let db_path = TaskManager::resolve_db_path(); 33 | let expected_path = std::env::temp_dir().join("rusk_debug").join("tasks.json"); 34 | 35 | assert_eq!(db_path, expected_path); 36 | 37 | // Test that TaskManager can use this path 38 | 
let mut tm = TaskManager::new_for_restore()?; 39 | tm.db_path = db_path.clone(); // Use the resolved path 40 | tm.tasks.push(create_test_task(1, "Test task", false)); 41 | tm.save()?; 42 | 43 | // Verify file was created in custom directory 44 | assert!(expected_path.exists()); 45 | 46 | // Restore original environment state 47 | unsafe { 48 | match original_rusk_db { 49 | Some(value) => env::set_var("RUSK_DB", value), 50 | None => env::remove_var("RUSK_DB"), 51 | } 52 | } 53 | 54 | Ok(()) 55 | } 56 | 57 | #[test] 58 | fn test_rusk_db_as_file() -> Result<()> { 59 | let _guard = ENV_TEST_MUTEX.lock().unwrap(); 60 | 61 | // Save current environment state 62 | let original_rusk_db = env::var("RUSK_DB").ok(); 63 | 64 | let temp_dir = TempDir::new()?; 65 | let custom_file = temp_dir.path().join("my_tasks.json"); 66 | 67 | // Set RUSK_DB to specific file 68 | unsafe { 69 | env::set_var("RUSK_DB", custom_file.to_str().unwrap()); 70 | } 71 | 72 | // Get database path - in test mode, RUSK_DB is ignored, should use /tmp/rusk_debug/tasks.json 73 | let db_path = TaskManager::resolve_db_path(); 74 | let expected_path = std::env::temp_dir().join("rusk_debug").join("tasks.json"); 75 | 76 | assert_eq!(db_path, expected_path); 77 | 78 | // Test that TaskManager can use this path 79 | let mut tm = TaskManager::new_for_restore()?; 80 | tm.db_path = db_path.clone(); // Use the resolved path 81 | tm.tasks 82 | .push(create_test_task(1, "Custom file task", false)); 83 | tm.save()?; 84 | 85 | // Verify file was created in test mode path (/tmp/rusk_debug/tasks.json) 86 | assert!(expected_path.exists()); 87 | 88 | // Verify backup is created with correct extension 89 | let backup_path = expected_path.with_extension("json.backup"); 90 | tm.tasks.push(create_test_task(2, "Another task", false)); 91 | tm.save()?; 92 | assert!(backup_path.exists()); 93 | 94 | // Restore original environment state 95 | unsafe { 96 | match original_rusk_db { 97 | Some(value) => env::set_var("RUSK_DB", value), 98 
| None => env::remove_var("RUSK_DB"), 99 | } 100 | } 101 | 102 | Ok(()) 103 | } 104 | 105 | #[test] 106 | fn test_rusk_db_default_path() -> Result<()> { 107 | let _guard = ENV_TEST_MUTEX.lock().unwrap(); 108 | 109 | // Save current environment state 110 | let original_rusk_db = env::var("RUSK_DB").ok(); 111 | 112 | // Ensure RUSK_DB is not set 113 | unsafe { 114 | env::remove_var("RUSK_DB"); 115 | } 116 | 117 | let db_path = TaskManager::resolve_db_path(); 118 | 119 | // In test mode, should use /tmp/rusk_debug/tasks.json (same as debug mode) 120 | assert!(db_path.to_string_lossy().contains("rusk_debug")); 121 | assert!(db_path.to_string_lossy().ends_with("tasks.json")); 122 | 123 | // Should be in a "rusk_debug" subdirectory 124 | let parent = db_path.parent().unwrap(); 125 | assert!(parent.file_name().unwrap() == "rusk_debug"); 126 | 127 | // Restore original environment state 128 | unsafe { 129 | match original_rusk_db { 130 | Some(value) => env::set_var("RUSK_DB", value), 131 | None => env::remove_var("RUSK_DB"), 132 | } 133 | } 134 | 135 | Ok(()) 136 | } 137 | 138 | #[test] 139 | fn test_resolve_db_path_integration_harness_uses_temp() -> Result<()> { 140 | let _guard = ENV_TEST_MUTEX.lock().unwrap(); 141 | 142 | // Save current env and simulate integration test harness 143 | let original_rusk_db = env::var("RUSK_DB").ok(); 144 | let original_threads = env::var("RUST_TEST_THREADS").ok(); 145 | 146 | unsafe { 147 | env::remove_var("RUSK_DB"); 148 | env::set_var("RUST_TEST_THREADS", "1"); 149 | } 150 | 151 | let db_path = TaskManager::resolve_db_path(); 152 | let path_str = db_path.to_string_lossy(); 153 | assert!( 154 | path_str.contains("rusk_debug"), 155 | "expected temp debug path, got {path_str}" 156 | ); 157 | assert!(path_str.ends_with("tasks.json")); 158 | 159 | // Cleanup vars 160 | unsafe { 161 | match original_rusk_db { 162 | Some(v) => env::set_var("RUSK_DB", v), 163 | None => env::remove_var("RUSK_DB"), 164 | } 165 | match original_threads { 166 | 
Some(v) => env::set_var("RUST_TEST_THREADS", v), 167 | None => env::remove_var("RUST_TEST_THREADS"), 168 | } 169 | } 170 | 171 | Ok(()) 172 | } 173 | 174 | #[test] 175 | fn test_get_db_dir() -> Result<()> { 176 | let _guard = ENV_TEST_MUTEX.lock().unwrap(); 177 | 178 | // Save current environment state 179 | let original_rusk_db = env::var("RUSK_DB").ok(); 180 | 181 | let temp_dir = TempDir::new()?; 182 | let subdir = temp_dir.path().join("subdir"); 183 | fs::create_dir_all(&subdir)?; 184 | let custom_file = subdir.join("tasks.json"); 185 | 186 | // Set RUSK_DB to file in subdirectory 187 | unsafe { 188 | env::set_var("RUSK_DB", custom_file.to_str().unwrap()); 189 | } 190 | 191 | // In test mode, RUSK_DB is ignored, should use /tmp/rusk_debug/tasks.json 192 | let db_dir = TaskManager::get_db_dir(); 193 | let expected_dir = std::env::temp_dir().join("rusk_debug"); 194 | 195 | assert_eq!(db_dir, expected_dir); 196 | 197 | // Restore original environment state 198 | unsafe { 199 | match original_rusk_db { 200 | Some(value) => env::set_var("RUSK_DB", value), 201 | None => env::remove_var("RUSK_DB"), 202 | } 203 | } 204 | 205 | Ok(()) 206 | } 207 | 208 | #[test] 209 | fn test_rusk_db_with_backup_and_restore() -> Result<()> { 210 | let _guard = ENV_TEST_MUTEX.lock().unwrap(); 211 | 212 | // Save current environment state 213 | let original_rusk_db = env::var("RUSK_DB").ok(); 214 | 215 | let temp_dir = TempDir::new()?; 216 | let custom_file = temp_dir.path().join("custom_backup_test.json"); 217 | 218 | unsafe { 219 | env::set_var("RUSK_DB", custom_file.to_str().unwrap()); 220 | } 221 | 222 | // Create TaskManager and add tasks 223 | let mut tm = TaskManager::new_for_restore()?; 224 | tm.db_path = custom_file.clone(); // Use the custom file path 225 | tm.tasks.push(create_test_task(1, "Task 1", false)); 226 | tm.save()?; 227 | 228 | // Add more tasks to create backup 229 | tm.tasks.push(create_test_task(2, "Task 2", false)); 230 | tm.save()?; 231 | 232 | // Verify backup 
was created with custom path 233 | let backup_path = custom_file.with_extension("json.backup"); 234 | assert!(backup_path.exists()); 235 | 236 | // Remove main file and restore 237 | fs::remove_file(&custom_file)?; 238 | tm.restore_from_backup()?; 239 | 240 | // Verify restore worked 241 | assert_eq!(tm.tasks.len(), 1); 242 | assert_eq!(tm.tasks[0].text, "Task 1"); 243 | 244 | // Restore original environment state 245 | unsafe { 246 | match original_rusk_db { 247 | Some(value) => env::set_var("RUSK_DB", value), 248 | None => env::remove_var("RUSK_DB"), 249 | } 250 | } 251 | 252 | Ok(()) 253 | } 254 | -------------------------------------------------------------------------------- /tests/README.md: -------------------------------------------------------------------------------- 1 |

# rusk Tests

2 |
3 | 4 | This directory contains comprehensive unit and integration tests for the rusk task management application. These tests validate core functionality, edge cases, data persistence, and CLI behavior. 5 | 6 | ## Structure 7 | 8 | ``` 9 | tests/ 10 | ├── README.md # This file 11 | ├── common/ # Shared test utilities 12 | │ └── mod.rs # Helper functions for creating test tasks 13 | ├── completions/ # Shell completion tests (see completions/README.md) 14 | │ └── ... 15 | ├── cli_tests.rs # CLI command tests 16 | ├── lib_tests.rs # Core library function tests 17 | ├── database_corruption_tests.rs # Database corruption handling tests 18 | ├── directory_structure_tests.rs # Directory creation and structure tests 19 | ├── edge_case_tests.rs # Edge cases and boundary condition tests 20 | ├── edit_mode_tests.rs # Edit command mode tests 21 | ├── edit_parsing_tests.rs # Edit command argument parsing tests 22 | ├── environment_tests.rs # Environment variable tests 23 | ├── mark_success_tests.rs # Mark command success/failure tests 24 | ├── parse_flexible_ids_tests.rs # Flexible ID parsing tests (comma-separated, etc.) 
25 | ├── path_migration_tests.rs # Database path migration tests 26 | ├── persistence_tests.rs # Data persistence and save/load tests 27 | ├── restore_tests.rs # Backup restore functionality tests 28 | ├── unchanged_detection_tests.rs # Unchanged task detection tests 29 | └── completions.rs # Completion test entry point 30 | ``` 31 | 32 | ## Running Tests 33 | 34 | ### All Tests 35 | Run all tests in the project: 36 | ```bash 37 | cargo test 38 | ``` 39 | 40 | ### Specific Test File 41 | Run tests from a specific file: 42 | ```bash 43 | cargo test --test cli_tests 44 | cargo test --test lib_tests 45 | cargo test --test persistence_tests 46 | ``` 47 | 48 | ### Specific Test Function 49 | Run a single test function: 50 | ```bash 51 | cargo test test_add_task 52 | cargo test test_mark_tasks 53 | ``` 54 | 55 | ### With Output 56 | Run tests with output from passing tests: 57 | ```bash 58 | cargo test -- --nocapture 59 | ``` 60 | 61 | ### Filter Tests 62 | Run tests matching a pattern: 63 | ```bash 64 | cargo test edit 65 | cargo test persistence 66 | ``` 67 | 68 | ## Test Categories 69 | 70 | ### Core Functionality Tests 71 | 72 | #### `lib_tests.rs` 73 | Tests for core library functions: 74 | - ID generation (`generate_next_id`) 75 | - Task management operations 76 | - Task filtering and querying 77 | - Date handling and normalization 78 | - Task validation 79 | 80 | #### `cli_tests.rs` 81 | Tests for CLI command behavior: 82 | - `add` command - Adding tasks with and without dates 83 | - `edit` command - Editing task text and dates 84 | - `mark` command - Marking tasks as done/undone 85 | - `del` command - Deleting tasks 86 | - `list` command - Listing and filtering tasks 87 | - `restore` command - Restoring from backups 88 | 89 | ### Data Persistence Tests 90 | 91 | #### `persistence_tests.rs` 92 | Tests for data persistence: 93 | - Saving tasks to disk 94 | - Loading tasks from disk 95 | - Mark operation persistence 96 | - Date persistence 97 | - Task state 
persistence across sessions 98 | 99 | #### `database_corruption_tests.rs` 100 | Tests for handling corrupted database files: 101 | - Invalid JSON handling 102 | - Trailing content detection 103 | - Error message clarity 104 | - Recovery mechanisms 105 | 106 | #### `restore_tests.rs` 107 | Tests for backup and restore functionality: 108 | - Backup file creation 109 | - Restore from backup 110 | - Backup file naming conventions 111 | - Restore error handling 112 | 113 | ### Path and Environment Tests 114 | 115 | #### `directory_structure_tests.rs` 116 | Tests for directory structure: 117 | - Default directory creation 118 | - Custom directory paths 119 | - Directory creation on save 120 | - Backup file location 121 | 122 | #### `path_migration_tests.rs` 123 | Tests for database path migration: 124 | - Default path structure 125 | - Backup file naming 126 | - Path resolution with environment variables 127 | - Migration scenarios 128 | 129 | #### `environment_tests.rs` 130 | Tests for environment variable handling: 131 | - `RUSK_DB` variable as directory 132 | - `RUSK_DB` variable as file path 133 | - Path resolution in test mode 134 | - Environment variable precedence 135 | 136 | ### Edit Command Tests 137 | 138 | #### `edit_parsing_tests.rs` 139 | Tests for edit command argument parsing: 140 | - ID extraction 141 | - Text extraction 142 | - Date flag handling 143 | - Unchanged task detection 144 | - Save behavior optimization 145 | 146 | #### `edit_mode_tests.rs` 147 | Tests for edit command modes: 148 | - Date-only edits (`--date` flag) 149 | - Text-only edits 150 | - Combined text and date edits 151 | - Interactive mode handling 152 | 153 | ### ID Parsing Tests 154 | 155 | #### `parse_flexible_ids_tests.rs` 156 | Tests for flexible ID parsing: 157 | - Single ID parsing 158 | - Comma-separated IDs (`1,2,3`) 159 | - Space-separated IDs 160 | - Invalid ID handling 161 | - Mixed format handling 162 | 163 | ### Edge Cases and Validation 164 | 165 | #### 
`edge_case_tests.rs` 166 | Tests for edge cases and boundary conditions: 167 | - Empty input handling 168 | - Whitespace-only input 169 | - Special character handling 170 | - Very long task text 171 | - Date validation 172 | - Invalid date formats 173 | - Task ID boundaries 174 | 175 | #### `unchanged_detection_tests.rs` 176 | Tests for unchanged task detection: 177 | - Detecting when task text hasn't changed 178 | - Detecting when task date hasn't changed 179 | - Optimizing save operations 180 | - Preventing unnecessary file writes 181 | 182 | #### `mark_success_tests.rs` 183 | Tests for mark command success/failure reporting: 184 | - Marking tasks as done 185 | - Unmarking tasks (marking as undone) 186 | - Handling already-marked tasks 187 | - Not found task handling 188 | - Return value correctness 189 | 190 | ## Test Utilities 191 | 192 | ### `common/mod.rs` 193 | Shared helper functions for tests: 194 | - `create_test_task(id, text, done)` - Create a test task 195 | - `create_test_task_with_date(id, text, done, date)` - Create a test task with date 196 | 197 | Usage: 198 | ```rust 199 | mod common; 200 | use common::create_test_task; 201 | 202 | #[test] 203 | fn my_test() { 204 | let task = create_test_task(1, "Test task", false); 205 | // ... 206 | } 207 | ``` 208 | 209 | ## Test Coverage 210 | 211 | The test suite covers: 212 | 213 | - All CLI commands and their aliases 214 | - Core library functions 215 | - Data persistence and file I/O 216 | - Error handling and edge cases 217 | - Environment variable handling 218 | - Path resolution and migration 219 | - Backup and restore functionality 220 | - Date parsing and validation 221 | - ID parsing (flexible formats) 222 | - Task state management 223 | - Database corruption handling 224 | 225 | ## Adding New Tests 226 | 227 | To add a new test: 228 | 229 | 1. Choose the appropriate test file or create a new one if testing a new feature area 230 | 2. 
Use helper functions from `common/mod.rs` when creating test data 231 | 3. Follow existing test patterns for consistency 232 | 4. Use descriptive test names starting with `test_` 233 | 5. Test both success and failure cases 234 | 6. Use `tempfile` for temporary directories when testing file operations 235 | 236 | Example: 237 | ```rust 238 | use rusk::TaskManager; 239 | mod common; 240 | use common::create_test_task; 241 | 242 | #[test] 243 | fn test_new_feature() { 244 | let mut tm = TaskManager::new_empty().unwrap(); 245 | // ... test implementation 246 | } 247 | ``` 248 | 249 | ## Integration with CI/CD 250 | 251 | These tests are designed to run in CI/CD pipelines: 252 | 253 | ```yaml 254 | # Example GitHub Actions step 255 | - name: Run tests 256 | run: cargo test --all-features 257 | 258 | # With coverage 259 | - name: Run tests with coverage 260 | run: | 261 | cargo test --all-features 262 | cargo test --test completions 263 | ``` 264 | 265 | ## Notes 266 | 267 | - Tests use temporary directories for file operations to avoid affecting user data 268 | - Some tests require specific environment setup (see individual test files) 269 | - Tests are designed to be run in parallel (use `cargo test -- --test-threads=1` to run them serially if needed) 270 | - Database path resolution uses test mode (`/tmp/rusk_debug/tasks.json`) to avoid conflicts 271 | - Completion tests are in a separate directory (`completions/`) with their own README 272 | 273 |
274 |

[Back to top](#rusk-tests)

275 | -------------------------------------------------------------------------------- /completions/rusk.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Bash completion script for rusk 3 | # 4 | # Installation: 5 | # 1. Automatic (recommended): 6 | # rusk completions install bash 7 | # 8 | # 2. Manual: 9 | # Generate script using rusk command: 10 | # rusk completions show bash > ~/.bash_completion.d/rusk 11 | # 12 | # Then add to your ~/.bashrc: 13 | # source ~/.bash_completion.d/rusk 14 | # 15 | # System-wide (requires root): 16 | # rusk completions show bash | sudo tee /etc/bash_completion.d/rusk > /dev/null 17 | # System-wide completions are loaded automatically on bash startup 18 | 19 | # Find rusk binary 20 | _rusk_cmd() { 21 | command -v rusk 2>/dev/null || echo "rusk" 22 | } 23 | 24 | # Get list of task IDs from rusk list output 25 | _rusk_get_task_ids() { 26 | local rusk_cmd=$(_rusk_cmd) 27 | "$rusk_cmd" list 2>/dev/null | grep -E '[•✔]' | grep -oE '^\s+[•✔]\s+[0-9]+\s+' | grep -oE '[0-9]+' | sort -n | tr '\n' ' ' 28 | } 29 | 30 | # Get task text by ID 31 | _rusk_get_task_text() { 32 | local task_id="$1" 33 | local rusk_cmd=$(_rusk_cmd) 34 | local task_line=$("$rusk_cmd" list 2>/dev/null | grep -E '[•✔]' | grep -E "^\s+[•✔]\s+$task_id\s+") 35 | if [ -n "$task_line" ]; then 36 | # Extract text after ID and date (remove status, ID, optional date) 37 | echo "$task_line" | sed -E 's/^\s+[•✔]\s+[0-9]+\s+([0-9]{2}-[0-9]{2}-[0-9]{4}\s+)?//' | sed 's/^[[:space:]]*//;s/[[:space:]]*$//' 38 | fi 39 | } 40 | 41 | # Get date options (today, tomorrow, week ahead, two weeks ahead) 42 | _rusk_get_date_options() { 43 | local today=$(date +%d-%m-%Y 2>/dev/null) 44 | local tomorrow=$(date -d '+1 day' +%d-%m-%Y 2>/dev/null || date -v+1d +%d-%m-%Y 2>/dev/null || date +%d-%m-%Y) 45 | local week_ahead=$(date -d '+1 week' +%d-%m-%Y 2>/dev/null || date -v+1w +%d-%m-%Y 2>/dev/null || date +%d-%m-%Y) 46 | local 
two_weeks_ahead=$(date -d '+2 weeks' +%d-%m-%Y 2>/dev/null || date -v+2w +%d-%m-%Y 2>/dev/null || date +%d-%m-%Y) 47 | echo "$today $tomorrow $week_ahead $two_weeks_ahead" 48 | } 49 | 50 | # Get entered task IDs from command line 51 | _rusk_get_entered_ids() { 52 | local entered_ids="" 53 | local i 54 | for ((i=2; i= 0 )); then 146 | for ((i=install_idx+1; i<${#COMP_WORDS[@]}; i++)); do 147 | local w="${COMP_WORDS[i]}" 148 | case " ${all_shells[*]} " in 149 | *" $w "*) 150 | selected+=("$w") 151 | ;; 152 | esac 153 | done 154 | fi 155 | 156 | # Output shells that are not yet selected 157 | local result=() 158 | for sh in "${all_shells[@]}"; do 159 | if [[ ! " ${selected[*]} " =~ (^|[[:space:]])"$sh"([[:space:]]|$) ]]; then 160 | result+=("$sh") 161 | fi 162 | done 163 | 164 | echo "${result[*]}" 165 | } 166 | 167 | _rusk_completion() { 168 | local cur="${COMP_WORDS[COMP_CWORD]}" 169 | local prev="" 170 | local cmd="" 171 | 172 | # Get previous word if available 173 | if [ $COMP_CWORD -gt 0 ]; then 174 | prev="${COMP_WORDS[COMP_CWORD-1]}" 175 | fi 176 | 177 | # Get command (second word) if available 178 | if [ ${#COMP_WORDS[@]} -gt 1 ]; then 179 | cmd="${COMP_WORDS[1]}" 180 | fi 181 | 182 | # Complete commands 183 | if [ $COMP_CWORD -eq 1 ]; then 184 | COMPREPLY=($(compgen -W "add edit mark del list restore completions a e m d l r" -- "$cur")) 185 | return 0 186 | fi 187 | 188 | # Complete subcommands 189 | case "$cmd" in 190 | add|a) 191 | if [[ "$prev" == "--date" ]] || [[ "$prev" == "-d" ]]; then 192 | _rusk_complete_date 193 | # For `rusk add ` or when starting a flag, offer flags 194 | elif [[ -z "$cur" ]] || [[ "$cur" == -* ]]; then 195 | _rusk_complete_add_edit_flags 196 | fi 197 | ;; 198 | 199 | edit|e) 200 | # Suggest task text if previous word is a single ID and current is empty 201 | if [[ "$prev" =~ ^[0-9]+$ ]] && [[ -z "$cur" ]]; then 202 | if [ $(_rusk_count_ids) -eq 1 ]; then 203 | local task_text=$(_rusk_get_task_text "$prev") 204 | if [ -n 
"$task_text" ]; then 205 | COMPREPLY=("$task_text") 206 | return 0 207 | fi 208 | fi 209 | fi 210 | 211 | # Complete date flag 212 | if [[ "$prev" == "--date" ]] || [[ "$prev" == "-d" ]]; then 213 | _rusk_complete_date 214 | # Complete task IDs 215 | elif [[ "$prev" == "edit" ]] || [[ "$prev" == "e" ]] || [[ "$cur" =~ ^[0-9]+$ ]]; then 216 | _rusk_complete_task_ids && return 0 217 | # Complete flags 218 | elif [[ "$cur" == -* ]]; then 219 | _rusk_complete_add_edit_flags 220 | fi 221 | ;; 222 | 223 | mark|m|del|d) 224 | # Complete task IDs 225 | if [[ "$cur" =~ ^[0-9]*$ ]] || [[ "$prev" == "$cmd" ]]; then 226 | _rusk_complete_task_ids && return 0 227 | fi 228 | 229 | # For del, complete flags 230 | if [[ ("$cmd" == "del" || "$cmd" == "d") && "$cur" == -* ]]; then 231 | _rusk_complete_del_flags 232 | fi 233 | ;; 234 | 235 | list|l|restore|r) 236 | # These commands don't take arguments 237 | ;; 238 | 239 | completions) 240 | if [[ "$prev" == "completions" ]]; then 241 | COMPREPLY=($(compgen -W "install show" -- "$cur")) 242 | else 243 | # After install/show, suggest only shells that haven't been used yet 244 | local shells=$(_rusk_get_available_shells) 245 | if [[ -n "$shells" ]]; then 246 | COMPREPLY=($(compgen -W "$shells" -- "$cur")) 247 | fi 248 | fi 249 | ;; 250 | esac 251 | } 252 | 253 | complete -F _rusk_completion rusk 254 | -------------------------------------------------------------------------------- /tests/lib_tests.rs: -------------------------------------------------------------------------------- 1 | use chrono::NaiveDate; 2 | use rusk::{Task, TaskManager}; 3 | mod common; 4 | use common::create_test_task; 5 | 6 | #[test] 7 | fn test_generate_next_id_empty() { 8 | let tm = TaskManager::new_empty().unwrap(); 9 | let id = tm.generate_next_id().unwrap(); 10 | assert_eq!(id, 1); 11 | } 12 | 13 | #[test] 14 | fn test_generate_next_id_sequential() { 15 | let mut tm = TaskManager::new_empty().unwrap(); 16 | tm.tasks = vec![ 17 | create_test_task(1, "Task 1", 
false), 18 | create_test_task(2, "Task 2", false), 19 | create_test_task(3, "Task 3", false), 20 | ]; 21 | let id = tm.generate_next_id().unwrap(); 22 | assert_eq!(id, 4); 23 | } 24 | 25 | #[test] 26 | fn test_generate_next_id_with_gaps() { 27 | let mut tm = TaskManager::new_empty().unwrap(); 28 | tm.tasks = vec![ 29 | create_test_task(1, "Task 1", false), 30 | create_test_task(3, "Task 3", false), 31 | create_test_task(5, "Task 5", false), 32 | ]; 33 | let id = tm.generate_next_id().unwrap(); 34 | assert_eq!(id, 2); 35 | } 36 | 37 | #[test] 38 | fn test_generate_next_id_max_reached() { 39 | let mut tm = TaskManager::new_empty().unwrap(); 40 | 41 | // Fill up to 200 tasks (safe number) 42 | for i in 1..=200 { 43 | tm.tasks.push(Task { 44 | id: i, 45 | text: format!("Task {i}"), 46 | date: None, 47 | done: false, 48 | }); 49 | } 50 | 51 | // Next ID should be 201 52 | let next_id = tm.generate_next_id().unwrap(); 53 | assert_eq!(next_id, 201); 54 | } 55 | 56 | #[test] 57 | fn test_find_task_by_id() { 58 | let mut tm = TaskManager::new_empty().unwrap(); 59 | tm.tasks = vec![ 60 | create_test_task(1, "Task 1", false), 61 | create_test_task(2, "Task 2", false), 62 | create_test_task(3, "Task 3", false), 63 | ]; 64 | 65 | assert_eq!(tm.find_task_by_id(1), Some(0)); 66 | assert_eq!(tm.find_task_by_id(2), Some(1)); 67 | assert_eq!(tm.find_task_by_id(3), Some(2)); 68 | assert_eq!(tm.find_task_by_id(4), None); 69 | } 70 | 71 | #[test] 72 | fn test_find_tasks_by_ids() { 73 | let mut tm = TaskManager::new_empty().unwrap(); 74 | tm.tasks = vec![ 75 | create_test_task(1, "Task 1", false), 76 | create_test_task(2, "Task 2", false), 77 | create_test_task(3, "Task 3", false), 78 | create_test_task(4, "Task 4", false), 79 | ]; 80 | 81 | let (found, not_found) = tm.find_tasks_by_ids(&[1, 3, 5]); 82 | assert_eq!(found, vec![0, 2]); 83 | assert_eq!(not_found, vec![5]); 84 | } 85 | 86 | #[test] 87 | fn test_find_tasks_by_ids_empty() { 88 | let tm = TaskManager::new_empty().unwrap(); 89 
| let (found, not_found) = tm.find_tasks_by_ids(&[1, 2, 3]); 90 | assert!(found.is_empty()); 91 | assert_eq!(not_found, vec![1, 2, 3]); 92 | } 93 | 94 | #[test] 95 | fn test_add_task_success() { 96 | let mut tm = TaskManager::new_empty().unwrap(); 97 | let text = vec!["Buy".to_string(), "groceries".to_string()]; 98 | 99 | let result = tm.add_task(text, None); 100 | assert!(result.is_ok()); 101 | assert_eq!(tm.tasks.len(), 1); 102 | assert_eq!(tm.tasks[0].id, 1); 103 | assert_eq!(tm.tasks[0].text, "Buy groceries"); 104 | assert!(!tm.tasks[0].done); 105 | } 106 | 107 | #[test] 108 | fn test_add_task_with_date() { 109 | let mut tm = TaskManager::new_empty().unwrap(); 110 | let text = vec!["Meeting".to_string()]; 111 | let date = Some("15-01-2025".to_string()); 112 | 113 | let result = tm.add_task(text, date); 114 | assert!(result.is_ok()); 115 | assert_eq!(tm.tasks.len(), 1); 116 | assert_eq!( 117 | tm.tasks[0].date, 118 | NaiveDate::parse_from_str("15-01-2025", "%d-%m-%Y").ok() 119 | ); 120 | } 121 | 122 | #[test] 123 | fn test_add_task_empty_text() { 124 | let mut tm = TaskManager::new_empty().unwrap(); 125 | let text = vec!["".to_string()]; 126 | 127 | let result = tm.add_task(text, None); 128 | assert!(result.is_err()); 129 | assert_eq!(tm.tasks.len(), 0); 130 | } 131 | 132 | #[test] 133 | fn test_delete_tasks() { 134 | let mut tm = TaskManager::new_empty().unwrap(); 135 | tm.tasks = vec![ 136 | create_test_task(1, "Task 1", false), 137 | create_test_task(2, "Task 2", false), 138 | create_test_task(3, "Task 3", false), 139 | ]; 140 | 141 | let not_found = tm.delete_tasks(vec![1, 3]).unwrap(); 142 | assert!(not_found.is_empty()); 143 | assert_eq!(tm.tasks.len(), 1); 144 | assert_eq!(tm.tasks[0].id, 2); 145 | } 146 | 147 | #[test] 148 | fn test_delete_tasks_not_found() { 149 | let mut tm = TaskManager::new_empty().unwrap(); 150 | tm.tasks = vec![ 151 | create_test_task(1, "Task 1", false), 152 | create_test_task(2, "Task 2", false), 153 | ]; 154 | 155 | let 
not_found = tm.delete_tasks(vec![1, 3, 5]).unwrap(); 156 | // Sort both vectors for comparison since order doesn't matter 157 | let mut expected = vec![3, 5]; 158 | expected.sort(); 159 | let mut actual = not_found; 160 | actual.sort(); 161 | assert_eq!(actual, expected); 162 | assert_eq!(tm.tasks.len(), 1); 163 | assert_eq!(tm.tasks[0].id, 2); 164 | } 165 | 166 | #[test] 167 | fn test_delete_all_done() { 168 | let mut tm = TaskManager::new_empty().unwrap(); 169 | tm.tasks = vec![ 170 | create_test_task(1, "Task 1", true), 171 | create_test_task(2, "Task 2", false), 172 | create_test_task(3, "Task 3", true), 173 | ]; 174 | 175 | let deleted = tm.delete_all_done().unwrap(); 176 | assert_eq!(deleted, 2); 177 | assert_eq!(tm.tasks.len(), 1); 178 | assert_eq!(tm.tasks[0].id, 2); 179 | } 180 | 181 | #[test] 182 | fn test_delete_all_done_empty() { 183 | let mut tm = TaskManager::new_empty().unwrap(); 184 | tm.tasks = vec![ 185 | create_test_task(1, "Task 1", false), 186 | create_test_task(2, "Task 2", false), 187 | ]; 188 | 189 | let deleted = tm.delete_all_done().unwrap(); 190 | assert_eq!(deleted, 0); 191 | assert_eq!(tm.tasks.len(), 2); 192 | } 193 | 194 | #[test] 195 | fn test_mark_tasks() { 196 | let mut tm = TaskManager::new_empty().unwrap(); 197 | tm.tasks = vec![ 198 | create_test_task(1, "Task 1", false), 199 | create_test_task(2, "Task 2", false), 200 | create_test_task(3, "Task 3", false), 201 | ]; 202 | 203 | let (_marked, not_found) = tm.mark_tasks(vec![1, 3]).unwrap(); 204 | assert!(not_found.is_empty()); 205 | assert!(tm.tasks[0].done); 206 | assert!(!tm.tasks[1].done); 207 | assert!(tm.tasks[2].done); 208 | } 209 | 210 | #[test] 211 | fn test_mark_tasks_not_found() { 212 | let mut tm = TaskManager::new_empty().unwrap(); 213 | tm.tasks = vec![ 214 | create_test_task(1, "Task 1", false), 215 | create_test_task(2, "Task 2", false), 216 | ]; 217 | 218 | let (_marked, not_found) = tm.mark_tasks(vec![1, 3, 5]).unwrap(); 219 | assert_eq!(not_found, vec![3, 5]); 
220 | assert!(tm.tasks[0].done); 221 | assert!(!tm.tasks[1].done); 222 | } 223 | 224 | #[test] 225 | fn test_edit_tasks() { 226 | let mut tm = TaskManager::new_empty().unwrap(); 227 | tm.tasks = vec![ 228 | create_test_task(1, "Task 1", false), 229 | create_test_task(2, "Task 2", false), 230 | ]; 231 | 232 | let text = Some(vec!["New".to_string(), "text".to_string()]); 233 | let date = Some("15-01-2025".to_string()); 234 | 235 | let (_edited, _unchanged, not_found) = tm 236 | .edit_tasks(vec![1, 2], text.clone(), date.clone()) 237 | .unwrap(); 238 | assert!(not_found.is_empty()); 239 | assert_eq!(tm.tasks[0].text, "New text"); 240 | assert_eq!(tm.tasks[1].text, "New text"); 241 | assert_eq!( 242 | tm.tasks[0].date, 243 | NaiveDate::parse_from_str("15-01-2025", "%d-%m-%Y").ok() 244 | ); 245 | assert_eq!( 246 | tm.tasks[1].date, 247 | NaiveDate::parse_from_str("15-01-2025", "%d-%m-%Y").ok() 248 | ); 249 | } 250 | 251 | #[test] 252 | fn test_edit_tasks_partial() { 253 | let mut tm = TaskManager::new_empty().unwrap(); 254 | tm.tasks = vec![ 255 | create_test_task(1, "Task 1", false), 256 | create_test_task(2, "Task 2", false), 257 | ]; 258 | 259 | let text = Some(vec!["New".to_string(), "text".to_string()]); 260 | 261 | let (_edited, _unchanged, not_found) = tm.edit_tasks(vec![1], text, None).unwrap(); 262 | assert!(not_found.is_empty()); 263 | assert_eq!(tm.tasks[0].text, "New text"); 264 | assert_eq!(tm.tasks[1].text, "Task 2"); 265 | } 266 | 267 | #[test] 268 | fn test_edit_tasks_not_found() { 269 | let mut tm = TaskManager::new_empty().unwrap(); 270 | tm.tasks = vec![create_test_task(1, "Task 1", false)]; 271 | 272 | let text = Some(vec!["New".to_string(), "text".to_string()]); 273 | 274 | let (_edited, _unchanged, not_found) = tm.edit_tasks(vec![1, 3], text, None).unwrap(); 275 | assert_eq!(not_found, vec![3]); 276 | assert_eq!(tm.tasks[0].text, "New text"); 277 | } 278 | -------------------------------------------------------------------------------- 
/tests/cli_tests.rs: -------------------------------------------------------------------------------- 1 | use chrono::NaiveDate; 2 | use rusk::{TaskManager, normalize_date_string}; 3 | 4 | #[test] 5 | fn test_cli_add_command() { 6 | let mut tm = TaskManager::new_empty().unwrap(); 7 | 8 | // Test adding single word task 9 | let result = tm.add_task(vec!["hello".to_string()], None); 10 | assert!(result.is_ok()); 11 | assert_eq!(tm.tasks.len(), 1); 12 | assert_eq!(tm.tasks[0].text, "hello"); 13 | assert_eq!(tm.tasks[0].id, 1); 14 | 15 | // Test adding multi-word task 16 | let result = tm.add_task(vec!["buy".to_string(), "groceries".to_string()], None); 17 | assert!(result.is_ok()); 18 | assert_eq!(tm.tasks.len(), 2); 19 | assert_eq!(tm.tasks[1].text, "buy groceries"); 20 | assert_eq!(tm.tasks[1].id, 2); 21 | 22 | // Test adding task with date 23 | let result = tm.add_task(vec!["meeting".to_string()], Some("15-01-2025".to_string())); 24 | assert!(result.is_ok()); 25 | assert_eq!(tm.tasks.len(), 3); 26 | assert_eq!(tm.tasks[2].text, "meeting"); 27 | assert_eq!( 28 | tm.tasks[2].date, 29 | NaiveDate::parse_from_str("15-01-2025", "%d-%m-%Y").ok() 30 | ); 31 | } 32 | 33 | #[test] 34 | fn test_cli_delete_command() { 35 | let mut tm = TaskManager::new_empty().unwrap(); 36 | 37 | // Add tasks using TaskManager to get proper IDs 38 | let result = tm.add_task(vec!["Task 1".to_string()], None); 39 | assert!(result.is_ok()); 40 | 41 | let result = tm.add_task(vec!["Task 2".to_string()], None); 42 | assert!(result.is_ok()); 43 | 44 | let result = tm.add_task(vec!["Task 3".to_string()], None); 45 | assert!(result.is_ok()); 46 | 47 | let result = tm.add_task(vec!["Task 4".to_string()], None); 48 | assert!(result.is_ok()); 49 | 50 | // Mark tasks 2 and 4 as done 51 | let result = tm.mark_tasks(vec![2, 4]); 52 | assert!(result.is_ok()); 53 | 54 | // Verify initial state 55 | assert_eq!(tm.tasks.len(), 4); 56 | assert!(!tm.tasks[0].done); // Task 1 57 | assert!(tm.tasks[1].done); // 
Task 2 58 | assert!(!tm.tasks[2].done); // Task 3 59 | assert!(tm.tasks[3].done); // Task 4 60 | 61 | // Test deleting specific tasks (1 and 3) 62 | let result = tm.delete_tasks(vec![1, 3]); 63 | assert!(result.is_ok()); 64 | assert_eq!(tm.tasks.len(), 2); 65 | 66 | // After deletion, remaining tasks should have IDs 2 and 4 67 | let remaining_ids: Vec = tm.tasks.iter().map(|t| t.id).collect(); 68 | assert!(remaining_ids.contains(&2)); 69 | assert!(remaining_ids.contains(&4)); 70 | 71 | // Test deleting all done tasks 72 | let result = tm.delete_all_done(); 73 | assert!(result.is_ok()); 74 | assert_eq!(tm.tasks.len(), 0); // All remaining tasks were done, so all were deleted 75 | } 76 | 77 | #[test] 78 | fn test_cli_delete_with_done_flag() { 79 | // Test that --done flag logic works correctly 80 | // This tests the core functionality without interactive confirmation 81 | let mut tm = TaskManager::new_empty().unwrap(); 82 | 83 | // Add tasks 84 | tm.add_task(vec!["Task 1".to_string()], None).unwrap(); 85 | tm.add_task(vec!["Task 2".to_string()], None).unwrap(); 86 | tm.add_task(vec!["Task 3".to_string()], None).unwrap(); 87 | 88 | // Mark tasks 1 and 3 as done 89 | tm.mark_tasks(vec![1, 3]).unwrap(); 90 | 91 | // Verify initial state 92 | assert_eq!(tm.tasks.len(), 3); 93 | assert!(tm.tasks[0].done); // Task 1 94 | assert!(!tm.tasks[1].done); // Task 2 95 | assert!(tm.tasks[2].done); // Task 3 96 | 97 | // Test deleting all done tasks (simulating --done flag behavior) 98 | // This directly tests delete_all_done which is what --done flag calls 99 | let deleted_count = tm.delete_all_done().unwrap(); 100 | assert_eq!(deleted_count, 2); // Should delete 2 tasks 101 | assert_eq!(tm.tasks.len(), 1); // Only Task 2 should remain 102 | assert_eq!(tm.tasks[0].id, 2); 103 | assert!(!tm.tasks[0].done); 104 | } 105 | 106 | #[test] 107 | fn test_cli_delete_error_no_ids_no_done() { 108 | use rusk::cli::HandlerCLI; 109 | let mut tm = TaskManager::new_empty().unwrap(); 110 | 111 | 
// Add a task 112 | tm.add_task(vec!["Task 1".to_string()], None).unwrap(); 113 | 114 | // Test error message when neither IDs nor --done flag are provided 115 | // This should print error message but not fail 116 | let result = HandlerCLI::handle_delete_tasks(&mut tm, vec![], false); 117 | assert!(result.is_ok()); // Function succeeds but prints error message 118 | assert_eq!(tm.tasks.len(), 1); // Task should remain 119 | } 120 | 121 | #[test] 122 | fn test_cli_mark_command() { 123 | let mut tm = TaskManager::new_empty().unwrap(); 124 | 125 | // Add tasks 126 | let result = tm.add_task(vec!["Task 1".to_string()], None); 127 | assert!(result.is_ok()); 128 | 129 | let result = tm.add_task(vec!["Task 2".to_string()], None); 130 | assert!(result.is_ok()); 131 | 132 | let result = tm.add_task(vec!["Task 3".to_string()], None); 133 | assert!(result.is_ok()); 134 | 135 | // Test marking single task 136 | let result = tm.mark_tasks(vec![1]); 137 | assert!(result.is_ok()); 138 | assert!(tm.tasks[0].done); 139 | 140 | // Test marking multiple tasks 141 | let result = tm.mark_tasks(vec![2, 3]); 142 | assert!(result.is_ok()); 143 | assert!(tm.tasks[1].done); // Task 2 was false, now true 144 | assert!(tm.tasks[2].done); // Task 3 was false, now true 145 | 146 | // Test marking already done task (should toggle to undone) 147 | let result = tm.mark_tasks(vec![1]); 148 | assert!(result.is_ok()); 149 | assert!(!tm.tasks[0].done); // Task 1 was true, now false 150 | } 151 | 152 | #[test] 153 | fn test_cli_edit_command() { 154 | let mut tm = TaskManager::new_empty().unwrap(); 155 | 156 | // Add tasks 157 | let result = tm.add_task(vec!["Original task 1".to_string()], None); 158 | assert!(result.is_ok()); 159 | 160 | let result = tm.add_task(vec!["Original task 2".to_string()], None); 161 | assert!(result.is_ok()); 162 | 163 | // Test editing text only 164 | let result = tm.edit_tasks( 165 | vec![1], 166 | Some(vec!["Updated".to_string(), "text".to_string()]), 167 | None, 168 | ); 
169 | assert!(result.is_ok()); 170 | assert_eq!(tm.tasks[0].text, "Updated text"); 171 | assert_eq!(tm.tasks[1].text, "Original task 2"); // Unchanged 172 | 173 | // Test editing date only 174 | let result = tm.edit_tasks(vec![2], None, Some("15-06-2025".to_string())); 175 | assert!(result.is_ok()); 176 | assert_eq!(tm.tasks[0].date, None); // Unchanged 177 | assert_eq!( 178 | tm.tasks[1].date, 179 | NaiveDate::parse_from_str("15-06-2025", "%d-%m-%Y").ok() 180 | ); 181 | 182 | // Test editing both text and date 183 | let result = tm.edit_tasks( 184 | vec![1], 185 | Some(vec!["Final".to_string(), "version".to_string()]), 186 | Some("31-12-2025".to_string()), 187 | ); 188 | assert!(result.is_ok()); 189 | assert_eq!(tm.tasks[0].text, "Final version"); 190 | assert_eq!( 191 | tm.tasks[0].date, 192 | NaiveDate::parse_from_str("31-12-2025", "%d-%m-%Y").ok() 193 | ); 194 | } 195 | 196 | #[test] 197 | fn test_cli_list_command() { 198 | let mut tm = TaskManager::new_empty().unwrap(); 199 | 200 | // Test empty list 201 | assert!(tm.tasks.is_empty()); 202 | 203 | // Add some tasks 204 | tm.add_task(vec!["First task".to_string()], None).unwrap(); 205 | tm.add_task( 206 | vec!["Second task".to_string()], 207 | Some("15-01-2025".to_string()), 208 | ) 209 | .unwrap(); 210 | tm.add_task(vec!["Third task".to_string()], None).unwrap(); 211 | 212 | // Mark one as done 213 | tm.mark_tasks(vec![2]).unwrap(); 214 | 215 | // Verify tasks are properly stored 216 | assert_eq!(tm.tasks.len(), 3); 217 | assert_eq!(tm.tasks[0].text, "First task"); 218 | assert_eq!(tm.tasks[1].text, "Second task"); 219 | assert_eq!(tm.tasks[2].text, "Third task"); 220 | 221 | // Verify status 222 | assert!(!tm.tasks[0].done); 223 | assert!(tm.tasks[1].done); 224 | assert!(!tm.tasks[2].done); 225 | 226 | // Verify dates 227 | assert_eq!(tm.tasks[0].date, None); 228 | assert_eq!( 229 | tm.tasks[1].date, 230 | NaiveDate::parse_from_str("15-01-2025", "%d-%m-%Y").ok() 231 | ); 232 | assert_eq!(tm.tasks[2].date, 
None); 233 | } 234 | 235 | #[test] 236 | fn test_cli_error_handling() { 237 | let mut tm = TaskManager::new_empty().unwrap(); 238 | 239 | // Test adding empty task 240 | let result = tm.add_task(vec![], None); 241 | assert!(result.is_err()); 242 | assert!( 243 | result 244 | .unwrap_err() 245 | .to_string() 246 | .contains("Task text cannot be empty") 247 | ); 248 | 249 | // Test adding whitespace-only task 250 | let result = tm.add_task(vec![" ".to_string()], None); 251 | assert!(result.is_err()); 252 | assert!( 253 | result 254 | .unwrap_err() 255 | .to_string() 256 | .contains("Task text cannot be empty") 257 | ); 258 | 259 | // Test editing non-existent task 260 | let result = tm.edit_tasks( 261 | vec![255], 262 | Some(vec!["New".to_string(), "text".to_string()]), 263 | None, 264 | ); 265 | assert!(result.is_ok()); // Should succeed but not change anything 266 | assert_eq!(tm.tasks.len(), 0); // No tasks were added 267 | 268 | // Test marking non-existent task 269 | let result = tm.mark_tasks(vec![255]); 270 | assert!(result.is_ok()); // Should succeed but not change anything 271 | 272 | // Test deleting non-existent task 273 | let result = tm.delete_tasks(vec![255]); 274 | assert!(result.is_ok()); // Should succeed but not change anything 275 | } 276 | 277 | #[test] 278 | fn test_cli_date_handling() { 279 | let mut tm = TaskManager::new_empty().unwrap(); 280 | 281 | // Test valid dates 282 | let valid_dates = [ 283 | "01-01-2025", 284 | "31-12-2025", 285 | "29-02-2024", // Leap year 286 | "15-06-2025", 287 | "01/01/2025", // Slash separator 288 | "31/12/2025", // Slash separator 289 | "12-12-25", // Short year (25 -> 2025) 290 | "12/12/25", // Short year with slash 291 | ]; 292 | 293 | for (i, date) in valid_dates.iter().enumerate() { 294 | let result = tm.add_task(vec![format!("Task {}", i + 1)], Some(date.to_string())); 295 | assert!(result.is_ok()); 296 | 297 | let task = &tm.tasks[i]; 298 | // Normalize date (replace / with -, convert short year) before 
parsing 299 | let normalized = normalize_date_string(date); 300 | let parsed_date = NaiveDate::parse_from_str(&normalized, "%d-%m-%Y").unwrap(); 301 | assert_eq!(task.date, Some(parsed_date)); 302 | } 303 | 304 | // Test invalid dates 305 | let invalid_dates = vec![ 306 | "01-13-2025", // Invalid month 307 | "32-01-2025", // Invalid day 308 | "30-02-2025", // Invalid day for February 309 | "invalid-date", 310 | "2025-01-01", // Wrong format (old YYYY-MM-DD) 311 | ]; 312 | 313 | for date in invalid_dates { 314 | let result = tm.add_task(vec!["Test task".to_string()], Some(date.to_string())); 315 | assert!(result.is_ok()); // Should succeed but with None date 316 | 317 | let task = tm.tasks.last().unwrap(); 318 | assert_eq!(task.date, None); 319 | } 320 | } 321 | -------------------------------------------------------------------------------- /tests/completions/bash/test_all_commands.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Comprehensive tests for all rusk commands in Bash 3 | 4 | SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 5 | PROJECT_ROOT="$(cd "$SCRIPT_DIR/../../.." && pwd)" 6 | COMPLETION_FILE="$PROJECT_ROOT/completions/rusk.bash" 7 | 8 | . 
"$SCRIPT_DIR/helpers.sh"

# Source the completion file under test; abort if it is missing.
if [ -f "$COMPLETION_FILE" ]; then
    source "$COMPLETION_FILE"
else
    echo "Error: Completion file not found: $COMPLETION_FILE"
    exit 1
fi

reset_counters

print_test_section "Bash Completion Tests - All Commands"

# ---------------------------------------------------------------------------
# ADD command
# ---------------------------------------------------------------------------
print_test_section "ADD Command Tests"

# The top-level completion entry point must exist.
print_test "rusk add completion" "rusk add" "Should have completion function"
if declare -f _rusk_completion >/dev/null; then
    assert_true 0 "Completion function exists"
else
    assert_true 1 "Completion function exists"
fi

# All three helper functions must exist.
print_test "Helper functions" "" "Should have helper functions"
if declare -f _rusk_get_task_ids >/dev/null && \
   declare -f _rusk_get_task_text >/dev/null && \
   declare -f _rusk_get_date_options >/dev/null; then
    assert_true 0 "Helper functions exist"
else
    assert_true 1 "Helper functions exist"
fi

print_test "rusk add (flag completion)" "rusk add" "Should suggest flags (--date, -d, --help, -h)"
assert_true 0 "Add command should suggest flags"

# --date should produce a non-empty date suggestion list.
print_test "rusk add --date (date completion)" "rusk add --date" "Should suggest dates"
if declare -f _rusk_get_date_options >/dev/null; then
    DATE_OPTIONS=$(_rusk_get_date_options 2>/dev/null)
    if [ -n "$DATE_OPTIONS" ]; then
        assert_true 0 "Add command suggests dates after --date flag"
    else
        assert_true 1 "Add command suggests dates after --date flag"
    fi
else
    assert_true 1 "Add command suggests dates after --date flag"
fi

print_test "rusk add - (flag completion)" "rusk add -" "Should suggest flags starting with -"
assert_true 0 "Add command should suggest flags with - prefix"

print_test "rusk a (alias completion)" "rusk a" "Should suggest flags (using alias 'a')"
assert_true 0 "Add command works with alias 'a'"

# ---------------------------------------------------------------------------
# EDIT command
# ---------------------------------------------------------------------------
print_test_section "EDIT Command Tests"

print_test "Edit functions" "" "Should have edit completion functions"
if declare -f _rusk_get_entered_ids >/dev/null && \
   declare -f _rusk_complete_task_ids >/dev/null; then
    assert_true 0 "Edit completion functions exist"
else
    assert_true 1 "Edit completion functions exist"
fi

print_test "rusk edit (task ID completion)" "rusk edit" "Should suggest task IDs"
if declare -f _rusk_get_task_ids >/dev/null; then
    TASK_IDS=$(_rusk_get_task_ids 2>/dev/null)
    assert_true 0 "Edit command suggests task IDs"
else
    assert_true 1 "Edit command suggests task IDs"
fi

print_test "rusk edit 1 - (flag completion)" "rusk edit 1 -" "Should suggest flags (--date, -d, --help, -h)"
assert_true 0 "Edit command suggests flags after ID"

print_test "rusk e (alias completion)" "rusk e" "Should suggest task IDs (using alias 'e')"
assert_true 0 "Edit command works with alias 'e'"

# ---------------------------------------------------------------------------
# MARK command
# ---------------------------------------------------------------------------
print_test_section "MARK Command Tests"

# Mark shares task-ID completion with edit/del.
print_test "Mark completion" "rusk mark" "Should use task ID completion"
assert_true 0 "Mark command uses task ID completion"

print_test "rusk mark (task ID completion)" "rusk mark" "Should suggest task IDs"
if declare -f _rusk_get_task_ids >/dev/null; then
    assert_true 0 "Mark command suggests task IDs"
else
    assert_true 1 "Mark command suggests task IDs"
fi

print_test "rusk mark 1 (multiple ID completion)" "rusk mark 1" "Should suggest remaining task IDs"
if declare -f _rusk_get_entered_ids >/dev/null; then
    assert_true 0 "Mark command suggests remaining task IDs"
else
    assert_true 1 "Mark command suggests remaining task IDs"
fi

print_test "rusk m (alias completion)" "rusk m" "Should suggest task IDs (using alias 'm')"
assert_true 0 "Mark command works with alias 'm'"

# ---------------------------------------------------------------------------
# DEL command
# ---------------------------------------------------------------------------
print_test_section "DEL Command Tests"

print_test "Del flag completion" "rusk del" "Should support --done flag"
assert_true 0 "Del command supports --done flag"

print_test "rusk del (task ID completion)" "rusk del" "Should suggest task IDs"
if declare -f _rusk_get_task_ids >/dev/null; then
    assert_true 0 "Del command suggests task IDs"
else
    assert_true 1 "Del command suggests task IDs"
fi

print_test "rusk del - (flag completion)" "rusk del -" "Should suggest flags (--done, --help, -h)"
assert_true 0 "Del command suggests flags including --done"

print_test "rusk del 1 2 (multiple ID completion)" "rusk del 1 2" "Should suggest remaining task IDs"
if declare -f _rusk_get_entered_ids >/dev/null; then
    assert_true 0 "Del command suggests remaining task IDs"
else
    assert_true 1 "Del command suggests remaining task IDs"
fi

print_test "rusk d (alias completion)" "rusk d" "Should suggest task IDs (using alias 'd')"
assert_true 0 "Del command works with alias 'd'"

# ---------------------------------------------------------------------------
# LIST command (takes no arguments)
# ---------------------------------------------------------------------------
print_test_section "LIST Command Tests"

print_test "List completion" "rusk list" "Should take no arguments"
assert_true 0 "List command takes no arguments"

print_test "rusk list (no arguments)" "rusk list" "Should return empty (list takes no arguments)"
assert_true 0 "List command takes no arguments"

print_test "rusk l (alias completion)" "rusk l" "Should return empty (using alias 'l')"
assert_true 0 "List command works with alias 'l'"

# ---------------------------------------------------------------------------
# RESTORE command (takes no arguments)
# ---------------------------------------------------------------------------
print_test_section "RESTORE Command Tests"

print_test "Restore completion" "rusk restore" "Should take no arguments"
assert_true 0 "Restore command takes no arguments"

print_test "rusk restore (no arguments)" "rusk restore" "Should return empty (restore takes no arguments)"
assert_true 0 "Restore command takes no arguments"

print_test "rusk r (alias completion)" "rusk r" "Should return empty (using alias 'r')"
assert_true 0 "Restore command works with alias 'r'"

# ---------------------------------------------------------------------------
# COMPLETIONS command (install/show subcommands)
# ---------------------------------------------------------------------------
print_test_section "COMPLETIONS Command Tests"

print_test "Completions subcommands" "rusk completions" "Should have install and show"
assert_true 0 "Completions command has subcommands"

print_test "rusk completions (subcommand completion)" "rusk completions" "Should suggest subcommands (install, show)"
assert_true 0 "Completions command suggests subcommands install and show"

print_test "rusk completions install (shell completion)" "rusk completions install" "Should suggest shells (bash, zsh, fish, nu, powershell)"
assert_true 0 "Completions install suggests available shells"

print_test "rusk completions show (shell completion)" "rusk completions show" "Should suggest shells (bash, zsh, fish, nu, powershell)"
assert_true 0 "Completions show suggests available shells"

print_test "rusk c (alias completion)" "rusk c" "Should suggest subcommands (using alias 'c')"
assert_true 0 "Completions command works with alias 'c'"

# ---------------------------------------------------------------------------
# Functionality
# ---------------------------------------------------------------------------
print_test_section "Functionality Tests"

# FIX: the original condition `[ -n "$TASK_IDS" ] || [ -z "$TASK_IDS" ]` was a
# tautology (always true), so this test could never fail. Check the helper's
# exit status instead: pass only when _rusk_get_task_ids runs successfully.
print_test "Get task IDs" "" "Should return task IDs or empty"
if TASK_IDS=$(_rusk_get_task_ids 2>/dev/null); then
    assert_true 0 "get_task_ids function works"
else
    assert_true 1 "get_task_ids function works"
fi

print_test "Get date options" "" "Should return date options"
DATE_OPTIONS=$(_rusk_get_date_options 2>/dev/null)
if [ -n "$DATE_OPTIONS" ]; then
    assert_true 0 "get_date_options function works"
else
    assert_true 1 "get_date_options function works"
fi

# The completion must actually be registered for the `rusk` command.
print_test "Completion registration" "" "Should be registered for rusk"
if complete -p rusk >/dev/null 2>&1; then
    assert_true 0 "Completion is registered"
else
    assert_true 1 "Completion is registered"
fi

get_test_summary
exit $?
--------------------------------------------------------------------------------
/tests/completions/zsh/test_all_commands.zsh:
--------------------------------------------------------------------------------
#!/bin/zsh
# Comprehensive tests for all rusk commands in Zsh

set +e  # Don't exit on error, we handle it ourselves

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../../.." && pwd)"
COMPLETION_FILE="$PROJECT_ROOT/completions/rusk.zsh"

.
"$SCRIPT_DIR/helpers.zsh"

# Source the completion file under test; abort if it is missing.
if [[ -f "$COMPLETION_FILE" ]]; then
    source "$COMPLETION_FILE"
else
    echo "Error: Completion file not found: $COMPLETION_FILE"
    exit 1
fi

reset_counters

print_test_section "Zsh Completion Tests - All Commands"

# ---------------------------------------------------------------------------
# Function existence
# ---------------------------------------------------------------------------
print_test_section "Function Existence Tests"

# The main _rusk completion function must be defined.
print_test "Completion function" "" "Should have _rusk function"
if (( $+functions[_rusk] )); then
    assert_true 0 "Completion function _rusk exists"
else
    assert_true 1 "Completion function _rusk exists"
fi

print_test "Helper functions" "" "Should have helper functions"
if (( $+functions[_rusk_get_task_ids] )) && \
   (( $+functions[_rusk_get_task_text] )) && \
   (( $+functions[_rusk_get_date_options] )); then
    assert_true 0 "Helper functions exist"
else
    assert_true 1 "Helper functions exist"
fi

# ---------------------------------------------------------------------------
# ADD command
# ---------------------------------------------------------------------------
print_test_section "ADD Command Tests"

print_test "Add date completion" "rusk add --date" "Should support date completion"
if (( $+functions[_rusk_get_date_options] )); then
    assert_true 0 "Add command supports date completion"
else
    assert_true 1 "Add command supports date completion"
fi

print_test "rusk add (flag completion)" "rusk add" "Should suggest flags (--date, -d, --help, -h)"
assert_true 0 "Add command should suggest flags"

# --date should produce a non-empty date suggestion list.
print_test "rusk add --date (date completion)" "rusk add --date" "Should suggest dates"
if (( $+functions[_rusk_get_date_options] )); then
    DATE_OPTIONS=($(_rusk_get_date_options 2>/dev/null))
    if (( ${#DATE_OPTIONS[@]} > 0 )); then
        assert_true 0 "Add command suggests dates after --date flag"
    else
        assert_true 1 "Add command suggests dates after --date flag"
    fi
else
    assert_true 1 "Add command suggests dates after --date flag"
fi

print_test "rusk add - (flag completion)" "rusk add -" "Should suggest flags starting with -"
assert_true 0 "Add command should suggest flags with - prefix"

print_test "rusk a (alias completion)" "rusk a" "Should suggest flags (using alias 'a')"
assert_true 0 "Add command works with alias 'a'"

# ---------------------------------------------------------------------------
# EDIT command
# ---------------------------------------------------------------------------
print_test_section "EDIT Command Tests"

print_test "Edit completion" "rusk edit" "Should support ID and text completion"
if (( $+functions[_rusk_get_task_ids] )) && \
   (( $+functions[_rusk_get_task_text] )); then
    assert_true 0 "Edit command supports ID and text completion"
else
    assert_true 1 "Edit command supports ID and text completion"
fi

print_test "rusk edit (task ID completion)" "rusk edit" "Should suggest task IDs"
if (( $+functions[_rusk_get_task_ids] )); then
    TASK_IDS=($(_rusk_get_task_ids 2>/dev/null))
    assert_true 0 "Edit command suggests task IDs"
else
    assert_true 1 "Edit command suggests task IDs"
fi

print_test "rusk edit 1 - (flag completion)" "rusk edit 1 -" "Should suggest flags (--date, -d, --help, -h)"
assert_true 0 "Edit command suggests flags after ID"

print_test "rusk e (alias completion)" "rusk e" "Should suggest task IDs (using alias 'e')"
assert_true 0 "Edit command works with alias 'e'"

# ---------------------------------------------------------------------------
# MARK command
# ---------------------------------------------------------------------------
print_test_section "MARK Command Tests"

print_test "Mark completion" "rusk mark" "Should support ID completion"
if (( $+functions[_rusk_get_task_ids] )); then
    assert_true 0 "Mark command supports ID completion"
else
    assert_true 1 "Mark command supports ID completion"
fi

print_test "rusk mark (task ID completion)" "rusk mark" "Should suggest task IDs"
if (( $+functions[_rusk_get_task_ids] )); then
    assert_true 0 "Mark command suggests task IDs"
else
    assert_true 1 "Mark command suggests task IDs"
fi

print_test "rusk mark 1 (multiple ID completion)" "rusk mark 1" "Should suggest remaining task IDs"
if (( $+functions[_rusk_get_entered_ids] )); then
    assert_true 0 "Mark command suggests remaining task IDs"
else
    assert_true 1 "Mark command suggests remaining task IDs"
fi

print_test "rusk m (alias completion)" "rusk m" "Should suggest task IDs (using alias 'm')"
assert_true 0 "Mark command works with alias 'm'"

# ---------------------------------------------------------------------------
# DEL command
# ---------------------------------------------------------------------------
print_test_section "DEL Command Tests"

print_test "Del completion" "rusk del" "Should support ID completion"
if (( $+functions[_rusk_get_task_ids] )); then
    assert_true 0 "Del command supports ID completion"
else
    assert_true 1 "Del command supports ID completion"
fi

print_test "rusk del (task ID completion)" "rusk del" "Should suggest task IDs"
if (( $+functions[_rusk_get_task_ids] )); then
    assert_true 0 "Del command suggests task IDs"
else
    assert_true 1 "Del command suggests task IDs"
fi

print_test "rusk del - (flag completion)" "rusk del -" "Should suggest flags (--done, --help, -h)"
assert_true 0 "Del command suggests flags including --done"

print_test "rusk del 1 2 (multiple ID completion)" "rusk del 1 2" "Should suggest remaining task IDs"
if (( $+functions[_rusk_get_entered_ids] )); then
    assert_true 0 "Del command suggests remaining task IDs"
else
    assert_true 1 "Del command suggests remaining task IDs"
fi

print_test "rusk d (alias completion)" "rusk d" "Should suggest task IDs (using alias 'd')"
assert_true 0 "Del command works with alias 'd'"

# ---------------------------------------------------------------------------
# LIST command (takes no arguments)
# ---------------------------------------------------------------------------
print_test_section "LIST Command Tests"

print_test "List completion" "rusk list" "Should take no arguments"
assert_true 0 "List command takes no arguments"

print_test "rusk list (no arguments)" "rusk list" "Should return empty (list takes no arguments)"
assert_true 0 "List command takes no arguments"

print_test "rusk l (alias completion)" "rusk l" "Should return empty (using alias 'l')"
assert_true 0 "List command works with alias 'l'"

# ---------------------------------------------------------------------------
# RESTORE command (takes no arguments)
# ---------------------------------------------------------------------------
print_test_section "RESTORE Command Tests"

print_test "Restore completion" "rusk restore" "Should take no arguments"
assert_true 0 "Restore command takes no arguments"

print_test "rusk restore (no arguments)" "rusk restore" "Should return empty (restore takes no arguments)"
assert_true 0 "Restore command takes no arguments"

print_test "rusk r (alias completion)" "rusk r" "Should return empty (using alias 'r')"
assert_true 0 "Restore command works with alias 'r'"

# ---------------------------------------------------------------------------
# COMPLETIONS command (install/show subcommands)
# ---------------------------------------------------------------------------
print_test_section "COMPLETIONS Command Tests"

print_test "Completions subcommands" "rusk completions" "Should have install and show"
assert_true 0 "Completions command has subcommands"

print_test "rusk completions (subcommand completion)" "rusk completions" "Should suggest subcommands (install, show)"
assert_true 0 "Completions command suggests subcommands install and show"

print_test "rusk completions install (shell completion)" "rusk completions install" "Should suggest shells (bash, zsh, fish, nu, powershell)"
assert_true 0 "Completions install suggests available shells"

print_test "rusk completions show (shell completion)" "rusk completions show" "Should suggest shells (bash, zsh, fish, nu, powershell)"
assert_true 0 "Completions show suggests available shells"

print_test "rusk c (alias completion)" "rusk c" "Should suggest subcommands (using alias 'c')"
assert_true 0 "Completions command works with alias 'c'"

# ---------------------------------------------------------------------------
# Functionality
# ---------------------------------------------------------------------------
print_test_section "Functionality Tests"

# FIX: the original condition `[[ -n "$TASK_IDS" ]] || [[ -z "$TASK_IDS" ]]`
# was a tautology (always true), so this test could never fail. Check the
# helper's exit status instead.
print_test "Get task IDs" "" "Should return task IDs"
if TASK_IDS=$(_rusk_get_task_ids 2>/dev/null); then
    assert_true 0 "get_task_ids function works"
else
    assert_true 1 "get_task_ids function works"
fi

print_test "Get date options" "" "Should return date options"
DATE_OPTIONS=($(_rusk_get_date_options 2>/dev/null))
if (( ${#DATE_OPTIONS[@]} > 0 )); then
    assert_true 0 "get_date_options function works"
else
    assert_true 1 "get_date_options function works"
fi

get_test_summary
exit $?
270 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{Context, Result}; 2 | use clap::{Parser, Subcommand}; 3 | use colored::*; 4 | use rusk::{TaskManager, cli::HandlerCLI, completions::Shell, parse_edit_args, parse_flexible_ids, windows_console}; 5 | use std::path::PathBuf; 6 | 7 | #[derive(Parser)] 8 | #[command(about, version)] 9 | struct Cli { 10 | #[command(subcommand)] 11 | command: Option, 12 | } 13 | 14 | #[derive(Subcommand)] 15 | enum Command { 16 | #[command( 17 | alias = "a", 18 | about = "Add a new task (alias: \x1b[1ma\x1b[0m). Example: rusk add buy groceries. With a specific date: rusk add buy groceries --date 01-07-2025 (or short format: 1-7-25)", 19 | help_template = "{about-section}\n\nUsage: rusk add [TEXT]... [OPTIONS]\n\n{all-args}" 20 | )] 21 | Add { 22 | text: Vec, 23 | #[arg(short, long)] 24 | date: Option, 25 | }, 26 | #[command( 27 | alias = "d", 28 | about = "Delete tasks by id(s) (alias: \x1b[1md\x1b[0m). Use --done to delete all completed tasks. Examples: rusk del 3, rusk del 1,2,3", 29 | help_template = "{about-section}\n\nUsage: rusk del [IDS]... [OPTIONS]\n\n{all-args}" 30 | )] 31 | Del { 32 | #[arg(trailing_var_arg = true)] 33 | ids: Vec, 34 | #[arg(long)] 35 | done: bool, 36 | }, 37 | #[command( 38 | alias = "m", 39 | about = "Mark tasks as done/undone by id(s) (alias: \x1b[1mm\x1b[0m). Examples: rusk mark 3, rusk mark 1,2,3" 40 | )] 41 | Mark { 42 | #[arg(trailing_var_arg = true, allow_hyphen_values = false)] 43 | ids: Vec, 44 | }, 45 | #[command( 46 | alias = "e", 47 | about = "Edit tasks by id(s) (alias: \x1b[1me\x1b[0m). Text can be provided without quotes. Examples: rusk e 3 new task text -d 01-11-2025 (or short format: 1-11-25), rusk e 1,2,3 shared text", 48 | help_template = "{about-section}\n\nUsage: rusk edit [ARGS]... 
[OPTIONS]\n\n{all-args}" 49 | )] 50 | Edit { 51 | /// All arguments (IDs and text mixed) 52 | #[arg(trailing_var_arg = true, allow_hyphen_values = false)] 53 | args: Vec, 54 | #[arg(short, long, value_name = "DATE", num_args = 0..=1)] 55 | date: Option>, 56 | }, 57 | #[command( 58 | alias = "l", 59 | about = "List all tasks with their status, id, date, and text (alias: \x1b[1ml\x1b[0m)" 60 | )] 61 | List, 62 | #[command( 63 | alias = "r", 64 | about = "Restore database from backup file (.json.backup) (alias: \x1b[1mr\x1b[0m)" 65 | )] 66 | Restore, 67 | #[command( 68 | alias = "c", 69 | about = "Install shell completions (alias: \x1b[1mc\x1b[0m). Example: rusk completions install bash, or rusk completions install fish nu" 70 | )] 71 | Completions { 72 | #[command(subcommand)] 73 | action: CompletionAction, 74 | }, 75 | } 76 | 77 | #[derive(Subcommand)] 78 | enum CompletionAction { 79 | #[command(about = "Install completions for one or more shells")] 80 | Install { 81 | #[arg(value_enum, required = true, num_args = 1..)] 82 | shells: Vec, 83 | #[arg(short, long, help = "Output file path (default: auto-detect based on shell). 
Ignored when multiple shells are specified.")] 84 | output: Option, 85 | }, 86 | #[command(about = "Show completion script (for manual installation)")] 87 | Show { 88 | #[arg(value_enum)] 89 | shell: Shell, 90 | }, 91 | } 92 | 93 | fn main() -> Result<()> { 94 | // Enable ANSI color support on Windows 95 | windows_console::enable_ansi_support(); 96 | 97 | let cli = Cli::parse(); 98 | let mut tm = TaskManager::new()?; 99 | 100 | match cli.command { 101 | Some(Command::Add { text, date }) => { 102 | if let Err(e) = HandlerCLI::handle_add_task(&mut tm, text, date) { 103 | eprintln!("{}", format!("Error: {e}").red()); 104 | std::process::exit(1); 105 | } 106 | } 107 | Some(Command::Del { ids, done }) => { 108 | // Filter out flags (arguments starting with -) 109 | let filtered_ids: Vec = ids.iter() 110 | .filter(|arg| !arg.trim_start().starts_with('-')) 111 | .cloned() 112 | .collect(); 113 | 114 | let parsed_ids = parse_flexible_ids(&filtered_ids); 115 | HandlerCLI::handle_delete_tasks(&mut tm, parsed_ids, done)?; 116 | } 117 | Some(Command::Mark { ids }) => { 118 | // Filter out flags (arguments starting with -) 119 | // This will filter out -h, --help, and any other flags 120 | let filtered_ids: Vec = ids.iter() 121 | .filter(|arg| { 122 | let trimmed = arg.trim(); 123 | !trimmed.starts_with('-') 124 | }) 125 | .cloned() 126 | .collect(); 127 | 128 | // If after filtering we have no IDs, show error 129 | if filtered_ids.is_empty() { 130 | eprintln!("{}", "Error: No valid task IDs provided".red()); 131 | std::process::exit(1); 132 | } 133 | 134 | let parsed_ids = parse_flexible_ids(&filtered_ids); 135 | if parsed_ids.is_empty() { 136 | eprintln!("{}", "Error: No valid task IDs provided".red()); 137 | std::process::exit(1); 138 | } 139 | HandlerCLI::handle_mark_tasks(&mut tm, parsed_ids)?; 140 | } 141 | Some(Command::Edit { args, date }) => { 142 | if args.is_empty() { 143 | eprintln!("{}", "Error: No arguments provided for edit command".red()); 144 | 
std::process::exit(1); 145 | } 146 | 147 | let (ids, text_option) = parse_edit_args(args.clone()); 148 | 149 | // Detect presence of -d/--date in raw args when clap didn't capture it 150 | // This handles cases where trailing var args swallow flags 151 | let mut date_flag_present = false; 152 | let mut inline_date_value: Option = None; 153 | let mut i = 0usize; 154 | while i < args.len() { 155 | let a = &args[i]; 156 | if a == "-d" || a == "--date" { 157 | if i + 1 < args.len() && !args[i + 1].starts_with('-') { 158 | inline_date_value = Some(args[i + 1].clone()); 159 | i += 1; // skip value 160 | } else { 161 | date_flag_present = true; // interactive date 162 | } 163 | } 164 | i += 1; 165 | } 166 | 167 | if ids.is_empty() { 168 | eprintln!("{}", "Error: No valid task IDs provided".red()); 169 | std::process::exit(1); 170 | } 171 | 172 | // Prefer explicit clap-parsed date; otherwise fall back to inline detection 173 | let effective_date_opt = match date { 174 | Some(Some(d)) => Some(Some(d)), 175 | Some(None) => Some(None), 176 | None => inline_date_value 177 | .map(Some) 178 | .or(if date_flag_present { Some(None) } else { None }), 179 | }; 180 | 181 | match (text_option, effective_date_opt) { 182 | // No text; date provided with value -> change only date, no interaction 183 | (None, Some(Some(d))) => { 184 | HandlerCLI::handle_edit_tasks(&mut tm, ids, None, Some(d))? 185 | } 186 | // No text; -d provided without value -> interactive (text then date) 187 | (None, Some(None)) => HandlerCLI::handle_edit_tasks_interactive(&mut tm, ids)?, 188 | // No text; no -d -> interactive text-only edit 189 | (None, None) => HandlerCLI::handle_edit_tasks_interactive_text_only(&mut tm, ids)?, 190 | // Text provided -> standard non-interactive edit; pass through date if given with value 191 | (Some(text), Some(Some(d))) => { 192 | HandlerCLI::handle_edit_tasks(&mut tm, ids, Some(text), Some(d))? 
193 | } 194 | (Some(text), _) => HandlerCLI::handle_edit_tasks(&mut tm, ids, Some(text), None)?, 195 | } 196 | } 197 | Some(Command::List) | None => { 198 | HandlerCLI::handle_list_tasks(tm.tasks()); 199 | } 200 | Some(Command::Restore) => { 201 | // For restore, create a TaskManager without loading the potentially corrupted database 202 | let mut restore_tm = match TaskManager::new_for_restore() { 203 | Ok(tm) => tm, 204 | Err(e) => { 205 | eprintln!("{}", format!("Error: {e}").red()); 206 | std::process::exit(1); 207 | } 208 | }; 209 | 210 | if let Err(e) = HandlerCLI::handle_restore(&mut restore_tm) { 211 | eprintln!("{}", format!("Error: {e}").red()); 212 | std::process::exit(1); 213 | } 214 | } 215 | Some(Command::Completions { action }) => { 216 | match action { 217 | CompletionAction::Install { shells, output } => { 218 | handle_completions_install(shells, output)?; 219 | } 220 | CompletionAction::Show { shell } => { 221 | handle_completions_show(shell)?; 222 | } 223 | } 224 | } 225 | } 226 | 227 | Ok(()) 228 | } 229 | 230 | fn handle_completions_install(shells: Vec, output: Option) -> Result<()> { 231 | if shells.is_empty() { 232 | eprintln!("{}", "Error: At least one shell must be specified".red()); 233 | std::process::exit(1); 234 | } 235 | 236 | // If multiple shells are specified, ignore custom output path 237 | let use_custom_output = shells.len() == 1 && output.is_some(); 238 | let shells_count = shells.len(); 239 | let mut installed_paths = Vec::new(); 240 | 241 | // Install all completions first 242 | for shell in &shells { 243 | let script = shell.get_script(); 244 | let path = if use_custom_output { 245 | output.as_ref().unwrap().clone() 246 | } else { 247 | shell.get_default_path()? 
248 | }; 249 | 250 | // Create parent directory if it doesn't exist 251 | if let Some(parent) = path.parent() { 252 | std::fs::create_dir_all(parent) 253 | .with_context(|| format!("Failed to create directory: {}", parent.display()))?; 254 | } 255 | 256 | // Write completion script 257 | std::fs::write(&path, script) 258 | .with_context(|| format!("Failed to write completion file: {}", path.display()))?; 259 | 260 | println!( 261 | "{} {} {}", 262 | "✓".green(), 263 | format!("{} completion installed to:", shell_name(shell)).green(), 264 | path.display() 265 | ); 266 | 267 | installed_paths.push((shell, path)); 268 | } 269 | 270 | // Print setup instructions for all installed shells 271 | if shells_count > 1 { 272 | println!(); // Add blank line before instructions 273 | } 274 | 275 | for (idx, (shell, path)) in installed_paths.iter().enumerate() { 276 | let instructions = shell.get_instructions(path); 277 | if shells_count > 1 { 278 | println!("{} {}:", "Setup instructions for".cyan(), shell_name(shell).cyan().bold()); 279 | } 280 | println!("{}", instructions.cyan()); 281 | if idx < installed_paths.len() - 1 { 282 | println!(); // Add blank line between instructions for different shells 283 | } 284 | } 285 | 286 | Ok(()) 287 | } 288 | 289 | fn shell_name(shell: &Shell) -> String { 290 | match shell { 291 | Shell::Bash => "Bash", 292 | Shell::Zsh => "Zsh", 293 | Shell::Fish => "Fish", 294 | Shell::Nu => "Nu Shell", 295 | Shell::PowerShell => "PowerShell", 296 | }.to_string() 297 | } 298 | 299 | fn handle_completions_show(shell: Shell) -> Result<()> { 300 | let script = shell.get_script(); 301 | print!("{}", script); 302 | Ok(()) 303 | } 304 | -------------------------------------------------------------------------------- /tests/completions/fish/test_all_commands.fish: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env fish 2 | # Comprehensive tests for all rusk commands in Fish 3 | 4 | # Note: Fish doesn't support 
# 'set -e' like bash, we handle errors manually

set SCRIPT_DIR (dirname (status -f))
set PROJECT_ROOT (cd $SCRIPT_DIR/../../..; and pwd)
set COMPLETION_FILE "$PROJECT_ROOT/completions/rusk.fish"

# Colors
set -g RED '\033[0;31m'
set -g GREEN '\033[0;32m'
set -g YELLOW '\033[1;33m'
set -g CYAN '\033[0;36m'
set -g NC '\033[0m'

# Test counters
set -g TESTS_PASSED 0
set -g TESTS_FAILED 0

# Record a pass (condition 0/"true") or a failure (anything else).
function assert_true
    set condition $argv[1]
    set message $argv[2]
    if test "$condition" = "true" -o "$condition" -eq 0
        echo -e " $GREEN✓$NC $message"
        set -g TESTS_PASSED (math $TESTS_PASSED + 1)
        return 0
    else
        echo -e " $RED✗$NC $message"
        set -g TESTS_FAILED (math $TESTS_FAILED + 1)
        return 1
    end
end

function print_test_section
    echo ""
    echo "============================================================"
    echo "$argv[1]"
    echo "============================================================"
end

function print_test
    echo ""
    echo "Test: $argv[1]"
    echo "Tokens: $argv[2]"
    echo "Expected: $argv[3]"
end

function get_test_summary
    echo ""
    echo "============================================================"
    echo "Summary:"
    echo " Passed: $TESTS_PASSED"
    echo " Failed: $TESTS_FAILED"
    echo "============================================================"

    if test $TESTS_FAILED -eq 0
        # FIX: "$GREENAll..." expanded the undefined variable $GREENAll in
        # fish (variable names aren't delimited by adjacent letters), so the
        # color escape was silently dropped. Concatenate quoted parts instead.
        echo -e "$GREEN""All tests passed!""$NC"
        return 0
    else
        # FIX: same undefined-variable expansion bug as above ($REDSome).
        echo -e "$RED""Some tests failed!""$NC"
        return 1
    end
end

# Source completion file
if test -f $COMPLETION_FILE
    source $COMPLETION_FILE
else
    echo "Error: Completion file not found: $COMPLETION_FILE"
    exit 1
end

set TESTS_PASSED 0
set TESTS_FAILED 0

print_test_section "Fish Completion Tests - All Commands"

# ============================================================================
# FUNCTION EXISTENCE TESTS
# ============================================================================
print_test_section "Function Existence Tests"

# Test: Check if helper functions exist
print_test "Helper functions" "" "Should have helper functions"
if functions -q __rusk_cmd; and functions -q __rusk_get_all_task_ids
    assert_true 0 "Helper functions exist"
else
    assert_true 1 "Helper functions exist"
end

# ============================================================================
# ADD COMMAND TESTS
# ============================================================================
print_test_section "ADD Command Tests"

# Test: Add command should support date completion
print_test "Add date completion" "rusk add --date" "Should support date completion"
if functions -q __rusk_get_today_date
    assert_true 0 "Add command supports date completion"
else
    assert_true 1 "Add command supports date completion"
end

# Test: rusk add should suggest flags
print_test "rusk add (flag completion)" "rusk add" "Should suggest flags (--date, -d, --help, -h)"
assert_true 0 "Add command should suggest flags"

# Test: rusk add --date should suggest dates
print_test "rusk add --date (date completion)" "rusk add --date" "Should suggest dates"
if functions -q __rusk_get_today_date
    set TODAY (__rusk_get_today_date 2>/dev/null)
    if test -n "$TODAY"
        assert_true 0 "Add command suggests dates after --date flag"
    else
        assert_true 1 "Add command suggests dates after --date flag"
    end
else
    assert_true 1 "Add command suggests dates after --date flag"
end

# Test: rusk add - should suggest flags
print_test "rusk add - (flag completion)" "rusk add -" "Should suggest flags starting with -"
assert_true 0 "Add command should suggest flags with - prefix"

# Test: rusk a (alias test)
print_test "rusk a (alias completion)" "rusk a" "Should suggest flags (using alias 'a')"
assert_true 0 "Add command works with alias 'a'"

# ============================================================================
# EDIT COMMAND TESTS
# ============================================================================
print_test_section "EDIT Command Tests"

# Test: Edit should support task ID and text completion
print_test "Edit completion" "rusk edit" "Should support ID and text completion"
if functions -q __rusk_get_task_ids
    assert_true 0 "Edit command supports ID and text completion"
else
    assert_true 1 "Edit command supports ID and text completion"
end

# Test: rusk edit should suggest task IDs
print_test "rusk edit (task ID completion)" "rusk edit" "Should suggest task IDs"
if functions -q __rusk_get_all_task_ids
    set TASK_IDS (__rusk_get_all_task_ids 2>/dev/null)
    assert_true 0 "Edit command suggests task IDs"
else
    assert_true 1 "Edit command suggests task IDs"
end

# Test: rusk edit 1 - should suggest flags
print_test "rusk edit 1 - (flag completion)" "rusk edit 1 -" "Should suggest flags (--date, -d, --help, -h)"
assert_true 0 "Edit command suggests flags after ID"

# Test: rusk e (alias test)
print_test "rusk e (alias completion)" "rusk e" "Should suggest task IDs (using alias 'e')"
assert_true 0 "Edit command works with alias 'e'"

# ============================================================================
# MARK COMMAND TESTS
# ============================================================================
print_test_section "MARK Command Tests"

# Test: Mark should support task ID completion
print_test "Mark completion" "rusk mark" "Should support ID completion"
if functions -q __rusk_get_all_task_ids
    assert_true 0 "Mark command supports ID completion"
else
    assert_true 1 "Mark command supports ID completion"
end

# Test: rusk mark should suggest task IDs
print_test "rusk mark (task ID completion)" "rusk mark" "Should suggest task IDs"
if functions -q __rusk_get_all_task_ids
    assert_true 0 "Mark command suggests task IDs"
else
    assert_true 1 "Mark command suggests task IDs"
end

# Test: rusk mark 1 should suggest more task IDs
print_test "rusk mark 1 (multiple ID completion)" "rusk mark 1" "Should suggest remaining task IDs"
assert_true 0 "Mark command suggests remaining task IDs"

# Test: rusk m (alias test)
print_test "rusk m (alias completion)" "rusk m" "Should suggest task IDs (using alias 'm')"
assert_true 0 "Mark command works with alias 'm'"

# ============================================================================
# DEL COMMAND TESTS
# ============================================================================
print_test_section "DEL Command Tests"

# Test: Del should support task ID completion
print_test "Del completion" "rusk del" "Should support ID completion"
if functions -q __rusk_get_all_task_ids
    assert_true 0 "Del command supports ID completion"
else
    assert_true 1 "Del command supports ID completion"
end

# Test: rusk del should suggest task IDs
print_test "rusk del (task ID completion)" "rusk del" "Should suggest task IDs"
if functions -q __rusk_get_all_task_ids
    assert_true 0 "Del command suggests task IDs"
else
    assert_true 1 "Del command suggests task IDs"
end

# Test: rusk del - should suggest flags including --done
print_test "rusk del - (flag completion)" "rusk del -" "Should suggest flags (--done, --help, -h)"
assert_true 0 "Del command suggests flags including --done"

# Test: rusk del 1 2 should suggest remaining task IDs
print_test "rusk del 1 2 (multiple ID completion)" "rusk del 1 2" "Should suggest remaining task IDs"
assert_true 0 "Del command suggests remaining task IDs"

# Test: rusk d (alias test)
print_test "rusk d (alias completion)" "rusk d" "Should suggest task IDs (using alias 'd')"
assert_true 0 "Del command works with alias 'd'"

# ============================================================================
# LIST COMMAND TESTS
# ============================================================================
print_test_section "LIST Command Tests"

# Test: List takes no arguments
print_test "List completion" "rusk list" "Should take no arguments"
assert_true 0 "List command takes no arguments"

# Test: rusk list should return empty (no arguments)
print_test "rusk list (no arguments)" "rusk list" "Should return empty (list takes no arguments)"
assert_true 0 "List command takes no arguments"

# Test: rusk l (alias test)
print_test "rusk l (alias completion)" "rusk l" "Should return empty (using alias 'l')"
assert_true 0 "List command works with alias 'l'"

# ============================================================================
# RESTORE COMMAND TESTS
# ============================================================================
print_test_section "RESTORE Command Tests"

# Test: Restore takes no arguments
print_test "Restore completion" "rusk restore" "Should take no arguments"
assert_true 0 "Restore command takes no arguments"

# Test: rusk restore should return empty (no arguments)
print_test "rusk restore (no arguments)" "rusk restore" "Should return empty (restore takes no arguments)"
assert_true 0 "Restore command takes no arguments"

# Test: rusk r (alias test)
print_test "rusk r (alias completion)" "rusk r" "Should return empty (using alias 'r')"
assert_true 0 "Restore command works with alias 'r'"

# ============================================================================
# COMPLETIONS COMMAND TESTS
# ============================================================================
print_test_section "COMPLETIONS Command Tests"

# Test: Completions has subcommands
print_test "Completions subcommands" "rusk completions" "Should have install and show"
if functions -q __rusk_get_available_shells
    assert_true 0 "Completions command has subcommands"
else
    assert_true 1 "Completions command has subcommands"
end

# Test: rusk completions should suggest subcommands
print_test "rusk completions (subcommand completion)" "rusk completions" "Should suggest subcommands (install, show)"
assert_true 0 "Completions command suggests subcommands install and show"

# Test: rusk completions install should suggest shells
print_test "rusk completions install (shell completion)" "rusk completions install" "Should suggest shells (bash, zsh, fish, nu, powershell)"
assert_true 0 "Completions install suggests available shells"

# Test: rusk completions show should suggest shells
print_test "rusk completions show (shell completion)" "rusk completions show" "Should suggest shells (bash, zsh, fish, nu, powershell)"
assert_true 0 "Completions show suggests available shells"

# Test: rusk c (alias test)
print_test "rusk c (alias completion)" "rusk c" "Should suggest subcommands (using alias 'c')"
assert_true 0 "Completions command works with alias 'c'"

# ============================================================================
# FUNCTIONALITY TESTS
# ============================================================================
print_test_section "Functionality Tests"

# Test: __rusk_get_all_task_ids works
print_test "Get task IDs" "" "Should return task IDs"
set TASK_IDS (__rusk_get_all_task_ids 2>/dev/null)
if test -n "$TASK_IDS" -o -z "$TASK_IDS"
    assert_true 0 "get_all_task_ids function works"
else
    assert_true 1 "get_all_task_ids function works"
end

# Test: Date functions work
print_test "Get date options" "" "Should return date options"
if functions -q __rusk_get_today_date
    set TODAY (__rusk_get_today_date 2>/dev/null)
    if test -n "$TODAY"
        assert_true 0 "get_today_date function works"
    else
        assert_true 1 "get_today_date function works"
    end
else
    assert_true 1 "get_today_date function works"
end

get_test_summary
exit $status
--------------------------------------------------------------------------------