├── rust-toolchain.toml ├── src ├── lib │ ├── database │ │ ├── types │ │ │ ├── mod.rs │ │ │ └── pacman.rs │ │ ├── mod.rs │ │ ├── compressor.rs │ │ ├── tests.rs │ │ ├── decompressor.rs │ │ └── pacman.rs │ ├── clippy.toml │ ├── resolver │ │ ├── mod.rs │ │ ├── types │ │ │ ├── mod.rs │ │ │ ├── plan.rs │ │ │ ├── resolve.rs │ │ │ ├── graph.rs │ │ │ └── context.rs │ │ ├── tests.rs │ │ └── planner.rs │ ├── storage │ │ ├── mod.rs │ │ ├── types │ │ │ ├── mod.rs │ │ │ ├── lockfile.rs │ │ │ ├── package.rs │ │ │ └── bytestream.rs │ │ ├── providers │ │ │ ├── mod.rs │ │ │ ├── filesystem.rs │ │ │ └── s3.rs │ │ ├── transaction.rs │ │ ├── pool.rs │ │ └── tests.rs │ ├── parser │ │ ├── mod.rs │ │ ├── tests.rs │ │ └── pacman.rs │ ├── consts.rs │ ├── prelude.rs │ ├── types │ │ ├── mod.rs │ │ ├── custom_package.rs │ │ ├── depend.rs │ │ ├── pacman.rs │ │ ├── version.rs │ │ └── remote_package.rs │ ├── alpm.rs │ ├── repository │ │ ├── empty.rs │ │ ├── custom.rs │ │ ├── mod.rs │ │ ├── merged.rs │ │ ├── pacman.rs │ │ ├── aur.rs │ │ ├── cached.rs │ │ └── tests.rs │ ├── lib.rs │ ├── builder │ │ ├── mod.rs │ │ ├── tests.rs │ │ ├── bare.rs │ │ └── nspawn.rs │ ├── utils.rs │ ├── error.rs │ └── tests.rs └── bin │ └── main.rs ├── demo.jpg ├── .gitignore ├── tests ├── archives │ ├── test.tar.gz │ ├── test.tar.xz │ ├── test.tar.zst │ └── test.tar ├── build │ ├── archer_dummy_a │ │ ├── Makefile │ │ ├── archer_dummy_a.cpp │ │ └── PKGBUILD │ └── archer_dummy_b │ │ ├── archer_dummy_b_1.cpp │ │ ├── archer_dummy_b_2.cpp │ │ ├── Makefile │ │ └── PKGBUILD ├── pkgs │ ├── acl-2.3.1-1-x86_64.pkg.tar.zst │ ├── a52dec-0.7.4-11-x86_64.pkg.tar.zst │ ├── aalib-1.4rc5-14-x86_64.pkg.tar.zst │ ├── accounts-qml-module-0.7-3-x86_64.pkg.tar.zst │ └── accountsservice-0.6.55-3-x86_64.pkg.tar.zst ├── pacman_conf │ ├── mirrorlist_clean │ └── pacman.conf └── naive.rs ├── .mergify.yml ├── .github ├── dependabot.yml └── workflows │ └── test.yml ├── package_layout.txt ├── Cargo.toml └── README.md /rust-toolchain.toml: 
-------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "nightly" -------------------------------------------------------------------------------- /src/lib/database/types/mod.rs: -------------------------------------------------------------------------------- 1 | pub use pacman::*; 2 | 3 | mod pacman; 4 | -------------------------------------------------------------------------------- /demo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LightQuantumArchive/archer/HEAD/demo.jpg -------------------------------------------------------------------------------- /src/lib/clippy.toml: -------------------------------------------------------------------------------- 1 | blacklisted-names = ["module-name-repetitions", "pub-enum-variant-names", "wildcard_imports"] -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /vgcore* 3 | /.idea 4 | /output.dot 5 | /tests/output 6 | /tests/stream.test 7 | /reference/* -------------------------------------------------------------------------------- /tests/archives/test.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LightQuantumArchive/archer/HEAD/tests/archives/test.tar.gz -------------------------------------------------------------------------------- /tests/archives/test.tar.xz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LightQuantumArchive/archer/HEAD/tests/archives/test.tar.xz -------------------------------------------------------------------------------- /tests/archives/test.tar.zst: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/LightQuantumArchive/archer/HEAD/tests/archives/test.tar.zst -------------------------------------------------------------------------------- /tests/build/archer_dummy_a/Makefile: -------------------------------------------------------------------------------- 1 | dummy_a: 2 | g++ archer_dummy_a.cpp -o archer_dummy_a 3 | clean: 4 | rm archer_dummy_a 5 | -------------------------------------------------------------------------------- /src/lib/database/mod.rs: -------------------------------------------------------------------------------- 1 | mod compressor; 2 | mod decompressor; 3 | mod pacman; 4 | mod types; 5 | 6 | #[cfg(test)] 7 | mod tests; 8 | -------------------------------------------------------------------------------- /tests/pkgs/acl-2.3.1-1-x86_64.pkg.tar.zst: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LightQuantumArchive/archer/HEAD/tests/pkgs/acl-2.3.1-1-x86_64.pkg.tar.zst -------------------------------------------------------------------------------- /tests/pkgs/a52dec-0.7.4-11-x86_64.pkg.tar.zst: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LightQuantumArchive/archer/HEAD/tests/pkgs/a52dec-0.7.4-11-x86_64.pkg.tar.zst -------------------------------------------------------------------------------- /tests/pkgs/aalib-1.4rc5-14-x86_64.pkg.tar.zst: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LightQuantumArchive/archer/HEAD/tests/pkgs/aalib-1.4rc5-14-x86_64.pkg.tar.zst -------------------------------------------------------------------------------- /tests/build/archer_dummy_a/archer_dummy_a.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | using namespace std; 3 | 4 | int main() { 5 | cout << "dummy a" << endl; 6 | } 7 | 
-------------------------------------------------------------------------------- /tests/build/archer_dummy_b/archer_dummy_b_1.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | using namespace std; 3 | 4 | int main() { 5 | cout << "dummy b 1" << endl; 6 | } 7 | -------------------------------------------------------------------------------- /tests/build/archer_dummy_b/archer_dummy_b_2.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | using namespace std; 3 | 4 | int main() { 5 | cout << "dummy b 2" << endl; 6 | } 7 | -------------------------------------------------------------------------------- /tests/pkgs/accounts-qml-module-0.7-3-x86_64.pkg.tar.zst: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LightQuantumArchive/archer/HEAD/tests/pkgs/accounts-qml-module-0.7-3-x86_64.pkg.tar.zst -------------------------------------------------------------------------------- /tests/pkgs/accountsservice-0.6.55-3-x86_64.pkg.tar.zst: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LightQuantumArchive/archer/HEAD/tests/pkgs/accountsservice-0.6.55-3-x86_64.pkg.tar.zst -------------------------------------------------------------------------------- /src/lib/resolver/mod.rs: -------------------------------------------------------------------------------- 1 | pub use planner::PlanBuilder; 2 | pub use tree_resolv::TreeResolver; 3 | 4 | mod planner; 5 | mod tree_resolv; 6 | pub mod types; 7 | 8 | #[cfg(test)] 9 | mod tests; 10 | -------------------------------------------------------------------------------- /tests/pacman_conf/mirrorlist_clean: -------------------------------------------------------------------------------- 1 | Server = http://mirrors.evowise.com/archlinux/$repo/os/$arch 2 | Server = 
http://mirror.rackspace.com/archlinux/$repo/os/$arch 3 | Server = https://mirror.rackspace.com/archlinux/$repo/os/$arch -------------------------------------------------------------------------------- /src/lib/storage/mod.rs: -------------------------------------------------------------------------------- 1 | pub use pool::PackagePool; 2 | pub use providers::StorageProvider; 3 | 4 | pub mod pool; 5 | pub mod providers; 6 | pub mod transaction; 7 | pub mod types; 8 | 9 | #[cfg(test)] 10 | mod tests; 11 | -------------------------------------------------------------------------------- /src/lib/resolver/types/mod.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::use_self)] 2 | 3 | pub use context::*; 4 | pub use graph::*; 5 | pub use plan::*; 6 | pub use resolve::*; 7 | 8 | mod context; 9 | mod graph; 10 | mod plan; 11 | mod resolve; 12 | -------------------------------------------------------------------------------- /tests/build/archer_dummy_b/Makefile: -------------------------------------------------------------------------------- 1 | all: dummy_b_1 dummy_b_2 2 | dummy_b_1: 3 | g++ archer_dummy_b_1.cpp -o archer_dummy_b_1 4 | dummy_b_2: 5 | g++ archer_dummy_b_2.cpp -o archer_dummy_b_2 6 | clean: 7 | rm archer_dummy_b_1 archer_dummy_b_2 -------------------------------------------------------------------------------- /.mergify.yml: -------------------------------------------------------------------------------- 1 | pull_request_rules: 2 | - name: Automatic merge on approval 3 | conditions: 4 | - author=dependabot[bot] 5 | - check-success=Lint 6 | - check-success=Test 7 | actions: 8 | merge: 9 | method: squash 10 | -------------------------------------------------------------------------------- /src/lib/parser/mod.rs: -------------------------------------------------------------------------------- 1 | use lazy_static::lazy_static; 2 | 3 | pub use pacman::*; 4 | 5 | pub mod pacman; 6 | 7 | #[cfg(test)] 8 | mod 
tests; 9 | 10 | lazy_static! { 11 | pub static ref GLOBAL_CONFIG: PacmanConf = PacmanConf::new().unwrap(); 12 | } 13 | -------------------------------------------------------------------------------- /src/lib/consts.rs: -------------------------------------------------------------------------------- 1 | pub const ROOT_PATH: &str = "/"; 2 | pub const PACMAN_DB_PATH: &str = "/var/lib/pacman"; 3 | pub const PACMAN_CONF_PATH: &str = "/etc/pacman.conf"; 4 | pub const MAKEPKG_CONF_PATH: &str = "/etc/makepkg.conf"; 5 | pub const STORAGE_MEMORY_LIMIT: u64 = 104_857_600; // 100 MB 6 | pub const LOCK_FILE_VERSION: u32 = 1; 7 | -------------------------------------------------------------------------------- /src/lib/prelude.rs: -------------------------------------------------------------------------------- 1 | pub use crate::alpm::GLOBAL_ALPM; 2 | pub use crate::consts::*; 3 | pub use crate::error::{DependencyError, Error, ParseError, S3Error, StorageError}; 4 | pub use crate::parser::{PacmanConf, GLOBAL_CONFIG}; 5 | pub use crate::repository::*; 6 | pub use crate::resolver::{types::*, PlanBuilder, TreeResolver}; 7 | pub use crate::storage::{providers, types::*, StorageProvider}; 8 | pub use crate::types::*; 9 | -------------------------------------------------------------------------------- /src/lib/storage/types/mod.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::path::PathBuf; 3 | 4 | pub use bytestream::*; 5 | pub use lockfile::*; 6 | pub use package::*; 7 | 8 | use crate::error::StorageError; 9 | 10 | mod bytestream; 11 | mod lockfile; 12 | mod package; 13 | 14 | pub(crate) type Result = std::result::Result; 15 | pub(crate) type MetaKeyMap = HashMap; 16 | -------------------------------------------------------------------------------- /src/lib/types/mod.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | pub use 
alpm::Package as PacmanPackage; 4 | pub use raur::Package as AurPackage; 5 | 6 | pub use custom_package::*; 7 | pub use depend::*; 8 | pub use pacman::*; 9 | pub use remote_package::*; 10 | pub use version::*; 11 | 12 | use crate::repository::Repository; 13 | 14 | mod custom_package; 15 | mod depend; 16 | mod pacman; 17 | mod remote_package; 18 | mod version; 19 | 20 | pub type ArcRepo = Arc; 21 | pub type ArcPackage = Arc; 22 | -------------------------------------------------------------------------------- /src/lib/alpm.rs: -------------------------------------------------------------------------------- 1 | use std::sync::{Arc, Mutex}; 2 | 3 | use alpm::Alpm; 4 | use lazy_static::lazy_static; 5 | 6 | use crate::consts::*; 7 | use crate::parser::GLOBAL_CONFIG; 8 | 9 | lazy_static! { 10 | pub static ref GLOBAL_ALPM: Arc> = { 11 | let alpm = Alpm::new(ROOT_PATH, PACMAN_DB_PATH).unwrap(); 12 | for db in GLOBAL_CONFIG.sync_dbs() { 13 | alpm.register_syncdb(db.name.to_string(), db.sig_level) 14 | .unwrap(); 15 | } 16 | Arc::new(Mutex::new(alpm)) 17 | }; 18 | } 19 | -------------------------------------------------------------------------------- /tests/build/archer_dummy_a/PKGBUILD: -------------------------------------------------------------------------------- 1 | pkgname=archer_dummy_a 2 | pkgver=0.0.1 3 | pkgrel=1 4 | pkgdesc='Dummy package a.' 
5 | arch=('x86_64') 6 | license=('custom') 7 | depends=() 8 | makedepends=('make' 'gcc') 9 | source=(archer_dummy_a.cpp Makefile) 10 | sha256sums=('ea359bf033312b0c1b03608ef6a962e4b63013ae38338ed8521492c96d0e3e55' 11 | '9b3724ca7ab67377207be005ab6e57b8c709818ee854972f0140d4e955abe083') 12 | 13 | build() { 14 | make 15 | } 16 | 17 | package() { 18 | install -Dm755 archer_dummy_a "$pkgdir/usr/bin/archer_dummy_a" 19 | } 20 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 3 | # Please see the documentation for all configuration options: 4 | # https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "cargo" # See documentation for possible values 9 | directory: "/" # Location of package manifests 10 | schedule: 11 | interval: "daily" 12 | -------------------------------------------------------------------------------- /src/lib/repository/empty.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use crate::error::Result; 4 | use crate::repository::Repository; 5 | use crate::types::*; 6 | 7 | #[derive(Copy, Clone, Debug, Default)] 8 | pub struct EmptyRepository {} 9 | 10 | impl EmptyRepository { 11 | pub fn new() -> Self { 12 | Default::default() 13 | } 14 | } 15 | 16 | impl Repository for EmptyRepository { 17 | fn find_package(&self, _pkg: &Depend) -> Result> { 18 | Ok(vec![]) 19 | } 20 | 21 | fn find_packages(&self, _pkgs: &[Depend]) -> Result>> { 22 | Ok(HashMap::new()) 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /src/lib/lib.rs: 
-------------------------------------------------------------------------------- 1 | #![allow( 2 | clippy::module_name_repetitions, 3 | clippy::wildcard_imports, 4 | clippy::default_trait_access 5 | )] 6 | #![feature(box_patterns)] 7 | #![feature(bool_to_option)] 8 | #![feature(never_type)] 9 | #![feature(hash_drain_filter)] 10 | #![feature(box_syntax)] 11 | #![feature(destructuring_assignment)] 12 | 13 | pub use utils::load_alpm; 14 | 15 | #[cfg(test)] 16 | #[macro_use] 17 | mod tests; 18 | 19 | #[macro_use] 20 | mod utils; 21 | 22 | pub mod alpm; 23 | pub mod builder; 24 | pub mod consts; 25 | pub mod database; 26 | pub mod error; 27 | pub mod parser; 28 | pub mod prelude; 29 | pub mod repository; 30 | pub mod resolver; 31 | pub mod storage; 32 | pub mod types; 33 | -------------------------------------------------------------------------------- /package_layout.txt: -------------------------------------------------------------------------------- 1 | * id 2 | + name 3 | * package_base (git url) 4 | check_md5sum 5 | should_ignore 6 | filename 7 | base 8 | + version 9 | origin 10 | + desc 11 | + url 12 | * votes 13 | * popularity 14 | * out_of_date 15 | build_date 16 | install_date 17 | * maintainer 18 | * first_submitted 19 | * last_modified 20 | * url_path (download tarball) 21 | packager 22 | sha256sum 23 | arch 24 | size 25 | isize 26 | reason 27 | validation 28 | + licenses (*-s) 29 | + groups 30 | + depends 31 | + optdepends 32 | + checkdepends 33 | + makedepends 34 | + conflicts 35 | + provides 36 | + replaces 37 | files 38 | backup 39 | db 40 | changelog 41 | (mtree) 42 | required_by 43 | optional_for 44 | base64_sig 45 | has_scriptlet 46 | * keywords 47 | 48 | id 49 | -------------------------------------------------------------------------------- /src/lib/types/custom_package.rs: -------------------------------------------------------------------------------- 1 | use std::fs::File; 2 | use std::io::Read; 3 | use std::path::PathBuf; 4 | 5 | use 
rustympkglib::pkgdata::PkgData; 6 | 7 | use crate::error::Result; 8 | 9 | #[derive(Debug, Clone, Eq, PartialEq)] 10 | pub struct CustomPackage { 11 | pub name: String, 12 | pub path: PathBuf, 13 | pub data: PkgData, 14 | } 15 | 16 | impl CustomPackage { 17 | pub fn from_file(name: String, path: PathBuf) -> Result { 18 | // TODO error handling 19 | let mut buffer = String::new(); 20 | File::open(path.clone())?.read_to_string(&mut buffer)?; 21 | Ok(Self { 22 | name, 23 | path, 24 | data: PkgData::from_source(&*buffer).unwrap(), 25 | }) 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /src/lib/repository/custom.rs: -------------------------------------------------------------------------------- 1 | use crate::error::Result; 2 | use crate::repository::{sort_pkgs_mut, Repository}; 3 | use crate::types::*; 4 | 5 | #[derive(Debug, Clone)] 6 | pub struct CustomRepository { 7 | packages: Vec, 8 | } 9 | 10 | impl CustomRepository { 11 | pub fn new(packages: Vec) -> Self { 12 | Self { packages } 13 | } 14 | } 15 | 16 | impl Repository for CustomRepository { 17 | fn find_package(&self, pkg: &Depend) -> Result> { 18 | let mut result = self 19 | .packages 20 | .iter() 21 | .filter(|candidate| pkg.satisfied_by(candidate)) 22 | .cloned() 23 | .collect(); 24 | sort_pkgs_mut(&mut result, pkg); 25 | Ok(result) 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /tests/build/archer_dummy_b/PKGBUILD: -------------------------------------------------------------------------------- 1 | pkgbase=archer_dummy_b 2 | pkgname=(archer_dummy_b_1 archer_dummy_b_2) 3 | pkgver=0.0.1 4 | pkgrel=1 5 | pkgdesc='Dummy package a.' 
6 | arch=('x86_64') 7 | license=('custom') 8 | depends=() 9 | makedepends=('make' 'gcc' 'archer_dummy_a') 10 | source=(archer_dummy_b_1.cpp archer_dummy_b_2.cpp Makefile) 11 | sha256sums=('09d6d10a886ac50828c828bc6f9a5944e8a03c27cc5482ff288b486f2b9cef26' 12 | '5a88ee98e7e018cfbbae4e6148badfaf417ffc30f34a9071040ac49d9eb1ad4f' 13 | 'd5f273118e44ec9ae27e00ee23315481e7acaf78c1d3a171bd3fc346f6e5d669') 14 | 15 | build() { 16 | archer_dummy_a 17 | make 18 | } 19 | 20 | package_archer_dummy_b_1() { 21 | install -Dm755 archer_dummy_b_1 "$pkgdir/usr/bin/archer_dummy_b_1" 22 | } 23 | 24 | package_archer_dummy_b_2() { 25 | install -Dm755 archer_dummy_b_2 "$pkgdir/usr/bin/archer_dummy_b_2" 26 | } 27 | -------------------------------------------------------------------------------- /src/lib/resolver/types/plan.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::{Display, Formatter}; 2 | 3 | use itertools::Itertools; 4 | 5 | use crate::types::*; 6 | 7 | #[derive(Clone, Eq, PartialEq, Hash)] 8 | pub enum PlanAction { 9 | Install(Package), 10 | InstallGroup(Vec), 11 | Build(Package), 12 | CopyToDest(Package), 13 | } 14 | 15 | impl Display for PlanAction { 16 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 17 | match self { 18 | PlanAction::Install(pkg) => write!(f, "Install({})", pkg), 19 | PlanAction::Build(pkg) => write!(f, "Build({})", pkg), 20 | PlanAction::CopyToDest(pkg) => write!(f, "CopyToDest({})", pkg), 21 | PlanAction::InstallGroup(pkgs) => write!( 22 | f, 23 | "InstallGroup({})", 24 | pkgs.iter().map(ToString::to_string).join(", ") 25 | ), 26 | } 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /src/lib/storage/providers/mod.rs: -------------------------------------------------------------------------------- 1 | use std::path::{Path, PathBuf}; 2 | 3 | use async_trait::async_trait; 4 | 5 | pub use filesystem::*; 6 | pub use s3::*; 7 | 8 | use 
crate::error::StorageError; 9 | 10 | use super::types::*; 11 | 12 | mod filesystem; 13 | mod s3; 14 | 15 | #[async_trait] 16 | pub trait StorageProvider: Sync + Send { 17 | async fn get_file(&self, path: &Path) -> Result; 18 | // async fn get_file_meta(); 19 | async fn put_file(&self, path: &Path, data: ByteStream) -> Result<()>; 20 | // fn set_file_meta(); 21 | async fn delete_file(&self, path: &Path) -> Result<()>; 22 | // async fn list_files(prefix: String) -> ; 23 | } 24 | 25 | fn get_fullpath(base: &Path, path: &Path) -> Result { 26 | let fullpath = base.join(path); 27 | if !fullpath.starts_with(base) { 28 | return Err(StorageError::InvalidPath(path.to_path_buf())); 29 | } 30 | Ok(fullpath) 31 | } 32 | -------------------------------------------------------------------------------- /src/lib/database/compressor.rs: -------------------------------------------------------------------------------- 1 | use std::io::Cursor; 2 | use std::path::Path; 3 | 4 | use tar::{Builder, Header}; 5 | 6 | use crate::error::Result; 7 | 8 | pub struct ArchiveBuilder { 9 | builder: Builder>, 10 | } 11 | 12 | impl Default for ArchiveBuilder { 13 | fn default() -> Self { 14 | let builder = Builder::new(vec![]); 15 | Self { builder } 16 | } 17 | } 18 | 19 | impl ArchiveBuilder { 20 | pub fn new() -> Self { 21 | Default::default() 22 | } 23 | 24 | pub fn append_data(&mut self, path: impl AsRef, data: &[u8]) -> Result<()> { 25 | let mut header = Header::new_gnu(); 26 | header.set_size(data.len() as u64); 27 | header.set_mode(0o644); 28 | header.set_cksum(); 29 | self.builder.append_data(&mut header, path, data)?; 30 | Ok(()) 31 | } 32 | 33 | pub fn build(mut self) -> Result> { 34 | self.builder.finish()?; 35 | let tar = self.builder.into_inner()?; 36 | Ok(zstd::encode_all(Cursor::new(tar), 0)?) 
37 | } 38 | } 39 | -------------------------------------------------------------------------------- /src/lib/builder/mod.rs: -------------------------------------------------------------------------------- 1 | use std::path::{Path, PathBuf}; 2 | 3 | use async_trait::async_trait; 4 | 5 | use crate::error::BuildError; 6 | 7 | pub use self::bare::*; 8 | pub use self::nspawn::*; 9 | 10 | mod bare; 11 | mod nspawn; 12 | #[cfg(test)] 13 | mod tests; 14 | 15 | type Result = std::result::Result; 16 | type IOResult = std::result::Result; 17 | 18 | #[derive(Debug, Clone, Eq, PartialEq, Default)] 19 | pub struct BuildOptions { 20 | check: bool, 21 | sign: bool, 22 | skip_checksum: bool, 23 | skip_pgp_check: bool, 24 | verbose: bool, 25 | } 26 | 27 | impl BuildOptions { 28 | pub fn new() -> Self { 29 | Default::default() 30 | } 31 | setter_copy!(check, bool); 32 | setter_copy!(sign, bool); 33 | setter_copy!(skip_checksum, bool); 34 | setter_copy!(skip_pgp_check, bool); 35 | setter_copy!(verbose, bool); 36 | } 37 | 38 | #[async_trait] 39 | pub trait Builder { 40 | async fn setup(&self) -> Result<()>; 41 | async fn teardown(&self) -> Result<()>; 42 | async fn sync_system(&self) -> Result<()>; 43 | async fn install_local(&self, path: &Path) -> Result<()>; 44 | async fn install_remote(&self, packages: &[&str]) -> Result<()>; 45 | async fn remove(&self, packages: &[&str]) -> Result<()>; 46 | async fn build(&self, path: &Path) -> Result>; 47 | } 48 | -------------------------------------------------------------------------------- /src/lib/storage/types/lockfile.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashSet; 2 | 3 | use serde::{Deserialize, Serialize}; 4 | 5 | use crate::consts::LOCK_FILE_VERSION; 6 | use crate::utils::unix_timestamp; 7 | 8 | use super::*; 9 | 10 | #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] 11 | pub struct LockFile { 12 | pub version: u32, 13 | pub timestamp: u128, 14 | pub 
packages: HashSet, 15 | } 16 | 17 | impl Default for LockFile { 18 | fn default() -> Self { 19 | Self { 20 | version: LOCK_FILE_VERSION, 21 | timestamp: unix_timestamp(), 22 | packages: HashSet::new(), 23 | } 24 | } 25 | } 26 | 27 | impl LockFile { 28 | pub fn new() -> Self { 29 | Default::default() 30 | } 31 | } 32 | 33 | impl From<&MetaKeyMap> for LockFile { 34 | fn from(m: &MetaKeyMap) -> Self { 35 | Self { 36 | version: LOCK_FILE_VERSION, 37 | timestamp: unix_timestamp(), 38 | packages: m 39 | .iter() 40 | .map(|(meta, key)| RemotePackageUnit { 41 | meta: meta.clone(), 42 | key: key.clone(), 43 | }) 44 | .collect(), 45 | } 46 | } 47 | } 48 | 49 | impl From<&LockFile> for MetaKeyMap { 50 | fn from(l: &LockFile) -> Self { 51 | l.packages 52 | .iter() 53 | .map(|unit| (unit.meta.clone(), unit.key.clone())) 54 | .collect() 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "archer" 3 | version = "0.1.0" 4 | authors = ["PhotonQuantum "] 5 | edition = "2021" 6 | 7 | [lib] 8 | name = "archer_lib" 9 | path = "src/lib/lib.rs" 10 | 11 | [[bin]] 12 | name = "archer_bin" 13 | path = "src/bin/main.rs" 14 | 15 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 16 | 17 | [dependencies] 18 | raur = {version="5.0", features=["blocking"]} 19 | alpm = "2" 20 | anyhow = "1.0" 21 | thiserror = "1.0" 22 | chrono = {version="0.4", features=["serde"]} 23 | url = "2.2" 24 | ranges = "0.3" 25 | itertools = "0.10" 26 | lazy_static = "1.4" 27 | reqwest = {version="0.11", features=["blocking", "gzip", "brotli"]} 28 | rayon = "1.5" 29 | maplit = "1.0" 30 | enumflags2 = "0.7" 31 | online-scc-graph = {git="https://github.com/PhotonQuantum/online-transitive-closure-rs", branch="simplify"} 32 | indexmap = "1.7" 33 | rustympkglib = 
{git="https://github.com/PhotonQuantum/rustympkglib"} 34 | archlinux-repo-parser = "0.1" 35 | serde = "1.0" 36 | serde_with = "1.11" 37 | serde_json = "1.0" 38 | pkginfo = {git="https://github.com/PhotonQuantum/pkginfo-rs"} 39 | tar = "0.4" 40 | zstd = "0" 41 | flate2 = "1.0" 42 | xz2 = "0.1" 43 | infer = "0.5" 44 | derive_builder = "0.10" 45 | md5 = "0.7" 46 | sha2 = "0.10" 47 | async-trait = "0.1" 48 | tokio = {version="1.15", features=["full"]} 49 | tempfile = "3.2" 50 | rusoto_core = "0.47" 51 | rusoto_s3 = "0.47" 52 | futures = "0.3" 53 | bytes = "1.0" 54 | regex = "1.5" 55 | users = "0.11" 56 | rust-ini = "0.17" 57 | fs3 = "0.5" 58 | 59 | [dev-dependencies] 60 | rstest = "0.12" 61 | testcontainers = "0.12" 62 | rand = "0.8" 63 | fs_extra = "1.2" 64 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Archer - a repository builder for ArchLinux 2 | 3 | [![Test](https://github.com/PhotonQuantum/archer/actions/workflows/test.yml/badge.svg)](https://github.com/PhotonQuantum/archer/actions/workflows/test.yml) 4 | 5 | > This project is suspended because I no longer actively use ArchLinux anymore. 
6 | 7 | ## Current Progress 8 | 9 | ### Naive Dependency Resolving 10 | ![deps](demo.jpg) 11 | 12 | ## Todos 13 | - [ ] dependency resolving 14 | + [ ] dfs search 15 | * [x] basic impl 16 | * [ ] nice error reporting 17 | + [x] graph output 18 | * [x] use petgraph impl 19 | * [x] migrate to custom impl 20 | * [x] custom dot output 21 | + [x] skip policy (ignore packages existing in certain repo) 22 | + [x] handle cyclic deps 23 | + [x] toposort & SCC 24 | * [x] basic impl 25 | * [x] complete impl 26 | + [x] support for makedepends 27 | + [x] batch query 28 | + [x] parallel query for aur 29 | + [ ] custom pkgbuild support 30 | * [x] basic impl 31 | * [ ] custom repo 32 | * [ ] support .SRCINFO 33 | + [ ] plan builder 34 | * [x] basic impl 35 | * [ ] complete impl 36 | + [ ] unittest 37 | * [ ] package 38 | * [x] repository 39 | * [x] basic resolve 40 | * [ ] cyclic deps 41 | * [ ] plan builder 42 | * [ ] parser 43 | - [ ] build environment setup 44 | + [ ] bare metal 45 | + [ ] bubblewrap 46 | + [ ] official container buildtools 47 | - [ ] build workflow 48 | + [ ] split package 49 | + [ ] error handling 50 | - [ ] storage support 51 | + [ ] file 52 | + [ ] aliyun oss 53 | + [ ] s3 54 | - [ ] update checker 55 | + [ ] support for vcs packages 56 | - [ ] metadata & build report (json, plain) 57 | + [ ] basic functionality 58 | + [ ] frontend (optional) -------------------------------------------------------------------------------- /src/lib/database/tests.rs: -------------------------------------------------------------------------------- 1 | use std::fs; 2 | use std::io::Read; 3 | use std::path::PathBuf; 4 | 5 | use rstest::rstest; 6 | 7 | use crate::database::pacman::{BuildTarget, DBBuilder}; 8 | 9 | use super::decompressor::ArchiveReader; 10 | 11 | #[rstest] 12 | #[case("test.tar")] 13 | #[case("test.tar.gz")] 14 | #[case("test.tar.xz")] 15 | #[case("test.tar.zst")] 16 | fn must_decompress(#[case] name: &str) { 17 | println!("decompressing {}", name); 18 | let 
path = PathBuf::from("tests/archives/").join(name); 19 | let archive = ArchiveReader::from_filepath(&path).expect("unable to read archive"); 20 | let mut tar = archive.into_tar(); 21 | let mut entries = tar.entries().expect("unable to read entries"); 22 | 23 | let mut entry = entries 24 | .next() 25 | .expect("archive length mismatch") 26 | .expect("unable to read entry"); 27 | assert_eq!( 28 | entry 29 | .path() 30 | .expect("unable to read path") 31 | .file_name() 32 | .unwrap() 33 | .to_str() 34 | .unwrap(), 35 | "test", 36 | "filename mismatch" 37 | ); 38 | let mut buffer = String::new(); 39 | entry 40 | .read_to_string(&mut buffer) 41 | .expect("unable to read file"); 42 | assert_eq!(buffer, "test\n", "file content mismatch"); 43 | } 44 | 45 | #[test] 46 | pub fn must_build_dir() { 47 | let mut builder = DBBuilder::new(); 48 | for path in fs::read_dir("tests/pkgs").expect("missing test directory") { 49 | let path = path.expect("invalid dir entry"); 50 | builder.add_file_mut(path.path()); 51 | } 52 | drop(fs::remove_dir_all("tests/output")); 53 | fs::create_dir("tests/output").expect("unable to create test directory"); 54 | builder 55 | .build(BuildTarget::new("tests/output", None)) 56 | .expect("unable to build db folder"); 57 | builder 58 | .build(BuildTarget::new("tests/output", Some("test"))) 59 | .expect("unable to build db archive"); 60 | } 61 | -------------------------------------------------------------------------------- /src/lib/storage/types/package.rs: -------------------------------------------------------------------------------- 1 | use std::path::{Path, PathBuf}; 2 | 3 | use lazy_static::lazy_static; 4 | use regex::Regex; 5 | use serde::{Deserialize, Serialize}; 6 | 7 | use crate::types::*; 8 | 9 | lazy_static! 
{ 10 | static ref RE: Regex = Regex::new(r"\.tar(\..*)?").unwrap(); 11 | } 12 | 13 | #[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)] 14 | pub struct PackageMeta { 15 | pub name: String, 16 | pub version: Version, 17 | pub checksum: u64, 18 | } 19 | 20 | impl AsRef for &PackageMeta { 21 | fn as_ref(&self) -> &PackageMeta { 22 | self 23 | } 24 | } 25 | 26 | impl PackageMeta { 27 | pub fn new(name: &str, version: &Version, checksum: u64) -> Self { 28 | Self { 29 | name: name.to_string(), 30 | version: version.clone(), 31 | checksum, 32 | } 33 | } 34 | 35 | pub fn short_chksum(&self) -> String { 36 | let mut str_chksum = format!("{:x}", self.checksum); 37 | str_chksum.truncate(8); 38 | str_chksum 39 | } 40 | pub fn filename(&self) -> String { 41 | format!("{}-{}-{}", self.name, self.version, self.short_chksum()) 42 | } 43 | } 44 | 45 | #[derive(Debug, Clone, Eq, PartialEq, Hash)] 46 | pub struct LocalPackageUnit { 47 | pub meta: PackageMeta, 48 | pub path: PathBuf, 49 | } 50 | 51 | impl LocalPackageUnit { 52 | pub fn new(meta: impl AsRef, path: impl AsRef) -> Self { 53 | Self { 54 | meta: meta.as_ref().clone(), 55 | path: path.as_ref().to_path_buf(), 56 | } 57 | } 58 | fn get_ext(&self) -> &str { 59 | RE.find(self.path.file_name().unwrap().to_str().unwrap()) 60 | .unwrap() 61 | .as_str() 62 | } 63 | pub fn canonicalize_filename(&self) -> String { 64 | format!("{}{}", self.meta.filename(), self.get_ext()) 65 | } 66 | } 67 | 68 | #[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)] 69 | pub struct RemotePackageUnit { 70 | pub meta: PackageMeta, 71 | pub key: PathBuf, 72 | } 73 | -------------------------------------------------------------------------------- /src/lib/repository/mod.rs: -------------------------------------------------------------------------------- 1 | use std::cmp::Ordering; 2 | use std::collections::HashMap; 3 | use std::fmt::Debug; 4 | 5 | pub use aur::AurRepo; 6 | pub use cached::CachedRepository; 7 | pub use 
custom::CustomRepository; 8 | pub use empty::EmptyRepository; 9 | pub use merged::MergedRepository; 10 | pub use pacman::{PacmanLocal, PacmanRemote}; 11 | 12 | use crate::error::Result; 13 | use crate::types::*; 14 | 15 | mod aur; 16 | mod cached; 17 | mod custom; 18 | mod empty; 19 | mod merged; 20 | mod pacman; 21 | 22 | #[cfg(test)] 23 | mod tests; 24 | 25 | pub trait Repository: Debug + Send + Sync { 26 | fn find_package(&self, pkg: &Depend) -> Result>; 27 | fn find_packages(&self, pkgs: &[Depend]) -> Result>> { 28 | let mut result = HashMap::new(); 29 | for pkg in pkgs { 30 | match self.find_package(pkg) { 31 | Err(e) => return Err(e), 32 | Ok(v) => { 33 | result.insert(pkg.clone(), v); 34 | } 35 | } 36 | } 37 | Ok(result) 38 | } 39 | } 40 | 41 | fn sort_pkgs_mut(pkgs: &mut Vec, preferred: &Depend) { 42 | pkgs.sort_unstable_by(|a, b| { 43 | if a.name() == preferred.name && b.name() != preferred.name { 44 | Ordering::Less 45 | } else if a.name() != preferred.name && b.name() == preferred.name { 46 | Ordering::Greater 47 | } else { 48 | match a 49 | .partial_cmp(b) 50 | .unwrap_or_else(|| a.version().cmp(&b.version())) 51 | { 52 | Ordering::Less => Ordering::Greater, 53 | Ordering::Greater => Ordering::Less, 54 | Ordering::Equal => Ordering::Equal, 55 | } 56 | } 57 | }); 58 | } 59 | 60 | fn classify_package( 61 | candidate: Package, 62 | target_deps: &[Depend], 63 | ) -> impl Iterator> + '_ { 64 | target_deps.iter().map(move |dep| { 65 | dep.satisfied_by(&candidate) 66 | .then_some((dep.clone(), candidate.clone())) 67 | }) 68 | } 69 | -------------------------------------------------------------------------------- /src/lib/repository/merged.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::sync::Arc; 3 | 4 | use itertools::Itertools; 5 | 6 | use crate::error::Result; 7 | use crate::repository::Repository; 8 | use crate::types::*; 9 | 10 | #[derive(Default, Debug, Clone)] 11 | pub 
struct MergedRepository {
    // NOTE(review): generic parameters were stripped from this export
    // (`Vec>`, `Result>`, …); reconstructed below from the `Repository`
    // trait signatures in repository/mod.rs — confirm against the original.
    repos: Vec<Arc<dyn Repository>>,
}

impl MergedRepository {
    /// Builds a merged view over `repos`; earlier entries take precedence.
    pub fn new(repos: Vec<Arc<dyn Repository>>) -> Self {
        Self { repos }
    }
}

impl Repository for MergedRepository {
    // NOTE
    // once there's a valid response from a repo for a package, it won't be
    // queried against succeeding repos

    fn find_package(&self, pkg: &Depend) -> Result<Vec<Package>> {
        self.repos
            .iter()
            .map(|repo| repo.find_package(pkg))
            // stop at the first non-empty Ok result, or at the first error
            .find(|result| result.as_ref().map_or(true, |pkgs| !pkgs.is_empty()))
            .unwrap_or_else(|| Ok(vec![]))
    }

    fn find_packages(&self, pkgs: &[Depend]) -> Result<HashMap<Depend, Vec<Package>>> {
        // seed every requested dependency with an empty candidate list
        let mut base = HashMap::new();
        for name in pkgs {
            // was `name.clone().clone()` — redundant double clone
            base.insert(name.clone(), vec![]);
        }

        // use try_fold instead of try_find here because we want Vec instead of ArcRepo
        self.repos.iter().try_fold(base, |mut acc, repo| {
            // only re-query dependencies no earlier repo satisfied
            let missed_pkgs = acc
                .iter()
                .filter(|(_, pkgs)| pkgs.is_empty())
                .map(|(name, _)| name)
                .cloned()
                .collect_vec();
            if !missed_pkgs.is_empty() {
                let mut result = repo.find_packages(missed_pkgs.as_ref())?;
                for (name, pkgs) in &mut acc {
                    if let Some(new_pkgs) = result.remove(name) {
                        pkgs.extend(new_pkgs);
                    }
                }
            }
            Ok(acc)
        })
    }
}

-------------------------------------------------------------------------------- /src/lib/database/decompressor.rs: --------------------------------------------------------------------------------

use std::fs::File;
use std::io::{Cursor, Read};
use std::path::Path;

use tar::Archive as TarArchive;

use crate::error::{Error, Result};

/// Holds the fully-decompressed bytes of a (possibly compressed) tar archive.
pub struct ArchiveReader {
    data: Vec<u8>,
}

impl ArchiveReader {
    /// Sniffs the archive format from the first 512 bytes, then decompresses
    /// the whole stream into memory.
    // NOTE(review): a single `read()` call may legitimately return fewer bytes
    // than are available (short read), in which case the `head_bytes == 512`
    // branch below is skipped and the remainder of the stream is dropped —
    // consider a read-until-full loop. TODO confirm upstream intent.
    pub fn from_reader(mut reader: impl Read) -> Result<Self> {
        let mut head = [0; 512];
        let head_bytes = reader.read(&mut head)?;
        let mime =
infer::get(&head).ok_or(Error::ArchiveError)?; 18 | 19 | let mut reader = if head_bytes == 512 { 20 | Box::new(Cursor::new(head).chain(reader)) as Box 21 | } else { 22 | Box::new(&head[..head_bytes]) as Box 23 | }; 24 | let mut data: Vec = Vec::new(); 25 | match mime.mime_type() { 26 | "application/zstd" => { 27 | zstd::stream::copy_decode(reader, &mut data)?; 28 | } 29 | "application/gzip" => { 30 | let mut decoder = flate2::read::GzDecoder::new(reader); 31 | decoder.read_to_end(&mut data)?; 32 | } 33 | "application/x-xz" => { 34 | let mut decoder = xz2::read::XzDecoder::new(reader); 35 | decoder.read_to_end(&mut data)?; 36 | } 37 | "application/x-tar" => { 38 | reader.read_to_end(&mut data)?; 39 | } 40 | _ => return Err(Error::ArchiveError), 41 | } 42 | Ok(Self { data }) 43 | } 44 | pub fn from_u8(file: &[u8]) -> Result { 45 | Self::from_reader(file) 46 | } 47 | pub fn from_filepath(path: &Path) -> Result { 48 | Self::from_reader(File::open(path)?) 49 | } 50 | 51 | pub fn to_tar(&self) -> TarArchive>> { 52 | TarArchive::new(Cursor::new(&self.data)) 53 | } 54 | 55 | pub fn into_tar(self) -> TarArchive>> { 56 | TarArchive::new(Cursor::new(self.data)) 57 | } 58 | 59 | pub fn inner(&self) -> &[u8] { 60 | &self.data 61 | } 62 | 63 | pub fn into_inner(self) -> Vec { 64 | self.data 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /src/lib/repository/pacman.rs: -------------------------------------------------------------------------------- 1 | use std::sync::{Arc, Mutex}; 2 | 3 | use alpm::Alpm; 4 | use itertools::Itertools; 5 | 6 | use crate::alpm::GLOBAL_ALPM; 7 | use crate::error::Result; 8 | use crate::repository::{sort_pkgs_mut, Repository}; 9 | use crate::types::*; 10 | 11 | #[derive(Clone, Debug)] 12 | pub struct PacmanRemote { 13 | alpm: Arc>, 14 | } 15 | 16 | #[derive(Clone, Debug)] 17 | pub struct PacmanLocal { 18 | alpm: Arc>, 19 | } 20 | 21 | impl PacmanRemote { 22 | pub fn new() -> Self { 23 | 
Default::default() 24 | } 25 | } 26 | 27 | impl Default for PacmanRemote { 28 | fn default() -> Self { 29 | Self { 30 | alpm: GLOBAL_ALPM.clone(), 31 | } 32 | } 33 | } 34 | 35 | impl Repository for PacmanRemote { 36 | fn find_package(&self, pkg: &Depend) -> Result> { 37 | let mut result = self 38 | .alpm 39 | .lock() 40 | .unwrap() 41 | .syncdbs() 42 | .iter() 43 | .map(|db| db.search([pkg.name.clone()].iter())) 44 | .try_collect::<_, Vec<_>, _>()? 45 | .into_iter() 46 | .flat_map(|pkgs| { 47 | pkgs.into_iter() 48 | .map(Package::from) 49 | .filter(|candidate| pkg.satisfied_by(candidate)) 50 | }) 51 | .collect(); 52 | sort_pkgs_mut(&mut result, pkg); 53 | Ok(result) 54 | } 55 | } 56 | 57 | impl PacmanLocal { 58 | pub fn new() -> Self { 59 | Default::default() 60 | } 61 | } 62 | 63 | impl Default for PacmanLocal { 64 | fn default() -> Self { 65 | Self { 66 | alpm: GLOBAL_ALPM.clone(), 67 | } 68 | } 69 | } 70 | 71 | // NOTE this repository only returns exact match 72 | impl Repository for PacmanLocal { 73 | fn find_package(&self, pkg: &Depend) -> Result> { 74 | Ok(self 75 | .alpm // acquire local db 76 | .lock() 77 | .unwrap() 78 | .localdb() 79 | .pkgs() // find exact match 80 | .find_satisfier(pkg.name.clone()) 81 | .map(|p| vec![Package::from(p)]) // convert to owned 82 | .unwrap_or_default()) 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /src/lib/repository/aur.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use itertools::Itertools; 4 | use raur::blocking::{Handle, Raur}; 5 | use rayon::prelude::*; 6 | 7 | use crate::error::Result; 8 | use crate::repository::{classify_package, sort_pkgs_mut, Repository}; 9 | use crate::types::*; 10 | 11 | #[derive(Debug, Clone, Default)] 12 | pub struct AurRepo { 13 | handler: Handle, 14 | } 15 | 16 | impl AurRepo { 17 | pub fn new() -> Self { 18 | Default::default() 19 | } 20 | } 21 | 22 | impl 
Repository for AurRepo {
    // NOTE(review): generic parameters were stripped from this export;
    // return types reconstructed from the `Repository` trait — confirm.
    fn find_package(&self, pkg: &Depend) -> Result<Vec<Package>> {
        // NOTE(review): `println!` looks like leftover debug output in library
        // code — consider routing through a logging facade.
        println!("aur searching for {}", pkg);
        // fuzzy AUR search only yields names; fetch full details afterwards
        let search_result = self
            .handler
            .search(&pkg.name)?
            .into_iter()
            .map(|p| p.name)
            .collect_vec();
        let mut detailed_info = self
            .handler
            .info(&search_result)?
            .into_iter()
            .map(Package::from)
            .filter(|candidate| pkg.satisfied_by(candidate))
            .collect();
        // preferred (exact-name, newest-version) candidates first
        sort_pkgs_mut(&mut detailed_info, pkg);
        Ok(detailed_info)
    }

    fn find_packages(&self, pkgs: &[Depend]) -> Result<HashMap<Depend, Vec<Package>>> {
        println!("aur searching for {}", pkgs.iter().join(", "));
        // search all requested packages in parallel, short-circuiting on error;
        // NOTE(review): overlapping searches may yield duplicate names here —
        // a `.unique()` before the info query would avoid redundant lookups.
        let search_result: Vec<_> = pkgs
            .into_par_iter()
            .map(|dep| self.handler.search(&dep.name)) // search candidates per package
            .collect::<Result<Vec<_>, _>>()?
            .into_iter()
            .flatten()
            .map(|p| p.name)
            .collect();

        let mut detailed_info = self
            .handler
            .info(&search_result)?
// acquire detailed package info 57 | .into_iter() 58 | .map(Package::from) // convert to owned 59 | .flat_map(|p| classify_package(p, pkgs)) // classify packages by requested package name 60 | .flatten() // filter None 61 | .into_group_map(); 62 | 63 | for (pkgname, pkgs) in &mut detailed_info { 64 | sort_pkgs_mut(pkgs, pkgname); 65 | } 66 | 67 | Ok(detailed_info) 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: 4 | - master 5 | pull_request: 6 | branches: 7 | - master 8 | 9 | name: Test 10 | 11 | jobs: 12 | lint: 13 | name: Lint 14 | runs-on: ubuntu-latest 15 | container: archlinux:base-devel 16 | steps: 17 | - uses: actions/checkout@v2 18 | name: Checkout 🛎️ 19 | - uses: actions/cache@v2 20 | with: 21 | path: | 22 | ~/.cargo/registry 23 | ~/.cargo/git 24 | target 25 | key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}-lint 26 | - uses: actions-rs/toolchain@v1 27 | name: Setup Cargo Toolchain 🛎️ 28 | with: 29 | components: rustfmt, clippy 30 | toolchain: nightly 31 | default: true 32 | - name: Update System️️m 🛎️ 33 | run: pacman -Syu --noconfirm 34 | - uses: actions-rs/cargo@v1 35 | name: Check Code Format 🔧 36 | with: 37 | command: fmt 38 | args: -- --check 39 | - uses: actions-rs/cargo@v1 40 | name: Run Clippy Lints 🔨 41 | with: 42 | command: clippy 43 | args: --all-targets --all-features 44 | 45 | test: 46 | name: Test 47 | runs-on: ubuntu-latest 48 | container: archlinux:base-devel 49 | env: 50 | S3_ENDPOINT: http://s3_mock:9090 51 | BUILD_USER: archer 52 | NO_CONTAINER: 1 53 | services: 54 | s3_mock: 55 | image: adobe/s3mock 56 | ports: 57 | - 9090:9090 58 | env: 59 | initialBuckets: test-bucket 60 | steps: 61 | - uses: actions/checkout@v2 62 | name: Checkout 🛎️ 63 | - uses: actions/cache@v2 64 | with: 65 | path: | 66 | ~/.cargo/registry 67 | ~/.cargo/git 
68 | target 69 | key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}-test 70 | - uses: actions-rs/toolchain@v1 71 | name: Setup Cargo Toolchain 🛎️ 72 | with: 73 | profile: minimal 74 | toolchain: nightly 75 | default: true 76 | - name: Populate Pacman Database️️ 🛎️ 77 | run: pacman -Syu --noconfirm 78 | - name: Create test user 🚀 79 | run: useradd -m archer 80 | - uses: actions-rs/cargo@v1 81 | name: Running Tests 🚀 82 | with: 83 | command: test 84 | args: --all-features --workspace 85 | -------------------------------------------------------------------------------- /src/lib/repository/cached.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::fmt::Debug; 3 | use std::sync::{Arc, RwLock}; 4 | 5 | use itertools::Itertools; 6 | 7 | use crate::error::Result; 8 | use crate::repository::Repository; 9 | use crate::types::*; 10 | 11 | #[derive(Debug, Clone)] 12 | pub struct CachedRepository { 13 | inner: Arc, 14 | cache: Arc>>>, 15 | } 16 | 17 | impl CachedRepository { 18 | pub fn new(repo: Arc) -> Self { 19 | Self { 20 | inner: repo, 21 | cache: Arc::new(Default::default()), 22 | } 23 | } 24 | } 25 | 26 | impl Repository for CachedRepository { 27 | fn find_package(&self, pkg: &Depend) -> Result> { 28 | // search in cache first 29 | if let Some(hit) = self.cache.read().unwrap().get(pkg) { 30 | return Ok(hit.clone()); 31 | } 32 | 33 | let missed = self.inner.find_package(pkg)?; // query missed packages 34 | self.cache 35 | .write() 36 | .unwrap() 37 | .insert(pkg.clone(), missed.clone()); // write back into cache 38 | Ok(missed) 39 | } 40 | 41 | fn find_packages(&self, pkgs: &[Depend]) -> Result>> { 42 | // search in cache first 43 | let (mut hit_deps, missed_deps) = { 44 | let cache_read = self.cache.read().unwrap(); 45 | let hit_deps: HashMap> = pkgs 46 | .iter() 47 | .filter_map(|dep| cache_read.get(dep).map(|pkg| (dep.clone(), pkg.clone()))) 48 | .collect(); 49 | let missed_deps 
= pkgs 50 | .iter() 51 | .filter(|pkgname| !hit_deps.contains_key(pkgname)) 52 | .cloned() 53 | .collect_vec(); 54 | (hit_deps, missed_deps) 55 | }; 56 | 57 | // query missed packages 58 | let missed_packages = self.inner.find_packages(&missed_deps)?; 59 | 60 | // write back into cache 61 | { 62 | let mut cache_write = self.cache.write().unwrap(); 63 | for (dep, packages) in &missed_packages { 64 | cache_write.insert(dep.clone(), packages.clone()); 65 | } 66 | } 67 | 68 | // merge hit and missed set 69 | hit_deps.extend(missed_packages.into_iter()); 70 | 71 | Ok(hit_deps) 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /src/bin/main.rs: -------------------------------------------------------------------------------- 1 | #![feature(box_syntax)] 2 | 3 | use std::fs::File; 4 | use std::io::Write; 5 | use std::str::FromStr; 6 | use std::sync::Arc; 7 | 8 | use anyhow::Result; 9 | use itertools::Itertools; 10 | 11 | use archer_lib::prelude::*; 12 | 13 | fn main() -> Result<()> { 14 | demo_planner()?; 15 | demo_deps() 16 | } 17 | 18 | fn demo_planner() -> Result<()> { 19 | let mut planner = PlanBuilder::new(); 20 | println!("finding package"); 21 | // planner.add_package(&Depend::from_str("firedragon").unwrap())?; 22 | planner.add_package(&Depend::from_str("fcft").unwrap())?; 23 | println!("building plan"); 24 | let result = planner.build()?; 25 | println!( 26 | "Plan: {:#?}", 27 | result.into_iter().map(|act| act.to_string()).collect_vec() 28 | ); 29 | Ok(()) 30 | } 31 | 32 | fn demo_deps() -> Result<()> { 33 | let pacman_remote_repo = Arc::new(PacmanRemote::new()) as Arc; 34 | let local_repo = Arc::new(PacmanLocal::new()) as Arc; 35 | let aur = Arc::new(AurRepo::new()) as Arc; 36 | let remote_repo = Arc::new(CachedRepository::new(Arc::new(MergedRepository::new( 37 | vec![pacman_remote_repo.clone(), aur], 38 | )))); 39 | let policy = ResolvePolicy::new( 40 | remote_repo.clone(), 41 | Arc::new(EmptyRepository::new()), 42 
| // Arc::new(CachedRepository::new(pacman_remote_repo)), 43 | // Arc::new(EmptyRepository::new()), 44 | Arc::new(CachedRepository::new(local_repo)), 45 | ); 46 | let resolver = TreeResolver::new(policy, box always_depend, box allow_if_pacman); 47 | let initial_package = remote_repo 48 | .find_package(&Depend::from_str("electron").unwrap())? 49 | .iter() 50 | .find(|p| p.name() == "electron") 51 | .unwrap() 52 | .clone(); 53 | let solution = resolver.resolve(&[initial_package])?; 54 | println!( 55 | "{} packages: \n{:#?}", 56 | solution.packages.len(), 57 | solution 58 | .strongly_connected_components() 59 | .into_iter() 60 | .map(|pkgs| format!("[{}]", pkgs.iter().map(ToString::to_string).join(", "))) 61 | .join(", ") 62 | ); 63 | let mut f = File::create("output.dot")?; 64 | write!(f, "{}", solution.graph.dot())?; 65 | // let graph = Graph::from(&solution); 66 | // write!(f, "{}", Dot::with_config(&graph, &[Config::EdgeNoLabel])).unwrap(); 67 | Ok(()) 68 | } 69 | -------------------------------------------------------------------------------- /src/lib/resolver/types/resolve.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::sync::{Arc, RwLock}; 3 | 4 | use enumflags2::{bitflags, BitFlags}; 5 | 6 | use crate::error::Result; 7 | use crate::types::*; 8 | 9 | #[derive(Clone)] 10 | pub struct ResolvePolicy { 11 | pub from_repo: ArcRepo, 12 | pub skip_repo: ArcRepo, 13 | pub immortal_repo: ArcRepo, 14 | pub immortal_cache: Arc>>, 15 | } 16 | 17 | #[bitflags] 18 | #[repr(u8)] 19 | #[derive(Copy, Clone, Debug, Eq, PartialEq)] 20 | pub enum DependChoice { 21 | Depends, 22 | MakeDepends, 23 | } 24 | 25 | pub type DependPolicy = BitFlags; 26 | 27 | pub fn always_depend(_: &Package) -> DependPolicy { 28 | BitFlags::from(DependChoice::Depends) 29 | } 30 | 31 | pub fn makedepend_if_aur_custom(pkg: &Package) -> DependPolicy { 32 | match pkg { 33 | Package::PacmanPackage(_) => 
BitFlags::from(DependChoice::Depends),
        Package::AurPackage(_) | Package::CustomPackage(_) => {
            DependChoice::Depends | DependChoice::MakeDepends
        }
    }
}

/// Cyclic-dependency policy: accept every cycle.
pub const fn always_allow_cyclic(_: &[&Package]) -> bool {
    true
}

/// Cyclic-dependency policy: reject every cycle.
pub const fn always_deny_cyclic(_: &[&Package]) -> bool {
    false
}

/// Cyclic-dependency policy: accept a cycle only if it consists solely of
/// pacman packages (which pacman itself can install in one transaction).
pub fn allow_if_pacman(pkgs: &[&Package]) -> bool {
    pkgs.iter()
        .all(|pkg| matches!(pkg, Package::PacmanPackage(_)))
}

impl ResolvePolicy {
    pub fn new(from_repo: ArcRepo, skip_repo: ArcRepo, immortal_repo: ArcRepo) -> Self {
        Self {
            from_repo,
            skip_repo,
            immortal_repo,
            immortal_cache: Arc::new(Default::default()),
        }
    }

    /// Returns true when the immortal repo holds `pkg` at a *different*
    /// version (i.e. resolving `pkg` would clash with a pinned package).
    /// Results are memoized in `immortal_cache`.
    // NOTE(review): return generics were stripped in this export; `Result<bool>`
    // reconstructed from usage — confirm against the original source.
    pub fn is_mortal_blade(&self, pkg: &Package) -> Result<bool> {
        // was `Depend::from(&pkg.clone())` — the clone was a needless deep copy,
        // `From<&Package>` already takes a reference
        let dep = Depend::from(pkg);
        if let Some(mortal_blade) = self.immortal_cache.read().unwrap().get(&dep) {
            return Ok(*mortal_blade);
        }
        let mortal_blade = self.immortal_repo.find_package(&dep).map(|immortals| {
            immortals
                .into_iter()
                .any(|immortal| immortal.version() != pkg.version())
        })?;
        self.immortal_cache
            .write()
            .unwrap()
            .insert(dep, mortal_blade);
        Ok(mortal_blade)
    }

    /// Returns true when the immortal repo holds `pkg` at exactly this version.
    pub fn is_immortal(&self, pkg: &Package) -> Result<bool> {
        let dep = Depend::from(pkg);
        let immortal = self.immortal_repo.find_package(&dep).map(|immortals| {
            immortals
                .into_iter()
                .any(|immortal| immortal.version() == pkg.version())
        })?;
        Ok(immortal)
    }
}

-------------------------------------------------------------------------------- /src/lib/utils.rs: --------------------------------------------------------------------------------

use std::os::linux::fs::MetadataExt;
use std::path::Path;
use std::time::{SystemTime, UNIX_EPOCH};

use alpm::Alpm;

use crate::consts::*;
use crate::error::{GpgError, MakepkgError, Result};
use
crate::parser::PacmanConf; 10 | 11 | pub fn load_alpm() -> Result { 12 | let alpm = Alpm::new(ROOT_PATH, PACMAN_DB_PATH)?; 13 | let conf = PacmanConf::new()?; 14 | for db in conf.sync_dbs() { 15 | alpm.register_syncdb(db.name.as_str(), db.sig_level)?; 16 | } 17 | Ok(alpm) 18 | } 19 | 20 | // Get stdev of the nearest valid path of the given path (e.g. /home for /home/some/non/exist/path) 21 | // NOTE: '..' in path is not handled 22 | fn get_stdev(path: impl AsRef) -> Option { 23 | let target = path.as_ref(); 24 | target 25 | .ancestors() 26 | .find_map(|try_path| try_path.metadata().ok().map(|path| path.st_dev())) 27 | } 28 | 29 | pub fn is_same_fs(path_1: impl AsRef, path_2: impl AsRef) -> bool { 30 | get_stdev(path_1) 31 | .and_then(|stdev_1| get_stdev(path_2).map(|stdev_2| stdev_1 == stdev_2)) 32 | .unwrap_or(false) 33 | } 34 | 35 | pub fn unix_timestamp() -> u128 { 36 | SystemTime::now() 37 | .duration_since(UNIX_EPOCH) 38 | .unwrap() 39 | .as_millis() 40 | } 41 | 42 | pub const fn map_makepkg_code(status_code: i32) -> Option { 43 | match status_code { 44 | 0 | 13 => None, 45 | 2 => Some(MakepkgError::Configuration), 46 | 3 => Some(MakepkgError::InvalidOption), 47 | 4 => Some(MakepkgError::InvalidFunction), 48 | 5 => Some(MakepkgError::InviablePackage), 49 | 6 => Some(MakepkgError::MissingSrc), 50 | 7 => Some(MakepkgError::MissingPkgDir), 51 | 10 => Some(MakepkgError::RunAsRoot), 52 | 11 => Some(MakepkgError::NoPermission), 53 | 12 => Some(MakepkgError::ParseError), 54 | 15 => Some(MakepkgError::MissingProgram), 55 | 16 => Some(MakepkgError::SignFailure), 56 | _ => Some(MakepkgError::Unknown), 57 | } 58 | } 59 | 60 | pub const fn map_gpg_code(status_code: i32) -> Option { 61 | match status_code { 62 | 0 => None, 63 | 1 => Some(GpgError::BadSignature), 64 | _ => Some(GpgError::Unknown), 65 | } 66 | } 67 | 68 | #[macro_export] 69 | macro_rules! 
setter_copy { 70 | ($name: ident, $tyty: ty) => { 71 | pub const fn $name(mut self, $name: $tyty) -> Self { 72 | self.$name = $name; 73 | self 74 | } 75 | }; 76 | } 77 | 78 | #[macro_export] 79 | macro_rules! setter_option_clone { 80 | ($name: ident, $tyty: ty) => { 81 | pub fn $name(mut self, $name: &$tyty) -> Self { 82 | self.$name = Some($name.clone()); 83 | self 84 | } 85 | }; 86 | } 87 | -------------------------------------------------------------------------------- /tests/naive.rs: -------------------------------------------------------------------------------- 1 | #![feature(box_syntax)] 2 | 3 | use std::str::FromStr; 4 | use std::sync::Arc; 5 | 6 | use itertools::Itertools; 7 | use rstest::rstest; 8 | 9 | use archer_lib::prelude::*; 10 | use std::path::PathBuf; 11 | use std::time::Duration; 12 | 13 | fn wait_pacman_lock() { 14 | let lock_path = PathBuf::from_str("/var/lib/pacman/db.lck").unwrap(); 15 | while lock_path.exists() { 16 | std::thread::sleep(Duration::from_secs(1)); 17 | } 18 | } 19 | 20 | fn must_plan(pkg: &str) { 21 | println!("Planning {}", pkg); 22 | let mut planner = PlanBuilder::new(); 23 | planner 24 | .add_package(&Depend::from_str(pkg).unwrap()) 25 | .expect("can't search package"); 26 | let plan = planner.build().expect("can't build plan"); 27 | assert!(!plan.is_empty(), "plan is empty"); 28 | println!( 29 | "Plan: {:#?}", 30 | plan.iter().map(|act| act.to_string()).collect_vec() 31 | ); 32 | } 33 | 34 | fn must_resolve(pkg: &str, skip_remote: bool) { 35 | wait_pacman_lock(); 36 | println!("Resolving {}", pkg); 37 | let pacman_remote_repo = Arc::new(PacmanRemote::new()) as Arc; 38 | let local_repo = Arc::new(PacmanLocal::new()) as Arc; 39 | let aur = Arc::new(AurRepo::new()) as Arc; 40 | let remote_repo = Arc::new(CachedRepository::new(Arc::new(MergedRepository::new( 41 | vec![pacman_remote_repo.clone(), aur], 42 | )))); 43 | let policy = ResolvePolicy::new( 44 | remote_repo.clone(), 45 | if skip_remote { 46 | 
Arc::new(CachedRepository::new(pacman_remote_repo)) 47 | } else { 48 | Arc::new(EmptyRepository::new()) 49 | }, 50 | Arc::new(CachedRepository::new(local_repo)), 51 | ); 52 | let resolver = TreeResolver::new(policy, box always_depend, box allow_if_pacman); 53 | let initial_package = remote_repo 54 | .find_package(&Depend::from_str(pkg).unwrap()) 55 | .expect("can't search package") 56 | .iter() 57 | .find(|p| p.name() == pkg) 58 | .unwrap() 59 | .clone(); 60 | let solution = resolver.resolve(&[initial_package]).expect("can't resolve"); 61 | assert!(!solution.packages.is_empty(), "solution is empty"); 62 | println!( 63 | "Result: {:#?}", 64 | solution 65 | .strongly_connected_components() 66 | .into_iter() 67 | .map(|pkgs| format!("[{}]", pkgs.iter().map(|pkg| pkg.to_string()).join(", "))) 68 | .collect_vec() 69 | ); 70 | } 71 | 72 | #[rstest] 73 | #[case("systemd", false)] 74 | #[case("electron", false)] 75 | #[case("agda-git", false)] 76 | #[case("agda-git", true)] 77 | fn test_resolve(#[case] pkg: &str, #[case] skip: bool) { 78 | must_resolve(pkg, skip); 79 | } 80 | 81 | #[rstest] 82 | #[case("fcft")] 83 | #[case("agda-git")] 84 | #[case("firedragon")] 85 | fn test_plan(#[case] pkg: &str) { 86 | must_plan(pkg); 87 | } 88 | -------------------------------------------------------------------------------- /src/lib/storage/providers/filesystem.rs: -------------------------------------------------------------------------------- 1 | use std::convert::TryFrom; 2 | use std::io::Cursor; 3 | use std::path::{Path, PathBuf}; 4 | 5 | use async_trait::async_trait; 6 | use tempfile::NamedTempFile; 7 | use tokio::fs::File; 8 | use tokio::io::AsyncReadExt; 9 | 10 | use crate::consts::STORAGE_MEMORY_LIMIT; 11 | use crate::error::StorageError; 12 | use crate::storage::types::*; 13 | 14 | use super::Result; 15 | use super::{get_fullpath, StorageProvider}; 16 | 17 | pub struct FSStorage { 18 | base: PathBuf, 19 | memory_limit: u64, 20 | } 21 | 22 | impl FSStorage { 23 | pub fn 
new(base: impl AsRef) -> Self { 24 | Self { 25 | base: base.as_ref().to_path_buf(), 26 | memory_limit: STORAGE_MEMORY_LIMIT, 27 | } 28 | } 29 | 30 | pub fn new_with_limit(base: impl AsRef, memory_limit: u64) -> Self { 31 | Self { 32 | base: base.as_ref().to_path_buf(), 33 | memory_limit, 34 | } 35 | } 36 | } 37 | 38 | async fn path_exists(path: &Path) -> bool { 39 | tokio::fs::metadata(path).await.is_ok() 40 | } 41 | 42 | async fn file_exists(path: &Path) -> bool { 43 | tokio::fs::metadata(path) 44 | .await 45 | .map(|m| m.is_file()) 46 | .unwrap_or(false) 47 | } 48 | 49 | #[async_trait] 50 | impl StorageProvider for FSStorage { 51 | async fn get_file(&self, path: &Path) -> Result { 52 | let fullpath = get_fullpath(&*self.base, path)?; 53 | if !file_exists(&fullpath).await { 54 | return Err(StorageError::FileNotExists(path.to_path_buf())); 55 | } 56 | 57 | let mut src = File::open(&fullpath).await?; 58 | if src.metadata().await?.len() > self.memory_limit { 59 | let sync_dest = NamedTempFile::new()?; 60 | let mut dest = File::from_std(sync_dest.reopen()?); 61 | 62 | tokio::io::copy(&mut src, &mut dest).await?; 63 | dest.sync_all().await?; 64 | 65 | Ok(ByteStream::try_from(sync_dest)?) 
66 | } else { 67 | let mut buf = vec![]; 68 | src.read_to_end(&mut buf).await?; 69 | 70 | Ok(ByteStream::Memory(Cursor::new(buf))) 71 | } 72 | } 73 | 74 | async fn put_file(&self, path: &Path, mut data: ByteStream) -> Result<()> { 75 | let fullpath = get_fullpath(&*self.base, path)?; 76 | if path_exists(&fullpath).await { 77 | return Err(StorageError::FileExists(path.to_path_buf())); 78 | } 79 | 80 | let mut dest = File::create(&fullpath).await?; 81 | tokio::io::copy(&mut data, &mut dest).await?; 82 | 83 | Ok(()) 84 | } 85 | 86 | async fn delete_file(&self, path: &Path) -> Result<()> { 87 | let fullpath = get_fullpath(&*self.base, path)?; 88 | if !file_exists(&fullpath).await { 89 | return Err(StorageError::FileNotExists(path.to_path_buf())); 90 | } 91 | 92 | tokio::fs::remove_file(fullpath).await?; 93 | 94 | Ok(()) 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /src/lib/resolver/tests.rs: -------------------------------------------------------------------------------- 1 | use std::str::FromStr; 2 | use std::sync::Arc; 3 | 4 | use itertools::Itertools; 5 | use rstest::rstest; 6 | 7 | use crate::tests::*; 8 | 9 | #[rstest] 10 | #[case(vec![pkg!("a"), pkg!("b", "1.0.0", deps!("a")), pkg!("c", "1.0.0", deps!("a")), pkg!("d"), pkg!("e", "1.0.0", deps!("b")), pkg!("f", "1.0.0", deps!("c", "e"))], 11 | "f", 12 | vec![asrt!("a" < "b" < "e" < "f"), asrt!("a" < "c" < "f"), asrt!(!"d")])] 13 | #[case(vec![pkg!("b", "1.0.0", deps!("a>=2")), pkg!("a", "1.0.0"), pkg!("a", "2.0.0")], "b", vec![asrt!("a=2.0.0" < "b"), asrt!(!"a=1.0.0")])] 14 | fn simple_deps( 15 | #[case] pkgs: Vec, 16 | #[case] target: &str, 17 | #[case] asrts: Vec, 18 | ) { 19 | let repo = Arc::new(CustomRepository::new(pkgs)); 20 | let empty_repo = Arc::new(EmptyRepository::new()); 21 | let policy = ResolvePolicy::new(repo.clone(), empty_repo.clone(), empty_repo); 22 | let resolver = TreeResolver::new(policy, box always_depend, box allow_if_pacman); 23 | 24 
| let pkg = repo 25 | .find_package(&Depend::from_str(target).unwrap()) 26 | .unwrap() 27 | .pop() 28 | .unwrap(); 29 | let result = resolver.resolve(&[pkg]).expect("can't find solution"); 30 | let scc = result.strongly_connected_components(); 31 | println!( 32 | "{:?}", 33 | scc.iter() 34 | .map(|pkgs| format!("[{}]", pkgs.iter().map(|pkg| pkg.to_string()).join(", "))) 35 | .collect_vec() 36 | ); 37 | for asrt in asrts { 38 | asrt.assert(&scc.iter().flatten().map(|pkg| pkg.as_ref()).collect_vec()) 39 | } 40 | } 41 | 42 | #[rstest] 43 | #[case(vec![pkg!("a", "1.0.0", deps!("c")), pkg!("b", "1.0.0", deps!("a")), pkg!("c", "1.0.0", deps!("b"))], "a", 44 | vec![asrt!("a"), asrt!("b"), asrt!("c")])] 45 | #[case(vec![pkg!("a", "1.0.0", deps!("c")), pkg!("b", "1.0.0", deps!("a")), pkg!("c", "1.0.0", deps!("b"))], "c", 46 | vec![asrt!("a"), asrt!("b"), asrt!("c")])] 47 | fn cyclic_deps( 48 | #[case] pkgs: Vec, 49 | #[case] target: &str, 50 | #[case] asrts: Vec, 51 | ) { 52 | let repo = Arc::new(CustomRepository::new(pkgs)); 53 | let empty_repo = Arc::new(EmptyRepository::new()); 54 | let policy = ResolvePolicy::new(repo.clone(), empty_repo.clone(), empty_repo); 55 | let resolver = TreeResolver::new(policy, box always_depend, box allow_if_pacman); 56 | 57 | let pkg = repo 58 | .find_package(&Depend::from_str(target).unwrap()) 59 | .unwrap() 60 | .pop() 61 | .unwrap(); 62 | let result = resolver.resolve(&[pkg]).expect("can't find solution"); 63 | let scc = result.strongly_connected_components(); 64 | println!( 65 | "{:?}", 66 | scc.iter() 67 | .map(|pkgs| format!("[{}]", pkgs.iter().map(|pkg| pkg.to_string()).join(", "))) 68 | .collect_vec() 69 | ); 70 | for asrt in asrts { 71 | asrt.assert(&scc.iter().flatten().map(|pkg| pkg.as_ref()).collect_vec()) 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /src/lib/storage/transaction.rs: -------------------------------------------------------------------------------- 1 | use 
std::collections::VecDeque; 2 | use std::path::PathBuf; 3 | 4 | use itertools::Itertools; 5 | use tokio::io::AsyncReadExt; 6 | 7 | use crate::error::StorageError; 8 | 9 | use super::types::*; 10 | use super::StorageProvider; 11 | 12 | // NOTE 13 | // The assertion here doesn't guarantee atomicity because S3 doesn't provide it. 14 | // It can only be used as a naive safety check, and its result can't be trusted 15 | // especially when the file is large. 16 | pub enum TxnAction { 17 | Put(PathBuf, ByteStream), 18 | Delete(PathBuf), 19 | Assertion(PathBuf, Box>) -> Result<()> + Send>), 20 | Barrier, 21 | } 22 | 23 | impl TxnAction { 24 | pub async fn execute(self, target: &T) -> Result<()> { 25 | match self { 26 | TxnAction::Put(key, data) => target.put_file(&key, data).await?, 27 | TxnAction::Delete(key) => target.delete_file(&key).await?, 28 | TxnAction::Barrier => panic!("barrier can't be executed"), 29 | TxnAction::Assertion(key, func) => { 30 | let stream = target.get_file(&key).await.map(Some).or_else(|e| { 31 | if let StorageError::FileNotExists(_) = e { 32 | Ok(None) 33 | } else { 34 | Err(e) 35 | } 36 | })?; 37 | let buf = if let Some(mut stream) = stream { 38 | let mut buf: Vec = Vec::new(); 39 | stream.read_to_end(&mut buf).await?; 40 | Some(buf) 41 | } else { 42 | None 43 | }; 44 | func(buf)?; 45 | } 46 | } 47 | Ok(()) 48 | } 49 | } 50 | 51 | // NOTE 52 | // There's no rollback support now. 53 | // Also, atomicity can't be ensured because S3 doesn't support atomic move operation. 54 | #[derive(Default)] 55 | pub struct Txn { 56 | seq: VecDeque, 57 | } 58 | 59 | impl Txn { 60 | pub fn new() -> Self { 61 | Default::default() 62 | } 63 | pub fn add(&mut self, action: TxnAction) { 64 | self.seq.push_back(action); 65 | } 66 | async fn join_commit( 67 | staging: &mut Vec, 68 | target: &T, 69 | ) -> Result<()> { 70 | let staging_futures = staging 71 | .drain(..) 
72 | .map(|act: TxnAction| act.execute(target)) 73 | .collect_vec(); 74 | futures::future::try_join_all(staging_futures).await?; 75 | staging.clear(); 76 | Ok(()) 77 | } 78 | pub async fn commit(mut self, target: &T) -> Result<()> { 79 | let mut staging = vec![]; 80 | while let Some(action) = self.seq.pop_front() { 81 | match action { 82 | TxnAction::Assertion(_, _) => { 83 | Self::join_commit(&mut staging, target).await?; 84 | action.execute(target).await?; 85 | } 86 | TxnAction::Barrier => Self::join_commit(&mut staging, target).await?, 87 | _ => staging.push(action), 88 | } 89 | } 90 | if !staging.is_empty() { 91 | Self::join_commit(&mut staging, target).await?; 92 | } 93 | Ok(()) 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /tests/pacman_conf/pacman.conf: -------------------------------------------------------------------------------- 1 | # 2 | # /etc/pacman.conf 3 | # 4 | # See the pacman.conf(5) manpage for option and repository directives 5 | 6 | # 7 | # GENERAL OPTIONS 8 | # 9 | [options] 10 | # The following paths are commented out with their default values listed. 11 | # If you wish to use different paths, uncomment and update the paths. 
12 | #RootDir = / 13 | #DBPath = /var/lib/pacman/ 14 | #CacheDir = /var/cache/pacman/pkg/ 15 | LogFile = /var/log/pacman_archer.log 16 | #GPGDir = /etc/pacman.d/gnupg/ 17 | #HookDir = /etc/pacman.d/hooks/ 18 | HoldPkg = pacman glibc 19 | #XferCommand = /usr/bin/curl -L -C - -f -o %o %u 20 | #XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u 21 | #CleanMethod = KeepInstalled 22 | Architecture = auto 23 | 24 | # Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup 25 | #IgnorePkg = 26 | #IgnoreGroup = 27 | 28 | #NoUpgrade = 29 | #NoExtract = 30 | 31 | # Misc options 32 | #UseSyslog 33 | #Color 34 | #NoProgressBar 35 | CheckSpace 36 | #VerbosePkgLists 37 | #ParallelDownloads = 5 38 | 39 | # By default, pacman accepts packages signed by keys that its local keyring 40 | # trusts (see pacman-key and its man page), as well as unsigned packages. 41 | SigLevel = Required DatabaseOptional 42 | LocalFileSigLevel = Optional 43 | #RemoteFileSigLevel = Required 44 | 45 | # NOTE: You must run `pacman-key --init` before first using pacman; the local 46 | # keyring can then be populated with the keys of all official Arch Linux 47 | # packagers with `pacman-key --populate archlinux`. 48 | 49 | # 50 | # REPOSITORIES 51 | # - can be defined here or included from another file 52 | # - pacman will search repositories in the order defined here 53 | # - local/custom mirrors can be added here or in separate files 54 | # - repositories listed first will take precedence when packages 55 | # have identical names, regardless of version number 56 | # - URLs will have $repo replaced by the name of the current repo 57 | # - URLs will have $arch replaced by the name of the architecture 58 | # 59 | # Repository entries are of the format: 60 | # [repo-name] 61 | # Server = ServerName 62 | # Include = IncludePath 63 | # 64 | # The header [repo-name] is crucial - it must be present and 65 | # uncommented to enable the repo. 
66 | # 67 | 68 | # The testing repositories are disabled by default. To enable, uncomment the 69 | # repo name header and Include lines. You can add preferred servers immediately 70 | # after the header, and they will be used before the default mirrors. 71 | 72 | #[testing] 73 | #Include = /etc/pacman.d/mirrorlist 74 | 75 | [core] 76 | Include = mirrorlist 77 | 78 | [extra] 79 | Include = mirrorlist 80 | 81 | #[community-testing] 82 | #Include = /etc/pacman.d/mirrorlist 83 | 84 | [community] 85 | Include = mirrorlist 86 | 87 | # If you want to run 32 bit applications on your x86_64 system, 88 | # enable the multilib repositories as required here. 89 | 90 | #[multilib-testing] 91 | #Include = /etc/pacman.d/mirrorlist 92 | 93 | #[multilib] 94 | #Include = /etc/pacman.d/mirrorlist 95 | 96 | [archlinuxcn] 97 | SigLevel = PackageRequired 98 | Server = https://mirror.sjtu.edu.cn/archlinux-cn/$arch 99 | 100 | # An example of a custom package repository. See the pacman manpage for 101 | # tips on creating your own repositories. 
102 | [custom] 103 | SigLevel = Optional TrustAll 104 | Server = file:///home/custompkgs 105 | -------------------------------------------------------------------------------- /src/lib/parser/tests.rs: -------------------------------------------------------------------------------- 1 | use alpm::SigLevel; 2 | 3 | use crate::parser::{PacmanConf, PacmanConfCtx, SyncDB}; 4 | 5 | #[test] 6 | fn must_parse_pacman() { 7 | let sig_level = SigLevel::PACKAGE | SigLevel::DATABASE | SigLevel::DATABASE_OPTIONAL; 8 | let servers = |repo: &str| -> Vec { 9 | vec![ 10 | format!("http://mirrors.evowise.com/archlinux/{}/os/x86_64", repo), 11 | format!("http://mirror.rackspace.com/archlinux/{}/os/x86_64", repo), 12 | format!("https://mirror.rackspace.com/archlinux/{}/os/x86_64", repo), 13 | ] 14 | }; 15 | 16 | let expect_mirrors = vec![ 17 | "http://mirrors.evowise.com/archlinux/$repo/os/$arch", 18 | "http://mirror.rackspace.com/archlinux/$repo/os/$arch", 19 | "https://mirror.rackspace.com/archlinux/$repo/os/$arch", 20 | ]; 21 | let expect_mirror_list = include_str!("../../../tests/pacman_conf/mirrorlist_clean"); 22 | let expect_sync_dbs = vec![ 23 | SyncDB { 24 | name: String::from("core"), 25 | sig_level, 26 | servers: servers("core"), 27 | usage: vec![String::from("All")], 28 | }, 29 | SyncDB { 30 | name: String::from("extra"), 31 | sig_level, 32 | servers: servers("extra"), 33 | usage: vec![String::from("All")], 34 | }, 35 | SyncDB { 36 | name: String::from("community"), 37 | sig_level, 38 | servers: servers("community"), 39 | usage: vec![String::from("All")], 40 | }, 41 | SyncDB { 42 | name: String::from("archlinuxcn"), 43 | sig_level: SigLevel::PACKAGE | SigLevel::DATABASE | SigLevel::DATABASE_OPTIONAL, 44 | servers: vec![String::from( 45 | "https://mirror.sjtu.edu.cn/archlinux-cn/x86_64", 46 | )], 47 | usage: vec![String::from("All")], 48 | }, 49 | SyncDB { 50 | name: String::from("custom"), 51 | sig_level: SigLevel::PACKAGE 52 | | SigLevel::PACKAGE_OPTIONAL 53 | | 
SigLevel::DATABASE 54 | | SigLevel::DATABASE_OPTIONAL 55 | | SigLevel::PACKAGE_MARGINAL_OK 56 | | SigLevel::PACKAGE_UNKNOWN_OK 57 | | SigLevel::DATABASE_MARGINAL_OK 58 | | SigLevel::DATABASE_UNKNOWN_OK, 59 | servers: vec![String::from("file:///home/custompkgs")], 60 | usage: vec![String::from("All")], 61 | }, 62 | ]; 63 | 64 | let parser = PacmanConf::with(&PacmanConfCtx::new().path("tests/pacman_conf/pacman.conf")) 65 | .expect("unable to parse config"); 66 | let dbs = parser.sync_dbs(); 67 | assert_eq!(dbs, &expect_sync_dbs, "sync dbs mismatch"); 68 | 69 | let mirrors = parser.host_mirrors(); 70 | let mirror_list = parser.mirror_list(); 71 | assert_eq!(mirrors, expect_mirrors, "mirrors mismatch"); 72 | assert_eq!(mirror_list, expect_mirror_list, "mirror list mismatch"); 73 | 74 | assert_eq!(parser.option("nonsense"), None); 75 | assert_eq!(parser.option("GPGDir"), Some("/etc/pacman.d/gnupg/")); 76 | assert_eq!(parser.option("LogFile"), Some("/var/log/pacman_archer.log")); 77 | assert_eq!(parser.option("RootDir"), Some("/")); 78 | 79 | let parser_with_root = PacmanConf::with( 80 | &PacmanConfCtx::new() 81 | .path("tests/pacman_conf/pacman.conf") 82 | .root("/archer"), 83 | ) 84 | .expect("unable to parse config"); 85 | assert_eq!(parser_with_root.option("RootDir"), Some("/archer")); 86 | } 87 | -------------------------------------------------------------------------------- /src/lib/types/depend.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::{Display, Formatter}; 2 | use std::str::FromStr; 3 | 4 | use alpm::Dep; 5 | use ranges::{GenericRange, Ranges}; 6 | use serde::de::Visitor; 7 | use serde::{Deserialize, Deserializer, Serialize, Serializer}; 8 | 9 | use super::*; 10 | 11 | #[derive(Debug, Eq, PartialEq, Clone, Hash)] 12 | pub struct Depend { 13 | pub name: String, 14 | pub version: DependVersion, 15 | } 16 | 17 | impl<'de> Deserialize<'de> for Depend { 18 | fn deserialize(deserializer: D) -> 
Result>::Error> 19 | where 20 | D: Deserializer<'de>, 21 | { 22 | use serde::de::Error; 23 | struct VisitorImpl; 24 | 25 | impl<'de> Visitor<'de> for VisitorImpl { 26 | type Value = Depend; 27 | 28 | fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { 29 | write!( 30 | formatter, 31 | "dependency name(like 'test') with or without version constraint" 32 | ) 33 | } 34 | 35 | fn visit_str(self, v: &str) -> Result 36 | where 37 | E: Error, 38 | { 39 | Ok(Depend::from_str(v).unwrap()) 40 | } 41 | } 42 | 43 | deserializer.deserialize_str(VisitorImpl) 44 | } 45 | } 46 | 47 | impl Serialize for Depend { 48 | fn serialize(&self, serializer: S) -> Result<::Ok, ::Error> 49 | where 50 | S: Serializer, 51 | { 52 | serializer.serialize_str(&self.to_string()) 53 | } 54 | } 55 | 56 | impl Depend { 57 | pub fn satisfied_by(&self, candidate: &Package) -> bool { 58 | (candidate.name() == self.name && self.version.satisfied_by(&candidate.version())) 59 | || candidate 60 | .provides() 61 | .iter() 62 | .any(|provide| provide.name == self.name && self.version.contains(&provide.version)) 63 | } 64 | pub fn split_ver(&self) -> Vec { 65 | self.version 66 | .split() 67 | .into_iter() 68 | .map(|ver| Self { 69 | name: self.name.clone(), 70 | version: ver, 71 | }) 72 | .collect() 73 | } 74 | } 75 | 76 | impl Display for Depend { 77 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 78 | match self.split_ver().as_slice() { 79 | [v] => write!(f, "{} {}", self.name, v.version), 80 | [v1, v2] => write!(f, "{} {} && {}", self.name, v1.version, v2.version), 81 | _ => write!(f, "{} {}", self.name, self.version), 82 | } 83 | } 84 | } 85 | 86 | impl From<&Package> for Depend { 87 | fn from(pkg: &Package) -> Self { 88 | Self { 89 | name: pkg.name().to_string(), 90 | version: DependVersion(Ranges::from(pkg.version().into_owned())), 91 | } 92 | } 93 | } 94 | 95 | macro_rules! 
split_cmp_op { 96 | ($s: ident, $sep: expr, $rel: expr) => { 97 | $s.split_once($sep).map(|(name, ver)| { 98 | ( 99 | name.to_string(), 100 | DependVersion(Ranges::from($rel(Version(ver.to_string())))), 101 | ) 102 | }) 103 | }; 104 | } 105 | 106 | impl FromStr for Depend { 107 | type Err = !; 108 | 109 | fn from_str(s: &str) -> std::result::Result { 110 | // TODO parse neq (!=? <>?) 111 | let (name, version) = split_cmp_op!(s, ">=", GenericRange::new_at_least) 112 | .or_else(|| split_cmp_op!(s, "<=", GenericRange::new_at_most)) 113 | .or_else(|| split_cmp_op!(s, ">", GenericRange::new_greater_than)) 114 | .or_else(|| split_cmp_op!(s, "<", GenericRange::new_less_than)) 115 | .or_else(|| split_cmp_op!(s, "=", GenericRange::singleton)) 116 | .unwrap_or((s.to_string(), DependVersion(Ranges::full()))); 117 | Ok(Self { name, version }) 118 | } 119 | } 120 | 121 | impl<'a> From> for Depend { 122 | fn from(dep: Dep<'a>) -> Self { 123 | Self { 124 | name: dep.name().to_string(), 125 | version: dep.depmodver().into(), 126 | } 127 | } 128 | } 129 | -------------------------------------------------------------------------------- /src/lib/resolver/types/graph.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::fmt::Display; 3 | use std::hash::Hash; 4 | use std::ops::Index; 5 | 6 | use indexmap::IndexSet; 7 | use itertools::Itertools; 8 | use online_scc_graph::{EdgeEffect as BaseEdgeEffect, Graph as BaseGraph}; 9 | 10 | use crate::error::{GraphError, Result}; 11 | 12 | #[derive(Debug, Clone)] 13 | pub struct SCCGraph { 14 | base: BaseGraph, 15 | proj: HashMap, 16 | proj_rev: IndexSet, 17 | } 18 | 19 | #[derive(Debug, Clone, Eq, PartialEq)] 20 | pub enum EdgeEffect { 21 | None, 22 | NewEdge(Option>), // is_cycle 23 | } 24 | 25 | impl Default for SCCGraph { 26 | fn default() -> Self { 27 | Self { 28 | base: Default::default(), 29 | proj: Default::default(), 30 | proj_rev: Default::default(), 31 | } 
32 | } 33 | } 34 | 35 | impl SCCGraph { 36 | pub fn new() -> Self { 37 | Default::default() 38 | } 39 | 40 | pub fn with_capacity(n: usize) -> Self { 41 | Self { 42 | base: BaseGraph::with_capacity(n), 43 | proj: HashMap::with_capacity(n), 44 | proj_rev: IndexSet::with_capacity(n), 45 | } 46 | } 47 | 48 | pub fn count(&self) -> usize { 49 | self.base.count() 50 | } 51 | 52 | pub fn has_cycle(&self) -> bool { 53 | self.base.has_cycle() 54 | } 55 | 56 | pub fn add_node(&mut self, n: T) { 57 | if !self.proj_rev.contains(&n) { 58 | let idx = self.base.add_node(); 59 | self.proj.insert(n.clone(), idx); 60 | self.proj_rev.insert(n); 61 | } 62 | } 63 | 64 | pub fn insert(&mut self, i: &T, j: &T) -> Result> { 65 | let ix = self.proj.get(i).ok_or(GraphError::InvalidNode)?; 66 | let jx = self.proj.get(j).ok_or(GraphError::InvalidNode)?; 67 | 68 | let eff = self 69 | .base 70 | .insert(*ix, *jx) 71 | .map_err(GraphError::SCCGraphError)?; 72 | 73 | Ok(match eff { 74 | BaseEdgeEffect::None => EdgeEffect::None, 75 | BaseEdgeEffect::NewEdge(Some(cycles)) => EdgeEffect::NewEdge(Some( 76 | cycles 77 | .into_iter() 78 | .map(|p| self.proj_rev.index(p).clone()) 79 | .collect(), 80 | )), 81 | BaseEdgeEffect::NewEdge(None) => EdgeEffect::NewEdge(None), 82 | }) 83 | } 84 | 85 | pub fn nodes(&self) -> Vec<&T> { 86 | self.proj_rev.iter().collect() 87 | } 88 | 89 | pub fn edges(&self) -> Vec<(&T, &T)> { 90 | self.base 91 | .edges() 92 | .map(|(i, j)| (self.proj_rev.index(i), self.proj_rev.index(j))) 93 | .collect() 94 | } 95 | 96 | pub fn strongly_connected_components(&self, reversed: bool) -> Vec> { 97 | self.base 98 | .SCC(reversed) 99 | .into_iter() 100 | .map(|component| { 101 | component 102 | .into_iter() 103 | .map(|node| self.proj_rev.index(node)) 104 | .collect() 105 | }) 106 | .collect() 107 | } 108 | 109 | pub fn merge(&mut self, other: &Self) -> Result<()> { 110 | let missing_vertices = other 111 | .proj_rev 112 | .difference(&self.proj_rev) 113 | .cloned() 114 | 
.collect_vec(); 115 | for v in missing_vertices { 116 | self.add_node(v.clone()); 117 | } 118 | for (i, j) in other.edges() { 119 | self.insert(i, j)?; 120 | } 121 | Ok(()) 122 | } 123 | } 124 | 125 | impl SCCGraph { 126 | pub fn dot(&self) -> String { 127 | let mut output = String::from("digraph {\n"); 128 | for (idx, data) in self.proj_rev.iter().enumerate() { 129 | output.push_str(&*format!(" {} [ label = \"{}\" ]\n", idx, data)); 130 | } 131 | for (i, j) in self.base.edges() { 132 | output.push_str(&*format!(" {} -> {} [ ]\n", i, j)); 133 | } 134 | output.push('}'); 135 | output 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /src/lib/storage/pool.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | 3 | use tokio::sync::Mutex; 4 | 5 | use crate::storage::transaction::{Txn, TxnAction}; 6 | use crate::storage::StorageProvider; 7 | 8 | use super::types::*; 9 | 10 | pub struct PackagePool { 11 | remote: T, 12 | // remote storage 13 | local: PathBuf, 14 | // local cache base 15 | remote_map: Mutex, 16 | // meta->key 17 | local_map: Mutex, 18 | // meta->filename 19 | stage_map: MetaKeyMap, // meta->path 20 | } 21 | 22 | impl PackagePool { 23 | pub fn new(remote: T, local: PathBuf) -> Self { 24 | Self { 25 | remote, 26 | local, 27 | remote_map: Mutex::new(Default::default()), 28 | local_map: Mutex::new(Default::default()), 29 | stage_map: Default::default(), 30 | } 31 | } 32 | 33 | // generate & commit transaction to remote, and clear stage area 34 | pub async fn commit(&mut self) -> Result<()> { 35 | let mut txn = Txn::new(); 36 | // locking remote and local maps, preventing inconsistency when getting file 37 | let mut remote_map = self.remote_map.lock().await; 38 | let mut local_map = self.local_map.lock().await; 39 | 40 | for (meta, path) in &self.stage_map { 41 | let unit = LocalPackageUnit::new(meta, path); 42 | // update remote & local maps 43 | 
let key = PathBuf::from(unit.canonicalize_filename()); 44 | remote_map.insert(meta.clone(), key.clone()); 45 | local_map.insert(meta.clone(), key.clone()); 46 | 47 | // put package transaction 48 | txn.add(TxnAction::Put(key.clone(), ByteStream::from_path(path)?)); 49 | 50 | // pre-cache package 51 | // file will be copied into dest before txn is committed 52 | // this is safe because we locked local_map 53 | tokio::fs::copy(path, self.local.join(key)).await?; 54 | } 55 | 56 | // ensure all packages are saved 57 | txn.add(TxnAction::Barrier); 58 | 59 | // generate & put lock file 60 | txn.add(TxnAction::Delete(PathBuf::from("index.lock"))); 61 | txn.add(TxnAction::Barrier); // ensure order (s3 doesn't support atomic renaming, so...) 62 | let new_lock_file = LockFile::from(&*remote_map); 63 | let lockfile_data = serde_json::to_vec(&new_lock_file)?; 64 | txn.add(TxnAction::Put( 65 | PathBuf::from("index.lock"), 66 | ByteStream::from(lockfile_data), 67 | )); 68 | 69 | // commit transaction 70 | txn.commit(&self.remote).await?; 71 | 72 | // update stage map 73 | self.stage_map.clear(); 74 | 75 | Ok(()) 76 | } 77 | 78 | // stage built package 79 | pub fn stage(&mut self, unit: LocalPackageUnit) { 80 | self.stage_map.insert(unit.meta, unit.path); 81 | } 82 | 83 | // get package path (first from stage, then local cache, then remote) 84 | pub async fn get(&mut self, meta: &PackageMeta) -> Result> { 85 | if let Some(path) = self.stage_map.get(meta) { 86 | // exists in staged area 87 | return Ok(Some(path.clone())); 88 | } 89 | 90 | let maybe_local_filename = self.local_map.lock().await.get(meta).cloned(); 91 | if let Some(filename) = maybe_local_filename { 92 | // exists in local cache 93 | return Ok(Some(self.local.join(filename))); 94 | } 95 | 96 | let maybe_remote_key = self.remote_map.lock().await.get(meta).cloned(); 97 | return if let Some(key) = maybe_remote_key { 98 | // optimistic lock: first try to download 99 | let data = self.remote.get_file(&key).await?; 100 | 
let mut local_map = self.local_map.lock().await; 101 | if let Some(filename) = local_map.get(meta) { 102 | // conflict, take the previously downloaded file 103 | Ok(Some(self.local.join(filename))) 104 | } else { 105 | // save the file into local cache and return its path 106 | // NOTE 107 | // assume that remote file is at root directory 108 | let local_path = self.local.join(&key); // take its remote key as cache name 109 | data.into_file(&local_path).await?; 110 | 111 | local_map.insert(meta.clone(), key); // update local map 112 | 113 | Ok(Some(local_path)) 114 | } 115 | } else { 116 | Ok(None) 117 | }; 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /src/lib/error.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | 3 | use online_scc_graph::Error as SCCGraphError; 4 | use rusoto_s3::{DeleteObjectError, GetObjectError, PutObjectError}; 5 | use thiserror::Error; 6 | 7 | use crate::types::*; 8 | 9 | pub type Result = std::result::Result; 10 | 11 | #[derive(Debug, Eq, PartialEq, Error)] 12 | pub enum GpgError { 13 | #[error("Unknown fatal error")] 14 | Unknown, 15 | #[error("At least a signature was bad")] 16 | BadSignature, 17 | #[error("Interrupted by signal")] 18 | Signal, 19 | } 20 | 21 | #[derive(Debug, Eq, PartialEq, Error)] 22 | pub enum MakepkgError { 23 | #[error("Unknown cause of failure")] 24 | Unknown, 25 | #[error("Error in configuration file")] 26 | Configuration, 27 | #[error("User specified an invalid option. This is likely an internal error. 
File a bug report if you meet one.")] 28 | InvalidOption, 29 | #[error("Error in user-supplied function in PKGBUILD")] 30 | InvalidFunction, 31 | #[error("Failed to create a viable package")] 32 | InviablePackage, 33 | #[error("A source or auxiliary file specified in the PKGBUILD is missing")] 34 | MissingSrc, 35 | #[error("The PKGDIR is missing")] 36 | MissingPkgDir, 37 | #[error("User attempted to run makepkg as root")] 38 | RunAsRoot, 39 | #[error("User lacks permissions to build or install to a given location")] 40 | NoPermission, 41 | #[error("Error parsing PKGBUILD")] 42 | ParseError, 43 | #[error("Programs necessary to run makepkg are missing")] 44 | MissingProgram, 45 | #[error("Specified GPG key does not exist or failed to sign package")] 46 | SignFailure, 47 | #[error("Interrupted by signal")] 48 | Signal, 49 | } 50 | 51 | #[derive(Debug, Eq, PartialEq, Error)] 52 | pub enum CommandError { 53 | #[error("unknown command")] 54 | Unknown, 55 | #[error("pacman")] 56 | Pacman, 57 | #[error("makepkg: {0}")] 58 | Makepkg(MakepkgError), 59 | #[error("chown")] 60 | Chown, 61 | #[error("gpg: {0}")] 62 | Gpg(GpgError), 63 | #[error("pacman-key")] 64 | PacmanKey, 65 | #[error("mkarchroot")] 66 | MkArchRoot, 67 | #[error("cp")] 68 | Cp, 69 | } 70 | 71 | #[derive(Debug, Error)] 72 | pub enum BuildError { 73 | #[error("io error: {0}")] 74 | IOError(#[from] std::io::Error), 75 | #[error("command execution failure: {0}")] 76 | CommandError(#[from] CommandError), 77 | #[error("unable to acquire lock")] 78 | LockError, 79 | } 80 | 81 | #[derive(Debug, Error)] 82 | pub enum S3Error { 83 | #[error("get error: {0}")] 84 | GetError(#[from] rusoto_core::RusotoError), 85 | #[error("put error: {0}")] 86 | PutError(#[from] rusoto_core::RusotoError), 87 | #[error("delete error: {0}")] 88 | DeleteError(#[from] rusoto_core::RusotoError), 89 | #[error("builder error: {0}")] 90 | BuilderError(String), 91 | } 92 | 93 | #[derive(Debug, Error)] 94 | pub enum StorageError { 95 | 
#[error("invalid path: {0}")] 96 | InvalidPath(PathBuf), 97 | #[error("io error: {0}")] 98 | IOError(#[from] std::io::Error), 99 | #[error("file exists: {0}")] 100 | FileExists(PathBuf), 101 | #[error("file doesn't exist: {0}")] 102 | FileNotExists(PathBuf), 103 | #[error("storage is in an inconsistent state")] 104 | Conflict, 105 | #[error("s3 error: {0}")] 106 | S3Error(#[from] S3Error), 107 | #[error("json error: {0}")] 108 | JSONError(#[from] serde_json::Error), 109 | } 110 | 111 | #[derive(Debug, Error)] 112 | pub enum ParseError { 113 | #[error("pacman: {0}")] 114 | PacmanError(String), 115 | #[error("command execution failure: {0}")] 116 | CommandError(CommandError), 117 | #[error("io error: {0}")] 118 | IOError(#[from] std::io::Error), 119 | } 120 | 121 | #[derive(Debug, Clone, Eq, PartialEq, Error)] 122 | pub enum DependencyError { 123 | #[error("missing dependency - {0}")] 124 | MissingDependency(String), 125 | #[error("conflict dependency - {0}")] 126 | ConflictDependency(String), 127 | #[error("cyclic dependency - {0:?}")] 128 | CyclicDependency(Vec), 129 | } 130 | 131 | #[derive(Debug, Clone, Eq, PartialEq, Error)] 132 | pub enum GraphError { 133 | #[error("internal scc graph error - {0}")] 134 | SCCGraphError(#[from] SCCGraphError), 135 | #[error("invalid node")] 136 | InvalidNode, 137 | } 138 | 139 | #[derive(Debug, Error)] 140 | pub enum Error { 141 | #[error("pacman error: {0}")] 142 | PacmanError(#[from] alpm::Error), 143 | #[error("aur error: {0}")] 144 | AurError(#[from] raur::Error), 145 | #[error("parse error: {0}")] 146 | ParseError(#[from] ParseError), 147 | #[error("dependency error: {0}")] 148 | DependencyError(#[from] DependencyError), 149 | #[error("max recursion depth exceeded")] 150 | RecursionError, 151 | #[error("internal graph error: {0}")] 152 | GraphError(#[from] GraphError), 153 | #[error("io error: {0}")] 154 | IOError(#[from] std::io::Error), 155 | #[error("unrecognized archive format")] 156 | ArchiveError, 157 | 
#[error("invalid package format")] 158 | PackageError, 159 | #[error("storage error: {0}")] 160 | StorageError(#[from] StorageError), 161 | #[error("build error: {0}")] 162 | BuildError(#[from] BuildError), 163 | } 164 | -------------------------------------------------------------------------------- /src/lib/repository/tests.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::str::FromStr; 3 | use std::sync::{Arc, Mutex}; 4 | 5 | use itertools::Itertools; 6 | use rstest::rstest; 7 | 8 | use crate::error::Result; 9 | use crate::tests::*; 10 | 11 | #[derive(Debug, Clone)] 12 | pub struct DebugRepository { 13 | inner: ArcRepo, 14 | query_map: Arc>>, 15 | } 16 | 17 | impl DebugRepository { 18 | pub fn new(inner: ArcRepo) -> Self { 19 | DebugRepository { 20 | inner, 21 | query_map: Default::default(), 22 | } 23 | } 24 | 25 | pub fn get_count(&self, pkg: &Depend) -> usize { 26 | *self.query_map.lock().unwrap().get(pkg).unwrap_or(&0) 27 | } 28 | } 29 | 30 | impl Repository for DebugRepository { 31 | fn find_package(&self, pkg: &Depend) -> Result> { 32 | self.query_map 33 | .lock() 34 | .unwrap() 35 | .entry(pkg.clone()) 36 | .and_modify(|count| *count += 1) 37 | .or_insert(1); 38 | self.inner.find_package(pkg) 39 | } 40 | } 41 | 42 | #[rstest] 43 | #[case(PacmanLocal::new(), "m4")] 44 | #[case(PacmanRemote::new(), "electron")] 45 | #[case(AurRepo::new(), "systemd-git")] 46 | #[case(CustomRepository::new(vec ! [pkg ! ("a-alt", "1.0.0", vec ! [], vec ! [], vec ! [], deps ! ("a")), pkg ! ("b"), pkg ! 
("a")]), "a")] 47 | fn must_search(#[case] repo: impl Repository, #[case] name: &str) { 48 | wait_pacman_lock(); 49 | let dep = dep!(name); 50 | let results = repo 51 | .find_package(&dep) 52 | .expect("failed to search aur package"); 53 | let first_pkg = results.first().expect("no package found"); 54 | assert_eq!(first_pkg.name(), name, "first package incorrect"); 55 | assert!( 56 | results.iter().all(|pkg| dep.satisfied_by(pkg)), 57 | "packages incorrect" 58 | ); 59 | } 60 | 61 | #[rstest] 62 | #[case(PacmanLocal::new(), deps!("m4", "bash"))] 63 | #[case(PacmanRemote::new(), deps!("electron", "systemd"))] 64 | #[case(AurRepo::new(), deps!("systemd-git", "agda-git"))] 65 | #[case(CustomRepository::new(vec![pkg!("a-alt", "1.0.0", vec![], vec![], vec![], deps!("a")), pkg!("b"), pkg!("a")]), deps!("a", "b"))] 66 | fn must_search_multi(#[case] repo: impl Repository, #[case] deps: Vec) { 67 | wait_pacman_lock(); 68 | let results = repo 69 | .find_packages(&*deps) 70 | .expect("failed to search aur package"); 71 | assert!( 72 | results.keys().all(|key| deps.contains(key)), 73 | "unexpected key in result" 74 | ); 75 | for req in &deps { 76 | let pkgs = results.get(req).expect("missing package in result"); 77 | let first_pkg = pkgs.first().expect("no package found"); 78 | assert_eq!(first_pkg.name(), req.name, "first package incorrect"); 79 | assert!( 80 | pkgs.iter().all(|pkg| req.satisfied_by(pkg)), 81 | "packages incorrect" 82 | ); 83 | } 84 | } 85 | 86 | #[test] 87 | fn must_merge() { 88 | let repo_1 = CustomRepository::new(vec![ 89 | pkg!("a"), 90 | pkg!("a-alt", "1.0.0", vec![], vec![], vec![], deps!("a")), 91 | pkg!("b"), 92 | ]); 93 | let repo_2 = CustomRepository::new(vec![ 94 | pkg!("a"), 95 | pkg!("a-alt-2", "1.0.0", vec![], vec![], vec![], deps!("a")), 96 | pkg!("c"), 97 | ]); 98 | let merged = MergedRepository::new(vec![Arc::new(repo_1), Arc::new(repo_2)]); 99 | 100 | let result = merged.find_package(&dep!("c")).expect("can't find package"); 101 | 
asrt!("c").assert(&result.iter().collect_vec()); 102 | 103 | let asrts = vec![asrt!("a"), asrt!("a-alt"), asrt!(!"a-alt2")]; 104 | let result = merged.find_package(&dep!("a")).expect("can't find package"); 105 | for asrt in asrts { 106 | asrt.assert(&result.iter().collect_vec()); 107 | } 108 | 109 | let results = merged 110 | .find_packages(&*deps!("b", "c")) 111 | .expect("can't find packages"); 112 | results 113 | .get(&dep!("b")) 114 | .expect("can't find packages") 115 | .iter() 116 | .any(|pkg| dep!("b").satisfied_by(pkg)); 117 | results 118 | .get(&dep!("c")) 119 | .expect("can't find packages") 120 | .iter() 121 | .any(|pkg| dep!("c").satisfied_by(pkg)); 122 | } 123 | 124 | #[test] 125 | fn must_cache() { 126 | let inner_repo = CustomRepository::new(vec![pkg!("a"), pkg!("b"), pkg!("c"), pkg!("d")]); 127 | let debug_repo = Arc::new(DebugRepository::new(Arc::new(inner_repo))); 128 | let repo = CachedRepository::new(debug_repo.clone()); 129 | 130 | assert_eq!(debug_repo.get_count(&dep!("a")), 0); 131 | repo.find_package(&dep!("a")).unwrap(); 132 | assert_eq!(debug_repo.get_count(&dep!("a")), 1); 133 | repo.find_package(&dep!("a")).unwrap(); 134 | assert_eq!( 135 | debug_repo.get_count(&dep!("a")), 136 | 1, 137 | "single find request not cached" 138 | ); 139 | 140 | assert_eq!(debug_repo.get_count(&dep!("b")), 0); 141 | assert_eq!(debug_repo.get_count(&dep!("c")), 0); 142 | repo.find_packages(&*deps!("b", "c")).unwrap(); 143 | assert_eq!( 144 | debug_repo.get_count(&dep!("b")), 145 | 1, 146 | "multiple find request not cached" 147 | ); 148 | assert_eq!( 149 | debug_repo.get_count(&dep!("c")), 150 | 1, 151 | "multiple find request not cached" 152 | ); 153 | 154 | repo.find_packages(&*deps!("a", "c")).unwrap(); 155 | assert_eq!( 156 | debug_repo.get_count(&dep!("a")), 157 | 1, 158 | "multiple find request not cached" 159 | ); 160 | assert_eq!( 161 | debug_repo.get_count(&dep!("c")), 162 | 1, 163 | "multiple find request not cached" 164 | ); 165 | } 166 | 
-------------------------------------------------------------------------------- /src/lib/types/pacman.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::{Display, Formatter}; 2 | 3 | use super::*; 4 | 5 | macro_rules! option_owned { 6 | ($e: expr) => { 7 | $e.map(ToOwned::to_owned) 8 | }; 9 | } 10 | 11 | #[derive(Debug, Eq, PartialEq, Clone, Hash)] 12 | pub struct PacmanFile { 13 | name: String, 14 | size: i64, 15 | mode: u32, 16 | } 17 | 18 | #[derive(Debug, Eq, PartialEq, Clone, Hash)] 19 | pub struct OwnedPacmanPackage { 20 | pub name: String, 21 | pub should_ignore: bool, 22 | pub filename: String, 23 | pub base: Option, 24 | pub version: Version, 25 | pub origin: alpm::PackageFrom, 26 | pub desc: Option, 27 | pub url: Option, 28 | pub build_date: chrono::NaiveDateTime, 29 | pub install_date: Option, 30 | pub packager: Option, 31 | pub md5sum: Option, 32 | pub sha256sum: Option, 33 | pub arch: Option, 34 | pub size: i64, 35 | pub install_size: i64, 36 | pub reason: alpm::PackageReason, 37 | pub validation: alpm::PackageValidation, 38 | pub licenses: Vec, 39 | pub groups: Vec, 40 | pub depends: Vec, 41 | pub optdepends: Vec, 42 | pub checkdepends: Vec, 43 | pub makedepends: Vec, 44 | pub conflicts: Vec, 45 | pub provides: Vec, 46 | pub replaces: Vec, 47 | pub files: Vec, 48 | pub backup: Vec, 49 | pub db: Option, 50 | pub required_by: Vec, 51 | pub optional_for: Vec, 52 | pub base64_sig: Option, 53 | pub has_scriptlet: bool, 54 | } 55 | 56 | impl Default for OwnedPacmanPackage { 57 | fn default() -> Self { 58 | Self { 59 | name: Default::default(), 60 | should_ignore: Default::default(), 61 | filename: Default::default(), 62 | base: Default::default(), 63 | version: Version(String::from("0")), 64 | origin: alpm::PackageFrom::File, 65 | desc: Default::default(), 66 | url: Default::default(), 67 | build_date: chrono::NaiveDateTime::from_timestamp(0, 0), 68 | install_date: Default::default(), 69 | packager: 
Default::default(), 70 | md5sum: Default::default(), 71 | sha256sum: Default::default(), 72 | arch: Default::default(), 73 | size: Default::default(), 74 | install_size: Default::default(), 75 | reason: alpm::PackageReason::Explicit, 76 | validation: alpm::PackageValidation::NONE, 77 | licenses: Default::default(), 78 | groups: Default::default(), 79 | depends: Default::default(), 80 | optdepends: Default::default(), 81 | checkdepends: Default::default(), 82 | makedepends: Default::default(), 83 | conflicts: Default::default(), 84 | provides: Default::default(), 85 | replaces: Default::default(), 86 | files: Default::default(), 87 | backup: Default::default(), 88 | db: Default::default(), 89 | required_by: Default::default(), 90 | optional_for: Default::default(), 91 | base64_sig: Default::default(), 92 | has_scriptlet: Default::default(), 93 | } 94 | } 95 | } 96 | 97 | impl Display for OwnedPacmanPackage { 98 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 99 | write!(f, "{} {}", self.name, self.version) 100 | } 101 | } 102 | 103 | impl From> for OwnedPacmanPackage { 104 | fn from(pkg: PacmanPackage) -> Self { 105 | Self::from(&pkg) 106 | } 107 | } 108 | 109 | impl From<&PacmanPackage<'_>> for OwnedPacmanPackage { 110 | fn from(pkg: &PacmanPackage) -> Self { 111 | println!("converting {}", pkg.name()); 112 | Self { 113 | name: pkg.name().to_owned(), 114 | should_ignore: pkg.should_ignore(), 115 | // TODO a workaround for upstream bug alpm #18 116 | // filename: pkg.filename().to_owned(), 117 | filename: String::new(), 118 | base: option_owned!(pkg.base()), 119 | version: Version(pkg.version().to_string()), 120 | origin: pkg.origin(), 121 | desc: pkg.desc().map(ToOwned::to_owned), 122 | url: option_owned!(pkg.url()), 123 | build_date: chrono::NaiveDateTime::from_timestamp(pkg.build_date(), 0), 124 | install_date: pkg 125 | .install_date() 126 | .map(|dt| chrono::NaiveDateTime::from_timestamp(dt, 0)), 127 | packager: option_owned!(pkg.packager()), 128 | 
md5sum: option_owned!(pkg.md5sum()), 129 | sha256sum: option_owned!(pkg.sha256sum()), 130 | arch: option_owned!(pkg.arch()), 131 | size: pkg.size(), 132 | install_size: pkg.isize(), 133 | reason: pkg.reason(), 134 | validation: pkg.validation(), 135 | licenses: vec![], 136 | groups: vec![], 137 | depends: pkg.depends().iter().map(Depend::from).collect(), 138 | optdepends: pkg.optdepends().iter().map(Depend::from).collect(), 139 | checkdepends: pkg.checkdepends().iter().map(Depend::from).collect(), 140 | makedepends: pkg.makedepends().iter().map(Depend::from).collect(), 141 | conflicts: pkg.conflicts().iter().map(Depend::from).collect(), 142 | provides: pkg.provides().iter().map(Depend::from).collect(), 143 | replaces: pkg.replaces().iter().map(Depend::from).collect(), 144 | files: vec![], 145 | backup: vec![], 146 | db: pkg.db().map(|db| db.name().to_owned()), 147 | required_by: vec![], 148 | optional_for: vec![], 149 | base64_sig: option_owned!(pkg.base64_sig()), 150 | has_scriptlet: pkg.has_scriptlet(), 151 | } 152 | } 153 | } 154 | -------------------------------------------------------------------------------- /src/lib/database/types/pacman.rs: -------------------------------------------------------------------------------- 1 | use std::str::FromStr; 2 | 3 | use derive_builder::Builder; 4 | use pkginfo::PkgInfo; 5 | use serde::{Deserialize, Serialize}; 6 | use serde_with::skip_serializing_none; 7 | 8 | use crate::types::*; 9 | 10 | #[skip_serializing_none] 11 | #[derive(Serialize, Deserialize, Clone, Eq, PartialEq, Debug, Builder)] 12 | #[builder(pattern = "owned")] 13 | pub struct PacmanEntry { 14 | /// file name 15 | #[serde(rename = "FILENAME")] 16 | pub file_name: String, 17 | /// name 18 | #[serde(rename = "NAME")] 19 | pub name: String, 20 | /// base 21 | #[serde(rename = "BASE")] 22 | #[builder(default)] 23 | pub base: Option, 24 | /// version 25 | #[serde(rename = "VERSION")] 26 | pub version: Version, 27 | /// description 28 | #[serde(rename = "DESC")] 
29 | #[builder(default)] 30 | pub description: Option, 31 | /// package groups 32 | #[serde(rename = "GROUPS")] 33 | #[builder(default)] 34 | pub groups: Option>, 35 | /// tar.xz archive size 36 | #[serde(rename = "CSIZE")] 37 | pub compressed_size: u64, 38 | /// installed files size 39 | #[serde(rename = "ISIZE")] 40 | pub installed_size: u64, 41 | /// MD5 checksum 42 | #[serde(rename = "MD5SUM")] 43 | pub md5_sum: String, 44 | /// SHA256 checksum 45 | #[serde(rename = "SHA256SUM")] 46 | pub sha256_sum: String, 47 | /// PGP signature 48 | #[serde(rename = "PGPSIG")] 49 | #[builder(default)] 50 | pub pgp_signature: Option, 51 | /// package home url 52 | #[serde(rename = "URL")] 53 | #[builder(default)] 54 | pub url: Option, 55 | /// license name 56 | #[serde(rename = "LICENSE")] 57 | #[builder(default)] 58 | pub license: Option>, 59 | /// processor architecture 60 | #[serde(rename = "ARCH")] 61 | pub arch: String, 62 | /// build date 63 | #[serde(rename = "BUILDDATE")] 64 | pub build_date: chrono::NaiveDateTime, 65 | /// who created this package 66 | #[serde(rename = "PACKAGER")] 67 | pub packager: String, 68 | /// packages which this package replaces 69 | #[serde(rename = "REPLACES")] 70 | #[builder(default)] 71 | pub replaces: Option>, 72 | /// packages which cannot be used with this package 73 | #[serde(rename = "CONFLICTS")] 74 | #[builder(default)] 75 | pub conflicts: Option>, 76 | /// packages provided by this package 77 | #[serde(rename = "PROVIDES")] 78 | #[builder(default)] 79 | pub provides: Option>, 80 | /// run-time dependencies 81 | #[serde(rename = "DEPENDS")] 82 | #[builder(default)] 83 | pub depends: Option>, 84 | #[serde(rename = "OPTDEPENDS")] 85 | #[builder(default)] 86 | pub optdepends: Option>, 87 | /// build-time dependencies 88 | #[serde(rename = "MAKEDEPENDS")] 89 | #[builder(default)] 90 | pub makedepends: Option>, 91 | #[serde(rename = "CHECKDEPENDS")] 92 | #[builder(default)] 93 | pub checkdepends: Option>, 94 | } 95 | 96 | impl From for 
PacmanEntryBuilder { 97 | fn from(info: PkgInfo) -> Self { 98 | // missing fields? (e.g. checkdepends) 99 | PacmanEntryBuilder::default() 100 | .name(info.pkg_name) 101 | .base(info.pkg_base) 102 | .version(Version(info.pkg_ver)) 103 | .description((!info.pkg_desc.is_empty()).then_some(info.pkg_desc)) 104 | .url(info.url) 105 | .installed_size(u64::from(info.size)) 106 | .arch(info.arch.to_string()) 107 | .packager(info.packager) 108 | .build_date(chrono::NaiveDateTime::from_timestamp( 109 | info.build_date as i64, 110 | 0, 111 | )) 112 | .groups((!info.groups.is_empty()).then_some(info.groups)) 113 | .license((!info.license.is_empty()).then(|| { 114 | info.license 115 | .into_iter() 116 | .map(|item| item.to_string()) 117 | .collect() 118 | })) 119 | .conflicts((!info.conflict.is_empty()).then(|| { 120 | info.conflict 121 | .into_iter() 122 | .map(|item| Depend::from_str(&*item).unwrap()) 123 | .collect() 124 | })) 125 | .provides((!info.provides.is_empty()).then(|| { 126 | info.provides 127 | .into_iter() 128 | .map(|item| Depend::from_str(&*item).unwrap()) 129 | .collect() 130 | })) 131 | .replaces((!info.replaces.is_empty()).then(|| { 132 | info.replaces 133 | .into_iter() 134 | .map(|item| Depend::from_str(&*item).unwrap()) 135 | .collect() 136 | })) 137 | .depends((!info.depend.is_empty()).then(|| { 138 | info.depend 139 | .into_iter() 140 | .map(|item| Depend::from_str(&*item).unwrap()) 141 | .collect() 142 | })) 143 | .makedepends((!info.make_depend.is_empty()).then(|| { 144 | info.make_depend 145 | .into_iter() 146 | .map(|item| Depend::from_str(&*item).unwrap()) 147 | .collect() 148 | })) 149 | .checkdepends((!info.check_depend.is_empty()).then(|| { 150 | info.check_depend 151 | .into_iter() 152 | .map(|item| Depend::from_str(&*item).unwrap()) 153 | .collect() 154 | })) 155 | .optdepends((!info.opt_depend.is_empty()).then(|| { 156 | info.opt_depend 157 | .into_iter() 158 | .map(|item| Depend::from_str(&*item).unwrap()) 159 | .collect() 160 | })) 161 | } 
162 | } 163 | -------------------------------------------------------------------------------- /src/lib/builder/tests.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use fs_extra; 4 | use fs_extra::dir::CopyOptions; 5 | use tempfile::{tempdir, TempDir}; 6 | 7 | use crate::builder::{ 8 | BareBuildOptions, BareBuilder, BuildOptions, Builder, NspawnBuildOptions, NspawnBuilder, 9 | }; 10 | use crate::tests::*; 11 | 12 | fn setup_bare_builder() -> BareBuilder { 13 | let options = BareBuildOptions::new(&BuildOptions::new().verbose(true)); 14 | let options = if let Some(user) = option_env!("BUILD_USER") { 15 | options.build_as(user) 16 | } else { 17 | options 18 | }; 19 | BareBuilder::new_with_options(&options) 20 | } 21 | 22 | fn setup_nspawn_builder() -> (TempDir, NspawnBuilder) { 23 | let working_dir = tempdir().expect("unable to create working dir"); 24 | let options = NspawnBuildOptions::new( 25 | &BuildOptions::new().verbose(true), 26 | working_dir.path().join("working_dir"), 27 | ); 28 | (working_dir, NspawnBuilder::new(&options)) 29 | } 30 | 31 | async fn build_install_a(builder: &impl Builder) { 32 | builder 33 | .install_remote(&["gcc", "make"]) 34 | .await 35 | .expect("unable to install remote package"); 36 | let workdir = tempdir().expect("unable to create workdir"); 37 | fs_extra::dir::copy( 38 | "tests/build/archer_dummy_a", 39 | workdir.path(), 40 | &CopyOptions::new(), 41 | ) 42 | .expect("unable to copy files"); 43 | let mut files = builder 44 | .build(&*workdir.path().join("archer_dummy_a")) 45 | .await 46 | .expect("unable to build package"); 47 | assert_eq!(files.len(), 1, "package count mismatch"); 48 | let file = files.pop().unwrap(); 49 | assert!( 50 | file.file_name() 51 | .unwrap() 52 | .to_str() 53 | .unwrap() 54 | .contains("archer_dummy_a"), 55 | "package name mismatch" 56 | ); 57 | builder 58 | .install_local(&file) 59 | .await 60 | .expect("unable to install local 
package"); 61 | } 62 | 63 | async fn build_install_b(builder: &impl Builder) { 64 | let workdir = tempdir().expect("unable to create workdir"); 65 | fs_extra::dir::copy( 66 | "tests/build/archer_dummy_b", 67 | workdir.path(), 68 | &CopyOptions::new(), 69 | ) 70 | .expect("unable to copy files"); 71 | let files = builder 72 | .build(&workdir.path().join("archer_dummy_b")) 73 | .await 74 | .expect("unable to build package"); 75 | assert_eq!(files.len(), 2, "package count mismatch"); 76 | let mut marks = [false; 2]; 77 | for file in &files { 78 | let filename = file.file_name().unwrap().to_str().unwrap(); 79 | if filename.contains("archer_dummy_b_1") { 80 | marks[0] = true; 81 | } 82 | if filename.contains("archer_dummy_b_2") { 83 | marks[1] = true; 84 | } 85 | } 86 | assert!(marks.iter().all(|i| *i), "package name mismatch"); 87 | 88 | // join tasks to test pacman mutex 89 | let install_tasks = files.iter().map(|file| builder.install_local(file)); 90 | futures::future::join_all(install_tasks) 91 | .await 92 | .into_iter() 93 | .all(|result| result.is_ok()) 94 | .then_some(()) 95 | .expect("unable to install multiple packages"); 96 | } 97 | 98 | fn must_a() { 99 | let output = std::process::Command::new("archer_dummy_a") 100 | .output() 101 | .expect("missing binary"); 102 | assert_eq!( 103 | std::str::from_utf8(&output.stdout).expect("unable to parse output"), 104 | "dummy a\n", 105 | "output mismatch" 106 | ); 107 | } 108 | 109 | fn must_b() { 110 | let output = std::process::Command::new("archer_dummy_b_1") 111 | .output() 112 | .expect("missing binary"); 113 | assert_eq!( 114 | std::str::from_utf8(&output.stdout).expect("unable to parse output"), 115 | "dummy b 1\n", 116 | "output mismatch" 117 | ); 118 | let output = std::process::Command::new("archer_dummy_b_2") 119 | .output() 120 | .expect("missing binary"); 121 | assert_eq!( 122 | std::str::from_utf8(&output.stdout).expect("unable to parse output"), 123 | "dummy b 2\n", 124 | "output mismatch" 125 | ); 126 | 
} 127 | 128 | async fn bare_cleanup(builder: &BareBuilder) { 129 | builder 130 | .remove(&["archer_dummy_a", "archer_dummy_b_1", "archer_dummy_b_2"]) 131 | .await 132 | .expect("unable to uninstall packages") 133 | } 134 | 135 | #[tokio::test(flavor = "multi_thread", worker_threads = 6)] 136 | async fn must_bare_build() { 137 | if option_env!("NO_SUDO").is_some() { 138 | println!("must_bare_build skipped"); 139 | return; 140 | } 141 | wait_pacman_lock(); 142 | let builder = setup_bare_builder(); 143 | build_install_a(&builder).await; 144 | must_a(); 145 | build_install_b(&builder).await; 146 | must_b(); 147 | bare_cleanup(&builder).await; 148 | } 149 | 150 | async fn must_nspawn_setup(builder: &NspawnBuilder) { 151 | builder.setup().await.expect("unable to setup"); 152 | } 153 | 154 | #[tokio::test(flavor = "multi_thread", worker_threads = 6)] 155 | async fn must_nspawn_build() { 156 | if option_env!("NO_SUDO").is_some() || option_env!("NO_CONTAINER").is_some() { 157 | println!("must_bare_build skipped"); 158 | return; 159 | } 160 | wait_pacman_lock(); 161 | let (_working_dir, builder) = setup_nspawn_builder(); 162 | must_nspawn_setup(&builder).await; 163 | } 164 | 165 | #[tokio::test] 166 | async fn must_unshare() { 167 | if option_env!("NO_SUDO").is_some() { 168 | println!("must_bare_build skipped"); 169 | return; 170 | } 171 | assert_eq!( 172 | NspawnBuilder::test_unshare().await, 173 | option_env!("NO_CONTAINER").is_none(), 174 | "unshare mismatch" 175 | ); 176 | } 177 | 178 | #[test] 179 | fn must_lock() { 180 | let (_working_dir, builder) = setup_nspawn_builder(); 181 | builder.lock_workdir().expect("unable to lock dir"); 182 | std::thread::sleep(Duration::from_secs(1)); 183 | builder.unlock_workdir().expect("unable to unlock dir"); 184 | } 185 | -------------------------------------------------------------------------------- /src/lib/builder/bare.rs: -------------------------------------------------------------------------------- 1 | use std::ffi::OsStr; 2 | 
use std::path::{Path, PathBuf};
use std::process::Stdio;

use async_trait::async_trait;
use tokio::process::Command;
use tokio::sync::Mutex;

use crate::builder::{BuildOptions, Builder};
use crate::error::{BuildError, CommandError, MakepkgError};
use crate::utils::map_makepkg_code;

use super::{IOResult, Result};

/// Options for [`BareBuilder`] on top of the shared [`BuildOptions`].
#[derive(Debug, Clone, Eq, PartialEq, Default)]
pub struct BareBuildOptions {
    // Common build options shared by all builders.
    base: BuildOptions,
    // When set, `makepkg` is executed as this user through `sudo -u`.
    build_as: Option<String>,
}

impl BareBuildOptions {
    /// Create options from the shared base options; no build user by default.
    pub fn new(base_option: &BuildOptions) -> Self {
        Self {
            base: base_option.clone(),
            build_as: None,
        }
    }

    /// Run `makepkg` as `user` via sudo instead of the current user.
    pub fn build_as(mut self, user: &str) -> Self {
        self.build_as = Some(user.to_string());
        self
    }
}

/// Builder that runs `makepkg`/`pacman` directly on the host system.
#[derive(Debug, Default)]
pub struct BareBuilder {
    // Serializes pacman invocations: pacman takes a global db lock, so
    // concurrent calls would fail instead of waiting.
    pacman_lock: Mutex<()>,
    options: BareBuildOptions,
}

impl BareBuilder {
    /// Builder with default options.
    pub fn new() -> Self {
        Default::default()
    }

    /// Builder with explicit options.
    pub fn new_with_options(options: &BareBuildOptions) -> Self {
        Self {
            pacman_lock: Default::default(),
            options: options.clone(),
        }
    }

    /// Run `sudo pacman --noconfirm <args>`, serialized through `pacman_lock`.
    ///
    /// # Errors
    /// Returns [`CommandError::Pacman`] when pacman exits unsuccessfully.
    async fn pacman<S: AsRef<OsStr>, T: IntoIterator<Item = S> + Send>(
        &self,
        args: T,
    ) -> Result<()> {
        let _lock = self.pacman_lock.lock().await;
        let mut cmd = Command::new("sudo");
        cmd.arg("pacman");
        cmd.arg("--noconfirm");
        cmd.args(args);
        if !self.options.base.verbose {
            cmd.stdout(Stdio::null());
            cmd.stderr(Stdio::null());
        }
        let status = cmd.spawn()?.wait().await?;
        if status.success() {
            Ok(())
        } else {
            Err(BuildError::CommandError(CommandError::Pacman))
        }
    }
}

#[async_trait]
impl Builder for BareBuilder {
    /// Nothing to prepare for a bare (host) build.
    async fn setup(&self) -> Result<()> {
        Ok(())
    }

    /// Nothing to tear down for a bare (host) build.
    async fn teardown(&self) -> Result<()> {
        Ok(())
    }

    /// Full system upgrade (`pacman -Syu`).
    async fn sync_system(&self) -> Result<()> {
        self.pacman(&["-Syu"]).await
    }

    /// Install a local package file (`pacman -U <path> --needed`).
    async fn install_local(&self, path: &Path) -> Result<()> {
        self.pacman(&[OsStr::new("-U"), path.as_os_str(), OsStr::new("--needed")])
            .await
    }

    /// Install packages from the configured repositories (`pacman -S --needed`).
    async fn install_remote(&self, packages: &[&str]) -> Result<()> {
        let mut args = vec!["-S"];
        args.extend(packages);
        args.push("--needed");
        self.pacman(&args).await
    }

    /// Remove installed packages (`pacman -R`).
    async fn remove(&self, packages: &[&str]) -> Result<()> {
        let mut args = vec!["-R"];
        args.extend(packages);
        self.pacman(&args).await
    }

    /// Run `makepkg` in `path` and return the built package files.
    ///
    /// Packages are collected into `<path>/output` via `PKGDEST`. When
    /// `build_as` is set, the build runs as that user and ownership of the
    /// build tree / output directory is fixed up with `chown` before and
    /// after the build.
    ///
    /// # Errors
    /// Propagates I/O errors, `chown` failures, and makepkg exit codes
    /// (mapped through [`map_makepkg_code`]; no exit code is treated as a
    /// signal death).
    async fn build(&self, path: &Path) -> Result<Vec<PathBuf>> {
        let output_dir = path.join("output");
        let mut cmd = self.options.build_as.as_ref().map_or_else(
            // Plain invocation: PKGDEST is injected via the environment below.
            || Command::new("makepkg"),
            |user| {
                let mut cmd = Command::new("sudo");
                cmd.arg("-u");
                cmd.arg(user);
                // sudo strips the caller's environment, so PKGDEST is passed
                // as a `VAR=value` argument instead. NOTE(review): this
                // panics on non-UTF8 paths - confirm that's acceptable.
                cmd.arg(format!("PKGDEST={}", output_dir.to_str().unwrap()));
                cmd.arg("makepkg");
                cmd
            },
        );

        if !output_dir.exists() {
            tokio::fs::create_dir(&output_dir).await?;
            if let Some(user) = &self.options.build_as {
                // Hand the whole build tree to the build user so makepkg can
                // write to it.
                let status = Command::new("sudo")
                    .arg("chown")
                    .arg("-R")
                    .arg(format!("{}:{}", user, user))
                    .arg(&path)
                    .spawn()?
                    .wait()
                    .await?;
                if !status.success() {
                    return Err(BuildError::CommandError(CommandError::Chown));
                }
            }
        }

        // Covers the non-sudo path; harmless for the sudo path since sudo
        // drops the inherited environment anyway.
        cmd.current_dir(path).env("PKGDEST", &output_dir);

        if self.options.base.check {
            cmd.arg("--check");
        }
        if self.options.base.sign {
            cmd.arg("--sign");
        }
        if self.options.base.skip_checksum {
            cmd.arg("--skipchecksums");
        }
        if self.options.base.skip_pgp_check {
            cmd.arg("--skippgpcheck");
        }
        if !self.options.base.verbose {
            cmd.stdout(Stdio::null());
            cmd.stderr(Stdio::null());
        }

        let status = cmd.spawn()?.wait().await?;

        // A missing exit code means makepkg was killed by a signal.
        status
            .code()
            .map_or(Some(MakepkgError::Signal), map_makepkg_code)
            .map_or(Ok(()), |e| Err(CommandError::Makepkg(e)))?;

        if self.options.build_as.is_some() {
            // Give the built artifacts back to the invoking user.
            let status = Command::new("sudo")
                .arg("chown")
                .arg("-R")
                .arg(format!(
                    "{}:{}",
                    users::get_current_uid(),
                    users::get_current_gid()
                ))
                .arg(&output_dir)
                .spawn()?
                .wait()
                .await?;
            if !status.success() {
                return Err(BuildError::CommandError(CommandError::Chown));
            }
        }

        Ok(std::fs::read_dir(&output_dir)?
            .map(|entry| entry.map(|entry| entry.path()))
            .collect::<IOResult<Vec<PathBuf>>>()?)
193 | } 194 | } 195 | -------------------------------------------------------------------------------- /src/lib/resolver/planner.rs: -------------------------------------------------------------------------------- 1 | use std::collections::VecDeque; 2 | use std::sync::Arc; 3 | 4 | use itertools::Itertools; 5 | 6 | use crate::error::Result; 7 | use crate::repository::*; 8 | use crate::types::*; 9 | 10 | use super::{tree_resolv::TreeResolver, types::*}; 11 | 12 | pub struct PlanBuilder { 13 | pkgs: Vec, 14 | local_repo: Arc, 15 | global_repo: Arc, 16 | pacman_resolver: TreeResolver, 17 | global_resolver: TreeResolver, 18 | } 19 | 20 | impl Default for PlanBuilder { 21 | fn default() -> Self { 22 | let aur_repo = Arc::new(CachedRepository::new(Arc::new(AurRepo::new()))); 23 | let local_repo = Arc::new(CachedRepository::new(Arc::new(PacmanLocal::new()))); 24 | let remote_repo = Arc::new(CachedRepository::new(Arc::new(PacmanRemote::new()))); 25 | let global_repo = Arc::new(MergedRepository::new(vec![remote_repo.clone(), aur_repo])); 26 | 27 | let remote_policy = ResolvePolicy::new(remote_repo, local_repo.clone(), local_repo.clone()); 28 | let global_policy = 29 | ResolvePolicy::new(global_repo.clone(), local_repo.clone(), local_repo.clone()); 30 | 31 | let pacman_resolver = 32 | TreeResolver::new(remote_policy, box always_depend, box allow_if_pacman); 33 | let global_resolver = TreeResolver::new( 34 | global_policy, 35 | box makedepend_if_aur_custom, 36 | box allow_if_pacman, 37 | ); 38 | Self { 39 | pkgs: vec![], 40 | local_repo, 41 | global_repo, 42 | pacman_resolver, 43 | global_resolver, 44 | } 45 | } 46 | } 47 | 48 | impl PlanBuilder { 49 | #[must_use] 50 | pub fn new() -> Self { 51 | Default::default() 52 | } 53 | 54 | pub fn add_package(&mut self, pkg: &Depend) -> Result<()> { 55 | let mut pkg = self.global_repo.find_package(pkg)?; 56 | if let Some(pkg) = pkg.pop() { 57 | self.add_package_exact(pkg); 58 | } 59 | Ok(()) 60 | } 61 | 62 | pub fn 
add_package_exact(&mut self, pkg: Package) { 63 | if let Package::AurPackage(_) = pkg { 64 | self.pkgs.push(pkg); 65 | } 66 | } 67 | 68 | pub fn build(self) -> Result> { 69 | let mut plan = vec![]; 70 | let mut pkgs_to_build: VecDeque = VecDeque::new(); 71 | pkgs_to_build.extend(self.pkgs); 72 | while let Some(pkg_to_build) = pkgs_to_build.pop_front() { 73 | // search makedepends 74 | let make_deps = self 75 | .global_repo 76 | .find_packages(&**pkg_to_build.make_depends())?; 77 | 78 | // search aur depends 79 | let aur_custom_deps = self 80 | .global_repo 81 | .find_packages(&**pkg_to_build.depends())? 82 | .into_iter() 83 | .filter_map(|(_, mut v)| { 84 | let first_pkg = v.pop().unwrap(); 85 | match first_pkg { 86 | Package::PacmanPackage(_) => None, 87 | Package::AurPackage(_) | Package::CustomPackage(_) => Some(first_pkg), 88 | } 89 | }) 90 | .collect_vec(); 91 | let mut aur_custom_make_deps = vec![]; 92 | let mut pacman_make_deps = vec![]; 93 | 94 | // split make depends by source (aur/pacman) 95 | for (_, mut deps) in make_deps { 96 | let mut skip = false; 97 | for dep in &deps { 98 | if !self.local_repo.find_package(&Depend::from(dep))?.is_empty() { 99 | // skip existing packages in local repo 100 | skip = true; 101 | break; 102 | } 103 | } 104 | if skip { 105 | continue; 106 | } 107 | if let Some(pkg) = deps.pop() { 108 | match pkg { 109 | Package::PacmanPackage(_) => pacman_make_deps.push(pkg), 110 | Package::AurPackage(_) | Package::CustomPackage(_) => { 111 | aur_custom_make_deps.push(pkg); 112 | } 113 | } 114 | } 115 | } 116 | 117 | // build & install aur make dependencies 118 | for mut pkgs in self 119 | .global_resolver 120 | .resolve(&*aur_custom_make_deps)? 
121 | .strongly_connected_components() 122 | { 123 | // TODO avoid dup build 124 | if pkgs.len() > 1 { 125 | plan.push(PlanAction::InstallGroup( 126 | pkgs.into_iter().map(|p| p.as_ref().clone()).collect(), 127 | )); 128 | } else { 129 | let pkg = pkgs.pop().unwrap(); 130 | if let Package::AurPackage(_) = pkg.as_ref() { 131 | plan.push(PlanAction::Build(pkg.as_ref().clone())); 132 | } 133 | plan.push(PlanAction::Install(pkg.as_ref().clone())); 134 | } 135 | } 136 | 137 | // install pacman make dependencies 138 | // Note 139 | // pacman makedeps are installed behind aur deps to avoid being uninstalled later by deps of aur makedeps 140 | for pkgs in self 141 | .pacman_resolver 142 | .resolve(&*pacman_make_deps)? 143 | .strongly_connected_components() 144 | { 145 | plan.push(PlanAction::InstallGroup( 146 | pkgs.into_iter().map(|p| p.as_ref().clone()).collect(), 147 | )); 148 | } 149 | 150 | // need to build its aur dependencies 151 | pkgs_to_build.extend(aur_custom_deps); 152 | 153 | // build this package 154 | // TODO avoid dup build 155 | plan.push(PlanAction::Build(pkg_to_build.clone())); 156 | plan.push(PlanAction::CopyToDest(pkg_to_build)); 157 | } 158 | Ok(plan) 159 | } 160 | } 161 | -------------------------------------------------------------------------------- /src/lib/tests.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::{Display, Formatter}; 2 | use std::path::PathBuf; 3 | use std::str::FromStr; 4 | use std::time::Duration; 5 | 6 | use itertools::Itertools; 7 | 8 | pub use crate::prelude::*; 9 | 10 | #[derive(Debug, Clone, Eq, PartialEq, Hash)] 11 | pub struct PackageAssertion { 12 | name: String, 13 | version: DependVersion, 14 | } 15 | 16 | impl FromStr for PackageAssertion { 17 | type Err = !; 18 | 19 | fn from_str(s: &str) -> std::result::Result { 20 | let dep = Depend::from_str(s).unwrap(); 21 | Ok(PackageAssertion::new(dep.name, dep.version)) 22 | } 23 | } 24 | 25 | #[macro_export] 26 | 
macro_rules! assert_pkg { 27 | ($s: literal) => { 28 | PackageAssertion::from_str($s).unwrap() 29 | }; 30 | } 31 | 32 | impl Display for PackageAssertion { 33 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 34 | write!(f, "?[{}{}]", self.name, self.version) 35 | } 36 | } 37 | 38 | impl PackageAssertion { 39 | pub fn new(name: String, version: DependVersion) -> Self { 40 | PackageAssertion { name, version } 41 | } 42 | pub fn assert(&self, pkg: &Package) -> bool { 43 | self.name == pkg.name() && self.version.satisfied_by(&*pkg.version()) 44 | } 45 | } 46 | 47 | pub fn test_pkg( 48 | name: String, 49 | version: Version, 50 | depends: Vec, 51 | makedepends: Vec, 52 | conflicts: Vec, 53 | provides: Vec, 54 | ) -> Package { 55 | Package::PacmanPackage(OwnedPacmanPackage { 56 | name, 57 | version, 58 | depends, 59 | makedepends, 60 | conflicts, 61 | provides, 62 | ..Default::default() 63 | }) 64 | } 65 | 66 | #[macro_export] 67 | macro_rules! dep { 68 | ($s: expr) => { 69 | Depend::from_str($s).unwrap() 70 | }; 71 | } 72 | 73 | #[macro_export] 74 | macro_rules! deps { 75 | ($($s: expr), *) => { 76 | vec![$(Depend::from_str($s).unwrap()),*] 77 | } 78 | } 79 | 80 | #[macro_export] 81 | macro_rules! 
pkg { 82 | ($name: literal) => { 83 | test_pkg( 84 | String::from($name), 85 | Version(String::from("1.0.0")), 86 | vec![], 87 | vec![], 88 | vec![], 89 | vec![], 90 | ) 91 | }; 92 | ($name: literal, $ver: literal) => { 93 | test_pkg( 94 | String::from($name), 95 | Version(String::from($ver)), 96 | vec![], 97 | vec![], 98 | vec![], 99 | vec![], 100 | ) 101 | }; 102 | ($name: literal, $ver: literal, $depends: expr) => { 103 | test_pkg( 104 | String::from($name), 105 | Version(String::from($ver)), 106 | $depends, 107 | vec![], 108 | vec![], 109 | vec![], 110 | ) 111 | }; 112 | ($name: literal, $ver: literal, $depends: expr, $conflicts: expr) => { 113 | test_pkg( 114 | String::from($name), 115 | Version(String::from($ver)), 116 | $depends, 117 | vec![], 118 | $conflicts, 119 | vec![], 120 | ) 121 | }; 122 | ($name: literal, $ver: literal, $depends: expr, $makedepends: expr, $conflicts: expr, $provides: expr) => { 123 | test_pkg( 124 | String::from($name), 125 | Version(String::from($ver)), 126 | $depends, 127 | $makedepends, 128 | $conflicts, 129 | $provides, 130 | ) 131 | }; 132 | } 133 | 134 | fn must_pkg_order(tgt: &[&Package], pkgs: &[PackageAssertion]) { 135 | let info_prefix = format!( 136 | "AssertOrder({:?})", 137 | pkgs.iter().map(|s| s.to_string()).collect_vec() 138 | ); 139 | println!("{}", info_prefix); 140 | let positions = pkgs.iter().enumerate().map(|(idx, pkg)| { 141 | ( 142 | idx, 143 | tgt.iter() 144 | .enumerate() 145 | .find(|(_, candidate)| pkg.assert(candidate)) 146 | .map(|(pos, _)| pos), 147 | ) 148 | }); 149 | assert!( 150 | positions 151 | .tuple_windows() 152 | .into_iter() 153 | .all(|((idx1, pos1), (idx2, pos2))| pos1.expect(&*format!( 154 | "{} {} not found", 155 | info_prefix, 156 | pkgs.get(idx1).unwrap() 157 | )) < pos2.expect(&*format!( 158 | "{} {} not found", 159 | info_prefix, 160 | pkgs.get(idx2).unwrap() 161 | ))), 162 | "{} assertion failed", 163 | info_prefix 164 | ); 165 | } 166 | 167 | fn is_pkg_exists(pkgs: &[&Package], pkg: 
&PackageAssertion) -> bool { 168 | pkgs.iter().any(|candidate| pkg.assert(candidate)) 169 | } 170 | 171 | pub enum PkgsAssertion { 172 | Order(Vec), 173 | Exist(PackageAssertion), 174 | NotExist(PackageAssertion), 175 | } 176 | 177 | impl PkgsAssertion { 178 | pub fn assert(&self, pkgs: &[&Package]) { 179 | match self { 180 | PkgsAssertion::Order(s) => must_pkg_order(pkgs, s), 181 | PkgsAssertion::Exist(pkg) => { 182 | let info_prefix = format!("AssertExist({})", pkg); 183 | println!("{}", info_prefix); 184 | assert!(is_pkg_exists(pkgs, pkg), "{} assertion failed", info_prefix) 185 | } 186 | PkgsAssertion::NotExist(pkg) => { 187 | let info_prefix = format!("AssertNotExist({})", pkg); 188 | println!("{}", info_prefix); 189 | assert!( 190 | !is_pkg_exists(pkgs, pkg), 191 | "{} assertion failed", 192 | info_prefix 193 | ) 194 | } 195 | } 196 | } 197 | } 198 | 199 | #[macro_export] 200 | macro_rules! asrt { 201 | ($s: literal < $($ss: literal)< *) => { 202 | PkgsAssertion::Order(vec![assert_pkg!($s), $(assert_pkg!($ss)),*]) 203 | }; 204 | ($s: literal) => { 205 | PkgsAssertion::Exist(assert_pkg!($s)) 206 | }; 207 | (!$s: literal) => { 208 | PkgsAssertion::NotExist(assert_pkg!($s)) 209 | }; 210 | } 211 | 212 | pub fn wait_pacman_lock() { 213 | let lock_path = PathBuf::from("/var/lib/pacman/db.lck"); 214 | while lock_path.exists() { 215 | std::thread::sleep(Duration::from_secs(1)); 216 | } 217 | } 218 | -------------------------------------------------------------------------------- /src/lib/database/pacman.rs: -------------------------------------------------------------------------------- 1 | use std::fs; 2 | use std::fs::File; 3 | use std::io::{Read, Write}; 4 | use std::path::{Path, PathBuf}; 5 | 6 | use pkginfo::errors::Error as PkgInfoError; 7 | use pkginfo::PkgInfo; 8 | use sha2::{Digest, Sha256}; 9 | use tar::Archive as TarArchive; 10 | 11 | use crate::error::{Error, Result}; 12 | 13 | use super::compressor::ArchiveBuilder; 14 | use 
super::decompressor::ArchiveReader; 15 | use super::types::*; 16 | 17 | pub enum BuildTarget { 18 | Folder(PathBuf), 19 | Archive { 20 | path: PathBuf, 21 | repo: String, 22 | desc_builder: ArchiveBuilder, 23 | files_builder: ArchiveBuilder, 24 | }, 25 | } 26 | 27 | impl BuildTarget { 28 | pub fn new(path: impl AsRef, archive_repo: Option<&str>) -> Self { 29 | archive_repo.map_or_else( 30 | || Self::Folder(path.as_ref().to_path_buf()), 31 | |repo| Self::Archive { 32 | path: path.as_ref().to_path_buf(), 33 | repo: repo.to_string(), 34 | desc_builder: Default::default(), 35 | files_builder: Default::default(), 36 | }, 37 | ) 38 | } 39 | 40 | // append package to target 41 | pub fn append_pkg(&mut self, desc: &PacmanEntry, files: &[String]) -> Result<()> { 42 | let dir_name = PathBuf::from(format!("{}-{}", desc.name, desc.version).as_str()); 43 | let desc_content = archlinux_repo_parser::to_string(desc).unwrap(); // TODO error handling 44 | let files_content = format!("%FILES%\n{}", files.join("\n")); 45 | match self { 46 | BuildTarget::Folder(target) => { 47 | let pkg_dir = target.join(dir_name); 48 | fs::create_dir(&pkg_dir)?; 49 | fs::write(pkg_dir.join("desc"), desc_content)?; 50 | fs::write(pkg_dir.join("files"), files_content)?; 51 | } 52 | BuildTarget::Archive { 53 | path: _, 54 | repo: _, 55 | desc_builder, 56 | files_builder, 57 | } => { 58 | desc_builder.append_data(dir_name.join("desc"), desc_content.as_ref())?; 59 | files_builder.append_data(dir_name.join("desc"), desc_content.as_ref())?; 60 | files_builder.append_data(dir_name.join("files"), files_content.as_ref())?; 61 | } 62 | } 63 | Ok(()) 64 | } 65 | 66 | // finalize (only needed by archive) 67 | pub fn build(self) -> Result<()> { 68 | if let BuildTarget::Archive { 69 | path, 70 | repo, 71 | desc_builder, 72 | files_builder, 73 | } = self 74 | { 75 | let desc_path = path.join(format!("{}.db.tar.zst", repo)); 76 | let desc_data = desc_builder.build()?; 77 | let mut desc_file = File::create(desc_path)?; 
78 | desc_file.write_all(&*desc_data)?; 79 | desc_file.sync_all()?; 80 | 81 | let files_path = path.join(format!("{}.files.tar.zst", repo)); 82 | let files_data = files_builder.build()?; 83 | let mut files_file = File::create(files_path)?; 84 | files_file.write_all(&*files_data)?; 85 | files_file.sync_all()?; 86 | } 87 | Ok(()) 88 | } 89 | } 90 | 91 | #[derive(Debug, Default, Clone)] 92 | pub struct DBBuilder { 93 | pkgs: Vec, 94 | } 95 | 96 | // Build .db & .files folder 97 | impl DBBuilder { 98 | pub fn new() -> Self { 99 | Default::default() 100 | } 101 | 102 | pub fn add_file(mut self, file: PathBuf) -> Self { 103 | self.add_file_mut(file); 104 | self 105 | } 106 | 107 | pub fn add_file_mut(&mut self, file: PathBuf) { 108 | self.pkgs.push(file); 109 | } 110 | 111 | pub fn build(&self, mut target: BuildTarget) -> Result<()> { 112 | for pkg in &self.pkgs { 113 | Self::build_single(pkg, &mut target)?; 114 | } 115 | target.build()?; 116 | 117 | Ok(()) 118 | } 119 | 120 | // parse package files & .PKGINFO 121 | fn collect_info(mut tar: TarArchive) -> Result<(Vec, Option)> { 122 | let mut files = vec![]; 123 | let mut info = None; 124 | // iterate archive 125 | for entry in tar.entries()? 
{ 126 | let entry = entry?; 127 | let path = entry.path()?; 128 | if !path.to_str().unwrap().starts_with('.') { 129 | // collect package files 130 | files.push(path.to_string_lossy().to_string()); 131 | } 132 | if info.is_none() && entry.header().entry_type().is_file() { 133 | let name = path.file_name().unwrap().to_string_lossy(); 134 | if name == ".PKGINFO" { 135 | // parse .PKGINFO 136 | info = Some(PkgInfo::parse_file(entry).map_err(|err| match err { 137 | PkgInfoError::IoError(e) => Error::IOError(e), 138 | PkgInfoError::InvalidPackageFormat => Error::PackageError, 139 | })?); 140 | } 141 | } 142 | } 143 | Ok((files, info)) 144 | } 145 | 146 | // build a single package 147 | fn build_single(pkg: &Path, target: &mut BuildTarget) -> Result<()> { 148 | // unarchive package 149 | let raw = fs::read(pkg)?; 150 | let archive = ArchiveReader::from_u8(&raw)?; 151 | let tar = archive.to_tar(); 152 | 153 | // parse package files and .PKGINFO 154 | let (files, info) = Self::collect_info(tar)?; 155 | let info = info.ok_or(Error::PackageError)?; 156 | 157 | // convert .PKGINFO to desc format 158 | let desc_builder: PacmanEntryBuilder = info.into(); 159 | // TODO PGP 160 | // add remaining fields 161 | let desc: PacmanEntry = desc_builder 162 | .file_name(pkg.file_name().unwrap().to_string_lossy().to_string()) 163 | .compressed_size(fs::metadata(pkg)?.len()) 164 | .md5_sum(format!("{:?}", md5::compute(&raw))) 165 | .sha256_sum({ 166 | let mut hasher = Sha256::new(); 167 | hasher.update(&raw); 168 | format!("{:x}", hasher.finalize()) 169 | }) 170 | .build() 171 | .unwrap(); 172 | 173 | // output files 174 | target.append_pkg(&desc, &files) 175 | } 176 | } 177 | -------------------------------------------------------------------------------- /src/lib/types/version.rs: -------------------------------------------------------------------------------- 1 | use std::cmp::Ordering; 2 | use std::collections::Bound; 3 | use std::fmt::{Display, Formatter}; 4 | use std::hash::{Hash, 
Hasher};
use std::ops::RangeBounds;

use alpm::DepModVer;
use ranges::{Domain, GenericRange, Ranges};
use serde::de::Visitor;
use serde::{Deserialize, Deserializer, Serialize, Serializer};

/// A pacman package version string, compared with libalpm's `vercmp` semantics.
// TODO figure out a way to handle `epoch` field. see https://wiki.archlinux.org/index.php/PKGBUILD#Version
#[derive(Debug, Clone)]
pub struct Version(pub String);

impl Serialize for Version {
    /// Serialize as a plain string.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        serializer.serialize_str(&self.0)
    }
}

impl<'de> Deserialize<'de> for Version {
    /// Deserialize from any string representation.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        use serde::de::Error;
        struct VisitorImpl;

        impl<'de> Visitor<'de> for VisitorImpl {
            type Value = Version;

            fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
                write!(formatter, "version")
            }

            fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
            where
                E: Error,
            {
                Ok(Version(String::from(v)))
            }

            fn visit_string<E>(self, v: String) -> Result<Self::Value, E>
            where
                E: Error,
            {
                // Owned string: move it in without re-allocating.
                Ok(Version(v))
            }
        }

        deserializer.deserialize_string(VisitorImpl)
    }
}

impl From<&alpm::Ver> for Version {
    fn from(ver: &alpm::Ver) -> Self {
        Self(ver.to_string())
    }
}

impl Hash for Version {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // FIX: not reliable because of custom partial eq implementation -
        // vercmp may equate strings that hash differently (e.g. "1.0" vs "1.00").
        self.0.hash(state);
    }
}

impl Display for Version {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        Display::fmt(&self.0, f)
    }
}

impl AsRef<str> for Version {
    fn as_ref(&self) -> &str {
        self.0.as_ref()
    }
}

impl PartialEq for Version {
    fn eq(&self, other: &Self) -> bool {
        // Delegate to alpm's version comparison instead of string equality.
        self.cmp(other) == Ordering::Equal
    }
}

impl PartialOrd for Version {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        // Defined in terms of `Ord` so the two orderings can never disagree.
        Some(self.cmp(other))
    }
}

impl Eq for Version {}

impl Ord for Version {
    fn cmp(&self, other: &Self) -> Ordering {
        alpm::vercmp(self.as_ref(), other.as_ref())
    }
}

impl Domain for Version {}

/// A dependency's version constraint, stored as a set of version ranges.
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
pub struct DependVersion(pub Ranges<Version>);

/// Extract the endpoint value of a bound, if it has one.
const fn bound_of(bound: Bound<&Version>) -> Option<&Version> {
    match bound {
        Bound::Included(v) | Bound::Excluded(v) => Some(v),
        Bound::Unbounded => None,
    }
}

impl Display for DependVersion {
    /// Render the constraint in pacman's dependency syntax (e.g. ` >= 1.2`).
    ///
    /// Arch dependency strings can express at most one single-ended range,
    /// so multi-range and double-ended constraints degrade to placeholder
    /// text instead of valid syntax.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        if self.0.as_slice().len() > 1 {
            write!(f, "multi_ranges") // archlinux doesn't support multi range constraint
        } else if let Some(range) = self.0.as_slice().first() {
            if range.is_full() {
                write!(f, "")
            } else if range.is_empty() {
                write!(f, " ∅")
            } else if range.is_singleton() {
                write!(f, " = {}", bound_of(range.start_bound()).unwrap())
            } else if !range.is_right_unbounded() && range.is_left_unbounded() {
                if range.is_right_closed() {
                    write!(f, " <= {}", bound_of(range.end_bound()).unwrap())
                } else {
                    write!(f, " < {}", bound_of(range.end_bound()).unwrap())
                }
            } else if !range.is_left_unbounded() && range.is_right_unbounded() {
                if range.is_left_closed() {
                    write!(f, " >= {}", bound_of(range.start_bound()).unwrap())
                } else {
                    write!(f, " > {}", bound_of(range.start_bound()).unwrap())
                }
            } else {
                write!(f, "double_ended_range") // archlinux doesn't support double end constraint in one string
            }
        } else {
            write!(f, ": ∅")
        }
    }
}

impl DependVersion {
    pub fn
is_empty(&self) -> bool { 150 | !self.0.as_slice().iter().any(|range| !range.is_empty()) 151 | } 152 | 153 | pub fn is_legal(&self) -> bool { 154 | !(self.is_empty() || self.0.as_slice().len() > 1) 155 | } 156 | 157 | pub fn split(&self) -> Vec { 158 | // TODO support <> 159 | if self.is_legal() { 160 | let range = self.0.as_slice().first().unwrap(); 161 | if !range.is_left_unbounded() && !range.is_right_unbounded() { 162 | vec![ 163 | Self(Ranges::from(GenericRange::new_with_bounds( 164 | range.start_bound().cloned(), 165 | Bound::Unbounded, 166 | ))), 167 | Self(Ranges::from(GenericRange::new_with_bounds( 168 | Bound::Unbounded, 169 | range.end_bound().cloned(), 170 | ))), 171 | ] 172 | } else { 173 | vec![self.clone()] 174 | } 175 | } else { 176 | vec![] 177 | } 178 | } 179 | 180 | pub fn intersect(&self, other: &Self) -> Self { 181 | Self(self.0.clone().intersect(other.0.clone())) 182 | } 183 | 184 | pub fn union(&self, other: &Self) -> Self { 185 | Self(self.0.clone().union(other.0.clone())) 186 | } 187 | 188 | pub fn contains(&self, other: &Self) -> bool { 189 | self.0.clone().intersect(other.0.clone()) == other.0 190 | } 191 | 192 | pub fn complement(&self) -> Self { 193 | Self(self.0.clone().invert()) 194 | } 195 | 196 | pub fn satisfied_by(&self, target: &Version) -> bool { 197 | self.0.contains(target) 198 | } 199 | } 200 | 201 | impl<'a> From> for DependVersion { 202 | fn from(dep_ver: DepModVer<'a>) -> Self { 203 | Self(match dep_ver { 204 | DepModVer::Any => Ranges::full(), 205 | DepModVer::Eq(ver) => Ranges::from(Version::from(ver)), 206 | DepModVer::Ge(ver) => Ranges::from(Version::from(ver)..), 207 | DepModVer::Le(ver) => Ranges::from(..=Version::from(ver)), 208 | DepModVer::Gt(ver) => Ranges::from(GenericRange::new_greater_than(Version::from(ver))), 209 | DepModVer::Lt(ver) => Ranges::from(..Version::from(ver)), 210 | }) 211 | } 212 | } 213 | -------------------------------------------------------------------------------- /src/lib/parser/pacman.rs: 
/// Options controlling how `pacman-conf` is invoked when loading a
/// `PacmanConf`: an alternative config file and/or an alternative root dir.
#[derive(Debug, Clone, Default, Eq, PartialEq, Hash)]
pub struct PacmanConfCtx {
    // alternative pacman.conf to load (`pacman-conf -c`)
    path: Option<PathBuf>,
    // alternative installation root (`pacman-conf -R`)
    root: Option<PathBuf>,
}

impl PacmanConfCtx {
    /// Context with no overrides: system defaults are used.
    pub fn new() -> Self {
        Default::default()
    }

    /// Use `path` as the pacman.conf to load.
    pub fn path(self, path: impl AsRef<Path>) -> Self {
        Self {
            path: Some(path.as_ref().to_path_buf()),
            ..self
        }
    }

    /// Use `root` as the installation root.
    pub fn root(self, root: impl AsRef<Path>) -> Self {
        Self {
            root: Some(root.as_ref().to_path_buf()),
            ..self
        }
    }
}
siglevel.insert(SigLevel::PACKAGE | SigLevel::PACKAGE_OPTIONAL); 88 | } 89 | if database { 90 | siglevel.insert(SigLevel::DATABASE | SigLevel::DATABASE_OPTIONAL); 91 | } 92 | } 93 | "Required" => { 94 | if package { 95 | siglevel.insert(SigLevel::PACKAGE); 96 | siglevel.remove(SigLevel::PACKAGE_OPTIONAL); 97 | } 98 | if database { 99 | siglevel.insert(SigLevel::DATABASE); 100 | siglevel.remove(SigLevel::DATABASE_OPTIONAL); 101 | } 102 | } 103 | "TrustedOnly" => { 104 | if package { 105 | siglevel.remove(SigLevel::PACKAGE_MARGINAL_OK | SigLevel::PACKAGE_UNKNOWN_OK); 106 | } 107 | if database { 108 | siglevel.remove(SigLevel::DATABASE_MARGINAL_OK | SigLevel::DATABASE_UNKNOWN_OK); 109 | } 110 | } 111 | "TrustAll" => { 112 | if package { 113 | siglevel.insert(SigLevel::PACKAGE_MARGINAL_OK | SigLevel::PACKAGE_UNKNOWN_OK); 114 | } 115 | if database { 116 | siglevel.insert(SigLevel::DATABASE_MARGINAL_OK | SigLevel::DATABASE_UNKNOWN_OK); 117 | } 118 | } 119 | _ => return None, 120 | } 121 | } 122 | if siglevel == SigLevel::NONE { 123 | None 124 | } else { 125 | Some(siglevel) 126 | } 127 | } 128 | 129 | impl PacmanConf { 130 | pub fn new() -> Result { 131 | Self::with(&PacmanConfCtx::default()) 132 | } 133 | 134 | pub fn with(ctx: &PacmanConfCtx) -> Result { 135 | let mut cmd = std::process::Command::new("pacman-conf"); 136 | if let Some(path) = &ctx.path { 137 | let canonical_path = path.canonicalize()?; 138 | cmd.current_dir(canonical_path.parent().unwrap()); 139 | cmd.arg("-c").arg(canonical_path.file_name().unwrap()); 140 | } 141 | if let Some(root) = &ctx.root { 142 | cmd.arg("-R").arg(root); 143 | } 144 | 145 | let output = cmd.output()?; 146 | let raw_conf = std::str::from_utf8(&*output.stdout) 147 | .map_err(|_| ParseError::PacmanError(String::from("utf8 parse error")))?; 148 | 149 | let ini = 150 | Ini::load_from_str(raw_conf).map_err(|e| ParseError::PacmanError(e.to_string()))?; 151 | let sync_dbs = Self::parse_sync_dbs(&ini); 152 | 153 | let path = ctx 154 | 
.path 155 | .as_ref() 156 | .map_or_else(|| PathBuf::from(PACMAN_CONF_PATH), |path| path.clone()); 157 | 158 | Ok(Self { 159 | inner: ini, 160 | sync_dbs, 161 | path, 162 | }) 163 | } 164 | 165 | pub fn path(&self) -> &Path { 166 | self.path.as_path() 167 | } 168 | 169 | pub fn option(&self, field: &str) -> Option<&str> { 170 | self.inner 171 | .section(Some("options")) 172 | .and_then(|options| options.get(field)) 173 | } 174 | 175 | pub fn host_mirrors(&self) -> Vec { 176 | self.sync_dbs() 177 | .iter() 178 | .find(|db| db.name == "extra") 179 | .unwrap() 180 | .servers 181 | .iter() 182 | .map(|server| { 183 | RE_EXTRA 184 | .replace(server, NoExpand("$repo/os/$arch")) 185 | .to_string() 186 | }) 187 | .collect() 188 | } 189 | 190 | pub fn mirror_list(&self) -> String { 191 | self.host_mirrors() 192 | .into_iter() 193 | .map(|server| format!("Server = {}", server)) 194 | .join("\n") 195 | } 196 | 197 | pub fn sync_dbs(&self) -> &[SyncDB] { 198 | &self.sync_dbs 199 | } 200 | 201 | fn parse_sync_dbs(ini: &Ini) -> Vec { 202 | let global_siglevel = ini 203 | .section(Some("options")) 204 | .map(|options| options.get_all("SigLevel")) 205 | .and_then(parse_siglevel) 206 | .unwrap_or(SigLevel::USE_DEFAULT); 207 | 208 | ini.sections() 209 | .filter(|section| section.map(|name| name != "options").unwrap_or(false)) 210 | .map(|name| (name.unwrap(), ini.section(name).unwrap())) 211 | .map(|(name, section)| SyncDB { 212 | name: name.to_string(), 213 | sig_level: parse_siglevel(section.get_all("SigLevel")).unwrap_or(global_siglevel), 214 | servers: section.get_all("Server").map(ToString::to_string).collect(), 215 | usage: section.get_all("Usage").map(ToString::to_string).collect(), 216 | }) 217 | .collect() 218 | } 219 | } 220 | -------------------------------------------------------------------------------- /src/lib/storage/providers/s3.rs: -------------------------------------------------------------------------------- 1 | use std::convert::TryFrom; 2 | use 
std::io::{Cursor, SeekFrom}; 3 | use std::path::{Path, PathBuf}; 4 | 5 | use async_trait::async_trait; 6 | use rusoto_core::credential::StaticProvider; 7 | use rusoto_core::{Client, Region, RusotoError}; 8 | use rusoto_s3::{ 9 | DeleteObjectRequest, GetObjectError, GetObjectRequest, PutObjectRequest, S3Client, 10 | StreamingBody, S3, 11 | }; 12 | use tempfile::NamedTempFile; 13 | use tokio::fs::File; 14 | use tokio::io::{AsyncReadExt, AsyncSeekExt}; 15 | 16 | use crate::consts::STORAGE_MEMORY_LIMIT; 17 | use crate::error::{S3Error, StorageError}; 18 | use crate::storage::providers::{get_fullpath, StorageProvider}; 19 | use crate::storage::types::ByteStream; 20 | 21 | use super::Result; 22 | 23 | pub struct S3Storage { 24 | client: S3Client, 25 | bucket: String, 26 | base: PathBuf, 27 | memory_limit: u64, 28 | } 29 | 30 | #[derive(Clone, Eq, PartialEq, Default, Hash)] 31 | pub struct S3StorageBuilder { 32 | name: Option, 33 | endpoint: Option, 34 | credential: Option<(String, String)>, 35 | bucket: Option, 36 | base: Option, 37 | memory_limit: Option, 38 | } 39 | 40 | impl S3StorageBuilder { 41 | pub fn new() -> Self { 42 | Default::default() 43 | } 44 | 45 | pub fn with_name(self, name: impl ToString) -> Self { 46 | Self { 47 | name: Some(name.to_string()), 48 | ..self 49 | } 50 | } 51 | 52 | pub fn with_endpoint(self, endpoint: impl ToString) -> Self { 53 | Self { 54 | endpoint: Some(endpoint.to_string()), 55 | ..self 56 | } 57 | } 58 | 59 | pub fn with_credential(self, key: impl ToString, secret: impl ToString) -> Self { 60 | Self { 61 | credential: Some((key.to_string(), secret.to_string())), 62 | ..self 63 | } 64 | } 65 | 66 | pub fn with_bucket(self, bucket: impl ToString) -> Self { 67 | Self { 68 | bucket: Some(bucket.to_string()), 69 | ..self 70 | } 71 | } 72 | 73 | pub fn with_base(self, base: impl AsRef) -> Self { 74 | Self { 75 | base: Some(base.as_ref().to_path_buf()), 76 | ..self 77 | } 78 | } 79 | 80 | pub fn with_memory_limit(self, memory_limit: u64) 
-> Self { 81 | Self { 82 | memory_limit: Some(memory_limit), 83 | ..self 84 | } 85 | } 86 | 87 | pub fn build_with_client(self, client: S3Client) -> Result { 88 | Ok(S3Storage { 89 | client, 90 | bucket: self 91 | .bucket 92 | .ok_or_else(|| S3Error::BuilderError(String::from("missing bucket field")))?, 93 | base: self.base.unwrap_or_default(), 94 | memory_limit: self.memory_limit.unwrap_or(STORAGE_MEMORY_LIMIT), 95 | }) 96 | } 97 | 98 | pub fn build(self) -> Result { 99 | let (key, secret) = self 100 | .credential 101 | .ok_or_else(|| S3Error::BuilderError(String::from("missing credential field")))?; 102 | let name = self 103 | .name 104 | .ok_or_else(|| S3Error::BuilderError(String::from("missing name field")))?; 105 | let endpoint = self 106 | .endpoint 107 | .ok_or_else(|| S3Error::BuilderError(String::from("missing endpoint field")))?; 108 | let bucket = self 109 | .bucket 110 | .ok_or_else(|| S3Error::BuilderError(String::from("missing bucket field")))?; 111 | 112 | let credential = StaticProvider::new_minimal(key, secret); 113 | let http_client = rusoto_core::HttpClient::new().unwrap(); 114 | let common_client = Client::new_with(credential, http_client); 115 | 116 | let region = Region::Custom { name, endpoint }; 117 | let s3_client = S3Client::new_with_client(common_client, region); 118 | 119 | Ok(S3Storage { 120 | client: s3_client, 121 | bucket, 122 | base: self.base.unwrap_or_default(), 123 | memory_limit: self.memory_limit.unwrap_or(STORAGE_MEMORY_LIMIT), 124 | }) 125 | } 126 | } 127 | 128 | fn map_get_err(e: RusotoError) -> StorageError { 129 | match e { 130 | RusotoError::Service(e) => match e { 131 | GetObjectError::NoSuchKey(k) => StorageError::FileNotExists(PathBuf::from(k)), 132 | _ => StorageError::S3Error(RusotoError::Service(e).into()), 133 | }, 134 | _ => StorageError::S3Error(e.into()), 135 | } 136 | } 137 | 138 | async fn guess_mime(stream: &mut ByteStream) -> Option<&str> { 139 | let mut buf = [0; 512]; 140 | let bytes = stream.read(&mut 
buf).await.unwrap(); 141 | stream 142 | .seek(SeekFrom::Current(-(bytes as i64))) 143 | .await 144 | .unwrap(); 145 | infer::get(&buf).map(|mime| mime.mime_type()) 146 | } 147 | 148 | #[async_trait] 149 | impl StorageProvider for S3Storage { 150 | async fn get_file(&self, path: &Path) -> Result { 151 | let fullpath = get_fullpath(&self.base, path)?; 152 | 153 | let req = GetObjectRequest { 154 | bucket: self.bucket.clone(), 155 | key: fullpath.to_str().unwrap().to_string(), 156 | ..Default::default() 157 | }; 158 | let data = self.client.get_object(req).await.map_err(map_get_err)?; 159 | let mut src = data.body.unwrap().into_async_read(); 160 | 161 | if data 162 | .content_length 163 | .map_or(false, |l| l > self.memory_limit as i64) 164 | { 165 | let sync_dest = NamedTempFile::new()?; 166 | let mut dest = File::from_std(sync_dest.reopen()?); 167 | 168 | tokio::io::copy(&mut src, &mut dest).await?; 169 | dest.sync_all().await?; 170 | 171 | Ok(ByteStream::try_from(sync_dest)?) 172 | } else { 173 | let mut buf = vec![]; 174 | src.read_to_end(&mut buf).await?; 175 | 176 | Ok(ByteStream::Memory(Cursor::new(buf))) 177 | } 178 | } 179 | 180 | async fn put_file(&self, path: &Path, mut data: ByteStream) -> Result<()> { 181 | let fullpath = get_fullpath(&self.base, path)?; 182 | let content_length = data.size(); 183 | let content_type = guess_mime(&mut data).await.map(ToString::to_string); 184 | 185 | let req = PutObjectRequest { 186 | body: Some(StreamingBody::new(data)), 187 | bucket: self.bucket.clone(), 188 | content_length: Some(content_length as i64), 189 | content_type, 190 | key: fullpath.to_str().unwrap().to_string(), 191 | ..Default::default() 192 | }; 193 | 194 | self.client 195 | .put_object(req) 196 | .await 197 | .map_err(|e| StorageError::S3Error(e.into()))?; 198 | 199 | Ok(()) 200 | } 201 | 202 | async fn delete_file(&self, path: &Path) -> Result<()> { 203 | let fullpath = get_fullpath(&self.base, path)?; 204 | 205 | let req = DeleteObjectRequest { 206 | 
bucket: self.bucket.clone(), 207 | key: fullpath.to_str().unwrap().to_string(), 208 | ..Default::default() 209 | }; 210 | 211 | self.client 212 | .delete_object(req) 213 | .await 214 | .map_err(|e| StorageError::S3Error(e.into()))?; 215 | 216 | Ok(()) 217 | } 218 | } 219 | -------------------------------------------------------------------------------- /src/lib/types/remote_package.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::Cow; 2 | use std::cmp::Ordering; 3 | use std::fmt::{Display, Formatter}; 4 | use std::hash::{Hash, Hasher}; 5 | use std::str::FromStr; 6 | 7 | use super::*; 8 | 9 | #[derive(Debug, Clone)] 10 | pub enum Package { 11 | PacmanPackage(OwnedPacmanPackage), 12 | AurPackage(AurPackage), 13 | CustomPackage(CustomPackage), 14 | } 15 | 16 | impl PartialOrd for Package { 17 | fn partial_cmp(&self, other: &Self) -> Option { 18 | use Package::*; 19 | (self.name() == other.name()).then(|| match self.version().cmp(&other.version()) { 20 | Ordering::Equal => match (self, other) { 21 | (PacmanPackage(_) | CustomPackage(_), AurPackage(_)) 22 | | (PacmanPackage(_), CustomPackage(_)) => Ordering::Greater, 23 | (AurPackage(_) | CustomPackage(_), PacmanPackage(_)) 24 | | (AurPackage(_), CustomPackage(_)) => Ordering::Less, 25 | _ => other.depends().len().cmp(&self.depends().len()), 26 | }, 27 | ord => ord, 28 | }) 29 | } 30 | } 31 | 32 | impl Display for Package { 33 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 34 | let source = match self { 35 | Package::PacmanPackage(_) => "pacman", 36 | Package::AurPackage(_) => "aur", 37 | Package::CustomPackage(_) => "custom", 38 | }; 39 | write!(f, "[{}] {} {}", source, self.name(), self.version()) 40 | } 41 | } 42 | 43 | impl PartialEq for Package { 44 | fn eq(&self, other: &Self) -> bool { 45 | self.name() == other.name() && self.version() == other.version() 46 | } 47 | } 48 | 49 | impl Eq for Package {} 50 | 51 | impl Hash for Package { 52 | fn 
hash(&self, state: &mut H) { 53 | self.name().hash(state); 54 | self.version().hash(state); 55 | } 56 | } 57 | 58 | impl From> for Package { 59 | fn from(pkg: PacmanPackage) -> Self { 60 | Self::PacmanPackage(pkg.into()) 61 | } 62 | } 63 | 64 | impl From for Package { 65 | fn from(pkg: AurPackage) -> Self { 66 | Self::AurPackage(pkg) 67 | } 68 | } 69 | 70 | impl<'a> Package { 71 | pub fn name(&self) -> &str { 72 | match self { 73 | Package::PacmanPackage(pkg) => pkg.name.as_str(), 74 | Package::AurPackage(pkg) => pkg.name.as_str(), 75 | Package::CustomPackage(pkg) => pkg.name.as_str(), 76 | } 77 | } 78 | 79 | pub fn version(&'a self) -> Cow<'a, Version> { 80 | match self { 81 | Package::PacmanPackage(pkg) => Cow::Borrowed(&pkg.version), 82 | Package::AurPackage(pkg) => Cow::Owned(Version(pkg.version.clone())), 83 | Package::CustomPackage(pkg) => Cow::Owned(Version(pkg.data.pkgver.0.clone())), 84 | } 85 | } 86 | 87 | pub fn description(&self) -> Option<&str> { 88 | match self { 89 | Package::PacmanPackage(pkg) => pkg.desc.as_deref(), 90 | Package::AurPackage(pkg) => pkg.description.as_deref(), 91 | Package::CustomPackage(pkg) => pkg.data.pkgdesc.as_deref(), 92 | } 93 | } 94 | 95 | pub fn url(&self) -> Option<&str> { 96 | match self { 97 | Package::PacmanPackage(pkg) => pkg.url.as_deref(), 98 | Package::AurPackage(pkg) => pkg.url.as_deref(), 99 | Package::CustomPackage(pkg) => pkg.data.url.as_deref(), 100 | } 101 | } 102 | 103 | // TODO below: join same name into one DependVersion 104 | pub fn depends(&'a self) -> Cow<'a, Vec> { 105 | match self { 106 | Package::PacmanPackage(pkg) => Cow::Borrowed(&pkg.depends), 107 | Package::AurPackage(pkg) => Cow::Owned( 108 | pkg.depends 109 | .iter() 110 | .map(|s| Depend::from_str(s).unwrap()) 111 | .collect(), 112 | ), 113 | Package::CustomPackage(pkg) => Cow::Owned( 114 | pkg.data 115 | .depends 116 | .as_ref() 117 | .unwrap_or(&vec![]) 118 | .iter() 119 | .map(|s| Depend::from_str(s).unwrap()) 120 | .collect(), 121 | ), 122 
| } 123 | } 124 | 125 | // TODO below: join same name into one DependVersion 126 | pub fn make_depends(&'a self) -> Cow<'a, Vec> { 127 | match self { 128 | Package::PacmanPackage(pkg) => Cow::Borrowed(&pkg.makedepends), 129 | Package::AurPackage(pkg) => Cow::Owned( 130 | pkg.make_depends 131 | .iter() 132 | .map(|s| Depend::from_str(s).unwrap()) 133 | .collect(), 134 | ), 135 | Package::CustomPackage(pkg) => Cow::Owned( 136 | pkg.data 137 | .makedepends 138 | .as_ref() 139 | .unwrap_or(&vec![]) 140 | .iter() 141 | .map(|s| Depend::from_str(s).unwrap()) 142 | .collect(), 143 | ), 144 | } 145 | } 146 | 147 | pub fn conflicts(&'a self) -> Cow<'a, Vec> { 148 | match self { 149 | Package::PacmanPackage(pkg) => Cow::Borrowed(&pkg.conflicts), 150 | Package::AurPackage(pkg) => Cow::Owned( 151 | pkg.conflicts 152 | .iter() 153 | .map(|s| Depend::from_str(s).unwrap()) 154 | .collect(), 155 | ), 156 | Package::CustomPackage(pkg) => Cow::Owned( 157 | pkg.data 158 | .conflicts 159 | .as_ref() 160 | .unwrap_or(&vec![]) 161 | .iter() 162 | .map(|s| Depend::from_str(s).unwrap()) 163 | .collect(), 164 | ), 165 | } 166 | } 167 | 168 | pub fn provides(&'a self) -> Cow<'a, Vec> { 169 | match self { 170 | Package::PacmanPackage(pkg) => Cow::Borrowed(&pkg.provides), 171 | Package::AurPackage(pkg) => Cow::Owned( 172 | pkg.provides 173 | .iter() 174 | .map(|s| Depend::from_str(s).unwrap()) 175 | .collect(), 176 | ), 177 | Package::CustomPackage(pkg) => Cow::Owned( 178 | pkg.data 179 | .provides 180 | .as_ref() 181 | .unwrap_or(&vec![]) 182 | .iter() 183 | .map(|s| Depend::from_str(s).unwrap()) 184 | .collect(), 185 | ), 186 | } 187 | } 188 | 189 | pub fn replaces(&'a self) -> Cow<'a, Vec> { 190 | match self { 191 | Package::PacmanPackage(pkg) => Cow::Borrowed(&pkg.replaces), 192 | Package::AurPackage(pkg) => Cow::Owned( 193 | pkg.replaces 194 | .iter() 195 | .map(|s| Depend::from_str(s).unwrap()) 196 | .collect(), 197 | ), 198 | Package::CustomPackage(pkg) => Cow::Owned( 199 | pkg.data 
200 | .replaces 201 | .as_ref() 202 | .unwrap_or(&vec![]) 203 | .iter() 204 | .map(|s| Depend::from_str(s).unwrap()) 205 | .collect(), 206 | ), 207 | } 208 | } 209 | } 210 | -------------------------------------------------------------------------------- /src/lib/resolver/types/context.rs: -------------------------------------------------------------------------------- 1 | use std::collections::hash_map::{Values, ValuesMut}; 2 | use std::collections::{HashMap, HashSet}; 3 | use std::hash::{Hash, Hasher}; 4 | use std::sync::Arc; 5 | 6 | use crate::error::Result; 7 | use crate::resolver::types::graph::{EdgeEffect, SCCGraph}; 8 | use crate::types::*; 9 | 10 | #[derive(Debug, Default, Clone)] 11 | pub struct Context { 12 | pub packages: HashMap, 13 | pub graph: SCCGraph, 14 | pub conflicts: HashMap>, 15 | pub provides: HashMap>, 16 | } 17 | 18 | impl PartialEq for Context { 19 | fn eq(&self, other: &Self) -> bool { 20 | self.packages == other.packages 21 | } 22 | } 23 | 24 | impl Eq for Context {} 25 | 26 | impl Hash for Context { 27 | fn hash(&self, state: &mut H) { 28 | for package in self.packages.values() { 29 | package.hash(state); 30 | } 31 | } 32 | } 33 | 34 | pub type MaybeCycle = Option>; 35 | 36 | impl Context { 37 | pub fn is_empty(&self) -> bool { 38 | self.packages.is_empty() 39 | } 40 | 41 | pub fn add_edge(&mut self, i: &ArcPackage, j: &ArcPackage) -> Result> { 42 | self.graph.insert(i, j) 43 | } 44 | 45 | pub fn pkgs(&self) -> Values { 46 | self.packages.values() 47 | } 48 | 49 | pub fn pkgs_mut(&mut self) -> ValuesMut { 50 | self.packages.values_mut() 51 | } 52 | 53 | pub fn conflicts(&self, dep: &Depend) -> bool { 54 | self.conflicts 55 | .get(&*dep.name) 56 | .map_or(false, |range| !range.intersect(&dep.version).is_empty()) 57 | } 58 | 59 | pub fn satisfies(&self, dep: &Depend) -> bool { 60 | self.provides 61 | .get(&*dep.name) 62 | .map_or(false, |range| !range.intersect(&dep.version).is_empty()) 63 | } 64 | 65 | pub fn is_superset(&self, other: 
&[&Package]) -> bool { 66 | other.iter().all(|pkg| self.contains_exact(pkg)) 67 | } 68 | 69 | pub fn union(mut self, other: Self) -> Option { 70 | for package in other.packages.values() { 71 | if !self.is_compatible(package) { 72 | return None; 73 | } 74 | } 75 | for (k, v2) in other.packages { 76 | if let Some(v1) = self.packages.get(&k) { 77 | if *v1 != v2 { 78 | eprint!("FATAL: {} != {}", v1, v2); 79 | return None; 80 | } 81 | } 82 | self.packages.insert(k, v2); 83 | } 84 | 85 | self.graph.merge(&other.graph).unwrap(); 86 | 87 | for (k, v2) in other.provides { 88 | self.provides 89 | .entry(k) 90 | .and_modify(|v1| *v1 = Arc::new(v1.union(&*v2))) 91 | .or_insert(v2); 92 | } 93 | for (k, v2) in other.conflicts { 94 | self.conflicts 95 | .entry(k) 96 | .and_modify(|v1| *v1 = Arc::new(v1.intersect(&*v2))) 97 | .or_insert(v2); 98 | } 99 | Some(self) 100 | } 101 | pub fn new() -> Self { 102 | Default::default() 103 | } 104 | pub fn get(&self, name: &str) -> Option<&Package> { 105 | self.packages.get(name).map(|pkg| &**pkg) 106 | } 107 | pub fn contains_exact(&self, pkg: &Package) -> bool { 108 | self.packages 109 | .get(pkg.name()) 110 | .map_or(false, |candidate| &**candidate == pkg) 111 | } 112 | pub fn is_compatible(&self, pkg: &Package) -> bool { 113 | if let Some(same_pkg_ver) = self 114 | .packages 115 | .get(pkg.name()) 116 | .map(|old| old.version() == pkg.version()) 117 | { 118 | return same_pkg_ver; 119 | }; 120 | 121 | // let mut pkg_provides = vec![Depend::from(pkg)]; 122 | let mut pkg_provides = vec![Depend::from(pkg)]; 123 | pkg_provides.extend(pkg.provides().into_owned()); 124 | let conflicts_conflict = pkg_provides.into_iter().any(|provide| { 125 | self.conflicts 126 | .get(provide.name.as_str()) 127 | .map_or(false, |conflict| { 128 | !conflict.intersect(&provide.version).is_empty() 129 | }) 130 | }); 131 | 132 | let provides_conflict = pkg.conflicts().iter().any(|conflict| { 133 | self.provides 134 | .get(conflict.name.as_str()) 135 | .map_or(false, 
|provide| { 136 | !provide.intersect(&conflict.version).is_empty() 137 | }) 138 | }); 139 | 140 | !(conflicts_conflict || provides_conflict) 141 | } 142 | pub fn insert( 143 | mut self, 144 | pkg: &ArcPackage, 145 | reasons: HashSet, 146 | ) -> Option<(Self, MaybeCycle)> { 147 | self.insert_mut(pkg, reasons) 148 | .map(|maybe_cycle| (self, maybe_cycle)) 149 | } 150 | 151 | // success(hascycle(cycle)) 152 | pub fn insert_mut( 153 | &mut self, 154 | pkg: &ArcPackage, 155 | reasons: HashSet, 156 | ) -> Option { 157 | // TODO unchecked insert 158 | if self.is_compatible(&*pkg) { 159 | let name = pkg.name().to_string(); 160 | if let Some(existing) = self.packages.get(&name) { 161 | return if existing.version() == pkg.version() { 162 | Some(None) 163 | } else { 164 | None 165 | }; 166 | } 167 | self.packages.insert(name, pkg.clone()); 168 | self.graph.add_node(pkg.clone()); 169 | let cycle = reasons.iter().fold(None, |acc, reason| { 170 | let eff = self.graph.insert(reason, pkg).unwrap(); 171 | if acc.is_none() { 172 | if let EdgeEffect::NewEdge(Some(cycle)) = eff { 173 | Some(cycle) 174 | } else { 175 | None 176 | } 177 | } else { 178 | None 179 | } 180 | }); 181 | for reason in reasons { 182 | self.graph.insert(&reason, pkg).unwrap(); 183 | } 184 | 185 | let mut provides = pkg.provides().into_owned(); 186 | provides.push(Depend::from(pkg.as_ref())); 187 | for provide in provides { 188 | let depend_version = if let Some(pkg) = self.provides.get(provide.name.as_str()) { 189 | pkg.union(&provide.version) 190 | } else { 191 | provide.version 192 | }; 193 | self.provides.insert(provide.name, Arc::new(depend_version)); 194 | } 195 | 196 | for conflict in pkg.conflicts().into_owned() { 197 | let conflict_version = if let Some(pkg) = self.conflicts.get(conflict.name.as_str()) 198 | { 199 | pkg.union(&conflict.version) 200 | } else { 201 | conflict.version 202 | }; 203 | self.conflicts 204 | .insert(conflict.name, Arc::new(conflict_version)); 205 | } 206 | 207 | Some(cycle) 208 
| } else { 209 | None 210 | } 211 | } 212 | 213 | // TODO custom impl 214 | // This is actually SCC because we need to deal with loops 215 | pub fn strongly_connected_components(&self) -> Vec> { 216 | self.graph.strongly_connected_components(true) 217 | } 218 | } 219 | -------------------------------------------------------------------------------- /src/lib/storage/types/bytestream.rs: -------------------------------------------------------------------------------- 1 | use std::convert::TryFrom; 2 | use std::fs::File; 3 | use std::io::Result as IOResult; 4 | use std::io::{Cursor, SeekFrom}; 5 | use std::path::{Path, PathBuf}; 6 | use std::pin::Pin; 7 | use std::sync::Arc; 8 | use std::task::{Context, Poll}; 9 | 10 | use bytes::Bytes; 11 | use futures::{ready, Stream}; 12 | use tempfile::NamedTempFile; 13 | use tokio::io::{AsyncRead, AsyncSeek, AsyncSeekExt, AsyncWriteExt, ReadBuf}; 14 | 15 | use crate::utils::is_same_fs; 16 | 17 | #[derive(Debug, Clone)] 18 | pub enum FileObject { 19 | Unnamed, 20 | Path(PathBuf), // it's assumed that the file exists when object is alive 21 | NamedTemp(Arc), 22 | } 23 | 24 | #[derive(Debug)] 25 | pub enum ByteStream { 26 | Memory(Cursor>), 27 | File { 28 | handle: tokio::fs::File, 29 | object_type: FileObject, 30 | length: u64, 31 | }, 32 | } 33 | 34 | impl ByteStream { 35 | pub fn from_path(path: impl AsRef) -> IOResult { 36 | let handle = std::fs::File::open(path.as_ref())?; 37 | let length = handle.metadata()?.len(); 38 | Ok(Self::File { 39 | handle: tokio::fs::File::from_std(handle), 40 | object_type: FileObject::Path(path.as_ref().to_path_buf()), 41 | length, 42 | }) 43 | } 44 | 45 | pub const fn in_memory(&self) -> bool { 46 | matches!(self, ByteStream::Memory(_)) 47 | } 48 | 49 | pub fn size(&self) -> u64 { 50 | match self { 51 | ByteStream::Memory(v) => v.get_ref().len() as u64, 52 | ByteStream::File { length, .. 
} => *length, 53 | } 54 | } 55 | 56 | pub async fn into_file(self, path: impl AsRef + Clone + Send) -> IOResult<()> { 57 | use tokio::fs::File; 58 | match self { 59 | ByteStream::Memory(v) => { 60 | let data = v.into_inner(); 61 | let mut dest = File::create(path).await?; 62 | dest.write_all(&data).await?; 63 | dest.sync_all().await?; 64 | } 65 | ByteStream::File { 66 | object_type: FileObject::NamedTemp(file), 67 | .. 68 | } => { 69 | if is_same_fs(file.path(), path.clone()) { 70 | match Arc::try_unwrap(file) { 71 | Ok(file) => { 72 | // this stream is the only owner of the file, persist 73 | file.persist(path)?; 74 | } 75 | Err(file) => { 76 | // this stream isn't the only owner, copy file 77 | tokio::fs::copy(file.path(), path).await?; 78 | } 79 | } 80 | } else { 81 | // we can't persist tempfile across filesystems 82 | tokio::fs::copy(file.path(), path).await?; 83 | } 84 | } 85 | ByteStream::File { 86 | object_type: FileObject::Path(src_path), 87 | .. 88 | } => { 89 | tokio::fs::copy(src_path, path).await?; 90 | } 91 | ByteStream::File { 92 | handle: mut file, .. 93 | } => { 94 | file.seek(SeekFrom::Start(0)).await?; 95 | let mut dest = File::create(path).await?; 96 | tokio::io::copy(&mut file, &mut dest).await?; 97 | dest.sync_all().await?; 98 | } 99 | } 100 | Ok(()) 101 | } 102 | } 103 | 104 | impl Clone for ByteStream { 105 | // NOTE 106 | // the cloned bytestream will have its pointer rewound 107 | fn clone(&self) -> Self { 108 | match self { 109 | ByteStream::Memory(v) => Self::Memory(Cursor::new(v.clone().into_inner())), // TODO use custom cursor to avoid this clone 110 | ByteStream::File { 111 | object_type: FileObject::NamedTemp(temp_file), 112 | length, 113 | .. 114 | } => Self::File { 115 | handle: tokio::fs::File::from_std(temp_file.reopen().unwrap()), 116 | object_type: FileObject::NamedTemp(temp_file.clone()), 117 | length: *length, 118 | }, 119 | ByteStream::File { 120 | object_type: FileObject::Path(file_path), 121 | .. 
122 | } => { 123 | let mut src = std::fs::File::open(file_path).unwrap(); 124 | let mut new_file = NamedTempFile::new().unwrap(); 125 | std::io::copy(&mut src, &mut new_file).unwrap(); 126 | Self::try_from(new_file).unwrap() 127 | } 128 | ByteStream::File { 129 | object_type: FileObject::Unnamed, 130 | .. 131 | } => { 132 | // NOTE 133 | // It's possible to support cloning unnamed file backed bytestream by 134 | // 1. create another handle on the same fd 135 | // 2. record its current pos (by seek(current)) 136 | // 3. rewind it 137 | // 4. copy it to another temp file 138 | // 5. create the new stream on the newly created temp file 139 | // 6. seek the original file to the previously saved pos 140 | // However, it's unsafe and won't sync well. 141 | panic!("unsupported") 142 | } 143 | } 144 | } 145 | } 146 | 147 | impl Stream for ByteStream { 148 | type Item = IOResult; 149 | 150 | fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 151 | let mut inner_buf = [0; 8192]; 152 | let mut buf = ReadBuf::new(&mut inner_buf); 153 | match ready!(self.poll_read(cx, &mut buf)) { 154 | Ok(_) => Some(Ok(Bytes::from(Vec::from(buf.filled())))).into(), 155 | Err(e) => Some(Err(e)).into(), 156 | } 157 | } 158 | } 159 | 160 | impl From> for ByteStream { 161 | fn from(v: Vec) -> Self { 162 | Self::Memory(Cursor::new(v)) 163 | } 164 | } 165 | 166 | impl TryFrom for ByteStream { 167 | type Error = std::io::Error; 168 | 169 | fn try_from(f: NamedTempFile) -> Result { 170 | let length = f.as_file().metadata()?.len(); 171 | Ok(Self::File { 172 | handle: f.reopen()?.into(), 173 | object_type: FileObject::NamedTemp(Arc::new(f)), 174 | length, 175 | }) 176 | } 177 | } 178 | 179 | impl TryFrom for ByteStream { 180 | type Error = std::io::Error; 181 | 182 | fn try_from(f: File) -> Result { 183 | let length = f.metadata()?.len(); 184 | Ok(Self::File { 185 | handle: f.into(), 186 | object_type: FileObject::Unnamed, 187 | length, 188 | }) 189 | } 190 | } 191 | 192 | impl 
AsyncRead for ByteStream { 193 | fn poll_read( 194 | self: Pin<&mut Self>, 195 | cx: &mut Context<'_>, 196 | buf: &mut ReadBuf<'_>, 197 | ) -> Poll> { 198 | match self.get_mut() { 199 | ByteStream::Memory(v) => Pin::new(v).poll_read(cx, buf), 200 | ByteStream::File { handle: f, .. } => Pin::new(f).poll_read(cx, buf), 201 | } 202 | } 203 | } 204 | 205 | impl AsyncSeek for ByteStream { 206 | fn start_seek(self: Pin<&mut Self>, position: SeekFrom) -> IOResult<()> { 207 | match self.get_mut() { 208 | ByteStream::Memory(v) => Pin::new(v).start_seek(position), 209 | ByteStream::File { handle: f, .. } => Pin::new(f).start_seek(position), 210 | } 211 | } 212 | 213 | fn poll_complete(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 214 | match self.get_mut() { 215 | ByteStream::Memory(v) => Pin::new(v).poll_complete(cx), 216 | ByteStream::File { handle: f, .. } => Pin::new(f).poll_complete(cx), 217 | } 218 | } 219 | } 220 | -------------------------------------------------------------------------------- /src/lib/builder/nspawn.rs: -------------------------------------------------------------------------------- 1 | use std::io::Write; 2 | use std::ops::DerefMut; 3 | use std::path::{Path, PathBuf}; 4 | use std::process::Stdio; 5 | use std::sync::{Arc, Mutex}; 6 | 7 | use async_trait::async_trait; 8 | use fs3::FileExt; 9 | use tempfile::NamedTempFile; 10 | 11 | use crate::consts::*; 12 | use crate::error::{BuildError, CommandError, GpgError}; 13 | use crate::parser::PacmanConf; 14 | use crate::parser::GLOBAL_CONFIG; 15 | use crate::utils::map_gpg_code; 16 | 17 | use super::{BuildOptions, Builder, Result}; 18 | 19 | #[derive(Clone)] 20 | pub struct NspawnBuildOptions { 21 | base: BuildOptions, 22 | working_dir: PathBuf, 23 | pacman_conf: Option, 24 | makepkg_conf: Option, 25 | } 26 | 27 | impl NspawnBuildOptions { 28 | pub fn new(base_option: &BuildOptions, working_dir: impl AsRef) -> Self { 29 | Self { 30 | base: base_option.clone(), 31 | working_dir: 
working_dir.as_ref().to_path_buf(), 32 | pacman_conf: None, 33 | makepkg_conf: None, 34 | } 35 | } 36 | setter_option_clone!(pacman_conf, PacmanConf); 37 | 38 | pub fn makepkg_conf(mut self, makepkg_conf: impl AsRef) -> Self { 39 | self.makepkg_conf = Some(makepkg_conf.as_ref().to_path_buf()); 40 | self 41 | } 42 | } 43 | 44 | #[derive(Clone)] 45 | pub struct NspawnBuilder { 46 | options: NspawnBuildOptions, 47 | workdir_lock: Arc>>, 48 | } 49 | 50 | impl NspawnBuilder { 51 | pub fn new(options: &NspawnBuildOptions) -> Self { 52 | Self { 53 | options: options.clone(), 54 | workdir_lock: Arc::new(Mutex::new(None)), 55 | } 56 | } 57 | 58 | fn pacman_conf(&self) -> &PacmanConf { 59 | self.options 60 | .pacman_conf 61 | .as_ref() 62 | .map_or(&GLOBAL_CONFIG, |conf| conf) 63 | } 64 | 65 | fn makepkg_conf(&self) -> PathBuf { 66 | self.options 67 | .makepkg_conf 68 | .as_ref() 69 | .map_or(PathBuf::from(MAKEPKG_CONF_PATH), Clone::clone) 70 | } 71 | 72 | fn set_stdout(&self, cmd: &mut tokio::process::Command) { 73 | if !self.options.base.verbose { 74 | cmd.stdout(Stdio::null()); 75 | cmd.stderr(Stdio::null()); 76 | } 77 | } 78 | 79 | pub(crate) async fn test_unshare() -> bool { 80 | let mut child = if let Ok(child) = tokio::process::Command::new("sudo") 81 | .args(&["unshare", "--fork", "--pid", "bash", "-c", "exit"]) 82 | .stdout(Stdio::null()) 83 | .stderr(Stdio::null()) 84 | .spawn() 85 | { 86 | child 87 | } else { 88 | return false; 89 | }; 90 | 91 | child 92 | .wait() 93 | .await 94 | .map(|status| status.success()) 95 | .unwrap_or(false) 96 | } 97 | 98 | pub(crate) fn lock_workdir(&self) -> Result<()> { 99 | std::fs::create_dir_all(&self.options.working_dir)?; 100 | 101 | let lock_file = std::fs::File::create(self.options.working_dir.join(".lock")) 102 | .map_err(|_| BuildError::LockError)?; 103 | lock_file 104 | .lock_exclusive() 105 | .map_err(|_| BuildError::LockError)?; 106 | 107 | *self.workdir_lock.lock().unwrap().deref_mut() = Some(lock_file); 108 | Ok(()) 109 
| } 110 | 111 | pub(crate) fn unlock_workdir(&self) -> Result<()> { 112 | let mut maybe_lock_file = self.workdir_lock.lock().unwrap(); 113 | if let Some(lock_file) = &mut *maybe_lock_file { 114 | lock_file.unlock()?; 115 | } 116 | if maybe_lock_file.is_some() { 117 | *maybe_lock_file = None; 118 | } 119 | Ok(()) 120 | } 121 | 122 | async fn sudo_cp( 123 | &self, 124 | from: impl AsRef + Send + Sync, 125 | to: impl AsRef + Send + Sync, 126 | recursive: bool, 127 | ) -> Result<()> { 128 | let mut cmd = tokio::process::Command::new("sudo"); 129 | cmd.arg("cp"); 130 | if recursive { 131 | cmd.arg("-R"); 132 | } 133 | cmd.arg(from.as_ref()).arg(to.as_ref()); 134 | self.set_stdout(&mut cmd); 135 | 136 | if cmd.spawn()?.wait().await?.success() { 137 | Ok(()) 138 | } else { 139 | Err(CommandError::Cp.into()) 140 | } 141 | } 142 | 143 | async fn make_arch_root(&self) -> Result<()> { 144 | let pacman_conf = self.pacman_conf(); 145 | let makepkg_conf = self.makepkg_conf(); 146 | let cache_dir = pacman_conf.option("CacheDir").unwrap(); 147 | let working_dir = &self.options.working_dir; 148 | 149 | let mut mkarchroot_cmd = tokio::process::Command::new("mkarchroot"); 150 | mkarchroot_cmd 151 | .arg("-C") 152 | .arg(pacman_conf.path()) 153 | .arg("-M") 154 | .arg(makepkg_conf) 155 | .args(&["-c", cache_dir]) 156 | .arg(working_dir) 157 | .arg("base-devel"); 158 | self.set_stdout(&mut mkarchroot_cmd); 159 | if !mkarchroot_cmd.spawn()?.wait().await?.success() { 160 | return Err(CommandError::MkArchRoot.into()); 161 | } 162 | 163 | Ok(()) 164 | } 165 | 166 | async fn copy_hostconf(&self) -> Result<()> { 167 | let working_dir = &self.options.working_dir; 168 | let pacman_conf = self.pacman_conf(); 169 | let makepkg_conf = self.makepkg_conf(); 170 | 171 | let src_gpg_dir = PathBuf::from(pacman_conf.option("GPGDir").unwrap()); 172 | let dest_gpg_dir = working_dir.join("etc/pacman.d/gnupg"); 173 | 174 | let mut gpg_cmd = tokio::process::Command::new("sudo"); 175 | if 
Self::test_unshare().await { 176 | gpg_cmd.args(&["unshare", "--fork", "--pid", "gpg"]); 177 | } else { 178 | gpg_cmd.args(&["gpg"]); 179 | }; 180 | gpg_cmd 181 | .arg("--homedir") 182 | .arg(&dest_gpg_dir) 183 | .args(&["--no-permission-warning", "--quiet", "--batch", "--import"]) 184 | .args(&["--import-options", "import-local-sigs"]) 185 | .arg(src_gpg_dir.join("pubring.gpg")); 186 | self.set_stdout(&mut gpg_cmd); 187 | let gpg_code = gpg_cmd.spawn()?.wait().await?.code(); 188 | gpg_code 189 | .map_or(Some(GpgError::Signal), map_gpg_code) 190 | .map_or(Ok(()), |e| Err(CommandError::Gpg(e)))?; 191 | 192 | let mut key_trust_cmd = tokio::process::Command::new("sudo"); 193 | key_trust_cmd 194 | .args(&["pacman-key", "--gpgdir"]) 195 | .arg(&dest_gpg_dir) 196 | .arg("--import-trustdb") 197 | .arg(&src_gpg_dir); 198 | self.set_stdout(&mut key_trust_cmd); 199 | if !key_trust_cmd.spawn()?.wait().await?.success() { 200 | return Err(CommandError::PacmanKey.into()); 201 | } 202 | 203 | let dest_mirror_list = working_dir.join("etc/pacman.d/mirrorlist"); 204 | let dest_pac_conf = working_dir.join("etc/pacman.conf"); 205 | let dest_makepkg_conf = working_dir.join("etc/makepkg.conf"); 206 | { 207 | let mut temp_mirror_list = NamedTempFile::new()?; 208 | temp_mirror_list.write_all(pacman_conf.mirror_list().as_ref())?; 209 | self.sudo_cp(temp_mirror_list.path(), &dest_mirror_list, false) 210 | .await?; 211 | } 212 | self.sudo_cp(pacman_conf.path(), &dest_pac_conf, false) 213 | .await?; 214 | self.sudo_cp(&makepkg_conf, &dest_makepkg_conf, false) 215 | .await?; 216 | 217 | // TODO files 218 | 219 | // TODO sed cachedir 220 | 221 | Ok(()) 222 | } 223 | } 224 | 225 | #[async_trait] 226 | impl Builder for NspawnBuilder { 227 | async fn setup(&self) -> Result<()> { 228 | self.make_arch_root().await?; 229 | 230 | self.copy_hostconf().await?; 231 | 232 | // TODO mkchrootpkg 233 | 234 | Ok(()) 235 | } 236 | 237 | async fn teardown(&self) -> Result<()> { 238 | todo!() 239 | } 240 | 241 | 
async fn sync_system(&self) -> Result<()> { 242 | todo!() 243 | } 244 | 245 | async fn install_local(&self, path: &Path) -> Result<()> { 246 | todo!() 247 | } 248 | 249 | async fn install_remote(&self, packages: &[&str]) -> Result<()> { 250 | todo!() 251 | } 252 | 253 | async fn remove(&self, packages: &[&str]) -> Result<()> { 254 | todo!() 255 | } 256 | 257 | async fn build(&self, path: &Path) -> Result> { 258 | todo!() 259 | } 260 | } 261 | -------------------------------------------------------------------------------- /tests/archives/test.tar: -------------------------------------------------------------------------------- 1 | test0000644000175000017500000000000514052537145014007 0ustar lightquantumlightquantumtest 2 | -------------------------------------------------------------------------------- /src/lib/storage/tests.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::convert::TryFrom; 3 | use std::env; 4 | use std::io::{Seek, SeekFrom, Write}; 5 | use std::path::{Path, PathBuf}; 6 | use std::sync::{Arc, Mutex}; 7 | use std::time::Duration; 8 | 9 | use async_trait::async_trait; 10 | use itertools::Itertools; 11 | use rand::prelude::*; 12 | use rstest::rstest; 13 | use tempfile::{tempdir, tempfile, NamedTempFile}; 14 | use testcontainers::images::generic::{GenericImage, WaitFor}; 15 | use testcontainers::{clients, Docker, RunArgs}; 16 | use tokio::io::{AsyncReadExt, AsyncSeekExt}; 17 | 18 | use crate::storage::providers::{FSStorage, S3StorageBuilder}; 19 | use crate::tests::*; 20 | 21 | use super::transaction::*; 22 | 23 | fn setup_memory_bytestream() -> ByteStream { 24 | let data = vec![1, 2, 3, 4, 5]; 25 | ByteStream::from(data) 26 | } 27 | 28 | fn setup_memory_bytestream_with(data: Vec) -> ByteStream { 29 | ByteStream::from(data) 30 | } 31 | 32 | fn setup_unnamedfile_bytestream() -> ByteStream { 33 | let mut file = tempfile().expect("unable to create temp file"); 34 | 
assert_eq!(file.write(&[1, 2, 3, 4, 5]).expect("write failed"), 5); 35 | file.seek(SeekFrom::Start(0)).expect("unable to rewind"); 36 | ByteStream::try_from(file).unwrap() 37 | } 38 | 39 | fn setup_tempfile_bytestream() -> ByteStream { 40 | let mut file = NamedTempFile::new().expect("unable to create temp file"); 41 | assert_eq!(file.write(&[1, 2, 3, 4, 5]).expect("write failed"), 5); 42 | file.seek(SeekFrom::Start(0)).expect("unable to rewind"); 43 | ByteStream::try_from(file).unwrap() 44 | } 45 | 46 | fn setup_pathfile_bytestream() -> ByteStream { 47 | let mut file = std::fs::File::create("tests/stream.test").expect("unable to create file"); 48 | assert_eq!(file.write(&[1, 2, 3, 4, 5]).expect("write failed"), 5); 49 | file.seek(SeekFrom::Start(0)).expect("unable to rewind"); 50 | ByteStream::from_path("tests/stream.test").expect("unable to create stream") 51 | } 52 | 53 | #[rstest] 54 | #[case(setup_memory_bytestream())] 55 | #[case(setup_unnamedfile_bytestream())] 56 | #[case(setup_tempfile_bytestream())] 57 | #[case(setup_pathfile_bytestream())] 58 | #[tokio::test] 59 | async fn test_bytestream_read(#[case] mut stream: ByteStream) { 60 | let mut read_buf = vec![]; 61 | stream 62 | .read_to_end(&mut read_buf) 63 | .await 64 | .expect("read failed"); 65 | assert_eq!(read_buf, [1, 2, 3, 4, 5], "content mismatch"); 66 | 67 | stream.seek(SeekFrom::Start(1)).await.expect("seek failed"); 68 | let mut read_buf = vec![]; 69 | stream 70 | .read_to_end(&mut read_buf) 71 | .await 72 | .expect("read failed"); 73 | assert_eq!(read_buf, [2, 3, 4, 5], "content mismatch"); 74 | } 75 | 76 | #[rstest] 77 | #[case(setup_memory_bytestream())] 78 | #[case(setup_tempfile_bytestream())] 79 | #[case(setup_pathfile_bytestream())] 80 | #[tokio::test] 81 | async fn test_bytestream_clone(#[case] mut stream: ByteStream) { 82 | let mut read_buf = vec![]; 83 | stream 84 | .read_to_end(&mut read_buf) 85 | .await 86 | .expect("read failed"); 87 | assert_eq!(read_buf, [1, 2, 3, 4, 5], "content 
mismatch"); 88 | 89 | let mut cloned_stream: ByteStream = stream.clone(); 90 | let mut read_buf = vec![]; 91 | cloned_stream 92 | .read_to_end(&mut read_buf) 93 | .await 94 | .expect("read failed"); 95 | assert_eq!(read_buf, [1, 2, 3, 4, 5], "content mismatch"); 96 | } 97 | 98 | #[rstest] 99 | #[case(setup_memory_bytestream(), PathBuf::from("tests/persist.test.1"))] // in-memory stream 100 | #[case(setup_unnamedfile_bytestream(), PathBuf::from("tests/persist.test.2"))] // bare file stream 101 | #[case(setup_tempfile_bytestream(), PathBuf::from("tests/persist.test.3"))] // namedfile to different fs (on my pc) 102 | #[case(setup_tempfile_bytestream(), env::temp_dir().join("archer_persist.test"))] // namedfile to same fs 103 | #[case(setup_pathfile_bytestream(), PathBuf::from("tests/persist.test.4"))] // path backed file 104 | #[tokio::test] 105 | async fn test_bytestream_persist(#[case] stream: ByteStream, #[case] persist_path: PathBuf) { 106 | drop(std::fs::remove_file(&persist_path)); 107 | stream 108 | .into_file(&persist_path) 109 | .await 110 | .expect("unable to persist to file"); 111 | let data = std::fs::read(&persist_path).expect("unable to read file"); 112 | assert_eq!(data, [1, 2, 3, 4, 5], "content mismatch"); 113 | std::fs::remove_file(persist_path).expect("cleanup failed"); 114 | } 115 | 116 | async fn must_provider_work(storage: impl StorageProvider, strict: bool) { 117 | storage 118 | .put_file("test-1".as_ref(), vec![1, 2, 3, 4, 5].into()) 119 | .await 120 | .expect("put failed"); 121 | storage 122 | .put_file("test-2".as_ref(), vec![1, 2, 3, 4, 5, 6].into()) 123 | .await 124 | .expect("put failed"); 125 | 126 | if strict { 127 | assert!( 128 | matches!( 129 | storage 130 | .delete_file("invalid-file".as_ref()) 131 | .await 132 | .unwrap_err(), 133 | StorageError::FileNotExists(_) 134 | ), 135 | "deleting invalid file" 136 | ); 137 | } 138 | assert!( 139 | matches!( 140 | storage.get_file("invalid-file".as_ref()).await.unwrap_err(), 141 | 
StorageError::FileNotExists(_) 142 | ), 143 | "getting invalid file" 144 | ); 145 | 146 | let mut stream_1 = storage 147 | .get_file("test-1".as_ref()) 148 | .await 149 | .expect("get failed"); 150 | assert!(stream_1.in_memory()); 151 | let mut read_buf = vec![]; 152 | stream_1 153 | .read_to_end(&mut read_buf) 154 | .await 155 | .expect("read failed"); 156 | assert_eq!(read_buf, [1, 2, 3, 4, 5], "content mismatch"); 157 | 158 | let mut stream_2 = storage 159 | .get_file("test-2".as_ref()) 160 | .await 161 | .expect("get failed"); 162 | assert!(!stream_2.in_memory()); 163 | let mut read_buf = vec![]; 164 | stream_2 165 | .read_to_end(&mut read_buf) 166 | .await 167 | .expect("read failed"); 168 | assert_eq!(read_buf, [1, 2, 3, 4, 5, 6], "content mismatch"); 169 | 170 | storage 171 | .delete_file("test-2".as_ref()) 172 | .await 173 | .expect("delete failed"); 174 | assert!( 175 | matches!( 176 | storage.get_file("test-2".as_ref()).await.unwrap_err(), 177 | StorageError::FileNotExists(_) 178 | ), 179 | "getting deleted file" 180 | ); 181 | } 182 | 183 | #[tokio::test] 184 | async fn test_fs_provider() { 185 | let test_dir = tempdir().expect("temp dir creation failed"); 186 | let fs_storage = FSStorage::new_with_limit(test_dir.path(), 5); 187 | 188 | must_provider_work(fs_storage, true).await 189 | } 190 | 191 | #[tokio::test] 192 | async fn test_s3_provider() { 193 | let s3_storage = S3StorageBuilder::new() 194 | .with_name("mock-s3") 195 | .with_bucket("test-bucket") 196 | .with_credential("", "") 197 | .with_memory_limit(5); 198 | 199 | if let Some(endpoint) = option_env!("S3_ENDPOINT") { 200 | let s3_storage = s3_storage 201 | .with_endpoint(endpoint) 202 | .build() 203 | .expect("unable to build s3 storage"); 204 | 205 | must_provider_work(s3_storage, false).await 206 | } else { 207 | let client = Arc::new(clients::Cli::default()); 208 | let image = GenericImage::new("adobe/s3mock") 209 | .with_env_var("initialBuckets", "test-bucket") 210 | 
.with_wait_for(WaitFor::message_on_stdout("Started S3MockApplication")); 211 | let args = RunArgs::default().with_mapped_port((9090, 9090)); 212 | let _container = client.run_with_args(image, args); 213 | 214 | let s3_storage = s3_storage 215 | .with_endpoint("http://localhost:9090") 216 | .build() 217 | .expect("unable to build s3 storage"); 218 | 219 | must_provider_work(s3_storage, false).await 220 | } 221 | } 222 | 223 | #[derive(Default)] 224 | struct MockProvider { 225 | seq: Mutex>, 226 | } 227 | 228 | impl MockProvider { 229 | fn assert_ord(&self, path_1: &Path, path_2: &Path) { 230 | let pos_1 = self 231 | .seq 232 | .lock() 233 | .unwrap() 234 | .iter() 235 | .find_position(|p| p.as_path() == path_1) 236 | .unwrap() 237 | .0; 238 | let pos_2 = self 239 | .seq 240 | .lock() 241 | .unwrap() 242 | .iter() 243 | .find_position(|p| p.as_path() == path_2) 244 | .unwrap() 245 | .0; 246 | assert!(pos_1 < pos_2, "ord assertion failed"); 247 | } 248 | } 249 | 250 | #[async_trait] 251 | impl StorageProvider for MockProvider { 252 | async fn get_file(&self, _path: &Path) -> Result { 253 | panic!("get_file not supported") 254 | } 255 | 256 | async fn put_file(&self, path: &Path, _data: ByteStream) -> Result<()> { 257 | tokio::time::sleep(Duration::from_millis((random::() * 50.) as u64)).await; 258 | self.seq.lock().unwrap().push(path.to_path_buf()); 259 | tokio::time::sleep(Duration::from_millis((random::() * 50.) as u64)).await; 260 | Ok(()) 261 | } 262 | 263 | async fn delete_file(&self, path: &Path) -> Result<()> { 264 | tokio::time::sleep(Duration::from_millis((random::() * 20.) as u64)).await; 265 | self.seq.lock().unwrap().push(path.to_path_buf()); 266 | tokio::time::sleep(Duration::from_millis((random::() * 20.) 
as u64)).await; 267 | Ok(()) 268 | } 269 | } 270 | 271 | #[tokio::test(flavor = "multi_thread", worker_threads = 6)] 272 | async fn must_txn() { 273 | let mut txn = Txn::new(); 274 | txn.add(TxnAction::Put("1".into(), setup_memory_bytestream())); 275 | txn.add(TxnAction::Put("2".into(), setup_memory_bytestream())); 276 | txn.add(TxnAction::Put("3".into(), setup_memory_bytestream())); 277 | txn.add(TxnAction::Delete("4".into())); 278 | txn.add(TxnAction::Put("5".into(), setup_memory_bytestream())); 279 | txn.add(TxnAction::Put("6".into(), setup_memory_bytestream())); 280 | txn.add(TxnAction::Barrier); 281 | txn.add(TxnAction::Delete("7".into())); 282 | txn.add(TxnAction::Put("8".into(), setup_memory_bytestream())); 283 | txn.add(TxnAction::Delete("9".into())); 284 | txn.add(TxnAction::Put("10".into(), setup_memory_bytestream())); 285 | txn.add(TxnAction::Delete("11".into())); 286 | txn.add(TxnAction::Put("12".into(), setup_memory_bytestream())); 287 | txn.add(TxnAction::Delete("13".into())); 288 | txn.add(TxnAction::Barrier); 289 | txn.add(TxnAction::Delete("14".into())); 290 | txn.add(TxnAction::Delete("15".into())); 291 | 292 | let mock_provider = MockProvider::default(); 293 | txn.commit(&mock_provider).await.expect("unable to commit"); 294 | 295 | let ord_1 = (1..=6).cartesian_product(7..=13); 296 | ord_1.into_iter().for_each(|(x, y)| { 297 | mock_provider.assert_ord(&PathBuf::from(x.to_string()), &PathBuf::from(y.to_string())) 298 | }); 299 | let ord_2 = (7..=13).cartesian_product(14..=15); 300 | ord_2.into_iter().for_each(|(x, y)| { 301 | mock_provider.assert_ord(&PathBuf::from(x.to_string()), &PathBuf::from(y.to_string())) 302 | }); 303 | } 304 | 305 | fn validate_as(expect: Option>) -> Box>) -> Result<()> + Send> { 306 | Box::new(move |data| { 307 | if data == expect { 308 | Ok(()) 309 | } else { 310 | Err(StorageError::Conflict) 311 | } 312 | }) 313 | } 314 | 315 | #[rstest] 316 | // naive 317 | #[case(vec![ 318 | TxnAction::Put("1".into(), 
setup_memory_bytestream_with(vec![1,2,3,4])), 319 | TxnAction::Put("2".into(), setup_memory_bytestream_with(vec![1,2,3,4,5])), 320 | TxnAction::Put("3".into(), setup_memory_bytestream_with(vec![1,2,3,4,5,6])), 321 | TxnAction::Put("4".into(), setup_memory_bytestream_with(vec![1,2,3,4,5,6,7])), 322 | TxnAction::Barrier, 323 | TxnAction::Delete("4".into()), 324 | TxnAction::Assertion("1".into(), validate_as(Some(vec![1,2,3,4]))), 325 | TxnAction::Assertion("2".into(), validate_as(Some(vec![1,2,3,4,5]))), 326 | TxnAction::Assertion("3".into(), validate_as(Some(vec![1,2,3,4,5,6]))), 327 | TxnAction::Assertion("4".into(), validate_as(None)), 328 | ], |result: Result<()>|assert!(result.is_ok()))] 329 | // assertion has barrier 330 | #[case(vec![ 331 | TxnAction::Put("1".into(), setup_memory_bytestream_with(vec![1,2,3,4])), 332 | TxnAction::Put("2".into(), setup_memory_bytestream_with(vec![1,2,3,4,5])), 333 | TxnAction::Put("3".into(), setup_memory_bytestream_with(vec![1,2,3,4,5,6])), 334 | TxnAction::Put("4".into(), setup_memory_bytestream_with(vec![1,2,3,4,5,6,7])), 335 | TxnAction::Assertion("4".into(), validate_as(Some(vec![1,2,3,4,5,6,7]))), 336 | ], |result: Result<()>|assert!(result.is_ok()))] 337 | #[case(vec![ 338 | TxnAction::Put("1".into(), setup_memory_bytestream_with(vec![1,2,3,4])), 339 | TxnAction::Put("2".into(), setup_memory_bytestream_with(vec![1,2,3,4,5])), 340 | TxnAction::Put("3".into(), setup_memory_bytestream_with(vec![1,2,3,4,5,6])), 341 | TxnAction::Assertion("4".into(), validate_as(Some(vec![1,2,3,4,5,6,7]))), 342 | TxnAction::Put("4".into(), setup_memory_bytestream_with(vec![1,2,3,4,5,6,7])), 343 | ], |result: Result<()>|assert!(matches!(result, Err(StorageError::Conflict))))] 344 | #[tokio::test(flavor = "multi_thread", worker_threads = 6)] 345 | async fn must_txn_asrt(#[case] actions: Vec, #[case] expect: fn(Result<()>)) { 346 | let test_dir = tempdir().expect("temp dir creation failed"); 347 | let fs_storage = 
FSStorage::new_with_limit(test_dir.path(), 5); 348 | 349 | let mut txn = Txn::new(); 350 | for action in actions { 351 | txn.add(action); 352 | } 353 | expect(txn.commit(&fs_storage).await) 354 | } 355 | 356 | #[test] 357 | fn test_lockfile() { 358 | let mut meta_map: MetaKeyMap = HashMap::new(); 359 | meta_map.insert( 360 | PackageMeta::new("a", &Version(String::from("1.0.0")), 0), 361 | PathBuf::from("a.tar"), 362 | ); 363 | meta_map.insert( 364 | PackageMeta::new("b", &Version(String::from("1.0.1")), 1), 365 | PathBuf::from("b.tar"), 366 | ); 367 | 368 | let lock_file = LockFile::from(&meta_map); 369 | 370 | let ser_lockfile = serde_json::to_string(&lock_file).expect("unable to serialize lockfile"); 371 | let de_lockfile: LockFile = 372 | serde_json::from_str(ser_lockfile.as_str()).expect("unable to deserialize lockfile"); 373 | 374 | let de_map = MetaKeyMap::from(&de_lockfile); 375 | 376 | assert_eq!(meta_map, de_map, "map mismatch"); 377 | } 378 | --------------------------------------------------------------------------------