integrate CI and docs

This commit is contained in:
Levi Neuwirth 2026-05-01 11:29:18 -04:00
parent 21c6056ae6
commit 1996db37b2
65 changed files with 2238 additions and 699 deletions

View File

@ -35,13 +35,10 @@ jobs:
- name: Compile-check benches
run: cargo build --workspace --benches
# Informational: surfaces formatting drift without blocking merges.
# Flip continue-on-error to false (or remove the line) once the
# codebase is fmt-clean.
# Gate: codebase is fmt-clean as of v0.1.0; keep it that way.
fmt:
name: fmt (informational)
name: fmt
runs-on: ubuntu-latest
continue-on-error: true
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable

View File

@ -14,7 +14,7 @@ members = [
[workspace.package]
version = "0.1.0"
edition = "2021"
license = "MIT"
license = "Apache-2.0"
authors = ["Levi J. Neuwirth <ln@levineuwirth.org>"]
rust-version = "1.75"

202
LICENSE Normal file
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

253
README.md Normal file
View File

@ -0,0 +1,253 @@
# LeVCS
A distributed version control system with first-class federation, signed
authority chains, and a cascading merge engine. Content-addressed by
BLAKE3, signed with Ed25519, and built to fix what git can't.
> **Status: v0.1.0 — protocol substrate complete, workflow surface deferred.**
> The object model, federation API, merge cascade, and instance server
> all work end-to-end. There is no PR review surface, issue tracker, or
> web UI yet — those are the next layer up. See
> [`doc/technical-report.md`](doc/technical-report.md) for a full
> framing of where this project is and why.
---
## What's different from git
- **Identity is in the protocol.** A repo's membership is a versioned,
signed authority object with explicit roles
(Reader/Contributor/Maintainer/Owner). Force-push enforcement and
push authorization are protocol-level, not server policy.
- **Federation is first-class.** Every repo has a global `repo_id`
(BLAKE3 of its genesis authority); instances mirror each other in
three storage modes (full / release-only / metadata-only).
- **Merge is a cascade**, not a line-level diff. Per-file dispatch to
a handler ranked by aggressiveness: textual fallback, format-aware
(JSON / YAML / TOML / XML / Markdown / prose), tree-sitter for
source code (Rust, Python, JS/TS, Go, C/C++, Java, Ruby, Bash), and
wasm-sandboxed plugins for the long tail.
- **BLAKE3, not SHA-1.** Tree-hashed, ~5 GiB/s on a laptop, 32-byte
IDs everywhere.
- **Releases are signed objects**, not mutable name pointers.
For a deeper comparison and context, see the
[technical report](doc/technical-report.md).
---
## Building
LeVCS is a Rust workspace. You'll need a recent stable toolchain
(workspace MSRV is 1.75) and a C compiler for the tree-sitter grammars.
```sh
cargo build --release
```
Two binaries land in `target/release/`:
- `levcs` — the user-facing CLI.
- `levcs-instance` — the federation HTTP server.
Install them somewhere on `PATH`:
```sh
sudo install -m 0755 \
target/release/levcs target/release/levcs-instance \
/usr/local/bin/
```
---
## Quick start (single user, local only)
```sh
# Generate an identity key (stored in $XDG_CONFIG_HOME/levcs/keys.toml).
levcs key generate --label me
# Create a repository wherever you have files to track.
mkdir /tmp/demo && cd /tmp/demo
echo "hello" > a.txt
levcs init --key me
levcs track --all
levcs commit -m "first commit"
levcs log
```
That's a fully working LeVCS repo. Branch and merge:
```sh
levcs branch feature/x
echo "more" >> a.txt
levcs commit -m "wip"
levcs branch main
levcs merge feature/x
```
If a merge produces conflicts, drop into the resolution TUI:
```sh
levcs merge --resolve
```
Cut a release:
```sh
levcs release v0.1.0 --notes "first release"
```
---
## Hosting an instance
To dogfood the federation surface, run `levcs-instance` on a VPS behind
nginx or Caddy. The full walkthrough is in
[`deploy/README.md`](deploy/README.md): build, systemd unit, reverse
proxy templates, firewall, and the laptop-side bootstrap.
The compressed version:
```sh
sudo cp deploy/levcs-instance.service /etc/systemd/system/
sudo cp deploy/instance.toml.example /etc/levcs/instance.toml
sudo $EDITOR /etc/levcs/instance.toml
sudo systemctl enable --now levcs-instance
# ... then drop deploy/Caddyfile.example into /etc/caddy/Caddyfile
```
From your laptop, point the local repo at the instance and push:
```sh
levcs instance --set https://levcs.example.com/levcs/v1
levcs push refs/branches/main
```
The first push to a fresh instance auto-inits the repo with your
genesis authority. Subsequent pushes are role-checked against the
authority chain.
---
## Repository layout
```
crates/
levcs-core/ Object model (Blob/Tree/Commit/Release/Authority),
hash, store, refs, repository abstractions.
levcs-identity/ Authority objects, Ed25519 keys, signing/verify.
levcs-merge/ Cascade engine, format and tree-sitter handlers,
plugin runtime, merge records.
levcs-protocol/ Pack codec, wire types, request signing, P2P.
levcs-client/ Thin HTTP client over the federation API.
levcs-instance/ Axum HTTP server (the federation peer).
levcs-cli/ The `levcs` user-facing CLI.
levcs-tui/ Conflict-resolution terminal UI.
deploy/ Production deployment artifacts (systemd, Caddy, nginx).
scripts/ Reproducible benchmark and ops scripts.
doc/ Technical report and architecture docs.
.github/workflows/ CI configuration.
```
---
## Testing
```sh
cargo test --workspace
```
Runs the full suite — unit tests, integration tests, federation
end-to-end tests including the three-instance "dogfood" scenario, the
merge conformance corpus, and property-based fuzz tests. ~194 tests at
v0.1.0; full run is well under a minute on a modern laptop.
Useful subsets:
```sh
# A single crate's tests
cargo test -p levcs-merge
# A specific integration test
cargo test -p levcs-instance --test dogfood
# Property tests only
cargo test -p levcs-merge --test proptest_textual
```
---
## Benchmarks
Microbenchmarks live in each crate's `benches/` directory and use
`criterion`. A reproducible runner with metadata capture and optional
flamegraph generation is at `scripts/bench.sh`:
```sh
scripts/bench.sh --quick # smoke test (~1 minute total)
scripts/bench.sh # full criterion run (a few minutes)
scripts/bench.sh --flamegraph # generate per-bench SVG flamegraphs
scripts/bench.sh --bench pack_codec # one bench only
```
Output goes to `bench-results/<host>-<UTC-timestamp>/` with a parsed
`summary.txt`, criterion's HTML reports, and a `metadata.txt` capturing
rustc version, kernel, CPU, and git rev for run-to-run comparison.
Headline numbers on a Ryzen 7 laptop:
- **Pack decode** at 10 × 1 MiB entries: ~2.3 ms (4.3 GiB/s).
- **Blob serialize + BLAKE3** at 1 MiB: ~190 µs (5.1 GiB/s).
- **Textual three-way merge** of a 100 KiB document: ~4.6 ms.
Pack encoding is the throughput floor at ~380 MiB/s — bottlenecked by
zstd level 3 on incompressible data.
---
## Documentation
- **[`doc/technical-report.md`](doc/technical-report.md)** — Distribution
document. What LeVCS is, how to use it, and how it differs from /
improves upon git. Targets technical evaluators and the workflow-spec
reader.
- **[`deploy/README.md`](deploy/README.md)** — Comprehensive VPS
deployment walkthrough.
- **`spec/`** — The protocol specification and trust-root revision.
Currently kept private; ask the maintainer for a copy.
---
## Contributing
This is a young project. The most useful contributions right now are:
1. **Trying it.** Run `levcs init` on a real project, push to a local
instance, and report friction.
2. **Workflow design.** The next major piece of work is the workflow
spec — PR/review surface, issues, CI conventions. Discussion welcome.
3. **Plugin handlers.** The wasm plugin protocol exists; concrete
handlers (e.g. protobuf, SQL migrations) are needed to validate it.
4. **Tightening CI.** The `fmt` job is gating as of v0.1.0; the
   `clippy` GitHub job is still informational, and flipping it to
   gating would close a small but real quality gap.
Please open an issue or reach out before starting non-trivial work so
we can coordinate.
---
## License
Released under the Apache License 2.0 — see [`LICENSE`](LICENSE) for the
full text.
---
## Citation
If LeVCS supports academic work, please cite the v0.1.0 release. A
formal citation entry will land with the workflow spec; in the meantime
a repository-URL reference is fine.

View File

@ -200,13 +200,30 @@ pub struct GcArgs {
#[derive(Subcommand, Debug)]
pub enum KeyCmd {
Generate { label: String, #[arg(long)] encrypt: bool },
Generate {
label: String,
#[arg(long)]
encrypt: bool,
},
List,
Show { label: String },
Export { label: String, path: PathBuf },
Import { label: String, path: PathBuf },
Remove { label: String },
Rename { old: String, new: String },
Show {
label: String,
},
Export {
label: String,
path: PathBuf,
},
Import {
label: String,
path: PathBuf,
},
Remove {
label: String,
},
Rename {
old: String,
new: String,
},
}
#[derive(Subcommand, Debug)]
@ -215,18 +232,24 @@ pub enum AuthorityCmd {
List,
Add {
key: String,
#[arg(long)] role: String,
#[arg(long)] handle: Option<String>,
#[arg(long = "signing-key")] signing_key: Option<String>,
#[arg(long)]
role: String,
#[arg(long)]
handle: Option<String>,
#[arg(long = "signing-key")]
signing_key: Option<String>,
},
Remove {
key: String,
#[arg(long = "signing-key")] signing_key: Option<String>,
#[arg(long = "signing-key")]
signing_key: Option<String>,
},
Promote {
key: String,
#[arg(long)] role: String,
#[arg(long = "signing-key")] signing_key: Option<String>,
#[arg(long)]
role: String,
#[arg(long = "signing-key")]
signing_key: Option<String>,
},
}

View File

@ -69,9 +69,7 @@ pub fn load_secret(label: Option<&str>) -> Result<(String, SecretKey)> {
keychain_path()
));
} else {
return Err(anyhow!(
"multiple keys in keychain; pass --key <label>"
));
return Err(anyhow!("multiple keys in keychain; pass --key <label>"));
}
}
};

View File

@ -49,7 +49,8 @@ fn write_instance_url(repo: &Repository, url: &str) -> Result<()> {
let path = repo.config_path();
let s = fs::read_to_string(&path).unwrap_or_default();
let mut cfg: RepoConfig = toml::from_str(&s).unwrap_or_default();
cfg.instance.insert("url".into(), toml::Value::String(url.to_string()));
cfg.instance
.insert("url".into(), toml::Value::String(url.to_string()));
let out = toml::to_string_pretty(&cfg)?;
fs::write(&path, out)?;
Ok(())
@ -59,7 +60,10 @@ fn user_config_path() -> PathBuf {
if let Some(xdg) = std::env::var_os("XDG_CONFIG_HOME") {
PathBuf::from(xdg).join("levcs").join("config.toml")
} else if let Some(home) = std::env::var_os("HOME") {
PathBuf::from(home).join(".config").join("levcs").join("config.toml")
PathBuf::from(home)
.join(".config")
.join("levcs")
.join("config.toml")
} else {
PathBuf::from(".levcs.toml")
}
@ -126,7 +130,11 @@ pub fn instance(args: InstanceArgs) -> Result<()> {
if args.list {
let cfg = read_user_cfg()?;
for u in cfg.instances {
let star = if Some(u.clone()) == cfg.active { "*" } else { " " };
let star = if Some(u.clone()) == cfg.active {
"*"
} else {
" "
};
println!("{star} {u}");
}
return Ok(());
@ -151,7 +159,8 @@ fn active_instance() -> Result<String> {
}
}
let cfg = read_user_cfg()?;
cfg.active.ok_or_else(|| anyhow!("no active instance; run `levcs instance --set <URL>`"))
cfg.active
.ok_or_else(|| anyhow!("no active instance; run `levcs instance --set <URL>`"))
}
pub fn push(args: PushArgs) -> Result<()> {
@ -184,12 +193,16 @@ pub fn push(args: PushArgs) -> Result<()> {
let mut stack = vec![new];
let mut seen = std::collections::HashSet::<ObjectId>::new();
while let Some(id) = stack.pop() {
if !seen.insert(id) { continue; }
if !seen.insert(id) {
continue;
}
if let Ok(raw) = repo.objects.read_object(id) {
match raw.object_type {
ObjectType::Tree => {
if let Ok(t) = levcs_core::Tree::parse_body(&raw.body) {
for e in t.entries { stack.push(e.hash); }
for e in t.entries {
stack.push(e.hash);
}
}
}
ObjectType::Commit => {
@ -204,7 +217,9 @@ pub fn push(args: PushArgs) -> Result<()> {
stack.push(rel.tree);
stack.push(rel.predecessor);
stack.push(rel.authority);
if !rel.parent_release.is_zero() { stack.push(rel.parent_release); }
if !rel.parent_release.is_zero() {
stack.push(rel.parent_release);
}
}
}
ObjectType::Authority => {
@ -237,7 +252,9 @@ pub fn push(args: PushArgs) -> Result<()> {
let mut pack = Pack::new();
let mut deduped = std::collections::HashSet::new();
for id in needed {
if !deduped.insert(id) { continue; }
if !deduped.insert(id) {
continue;
}
if let Ok(bytes) = repo.objects.read_raw(id) {
if bytes.len() >= 5 {
pack.push(bytes[4], bytes);
@ -295,10 +312,15 @@ pub fn pull(args: PullArgs) -> Result<()> {
for (r, h) in remote_refs.branches {
if want_refs.contains(&r) {
let id = ObjectId::from_hex(&h)?;
repo.refs.write(&format!("refs/remote/origin/branches/{r}"), id)?;
repo.refs
.write(&format!("refs/remote/origin/branches/{r}"), id)?;
}
}
eprintln!("pulled {} object(s) from {} ref(s)", pack.entries.len(), want_refs.len());
eprintln!(
"pulled {} object(s) from {} ref(s)",
pack.entries.len(),
want_refs.len()
);
Ok(())
}
@ -382,9 +404,18 @@ pub fn fork(args: ForkArgs) -> Result<()> {
added_by: pk,
}],
policy: vec![
PolicyEntry { key: "public_read".into(), value: vec![0x01] },
PolicyEntry { key: "require_signed_releases".into(), value: vec![0x01] },
PolicyEntry { key: "allowed_handlers".into(), value: b"builtin".to_vec() },
PolicyEntry {
key: "public_read".into(),
value: vec![0x01],
},
PolicyEntry {
key: "require_signed_releases".into(),
value: vec![0x01],
},
PolicyEntry {
key: "allowed_handlers".into(),
value: b"builtin".to_vec(),
},
],
};
new_auth_body.normalize()?;
@ -397,9 +428,8 @@ pub fn fork(args: ForkArgs) -> Result<()> {
// 8. Build the fork commit's tree: source's tree, with .levcs/authority
// pointing at the new genesis.
let new_tree_id = crate::tree_helpers::put_authority_in_tree(
&repo, source_commit.tree, new_auth_id,
)?;
let new_tree_id =
crate::tree_helpers::put_authority_in_tree(&repo, source_commit.tree, new_auth_id)?;
// 9. Construct and sign the fork commit.
let flags = CommitFlags(CommitFlags::MODIFIES_AUTHORITY.0 | CommitFlags::FORK.0);
@ -558,8 +588,8 @@ pub fn deploy(args: DeployArgs) -> Result<()> {
let (manifest, pack) = build_deploy_archive(&repo, args.release)?;
let pack_bytes = pack.encode();
let listener = TcpListener::bind(&args.listen)
.with_context(|| format!("bind {}", args.listen))?;
let listener =
TcpListener::bind(&args.listen).with_context(|| format!("bind {}", args.listen))?;
let local = listener.local_addr()?;
eprintln!(
"deploy listening on {local}\n recipient = {}\n send {} branch(es), {} release(s), {} object(s) ({} bytes packed)",
@ -581,11 +611,7 @@ pub fn deploy(args: DeployArgs) -> Result<()> {
let (stream, peer_addr) = listener.accept()?;
eprintln!("dialer connected from {peer_addr}");
let mut session = match levcs_protocol::p2p::handshake_listen(
stream,
&sk,
&recipient_pub,
) {
let mut session = match levcs_protocol::p2p::handshake_listen(stream, &sk, &recipient_pub) {
Ok(s) => s,
Err(e) => bail!("handshake failed: {e}"),
};
@ -595,9 +621,7 @@ pub fn deploy(args: DeployArgs) -> Result<()> {
session
.send_pack(&pack_bytes)
.map_err(|e| anyhow!("send pack: {e}"))?;
session
.send_done()
.map_err(|e| anyhow!("send done: {e}"))?;
session.send_done().map_err(|e| anyhow!("send done: {e}"))?;
eprintln!("deploy complete");
Ok(())
}
@ -627,7 +651,11 @@ fn build_deploy_archive(
if branches.is_empty() && releases.is_empty() {
bail!(
"nothing to deploy: repository has no {}",
if release_only { "releases" } else { "branches or releases" }
if release_only {
"releases"
} else {
"branches or releases"
}
);
}
@ -655,7 +683,11 @@ fn build_deploy_archive(
}
let manifest = levcs_protocol::p2p::DeployManifest {
repo_id,
mode: if release_only { "release".into() } else { "all".into() },
mode: if release_only {
"release".into()
} else {
"all".into()
},
branches: branch_map,
releases: release_map,
authority_hash: auth.to_hex(),
@ -762,18 +794,17 @@ pub fn migrate(args: MigrateArgs) -> Result<()> {
write_instance_url(&repo, &args.to)?;
eprintln!("active instance for this repo set to {}", args.to);
} else {
eprintln!("(run `levcs instance --set {}` to repoint future operations)", args.to);
eprintln!(
"(run `levcs instance --set {}` to repoint future operations)",
args.to
);
}
Ok(())
}
/// Reachability walk shared by push() and migrate(). Inserts every object
/// transitively referenced from `start` into `out`, including blobs.
fn walk_closure(
repo: &Repository,
start: ObjectId,
out: &mut std::collections::HashSet<ObjectId>,
) {
fn walk_closure(repo: &Repository, start: ObjectId, out: &mut std::collections::HashSet<ObjectId>) {
let mut stack = vec![start];
while let Some(id) = stack.pop() {
if !out.insert(id) {
@ -838,12 +869,8 @@ pub fn dial(args: DialArgs) -> Result<()> {
let manifest = session
.recv_manifest()
.map_err(|e| anyhow!("recv manifest: {e}"))?;
let pack_bytes = session
.recv_pack()
.map_err(|e| anyhow!("recv pack: {e}"))?;
session
.recv_done()
.map_err(|e| anyhow!("recv done: {e}"))?;
let pack_bytes = session.recv_pack().map_err(|e| anyhow!("recv pack: {e}"))?;
session.recv_done().map_err(|e| anyhow!("recv done: {e}"))?;
let pack = Pack::decode(&pack_bytes).map_err(|e| anyhow!("decode pack: {e}"))?;
eprintln!(
"received {} object(s); manifest reports {} branch(es), {} release(s)",
@ -856,8 +883,10 @@ pub fn dial(args: DialArgs) -> Result<()> {
// mirrors the convention `levcs fork` uses.
let dest = match args.path {
Some(p) => p,
None => std::env::current_dir()?
.join(format!("dial-{}", &manifest.repo_id[..8.min(manifest.repo_id.len())])),
None => std::env::current_dir()?.join(format!(
"dial-{}",
&manifest.repo_id[..8.min(manifest.repo_id.len())]
)),
};
if dest.exists() {
bail!("destination already exists: {:?}", dest);
@ -918,7 +947,10 @@ pub fn dial(args: DialArgs) -> Result<()> {
// HEAD on main if present, otherwise any branch we got. Releases-only
// archives leave HEAD detached at the latest release's predecessor —
// there's no branch to point at.
if let Some((name, _)) = manifest.branches.iter().find(|(k, _)| k.as_str() == "main")
if let Some((name, _)) = manifest
.branches
.iter()
.find(|(k, _)| k.as_str() == "main")
.or_else(|| manifest.branches.iter().next())
{
let r = format!("refs/branches/{name}");

View File

@ -26,7 +26,9 @@ pub fn key(cmd: KeyCmd) -> Result<()> {
if encrypt {
let pp = read_passphrase("new passphrase: ")?;
let pp2 = read_passphrase("confirm passphrase: ")?;
if pp != pp2 { bail!("passphrases do not match"); }
if pp != pp2 {
bail!("passphrases do not match");
}
kc.add_encrypted(&label, &sk, pp.as_bytes())?;
} else {
kc.add_plaintext(&label, &sk)?;
@ -38,20 +40,29 @@ pub fn key(cmd: KeyCmd) -> Result<()> {
KeyCmd::List => {
let kc = load_keychain()?;
for e in &kc.keys {
let kind = if e.private_encrypted.is_some() { "encrypted" } else { "plaintext" };
let kind = if e.private_encrypted.is_some() {
"encrypted"
} else {
"plaintext"
};
println!("{}\t{}\t{kind}", e.label, e.public);
}
Ok(())
}
KeyCmd::Show { label } => {
let kc = load_keychain()?;
let e = kc.entry(&label).ok_or_else(|| anyhow!("no such key: {label}"))?;
let e = kc
.entry(&label)
.ok_or_else(|| anyhow!("no such key: {label}"))?;
println!("{}", e.public);
Ok(())
}
KeyCmd::Export { label, path } => {
let kc = load_keychain()?;
let e = kc.entry(&label).ok_or_else(|| anyhow!("no such key: {label}"))?.clone();
let e = kc
.entry(&label)
.ok_or_else(|| anyhow!("no such key: {label}"))?
.clone();
let mut single = Keychain::new();
single.keys.push(e);
single.save(&path)?;
@ -111,7 +122,12 @@ pub fn authority(cmd: AuthorityCmd) -> Result<()> {
}
Ok(())
}
AuthorityCmd::Add { key, role, handle, signing_key } => {
AuthorityCmd::Add {
key,
role,
handle,
signing_key,
} => {
let pk = PublicKey::parse_levcs(&key)?;
let role = Role::from_name(&role)?;
let handle = handle.unwrap_or_default();
@ -140,7 +156,11 @@ pub fn authority(cmd: AuthorityCmd) -> Result<()> {
Ok(())
})
}
AuthorityCmd::Promote { key, role, signing_key } => {
AuthorityCmd::Promote {
key,
role,
signing_key,
} => {
let pk = PublicKey::parse_levcs(&key)?;
let role = Role::from_name(&role)?;
mutate_authority(signing_key.as_deref(), |new_body, _| {
@ -168,17 +188,28 @@ where
let label = match signing_key_label {
Some(l) => Some(l.to_string()),
None => {
let auth_id = repo.current_authority()?.ok_or_else(|| anyhow!("no current authority"))?;
let auth_id = repo
.current_authority()?
.ok_or_else(|| anyhow!("no current authority"))?;
let signed = repo.read_signed(auth_id)?;
let body = AuthorityBody::parse(&signed.body)?;
let owners: Vec<&MemberEntry> = body.members.iter().filter(|m| m.role == Role::Owner).collect();
let owners: Vec<&MemberEntry> = body
.members
.iter()
.filter(|m| m.role == Role::Owner)
.collect();
if owners.len() == 1 {
let kc = crate::ctx::load_keychain()?;
let owner_key = owners[0].key;
kc.keys.iter().find(|e| {
levcs_identity::keys::PublicKey::parse_levcs(&e.public)
.ok().map(|p| p == owner_key).unwrap_or(false)
}).map(|e| e.label.clone())
kc.keys
.iter()
.find(|e| {
levcs_identity::keys::PublicKey::parse_levcs(&e.public)
.ok()
.map(|p| p == owner_key)
.unwrap_or(false)
})
.map(|e| e.label.clone())
} else {
None
}
@ -200,7 +231,10 @@ where
// Build successor body.
let mut new_body = cur_body.clone();
new_body.previous_authority = cur_id;
new_body.version = cur_body.version.checked_add(1).ok_or_else(|| anyhow!("version overflow"))?;
new_body.version = cur_body
.version
.checked_add(1)
.ok_or_else(|| anyhow!("version overflow"))?;
new_body.created_micros = now_micros();
f(&mut new_body, pk)?;
new_body.normalize()?;
@ -216,7 +250,8 @@ where
} else {
ZERO_ID
};
let new_tree_id = crate::tree_helpers::put_authority_in_tree(&repo, parent_tree_id, new_auth_id)?;
let new_tree_id =
crate::tree_helpers::put_authority_in_tree(&repo, parent_tree_id, new_auth_id)?;
let commit_obj = Commit {
tree: new_tree_id,
parents,

View File

@ -7,11 +7,11 @@ use std::path::{Path, PathBuf};
use anyhow::{anyhow, bail, Result};
use levcs_core::object::{ObjectType, SignedObject};
use levcs_core::refs::Head;
use levcs_core::{
Blob, Commit, CommitFlags, Index, IndexEntry, IndexEntryFlags, ObjectId, Refs, Release,
Repository, Tree, ZERO_ID,
};
use levcs_core::refs::Head;
use levcs_identity::authority::{
AuthorityBody, MemberEntry, PolicyEntry, Role, AUTHORITY_SCHEMA_VERSION,
};
@ -32,7 +32,11 @@ use crate::ctx::{load_keychain, load_secret, now_micros, open_repo, save_keychai
pub fn init(args: InitArgs) -> Result<()> {
let path = args.path.unwrap_or_else(|| PathBuf::from("."));
let path = if path.is_absolute() { path } else { std::env::current_dir()?.join(path) };
let path = if path.is_absolute() {
path
} else {
std::env::current_dir()?.join(path)
};
fs::create_dir_all(&path)?;
if path.join(".levcs").exists() {
bail!("repository already exists at {:?}", path);
@ -47,7 +51,10 @@ pub fn init(args: InitArgs) -> Result<()> {
let sk = SecretKey::generate();
kc.add_plaintext(&label, &sk)?;
save_keychain(&kc)?;
eprintln!("generated new key '{label}' at {:?}", crate::ctx::keychain_path());
eprintln!(
"generated new key '{label}' at {:?}",
crate::ctx::keychain_path()
);
sk
};
let pk = sk.public();
@ -68,9 +75,18 @@ pub fn init(args: InitArgs) -> Result<()> {
added_by: pk,
}],
policy: vec![
PolicyEntry { key: "public_read".into(), value: vec![0x01] },
PolicyEntry { key: "require_signed_releases".into(), value: vec![0x01] },
PolicyEntry { key: "allowed_handlers".into(), value: b"builtin".to_vec() },
PolicyEntry {
key: "public_read".into(),
value: vec![0x01],
},
PolicyEntry {
key: "require_signed_releases".into(),
value: vec![0x01],
},
PolicyEntry {
key: "allowed_handlers".into(),
value: b"builtin".to_vec(),
},
],
};
auth.normalize()?;
@ -83,10 +99,13 @@ pub fn init(args: InitArgs) -> Result<()> {
repo.set_genesis_authority(auth_id)?;
repo.set_current_authority(auth_id)?;
// Create empty `main` branch HEAD pointer.
repo.refs.write_head(&Head::Branch("refs/branches/main".into()))?;
repo.refs
.write_head(&Head::Branch("refs/branches/main".into()))?;
eprintln!(
"initialized levcs repository at {:?}\n repo_id = blake3:{}\n authority = {}",
path, auth.repo_id.to_hex(), auth_id
path,
auth.repo_id.to_hex(),
auth_id
);
Ok(())
}
@ -103,7 +122,11 @@ pub fn track(args: TrackArgs) -> Result<()> {
targets.extend(repo.walk_workdir()?);
} else {
for p in args.paths {
let abs = if p.is_absolute() { p } else { repo.workdir.join(p) };
let abs = if p.is_absolute() {
p
} else {
repo.workdir.join(p)
};
if abs.is_dir() {
walk_dir(&abs, &repo.workdir, &mut targets)?;
} else if abs.is_file() {
@ -114,7 +137,10 @@ pub fn track(args: TrackArgs) -> Result<()> {
}
}
for path in targets {
let rel = path.strip_prefix(&repo.workdir)?.to_string_lossy().replace('\\', "/");
let rel = path
.strip_prefix(&repo.workdir)?
.to_string_lossy()
.replace('\\', "/");
let bytes = fs::read(&path)?;
let blob = Blob::new(bytes.clone());
let id = repo.objects.write_raw(&blob.serialize())?;
@ -139,8 +165,15 @@ pub fn forget(args: ForgetArgs) -> Result<()> {
let repo = open_repo()?;
let mut idx = repo.read_index()?;
for p in args.paths {
let abs = if p.is_absolute() { p } else { repo.workdir.join(&p) };
let rel = abs.strip_prefix(&repo.workdir)?.to_string_lossy().replace('\\', "/");
let abs = if p.is_absolute() {
p
} else {
repo.workdir.join(&p)
};
let rel = abs
.strip_prefix(&repo.workdir)?
.to_string_lossy()
.replace('\\', "/");
idx.remove(&rel);
if !args.keep_file {
let _ = fs::remove_file(&abs);
@ -191,7 +224,10 @@ pub fn status() -> Result<()> {
let mut modified = Vec::new();
let mut untracked = Vec::new();
for path in workdir_files {
let rel = path.strip_prefix(&repo.workdir)?.to_string_lossy().replace('\\', "/");
let rel = path
.strip_prefix(&repo.workdir)?
.to_string_lossy()
.replace('\\', "/");
match tracked.get(&rel) {
None => untracked.push(rel),
Some(entry) => {
@ -206,7 +242,12 @@ pub fn status() -> Result<()> {
let work_set: HashSet<String> = repo
.walk_workdir()?
.iter()
.map(|p| p.strip_prefix(&repo.workdir).unwrap().to_string_lossy().replace('\\', "/"))
.map(|p| {
p.strip_prefix(&repo.workdir)
.unwrap()
.to_string_lossy()
.replace('\\', "/")
})
.collect();
let mut deleted: Vec<String> = idx
.entries
@ -217,15 +258,21 @@ pub fn status() -> Result<()> {
deleted.sort();
if !modified.is_empty() {
println!("\nmodified:");
for m in &modified { println!(" {m}"); }
for m in &modified {
println!(" {m}");
}
}
if !deleted.is_empty() {
println!("\ndeleted:");
for d in &deleted { println!(" {d}"); }
for d in &deleted {
println!(" {d}");
}
}
if !untracked.is_empty() {
println!("\nuntracked:");
for u in &untracked { println!(" {u}"); }
for u in &untracked {
println!(" {u}");
}
}
if modified.is_empty() && deleted.is_empty() && untracked.is_empty() {
println!("\nworking tree clean.");
@ -248,8 +295,12 @@ pub fn log(_args: LogArgs) -> Result<()> {
println!("commit {}", id);
println!("Author: {}", pk);
println!("Date: {} (us since epoch)", commit.timestamp_micros);
if commit.flags.modifies_authority() { println!("Flags: authority-modifying"); }
if commit.flags.is_fork() { println!("Flags: fork"); }
if commit.flags.modifies_authority() {
println!("Flags: authority-modifying");
}
if commit.flags.is_fork() {
println!("Flags: fork");
}
println!();
for line in commit.message.lines() {
println!(" {line}");
@ -308,7 +359,9 @@ pub fn commit(args: CommitArgs) -> Result<()> {
e.path
);
}
let blob_id = repo.objects.write_raw(&Blob::new(bytes.clone()).serialize())?;
let blob_id = repo
.objects
.write_raw(&Blob::new(bytes.clone()).serialize())?;
let meta = fs::metadata(&abs)?;
new_entries.push(IndexEntry {
path: e.path.clone(),
@ -379,7 +432,10 @@ pub fn commit(args: CommitArgs) -> Result<()> {
.find_member(&pk)
.ok_or_else(|| anyhow!("your key is not in the current authority"))?;
if member.role < Role::Contributor {
bail!("your key has role '{}', need at least contributor", member.role.name());
bail!(
"your key has role '{}', need at least contributor",
member.role.name()
);
}
let default_message = match merge_head_id {
@ -509,7 +565,11 @@ pub fn construct(args: ConstructArgs) -> Result<()> {
// Path-restricted reconstruction.
for p in paths {
let abs = if p.is_absolute() { p.clone() } else { repo.workdir.join(&p) };
let abs = if p.is_absolute() {
p.clone()
} else {
repo.workdir.join(&p)
};
let rel = abs
.strip_prefix(&repo.workdir)
.map_err(|_| anyhow!("path {:?} is outside the repository", p))?;
@ -603,7 +663,9 @@ pub fn diff(args: DiffArgs) -> Result<()> {
if restrict.is_empty() {
return true;
}
restrict.iter().any(|r| p == r || p.starts_with(&format!("{r}/")))
restrict
.iter()
.any(|r| p == r || p.starts_with(&format!("{r}/")))
};
let baseline = collect_tree_files(&repo, baseline_tree, "")?;
@ -611,7 +673,10 @@ pub fn diff(args: DiffArgs) -> Result<()> {
.walk_workdir()?
.into_iter()
.map(|p| -> Result<_> {
let rel = p.strip_prefix(&repo.workdir)?.to_string_lossy().replace('\\', "/");
let rel = p
.strip_prefix(&repo.workdir)?
.to_string_lossy()
.replace('\\', "/");
let bytes = fs::read(&p)?;
Ok((rel, bytes))
})
@ -626,7 +691,9 @@ pub fn diff(args: DiffArgs) -> Result<()> {
}
let a = baseline.get(k).cloned().unwrap_or_default();
let b = work.get(k).cloned().unwrap_or_default();
if a == b { continue; }
if a == b {
continue;
}
println!("--- a/{k}\n+++ b/{k}");
let a_s = String::from_utf8_lossy(&a);
let b_s = String::from_utf8_lossy(&b);
@ -655,7 +722,11 @@ fn collect_tree_files(
let raw = repo.objects.read_typed(tree_id, ObjectType::Tree)?;
let tree = Tree::parse_body(&raw.body)?;
for e in tree.entries {
let path = if prefix.is_empty() { e.name.clone() } else { format!("{prefix}/{}", e.name) };
let path = if prefix.is_empty() {
e.name.clone()
} else {
format!("{prefix}/{}", e.name)
};
match e.entry_type {
levcs_core::EntryType::Blob => {
let blob = repo.objects.read_typed(e.hash, ObjectType::Blob)?;
@ -679,7 +750,11 @@ pub fn branch(args: BranchArgs) -> Result<()> {
if args.list || (args.create.is_none() && args.switch.is_none() && args.delete.is_none()) {
let cur = repo.current_branch()?.unwrap_or_default();
for (name, id) in repo.refs.list_branches()? {
let marker = if cur == format!("refs/branches/{name}") { "*" } else { " " };
let marker = if cur == format!("refs/branches/{name}") {
"*"
} else {
" "
};
println!("{marker} {name}\t{id}");
}
return Ok(());
@ -687,7 +762,10 @@ pub fn branch(args: BranchArgs) -> Result<()> {
if let Some(name) = args.create {
let from = match args.from {
Some(s) => ObjectId::from_hex(&s)?,
None => repo.refs.resolve_head()?.ok_or_else(|| anyhow!("no HEAD"))?,
None => repo
.refs
.resolve_head()?
.ok_or_else(|| anyhow!("no HEAD"))?,
};
repo.refs.write(&format!("refs/branches/{name}"), from)?;
eprintln!("created branch {name} at {from}");
@ -697,7 +775,8 @@ pub fn branch(args: BranchArgs) -> Result<()> {
.refs
.read(&format!("refs/branches/{name}"))?
.ok_or_else(|| anyhow!("no such branch: {name}"))?;
repo.refs.write_head(&Head::Branch(format!("refs/branches/{name}")))?;
repo.refs
.write_head(&Head::Branch(format!("refs/branches/{name}")))?;
let raw = repo.read_raw_object(target)?;
let tree_id = match raw.object_type {
ObjectType::Commit => Commit::parse_body(&raw.body)?.tree,
@ -787,7 +866,11 @@ fn merge_run(args: MergeArgs) -> Result<()> {
files: Vec::new(),
};
let mut paths: BTreeSet<String> = BTreeSet::new();
for k in base_files.keys().chain(ours_files.keys()).chain(theirs_files.keys()) {
for k in base_files
.keys()
.chain(ours_files.keys())
.chain(theirs_files.keys())
{
// .levcs/* synthetic tree entries (authority, merge-record) live in
// commits but never on disk; skip them when reconciling files.
if k.starts_with(".levcs/") || k == ".levcs" {
@ -798,7 +881,10 @@ fn merge_run(args: MergeArgs) -> Result<()> {
let json_mode = args.format == "json";
if !json_mode && args.format != "text" {
bail!("unknown --format value: {} (allowed: text, json)", args.format);
bail!(
"unknown --format value: {} (allowed: text, json)",
args.format
);
}
let mut auto_resolved = 0usize;
let mut conflict_count = 0usize;
@ -900,7 +986,10 @@ fn merge_run(args: MergeArgs) -> Result<()> {
auto_resolved += 1;
merged_files.insert(path.clone(), content.clone());
}
MergeStatus::Conflict { partial, regions: _ } => {
MergeStatus::Conflict {
partial,
regions: _,
} => {
conflict_count += 1;
merged_files.insert(path.clone(), partial.clone());
}
@ -930,7 +1019,9 @@ fn merge_run(args: MergeArgs) -> Result<()> {
// without re-reading the working directory's view of every file.
let mut idx = Index::new();
for (path, bytes) in &merged_files {
let id = repo.objects.write_raw(&Blob::new(bytes.clone()).serialize())?;
let id = repo
.objects
.write_raw(&Blob::new(bytes.clone()).serialize())?;
let mut flags = IndexEntryFlags::TRACKED;
if let Some(fr) = record.files.iter().find(|fr| fr.path == *path) {
if matches!(fr.status, FileStatus::Manual) {
@ -999,7 +1090,9 @@ fn merge_run(args: MergeArgs) -> Result<()> {
println!(" conflicts: {conflict_count}");
println!();
if conflict_count > 0 {
println!("review with `levcs merge --review`, or edit conflicted files and run `levcs commit`.");
println!(
"review with `levcs merge --review`, or edit conflicted files and run `levcs commit`."
);
std::process::exit(1);
}
println!("clean merge. run `levcs commit` to finalize.");
@ -1030,10 +1123,10 @@ fn load_effective_merge_config(repo: &Repository) -> Result<MergeConfig> {
if !local_path.exists() {
return Ok(base);
}
let local_raw = fs::read_to_string(&local_path)
.map_err(|e| anyhow!("read merge.local.toml: {e}"))?;
let local: MergeConfig = toml::from_str(&local_raw)
.map_err(|e| anyhow!("parse merge.local.toml: {e}"))?;
let local_raw =
fs::read_to_string(&local_path).map_err(|e| anyhow!("read merge.local.toml: {e}"))?;
let local: MergeConfig =
toml::from_str(&local_raw).map_err(|e| anyhow!("parse merge.local.toml: {e}"))?;
levcs_merge::engine::layer_local_over(&base, &local)
.map_err(|e| anyhow!("merge.local.toml: {e}"))
}
@ -1284,15 +1377,12 @@ fn merge_explain() -> Result<()> {
.resolve_head()?
.ok_or_else(|| anyhow!("no HEAD"))?;
let head_commit = Commit::from_signed(&repo.read_signed(head_id)?)?;
let merge_head_id = ObjectId::from_hex(
fs::read_to_string(repo.levcs_dir.join("MERGE_HEAD"))?
.trim(),
)?;
let merge_head_id =
ObjectId::from_hex(fs::read_to_string(repo.levcs_dir.join("MERGE_HEAD"))?.trim())?;
let merge_head_commit = Commit::from_signed(&repo.read_signed(merge_head_id)?)?;
let base_tree = if repo.levcs_dir.join("MERGE_BASE").exists() {
let id = ObjectId::from_hex(
fs::read_to_string(repo.levcs_dir.join("MERGE_BASE"))?.trim(),
)?;
let id =
ObjectId::from_hex(fs::read_to_string(repo.levcs_dir.join("MERGE_BASE"))?.trim())?;
Some(Commit::from_signed(&repo.read_signed(id)?)?.tree)
} else {
None
@ -1314,8 +1404,8 @@ fn merge_explain() -> Result<()> {
.lookup_path(head_commit.tree, ".levcs/merge-record")?
.ok_or_else(|| anyhow!("no merge-record on HEAD or in progress"))?;
let blob = repo.objects.read_typed(entry.1, ObjectType::Blob)?;
let text = String::from_utf8(blob.body)
.map_err(|_| anyhow!("merge-record is not UTF-8"))?;
let text =
String::from_utf8(blob.body).map_err(|_| anyhow!("merge-record is not UTF-8"))?;
// For a committed merge, "ours" is HEAD's first parent and
// "theirs" is HEAD's second parent. If the commit isn't a
// merge, we have nothing to step through — fall back to text.
@ -1360,7 +1450,9 @@ fn merge_explain() -> Result<()> {
.map(|fr| {
let ours = read_path(ours_tree, &fr.path);
let theirs = read_path(theirs_tree, &fr.path);
let base = base_tree.map(|t| read_path(t, &fr.path)).unwrap_or_default();
let base = base_tree
.map(|t| read_path(t, &fr.path))
.unwrap_or_default();
// For explain, the "current" pane shows the *result* of the
// merge — i.e., what the engine produced for this file.
// For an in-progress merge that's the working tree; for a
@ -1381,7 +1473,9 @@ fn merge_explain() -> Result<()> {
notes: if fr.notes.is_empty() {
vec![]
} else {
vec![levcs_merge::MergeNote { message: fr.notes.clone() }]
vec![levcs_merge::MergeNote {
message: fr.notes.clone(),
}]
},
},
FileStatus::Manual => MergeStatus::Conflict {
@ -1438,8 +1532,7 @@ fn merge_review() -> Result<()> {
bail!("no merge in progress");
}
let s = fs::read_to_string(&merge_record_path)?;
let record = MergeRecord::from_toml(&s)
.map_err(|e| anyhow!("parse merge-record: {e}"))?;
let record = MergeRecord::from_toml(&s).map_err(|e| anyhow!("parse merge-record: {e}"))?;
// Resolve the three side trees the review needs.
// * ours — current HEAD's tree (what we had before the merge).
@ -1452,10 +1545,8 @@ fn merge_review() -> Result<()> {
let head_commit = Commit::from_signed(&repo.read_signed(head_id)?)?;
let ours_tree = head_commit.tree;
let merge_head_id = ObjectId::from_hex(
fs::read_to_string(repo.levcs_dir.join("MERGE_HEAD"))?
.trim(),
)?;
let merge_head_id =
ObjectId::from_hex(fs::read_to_string(repo.levcs_dir.join("MERGE_HEAD"))?.trim())?;
let merge_head_commit = Commit::from_signed(&repo.read_signed(merge_head_id)?)?;
let theirs_tree = merge_head_commit.tree;
@ -1492,16 +1583,16 @@ fn merge_review() -> Result<()> {
.map(|t| read_path(t, &fr.path))
.unwrap_or_default();
let status = match fr.status {
FileStatus::Auto | FileStatus::Ours | FileStatus::Theirs => {
MergeStatus::Merged {
content: current.clone(),
notes: if fr.notes.is_empty() {
vec![]
} else {
vec![levcs_merge::MergeNote { message: fr.notes.clone() }]
},
}
}
FileStatus::Auto | FileStatus::Ours | FileStatus::Theirs => MergeStatus::Merged {
content: current.clone(),
notes: if fr.notes.is_empty() {
vec![]
} else {
vec![levcs_merge::MergeNote {
message: fr.notes.clone(),
}]
},
},
FileStatus::Manual => MergeStatus::Conflict {
regions: vec![levcs_merge::ConflictRegion {
description: if fr.notes.is_empty() {
@ -1530,8 +1621,7 @@ fn merge_review() -> Result<()> {
.collect();
let total = files.len();
let final_state = levcs_tui::review(files)
.map_err(|e| anyhow!("review session: {e}"))?;
let final_state = levcs_tui::review(files).map_err(|e| anyhow!("review session: {e}"))?;
let report = final_state
.apply(&repo.workdir)
.map_err(|e| anyhow!("apply resolutions: {e}"))?;
@ -1546,7 +1636,9 @@ fn find_common_ancestor(repo: &Repository, a: ObjectId, b: ObjectId) -> Result<O
let mut a_anc: HashSet<ObjectId> = HashSet::new();
let mut stack = vec![a];
while let Some(id) = stack.pop() {
if !a_anc.insert(id) { continue; }
if !a_anc.insert(id) {
continue;
}
if let Ok(s) = repo.read_signed(id) {
if let Ok(c) = Commit::from_signed(&s) {
stack.extend(c.parents);
@ -1556,7 +1648,9 @@ fn find_common_ancestor(repo: &Repository, a: ObjectId, b: ObjectId) -> Result<O
let mut stack = vec![b];
let mut visited: HashSet<ObjectId> = HashSet::new();
while let Some(id) = stack.pop() {
if !visited.insert(id) { continue; }
if !visited.insert(id) {
continue;
}
if a_anc.contains(&id) {
return Ok(Some(id));
}
@ -1573,16 +1667,24 @@ pub fn release(args: ReleaseArgs) -> Result<()> {
let repo = open_repo()?;
let (_label, sk) = load_secret(args.key.as_deref())?;
let pk = sk.public();
let authority = repo.current_authority()?.ok_or_else(|| anyhow!("no current authority"))?;
let authority = repo
.current_authority()?
.ok_or_else(|| anyhow!("no current authority"))?;
let auth_signed = repo.read_signed(authority)?;
let auth_body = AuthorityBody::parse(&auth_signed.body)?;
let m = auth_body
.find_member(&pk)
.ok_or_else(|| anyhow!("your key is not in the current authority"))?;
if m.role < Role::Maintainer {
bail!("releases require maintainer role; your role is '{}'", m.role.name());
bail!(
"releases require maintainer role; your role is '{}'",
m.role.name()
);
}
let head = repo.refs.resolve_head()?.ok_or_else(|| anyhow!("no HEAD"))?;
let head = repo
.refs
.resolve_head()?
.ok_or_else(|| anyhow!("no HEAD"))?;
let head_commit = Commit::from_signed(&repo.read_signed(head)?)?;
let parent_release = repo
.refs
@ -1600,7 +1702,8 @@ pub fn release(args: ReleaseArgs) -> Result<()> {
};
let signed = sign_release(release, &sk)?;
let id = repo.write_signed(&signed)?;
repo.refs.write(&format!("refs/releases/{}", args.label), id)?;
repo.refs
.write(&format!("refs/releases/{}", args.label), id)?;
// §4.4: warm the release cache and run LRU eviction so the
// cache stays under its configured cap. The cap is 1 GiB by
@ -1635,7 +1738,9 @@ pub fn cache(args: CacheArgs) -> Result<()> {
}
if let Some(id) = args.restore {
let src = dir.join(&id);
if !src.is_dir() { bail!("no such cache: {id}"); }
if !src.is_dir() {
bail!("no such cache: {id}");
}
copy_dir_recursive(&src, &repo.workdir)?;
eprintln!("restored {id}");
return Ok(());
@ -1648,7 +1753,9 @@ pub fn cache(args: CacheArgs) -> Result<()> {
for path in repo.walk_workdir()? {
let rel = path.strip_prefix(&repo.workdir)?;
let target = dest.join(rel);
if let Some(parent) = target.parent() { fs::create_dir_all(parent)?; }
if let Some(parent) = target.parent() {
fs::create_dir_all(parent)?;
}
fs::copy(&path, &target)?;
}
if let Some(m) = args.message {
@ -1671,7 +1778,9 @@ fn copy_dir_recursive(src: &Path, dest: &Path) -> Result<()> {
fs::create_dir_all(&t)?;
copy_dir_recursive(&p, &t)?;
} else {
if let Some(parent) = t.parent() { fs::create_dir_all(parent)?; }
if let Some(parent) = t.parent() {
fs::create_dir_all(parent)?;
}
fs::copy(&p, &t)?;
}
}
@ -1701,17 +1810,29 @@ pub fn gc(args: GcArgs) -> Result<()> {
let repo = open_repo()?;
let mut reachable: HashSet<ObjectId> = HashSet::new();
let mut stack: Vec<ObjectId> = Vec::new();
if let Some(h) = repo.refs.resolve_head()? { stack.push(h); }
if let Some(c) = repo.current_authority()? { stack.push(c); }
if let Some(g) = repo.genesis_authority()? { stack.push(g); }
for (_, id) in repo.refs.list_all()? { stack.push(id); }
if let Some(h) = repo.refs.resolve_head()? {
stack.push(h);
}
if let Some(c) = repo.current_authority()? {
stack.push(c);
}
if let Some(g) = repo.genesis_authority()? {
stack.push(g);
}
for (_, id) in repo.refs.list_all()? {
stack.push(id);
}
while let Some(id) = stack.pop() {
if !reachable.insert(id) { continue; }
if !reachable.insert(id) {
continue;
}
if let Ok(raw) = repo.objects.read_object(id) {
match raw.object_type {
ObjectType::Tree => {
if let Ok(t) = Tree::parse_body(&raw.body) {
for e in t.entries { stack.push(e.hash); }
for e in t.entries {
stack.push(e.hash);
}
}
}
ObjectType::Commit => {
@ -1726,7 +1847,9 @@ pub fn gc(args: GcArgs) -> Result<()> {
stack.push(r.tree);
stack.push(r.predecessor);
stack.push(r.authority);
if !r.parent_release.is_zero() { stack.push(r.parent_release); }
if !r.parent_release.is_zero() {
stack.push(r.parent_release);
}
}
}
ObjectType::Authority => {
@ -1786,7 +1909,9 @@ fn walk_dir(dir: &Path, base: &Path, out: &mut Vec<PathBuf>) -> Result<()> {
let ent = ent?;
let path = ent.path();
let rel = path.strip_prefix(base)?;
if levcs_core::ignore::always_ignored(rel) { continue; }
if levcs_core::ignore::always_ignored(rel) {
continue;
}
let ft = ent.file_type()?;
if ft.is_dir() {
walk_dir(&path, base, out)?;
@ -1809,11 +1934,17 @@ fn file_mtime_micros(meta: &fs::Metadata) -> i64 {
fn file_mode_bits(meta: &fs::Metadata) -> u8 {
use std::os::unix::fs::PermissionsExt;
let m = meta.permissions().mode();
if m & 0o111 != 0 { 0o111 } else { 0 }
if m & 0o111 != 0 {
0o111
} else {
0
}
}
#[cfg(not(unix))]
fn file_mode_bits(_meta: &fs::Metadata) -> u8 { 0 }
fn file_mode_bits(_meta: &fs::Metadata) -> u8 {
0
}
#[allow(dead_code)]
fn _refs_unused(_: Refs) {}

View File

@ -73,7 +73,8 @@ fn fork_end_to_end() {
// 1. Boot an instance.
let instance_root = tempdir("levcs-fork-instance");
let instance_root_for_task = instance_root.clone();
let (addr, server_task) = runtime.block_on(async move { start_instance(instance_root_for_task).await });
let (addr, server_task) =
runtime.block_on(async move { start_instance(instance_root_for_task).await });
let base_url = format!("http://{addr}/levcs/v1");
// 2. Source repo.
@ -133,7 +134,10 @@ fn fork_end_to_end() {
// 4. Verify the fork.
let fork_dir = fork_parent.join("myfork");
assert!(fork_dir.is_dir(), "fork directory not created");
assert!(fork_dir.join("README").is_file(), "source content not checked out");
assert!(
fork_dir.join("README").is_file(),
"source content not checked out"
);
assert_eq!(
std::fs::read_to_string(fork_dir.join("README")).unwrap(),
"source repo content\n"
@ -160,7 +164,10 @@ fn fork_end_to_end() {
);
// Bob is the sole owner of the new genesis.
assert_eq!(fork_body.members.len(), 1);
assert_eq!(fork_body.members[0].role, levcs_identity::authority::Role::Owner);
assert_eq!(
fork_body.members[0].role,
levcs_identity::authority::Role::Owner
);
// 6. Confirm the fork commit has both flags set and a single parent.
let head_hex = std::fs::read_to_string(fork_dir.join(".levcs/refs/branches/main"))

View File

@ -106,16 +106,28 @@ fn inspect_lists_branches_authority_and_tree() {
let (code, stdout, _e) = run(&["inspect", &repo_id_hex], &probe, &xdg);
assert_eq!(code, 0, "inspect at root must succeed");
assert!(stdout.contains("repo_id"), "must show repo_id: {stdout}");
assert!(stdout.contains("current authority"), "must show authority: {stdout}");
assert!(
stdout.contains("current authority"),
"must show authority: {stdout}"
);
assert!(stdout.contains("branches:"), "must list branches: {stdout}");
assert!(stdout.contains("main"), "must show main branch: {stdout}");
assert!(stdout.contains("README"), "must list README at root: {stdout}");
assert!(stdout.contains("nested"), "must list nested subtree at root: {stdout}");
assert!(
stdout.contains("README"),
"must list README at root: {stdout}"
);
assert!(
stdout.contains("nested"),
"must list nested subtree at root: {stdout}"
);
// Path inspect: drill into the `nested/` subtree.
let (code, stdout, _e) = run(&["inspect", &repo_id_hex, "nested"], &probe, &xdg);
assert_eq!(code, 0, "inspect at nested/ must succeed");
assert!(stdout.contains("file.txt"), "must list nested/file.txt: {stdout}");
assert!(
stdout.contains("file.txt"),
"must list nested/file.txt: {stdout}"
);
task.abort();
let _ = std::fs::remove_dir_all(&instance_root);

View File

@ -83,7 +83,15 @@ fn authority_chain_round_trip() {
let (_, bob_pub, _) = run(&["key", "show", "bob"], &work, &xdg);
let bob_pub = bob_pub.trim().to_string();
let (code, _, e) = run(
&["authority", "add", &bob_pub, "--role", "contributor", "--handle", "bob"],
&[
"authority",
"add",
&bob_pub,
"--role",
"contributor",
"--handle",
"bob",
],
&work,
&xdg,
);
@ -152,13 +160,19 @@ fn gc_grace_period_keeps_young_objects_and_deletes_old_ones() {
// younger than that, so it must be kept.
let (code, _, e) = run(&["gc"], &work, &xdg);
assert_eq!(code, 0, "gc default: {e}");
assert!(stray.is_file(), "young unreachable object must be kept under default grace");
assert!(
stray.is_file(),
"young unreachable object must be kept under default grace"
);
assert!(e.contains("kept"), "gc must report kept count: {e}");
// Force grace=0 and the stray file must go.
let (code, _, e) = run(&["gc", "--grace-days=0"], &work, &xdg);
assert_eq!(code, 0, "gc grace=0: {e}");
assert!(!stray.is_file(), "with grace=0 the unreachable object must be deleted");
assert!(
!stray.is_file(),
"with grace=0 the unreachable object must be deleted"
);
assert!(e.contains("removed"), "gc must report deletion count: {e}");
let _ = std::fs::remove_dir_all(&work);

View File

@ -59,8 +59,14 @@ fn fast_forward_merge_advances_branch() {
assert_eq!(run(&["branch", "--switch", "main"], &work, &xdg).0, 0);
let (code, _, e) = run(&["merge", "feature"], &work, &xdg);
assert_eq!(code, 0, "fast-forward merge: {e}");
assert!(e.contains("fast-forward"), "expected fast-forward message; got {e}");
assert!(work.join("b.txt").is_file(), "feature file should be present");
assert!(
e.contains("fast-forward"),
"expected fast-forward message; got {e}"
);
assert!(
work.join("b.txt").is_file(),
"feature file should be present"
);
}
#[test]
@ -88,7 +94,10 @@ fn clean_three_way_merge_then_commit_produces_two_parents() {
assert_eq!(run(&["branch", "--switch", "main"], &work, &xdg).0, 0);
let (code, o, e) = run(&["merge", "feat"], &work, &xdg);
assert_eq!(code, 0, "merge: {e}");
assert!(o.contains("auto-resolved: 1") || o.contains("auto-resolved: 2"), "summary missing: {o}");
assert!(
o.contains("auto-resolved: 1") || o.contains("auto-resolved: 2"),
"summary missing: {o}"
);
// Both files should be in the working tree now.
assert!(work.join("b.txt").is_file());
@ -96,12 +105,18 @@ fn clean_three_way_merge_then_commit_produces_two_parents() {
// MERGE_HEAD should exist before commit, then disappear after.
let merge_head = work.join(".levcs/MERGE_HEAD");
assert!(merge_head.exists(), "MERGE_HEAD should be set before commit");
assert!(
merge_head.exists(),
"MERGE_HEAD should be set before commit"
);
// Finalize.
let (code, _, e) = run(&["commit", "-m", "merge feat"], &work, &xdg);
assert_eq!(code, 0, "commit (merge): {e}");
assert!(!merge_head.exists(), "MERGE_HEAD should be cleared after commit");
assert!(
!merge_head.exists(),
"MERGE_HEAD should be cleared after commit"
);
// Log should show the merge as the most recent commit.
let (code, log, _) = run(&["log"], &work, &xdg);
@ -131,7 +146,10 @@ fn conflicting_merge_writes_state_and_blocks_commit_until_resolved() {
assert_eq!(run(&["branch", "--switch", "main"], &work, &xdg).0, 0);
let (code, _o, e) = run(&["merge", "feat"], &work, &xdg);
assert_ne!(code, 0, "conflict should produce non-zero exit");
assert!(e.contains("CONFLICT") || e.contains("conflict"), "conflict report missing: {e}");
assert!(
e.contains("CONFLICT") || e.contains("conflict"),
"conflict report missing: {e}"
);
// Merge state files exist.
assert!(work.join(".levcs/MERGE_HEAD").exists());
@ -147,7 +165,10 @@ fn conflicting_merge_writes_state_and_blocks_commit_until_resolved() {
// commit must refuse while conflict markers remain.
let (code, _, e) = run(&["commit", "-m", "premature"], &work, &xdg);
assert_ne!(code, 0, "commit should refuse: {e}");
assert!(e.contains("conflict markers"), "marker check should mention markers: {e}");
assert!(
e.contains("conflict markers"),
"marker check should mention markers: {e}"
);
// Resolve manually and commit.
std::fs::write(work.join("a.txt"), b"resolved\n").unwrap();
@ -225,7 +246,10 @@ fn commit_refuses_merge_record_with_handler_outside_repo_policy() {
let (code, _, e) = run(&["commit", "-m", "merge feat"], &work, &xdg);
assert_ne!(code, 0, "commit must refuse a record outside policy");
assert!(e.contains("tree-sitter:protobuf"), "error must name the bad handler: {e}");
assert!(
e.contains("tree-sitter:protobuf"),
"error must name the bad handler: {e}"
);
}
#[test]
@ -293,10 +317,7 @@ fn merge_format_json_reports_conflicts_and_exits_nonzero() {
let v: serde_json::Value = serde_json::from_str(stdout.trim()).expect("parse JSON");
assert!(v["conflicts"].as_u64().unwrap() >= 1);
let files = v["files"].as_array().unwrap();
let conflicted: Vec<_> = files
.iter()
.filter(|f| f["status"] == "conflict")
.collect();
let conflicted: Vec<_> = files.iter().filter(|f| f["status"] == "conflict").collect();
assert!(!conflicted.is_empty(), "must report at least one conflict");
// Conflict regions array is present for conflicted files.
let f = conflicted[0];
@ -359,7 +380,10 @@ fn merge_local_toml_can_demote_handler() {
.iter()
.find(|f| f["path"] == "note.txt")
.expect("note.txt in report");
assert_eq!(txt["handler"], "textual", "demoted handler must be in effect");
assert_eq!(
txt["handler"], "textual",
"demoted handler must be in effect"
);
}
#[test]
@ -386,7 +410,10 @@ fn merge_local_toml_promotion_is_rejected() {
.unwrap();
let (code, _, e) = run(&["merge", "f"], &work, &xdg);
assert_ne!(code, 0, "promotion must error out");
assert!(e.contains("merge.local.toml"), "error must name the offending file: {e}");
assert!(
e.contains("merge.local.toml"),
"error must name the offending file: {e}"
);
assert!(e.contains("promote"), "error must say 'promote': {e}");
}

View File

@ -55,8 +55,14 @@ fn construct_restricted_to_paths_only_rewrites_those_files() {
// Restore only a.txt from HEAD.
let (code, _, e) = run(&["construct", "a.txt"], &work, &xdg);
assert_eq!(code, 0, "construct a.txt: {e}");
assert_eq!(std::fs::read_to_string(work.join("a.txt")).unwrap(), "a v1\n");
assert_eq!(std::fs::read_to_string(work.join("b.txt")).unwrap(), "b dirty\n");
assert_eq!(
std::fs::read_to_string(work.join("a.txt")).unwrap(),
"a v1\n"
);
assert_eq!(
std::fs::read_to_string(work.join("b.txt")).unwrap(),
"b dirty\n"
);
}
#[test]

View File

@ -51,25 +51,41 @@ impl Client {
pub fn instance_info(&self) -> Result<InstanceInfo, ClientError> {
let url = format!("{}/instance/info", self.base);
let res = self.http.get(&url).header("user-agent", &self.user_agent).send()?;
let res = self
.http
.get(&url)
.header("user-agent", &self.user_agent)
.send()?;
check(res)?.json::<InstanceInfo>().map_err(Into::into)
}
pub fn repo_info(&self, repo_id: &str) -> Result<InfoResponse, ClientError> {
let url = format!("{}/repos/{repo_id}/info", self.base);
let res = self.http.get(&url).header("user-agent", &self.user_agent).send()?;
let res = self
.http
.get(&url)
.header("user-agent", &self.user_agent)
.send()?;
check(res)?.json::<InfoResponse>().map_err(Into::into)
}
pub fn refs(&self, repo_id: &str) -> Result<RefList, ClientError> {
let url = format!("{}/repos/{repo_id}/refs", self.base);
let res = self.http.get(&url).header("user-agent", &self.user_agent).send()?;
let res = self
.http
.get(&url)
.header("user-agent", &self.user_agent)
.send()?;
check(res)?.json::<RefList>().map_err(Into::into)
}
pub fn get_object(&self, repo_id: &str, id: ObjectId) -> Result<Vec<u8>, ClientError> {
let url = format!("{}/repos/{repo_id}/objects/{}", self.base, id.to_hex());
let res = self.http.get(&url).header("user-agent", &self.user_agent).send()?;
let res = self
.http
.get(&url)
.header("user-agent", &self.user_agent)
.send()?;
let res = check(res)?;
Ok(res.bytes()?.to_vec())
}
@ -88,7 +104,11 @@ impl Client {
have_q.join(","),
want_q.join(",")
);
let res = self.http.get(&url).header("user-agent", &self.user_agent).send()?;
let res = self
.http
.get(&url)
.header("user-agent", &self.user_agent)
.send()?;
let bytes = check(res)?.bytes()?;
Pack::decode(&bytes).map_err(|e| ClientError::Decode(e.to_string()))
}
@ -103,7 +123,8 @@ impl Client {
// Body: pack bytes followed by 4 bytes manifest length, manifest JSON,
// then manifest signature (64 bytes).
let pack_bytes = pack.encode();
let manifest_json = serde_json::to_vec(manifest).map_err(|e| ClientError::Decode(e.to_string()))?;
let manifest_json =
serde_json::to_vec(manifest).map_err(|e| ClientError::Decode(e.to_string()))?;
let mut body = Vec::with_capacity(pack_bytes.len() + 4 + manifest_json.len() + 64);
body.extend_from_slice(&pack_bytes);
body.extend_from_slice(&(manifest_json.len() as u32).to_le_bytes());
@ -118,7 +139,8 @@ impl Client {
path_with_query: &path,
body: &body,
};
let (key, ts, nonce, sig) = sign_request(sk, &req).map_err(|e| ClientError::Auth(e.to_string()))?;
let (key, ts, nonce, sig) =
sign_request(sk, &req).map_err(|e| ClientError::Auth(e.to_string()))?;
let mut headers = HeaderMap::new();
headers.insert("LeVCS-Key", key.parse().unwrap());
@ -150,7 +172,8 @@ impl Client {
path_with_query: &path,
body: authority_object,
};
let (key, ts, nonce, sig) = sign_request(sk, &req).map_err(|e| ClientError::Auth(e.to_string()))?;
let (key, ts, nonce, sig) =
sign_request(sk, &req).map_err(|e| ClientError::Auth(e.to_string()))?;
let mut headers = HeaderMap::new();
headers.insert("LeVCS-Key", key.parse().unwrap());
headers.insert("LeVCS-Timestamp", ts.parse().unwrap());

View File

@ -17,7 +17,9 @@ use std::path::PathBuf;
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
use levcs_core::hash::blake3_hash;
use levcs_core::object::ObjectType;
use levcs_core::{Blob, Commit, EntryType, FileMode, ObjectId, ObjectStore, Release, Tree, TreeEntry};
use levcs_core::{
Blob, Commit, EntryType, FileMode, ObjectId, ObjectStore, Release, Tree, TreeEntry,
};
fn tempdir(prefix: &str) -> PathBuf {
let mut p = std::env::temp_dir();
@ -110,9 +112,11 @@ fn bench_walk(c: &mut Criterion) {
store.ensure_dirs().unwrap();
let root = populate(&store, n);
g.bench_with_input(BenchmarkId::from_parameter(format!("{n}_objects")), &(), |b, _| {
b.iter(|| black_box(walk_reachable(&store, root)))
});
g.bench_with_input(
BenchmarkId::from_parameter(format!("{n}_objects")),
&(),
|b, _| b.iter(|| black_box(walk_reachable(&store, root))),
);
let _ = std::fs::remove_dir_all(dir);
}

View File

@ -12,7 +12,9 @@ fn lcg_bytes(seed: u64, n: usize) -> Vec<u8> {
let mut s = seed;
(0..n)
.map(|_| {
s = s.wrapping_mul(6364136223846793005).wrapping_add(1442695040888963407);
s = s
.wrapping_mul(6364136223846793005)
.wrapping_add(1442695040888963407);
(s >> 33) as u8
})
.collect()
@ -67,9 +69,11 @@ fn bench_tree(c: &mut Criterion) {
let mut g = c.benchmark_group("tree_serialize_hash");
for &n in &[10usize, 100, 1000] {
let t = make_tree(n);
g.bench_with_input(BenchmarkId::from_parameter(format!("{n}_entries")), &t, |b, t| {
b.iter(|| black_box(blake3_hash(&t.serialize())))
});
g.bench_with_input(
BenchmarkId::from_parameter(format!("{n}_entries")),
&t,
|b, t| b.iter(|| black_box(blake3_hash(&t.serialize()))),
);
}
g.finish();
}

View File

@ -10,7 +10,9 @@ pub struct Blob {
}
impl Blob {
pub fn new(bytes: Vec<u8>) -> Self { Self { bytes } }
pub fn new(bytes: Vec<u8>) -> Self {
Self { bytes }
}
pub fn serialize(&self) -> Vec<u8> {
frame_unsigned(ObjectType::Blob, &self.bytes)

View File

@ -26,15 +26,22 @@ impl CommitFlags {
pub const MODIFIES_AUTHORITY: CommitFlags = CommitFlags(0b01);
pub const FORK: CommitFlags = CommitFlags(0b10);
pub fn modifies_authority(self) -> bool { self.0 & 0b01 != 0 }
pub fn is_fork(self) -> bool { self.0 & 0b10 != 0 }
pub fn modifies_authority(self) -> bool {
self.0 & 0b01 != 0
}
pub fn is_fork(self) -> bool {
self.0 & 0b10 != 0
}
pub fn raw(self) -> u8 { self.0 }
pub fn raw(self) -> u8 {
self.0
}
pub fn validate(self) -> Result<(), Error> {
if self.0 & !0b11 != 0 {
return Err(Error::MalformedObject(format!(
"commit flags has reserved bits set: {:#x}", self.0
"commit flags has reserved bits set: {:#x}",
self.0
)));
}
Ok(())
@ -85,7 +92,9 @@ impl Commit {
pub fn parse_body(body: &[u8]) -> Result<Self, Error> {
let need = 32 + 1;
if body.len() < need {
return Err(Error::MalformedObject("commit body too short for tree+parent_count".into()));
return Err(Error::MalformedObject(
"commit body too short for tree+parent_count".into(),
));
}
let mut tree = [0u8; 32];
tree.copy_from_slice(&body[0..32]);
@ -124,7 +133,8 @@ impl Commit {
p += msg_len;
if p != body.len() {
return Err(Error::MalformedObject(format!(
"trailing {} byte(s) after commit message", body.len() - p
"trailing {} byte(s) after commit message",
body.len() - p
)));
}
Ok(Self {
@ -146,12 +156,14 @@ impl Commit {
pub fn from_signed(s: &SignedObject) -> Result<Self, Error> {
if s.object_type != ObjectType::Commit {
return Err(Error::MalformedObject(format!(
"expected commit, got {}", s.object_type.name()
"expected commit, got {}",
s.object_type.name()
)));
}
if s.signatures.len() != 1 {
return Err(Error::MalformedObject(format!(
"commit must have exactly 1 signature, got {}", s.signatures.len()
"commit must have exactly 1 signature, got {}",
s.signatures.len()
)));
}
let c = Commit::parse_body(&s.body)?;
@ -166,7 +178,10 @@ impl Commit {
/// Convenience: produce a partial signature entry with just the key set;
/// callers fill in `signature` after computing the Ed25519 signature.
pub fn signature_template(&self) -> SignatureEntry {
SignatureEntry { public_key: self.author_key, signature: [0u8; 64] }
SignatureEntry {
public_key: self.author_key,
signature: [0u8; 64],
}
}
}

View File

@ -52,7 +52,10 @@ pub enum Error {
impl From<std::io::Error> for Error {
fn from(e: std::io::Error) -> Self {
Error::Io { path: None, source: e }
Error::Io {
path: None,
source: e,
}
}
}
@ -70,6 +73,9 @@ pub trait IoExt<T> {
impl<T> IoExt<T> for std::result::Result<T, std::io::Error> {
fn ctx(self, path: impl Into<PathBuf>) -> Result<T> {
self.map_err(|e| Error::Io { path: Some(path.into()), source: e })
self.map_err(|e| Error::Io {
path: Some(path.into()),
source: e,
})
}
}

View File

@ -10,13 +10,21 @@ pub struct ObjectId(pub [u8; 32]);
pub const ZERO_ID: ObjectId = ObjectId([0u8; 32]);
impl ObjectId {
pub const fn from_bytes(b: [u8; 32]) -> Self { Self(b) }
pub const fn from_bytes(b: [u8; 32]) -> Self {
Self(b)
}
pub fn as_bytes(&self) -> &[u8; 32] { &self.0 }
pub fn as_bytes(&self) -> &[u8; 32] {
&self.0
}
pub fn to_hex(&self) -> String { hex::encode(self.0) }
pub fn to_hex(&self) -> String {
hex::encode(self.0)
}
pub fn is_zero(&self) -> bool { self.0 == [0u8; 32] }
pub fn is_zero(&self) -> bool {
self.0 == [0u8; 32]
}
pub fn from_hex(s: &str) -> Result<Self, Error> {
let bytes = hex::decode(s)?;
@ -46,7 +54,9 @@ impl fmt::Display for ObjectId {
impl FromStr for ObjectId {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Error> { Self::from_hex(s) }
fn from_str(s: &str) -> Result<Self, Error> {
Self::from_hex(s)
}
}
/// Compute a BLAKE3 hash with no key, returning an `ObjectId`.
@ -59,7 +69,9 @@ pub fn blake3_hash(data: &[u8]) -> ObjectId {
pub struct Hasher(blake3::Hasher);
impl Hasher {
pub fn new() -> Self { Self(blake3::Hasher::new()) }
pub fn new() -> Self {
Self(blake3::Hasher::new())
}
pub fn update(&mut self, data: &[u8]) -> &mut Self {
self.0.update(data);
self
@ -70,5 +82,7 @@ impl Hasher {
}
impl Default for Hasher {
fn default() -> Self { Self::new() }
fn default() -> Self {
Self::new()
}
}

View File

@ -21,7 +21,9 @@ struct Rule {
}
impl Ignore {
pub fn empty() -> Self { Self::default() }
pub fn empty() -> Self {
Self::default()
}
/// Parse a `.levcsignore` file's contents.
pub fn parse(text: &str) -> Self {
@ -43,15 +45,33 @@ impl Ignore {
};
// Always include `.levcs/` itself in the ignored set.
if let Ok(pattern) = Pattern::new(body) {
rules.push(Rule { pattern, negate, anchored });
rules.push(Rule {
pattern,
negate,
anchored,
});
}
}
// Always ignore `.levcs/`
if let Ok(pattern) = Pattern::new(".levcs") {
rules.insert(0, Rule { pattern, negate: false, anchored: true });
rules.insert(
0,
Rule {
pattern,
negate: false,
anchored: true,
},
);
}
if let Ok(pattern) = Pattern::new(".levcs/**") {
rules.insert(0, Rule { pattern, negate: false, anchored: true });
rules.insert(
0,
Rule {
pattern,
negate: false,
anchored: true,
},
);
}
Self { rules }
}
@ -63,8 +83,7 @@ impl Ignore {
r.pattern.matches(rel_path)
} else {
// Match against any suffix path component sequence.
r.pattern.matches(rel_path)
|| rel_path.split('/').any(|c| r.pattern.matches(c))
r.pattern.matches(rel_path) || rel_path.split('/').any(|c| r.pattern.matches(c))
};
if matched {
ignored = !r.negate;

View File

@ -33,9 +33,15 @@ impl IndexEntryFlags {
pub const CACHED: IndexEntryFlags = IndexEntryFlags(0b010);
pub const CONFLICTED: IndexEntryFlags = IndexEntryFlags(0b100);
pub fn is_tracked(self) -> bool { self.0 & 0b001 != 0 }
pub fn is_cached(self) -> bool { self.0 & 0b010 != 0 }
pub fn is_conflicted(self) -> bool { self.0 & 0b100 != 0 }
pub fn is_tracked(self) -> bool {
self.0 & 0b001 != 0
}
pub fn is_cached(self) -> bool {
self.0 & 0b010 != 0
}
pub fn is_conflicted(self) -> bool {
self.0 & 0b100 != 0
}
pub fn with(self, mask: IndexEntryFlags) -> IndexEntryFlags {
IndexEntryFlags(self.0 | mask.0)
@ -62,7 +68,9 @@ pub struct Index {
}
impl Index {
pub fn new() -> Self { Self::default() }
pub fn new() -> Self {
Self::default()
}
pub fn find(&self, path: &str) -> Option<&IndexEntry> {
self.entries.iter().find(|e| e.path == path)
@ -127,7 +135,9 @@ impl Index {
}
let version = LittleEndian::read_u32(&bytes[4..8]);
if version != INDEX_VERSION {
return Err(Error::InvalidIndex(format!("unsupported version {version}")));
return Err(Error::InvalidIndex(format!(
"unsupported version {version}"
)));
}
let count = LittleEndian::read_u32(&bytes[8..12]) as usize;
let mut entries = Vec::with_capacity(count);
@ -157,7 +167,12 @@ impl Index {
let size = LittleEndian::read_u64(&bytes[p..p + 8]);
p += 8;
entries.push(IndexEntry {
path, blob_hash: ObjectId(h), mode, flags, mtime_micros, size,
path,
blob_hash: ObjectId(h),
mode,
flags,
mtime_micros,
size,
});
}
if p != bytes.len() {
@ -170,7 +185,10 @@ impl Index {
match fs::read(path) {
Ok(bytes) => Index::parse(&bytes),
Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(Index::new()),
Err(e) => Err(Error::Io { path: Some(path.clone()), source: e }),
Err(e) => Err(Error::Io {
path: Some(path.clone()),
source: e,
}),
}
}

View File

@ -1,31 +1,31 @@
//! levcs-core: object model, hashing, and content-addressed object store
//! for the LeVCS specification (v1.1 trust-root revision).
pub mod blob;
pub mod commit;
pub mod error;
pub mod hash;
pub mod object;
pub mod blob;
pub mod tree;
pub mod commit;
pub mod release;
pub mod store;
pub mod refs;
pub mod index;
pub mod repo;
pub mod ignore;
pub mod index;
pub mod object;
pub mod refs;
pub mod release;
pub mod release_cache;
pub mod repo;
pub mod store;
pub mod tree;
pub use error::{Error, Result};
pub use hash::{ObjectId, ZERO_ID, blake3_hash};
pub use object::{
ObjectType, ObjectHeader, SignatureEntry, SignedObject, RawObject,
HEADER_SIZE, MAGIC, FORMAT_VERSION, SIGNATURE_ENTRY_SIZE,
};
pub use blob::Blob;
pub use tree::{Tree, TreeEntry, EntryType, FileMode};
pub use commit::{Commit, CommitFlags};
pub use release::Release;
pub use store::ObjectStore;
pub use refs::Refs;
pub use error::{Error, Result};
pub use hash::{blake3_hash, ObjectId, ZERO_ID};
pub use index::{Index, IndexEntry, IndexEntryFlags};
pub use object::{
ObjectHeader, ObjectType, RawObject, SignatureEntry, SignedObject, FORMAT_VERSION, HEADER_SIZE,
MAGIC, SIGNATURE_ENTRY_SIZE,
};
pub use refs::Refs;
pub use release::Release;
pub use repo::Repository;
pub use store::ObjectStore;
pub use tree::{EntryType, FileMode, Tree, TreeEntry};

View File

@ -105,7 +105,11 @@ impl ObjectHeader {
return Err(Error::MalformedObject("reserved bytes nonzero".into()));
}
let body_len = LittleEndian::read_u64(&bytes[8..16]);
Ok(Self { object_type, format_version, body_len })
Ok(Self {
object_type,
format_version,
body_len,
})
}
}
@ -132,7 +136,10 @@ impl SignatureEntry {
let mut sg = [0u8; 64];
pk.copy_from_slice(&bytes[0..32]);
sg.copy_from_slice(&bytes[32..96]);
Ok(Self { public_key: pk, signature: sg })
Ok(Self {
public_key: pk,
signature: sg,
})
}
}
@ -148,7 +155,11 @@ pub struct SignedObject {
impl SignedObject {
pub fn new(object_type: ObjectType, body: Vec<u8>) -> Self {
Self { object_type, body, signatures: Vec::new() }
Self {
object_type,
body,
signatures: Vec::new(),
}
}
/// The 32-byte hash that signers sign: BLAKE3(header || body).
@ -175,7 +186,8 @@ impl SignedObject {
.encode();
let n = self.signatures.len();
assert!(n <= 255, "too many signatures");
let mut out = Vec::with_capacity(HEADER_SIZE + self.body.len() + 1 + n * SIGNATURE_ENTRY_SIZE);
let mut out =
Vec::with_capacity(HEADER_SIZE + self.body.len() + 1 + n * SIGNATURE_ENTRY_SIZE);
out.extend_from_slice(&header);
out.extend_from_slice(&self.body);
out.push(n as u8);
@ -229,7 +241,9 @@ impl SignedObject {
let mut signatures = Vec::with_capacity(count);
for i in 0..count {
let off = trailer_start + i * SIGNATURE_ENTRY_SIZE;
signatures.push(SignatureEntry::decode(&bytes[off..off + SIGNATURE_ENTRY_SIZE])?);
signatures.push(SignatureEntry::decode(
&bytes[off..off + SIGNATURE_ENTRY_SIZE],
)?);
}
if bytes.len() != trailer_end {
return Err(Error::MalformedObject(format!(
@ -289,13 +303,19 @@ impl RawObject {
let mut sigs = Vec::with_capacity(count);
for i in 0..count {
let off = trailer_start + i * SIGNATURE_ENTRY_SIZE;
sigs.push(SignatureEntry::decode(&bytes[off..off + SIGNATURE_ENTRY_SIZE])?);
sigs.push(SignatureEntry::decode(
&bytes[off..off + SIGNATURE_ENTRY_SIZE],
)?);
}
sigs
} else {
Vec::new()
};
Ok(Self { object_type: header.object_type, body, signatures })
Ok(Self {
object_type: header.object_type,
body,
signatures,
})
}
/// Serialize a raw object (with empty trailer for unsigned types).
@ -308,7 +328,11 @@ impl RawObject {
.encode();
let signed = self.object_type.is_signed();
let n = self.signatures.len();
let trailer_size = if signed { 1 + n * SIGNATURE_ENTRY_SIZE } else { 0 };
let trailer_size = if signed {
1 + n * SIGNATURE_ENTRY_SIZE
} else {
0
};
let mut out = Vec::with_capacity(HEADER_SIZE + self.body.len() + trailer_size);
out.extend_from_slice(&header);
out.extend_from_slice(&self.body);
@ -321,7 +345,9 @@ impl RawObject {
out
}
pub fn object_id(&self) -> ObjectId { blake3_hash(&self.serialize()) }
pub fn object_id(&self) -> ObjectId {
blake3_hash(&self.serialize())
}
}
/// Helper used by unsigned object types (Blob, Tree) to wrap a body in the
@ -346,7 +372,11 @@ mod tests {
#[test]
fn header_roundtrip() {
let h = ObjectHeader { object_type: ObjectType::Blob, format_version: 1, body_len: 42 };
let h = ObjectHeader {
object_type: ObjectType::Blob,
format_version: 1,
body_len: 42,
};
let bytes = h.encode();
let h2 = ObjectHeader::decode(&bytes).unwrap();
assert_eq!(h, h2);
@ -355,7 +385,10 @@ mod tests {
#[test]
fn signed_object_roundtrip() {
let mut so = SignedObject::new(ObjectType::Commit, b"hello".to_vec());
so.signatures.push(SignatureEntry { public_key: [7u8; 32], signature: [9u8; 64] });
so.signatures.push(SignatureEntry {
public_key: [7u8; 32],
signature: [9u8; 64],
});
let bytes = so.serialize();
let so2 = SignedObject::parse(&bytes).unwrap();
assert_eq!(so.object_type, so2.object_type);

View File

@ -22,11 +22,17 @@ pub enum Head {
impl Refs {
pub fn new(levcs_dir: impl Into<PathBuf>) -> Self {
Self { levcs_dir: levcs_dir.into() }
Self {
levcs_dir: levcs_dir.into(),
}
}
pub fn refs_dir(&self) -> PathBuf { self.levcs_dir.join("refs") }
pub fn head_path(&self) -> PathBuf { self.levcs_dir.join("HEAD") }
pub fn refs_dir(&self) -> PathBuf {
self.levcs_dir.join("refs")
}
pub fn head_path(&self) -> PathBuf {
self.levcs_dir.join("HEAD")
}
pub fn ref_path(&self, name: &str) -> Result<PathBuf> {
validate_ref_name(name)?;
@ -38,7 +44,10 @@ impl Refs {
match fs::read_to_string(&path) {
Ok(s) => Ok(Some(parse_ref_value(&s)?)),
Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(None),
Err(e) => Err(Error::Io { path: Some(path), source: e }),
Err(e) => Err(Error::Io {
path: Some(path),
source: e,
}),
}
}
@ -55,7 +64,10 @@ impl Refs {
match fs::remove_file(&path) {
Ok(()) => Ok(()),
Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(()),
Err(e) => Err(Error::Io { path: Some(path), source: e }),
Err(e) => Err(Error::Io {
path: Some(path),
source: e,
}),
}
}
@ -64,7 +76,10 @@ impl Refs {
match fs::read_to_string(&path) {
Ok(s) => Ok(Some(parse_head(&s)?)),
Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(None),
Err(e) => Err(Error::Io { path: Some(path), source: e }),
Err(e) => Err(Error::Io {
path: Some(path),
source: e,
}),
}
}
@ -121,7 +136,9 @@ impl Refs {
pub fn list_branches(&self) -> Result<Vec<(String, ObjectId)>> {
let dir = self.refs_dir().join("branches");
let mut out = Vec::new();
if !dir.is_dir() { return Ok(out); }
if !dir.is_dir() {
return Ok(out);
}
for ent in fs::read_dir(&dir).ctx(dir.clone())? {
let ent = ent.ctx(dir.clone())?;
let name = ent.file_name().to_string_lossy().to_string();
@ -137,7 +154,9 @@ impl Refs {
pub fn list_releases(&self) -> Result<Vec<(String, ObjectId)>> {
let dir = self.refs_dir().join("releases");
let mut out = Vec::new();
if !dir.is_dir() { return Ok(out); }
if !dir.is_dir() {
return Ok(out);
}
for ent in fs::read_dir(&dir).ctx(dir.clone())? {
let ent = ent.ctx(dir.clone())?;
let name = ent.file_name().to_string_lossy().to_string();
@ -157,10 +176,14 @@ pub fn validate_ref_name(name: &str) -> Result<()> {
}
for comp in name.split('/') {
if comp.is_empty() {
return Err(Error::InvalidReference(format!("empty component in {name}")));
return Err(Error::InvalidReference(format!(
"empty component in {name}"
)));
}
if comp == "." || comp == ".." {
return Err(Error::InvalidReference(format!("reserved component: {comp}")));
return Err(Error::InvalidReference(format!(
"reserved component: {comp}"
)));
}
if comp.contains('\0') {
return Err(Error::InvalidReference("null byte".into()));
@ -218,7 +241,11 @@ mod tests {
#[test]
fn valid_names_accepted() {
for n in ["refs/branches/main", "refs/releases/v1.0", "refs/authority/current"] {
for n in [
"refs/branches/main",
"refs/releases/v1.0",
"refs/authority/current",
] {
validate_ref_name(n).unwrap();
}
}

View File

@ -38,7 +38,8 @@ impl Release {
if self.notes.len() > u32::MAX as usize {
return Err(Error::MalformedObject("release notes too large".into()));
}
let mut out = Vec::with_capacity(32 * 4 + 32 + 8 + 2 + self.label.len() + 4 + self.notes.len());
let mut out =
Vec::with_capacity(32 * 4 + 32 + 8 + 2 + self.label.len() + 4 + self.notes.len());
out.extend_from_slice(self.tree.as_bytes());
out.extend_from_slice(self.parent_release.as_bytes());
out.extend_from_slice(self.predecessor.as_bytes());
@ -80,7 +81,9 @@ impl Release {
let label_len = LittleEndian::read_u16(&body[p..p + 2]) as usize;
p += 2;
if body.len() < p + label_len + 4 {
return Err(Error::MalformedObject("release label/notes truncated".into()));
return Err(Error::MalformedObject(
"release label/notes truncated".into(),
));
}
let label = std::str::from_utf8(&body[p..p + label_len])
.map_err(|_| Error::MalformedObject("release label not UTF-8".into()))?
@ -96,11 +99,19 @@ impl Release {
.to_string();
p += notes_len;
if p != body.len() {
return Err(Error::MalformedObject("trailing bytes after release notes".into()));
return Err(Error::MalformedObject(
"trailing bytes after release notes".into(),
));
}
Ok(Self {
tree, parent_release, predecessor, authority, declarer_key,
timestamp_micros, label, notes,
tree,
parent_release,
predecessor,
authority,
declarer_key,
timestamp_micros,
label,
notes,
})
}
@ -111,11 +122,14 @@ impl Release {
pub fn from_signed(s: &SignedObject) -> Result<Self, Error> {
if s.object_type != ObjectType::Release {
return Err(Error::MalformedObject(format!(
"expected release, got {}", s.object_type.name()
"expected release, got {}",
s.object_type.name()
)));
}
if s.signatures.is_empty() {
return Err(Error::MalformedObject("release must have at least one signature".into()));
return Err(Error::MalformedObject(
"release must have at least one signature".into(),
));
}
let r = Release::parse_body(&s.body)?;
if r.declarer_key != s.signatures[0].public_key {

View File

@ -210,7 +210,7 @@ mod tests {
let repo = Repository::init_skeleton(&work).unwrap();
let dir = cache_dir(&repo);
let paths = populate(&dir, 5, 100); // total 500
// Cap at 250 → must evict 3 oldest (250 left).
// Cap at 250 → must evict 3 oldest (250 left).
let report = evict_to(&repo, 250).unwrap();
assert_eq!(report.evicted_files, 3);
assert!(report.remaining_bytes <= 250);

View File

@ -95,12 +95,23 @@ impl Repository {
fn open_at(workdir: PathBuf, levcs_dir: PathBuf) -> Self {
let objects = ObjectStore::new(levcs_dir.join("objects"));
let refs = Refs::new(levcs_dir.clone());
Self { workdir, levcs_dir, objects, refs }
Self {
workdir,
levcs_dir,
objects,
refs,
}
}
pub fn index_path(&self) -> PathBuf { self.levcs_dir.join("index") }
pub fn config_path(&self) -> PathBuf { self.levcs_dir.join("config") }
pub fn ignore_path(&self) -> PathBuf { self.workdir.join(".levcsignore") }
pub fn index_path(&self) -> PathBuf {
self.levcs_dir.join("index")
}
pub fn config_path(&self) -> PathBuf {
self.levcs_dir.join("config")
}
pub fn ignore_path(&self) -> PathBuf {
self.workdir.join(".levcsignore")
}
pub fn read_index(&self) -> Result<Index> {
Index::read_from(&self.index_path())
@ -260,7 +271,11 @@ impl Repository {
}
/// Find the path within a tree (recursively) and return (entry_type, hash).
pub fn lookup_path(&self, tree_id: ObjectId, path: &str) -> Result<Option<(EntryType, ObjectId)>> {
pub fn lookup_path(
&self,
tree_id: ObjectId,
path: &str,
) -> Result<Option<(EntryType, ObjectId)>> {
let raw = self.objects.read_typed(tree_id, ObjectType::Tree)?;
let tree = Tree::parse_body(&raw.body)?;
let mut comps = path.split('/').filter(|c| !c.is_empty());
@ -293,7 +308,9 @@ impl Repository {
fn mode_from_index(m: u8) -> FileMode {
let mut bits = 0u8;
if m & 0o111 != 0 { bits |= 0b01; }
if m & 0o111 != 0 {
bits |= 0b01;
}
FileMode(bits)
}
@ -312,7 +329,10 @@ impl TreeBuilder {
self.files.push((first.to_string(), hash, mode));
}
Some(rest) => {
self.dirs.entry(first.to_string()).or_default().insert(rest, hash, mode);
self.dirs
.entry(first.to_string())
.or_default()
.insert(rest, hash, mode);
}
}
}
@ -320,7 +340,12 @@ impl TreeBuilder {
fn write(self, repo: &Repository) -> Result<ObjectId> {
let mut tree = Tree::new();
for (name, hash, mode) in self.files {
tree.entries.push(TreeEntry { name, entry_type: EntryType::Blob, mode, hash });
tree.entries.push(TreeEntry {
name,
entry_type: EntryType::Blob,
mode,
hash,
});
}
for (name, sub) in self.dirs {
let sub_id = sub.write(repo)?;

View File

@ -77,7 +77,10 @@ impl ObjectStore {
if e.kind() == std::io::ErrorKind::NotFound {
Error::NotFound(id.to_hex())
} else {
Error::Io { path: Some(path.clone()), source: e }
Error::Io {
path: Some(path.clone()),
source: e,
}
}
})?;
let mut buf = Vec::new();
@ -102,7 +105,9 @@ impl ObjectStore {
let obj = self.read_object(id)?;
if obj.object_type != expected {
return Err(Error::MalformedObject(format!(
"expected {}, got {}", expected.name(), obj.object_type.name()
"expected {}, got {}",
expected.name(),
obj.object_type.name()
)));
}
Ok(obj)
@ -167,11 +172,13 @@ mod tests {
fn tempdir() -> PathBuf {
let mut p = std::env::temp_dir();
let n: u64 = blake3::hash(format!("{:?}-{}", std::time::SystemTime::now(), std::process::id()).as_bytes())
.as_bytes()
.iter()
.take(8)
.fold(0u64, |acc, b| (acc << 8) | *b as u64);
let n: u64 = blake3::hash(
format!("{:?}-{}", std::time::SystemTime::now(), std::process::id()).as_bytes(),
)
.as_bytes()
.iter()
.take(8)
.fold(0u64, |acc, b| (acc << 8) | *b as u64);
p.push(format!("levcs-store-test-{n}"));
std::fs::create_dir_all(&p).unwrap();
p

View File

@ -41,8 +41,12 @@ impl FileMode {
pub const EXECUTABLE: FileMode = FileMode(0b01);
pub const SYMLINK: FileMode = FileMode(0b10);
pub fn is_executable(self) -> bool { self.0 & 0b01 != 0 }
pub fn is_symlink(self) -> bool { self.0 & 0b10 != 0 }
pub fn is_executable(self) -> bool {
self.0 & 0b01 != 0
}
pub fn is_symlink(self) -> bool {
self.0 & 0b10 != 0
}
}
#[derive(Clone, Debug, PartialEq, Eq)]
@ -59,7 +63,10 @@ impl TreeEntry {
return Err(Error::InvalidPath("empty tree-entry name".into()));
}
if name.len() > 255 {
return Err(Error::InvalidPath(format!("name too long ({} bytes)", name.len())));
return Err(Error::InvalidPath(format!(
"name too long ({} bytes)",
name.len()
)));
}
if name == "." || name == ".." {
return Err(Error::InvalidPath(format!("reserved name: {name}")));
@ -80,7 +87,9 @@ pub struct Tree {
}
impl Tree {
pub fn new() -> Self { Self::default() }
pub fn new() -> Self {
Self::default()
}
/// Sort entries by name (byte-wise) and validate; required for hash
/// determinism.
@ -88,12 +97,14 @@ impl Tree {
for e in &self.entries {
TreeEntry::validate_name(&e.name)?;
}
self.entries.sort_by(|a, b| a.name.as_bytes().cmp(b.name.as_bytes()));
self.entries
.sort_by(|a, b| a.name.as_bytes().cmp(b.name.as_bytes()));
// detect duplicate names
for w in self.entries.windows(2) {
if w[0].name == w[1].name {
return Err(Error::MalformedObject(format!(
"duplicate tree entry name: {}", w[0].name
"duplicate tree entry name: {}",
w[0].name
)));
}
}
@ -128,7 +139,9 @@ impl Tree {
let mut p = 0usize;
while p < body.len() {
if body.len() < p + 2 {
return Err(Error::MalformedObject("tree entry: short name length".into()));
return Err(Error::MalformedObject(
"tree entry: short name length".into(),
));
}
let n = LittleEndian::read_u16(&body[p..p + 2]) as usize;
p += 2;
@ -146,7 +159,12 @@ impl Tree {
let mut h = [0u8; 32];
h.copy_from_slice(&body[p..p + 32]);
p += 32;
entries.push(TreeEntry { name, entry_type, mode, hash: ObjectId(h) });
entries.push(TreeEntry {
name,
entry_type,
mode,
hash: ObjectId(h),
});
}
Ok(Tree { entries })
}

View File

@ -13,8 +13,8 @@
use std::panic::{catch_unwind, AssertUnwindSafe};
use levcs_core::object::{
ObjectHeader, ObjectType, RawObject, SignatureEntry, SignedObject, FORMAT_VERSION,
HEADER_SIZE, SIGNATURE_ENTRY_SIZE,
ObjectHeader, ObjectType, RawObject, SignatureEntry, SignedObject, FORMAT_VERSION, HEADER_SIZE,
SIGNATURE_ENTRY_SIZE,
};
use levcs_core::{Commit, Release, Tree};

View File

@ -6,9 +6,7 @@
//! a property fails, proptest shrinks toward a minimal failing case.
use levcs_core::object::RawObject;
use levcs_core::{
Blob, Commit, CommitFlags, EntryType, FileMode, ObjectId, Tree, TreeEntry,
};
use levcs_core::{Blob, Commit, CommitFlags, EntryType, FileMode, ObjectId, Tree, TreeEntry};
use proptest::collection::vec;
use proptest::prelude::*;

View File

@ -32,7 +32,11 @@ impl Role {
2 => Self::Contributor,
3 => Self::Maintainer,
4 => Self::Owner,
n => return Err(IdentityError::MalformedAuthority(format!("unknown role: {n}"))),
n => {
return Err(IdentityError::MalformedAuthority(format!(
"unknown role: {n}"
)))
}
})
}
@ -51,7 +55,11 @@ impl Role {
"contributor" => Self::Contributor,
"maintainer" => Self::Maintainer,
"owner" => Self::Owner,
other => return Err(IdentityError::MalformedAuthority(format!("unknown role: {other}"))),
other => {
return Err(IdentityError::MalformedAuthority(format!(
"unknown role: {other}"
)))
}
})
}
}
@ -85,14 +93,19 @@ pub struct AuthorityBody {
}
impl AuthorityBody {
pub fn is_genesis(&self) -> bool { self.previous_authority.is_zero() }
pub fn is_genesis(&self) -> bool {
self.previous_authority.is_zero()
}
pub fn find_member(&self, key: &PublicKey) -> Option<&MemberEntry> {
self.members.iter().find(|m| m.key == *key)
}
pub fn policy_value(&self, key: &str) -> Option<&[u8]> {
self.policy.iter().find(|p| p.key == key).map(|p| p.value.as_slice())
self.policy
.iter()
.find(|p| p.key == key)
.map(|p| p.value.as_slice())
}
pub fn public_read(&self) -> bool {
@ -106,7 +119,12 @@ impl AuthorityBody {
pub fn protected_branches(&self) -> Vec<String> {
self.policy_value("protected_branches")
.and_then(|v| std::str::from_utf8(v).ok())
.map(|s| s.split(',').filter(|s| !s.is_empty()).map(|s| s.to_string()).collect())
.map(|s| {
s.split(',')
.filter(|s| !s.is_empty())
.map(|s| s.to_string())
.collect()
})
.unwrap_or_default()
}
@ -116,7 +134,8 @@ impl AuthorityBody {
for m in &self.members {
if m.handle.len() > 64 {
return Err(IdentityError::MalformedAuthority(format!(
"handle too long: {} bytes", m.handle.len()
"handle too long: {} bytes",
m.handle.len()
)));
}
}
@ -124,22 +143,29 @@ impl AuthorityBody {
for w in self.members.windows(2) {
if w[0].key == w[1].key {
return Err(IdentityError::MalformedAuthority(format!(
"duplicate member: {}", w[0].key
"duplicate member: {}",
w[0].key
)));
}
}
self.policy.sort_by(|a, b| a.key.as_bytes().cmp(b.key.as_bytes()));
self.policy
.sort_by(|a, b| a.key.as_bytes().cmp(b.key.as_bytes()));
for w in self.policy.windows(2) {
if w[0].key == w[1].key {
return Err(IdentityError::MalformedAuthority(format!(
"duplicate policy key: {}", w[0].key
"duplicate policy key: {}",
w[0].key
)));
}
if w[0].key.len() > 255 {
return Err(IdentityError::MalformedAuthority("policy key too long".into()));
return Err(IdentityError::MalformedAuthority(
"policy key too long".into(),
));
}
if w[0].value.len() > u16::MAX as usize {
return Err(IdentityError::MalformedAuthority("policy value too large".into()));
return Err(IdentityError::MalformedAuthority(
"policy value too large".into(),
));
}
}
Ok(())
@ -222,7 +248,9 @@ fn encode_body(b: &AuthorityBody) -> Result<Vec<u8>> {
out.extend_from_slice(m.added_by.as_bytes());
}
if b.policy.len() > u16::MAX as usize {
return Err(IdentityError::MalformedAuthority("too many policy entries".into()));
return Err(IdentityError::MalformedAuthority(
"too many policy entries".into(),
));
}
let mut pc = [0u8; 2];
LittleEndian::write_u16(&mut pc, b.policy.len() as u16);
@ -240,7 +268,9 @@ fn encode_body(b: &AuthorityBody) -> Result<Vec<u8>> {
fn decode_body(bytes: &[u8]) -> Result<AuthorityBody> {
if bytes.len() < 2 + 32 + 32 + 4 + 8 + 2 {
return Err(IdentityError::MalformedAuthority("authority body too short".into()));
return Err(IdentityError::MalformedAuthority(
"authority body too short".into(),
));
}
let mut p = 0usize;
let schema_version = LittleEndian::read_u16(&bytes[p..p + 2]);
@ -265,7 +295,9 @@ fn decode_body(bytes: &[u8]) -> Result<AuthorityBody> {
let mut members = Vec::with_capacity(member_count);
for _ in 0..member_count {
if bytes.len() < p + 32 + 2 {
return Err(IdentityError::MalformedAuthority("member entry truncated".into()));
return Err(IdentityError::MalformedAuthority(
"member entry truncated".into(),
));
}
let mut k = [0u8; 32];
k.copy_from_slice(&bytes[p..p + 32]);
@ -273,7 +305,9 @@ fn decode_body(bytes: &[u8]) -> Result<AuthorityBody> {
let hl = LittleEndian::read_u16(&bytes[p..p + 2]) as usize;
p += 2;
if bytes.len() < p + hl + 1 + 8 + 32 {
return Err(IdentityError::MalformedAuthority("member entry truncated".into()));
return Err(IdentityError::MalformedAuthority(
"member entry truncated".into(),
));
}
let handle = std::str::from_utf8(&bytes[p..p + hl])
.map_err(|_| IdentityError::MalformedAuthority("handle not UTF-8".into()))?
@ -295,19 +329,25 @@ fn decode_body(bytes: &[u8]) -> Result<AuthorityBody> {
});
}
if bytes.len() < p + 2 {
return Err(IdentityError::MalformedAuthority("policy_count truncated".into()));
return Err(IdentityError::MalformedAuthority(
"policy_count truncated".into(),
));
}
let policy_count = LittleEndian::read_u16(&bytes[p..p + 2]) as usize;
p += 2;
let mut policy = Vec::with_capacity(policy_count);
for _ in 0..policy_count {
if bytes.len() < p + 1 {
return Err(IdentityError::MalformedAuthority("policy entry truncated".into()));
return Err(IdentityError::MalformedAuthority(
"policy entry truncated".into(),
));
}
let kl = bytes[p] as usize;
p += 1;
if bytes.len() < p + kl + 2 {
return Err(IdentityError::MalformedAuthority("policy entry truncated".into()));
return Err(IdentityError::MalformedAuthority(
"policy entry truncated".into(),
));
}
let key = std::str::from_utf8(&bytes[p..p + kl])
.map_err(|_| IdentityError::MalformedAuthority("policy key not UTF-8".into()))?
@ -316,7 +356,9 @@ fn decode_body(bytes: &[u8]) -> Result<AuthorityBody> {
let vl = LittleEndian::read_u16(&bytes[p..p + 2]) as usize;
p += 2;
if bytes.len() < p + vl {
return Err(IdentityError::MalformedAuthority("policy value truncated".into()));
return Err(IdentityError::MalformedAuthority(
"policy value truncated".into(),
));
}
let value = bytes[p..p + vl].to_vec();
p += vl;
@ -324,7 +366,8 @@ fn decode_body(bytes: &[u8]) -> Result<AuthorityBody> {
}
if p != bytes.len() {
return Err(IdentityError::MalformedAuthority(format!(
"trailing {} byte(s) after authority body", bytes.len() - p
"trailing {} byte(s) after authority body",
bytes.len() - p
)));
}
Ok(AuthorityBody {
@ -386,7 +429,10 @@ pub fn parse_toml_authority(text: &str) -> Result<AuthorityBody> {
}
let mut policy = Vec::new();
for (k, v) in t.policy {
policy.push(PolicyEntry { key: k, value: encode_policy_value(&v) });
policy.push(PolicyEntry {
key: k,
value: encode_policy_value(&v),
});
}
let mut body = AuthorityBody {
schema_version: t.schema_version,
@ -435,9 +481,9 @@ pub fn render_toml_authority(body: &AuthorityBody) -> Result<String> {
}
fn parse_blake3(s: &str) -> Result<ObjectId> {
let rest = s
.strip_prefix("blake3:")
.ok_or_else(|| IdentityError::MalformedAuthority(format!("missing blake3: prefix in {s}")))?;
let rest = s.strip_prefix("blake3:").ok_or_else(|| {
IdentityError::MalformedAuthority(format!("missing blake3: prefix in {s}"))
})?;
Ok(ObjectId::from_hex(rest).map_err(|e| IdentityError::MalformedAuthority(e.to_string()))?)
}
@ -449,11 +495,19 @@ fn parse_rfc3339_micros(s: &str) -> Result<i64> {
.ok_or_else(|| IdentityError::MalformedAuthority(format!("bad timestamp: {s}")))?;
let dparts: Vec<&str> = date.split('-').collect();
if dparts.len() != 3 {
return Err(IdentityError::MalformedAuthority(format!("bad date: {date}")));
return Err(IdentityError::MalformedAuthority(format!(
"bad date: {date}"
)));
}
let y: i64 = dparts[0].parse().map_err(|_| IdentityError::MalformedAuthority(s.into()))?;
let mo: u32 = dparts[1].parse().map_err(|_| IdentityError::MalformedAuthority(s.into()))?;
let d: u32 = dparts[2].parse().map_err(|_| IdentityError::MalformedAuthority(s.into()))?;
let y: i64 = dparts[0]
.parse()
.map_err(|_| IdentityError::MalformedAuthority(s.into()))?;
let mo: u32 = dparts[1]
.parse()
.map_err(|_| IdentityError::MalformedAuthority(s.into()))?;
let d: u32 = dparts[2]
.parse()
.map_err(|_| IdentityError::MalformedAuthority(s.into()))?;
let rest = rest.trim_end_matches('Z');
let (time, frac) = match rest.split_once('.') {
Some((t, f)) => (t, f),
@ -461,11 +515,19 @@ fn parse_rfc3339_micros(s: &str) -> Result<i64> {
};
let tparts: Vec<&str> = time.split(':').collect();
if tparts.len() != 3 {
return Err(IdentityError::MalformedAuthority(format!("bad time: {time}")));
return Err(IdentityError::MalformedAuthority(format!(
"bad time: {time}"
)));
}
let h: i64 = tparts[0].parse().map_err(|_| IdentityError::MalformedAuthority(s.into()))?;
let mi: i64 = tparts[1].parse().map_err(|_| IdentityError::MalformedAuthority(s.into()))?;
let se: i64 = tparts[2].parse().map_err(|_| IdentityError::MalformedAuthority(s.into()))?;
let h: i64 = tparts[0]
.parse()
.map_err(|_| IdentityError::MalformedAuthority(s.into()))?;
let mi: i64 = tparts[1]
.parse()
.map_err(|_| IdentityError::MalformedAuthority(s.into()))?;
let se: i64 = tparts[2]
.parse()
.map_err(|_| IdentityError::MalformedAuthority(s.into()))?;
let micros_frac: i64 = if frac.is_empty() {
0
} else {
@ -474,7 +536,8 @@ fn parse_rfc3339_micros(s: &str) -> Result<i64> {
while s6.len() < 6 {
s6.push('0');
}
s6.parse().map_err(|_| IdentityError::MalformedAuthority("bad fractional seconds".into()))?
s6.parse()
.map_err(|_| IdentityError::MalformedAuthority("bad fractional seconds".into()))?
};
let days = ymd_to_days(y, mo, d);
let total_secs = days * 86400 + h * 3600 + mi * 60 + se;
@ -579,8 +642,14 @@ mod tests {
added_by: pk,
}],
policy: vec![
PolicyEntry { key: "public_read".into(), value: vec![0x01] },
PolicyEntry { key: "allowed_handlers".into(), value: b"builtin".to_vec() },
PolicyEntry {
key: "public_read".into(),
value: vec![0x01],
},
PolicyEntry {
key: "allowed_handlers".into(),
value: b"builtin".to_vec(),
},
],
};
body.normalize().unwrap();
@ -607,7 +676,10 @@ mod tests {
added_micros: 1_700_000_000_000_000,
added_by: pk,
}],
policy: vec![PolicyEntry { key: "public_read".into(), value: vec![0x01] }],
policy: vec![PolicyEntry {
key: "public_read".into(),
value: vec![0x01],
}],
};
body.assign_genesis_repo_id().unwrap();
let toml_text = render_toml_authority(&body).unwrap();

View File

@ -63,14 +63,20 @@ impl Default for KdfParams {
impl Keychain {
pub fn new() -> Self {
Self { schema_version: KEYCHAIN_SCHEMA_VERSION, keys: Vec::new() }
Self {
schema_version: KEYCHAIN_SCHEMA_VERSION,
keys: Vec::new(),
}
}
pub fn default_path() -> PathBuf {
if let Some(xdg) = std::env::var_os("XDG_CONFIG_HOME") {
PathBuf::from(xdg).join("levcs").join("keys.toml")
} else if let Some(home) = std::env::var_os("HOME") {
PathBuf::from(home).join(".config").join("levcs").join("keys.toml")
PathBuf::from(home)
.join(".config")
.join("levcs")
.join("keys.toml")
} else {
PathBuf::from("/tmp").join("levcs").join("keys.toml")
}
@ -161,12 +167,7 @@ impl Keychain {
Ok(())
}
pub fn add_encrypted(
&mut self,
label: &str,
sk: &SecretKey,
passphrase: &[u8],
) -> Result<()> {
pub fn add_encrypted(&mut self, label: &str, sk: &SecretKey, passphrase: &[u8]) -> Result<()> {
if self.entry(label).is_some() {
return Err(IdentityError::Other(format!("key already exists: {label}")));
}
@ -193,7 +194,9 @@ impl Keychain {
pub fn rename(&mut self, old: &str, new: &str) -> Result<()> {
if self.entry(new).is_some() {
return Err(IdentityError::Other(format!("destination already exists: {new}")));
return Err(IdentityError::Other(format!(
"destination already exists: {new}"
)));
}
let e = self
.entry_mut(old)
@ -205,7 +208,9 @@ impl Keychain {
fn now_rfc3339() -> String {
use std::time::{SystemTime, UNIX_EPOCH};
let dur = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default();
let dur = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap_or_default();
// crude RFC3339 (no tzdb dependency): seconds since epoch as Z time.
let secs = dur.as_secs() as i64;
// y/m/d via integer math.
@ -267,7 +272,8 @@ fn decrypt_secret(enc: &EncryptedKey, passphrase: &[u8]) -> Result<SecretKey> {
};
if enc.algorithm != "xchacha20poly1305-argon2id" {
return Err(IdentityError::Crypto(format!(
"unknown algorithm: {}", enc.algorithm
"unknown algorithm: {}",
enc.algorithm
)));
}
let salt = B64
@ -297,8 +303,13 @@ fn decrypt_secret(enc: &EncryptedKey, passphrase: &[u8]) -> Result<SecretKey> {
fn derive_key(passphrase: &[u8], salt: &[u8], params: &KdfParams) -> Result<[u8; 32]> {
use argon2::{Algorithm, Argon2, Params, Version};
let p = Params::new(params.memory, params.iterations, params.parallelism, Some(32))
.map_err(|e| IdentityError::Argon2(e.to_string()))?;
let p = Params::new(
params.memory,
params.iterations,
params.parallelism,
Some(32),
)
.map_err(|e| IdentityError::Argon2(e.to_string()))?;
let argon = Argon2::new(Algorithm::Argon2id, Version::V0x13, p);
let mut out = [0u8; 32];
argon
@ -326,7 +337,8 @@ mod tests {
fn keychain_encryption_roundtrip() {
let mut kc = Keychain::new();
let sk = SecretKey::generate();
kc.add_encrypted("locked", &sk, b"correct horse battery staple").unwrap();
kc.add_encrypted("locked", &sk, b"correct horse battery staple")
.unwrap();
let s = toml::to_string(&kc).unwrap();
let kc2: Keychain = toml::from_str(&s).unwrap();
let unlocked = kc2

View File

@ -14,11 +14,17 @@ use crate::error::{IdentityError, Result};
pub struct PublicKey(pub [u8; 32]);
impl PublicKey {
pub fn from_bytes(b: [u8; 32]) -> Self { Self(b) }
pub fn from_bytes(b: [u8; 32]) -> Self {
Self(b)
}
pub fn as_bytes(&self) -> &[u8; 32] { &self.0 }
pub fn as_bytes(&self) -> &[u8; 32] {
&self.0
}
pub fn to_levcs(&self) -> String { format!("ed25519:{}", hex::encode(self.0)) }
pub fn to_levcs(&self) -> String {
format!("ed25519:{}", hex::encode(self.0))
}
pub fn parse_levcs(s: &str) -> Result<Self> {
let rest = s
@ -27,7 +33,8 @@ impl PublicKey {
let bytes = hex::decode(rest)?;
if bytes.len() != 32 {
return Err(IdentityError::InvalidKey(format!(
"expected 32 bytes, got {}", bytes.len()
"expected 32 bytes, got {}",
bytes.len()
)));
}
let mut arr = [0u8; 32];
@ -39,7 +46,8 @@ impl PublicKey {
let vk = VerifyingKey::from_bytes(&self.0)
.map_err(|e| IdentityError::Crypto(format!("public key: {e}")))?;
let sig = Signature::from_bytes(signature);
vk.verify(msg, &sig).map_err(|_| IdentityError::BadSignature)?;
vk.verify(msg, &sig)
.map_err(|_| IdentityError::BadSignature)?;
Ok(())
}
}
@ -63,7 +71,9 @@ pub struct SecretKey {
}
impl SecretKey {
pub fn from_seed(seed: [u8; 32]) -> Self { Self { seed } }
pub fn from_seed(seed: [u8; 32]) -> Self {
Self { seed }
}
pub fn generate() -> Self {
let mut seed = [0u8; 32];
@ -71,7 +81,9 @@ impl SecretKey {
Self { seed }
}
pub fn seed(&self) -> &[u8; 32] { &self.seed }
pub fn seed(&self) -> &[u8; 32] {
&self.seed
}
pub fn public(&self) -> PublicKey {
let sk = SigningKey::from_bytes(&self.seed);
@ -84,7 +96,9 @@ impl SecretKey {
sig.to_bytes()
}
pub fn to_levcs(&self) -> String { format!("ed25519:{}", hex::encode(self.seed)) }
pub fn to_levcs(&self) -> String {
format!("ed25519:{}", hex::encode(self.seed))
}
pub fn parse_levcs(s: &str) -> Result<Self> {
let rest = s
@ -93,7 +107,8 @@ impl SecretKey {
let bytes = hex::decode(rest)?;
if bytes.len() != 32 {
return Err(IdentityError::InvalidKey(format!(
"expected 32 bytes, got {}", bytes.len()
"expected 32 bytes, got {}",
bytes.len()
)));
}
let mut seed = [0u8; 32];

View File

@ -1,21 +1,21 @@
//! levcs-identity: keychains, authority objects, signing, verification.
pub mod error;
pub mod keys;
pub mod keychain;
pub mod authority;
pub mod error;
pub mod keychain;
pub mod keys;
pub mod sign;
pub mod verify;
pub use error::IdentityError;
pub use keys::{KeyLabel, PublicKey, SecretKey};
pub use keychain::{Keychain, KeychainEntry};
pub use authority::{
AuthorityBody, MemberEntry, PolicyEntry, Role, AUTHORITY_SCHEMA_VERSION,
parse_toml_authority, render_toml_authority,
parse_toml_authority, render_toml_authority, AuthorityBody, MemberEntry, PolicyEntry, Role,
AUTHORITY_SCHEMA_VERSION,
};
pub use sign::{sign_commit, sign_release, sign_authority, sign_message};
pub use error::IdentityError;
pub use keychain::{Keychain, KeychainEntry};
pub use keys::{KeyLabel, PublicKey, SecretKey};
pub use sign::{sign_authority, sign_commit, sign_message, sign_release};
pub use verify::{
verify_signed_object, verify_commit, verify_authority_chain, verify_genesis,
Verification, VerifyError,
verify_authority_chain, verify_commit, verify_genesis, verify_signed_object, Verification,
VerifyError,
};

View File

@ -22,7 +22,10 @@ pub fn sign_commit(commit: Commit, sk: &SecretKey) -> Result<SignedObject> {
let mut signed = commit.into_signed().map_err(IdentityError::from)?;
let h = signed.signing_hash();
let signature = sk.sign(h.as_bytes());
signed.signatures.push(SignatureEntry { public_key: sk.public().0, signature });
signed.signatures.push(SignatureEntry {
public_key: sk.public().0,
signature,
});
Ok(signed)
}
@ -37,7 +40,10 @@ pub fn sign_release(release: Release, sk: &SecretKey) -> Result<SignedObject> {
let mut signed = release.into_signed().map_err(IdentityError::from)?;
let h = signed.signing_hash();
let signature = sk.sign(h.as_bytes());
signed.signatures.push(SignatureEntry { public_key: sk.public().0, signature });
signed.signatures.push(SignatureEntry {
public_key: sk.public().0,
signature,
});
Ok(signed)
}
@ -47,7 +53,10 @@ pub fn sign_authority(body: &AuthorityBody, sk: &SecretKey) -> Result<SignedObje
debug_assert_eq!(signed.object_type, ObjectType::Authority);
let h = signed.signing_hash();
let signature = sk.sign(h.as_bytes());
signed.signatures.push(SignatureEntry { public_key: sk.public().0, signature });
signed.signatures.push(SignatureEntry {
public_key: sk.public().0,
signature,
});
Ok(signed)
}
@ -56,5 +65,8 @@ pub fn sign_authority(body: &AuthorityBody, sk: &SecretKey) -> Result<SignedObje
pub fn add_cosigner_signature(signed: &mut SignedObject, sk: &SecretKey) {
let h = signed.signing_hash();
let signature = sk.sign(h.as_bytes());
signed.signatures.push(SignatureEntry { public_key: sk.public().0, signature });
signed.signatures.push(SignatureEntry {
public_key: sk.public().0,
signature,
});
}

View File

@ -69,10 +69,12 @@ impl ObjectSource for MemorySource {
fn read_signed<S: ObjectSource>(src: &S, id: ObjectId) -> Verification<SignedObject> {
let bytes = src.read_raw(id)?;
Ok(SignedObject::parse(&bytes).map_err(|e| VerifyError::Object {
hash: id.to_hex(),
kind: e.to_string(),
})?)
Ok(
SignedObject::parse(&bytes).map_err(|e| VerifyError::Object {
hash: id.to_hex(),
kind: e.to_string(),
})?,
)
}
/// Verify the signature(s) on a SignedObject. Each signature in the trailer
@ -94,13 +96,16 @@ pub fn verify_signed_object(signed: &SignedObject) -> Verification<()> {
pub fn verify_genesis(genesis: &SignedObject) -> Verification<AuthorityBody> {
if genesis.object_type != ObjectType::Authority {
return Err(VerifyError::Authority(format!(
"expected authority object, got {}", genesis.object_type.name()
"expected authority object, got {}",
genesis.object_type.name()
)));
}
let body = AuthorityBody::parse(&genesis.body)
.map_err(|e| VerifyError::Authority(e.to_string()))?;
let body =
AuthorityBody::parse(&genesis.body).map_err(|e| VerifyError::Authority(e.to_string()))?;
if !body.previous_authority.is_zero() {
return Err(VerifyError::Authority("genesis must have zero previous_authority".into()));
return Err(VerifyError::Authority(
"genesis must have zero previous_authority".into(),
));
}
if body.version != 1 {
return Err(VerifyError::Authority("genesis version must be 1".into()));
@ -148,7 +153,8 @@ fn verify_authority_step(
}
if new_body.version != prev_body.version + 1 {
return Err(VerifyError::Authority(format!(
"version not sequential: prev {} -> next {}", prev_body.version, new_body.version
"version not sequential: prev {} -> next {}",
prev_body.version, new_body.version
)));
}
if new_body.previous_authority != prev_id {
@ -232,10 +238,7 @@ pub fn verify_successor(
"signer must hold owner role in predecessor".into(),
));
}
let found_in_new = a_new
.signatures
.iter()
.any(|s| s.public_key == signer.0);
let found_in_new = a_new.signatures.iter().any(|s| s.public_key == signer.0);
if !found_in_new {
return Err(VerifyError::Authority(
"predecessor owner must also sign the new authority".into(),
@ -260,11 +263,13 @@ pub fn verify_fork(
if a_source_body.public_read() {
return Ok(());
}
let m = a_source_body
.find_member(&fork_author)
.ok_or_else(|| VerifyError::Authority("fork author not authorized to read source".into()))?;
let m = a_source_body.find_member(&fork_author).ok_or_else(|| {
VerifyError::Authority("fork author not authorized to read source".into())
})?;
if m.role < Role::Reader {
return Err(VerifyError::Authority("fork author lacks reader role".into()));
return Err(VerifyError::Authority(
"fork author lacks reader role".into(),
));
}
Ok(())
}
@ -324,7 +329,10 @@ pub fn verify_commit<S: ObjectSource>(
if signed.signatures.len() != 1 {
return Err(VerifyError::Commit {
hash: commit_id.to_hex(),
reason: format!("commit must have 1 signature, got {}", signed.signatures.len()),
reason: format!(
"commit must have 1 signature, got {}",
signed.signatures.len()
),
});
}
let sig = signed.signatures[0];
@ -360,7 +368,9 @@ pub fn verify_commit<S: ObjectSource>(
return Err(VerifyError::Commit {
hash: commit_id.to_hex(),
reason: format!(
"insufficient role: have {}, need {}", member.role.name(), required.name()
"insufficient role: have {}, need {}",
member.role.name(),
required.name()
),
});
}
@ -387,10 +397,12 @@ pub fn verify_commit<S: ObjectSource>(
});
}
let parent_signed = read_signed(src, commit.parents[0])?;
let parent_commit =
levcs_core::Commit::from_signed(&parent_signed).map_err(|e| {
VerifyError::Commit { hash: commit_id.to_hex(), reason: e.to_string() }
})?;
let parent_commit = levcs_core::Commit::from_signed(&parent_signed).map_err(|e| {
VerifyError::Commit {
hash: commit_id.to_hex(),
reason: e.to_string(),
}
})?;
let source_auth_signed = read_signed(src, parent_commit.authority)?;
let source_auth_body = AuthorityBody::parse(&source_auth_signed.body)
.map_err(|e| VerifyError::Authority(e.to_string()))?;
@ -438,9 +450,11 @@ pub fn verify_release<S: ObjectSource>(src: &S, release_id: ObjectId) -> Verific
});
}
verify_signed_object(&signed)?;
let release = levcs_core::Release::parse_body(&signed.body).map_err(|e| {
VerifyError::Object { hash: release_id.to_hex(), kind: e.to_string() }
})?;
let release =
levcs_core::Release::parse_body(&signed.body).map_err(|e| VerifyError::Object {
hash: release_id.to_hex(),
kind: e.to_string(),
})?;
let _ = verify_authority_chain(src, release.authority)?;
let auth_signed = read_signed(src, release.authority)?;
let auth_body = AuthorityBody::parse(&auth_signed.body)
@ -653,7 +667,10 @@ mod tests {
fork.assign_genesis_repo_id().unwrap();
let fork_signed = crate::sign::sign_authority(&fork, &bob).unwrap();
let res = verify_fork(bob_pk, &source, &fork_signed, &fork);
assert!(res.is_err(), "stranger should not be able to fork private source");
assert!(
res.is_err(),
"stranger should not be able to fork private source"
);
}
#[test]

View File

@ -156,7 +156,10 @@ fn key_parsers_handle_almost_valid_inputs() {
1 => 'g',
2 => 'Z',
3 => ' ',
_ => "0123456789abcdef".chars().nth((lcg(&mut seed) % 16) as usize).unwrap(),
_ => "0123456789abcdef"
.chars()
.nth((lcg(&mut seed) % 16) as usize)
.unwrap(),
};
hex_part.push(c);
}

View File

@ -34,7 +34,9 @@ use levcs_core::object::ObjectType;
use levcs_core::{Commit, EntryType, ObjectId, ObjectStore, Tree};
use levcs_identity::authority::AuthorityBody;
use levcs_identity::keys::PublicKey;
use levcs_identity::verify::{verify_authority_chain, verify_genesis, ObjectSource as VerifySource};
use levcs_identity::verify::{
verify_authority_chain, verify_genesis, ObjectSource as VerifySource,
};
use levcs_merge::engine::check_handler_allowed;
use levcs_merge::record::MergeRecord;
use levcs_protocol::auth::{verify_request, AuthRequest, DEFAULT_CLOCK_SKEW};
@ -82,7 +84,9 @@ pub struct MirrorConfig {
pub writeback: bool,
}
fn default_mirror_mode() -> String { "full".into() }
fn default_mirror_mode() -> String {
"full".into()
}
impl InstanceConfig {
/// Look up a mirror declaration for `repo_id`. Returns `None` for
@ -177,7 +181,10 @@ pub fn router(state: AppState) -> Router {
.route("/levcs/v1/instance/peers", get(handle_instance_peers))
.route("/levcs/v1/repos/:repo_id/info", get(handle_repo_info))
.route("/levcs/v1/repos/:repo_id/refs", get(handle_repo_refs))
.route("/levcs/v1/repos/:repo_id/objects/:hash", get(handle_get_object))
.route(
"/levcs/v1/repos/:repo_id/objects/:hash",
get(handle_get_object),
)
.route("/levcs/v1/repos/:repo_id/pack", get(handle_get_pack))
.route("/levcs/v1/repos/:repo_id/push", post(handle_push))
.route("/levcs/v1/repos/:repo_id/init", post(handle_init))
@ -429,7 +436,11 @@ fn verify_request_against(
let nonce = h("LeVCS-Nonce")?;
let sig = h("LeVCS-Signature")?;
let now = levcs_protocol::auth::current_micros();
let req = AuthRequest { method, path_with_query: path, body };
let req = AuthRequest {
method,
path_with_query: path,
body,
};
let auth = verify_request(&req, key, ts, nonce, sig, now, DEFAULT_CLOCK_SKEW)
.map_err(|e| err(StatusCode::UNAUTHORIZED, e.to_string()))?;
let mut cache = s.nonce_cache.lock().unwrap();
@ -449,10 +460,10 @@ async fn handle_init(
let auth = verify_request_against(&s, &headers, "POST", &path, body.as_ref())?;
// Body is the genesis authority object (signed).
use levcs_core::object::SignedObject;
let signed = SignedObject::parse(&body)
.map_err(|e| err(StatusCode::BAD_REQUEST, e.to_string()))?;
let body_parsed = verify_genesis(&signed)
.map_err(|e| err(StatusCode::BAD_REQUEST, e.to_string()))?;
let signed =
SignedObject::parse(&body).map_err(|e| err(StatusCode::BAD_REQUEST, e.to_string()))?;
let body_parsed =
verify_genesis(&signed).map_err(|e| err(StatusCode::BAD_REQUEST, e.to_string()))?;
if hex::encode(body_parsed.repo_id.as_bytes()) != repo_id {
return Err(err(
StatusCode::BAD_REQUEST,
@ -532,7 +543,10 @@ async fn handle_push(
return Err(err(StatusCode::BAD_REQUEST, "body truncated after pack"));
}
let manifest_len = u32::from_le_bytes([
body[pack_len], body[pack_len + 1], body[pack_len + 2], body[pack_len + 3],
body[pack_len],
body[pack_len + 1],
body[pack_len + 2],
body[pack_len + 3],
]) as usize;
if body.len() != pack_len + 4 + manifest_len + 64 {
return Err(err(StatusCode::BAD_REQUEST, "body length mismatch"));
@ -616,7 +630,8 @@ async fn handle_push(
let record = MergeRecord::from_toml(record_str)
.map_err(|e| err(StatusCode::BAD_REQUEST, format!("merge-record: {e}")))?;
for fr in &record.files {
if !check_handler_allowed(&fr.handler, &fr.handler_hash, &s.config.allowed_handlers) {
if !check_handler_allowed(&fr.handler, &fr.handler_hash, &s.config.allowed_handlers)
{
return Err(err(
StatusCode::FORBIDDEN,
format!(
@ -646,10 +661,7 @@ async fn handle_push(
.find_member(&auth.key)
.ok_or_else(|| err(StatusCode::FORBIDDEN, "pusher not in authority"))?;
if member.role < levcs_identity::authority::Role::Contributor {
return Err(err(
StatusCode::FORBIDDEN,
"pusher lacks contributor role",
));
return Err(err(StatusCode::FORBIDDEN, "pusher lacks contributor role"));
}
// Step 4: verify each new commit and compare-and-swap each ref.
@ -661,9 +673,10 @@ async fn handle_push(
.read(&u.r#ref)
.map_err(|e| err(StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?;
let old_expected = match &u.old_hash {
Some(s) if !s.is_empty() => Some(ObjectId::from_hex(s).map_err(|e| {
err(StatusCode::BAD_REQUEST, format!("bad old_hash: {e}"))
})?),
Some(s) if !s.is_empty() => Some(
ObjectId::from_hex(s)
.map_err(|e| err(StatusCode::BAD_REQUEST, format!("bad old_hash: {e}")))?,
),
_ => None,
};
if old_actual != old_expected {
@ -690,7 +703,10 @@ async fn handle_push(
other => {
return Err(err(
StatusCode::BAD_REQUEST,
format!("ref tip is {} object, must be Commit or Release", other.name()),
format!(
"ref tip is {} object, must be Commit or Release",
other.name()
),
));
}
}

View File

@ -101,7 +101,12 @@ fn build_genesis() -> Setup {
let auth_bytes = signed.serialize();
let auth_id = blake3_hash(&auth_bytes);
let repo_id = auth.repo_id.to_hex();
Setup { sk, auth_id, repo_id, auth_bytes }
Setup {
sk,
auth_id,
repo_id,
auth_bytes,
}
}
/// Build a single-file commit. Each successive call uses different
@ -339,7 +344,12 @@ async fn dogfood_three_instance_scenario() {
let sk = SecretKey::from_seed(seed);
let client = Client::new(base);
let (pack, cid, _) = build_commit(
&sk, auth_id, "a.txt", b"fourth\n", Some(prev), 1_700_000_020_000_000,
&sk,
auth_id,
"a.txt",
b"fourth\n",
Some(prev),
1_700_000_020_000_000,
);
let manifest = PushManifest {
authority_hash: auth_id.to_hex(),
@ -453,9 +463,19 @@ async fn dogfood_three_instance_scenario() {
})
.await
.unwrap();
assert_eq!(a_obj, b_obj, "A and B must serve byte-identical head commits");
assert_eq!(a_obj, c_obj, "A and C must serve byte-identical head commits");
assert_eq!(blake3_hash(&a_obj), head, "object hash must match the requested id");
assert_eq!(
a_obj, b_obj,
"A and B must serve byte-identical head commits"
);
assert_eq!(
a_obj, c_obj,
"A and C must serve byte-identical head commits"
);
assert_eq!(
blake3_hash(&a_obj),
head,
"object hash must match the requested id"
);
a_task.abort();
b_task.abort();

View File

@ -73,7 +73,10 @@ async fn instance_info_and_init_roundtrip() {
added_micros: now,
added_by: pk,
}],
policy: vec![PolicyEntry { key: "public_read".into(), value: vec![0x01] }],
policy: vec![PolicyEntry {
key: "public_read".into(),
value: vec![0x01],
}],
};
body.normalize().unwrap();
body.assign_genesis_repo_id().unwrap();
@ -84,7 +87,11 @@ async fn instance_info_and_init_roundtrip() {
// Sign request manually via levcs-protocol's sign_request.
use levcs_protocol::auth::{sign_request, AuthRequest};
let path = format!("/repos/{repo_id}/init");
let req = AuthRequest { method: "POST", path_with_query: &path, body: &bytes };
let req = AuthRequest {
method: "POST",
path_with_query: &path,
body: &bytes,
};
let (key, ts, nonce, sig) = sign_request(&sk, &req).unwrap();
let res = client
.post(format!("{base}{path}"))
@ -98,7 +105,12 @@ async fn instance_info_and_init_roundtrip() {
.send()
.await
.unwrap();
assert!(res.status().is_success(), "init returned {}: {}", res.status(), res.text().await.unwrap());
assert!(
res.status().is_success(),
"init returned {}: {}",
res.status(),
res.text().await.unwrap()
);
// /repos/{id}/info should now succeed
let info: serde_json::Value = client

View File

@ -83,7 +83,10 @@ fn build_genesis() -> Setup {
added_micros: now,
added_by: pk,
}],
policy: vec![PolicyEntry { key: "public_read".into(), value: vec![0x01] }],
policy: vec![PolicyEntry {
key: "public_read".into(),
value: vec![0x01],
}],
};
auth.normalize().unwrap();
auth.assign_genesis_repo_id().unwrap();
@ -91,18 +94,19 @@ fn build_genesis() -> Setup {
let auth_bytes = signed.serialize();
let auth_id = blake3_hash(&auth_bytes);
let repo_id = auth.repo_id.to_hex();
Setup { sk, auth_id, repo_id, auth_bytes }
Setup {
sk,
auth_id,
repo_id,
auth_bytes,
}
}
/// Build a single root commit whose tree carries one blob with the
/// provided contents. Different `marker` strings give different
/// commit hashes — useful when we want two commits with no shared
/// ancestry beyond the genesis state.
fn build_commit(
sk: &SecretKey,
auth_id: ObjectId,
marker: &str,
) -> (Pack, ObjectId) {
fn build_commit(sk: &SecretKey, auth_id: ObjectId, marker: &str) -> (Pack, ObjectId) {
let pk = sk.public();
let blob = Blob::new(format!("hello-{marker}\n").into_bytes());
let blob_bytes = blob.serialize();

View File

@ -12,9 +12,7 @@ use std::sync::Arc;
use levcs_client::Client;
use levcs_core::hash::blake3_hash;
use levcs_core::object::ObjectType;
use levcs_core::{
Blob, Commit, CommitFlags, EntryType, FileMode, Tree, TreeEntry, ZERO_ID,
};
use levcs_core::{Blob, Commit, CommitFlags, EntryType, FileMode, Tree, TreeEntry, ZERO_ID};
use levcs_identity::authority::{AuthorityBody, MemberEntry, PolicyEntry, Role};
use levcs_identity::keys::SecretKey;
use levcs_identity::sign::{sign_authority, sign_commit};
@ -82,7 +80,12 @@ fn build_genesis() -> Setup {
let auth_bytes = signed.serialize();
let auth_id = blake3_hash(&auth_bytes);
let repo_id = auth.repo_id.to_hex();
Setup { sk, auth_id, repo_id, auth_bytes }
Setup {
sk,
auth_id,
repo_id,
auth_bytes,
}
}
fn build_simple_commit_pack(
@ -231,7 +234,10 @@ async fn mirror_pulls_state_from_source() {
"mirror's main must match source"
);
assert!(mirror_info.is_mirror, "/info must declare mirror status");
assert_eq!(mirror_info.mirror_source.as_deref(), Some(source_base.as_str()));
assert_eq!(
mirror_info.mirror_source.as_deref(),
Some(source_base.as_str())
);
assert_eq!(mirror_info.mirror_mode.as_deref(), Some("full"));
// Push to mirror must be refused (read-only by config).
@ -243,8 +249,7 @@ async fn mirror_pulls_state_from_source() {
move || {
let sk = SecretKey::from_seed(seed);
let client = Client::new(mb);
let (pack, commit_id) =
build_simple_commit_pack(&sk, auth_id, "x.txt", b"x\n", None);
let (pack, commit_id) = build_simple_commit_pack(&sk, auth_id, "x.txt", b"x\n", None);
let manifest = PushManifest {
authority_hash: auth_id.to_hex(),
updates: vec![PushUpdate {
@ -333,8 +338,7 @@ async fn migrate_replays_repo_to_fresh_instance() {
let sk = SecretKey::from_seed(seed);
let client = Client::new(base);
client.init(&sk, &repo_id, &auth_bytes).unwrap();
let (pack, commit_id) =
build_simple_commit_pack(&sk, auth_id, "f.txt", b"v1\n", None);
let (pack, commit_id) = build_simple_commit_pack(&sk, auth_id, "f.txt", b"v1\n", None);
let manifest = PushManifest {
authority_hash: auth_id.to_hex(),
updates: vec![PushUpdate {
@ -364,8 +368,7 @@ async fn migrate_replays_repo_to_fresh_instance() {
// §5.7 step 1: init with the authority object.
client.init(&sk, &repo_id, &auth_bytes).unwrap();
// §5.7 step 3: push history.
let (pack, commit_id) =
build_simple_commit_pack(&sk, auth_id, "f.txt", b"v1\n", None);
let (pack, commit_id) = build_simple_commit_pack(&sk, auth_id, "f.txt", b"v1\n", None);
let manifest = PushManifest {
authority_hash: auth_id.to_hex(),
updates: vec![PushUpdate {

View File

@ -11,9 +11,7 @@ use std::path::PathBuf;
use levcs_client::Client;
use levcs_core::hash::blake3_hash;
use levcs_core::object::ObjectType;
use levcs_core::{
Blob, Commit, CommitFlags, EntryType, FileMode, Tree, TreeEntry, ZERO_ID,
};
use levcs_core::{Blob, Commit, CommitFlags, EntryType, FileMode, Tree, TreeEntry, ZERO_ID};
use levcs_identity::authority::{AuthorityBody, MemberEntry, PolicyEntry, Role};
use levcs_identity::keys::SecretKey;
use levcs_identity::sign::{sign_authority, sign_commit};
@ -32,7 +30,9 @@ fn tempdir(prefix: &str) -> PathBuf {
p
}
async fn start(allowed_handlers: Vec<String>) -> (SocketAddr, tokio::task::JoinHandle<()>, PathBuf) {
async fn start(
allowed_handlers: Vec<String>,
) -> (SocketAddr, tokio::task::JoinHandle<()>, PathBuf) {
let root = tempdir("levcs-policy");
let cfg = InstanceConfig {
root: root.clone(),
@ -77,7 +77,10 @@ fn build_genesis() -> Setup {
added_micros: now,
added_by: pk,
}],
policy: vec![PolicyEntry { key: "public_read".into(), value: vec![0x01] }],
policy: vec![PolicyEntry {
key: "public_read".into(),
value: vec![0x01],
}],
};
auth.normalize().unwrap();
auth.assign_genesis_repo_id().unwrap();
@ -85,7 +88,12 @@ fn build_genesis() -> Setup {
let auth_bytes = signed.serialize();
let auth_id = blake3_hash(&auth_bytes);
let repo_id = auth.repo_id.to_hex();
Setup { sk, auth_id, repo_id, auth_bytes }
Setup {
sk,
auth_id,
repo_id,
auth_bytes,
}
}
/// Build a commit whose tree has `path` -> blob(content) and (optionally) a
@ -180,9 +188,8 @@ async fn builtin_only_policy_admits_clean_push() {
let sk = SecretKey::from_seed(seed);
let client = Client::new(base);
client.init(&sk, &repo_id, &auth_bytes).unwrap();
let (pack, commit_id) = build_pack_with_optional_record(
&sk, auth_id, "a.txt", b"hello\n", None, None,
);
let (pack, commit_id) =
build_pack_with_optional_record(&sk, auth_id, "a.txt", b"hello\n", None, None);
let manifest = PushManifest {
authority_hash: auth_id.to_hex(),
updates: vec![PushUpdate {
@ -233,7 +240,12 @@ status = "auto"
let client = Client::new(base);
client.init(&sk, &repo_id, &auth_bytes).unwrap();
let (pack, commit_id) = build_pack_with_optional_record(
&sk, auth_id, "a.txt", b"hello\n", Some(&toml), None,
&sk,
auth_id,
"a.txt",
b"hello\n",
Some(&toml),
None,
);
let manifest = PushManifest {
authority_hash: auth_id.to_hex(),
@ -254,7 +266,10 @@ status = "auto"
match result {
Err(levcs_client::ClientError::Server { status, body }) => {
assert_eq!(status, 403, "expected 403, got {status} {body}");
assert!(body.contains("tree-sitter:protobuf"), "error must name the rejected handler: {body}");
assert!(
body.contains("tree-sitter:protobuf"),
"error must name the rejected handler: {body}"
);
}
other => panic!("expected 403 server error, got {other:?}"),
}
@ -292,7 +307,12 @@ status = "auto"
let client = Client::new(base);
client.init(&sk, &repo_id, &auth_bytes).unwrap();
let (pack, commit_id) = build_pack_with_optional_record(
&sk, auth_id, "a.txt", b"hello\n", Some(&toml), None,
&sk,
auth_id,
"a.txt",
b"hello\n",
Some(&toml),
None,
);
let manifest = PushManifest {
authority_hash: auth_id.to_hex(),
@ -309,7 +329,10 @@ status = "auto"
})
.await
.unwrap();
assert!(result.is_ok(), "permissive policy must accept any handler: {result:?}");
assert!(
result.is_ok(),
"permissive policy must accept any handler: {result:?}"
);
task.abort();
let _ = std::fs::remove_dir_all(root);

View File

@ -15,9 +15,7 @@ use std::path::PathBuf;
use levcs_client::{Client, ClientError};
use levcs_core::hash::blake3_hash;
use levcs_core::object::ObjectType;
use levcs_core::{
Blob, Commit, CommitFlags, EntryType, FileMode, Tree, TreeEntry, ZERO_ID,
};
use levcs_core::{Blob, Commit, CommitFlags, EntryType, FileMode, Tree, TreeEntry, ZERO_ID};
use levcs_identity::authority::{AuthorityBody, MemberEntry, PolicyEntry, Role};
use levcs_identity::keys::SecretKey;
use levcs_identity::sign::{sign_authority, sign_commit};
@ -81,7 +79,10 @@ fn build_genesis() -> Setup {
added_micros: now,
added_by: pk,
}],
policy: vec![PolicyEntry { key: "public_read".into(), value: vec![0x01] }],
policy: vec![PolicyEntry {
key: "public_read".into(),
value: vec![0x01],
}],
};
auth.normalize().unwrap();
auth.assign_genesis_repo_id().unwrap();
@ -89,7 +90,12 @@ fn build_genesis() -> Setup {
let auth_bytes = signed.serialize();
let auth_id = blake3_hash(&auth_bytes);
let repo_id = auth.repo_id.to_hex();
Setup { sk, auth_id, repo_id, auth_bytes }
Setup {
sk,
auth_id,
repo_id,
auth_bytes,
}
}
/// Build a single-blob, single-commit pack for a push test. Returns
@ -251,7 +257,10 @@ async fn full_mode_accepts_branch_push() {
})
.await
.unwrap();
assert!(result.is_ok(), "full mode must accept branch push: {result:?}");
assert!(
result.is_ok(),
"full mode must accept branch push: {result:?}"
);
task.abort();
let _ = std::fs::remove_dir_all(root);
}

View File

@ -94,10 +94,7 @@ pub fn handler_rank(name: &str) -> u8 {
/// motivating example is "I don't trust the Rust handler today, force
/// `*.rs` back to textual." Returns the merged config or, on
/// promotion attempt, the offending glob.
pub fn layer_local_over(
repo: &MergeConfig,
local: &MergeConfig,
) -> Result<MergeConfig, String> {
pub fn layer_local_over(repo: &MergeConfig, local: &MergeConfig) -> Result<MergeConfig, String> {
let mut merged = repo.clone();
for local_rule in &local.rules {
let local_rank = handler_rank(&local_rule.handler);
@ -163,7 +160,9 @@ impl Default for CascadeEngine {
}
impl CascadeEngine {
pub fn new() -> Self { Self::default() }
pub fn new() -> Self {
Self::default()
}
pub fn with_config(mut self, cfg: MergeConfig) -> Self {
self.rules = cfg.rules;
@ -188,7 +187,10 @@ impl CascadeEngine {
let bytes = fetch(entry)?;
let hash = parse_hash(&entry.hash)?;
let plugin = PluginHandler::new(
PluginConfig { name: entry.name.clone(), hash },
PluginConfig {
name: entry.name.clone(),
hash,
},
&bytes,
)
.map_err(|e| format!("plugin {}: {e}", entry.name))?;
@ -231,13 +233,7 @@ impl CascadeEngine {
self.handlers.iter().find(|h| h.name() == pref).cloned()
}
pub fn merge_file(
&self,
path: &Path,
base: &[u8],
ours: &[u8],
theirs: &[u8],
) -> MergeResult {
pub fn merge_file(&self, path: &Path, base: &[u8], ours: &[u8], theirs: &[u8]) -> MergeResult {
if let Some(h) = self.pick(path) {
if h.applicable(path, base, ours, theirs) {
let result = h.merge(path, base, ours, theirs);
@ -264,10 +260,22 @@ impl CascadeEngine {
/// repository policy checks (§6.6.4) to expand the "builtin" alias and to
/// reject merge-record entries that reference unknown handlers.
pub const BUILTIN_HANDLERS: &[&str] = &[
"json", "yaml", "toml", "xml", "markdown", "prose", "textual",
"tree-sitter:rust", "tree-sitter:python", "tree-sitter:javascript",
"tree-sitter:typescript", "tree-sitter:go", "tree-sitter:c",
"tree-sitter:cpp", "tree-sitter:java", "tree-sitter:ruby",
"json",
"yaml",
"toml",
"xml",
"markdown",
"prose",
"textual",
"tree-sitter:rust",
"tree-sitter:python",
"tree-sitter:javascript",
"tree-sitter:typescript",
"tree-sitter:go",
"tree-sitter:c",
"tree-sitter:cpp",
"tree-sitter:java",
"tree-sitter:ruby",
"tree-sitter:shell",
];
@ -278,8 +286,7 @@ pub const BUILTIN_HANDLERS: &[&str] = &[
pub const FLOW_HANDLERS: &[&str] = &["ours-only", "theirs-only", "delete", "no-auto", "none"];
pub fn is_builtin_handler(name: &str) -> bool {
BUILTIN_HANDLERS.iter().any(|b| *b == name)
|| FLOW_HANDLERS.iter().any(|b| *b == name)
BUILTIN_HANDLERS.iter().any(|b| *b == name) || FLOW_HANDLERS.iter().any(|b| *b == name)
}
/// Decide whether a `(handler, handler_hash)` tuple is permitted by the
@ -321,16 +328,18 @@ pub fn check_handler_allowed(handler: &str, handler_hash: &str, allowed: &[Strin
let hash = handler_hash.strip_prefix("blake3:").unwrap_or(handler_hash);
Some(format!("{handler}:blake3:{hash}"))
};
allowed.iter().any(|s| {
s == handler
|| needle_with_hash.as_deref().map(|n| s == n).unwrap_or(false)
})
allowed
.iter()
.any(|s| s == handler || needle_with_hash.as_deref().map(|n| s == n).unwrap_or(false))
}
fn parse_hash(s: &str) -> Result<[u8; 32], String> {
let trimmed = s.strip_prefix("blake3:").unwrap_or(s);
if trimmed.len() != 64 {
return Err(format!("expected 64-char blake3 hash, got {} chars", trimmed.len()));
return Err(format!(
"expected 64-char blake3 hash, got {} chars",
trimmed.len()
));
}
let mut out = [0u8; 32];
for (i, byte) in out.iter_mut().enumerate() {
@ -464,7 +473,10 @@ mod tests {
policy: None,
};
let err = layer_local_over(&repo, &local).expect_err("must reject");
assert!(err.contains("promote"), "error must mention promotion: {err}");
assert!(
err.contains("promote"),
"error must mention promotion: {err}"
);
}
#[test]

View File

@ -15,7 +15,9 @@ pub struct JsonHandler;
pub struct TomlHandler;
impl MergeHandler for JsonHandler {
fn name(&self) -> &str { "json" }
fn name(&self) -> &str {
"json"
}
fn applicable(&self, path: &Path, _b: &[u8], _o: &[u8], _t: &[u8]) -> bool {
path.extension().and_then(|e| e.to_str()) == Some("json")
}
@ -35,17 +37,27 @@ impl MergeHandler for JsonHandler {
let status = if conflicts.is_empty() {
MergeStatus::Merged {
content: bytes,
notes: vec![MergeNote { message: "structural JSON three-way merge".into() }],
notes: vec![MergeNote {
message: "structural JSON three-way merge".into(),
}],
}
} else {
MergeStatus::Conflict { regions: conflicts, partial: bytes }
MergeStatus::Conflict {
regions: conflicts,
partial: bytes,
}
};
MergeResult { handler: self.name().into(), status }
MergeResult {
handler: self.name().into(),
status,
}
}
}
impl MergeHandler for TomlHandler {
fn name(&self) -> &str { "toml" }
fn name(&self) -> &str {
"toml"
}
fn applicable(&self, path: &Path, _b: &[u8], _o: &[u8], _t: &[u8]) -> bool {
path.extension().and_then(|e| e.to_str()) == Some("toml")
}
@ -71,12 +83,20 @@ impl MergeHandler for TomlHandler {
let status = if conflicts.is_empty() {
MergeStatus::Merged {
content: bytes,
notes: vec![MergeNote { message: "structural TOML three-way merge".into() }],
notes: vec![MergeNote {
message: "structural TOML three-way merge".into(),
}],
}
} else {
MergeStatus::Conflict { regions: conflicts, partial: bytes }
MergeStatus::Conflict {
regions: conflicts,
partial: bytes,
}
};
MergeResult { handler: self.name().into(), status }
MergeResult {
handler: self.name().into(),
status,
}
}
}
@ -158,8 +178,12 @@ pub fn merge_value(
};
match (bv, ov, tv) {
(None, None, None) => {}
(None, Some(o), None) => { merged.insert(k.clone(), o.clone()); }
(None, None, Some(t)) => { merged.insert(k.clone(), t.clone()); }
(None, Some(o), None) => {
merged.insert(k.clone(), o.clone());
}
(None, None, Some(t)) => {
merged.insert(k.clone(), t.clone());
}
(Some(b), Some(o), None) => {
if b == o {
// theirs deleted; ours unchanged → delete.
@ -167,8 +191,12 @@ pub fn merge_value(
// ours modified, theirs deleted → conflict (keep ours).
merged.insert(k.clone(), o.clone());
conflicts.push(ConflictRegion {
description: format!("{sub_path}: modified by ours, deleted by theirs"),
base: 0..0, ours: 0..0, theirs: 0..0,
description: format!(
"{sub_path}: modified by ours, deleted by theirs"
),
base: 0..0,
ours: 0..0,
theirs: 0..0,
});
}
}
@ -178,8 +206,12 @@ pub fn merge_value(
} else {
merged.insert(k.clone(), t.clone());
conflicts.push(ConflictRegion {
description: format!("{sub_path}: deleted by ours, modified by theirs"),
base: 0..0, ours: 0..0, theirs: 0..0,
description: format!(
"{sub_path}: deleted by ours, modified by theirs"
),
base: 0..0,
ours: 0..0,
theirs: 0..0,
});
}
}
@ -194,8 +226,12 @@ pub fn merge_value(
} else {
merged.insert(k.clone(), o.clone());
conflicts.push(ConflictRegion {
description: format!("{sub_path}: independently added with different values"),
base: 0..0, ours: 0..0, theirs: 0..0,
description: format!(
"{sub_path}: independently added with different values"
),
base: 0..0,
ours: 0..0,
theirs: 0..0,
});
}
}
@ -220,7 +256,9 @@ pub fn merge_value(
// Scalar conflict.
let conflict = ConflictRegion {
description: format!("{path}: divergent scalar modifications"),
base: 0..0, ours: 0..0, theirs: 0..0,
base: 0..0,
ours: 0..0,
theirs: 0..0,
};
(ours.clone(), vec![conflict])
}

View File

@ -49,7 +49,9 @@ fn yaml_to_json(v: serde_yaml::Value) -> Value {
if let Some(i) = n.as_i64() {
Value::from(i)
} else if let Some(f) = n.as_f64() {
serde_json::Number::from_f64(f).map(Value::Number).unwrap_or(Value::Null)
serde_json::Number::from_f64(f)
.map(Value::Number)
.unwrap_or(Value::Null)
} else {
Value::Null
}
@ -98,7 +100,9 @@ fn json_to_yaml(v: &Value) -> serde_yaml::Value {
}
impl MergeHandler for YamlHandler {
fn name(&self) -> &str { "yaml" }
fn name(&self) -> &str {
"yaml"
}
fn applicable(&self, path: &Path, _b: &[u8], _o: &[u8], _t: &[u8]) -> bool {
matches!(
@ -128,12 +132,20 @@ impl MergeHandler for YamlHandler {
let status = if conflicts.is_empty() {
MergeStatus::Merged {
content: bytes,
notes: vec![MergeNote { message: "structural YAML three-way merge".into() }],
notes: vec![MergeNote {
message: "structural YAML three-way merge".into(),
}],
}
} else {
MergeStatus::Conflict { regions: conflicts, partial: bytes }
MergeStatus::Conflict {
regions: conflicts,
partial: bytes,
}
};
MergeResult { handler: self.name().into(), status }
MergeResult {
handler: self.name().into(),
status,
}
}
}
@ -150,14 +162,20 @@ struct MdSection {
fn split_markdown(src: &str) -> Vec<MdSection> {
let mut out: Vec<MdSection> = Vec::new();
let mut cur = MdSection { heading: None, text: String::new() };
let mut cur = MdSection {
heading: None,
text: String::new(),
};
for line in src.split_inclusive('\n') {
let trimmed = line.trim_start();
if trimmed.starts_with('#') {
if !cur.text.is_empty() || cur.heading.is_some() {
out.push(std::mem::replace(
&mut cur,
MdSection { heading: None, text: String::new() },
MdSection {
heading: None,
text: String::new(),
},
));
}
// Extract heading text (without leading #s).
@ -179,7 +197,9 @@ fn split_markdown(src: &str) -> Vec<MdSection> {
}
impl MergeHandler for MarkdownHandler {
fn name(&self) -> &str { "markdown" }
fn name(&self) -> &str {
"markdown"
}
fn applicable(&self, path: &Path, _b: &[u8], _o: &[u8], _t: &[u8]) -> bool {
matches!(
@ -257,8 +277,13 @@ impl MergeHandler for MarkdownHandler {
} else {
output.push(s.text.clone());
conflicts.push(ConflictRegion {
description: format!("section '{}' modified vs deleted", s.heading.clone().unwrap_or_default()),
base: 0..0, ours: 0..0, theirs: 0..0,
description: format!(
"section '{}' modified vs deleted",
s.heading.clone().unwrap_or_default()
),
base: 0..0,
ours: 0..0,
theirs: 0..0,
});
}
}
@ -287,7 +312,8 @@ impl MergeHandler for MarkdownHandler {
output.push(String::from_utf8_lossy(&partial).to_string());
let h = s.heading.clone().unwrap_or_default();
for mut r in regions {
r.description = format!("section '{h}': {}", r.description);
r.description =
format!("section '{h}': {}", r.description);
conflicts.push(r);
}
}
@ -318,7 +344,9 @@ impl MergeHandler for MarkdownHandler {
"section '{}' deleted by ours, modified by theirs",
s.heading.clone().unwrap_or_default()
),
base: 0..0, ours: 0..0, theirs: 0..0,
base: 0..0,
ours: 0..0,
theirs: 0..0,
});
}
}
@ -329,12 +357,20 @@ impl MergeHandler for MarkdownHandler {
let status = if conflicts.is_empty() {
MergeStatus::Merged {
content: merged.into_bytes(),
notes: vec![MergeNote { message: "section-based markdown merge".into() }],
notes: vec![MergeNote {
message: "section-based markdown merge".into(),
}],
}
} else {
MergeStatus::Conflict { regions: conflicts, partial: merged.into_bytes() }
MergeStatus::Conflict {
regions: conflicts,
partial: merged.into_bytes(),
}
};
MergeResult { handler: self.name().into(), status }
MergeResult {
handler: self.name().into(),
status,
}
}
}
@ -364,16 +400,17 @@ fn split_paragraphs(s: &str) -> Vec<String> {
}
impl MergeHandler for ProseHandler {
fn name(&self) -> &str { "prose" }
fn name(&self) -> &str {
"prose"
}
fn applicable(&self, path: &Path, _b: &[u8], _o: &[u8], _t: &[u8]) -> bool {
matches!(path.extension().and_then(|e| e.to_str()), Some("txt"))
}
fn merge(&self, path: &Path, base: &[u8], ours: &[u8], theirs: &[u8]) -> MergeResult {
let to_paras = |b: &[u8]| -> Option<Vec<String>> {
std::str::from_utf8(b).ok().map(split_paragraphs)
};
let to_paras =
|b: &[u8]| -> Option<Vec<String>> { std::str::from_utf8(b).ok().map(split_paragraphs) };
let (b, o, t) = match (to_paras(base), to_paras(ours), to_paras(theirs)) {
(Some(b), Some(o), Some(t)) => (b, o, t),
_ => {
@ -383,9 +420,7 @@ impl MergeHandler for ProseHandler {
};
}
};
let key = |p: &str| -> String {
blake3::hash(p.trim().as_bytes()).to_hex().to_string()
};
let key = |p: &str| -> String { blake3::hash(p.trim().as_bytes()).to_hex().to_string() };
// Paragraph identity is content-only, so a modification looks like
// delete+add. To avoid silently concatenating two divergent edits
// (which would lose their conflict), check that every base paragraph
@ -444,7 +479,9 @@ impl MergeHandler for ProseHandler {
handler: self.name().into(),
status: MergeStatus::Merged {
content: merged.into_bytes(),
notes: vec![MergeNote { message: "paragraph-level prose merge".into() }],
notes: vec![MergeNote {
message: "paragraph-level prose merge".into(),
}],
},
}
}
@ -487,7 +524,11 @@ fn xml_parse(src: &[u8]) -> Option<Vec<XmlNode>> {
}
Event::End(_) => {
let (name, attrs, children) = stack.pop()?;
let node = XmlNode::Element { name, attrs, children };
let node = XmlNode::Element {
name,
attrs,
children,
};
if let Some(parent) = stack.last_mut() {
parent.2.push(node);
} else {
@ -503,7 +544,11 @@ fn xml_parse(src: &[u8]) -> Option<Vec<XmlNode>> {
attrs.push((k, v));
}
attrs.sort();
let node = XmlNode::Element { name, attrs, children: Vec::new() };
let node = XmlNode::Element {
name,
attrs,
children: Vec::new(),
};
if let Some(parent) = stack.last_mut() {
parent.2.push(node);
} else {
@ -540,7 +585,11 @@ fn write_node(n: &XmlNode, out: &mut String) {
XmlNode::Text(t) => {
out.push_str(&xml_escape_text(t));
}
XmlNode::Element { name, attrs, children } => {
XmlNode::Element {
name,
attrs,
children,
} => {
out.push('<');
out.push_str(name);
for (k, v) in attrs {
@ -570,7 +619,9 @@ fn xml_escape_text(s: &str) -> String {
}
fn xml_escape_attr(s: &str) -> String {
s.replace('&', "&amp;").replace('"', "&quot;").replace('<', "&lt;")
s.replace('&', "&amp;")
.replace('"', "&quot;")
.replace('<', "&lt;")
}
fn merge_xml_children(
@ -604,20 +655,26 @@ fn merge_xml_children(
(None, None, Some(t)) => merged.push(t.clone()),
(Some(_), None, None) => {}
(Some(b), Some(o), None) => {
if b == o { /* deleted in theirs */ } else {
if b == o { /* deleted in theirs */
} else {
merged.push(o.clone());
conflicts.push(ConflictRegion {
description: format!("{path}[{i}]: modified by ours, deleted by theirs"),
base: 0..0, ours: 0..0, theirs: 0..0,
base: 0..0,
ours: 0..0,
theirs: 0..0,
});
}
}
(Some(b), None, Some(t)) => {
if b == t { /* deleted in ours */ } else {
if b == t { /* deleted in ours */
} else {
merged.push(t.clone());
conflicts.push(ConflictRegion {
description: format!("{path}[{i}]: deleted by ours, modified by theirs"),
base: 0..0, ours: 0..0, theirs: 0..0,
base: 0..0,
ours: 0..0,
theirs: 0..0,
});
}
}
@ -627,8 +684,12 @@ fn merge_xml_children(
} else {
merged.push(o.clone());
conflicts.push(ConflictRegion {
description: format!("{path}[{i}]: independently added with different values"),
base: 0..0, ours: 0..0, theirs: 0..0,
description: format!(
"{path}[{i}]: independently added with different values"
),
base: 0..0,
ours: 0..0,
theirs: 0..0,
});
}
}
@ -659,20 +720,35 @@ fn merge_xml_node(
}
match (base, ours, theirs) {
(
XmlNode::Element { name: bn, attrs: ba, children: bc },
XmlNode::Element { name: on, attrs: oa, children: oc },
XmlNode::Element { name: tn, attrs: ta, children: tc },
XmlNode::Element {
name: bn,
attrs: ba,
children: bc,
},
XmlNode::Element {
name: on,
attrs: oa,
children: oc,
},
XmlNode::Element {
name: tn,
attrs: ta,
children: tc,
},
) if bn == on && on == tn => {
// Merge attributes structurally via JSON.
let to_obj = |v: &[(String, String)]| -> Value {
let mut m = serde_json::Map::new();
for (k, val) in v { m.insert(k.clone(), Value::String(val.clone())); }
for (k, val) in v {
m.insert(k.clone(), Value::String(val.clone()));
}
Value::Object(m)
};
let (am, ac) = merge_value(&to_obj(ba), &to_obj(oa), &to_obj(ta), &format!("{path}.@"));
let attrs: Vec<(String, String)> = match am {
Value::Object(m) => {
let mut v: Vec<_> = m.into_iter()
let mut v: Vec<_> = m
.into_iter()
.map(|(k, val)| (k, val.as_str().unwrap_or("").to_string()))
.collect();
v.sort();
@ -684,7 +760,11 @@ fn merge_xml_node(
let mut conflicts = ac;
conflicts.extend(cc);
(
XmlNode::Element { name: on.clone(), attrs, children: cm },
XmlNode::Element {
name: on.clone(),
attrs,
children: cm,
},
conflicts,
)
}
@ -694,7 +774,9 @@ fn merge_xml_node(
ours.clone(),
vec![ConflictRegion {
description: format!("{path}: structural mismatch between ours and theirs"),
base: 0..0, ours: 0..0, theirs: 0..0,
base: 0..0,
ours: 0..0,
theirs: 0..0,
}],
)
}
@ -702,7 +784,9 @@ fn merge_xml_node(
}
impl MergeHandler for XmlHandler {
fn name(&self) -> &str { "xml" }
fn name(&self) -> &str {
"xml"
}
fn applicable(&self, path: &Path, _b: &[u8], _o: &[u8], _t: &[u8]) -> bool {
matches!(
@ -727,12 +811,20 @@ impl MergeHandler for XmlHandler {
let status = if conflicts.is_empty() {
MergeStatus::Merged {
content: bytes,
notes: vec![MergeNote { message: "structural XML merge".into() }],
notes: vec![MergeNote {
message: "structural XML merge".into(),
}],
}
} else {
MergeStatus::Conflict { regions: conflicts, partial: bytes }
MergeStatus::Conflict {
regions: conflicts,
partial: bytes,
}
};
MergeResult { handler: self.name().into(), status }
MergeResult {
handler: self.name().into(),
status,
}
}
}

View File

@ -14,8 +14,14 @@ pub struct ConflictRegion {
#[derive(Clone, Debug)]
pub enum MergeStatus {
Merged { content: Vec<u8>, notes: Vec<MergeNote> },
Conflict { regions: Vec<ConflictRegion>, partial: Vec<u8> },
Merged {
content: Vec<u8>,
notes: Vec<MergeNote>,
},
Conflict {
regions: Vec<ConflictRegion>,
partial: Vec<u8>,
},
NotApplicable,
}

View File

@ -5,16 +5,16 @@
//! file path / extension, applies the highest-priority applicable handler,
//! and falls through on `NotApplicable`.
pub mod handler;
pub mod engine;
pub mod textual;
pub mod format;
pub mod format_extra;
pub mod handler;
pub mod plugin;
pub mod record;
pub mod textual;
pub mod tree_sitter_handler;
pub use handler::{ConflictRegion, MergeHandler, MergeResult, MergeNote, MergeStatus};
pub use engine::{CascadeEngine, MergeConfig, MergeRule};
pub use record::{MergeRecord, FileRecord, FileStatus};
pub use handler::{ConflictRegion, MergeHandler, MergeNote, MergeResult, MergeStatus};
pub use record::{FileRecord, FileStatus, MergeRecord};
pub use tree_sitter_handler::{Lang, TreeSitterHandler};

View File

@ -100,10 +100,17 @@ impl PluginHandler {
Engine::new(&config).map_err(|e| PluginError::Other(format!("engine: {e}")))?;
let module = Module::new(&engine, wasm_bytes)
.map_err(|e| PluginError::Other(format!("compile: {e}")))?;
Ok(Self { name: cfg.name, hash: cfg.hash, engine, module })
Ok(Self {
name: cfg.name,
hash: cfg.hash,
engine,
module,
})
}
pub fn hash(&self) -> &[u8; 32] { &self.hash }
pub fn hash(&self) -> &[u8; 32] {
&self.hash
}
fn run_merge(
&self,
@ -209,7 +216,10 @@ impl PluginHandler {
.read(store.as_context(), ptr, &mut buf)
.map_err(|e| PluginError::Other(format!("read: {e}")))?;
}
Ok(PluginOutput { conflict, bytes: buf })
Ok(PluginOutput {
conflict,
bytes: buf,
})
}
}
@ -220,7 +230,9 @@ struct PluginOutput {
}
impl MergeHandler for PluginHandler {
fn name(&self) -> &str { &self.name }
fn name(&self) -> &str {
&self.name
}
fn applicable(&self, _path: &Path, _b: &[u8], _o: &[u8], _t: &[u8]) -> bool {
// Selection is handled by config rules, not by extension; the engine
@ -272,7 +284,10 @@ impl MergeHandler for PluginHandler {
pub enum PluginError {
#[error("plugin hash mismatch: expected blake3:{} got blake3:{}",
hex_encode(.expected), hex_encode(.actual))]
HashMismatch { expected: [u8; 32], actual: [u8; 32] },
HashMismatch {
expected: [u8; 32],
actual: [u8; 32],
},
#[error("plugin missing required export: {0}")]
MissingExport(&'static str),
#[error("plugin trapped: {0}")]
@ -363,7 +378,10 @@ mod tests {
fn plugin_returns_ours_unchanged() {
let (bytes, hash) = build(RETURN_OURS_WAT);
let h = PluginHandler::new(
PluginConfig { name: "test:return_ours".into(), hash },
PluginConfig {
name: "test:return_ours".into(),
hash,
},
&bytes,
)
.unwrap();
@ -378,7 +396,10 @@ mod tests {
fn plugin_conflict_bit_produces_conflict_status() {
let (bytes, hash) = build(CONFLICT_WAT);
let h = PluginHandler::new(
PluginConfig { name: "test:always_conflict".into(), hash },
PluginConfig {
name: "test:always_conflict".into(),
hash,
},
&bytes,
)
.unwrap();
@ -391,7 +412,10 @@ mod tests {
let (bytes, _real) = build(RETURN_OURS_WAT);
let bad_hash = [0u8; 32];
let err = PluginHandler::new(
PluginConfig { name: "test:bad_hash".into(), hash: bad_hash },
PluginConfig {
name: "test:bad_hash".into(),
hash: bad_hash,
},
&bytes,
)
.err()
@ -409,7 +433,12 @@ mod tests {
config.epoch_interruption(true);
let engine = Engine::new(&config).unwrap();
let module = Module::new(&engine, &bytes).unwrap();
let h = PluginHandler { name: "test:loop".into(), hash, engine, module };
let h = PluginHandler {
name: "test:loop".into(),
hash,
engine,
module,
};
// Spawn a fast bumper rather than waiting the full 10s.
let engine_clone = h.engine.clone();

View File

@ -155,7 +155,11 @@ fn merge_patches(
let resolved = resolve_group(base_pos, end, base_lines, &group_o, &group_t);
match resolved {
Resolution::Applied(lines) => output.extend(lines),
Resolution::Conflict { ours_lines, theirs_lines, base_range } => {
Resolution::Conflict {
ours_lines,
theirs_lines,
base_range,
} => {
let ours_start = output.len();
output.extend(conflict_marker_ours());
output.extend(ours_lines.clone());
@ -238,12 +242,24 @@ fn resolve_group(
}
}
fn conflict_marker_ours() -> Vec<String> { vec!["<<<<<<< ours\n".to_string()] }
fn conflict_marker_base() -> Vec<String> { vec!["||||||| base\n".to_string()] }
fn conflict_marker_theirs() -> Vec<String> { vec!["=======\n".to_string()] }
fn conflict_marker_end() -> Vec<String> { vec![">>>>>>> theirs\n".to_string()] }
fn conflict_marker_ours() -> Vec<String> {
vec!["<<<<<<< ours\n".to_string()]
}
fn conflict_marker_base() -> Vec<String> {
vec!["||||||| base\n".to_string()]
}
fn conflict_marker_theirs() -> Vec<String> {
vec!["=======\n".to_string()]
}
fn conflict_marker_end() -> Vec<String> {
vec![">>>>>>> theirs\n".to_string()]
}
pub fn three_way_merge_lines(base: &str, ours: &str, theirs: &str) -> (String, Vec<ConflictRegion>) {
pub fn three_way_merge_lines(
base: &str,
ours: &str,
theirs: &str,
) -> (String, Vec<ConflictRegion>) {
let base_lines = split_lines_keep(base);
let ours_lines = split_lines_keep(ours);
let theirs_lines = split_lines_keep(theirs);
@ -267,13 +283,17 @@ trait MapFirst<A, B> {
}
impl<S: Into<String>, B> MapFirst<S, B> for (S, B) {
fn map_first(self) -> (String, B) { (self.0.into(), self.1) }
fn map_first(self) -> (String, B) {
(self.0.into(), self.1)
}
}
pub struct TextualHandler;
impl MergeHandler for TextualHandler {
fn name(&self) -> &str { "textual" }
fn name(&self) -> &str {
"textual"
}
fn applicable(&self, _path: &Path, base: &[u8], ours: &[u8], theirs: &[u8]) -> bool {
// Only apply to anything that's valid UTF-8 — we refuse to do
@ -301,7 +321,10 @@ impl MergeHandler for TextualHandler {
partial: merged.into_bytes(),
}
};
MergeResult { handler: self.name().into(), status }
MergeResult {
handler: self.name().into(),
status,
}
}
}

View File

@ -145,7 +145,9 @@ pub struct TreeSitterHandler {
}
impl TreeSitterHandler {
pub fn new(lang: Lang) -> Self { Self { lang } }
pub fn new(lang: Lang) -> Self {
Self { lang }
}
fn parse(&self, src: &[u8]) -> Option<Tree> {
let mut p = Parser::new();
@ -240,7 +242,12 @@ fn block_from_node(node: Node, src: &[u8]) -> Block {
Some(n) => BlockKey::Named(kind.clone(), n),
None => BlockKey::Anon(kind.clone(), *blake3::hash(&text).as_bytes()),
};
Block { kind, key, range, text }
Block {
kind,
key,
range,
text,
}
}
fn top_level_blocks(tree: &Tree, src: &[u8]) -> Vec<Block> {
@ -379,7 +386,13 @@ fn merge_blocks_inner(
output.push(o.text.clone());
} else {
had_conflict = true;
conflicts.push(make_region(&o.kind, "concurrent additions diverge", None, Some(o), Some(tb)));
conflicts.push(make_region(
&o.kind,
"concurrent additions diverge",
None,
Some(o),
Some(tb),
));
output.push(conflict_marker_block(&o.text, &tb.text));
}
}
@ -388,7 +401,13 @@ fn merge_blocks_inner(
// ours unchanged, theirs deleted → honour deletion
} else {
had_conflict = true;
conflicts.push(make_region(&o.kind, "modify-vs-delete", Some(bb), Some(o), None));
conflicts.push(make_region(
&o.kind,
"modify-vs-delete",
Some(bb),
Some(o),
None,
));
output.push(conflict_marker_block(&o.text, &[]));
notes.push(MergeNote {
message: format!("{}: modified by ours, deleted by theirs", o.kind),
@ -450,7 +469,13 @@ fn merge_blocks_inner(
// theirs unchanged, ours deleted → honour deletion
} else {
had_conflict = true;
conflicts.push(make_region(&t.kind, "delete-vs-modify", Some(bb), None, Some(t)));
conflicts.push(make_region(
&t.kind,
"delete-vs-modify",
Some(bb),
None,
Some(t),
));
output.push(conflict_marker_block(&[], &t.text));
notes.push(MergeNote {
message: format!("{}: deleted by ours, modified by theirs", t.kind),
@ -460,7 +485,12 @@ fn merge_blocks_inner(
}
}
InnerMerge { blocks: output, conflicts, notes, had_conflict }
InnerMerge {
blocks: output,
conflicts,
notes,
had_conflict,
}
}
/// Try to merge a single conflicted block by descending into its body and
@ -525,7 +555,12 @@ fn try_recursive_clean(
// Recursion only buys something when at least one of the inner blocks
// has a recoverable identity. If everything is anonymous, body diffs
// collapse to text-level and we'd match arbitrary content together.
if bc.iter().chain(&oc).chain(&tc).all(|b| matches!(b.key, BlockKey::Anon(..))) {
if bc
.iter()
.chain(&oc)
.chain(&tc)
.all(|b| matches!(b.key, BlockKey::Anon(..)))
{
return None;
}
@ -571,11 +606,7 @@ fn body_prefix_suffix<'a>(
(&body_text[..first_rel], &body_text[last_rel..])
}
fn inter_child_separator(
body_text: &[u8],
children: &[Block],
body_start_abs: usize,
) -> Vec<u8> {
fn inter_child_separator(body_text: &[u8], children: &[Block], body_start_abs: usize) -> Vec<u8> {
if children.len() < 2 {
return b"\n\n".to_vec();
}
@ -593,7 +624,9 @@ fn strip_trailing_newline(b: &[u8]) -> Vec<u8> {
}
impl MergeHandler for TreeSitterHandler {
fn name(&self) -> &str { self.lang.handler_name() }
fn name(&self) -> &str {
self.lang.handler_name()
}
fn applicable(&self, _path: &Path, _base: &[u8], _ours: &[u8], _theirs: &[u8]) -> bool {
// Applicability is decided in `merge`; if any input fails to parse
@ -660,7 +693,12 @@ mod tests {
fn run(lang: Lang, base: &str, ours: &str, theirs: &str) -> MergeResult {
let h = TreeSitterHandler::new(lang);
h.merge(Path::new("file"), base.as_bytes(), ours.as_bytes(), theirs.as_bytes())
h.merge(
Path::new("file"),
base.as_bytes(),
ours.as_bytes(),
theirs.as_bytes(),
)
}
#[test]
@ -776,7 +814,9 @@ mod tests {
let result = run(Lang::Rust, base, ours, theirs);
match result.status {
MergeStatus::Conflict { regions, .. } => {
assert!(regions.iter().any(|r| r.description.contains("modify-vs-delete")));
assert!(regions
.iter()
.any(|r| r.description.contains("modify-vs-delete")));
}
other => panic!("expected Conflict, got {other:?}"),
}
@ -801,7 +841,9 @@ mod tests {
assert!(s.contains("fn c("), "must keep theirs-side addition: {s}");
assert!(s.contains("impl Foo"), "must preserve outer header");
assert!(
notes.iter().any(|n| n.message.contains("recursive descent")),
notes
.iter()
.any(|n| n.message.contains("recursive descent")),
"merge note must record that recursion fired: {notes:?}"
);
}

View File

@ -113,8 +113,8 @@ fn collect_scenarios() -> Vec<PathBuf> {
fn run_scenario(dir: &Path) -> Result<(), String> {
let manifest_text = std::fs::read_to_string(dir.join("manifest.toml"))
.map_err(|e| format!("read manifest: {e}"))?;
let manifest: Manifest = toml::from_str(&manifest_text)
.map_err(|e| format!("parse manifest: {e}"))?;
let manifest: Manifest =
toml::from_str(&manifest_text).map_err(|e| format!("parse manifest: {e}"))?;
let ext = manifest
.input_ext
.clone()
@ -174,8 +174,7 @@ fn run_scenario(dir: &Path) -> Result<(), String> {
}
for needle in &manifest.expected.notes_contain {
if !notes.iter().any(|n| n.message.contains(needle)) {
let messages: Vec<&str> =
notes.iter().map(|n| n.message.as_str()).collect();
let messages: Vec<&str> = notes.iter().map(|n| n.message.as_str()).collect();
return Err(format!(
"[{}] expected a note containing {:?}, saw {:?}",
manifest.description, needle, messages
@ -185,10 +184,7 @@ fn run_scenario(dir: &Path) -> Result<(), String> {
// Sanity check: a merged outcome must not carry conflict markers.
// A handler that wrote markers but reported Merged would silently
// smuggle conflicts past CI.
if s.contains("<<<<<<< ours")
|| s.contains("=======")
|| s.contains(">>>>>>> theirs")
{
if s.contains("<<<<<<< ours") || s.contains("=======") || s.contains(">>>>>>> theirs") {
return Err(format!(
"[{}] merged outcome contains conflict markers — handler {:?} is buggy\n--- output ---\n{s}\n",
manifest.description, result.handler
@ -214,8 +210,7 @@ fn run_scenario(dir: &Path) -> Result<(), String> {
}
for needle in &manifest.expected.region_descriptions_contain {
if !regions.iter().any(|r| r.description.contains(needle)) {
let descs: Vec<&str> =
regions.iter().map(|r| r.description.as_str()).collect();
let descs: Vec<&str> = regions.iter().map(|r| r.description.as_str()).collect();
return Err(format!(
"[{}] expected a region with description containing {:?}, saw {:?}",
manifest.description, needle, descs

View File

@ -15,7 +15,9 @@ fn lcg_bytes(seed: u64, n: usize) -> Vec<u8> {
let mut s = seed;
(0..n)
.map(|_| {
s = s.wrapping_mul(6364136223846793005).wrapping_add(1442695040888963407);
s = s
.wrapping_mul(6364136223846793005)
.wrapping_add(1442695040888963407);
(s >> 33) as u8
})
.collect()
@ -80,9 +82,11 @@ fn bench_decode(c: &mut Criterion) {
let pk = random_pack(0xa5a5_a5a5, count, size);
let bytes = pk.encode();
g.throughput(Throughput::Bytes(bytes.len() as u64));
g.bench_with_input(BenchmarkId::new("incompressible", label), &bytes, |b, bytes| {
b.iter(|| black_box(Pack::decode(bytes).unwrap()))
});
g.bench_with_input(
BenchmarkId::new("incompressible", label),
&bytes,
|b, bytes| b.iter(|| black_box(Pack::decode(bytes).unwrap())),
);
}
{
let pk = delta_friendly_pack(0x5a5a_5a5a, 100, 1024);

View File

@ -108,12 +108,13 @@ pub fn verify_request(
skew_seconds: i64,
) -> Result<AuthHeaders, AuthError> {
let pk = PublicKey::parse_levcs(key_header).map_err(AuthError::Identity)?;
let timestamp_micros: i64 = timestamp_header
.parse()
.map_err(|e: std::num::ParseIntError| AuthError::InvalidHeader {
name: "LeVCS-Timestamp",
detail: e.to_string(),
})?;
let timestamp_micros: i64 =
timestamp_header
.parse()
.map_err(|e: std::num::ParseIntError| AuthError::InvalidHeader {
name: "LeVCS-Timestamp",
detail: e.to_string(),
})?;
let skew = (now_micros - timestamp_micros) / 1_000_000;
if skew.abs() > skew_seconds {
return Err(AuthError::Skew(skew));
@ -161,8 +162,10 @@ pub fn verify_request(
}
fn getrandom_bytes(buf: &mut [u8]) -> Result<(), AuthError> {
getrandom::getrandom(buf)
.map_err(|e| AuthError::InvalidHeader { name: "LeVCS-Nonce", detail: e.to_string() })
getrandom::getrandom(buf).map_err(|e| AuthError::InvalidHeader {
name: "LeVCS-Nonce",
detail: e.to_string(),
})
}
pub fn current_micros() -> i64 {
@ -202,7 +205,11 @@ mod tests {
body,
};
let (key, ts, nonce, sig) = sign_request(&sk, &req).unwrap();
let req2 = AuthRequest { method: "POST", path_with_query: "/levcs/v1/repos/abc/push", body: b"bogus" };
let req2 = AuthRequest {
method: "POST",
path_with_query: "/levcs/v1/repos/abc/push",
body: b"bogus",
};
let now = current_micros();
assert!(verify_request(&req2, &key, &ts, &nonce, &sig, now, DEFAULT_CLOCK_SKEW).is_err());
}

View File

@ -1,15 +1,13 @@
//! levcs-protocol: wire types, request signing, and pack-file framing.
pub mod auth;
pub mod wire;
pub mod pack;
pub mod p2p;
pub mod pack;
pub mod wire;
pub use auth::{
sign_request, verify_request, build_canonical, AuthError, AuthHeaders, AuthRequest,
build_canonical, sign_request, verify_request, AuthError, AuthHeaders, AuthRequest,
DEFAULT_CLOCK_SKEW, NONCE_TTL_SECS,
};
pub use wire::{
InfoResponse, InstanceInfo, PushManifest, PushUpdate, RefList, RefsResponse,
};
pub use pack::{Pack, PackEntry, PACK_MAGIC, PACK_VERSION};
pub use wire::{InfoResponse, InstanceInfo, PushManifest, PushUpdate, RefList, RefsResponse};

View File

@ -182,11 +182,15 @@ pub fn handshake_dial<S: Read + Write>(
return Err(P2pError::PeerError(decode_error(&payload)));
}
if tag != TAG_HELLO_ACK {
return Err(P2pError::UnexpectedTag { got: tag, expected: TAG_HELLO_ACK });
return Err(P2pError::UnexpectedTag {
got: tag,
expected: TAG_HELLO_ACK,
});
}
if payload.len() != 32 + 32 + 64 {
return Err(P2pError::Malformed(format!(
"HelloAck payload length {} (want 128)", payload.len()
"HelloAck payload length {} (want 128)",
payload.len()
)));
}
let mut peer_pub_bytes = [0u8; 32];
@ -221,10 +225,19 @@ pub fn handshake_dial<S: Read + Write>(
match tag {
TAG_OK => {}
TAG_ERROR => return Err(P2pError::PeerError(decode_error(&payload))),
other => return Err(P2pError::UnexpectedTag { got: other, expected: TAG_OK }),
other => {
return Err(P2pError::UnexpectedTag {
got: other,
expected: TAG_OK,
})
}
}
Ok(Session { stream, peer_key: peer_pub, transcript })
Ok(Session {
stream,
peer_key: peer_pub,
transcript,
})
}
/// Run the deployer (sender) side of the handshake.
@ -242,11 +255,15 @@ pub fn handshake_listen<S: Read + Write>(
// 1 ← Hello: dialer's pub + challenge.
let (tag, payload) = read_frame(&mut stream)?;
if tag != TAG_HELLO {
return Err(P2pError::UnexpectedTag { got: tag, expected: TAG_HELLO });
return Err(P2pError::UnexpectedTag {
got: tag,
expected: TAG_HELLO,
});
}
if payload.len() != 64 {
return Err(P2pError::Malformed(format!(
"Hello payload length {} (want 64)", payload.len()
"Hello payload length {} (want 64)",
payload.len()
)));
}
let mut peer_pub_bytes = [0u8; 32];
@ -286,11 +303,15 @@ pub fn handshake_listen<S: Read + Write>(
return Err(P2pError::PeerError(decode_error(&payload)));
}
if tag != TAG_AUTH {
return Err(P2pError::UnexpectedTag { got: tag, expected: TAG_AUTH });
return Err(P2pError::UnexpectedTag {
got: tag,
expected: TAG_AUTH,
});
}
if payload.len() != 64 {
return Err(P2pError::Malformed(format!(
"Auth payload length {} (want 64)", payload.len()
"Auth payload length {} (want 64)",
payload.len()
)));
}
let mut peer_sig = [0u8; 64];
@ -302,7 +323,11 @@ pub fn handshake_listen<S: Read + Write>(
// 4 → Ok.
write_frame(&mut stream, TAG_OK, &[])?;
Ok(Session { stream, peer_key: peer_pub, transcript })
Ok(Session {
stream,
peer_key: peer_pub,
transcript,
})
}
impl<S: Write> Session<S> {
@ -331,7 +356,10 @@ impl<S: Read> Session<S> {
return Err(P2pError::PeerError(decode_error(&payload)));
}
if tag != TAG_MANIFEST {
return Err(P2pError::UnexpectedTag { got: tag, expected: TAG_MANIFEST });
return Err(P2pError::UnexpectedTag {
got: tag,
expected: TAG_MANIFEST,
});
}
Ok(serde_json::from_slice(&payload)?)
}
@ -342,7 +370,10 @@ impl<S: Read> Session<S> {
return Err(P2pError::PeerError(decode_error(&payload)));
}
if tag != TAG_PACK {
return Err(P2pError::UnexpectedTag { got: tag, expected: TAG_PACK });
return Err(P2pError::UnexpectedTag {
got: tag,
expected: TAG_PACK,
});
}
Ok(payload)
}
@ -352,7 +383,10 @@ impl<S: Read> Session<S> {
match tag {
TAG_DONE => Ok(()),
TAG_ERROR => Err(P2pError::PeerError(decode_error(&payload))),
other => Err(P2pError::UnexpectedTag { got: other, expected: TAG_DONE }),
other => Err(P2pError::UnexpectedTag {
got: other,
expected: TAG_DONE,
}),
}
}
}
@ -375,7 +409,10 @@ fn compute_transcript(
/// Write one P2P frame. Exposed so external transports and fuzzers can
/// drive the codec without going through a `Session`.
pub fn write_frame<W: Write>(w: &mut W, tag: u8, payload: &[u8]) -> Result<(), P2pError> {
let total = payload.len().checked_add(1).ok_or(P2pError::FrameTooLarge(usize::MAX))?;
let total = payload
.len()
.checked_add(1)
.ok_or(P2pError::FrameTooLarge(usize::MAX))?;
if total > MAX_FRAME_BYTES {
return Err(P2pError::FrameTooLarge(total));
}
@ -415,8 +452,7 @@ fn decode_error(payload: &[u8]) -> String {
}
fn fill_random(buf: &mut [u8]) -> Result<(), P2pError> {
getrandom::getrandom(buf)
.map_err(|e| P2pError::Malformed(format!("getrandom: {e}")))
getrandom::getrandom(buf).map_err(|e| P2pError::Malformed(format!("getrandom: {e}")))
}
#[cfg(test)]
@ -457,13 +493,18 @@ mod tests {
q.extend(buf);
Ok(buf.len())
}
fn flush(&mut self) -> std::io::Result<()> { Ok(()) }
fn flush(&mut self) -> std::io::Result<()> {
Ok(())
}
}
fn duplex() -> (DuplexEnd, DuplexEnd) {
let a = std::sync::Arc::new(std::sync::Mutex::new(std::collections::VecDeque::new()));
let b = std::sync::Arc::new(std::sync::Mutex::new(std::collections::VecDeque::new()));
(
DuplexEnd { read: a.clone(), write: b.clone() },
DuplexEnd {
read: a.clone(),
write: b.clone(),
},
DuplexEnd { read: b, write: a },
)
}
@ -500,8 +541,7 @@ mod tests {
let (dialer_end, deployer_end) = duplex();
let deployer_handle = std::thread::spawn(move || {
handshake_listen(deployer_end, &deployer_sk, &dialer_pub)
.map(|s| s.peer_key)
handshake_listen(deployer_end, &deployer_sk, &dialer_pub).map(|s| s.peer_key)
});
let dialer_session = handshake_dial(dialer_end, &dialer_sk, &deployer_pub).unwrap();
let deployer_peer = deployer_handle.join().unwrap().unwrap();
@ -541,9 +581,8 @@ mod tests {
let deployer_pub = deployer_sk.public();
let (dialer_end, deployer_end) = duplex();
let deployer_handle = std::thread::spawn(move || {
handshake_listen(deployer_end, &deployer_sk, &imposter_pub)
});
let deployer_handle =
std::thread::spawn(move || handshake_listen(deployer_end, &deployer_sk, &imposter_pub));
// Dialer presents itself with its real key; deployer is
// expecting the imposter's key, so must reject.
let _ = handshake_dial(dialer_end, &dialer_sk, &deployer_pub);

View File

@ -70,7 +70,9 @@ pub struct Pack {
}
impl Pack {
pub fn new() -> Self { Self::default() }
pub fn new() -> Self {
Self::default()
}
pub fn push(&mut self, object_type: u8, bytes: Vec<u8>) {
self.entries.push(PackEntry { object_type, bytes });
@ -78,7 +80,11 @@ impl Pack {
pub fn encode(&self) -> Vec<u8> {
let mut out = Vec::with_capacity(
16 + self.entries.iter().map(|e| 10 + e.bytes.len()).sum::<usize>(),
16 + self
.entries
.iter()
.map(|e| 10 + e.bytes.len())
.sum::<usize>(),
);
out.extend_from_slice(&PACK_MAGIC);
let mut v = [0u8; 4];
@ -277,7 +283,10 @@ impl Pack {
// is a valid base.
let hash = *blake3::hash(&data).as_bytes();
idx_by_hash.entry(hash).or_insert(entries.len());
entries.push(PackEntry { object_type, bytes: data });
entries.push(PackEntry {
object_type,
bytes: data,
});
}
Ok((Self { entries }, p))
}
@ -295,7 +304,9 @@ mod tests {
let mut s = seed;
(0..n)
.map(|_| {
s = s.wrapping_mul(6364136223846793005).wrapping_add(1442695040888963407);
s = s
.wrapping_mul(6364136223846793005)
.wrapping_add(1442695040888963407);
(s >> 33) as u8
})
.collect()

View File

@ -207,7 +207,9 @@ fn pack_decode_survives_mutation_of_valid_pack() {
0 => {
let flips = (lcg(&mut seed) % 4 + 1) as usize;
for _ in 0..flips {
if buf.is_empty() { break; }
if buf.is_empty() {
break;
}
let idx = (lcg(&mut seed) as usize) % buf.len();
let bit = (lcg(&mut seed) % 8) as u8;
buf[idx] ^= 1 << bit;

View File

@ -79,10 +79,7 @@ pub fn run_editor_on(initial: &[u8], path_hint: &Path) -> Result<EditOutcome, Ed
let tmp = make_tempfile(path_hint)?;
std::fs::write(&tmp, initial)?;
let status = Command::new(prog)
.args(&leading)
.arg(&tmp)
.status()?;
let status = Command::new(prog).args(&leading).arg(&tmp).status()?;
if !status.success() {
let _ = std::fs::remove_file(&tmp);
return Err(EditError::EditorFailed(status.code().unwrap_or(-1)));
@ -109,7 +106,10 @@ fn make_tempfile(hint: &Path) -> std::io::Result<PathBuf> {
.file_name()
.and_then(|s| s.to_str())
.unwrap_or("buffer");
p.push(format!("levcs-review-{}-{nanos}-{base}", std::process::id()));
p.push(format!(
"levcs-review-{}-{nanos}-{base}",
std::process::id()
));
Ok(p)
}

View File

@ -185,7 +185,11 @@ fn draw(frame: &mut ratatui::Frame, state: &ReviewState) {
let area = frame.area();
let outer = Layout::default()
.direction(Direction::Vertical)
.constraints([Constraint::Length(1), Constraint::Min(3), Constraint::Length(2)])
.constraints([
Constraint::Length(1),
Constraint::Min(3),
Constraint::Length(2),
])
.split(area);
draw_title(frame, outer[0], state);
@ -200,7 +204,11 @@ fn draw_title(frame: &mut ratatui::Frame, area: Rect, state: &ReviewState) {
.iter()
.filter(|f| matches!(f.status, MergeStatus::Conflict { .. }))
.count();
let mode = if state.read_only { "--explain" } else { "--review" };
let mode = if state.read_only {
"--explain"
} else {
"--review"
};
let title = format!(" levcs merge {mode} {total} file(s), {conflicted} with conflicts ");
frame.render_widget(
Paragraph::new(title)
@ -223,8 +231,20 @@ fn draw_main(frame: &mut ratatui::Frame, area: Rect, state: &ReviewState) {
draw_file_list(frame, cols[0], state);
if let Some(file) = state.current_file() {
let region = file.conflict_regions().get(state.selected_region);
draw_pane(frame, cols[1], "ours", &file.ours, region.map(|r| r.ours.clone()));
draw_pane(frame, cols[2], "base", &file.base, region.map(|r| r.base.clone()));
draw_pane(
frame,
cols[1],
"ours",
&file.ours,
region.map(|r| r.ours.clone()),
);
draw_pane(
frame,
cols[2],
"base",
&file.base,
region.map(|r| r.base.clone()),
);
draw_pane(
frame,
cols[3],
@ -317,7 +337,11 @@ fn draw_pane(
}
let p = Paragraph::new(lines)
.block(Block::default().borders(Borders::ALL).title(title.to_string()))
.block(
Block::default()
.borders(Borders::ALL)
.title(title.to_string()),
)
.wrap(Wrap { trim: false });
frame.render_widget(p, area);
}
@ -423,9 +447,15 @@ mod key_tests {
FileEntry {
path: path.into(),
status: if regions == 0 {
MergeStatus::Merged { content: vec![], notes: vec![] }
MergeStatus::Merged {
content: vec![],
notes: vec![],
}
} else {
MergeStatus::Conflict { regions: regs, partial: vec![] }
MergeStatus::Conflict {
regions: regs,
partial: vec![],
}
},
current: vec![],
ours: vec![],

View File

@ -122,7 +122,10 @@ impl ReviewState {
}
pub fn next_region(&mut self) {
let n = self.current_file().map(|f| f.conflict_regions().len()).unwrap_or(0);
let n = self
.current_file()
.map(|f| f.conflict_regions().len())
.unwrap_or(0);
if n > 0 && self.selected_region + 1 < n {
self.selected_region += 1;
}
@ -154,11 +157,21 @@ impl ReviewState {
}
}
pub fn accept_ours(&mut self) { self.set_current_resolution(Resolution::AcceptOurs); }
pub fn accept_theirs(&mut self) { self.set_current_resolution(Resolution::AcceptTheirs); }
pub fn keep_current(&mut self) { self.set_current_resolution(Resolution::KeepCurrent); }
pub fn skip(&mut self) { self.set_current_resolution(Resolution::Skip); }
pub fn quit(&mut self) { self.quitting = true; }
pub fn accept_ours(&mut self) {
self.set_current_resolution(Resolution::AcceptOurs);
}
pub fn accept_theirs(&mut self) {
self.set_current_resolution(Resolution::AcceptTheirs);
}
pub fn keep_current(&mut self) {
self.set_current_resolution(Resolution::KeepCurrent);
}
pub fn skip(&mut self) {
self.set_current_resolution(Resolution::Skip);
}
pub fn quit(&mut self) {
self.quitting = true;
}
/// Walk every file and write the chosen bytes. Returns
/// `(written, skipped)`. Errors short-circuit — partial writes are
@ -170,7 +183,10 @@ impl ReviewState {
// count every file as skipped so the caller can still report
// the totals it expects.
if self.read_only {
return Ok(ApplyReport { written: 0, skipped: self.files.len() });
return Ok(ApplyReport {
written: 0,
skipped: self.files.len(),
});
}
let mut written = 0usize;
let mut skipped = 0usize;
@ -230,10 +246,12 @@ fn write_file(target: &Path, bytes: &[u8]) -> std::io::Result<()> {
t.set_file_name(name);
t
}
None => return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
"target has no file name",
)),
None => {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
"target has no file name",
))
}
};
std::fs::write(&tmp, bytes)?;
std::fs::rename(&tmp, target)?;
@ -249,15 +267,30 @@ mod tests {
let conflict_regions: Vec<ConflictRegion> = (0..regions)
.map(|i| ConflictRegion {
description: format!("region {i}"),
base: Range { start: i * 10, end: i * 10 + 5 },
ours: Range { start: i * 10, end: i * 10 + 7 },
theirs: Range { start: i * 10, end: i * 10 + 6 },
base: Range {
start: i * 10,
end: i * 10 + 5,
},
ours: Range {
start: i * 10,
end: i * 10 + 7,
},
theirs: Range {
start: i * 10,
end: i * 10 + 6,
},
})
.collect();
let status = if regions == 0 {
MergeStatus::Merged { content: b"ok".to_vec(), notes: vec![] }
MergeStatus::Merged {
content: b"ok".to_vec(),
notes: vec![],
}
} else {
MergeStatus::Conflict { regions: conflict_regions, partial: vec![] }
MergeStatus::Conflict {
regions: conflict_regions,
partial: vec![],
}
};
FileEntry {
path: path.into(),
@ -359,14 +392,8 @@ mod tests {
assert_eq!(report.written, 2);
assert_eq!(report.skipped, 1);
assert_eq!(
std::fs::read(dir.join("a.txt")).unwrap(),
b"OURS-a.txt"
);
assert_eq!(
std::fs::read(dir.join("b.txt")).unwrap(),
b"THEIRS-b.txt"
);
assert_eq!(std::fs::read(dir.join("a.txt")).unwrap(), b"OURS-a.txt");
assert_eq!(std::fs::read(dir.join("b.txt")).unwrap(), b"THEIRS-b.txt");
// Skipped file untouched.
assert_eq!(std::fs::read(dir.join("c.txt")).unwrap(), b"original-c");
@ -387,7 +414,9 @@ mod tests {
std::fs::write(dir.join("a.txt"), b"original").unwrap();
let mut s = ReviewState::new(vec![entry("a.txt", 1)]);
s.set_current_resolution(Resolution::Edit { bytes: b"hand-edited".to_vec() });
s.set_current_resolution(Resolution::Edit {
bytes: b"hand-edited".to_vec(),
});
let report = s.apply(&dir).unwrap();
assert_eq!(report.written, 1);
assert_eq!(report.skipped, 0);