diff -urN cargo-0.96.0-orig/Cargo.toml cargo-0.96.0/Cargo.toml --- cargo-0.96.0-orig/Cargo.toml 2006-07-24 10:21:28 +0900 +++ cargo-0.96.0/Cargo.toml 2026-05-07 22:50:32 +0900 @@ -41,16 +41,8 @@ core-foundation = { version = "0.10.1", features = ["mac_os_10_7_support"] } crates-io = { version = "0.40.18", path = "crates/crates-io" } criterion = { version = "0.8.1", features = ["html_reports"] } -curl = "0.4.49" -# Do not upgrade curl-sys past 0.4.83 -# https://github.com/rust-lang/cargo/issues/16357 -curl-sys = "=0.4.83" filetime = "0.2.26" flate2 = { version = "1.1.9", default-features = false, features = ["zlib-rs"] } -git2 = "0.20.4" -git2-curl = "0.21.0" -# When updating this, also see if `gix-transport` further down needs updating or some auth-related tests will fail. -gix = { version = "0.79.0", default-features = false, features = ["progress-tree", "parallel", "dirwalk", "status"] } glob = "0.3.3" handlebars = { version = "6.4.0", features = ["dir_source"] } heck = "0.5.0" @@ -65,16 +57,11 @@ jiff = { version = "0.2.17", default-features = false, features = [ "std" ] } jobserver = "0.1.34" libc = "0.2.178" -libgit2-sys = "0.18.3" libloading = "0.9.0" memchr = "2.7.6" memfd = "0.6.5" miow = "0.6.1" opener = "0.8.3" -openssl = "0.10.75" -# Pinned due to ppc64 ELFv1/v2 ABI issue in 3.5.5 -# https://github.com/openssl/openssl/issues/29815 -openssl-src = "=300.5.4" os_info = { version = "3.14.0", default-features = false } pasetors = { version = "0.7.7", features = ["v3", "paserk", "std", "serde"] } pathdiff = "0.2.3" @@ -169,13 +156,8 @@ clap_complete.workspace = true color-print.workspace = true crates-io.workspace = true -curl = { workspace = true, features = ["http2"] } -curl-sys.workspace = true filetime.workspace = true flate2.workspace = true -git2.workspace = true -git2-curl.workspace = true -gix.workspace = true glob.workspace = true heck.workspace = true hex.workspace = true @@ -237,10 +219,6 @@ [target.'cfg(target_os = "macos")'.dependencies] 
cargo-credential-macos-keychain.workspace = true -[target.'cfg(not(windows))'.dependencies] -openssl = { workspace = true, optional = true } -openssl-src = { workspace = true, optional = true } - [target.'cfg(windows)'.dependencies] cargo-credential-wincred.workspace = true @@ -259,10 +237,6 @@ [dev-dependencies] annotate-snippets = { workspace = true, features = ["testing-colors"] } cargo-test-support.workspace = true -gix = { workspace = true, features = ["revision"] } -# When building Cargo for tests, a safety-measure in `gix` needs to be disabled -# to allow sending credentials over HTTP connections. -gix-transport = { version = "0.54.0", features = ["http-client-insecure-credentials"] } same-file.workspace = true snapbox.workspace = true @@ -279,15 +253,8 @@ doc = false [features] -default = ["http-transport-curl"] -vendored-openssl = ["openssl/vendored"] -vendored-libgit2 = ["libgit2-sys/vendored"] -# This is primarily used by rust-lang/rust distributing cargo the executable. -all-static = ['vendored-openssl', 'curl/static-curl', 'curl/force-system-lib-on-osx', 'vendored-libgit2'] -# Exactly one of 'http-transport-curl' or 'http-transport-reqwest' must be enabled -# when using Cargo as a library. By default, it is 'http-transport-curl'. 
-http-transport-curl = ["gix/blocking-http-transport-curl"] -http-transport-reqwest = ["gix/blocking-http-transport-reqwest"] +default = [] +vendored-openssl = [] [lints] workspace = true diff -urN cargo-0.96.0-orig/src/bin/cargo/cli.rs cargo-0.96.0/src/bin/cargo/cli.rs --- cargo-0.96.0-orig/src/bin/cargo/cli.rs 2006-07-24 10:21:28 +0900 +++ cargo-0.96.0/src/bin/cargo/cli.rs 2026-05-07 22:47:42 +0900 @@ -214,63 +214,11 @@ version_string.push_str(&format!("commit-date: {}\n", ci.commit_date)); } writeln!(version_string, "host: {}", env!("RUST_HOST_TARGET")).unwrap(); - add_libgit2(&mut version_string); - add_curl(&mut version_string); - add_ssl(&mut version_string); writeln!(version_string, "os: {}", os_info::get()).unwrap(); } version_string } -fn add_libgit2(version_string: &mut String) { - let git2_v = git2::Version::get(); - let lib_v = git2_v.libgit2_version(); - let vendored = if git2_v.vendored() { - format!("vendored") - } else { - format!("system") - }; - writeln!( - version_string, - "libgit2: {}.{}.{} (sys:{} {})", - lib_v.0, - lib_v.1, - lib_v.2, - git2_v.crate_version(), - vendored - ) - .unwrap(); -} - -fn add_curl(version_string: &mut String) { - let curl_v = curl::Version::get(); - let vendored = if curl_v.vendored() { - format!("vendored") - } else { - format!("system") - }; - writeln!( - version_string, - "libcurl: {} (sys:{} {} ssl:{})", - curl_v.version(), - curl_sys::rust_crate_version(), - vendored, - curl_v.ssl_version().unwrap_or("none") - ) - .unwrap(); -} - -fn add_ssl(version_string: &mut String) { - #[cfg(feature = "openssl")] - { - writeln!(version_string, "ssl: {}", openssl::version::version()).unwrap(); - } - #[cfg(not(feature = "openssl"))] - { - let _ = version_string; // Silence unused warning. - } -} - /// Expands aliases recursively to collect all the command line arguments. 
/// /// [`GlobalArgs`] need to be extracted before expanding aliases because the diff -urN cargo-0.96.0-orig/src/bin/cargo/commands/mod.rs cargo-0.96.0/src/bin/cargo/commands/mod.rs --- cargo-0.96.0-orig/src/bin/cargo/commands/mod.rs 2006-07-24 10:21:28 +0900 +++ cargo-0.96.0/src/bin/cargo/commands/mod.rs 2026-05-07 22:47:42 +0900 @@ -9,8 +9,6 @@ clean::cli(), config::cli(), doc::cli(), - fetch::cli(), - fix::cli(), generate_lockfile::cli(), git_checkout::cli(), help::cli(), @@ -18,14 +16,9 @@ init::cli(), install::cli(), locate_project::cli(), - login::cli(), - logout::cli(), metadata::cli(), new::cli(), - owner::cli(), - package::cli(), pkgid::cli(), - publish::cli(), read_manifest::cli(), remove::cli(), report::cli(), @@ -40,7 +33,6 @@ vendor::cli(), verify_project::cli(), version::cli(), - yank::cli(), ] } diff -urN cargo-0.96.0-orig/src/cargo/core/compiler/build_config.rs cargo-0.96.0/src/cargo/core/compiler/build_config.rs --- cargo-0.96.0-orig/src/cargo/core/compiler/build_config.rs 2006-07-24 10:21:28 +0900 +++ cargo-0.96.0/src/cargo/core/compiler/build_config.rs 2026-05-07 22:47:42 +0900 @@ -1,7 +1,7 @@ use crate::core::compiler::CompileKind; use crate::util::context::JobsConfig; use crate::util::interning::InternedString; -use crate::util::{CargoResult, GlobalContext, RustfixDiagnosticServer}; +use crate::util::{CargoResult, GlobalContext}; use anyhow::{Context as _, bail}; use cargo_util::ProcessBuilder; use serde::ser; @@ -33,9 +33,6 @@ pub dry_run: bool, /// An optional override of the rustc process for primary units pub primary_unit_rustc: Option, - /// A thread used by `cargo fix` to receive messages on a socket regarding - /// the success/failure of applying fixes. - pub rustfix_diagnostic_server: Rc>>, /// The directory to copy final artifacts to. Note that even if /// `artifact-dir` is set, a copy of artifacts still can be found at /// `target/(debug\release)` as usual. 
@@ -124,7 +121,6 @@ unit_graph: false, dry_run: false, primary_unit_rustc: None, - rustfix_diagnostic_server: Rc::new(RefCell::new(None)), export_dir: None, future_incompat_report: false, timing_report: false, diff -urN cargo-0.96.0-orig/src/cargo/core/compiler/job_queue/mod.rs cargo-0.96.0/src/cargo/core/compiler/job_queue/mod.rs --- cargo-0.96.0-orig/src/cargo/core/compiler/job_queue/mod.rs 2006-07-24 10:21:28 +0900 +++ cargo-0.96.0/src/cargo/core/compiler/job_queue/mod.rs 2026-05-07 22:47:42 +0900 @@ -146,7 +146,6 @@ use crate::core::{PackageId, Shell, TargetKind}; use crate::util::CargoResult; use crate::util::context::WarningHandling; -use crate::util::diagnostic_server::{self, DiagnosticPrinter}; use crate::util::errors::AlreadyPrintedError; use crate::util::machine_message::{self, Message as _}; use crate::util::{self, internal}; @@ -203,7 +202,6 @@ /// retrieved from the `queue`. We eagerly pull jobs off the main queue to /// allow us to request jobserver tokens pretty early. pending_queue: Vec<(Unit, Job, usize)>, - print: DiagnosticPrinter<'gctx>, /// How many jobs we've finished finished: usize, @@ -379,7 +377,6 @@ warning: String, }, - FixDiagnostic(diagnostic_server::Message), Token(io::Result), Finish(JobId, Artifact, CargoResult<()>), FutureIncompatReport(JobId, Vec), @@ -511,10 +508,6 @@ .collect(), tokens: Vec::new(), pending_queue: Vec::new(), - print: DiagnosticPrinter::new( - build_runner.bcx.gctx, - &build_runner.bcx.rustc().workspace_wrapper, - ), finished: 0, per_package_future_incompat_reports: Vec::new(), }; @@ -529,20 +522,6 @@ }) .context("failed to create helper thread for jobserver management")?; - // Create a helper thread to manage the diagnostics for rustfix if - // necessary. - let messages = state.messages.clone(); - // It is important that this uses `push` instead of `push_bounded` for - // now. If someone wants to fix this to be bounded, the `drop` - // implementation needs to be changed to avoid possible deadlocks. 
- let _diagnostic_server = build_runner - .bcx - .build_config - .rustfix_diagnostic_server - .borrow_mut() - .take() - .map(move |srv| srv.start(move |msg| messages.push(Message::FixDiagnostic(msg)))); - thread::scope( move |scope| match state.drain_the_queue(build_runner, scope, &helper) { Some(err) => Err(err), @@ -665,9 +644,6 @@ } => { self.bump_warning_count(id, lint, emitted, fixable); } - Message::FixDiagnostic(msg) => { - self.print.print(&msg)?; - } Message::Finish(id, artifact, result) => { let unit = match artifact { // If `id` has completely finished we remove it diff -urN cargo-0.96.0-orig/src/cargo/core/compiler/rustdoc.rs cargo-0.96.0/src/cargo/core/compiler/rustdoc.rs --- cargo-0.96.0-orig/src/cargo/core/compiler/rustdoc.rs 2006-07-24 10:21:28 +0900 +++ cargo-0.96.0/src/cargo/core/compiler/rustdoc.rs 2026-05-07 22:47:42 +0900 @@ -3,7 +3,6 @@ use crate::core::compiler::build_runner::BuildRunner; use crate::core::compiler::unit::Unit; use crate::core::compiler::{BuildContext, CompileKind}; -use crate::sources::CRATES_IO_REGISTRY; use crate::util::errors::{CargoResult, internal}; use cargo_util::ProcessBuilder; use std::collections::HashMap; @@ -74,7 +73,7 @@ impl Default for RustdocExternMap { fn default() -> Self { Self { - registries: HashMap::from([(CRATES_IO_REGISTRY.into(), DOCS_RS_URL.into())]), + registries: HashMap::from([]), std: None, } } @@ -85,9 +84,6 @@ ) -> Result, D::Error> { use serde::Deserialize; let mut registries = HashMap::deserialize(de)?; - if !registries.contains_key(CRATES_IO_REGISTRY) { - registries.insert(CRATES_IO_REGISTRY.into(), DOCS_RS_URL.into()); - } Ok(registries) } @@ -127,9 +123,6 @@ if !sid.is_registry() { return false; } - if sid.is_crates_io() { - return registry == CRATES_IO_REGISTRY; - } if let Some(index_url) = name2url.get(registry) { return index_url == sid.url(); } diff -urN cargo-0.96.0-orig/src/cargo/core/package.rs cargo-0.96.0/src/cargo/core/package.rs --- cargo-0.96.0-orig/src/cargo/core/package.rs 
2006-07-24 10:21:28 +0900 +++ cargo-0.96.0/src/cargo/core/package.rs 2026-05-07 22:47:42 +0900 @@ -11,8 +11,6 @@ use anyhow::Context as _; use cargo_util_schemas::manifest::{Hints, RustVersion}; -use curl::easy::Easy; -use curl::multi::{EasyHandle, Multi}; use semver::Version; use serde::Serialize; use tracing::debug; @@ -29,12 +27,8 @@ use crate::sources::source::{MaybePackage, SourceMap}; use crate::util::HumanBytes; use crate::util::cache_lock::{CacheLock, CacheLockMode}; -use crate::util::errors::{CargoResult, HttpNotSuccessful}; +use crate::util::errors::{CargoResult}; use crate::util::interning::InternedString; -use crate::util::network::http::HttpTimeout; -use crate::util::network::http::http_handle_and_timeout; -use crate::util::network::retry::{Retry, RetryResult}; -use crate::util::network::sleep::SleepTracker; use crate::util::{self, GlobalContext, Progress, ProgressStyle, internal}; /// Information about a package that is available somewhere in the file system. @@ -290,102 +284,6 @@ packages: HashMap>, sources: RefCell>, gctx: &'gctx GlobalContext, - multi: Multi, - /// Used to prevent reusing the `PackageSet` to download twice. - downloading: Cell, - /// Whether or not to use curl HTTP/2 multiplexing. - multiplexing: bool, -} - -/// Helper for downloading crates. -pub struct Downloads<'a, 'gctx> { - set: &'a PackageSet<'gctx>, - /// When a download is started, it is added to this map. The key is a - /// "token" (see `Download::token`). It is removed once the download is - /// finished. - pending: HashMap, EasyHandle)>, - /// Set of packages currently being downloaded. This should stay in sync - /// with `pending`. - pending_ids: HashSet, - /// Downloads that have failed and are waiting to retry again later. - sleeping: SleepTracker<(Download<'gctx>, Easy)>, - /// The final result of each download. A pair `(token, result)`. 
This is a - /// temporary holding area, needed because curl can report multiple - /// downloads at once, but the main loop (`wait`) is written to only - /// handle one at a time. - results: Vec<(usize, Result<(), curl::Error>)>, - /// The next ID to use for creating a token (see `Download::token`). - next: usize, - /// Progress bar. - progress: RefCell>>, - /// Number of downloads that have successfully finished. - downloads_finished: usize, - /// Total bytes for all successfully downloaded packages. - downloaded_bytes: u64, - /// Size (in bytes) and package name of the largest downloaded package. - largest: (u64, InternedString), - /// Time when downloading started. - start: Instant, - /// Indicates *all* downloads were successful. - success: bool, - - /// Timeout management, both of timeout thresholds as well as whether or not - /// our connection has timed out (and accompanying message if it has). - /// - /// Note that timeout management is done manually here instead of in libcurl - /// because we want to apply timeouts to an entire batch of operations, not - /// any one particular single operation. - timeout: HttpTimeout, - /// Last time bytes were received. - updated_at: Cell, - /// This is a slow-speed check. It is reset to `now + timeout_duration` - /// every time at least `threshold` bytes are received. If the current - /// time ever exceeds `next_speed_check`, then give up and report a - /// timeout error. - next_speed_check: Cell, - /// This is the slow-speed threshold byte count. It starts at the - /// configured threshold value (default 10), and is decremented by the - /// number of bytes received in each chunk. If it is <= zero, the - /// threshold has been met and data is being received fast enough not to - /// trigger a timeout; reset `next_speed_check` and set this back to the - /// configured threshold. - next_speed_check_bytes_threshold: Cell, - /// Global filesystem lock to ensure only one Cargo is downloading at a - /// time. 
- _lock: CacheLock<'gctx>, -} - -struct Download<'gctx> { - /// The token for this download, used as the key of the `Downloads::pending` map - /// and stored in `EasyHandle` as well. - token: usize, - - /// The package that we're downloading. - id: PackageId, - - /// Actual downloaded data, updated throughout the lifetime of this download. - data: RefCell>, - - /// HTTP headers for debugging. - headers: RefCell>, - - /// The URL that we're downloading from, cached here for error messages and - /// reenqueuing. - url: String, - - /// A descriptive string to print when we've finished downloading this crate. - descriptor: String, - - /// Statistics updated from the progress callback in libcurl. - total: Cell, - current: Cell, - - /// The moment we started this transfer at. - start: Instant, - timed_out: Cell>, - - /// Logic used to track retrying this download if it's a spurious failure. - retry: Retry<'gctx>, } impl<'gctx> PackageSet<'gctx> { @@ -394,17 +292,6 @@ sources: SourceMap<'gctx>, gctx: &'gctx GlobalContext, ) -> CargoResult> { - // We've enabled the `http2` feature of `curl` in Cargo, so treat - // failures here as fatal as it would indicate a build-time problem. 
- let mut multi = Multi::new(); - let multiplexing = gctx.http_config()?.multiplexing.unwrap_or(true); - multi - .pipelining(false, multiplexing) - .context("failed to enable multiplexing/pipelining in curl")?; - - // let's not flood crates.io with connections - multi.set_max_host_connections(2)?; - Ok(PackageSet { packages: package_ids .iter() @@ -412,9 +299,6 @@ .collect(), sources: RefCell::new(sources), gctx, - multi, - downloading: Cell::new(false), - multiplexing, }) } @@ -426,36 +310,6 @@ self.packages.values().filter_map(|p| p.get()) } - pub fn enable_download<'a>(&'a self) -> CargoResult> { - assert!(!self.downloading.replace(true)); - let timeout = HttpTimeout::new(self.gctx)?; - Ok(Downloads { - start: Instant::now(), - set: self, - next: 0, - pending: HashMap::new(), - pending_ids: HashSet::new(), - sleeping: SleepTracker::new(), - results: Vec::new(), - progress: RefCell::new(Some(Progress::with_style( - "Downloading", - ProgressStyle::Ratio, - self.gctx, - ))), - downloads_finished: 0, - downloaded_bytes: 0, - largest: (0, "".into()), - success: false, - updated_at: Cell::new(Instant::now()), - timeout, - next_speed_check: Cell::new(Instant::now()), - next_speed_check_bytes_threshold: Cell::new(0), - _lock: self - .gctx - .acquire_package_cache_lock(CacheLockMode::DownloadExclusive)?, - }) - } - pub fn get_one(&self, id: PackageId) -> CargoResult<&Package> { if let Some(pkg) = self.packages.get(&id).and_then(|slot| slot.get()) { return Ok(pkg); @@ -468,16 +322,9 @@ let _lock = self .gctx .acquire_package_cache_lock(CacheLockMode::DownloadExclusive)?; - let mut downloads = self.enable_download()?; for id in ids { - pkgs.extend(downloads.start(id)?); - } - while downloads.remaining() > 0 { - pkgs.push(downloads.wait()?); + pkgs.push(self.fetch(id)?); } - downloads.success = true; - drop(downloads); - let mut deferred = self.gctx.deferred_global_last_use()?; deferred.save_no_error(self.gctx); Ok(pkgs) @@ -669,8 +516,6 @@ /// Merge the given set into 
self. pub fn add_set(&mut self, set: PackageSet<'gctx>) { - assert!(!self.downloading.get()); - assert!(!set.downloading.get()); for (pkg_id, p_cell) in set.packages { self.packages.entry(pkg_id).or_insert(p_cell); } @@ -678,535 +523,30 @@ let other_sources = set.sources.into_inner(); sources.add_source_map(other_sources); } -} -impl<'a, 'gctx> Downloads<'a, 'gctx> { - /// Starts to download the package for the `id` specified. - /// - /// Returns `None` if the package is queued up for download and will - /// eventually be returned from `wait_for_download`. Returns `Some(pkg)` if - /// the package is ready and doesn't need to be downloaded. + /// Local equivalent of Downloads.start() + wait() #[tracing::instrument(skip_all)] - pub fn start(&mut self, id: PackageId) -> CargoResult> { - self.start_inner(id) - .with_context(|| format!("failed to download `{}`", id)) - } - - fn start_inner(&mut self, id: PackageId) -> CargoResult> { - // First up see if we've already cached this package, in which case - // there's nothing to do. + fn fetch<'a>(&'a self, id: PackageId) -> CargoResult<&'a Package> { let slot = self - .set .packages .get(&id) .ok_or_else(|| internal(format!("couldn't find `{}` in package set", id)))?; if let Some(pkg) = slot.get() { - return Ok(Some(pkg)); + return Ok(pkg); } - - // Ask the original source for this `PackageId` for the corresponding - // package. That may immediately come back and tell us that the package - // is ready, or it could tell us that it needs to be downloaded. 
- let mut sources = self.set.sources.borrow_mut(); + let mut sources = self.sources.borrow_mut(); let source = sources .get_mut(id.source_id()) .ok_or_else(|| internal(format!("couldn't find source for `{}`", id)))?; - let pkg = source + let maybe_pkg = source .download(id) .context("unable to get packages from source")?; - let (url, descriptor, authorization) = match pkg { + match maybe_pkg { MaybePackage::Ready(pkg) => { - debug!("{} doesn't need a download", id); assert!(slot.set(pkg).is_ok()); - return Ok(Some(slot.get().unwrap())); - } - MaybePackage::Download { - url, - descriptor, - authorization, - } => (url, descriptor, authorization), - }; - - // Ok we're going to download this crate, so let's set up all our - // internal state and hand off an `Easy` handle to our libcurl `Multi` - // handle. This won't actually start the transfer, but later it'll - // happen during `wait_for_download` - let token = self.next; - self.next += 1; - debug!(target: "network", "downloading {} as {}", id, token); - assert!(self.pending_ids.insert(id)); - - let (mut handle, _timeout) = http_handle_and_timeout(self.set.gctx)?; - handle.get(true)?; - handle.url(&url)?; - handle.follow_location(true)?; // follow redirects - - // Add authorization header. - if let Some(authorization) = authorization { - let mut headers = curl::easy::List::new(); - headers.append(&format!("Authorization: {}", authorization))?; - handle.http_headers(headers)?; - } - - // Enable HTTP/2 if possible. 
- crate::try_old_curl_http2_pipewait!(self.set.multiplexing, handle); - - handle.write_function(move |buf| { - debug!(target: "network", "{} - {} bytes of data", token, buf.len()); - tls::with(|downloads| { - if let Some(downloads) = downloads { - downloads.pending[&token] - .0 - .data - .borrow_mut() - .extend_from_slice(buf); - } - }); - Ok(buf.len()) - })?; - handle.header_function(move |data| { - tls::with(|downloads| { - if let Some(downloads) = downloads { - // Headers contain trailing \r\n, trim them to make it easier - // to work with. - let h = String::from_utf8_lossy(data).trim().to_string(); - downloads.pending[&token].0.headers.borrow_mut().push(h); - } - }); - true - })?; - - handle.progress(true)?; - handle.progress_function(move |dl_total, dl_cur, _, _| { - tls::with(|downloads| match downloads { - Some(d) => d.progress(token, dl_total as u64, dl_cur as u64), - None => false, - }) - })?; - - // If the progress bar isn't enabled then it may be awhile before the - // first crate finishes downloading so we inform immediately that we're - // downloading crates here. - if self.downloads_finished == 0 - && self.pending.is_empty() - && !self.progress.borrow().as_ref().unwrap().is_enabled() - { - self.set.gctx.shell().status("Downloading", "crates ...")?; - } - - let dl = Download { - token, - data: RefCell::new(Vec::new()), - headers: RefCell::new(Vec::new()), - id, - url, - descriptor, - total: Cell::new(0), - current: Cell::new(0), - start: Instant::now(), - timed_out: Cell::new(None), - retry: Retry::new(self.set.gctx)?, - }; - self.enqueue(dl, handle)?; - self.tick(WhyTick::DownloadStarted)?; - - Ok(None) - } - - /// Returns the number of crates that are still downloading. - pub fn remaining(&self) -> usize { - self.pending.len() + self.sleeping.len() - } - - /// Blocks the current thread waiting for a package to finish downloading. 
- /// - /// This method will wait for a previously enqueued package to finish - /// downloading and return a reference to it after it's done downloading. - /// - /// # Panics - /// - /// This function will panic if there are no remaining downloads. - #[tracing::instrument(skip_all)] - pub fn wait(&mut self) -> CargoResult<&'a Package> { - let (dl, data) = loop { - assert_eq!(self.pending.len(), self.pending_ids.len()); - let (token, result) = self.wait_for_curl()?; - debug!(target: "network", "{} finished with {:?}", token, result); - - let (mut dl, handle) = self - .pending - .remove(&token) - .expect("got a token for a non-in-progress transfer"); - let data = mem::take(&mut *dl.data.borrow_mut()); - let headers = mem::take(&mut *dl.headers.borrow_mut()); - let mut handle = self.set.multi.remove(handle)?; - self.pending_ids.remove(&dl.id); - - // Check if this was a spurious error. If it was a spurious error - // then we want to re-enqueue our request for another attempt and - // then we wait for another request to finish. - let ret = { - let timed_out = &dl.timed_out; - let url = &dl.url; - dl.retry.r#try(|| { - if let Err(e) = result { - // If this error is "aborted by callback" then that's - // probably because our progress callback aborted due to - // a timeout. We'll find out by looking at the - // `timed_out` field, looking for a descriptive message. - // If one is found we switch the error code (to ensure - // it's flagged as spurious) and then attach our extra - // information to the error. 
- if !e.is_aborted_by_callback() { - return Err(e.into()); - } - - return Err(match timed_out.replace(None) { - Some(msg) => { - let code = curl_sys::CURLE_OPERATION_TIMEDOUT; - let mut err = curl::Error::new(code); - err.set_extra(msg); - err - } - None => e, - } - .into()); - } - - let code = handle.response_code()?; - if code != 200 && code != 0 { - return Err(HttpNotSuccessful::new_from_handle( - &mut handle, - &url, - data, - headers, - ) - .into()); - } - Ok(data) - }) - }; - match ret { - RetryResult::Success(data) => break (dl, data), - RetryResult::Err(e) => { - return Err(e.context(format!("failed to download from `{}`", dl.url))); - } - RetryResult::Retry(sleep) => { - debug!(target: "network", "download retry {} for {sleep}ms", dl.url); - self.sleeping.push(sleep, (dl, handle)); - } - } - }; - - // If the progress bar isn't enabled then we still want to provide some - // semblance of progress of how we're downloading crates, and if the - // progress bar is enabled this provides a good log of what's happening. - self.progress.borrow_mut().as_mut().unwrap().clear(); - self.set.gctx.shell().status("Downloaded", &dl.descriptor)?; - - self.downloads_finished += 1; - self.downloaded_bytes += dl.total.get(); - if dl.total.get() > self.largest.0 { - self.largest = (dl.total.get(), dl.id.name()); - } - - // We're about to synchronously extract the crate below. While we're - // doing that our download progress won't actually be updated, nor do we - // have a great view into the progress of the extraction. Let's prepare - // the user for this CPU-heavy step if it looks like it'll take some - // time to do so. - let kib_400 = 1024 * 400; - if dl.total.get() < kib_400 { - self.tick(WhyTick::DownloadFinished)?; - } else { - self.tick(WhyTick::Extracting(&dl.id.name()))?; - } - - // Inform the original source that the download is finished which - // should allow us to actually get the package and fill it in now. 
- let mut sources = self.set.sources.borrow_mut(); - let source = sources - .get_mut(dl.id.source_id()) - .ok_or_else(|| internal(format!("couldn't find source for `{}`", dl.id)))?; - let start = Instant::now(); - let pkg = source.finish_download(dl.id, data)?; - - // Assume that no time has passed while we were calling - // `finish_download`, update all speed checks and timeout limits of all - // active downloads to make sure they don't fire because of a slowly - // extracted tarball. - let finish_dur = start.elapsed(); - self.updated_at.set(self.updated_at.get() + finish_dur); - self.next_speed_check - .set(self.next_speed_check.get() + finish_dur); - - let slot = &self.set.packages[&dl.id]; - assert!(slot.set(pkg).is_ok()); - Ok(slot.get().unwrap()) - } - - fn enqueue(&mut self, dl: Download<'gctx>, handle: Easy) -> CargoResult<()> { - let mut handle = self.set.multi.add(handle)?; - let now = Instant::now(); - handle.set_token(dl.token)?; - self.updated_at.set(now); - self.next_speed_check.set(now + self.timeout.dur); - self.next_speed_check_bytes_threshold - .set(u64::from(self.timeout.low_speed_limit)); - dl.timed_out.set(None); - dl.current.set(0); - dl.total.set(0); - self.pending.insert(dl.token, (dl, handle)); - Ok(()) - } - - /// Block, waiting for curl. Returns a token and a `Result` for that token - /// (`Ok` means the download successfully finished). - fn wait_for_curl(&mut self) -> CargoResult<(usize, Result<(), curl::Error>)> { - // This is the main workhorse loop. We use libcurl's portable `wait` - // method to actually perform blocking. This isn't necessarily too - // efficient in terms of fd management, but we should only be juggling - // a few anyway. - // - // Here we start off by asking the `multi` handle to do some work via - // the `perform` method. This will actually do I/O work (non-blocking) - // and attempt to make progress. 
Afterwards we ask about the `messages` - // contained in the handle which will inform us if anything has finished - // transferring. - // - // If we've got a finished transfer after all that work we break out - // and process the finished transfer at the end. Otherwise we need to - // actually block waiting for I/O to happen, which we achieve with the - // `wait` method on `multi`. - loop { - self.add_sleepers()?; - let n = tls::set(self, || { - self.set - .multi - .perform() - .context("failed to perform http requests") - })?; - debug!(target: "network", "handles remaining: {}", n); - let results = &mut self.results; - let pending = &self.pending; - self.set.multi.messages(|msg| { - let token = msg.token().expect("failed to read token"); - let handle = &pending[&token].1; - if let Some(result) = msg.result_for(handle) { - results.push((token, result)); - } else { - debug!(target: "network", "message without a result (?)"); - } - }); - - if let Some(pair) = results.pop() { - break Ok(pair); - } - assert_ne!(self.remaining(), 0); - if self.pending.is_empty() { - let delay = self.sleeping.time_to_next().unwrap(); - debug!(target: "network", "sleeping main thread for {delay:?}"); - std::thread::sleep(delay); - } else { - let min_timeout = Duration::new(1, 0); - let timeout = self.set.multi.get_timeout()?.unwrap_or(min_timeout); - let timeout = timeout.min(min_timeout); - self.set - .multi - .wait(&mut [], timeout) - .context("failed to wait on curl `Multi`")?; - } - } - } - - fn add_sleepers(&mut self) -> CargoResult<()> { - for (dl, handle) in self.sleeping.to_retry() { - self.pending_ids.insert(dl.id); - self.enqueue(dl, handle)?; - } - Ok(()) - } - - fn progress(&self, token: usize, total: u64, cur: u64) -> bool { - let dl = &self.pending[&token].0; - dl.total.set(total); - let now = Instant::now(); - if cur > dl.current.get() { - let delta = cur - dl.current.get(); - let threshold = self.next_speed_check_bytes_threshold.get(); - - dl.current.set(cur); - 
self.updated_at.set(now); - - if delta >= threshold { - self.next_speed_check.set(now + self.timeout.dur); - self.next_speed_check_bytes_threshold - .set(u64::from(self.timeout.low_speed_limit)); - } else { - self.next_speed_check_bytes_threshold.set(threshold - delta); - } - } - if self.tick(WhyTick::DownloadUpdate).is_err() { - return false; - } - - // If we've spent too long not actually receiving any data we time out. - if now > self.updated_at.get() + self.timeout.dur { - self.updated_at.set(now); - let msg = format!( - "failed to download any data for `{}` within {}s", - dl.id, - self.timeout.dur.as_secs() - ); - dl.timed_out.set(Some(msg)); - return false; - } - - // If we reached the point in time that we need to check our speed - // limit, see if we've transferred enough data during this threshold. If - // it fails this check then we fail because the download is going too - // slowly. - if now >= self.next_speed_check.get() { - self.next_speed_check.set(now + self.timeout.dur); - assert!(self.next_speed_check_bytes_threshold.get() > 0); - let msg = format!( - "download of `{}` failed to transfer more \ - than {} bytes in {}s", - dl.id, - self.timeout.low_speed_limit, - self.timeout.dur.as_secs() - ); - dl.timed_out.set(Some(msg)); - return false; - } - - true - } - - fn tick(&self, why: WhyTick<'_>) -> CargoResult<()> { - let mut progress = self.progress.borrow_mut(); - let progress = progress.as_mut().unwrap(); - - if let WhyTick::DownloadUpdate = why { - if !progress.update_allowed() { - return Ok(()); - } + return Ok(slot.get().unwrap()); + }, + MaybePackage::Download{..} => unreachable!("functionality removed"), } - let pending = self.remaining(); - let mut msg = if pending == 1 { - format!("{} crate", pending) - } else { - format!("{} crates", pending) - }; - match why { - WhyTick::Extracting(krate) => { - msg.push_str(&format!(", extracting {} ...", krate)); - } - _ => { - let mut dur = Duration::new(0, 0); - let mut remaining = 0; - for (dl, _) in 
self.pending.values() { - dur += dl.start.elapsed(); - // If the total/current look weird just throw out the data - // point, sounds like curl has more to learn before we have - // the true information. - if dl.total.get() >= dl.current.get() { - remaining += dl.total.get() - dl.current.get(); - } - } - if remaining > 0 && dur > Duration::from_millis(500) { - msg.push_str(&format!(", remaining bytes: {:.1}", HumanBytes(remaining))); - } - } - } - progress.print_now(&msg) - } -} - -#[derive(Copy, Clone)] -enum WhyTick<'a> { - DownloadStarted, - DownloadUpdate, - DownloadFinished, - Extracting(&'a str), -} - -impl<'a, 'gctx> Drop for Downloads<'a, 'gctx> { - fn drop(&mut self) { - self.set.downloading.set(false); - let progress = self.progress.get_mut().take().unwrap(); - // Don't print a download summary if we're not using a progress bar, - // we've already printed lots of `Downloading...` items. - if !progress.is_enabled() { - return; - } - // If we didn't download anything, no need for a summary. - if self.downloads_finished == 0 { - return; - } - // If an error happened, let's not clutter up the output. - if !self.success { - return; - } - // pick the correct plural of crate(s) - let crate_string = if self.downloads_finished == 1 { - "crate" - } else { - "crates" - }; - let mut status = format!( - "{} {} ({:.1}) in {}", - self.downloads_finished, - crate_string, - HumanBytes(self.downloaded_bytes), - util::elapsed(self.start.elapsed()) - ); - // print the size of largest crate if it was >1mb - // however don't print if only a single crate was downloaded - // because it is obvious that it will be the largest then - let mib_1 = 1024 * 1024; - if self.largest.0 > mib_1 && self.downloads_finished > 1 { - status.push_str(&format!( - " (largest was `{}` at {:.1})", - self.largest.1, - HumanBytes(self.largest.0), - )); - } - // Clear progress before displaying final summary. 
- drop(progress); - drop(self.set.gctx.shell().status("Downloaded", status)); - } -} - -mod tls { - use std::cell::Cell; - - use super::Downloads; - - thread_local!(static PTR: Cell<usize> = const { Cell::new(0) }); - - pub(crate) fn with<R>(f: impl FnOnce(Option<&Downloads<'_, '_>>) -> R) -> R { - let ptr = PTR.with(|p| p.get()); - if ptr == 0 { - f(None) - } else { - unsafe { f(Some(&*(ptr as *const Downloads<'_, '_>))) } - } - } - - pub(crate) fn set<R>(dl: &Downloads<'_, '_>, f: impl FnOnce() -> R) -> R { - struct Reset<'a, T: Copy>(&'a Cell<T>, T); - - impl<'a, T: Copy> Drop for Reset<'a, T> { - fn drop(&mut self) { - self.0.set(self.1); - } - } - - PTR.with(|p| { - let _reset = Reset(p, p.get()); - p.set(dl as *const Downloads<'_, '_> as usize); - f() - }) } } diff -urN cargo-0.96.0-orig/src/cargo/core/resolver/mod.rs cargo-0.96.0/src/cargo/core/resolver/mod.rs --- cargo-0.96.0-orig/src/cargo/core/resolver/mod.rs 2006-07-24 10:21:28 +0900 +++ cargo-0.96.0/src/cargo/core/resolver/mod.rs 2026-05-07 22:47:42 +0900 @@ -68,7 +68,19 @@ use crate::core::{Dependency, PackageId, Registry, Summary}; use crate::util::context::GlobalContext; use crate::util::errors::CargoResult; -use crate::util::network::PollExt; +use std::task::Poll; +trait PollExt<T> { + fn expect(self, msg: &str) -> T; +} +impl<T> PollExt<T> for Poll<T> { + #[track_caller] + fn expect(self, msg: &str) -> T { + match self { + Poll::Ready(val) => val, + Poll::Pending => panic!("{}", msg), + } + } +} use self::context::ResolverContext; use self::dep_cache::RegistryQueryer; diff -urN cargo-0.96.0-orig/src/cargo/core/source_id.rs cargo-0.96.0/src/cargo/core/source_id.rs --- cargo-0.96.0-orig/src/cargo/core/source_id.rs 2006-07-24 10:21:28 +0900 +++ cargo-0.96.0/src/cargo/core/source_id.rs 2026-05-07 22:47:42 +0900 @@ -1,10 +1,9 @@ use crate::core::GitReference; use crate::core::PackageId; use crate::core::SourceKind; -use crate::sources::registry::CRATES_IO_HTTP_INDEX; use crate::sources::source::Source; -use 
crate::sources::{CRATES_IO_DOMAIN, CRATES_IO_INDEX, CRATES_IO_REGISTRY, DirectorySource}; -use crate::sources::{GitSource, PathSource, RegistrySource}; +use crate::sources::{CRATES_IO_REGISTRY, DirectorySource}; +use crate::sources::{PathSource, RegistrySource}; use crate::util::interning::InternedString; use crate::util::{CanonicalUrl, CargoResult, GlobalContext, IntoUrl, context}; use anyhow::Context as _; @@ -267,29 +266,12 @@ /// Returns the `SourceId` corresponding to the main repository, using the /// sparse HTTP index if allowed. pub fn crates_io_maybe_sparse_http(gctx: &GlobalContext) -> CargoResult<SourceId> { - if Self::crates_io_is_sparse(gctx)? { - gctx.check_registry_index_not_set()?; - let url = CRATES_IO_HTTP_INDEX.into_url().unwrap(); - let key = KeyOf::Registry(CRATES_IO_REGISTRY.into()); - SourceId::new(SourceKind::SparseRegistry, url, Some(key)) - } else { - Self::crates_io(gctx) - } + Self::crates_io(gctx) } /// Returns whether to access crates.io over the sparse protocol. pub fn crates_io_is_sparse(gctx: &GlobalContext) -> CargoResult<bool> { - let proto: Option<context::Value<String>> = gctx.get("registries.crates-io.protocol")?; - let is_sparse = match proto.as_ref().map(|v| v.val.as_str()) { - Some("sparse") => true, - Some("git") => false, - Some(unknown) => anyhow::bail!( - "unsupported registry protocol `{unknown}` (defined in {})", - proto.as_ref().unwrap().definition - ), - None => true, - }; - Ok(is_sparse) + Ok(false) } /// Gets the `SourceId` associated with given name of the remote registry. @@ -314,11 +296,7 @@ /// Displays the text "crates.io index" for Cargo shell status output. pub fn display_index(self) -> String { - if self.is_crates_io() { - format!("{} index", CRATES_IO_DOMAIN) - } else { - format!("`{}` index", self.display_registry_name()) - } + format!("`{}` index", self.display_registry_name()) } /// Displays the name of a registry if it has one. Otherwise just the URL. 
@@ -397,7 +375,7 @@ ) -> CargoResult<Box<dyn Source + 'gctx>> { trace!("loading SourceId; {}", self); match self.inner.kind { - SourceKind::Git(..) => Ok(Box::new(GitSource::new(self, gctx)?)), + SourceKind::Git(..) => Err(anyhow::anyhow!("functionality removed")), SourceKind::Path => { let path = self .inner @@ -409,9 +387,20 @@ } Ok(Box::new(PathSource::new(&path, self, gctx))) } - SourceKind::Registry | SourceKind::SparseRegistry => Ok(Box::new( - RegistrySource::remote(self, yanked_whitelist, gctx)?, - )), + SourceKind::Registry | SourceKind::SparseRegistry => { + const CRATES_IO_INDEX: &str = "https://github.com/rust-lang/crates.io-index"; + if self.inner.url.to_string() == CRATES_IO_INDEX { + // NOTE(achurch): assumed to come from crates_io_source_id + // and to be subsequently overridden + return Ok(Box::new(RegistrySource::local( + self, + Path::new("/nonexistent"), + yanked_whitelist, + gctx, + ))) + } + Err(anyhow::anyhow!("functionality removed")) + } SourceKind::LocalRegistry => { let path = self .inner @@ -551,7 +540,7 @@ _ => return false, } let url = self.inner.url.as_str(); - url == CRATES_IO_INDEX || url == CRATES_IO_HTTP_INDEX || is_overridden_crates_io_url(url) + is_overridden_crates_io_url(url) } /// Hashes `self` to be used in the name of some Cargo folders, so shouldn't vary. 
diff -urN cargo-0.96.0-orig/src/cargo/core/workspace.rs cargo-0.96.0/src/cargo/core/workspace.rs --- cargo-0.96.0-orig/src/cargo/core/workspace.rs 2006-07-24 10:21:28 +0900 +++ cargo-0.96.0/src/cargo/core/workspace.rs 2026-05-07 22:47:42 +0900 @@ -38,7 +38,7 @@ use crate::lints::rules::unused_workspace_package_fields; use crate::ops; use crate::ops::lockfile::LOCKFILE_NAME; -use crate::sources::{CRATES_IO_INDEX, CRATES_IO_REGISTRY, PathSource, SourceConfigMap}; +use crate::sources::{CRATES_IO_REGISTRY, PathSource, SourceConfigMap}; use crate::util::context; use crate::util::context::{FeatureUnification, Value}; use crate::util::edit_distance; @@ -552,7 +552,6 @@ let mut patch = HashMap::new(); for (url, deps) in config_patch.into_iter().flatten() { let url = match &url[..] { - CRATES_IO_REGISTRY => CRATES_IO_INDEX.parse().unwrap(), url => self .gctx .get_registry_index(url) diff -urN cargo-0.96.0-orig/src/cargo/ops/cargo_add/mod.rs cargo-0.96.0/src/cargo/ops/cargo_add/mod.rs --- cargo-0.96.0-orig/src/cargo/ops/cargo_add/mod.rs 2006-07-24 10:21:28 +0900 +++ cargo-0.96.0/src/cargo/ops/cargo_add/mod.rs 2026-05-07 22:47:42 +0900 @@ -38,7 +38,6 @@ use crate::util::style; use crate::util::toml::lookup_path_base; use crate::util::toml_mut::dependency::Dependency; -use crate::util::toml_mut::dependency::GitSource; use crate::util::toml_mut::dependency::MaybeWorkspace; use crate::util::toml_mut::dependency::PathSource; use crate::util::toml_mut::dependency::RegistrySource; @@ -323,15 +322,6 @@ pub path: Option, /// Specify a named base for a path dependency pub base: Option, - - /// Git repo for dependency - pub git: Option, - /// Specify an alternative git branch - pub branch: Option, - /// Specify a specific git rev - pub rev: Option, - /// Specify a specific git tag - pub tag: Option, } fn resolve_dependency( @@ -349,40 +339,7 @@ .as_deref() .map(CrateSpec::resolve) .transpose()?; - let mut selected_dep = if let Some(url) = &arg.git { - let mut src = GitSource::new(url); 
- if let Some(branch) = &arg.branch { - src = src.set_branch(branch); - } - if let Some(tag) = &arg.tag { - src = src.set_tag(tag); - } - if let Some(rev) = &arg.rev { - src = src.set_rev(rev); - } - - let selected = if let Some(crate_spec) = &crate_spec { - if let Some(v) = crate_spec.version_req() { - // crate specifier includes a version (e.g. `docopt@0.8`) - anyhow::bail!("cannot specify a git URL (`{url}`) with a version (`{v}`)."); - } - let dependency = crate_spec.to_dependency()?.set_source(src); - let selected = select_package(&dependency, gctx, registry)?; - if dependency.name != selected.name { - gctx.shell().warn(format!( - "translating `{}` to `{}`", - dependency.name, selected.name, - ))?; - } - selected - } else { - let mut source = crate::sources::GitSource::new(src.source_id()?, gctx)?; - let packages = source.read_packages()?; - let package = infer_package_for_git_source(packages, &src)?; - Dependency::from(package.summary()) - }; - selected - } else if let Some(raw_path) = &arg.path { + let mut selected_dep = if let Some(raw_path) = &arg.path { let path = paths::normalize_path(&std::env::current_dir()?.join(raw_path)); let mut src = PathSource::new(path); src.base = arg.base.clone(); @@ -600,10 +557,7 @@ let source = pkg_id.source_id(); if source.is_git() { - Ok(Some(( - Option::::None, - Source::Git(GitSource::new(source.as_encoded_url().to_string())), - ))) + Ok(None) } else if let Some(path) = source.local_path() { Ok(Some((None, Source::Path(PathSource::new(path))))) } else { diff -urN cargo-0.96.0-orig/src/cargo/ops/cargo_install.rs cargo-0.96.0/src/cargo/ops/cargo_install.rs --- cargo-0.96.0-orig/src/cargo/ops/cargo_install.rs 2006-07-24 10:21:28 +0900 +++ cargo-0.96.0/src/cargo/ops/cargo_install.rs 2026-05-07 22:47:42 +0900 @@ -8,7 +8,7 @@ use crate::ops::{CompileFilter, Packages}; use crate::ops::{FilterRule, common_for_install_and_uninstall::*}; use crate::sources::source::Source; -use crate::sources::{GitSource, PathSource, 
SourceConfigMap}; +use crate::sources::{PathSource, SourceConfigMap}; use crate::util::context::FeatureUnification; use crate::util::errors::CargoResult; use crate::util::{Filesystem, GlobalContext, Rustc}; @@ -99,14 +99,7 @@ }; if source_id.is_git() { - let mut source = GitSource::new(source_id, gctx)?; - select_pkg( - &mut source, - dep, - |git: &mut GitSource<'_>| git.read_packages(), - gctx, - current_rust_version, - )? + bail!("functionality removed") } else if source_id.is_path() { let mut src = path_source(source_id, gctx)?; if !src.path().is_dir() { @@ -335,8 +328,6 @@ self.ws.set_target_dir(target_dir); } - self.check_yanked_install()?; - let exec: Arc = Arc::new(DefaultExecutor); self.opts.build_config.dry_run = dry_run; let compile = ops::compile_ws(&self.ws, &self.opts, &exec).with_context(|| { @@ -585,23 +576,6 @@ Ok(true) } } - - fn check_yanked_install(&self) -> CargoResult<()> { - if self.ws.ignore_lock() || !self.ws.root().join("Cargo.lock").exists() { - return Ok(()); - } - // It would be best if `source` could be passed in here to avoid a - // duplicate "Updating", but since `source` is taken by value, then it - // wouldn't be available for `compile_ws`. 
- let dry_run = false; - let (pkg_set, resolve) = ops::resolve_ws(&self.ws, dry_run)?; - ops::check_yanked( - self.ws.gctx(), - &pkg_set, - &resolve, - "consider running without --locked", - ) - } } fn make_warning_about_missing_features(binaries: &[&Target]) -> String { diff -urN cargo-0.96.0-orig/src/cargo/ops/cargo_new.rs cargo-0.96.0/src/cargo/ops/cargo_new.rs --- cargo-0.96.0-orig/src/cargo/ops/cargo_new.rs 2006-07-24 10:21:28 +0900 +++ cargo-0.96.0/src/cargo/ops/cargo_new.rs 2026-05-07 22:47:42 +0900 @@ -1,7 +1,6 @@ use crate::core::{Edition, Shell, Workspace}; use crate::util::errors::CargoResult; use crate::util::important_paths::find_root_manifest_for_wd; -use crate::util::{FossilRepo, GitRepo, HgRepo, PijulRepo, existing_vcs_repo}; use crate::util::{GlobalContext, restricted_names}; use anyhow::{Context as _, anyhow}; use cargo_util::paths::{self, write_atomic}; @@ -551,40 +550,6 @@ let mut version_control = opts.version_control; - if version_control == None { - let mut num_detected_vcses = 0; - - if path.join(".git").exists() { - version_control = Some(VersionControl::Git); - num_detected_vcses += 1; - } - - if path.join(".hg").exists() { - version_control = Some(VersionControl::Hg); - num_detected_vcses += 1; - } - - if path.join(".pijul").exists() { - version_control = Some(VersionControl::Pijul); - num_detected_vcses += 1; - } - - if path.join(".fossil").exists() { - version_control = Some(VersionControl::Fossil); - num_detected_vcses += 1; - } - - // if none exists, maybe create git, like in `cargo new` - - if num_detected_vcses > 1 { - anyhow::bail!( - "more than one of .hg, .git, .pijul, .fossil configurations \ - found and the ignore file can't be filled in as \ - a result. 
specify --vcs to override detection" - ); - } - } - let mkopts = MkOptions { version_control, path, @@ -604,194 +569,12 @@ Ok(kind) } -/// `IgnoreList` -struct IgnoreList { - /// git like formatted entries - ignore: Vec, - /// mercurial formatted entries - hg_ignore: Vec, - /// Fossil-formatted entries. - fossil_ignore: Vec, -} - -impl IgnoreList { - /// constructor to build a new ignore file - fn new() -> IgnoreList { - IgnoreList { - ignore: Vec::new(), - hg_ignore: Vec::new(), - fossil_ignore: Vec::new(), - } - } - - /// Add a new entry to the ignore list. Requires three arguments with the - /// entry in possibly three different formats. One for "git style" entries, - /// one for "mercurial style" entries and one for "fossil style" entries. - fn push(&mut self, ignore: &str, hg_ignore: &str, fossil_ignore: &str) { - self.ignore.push(ignore.to_string()); - self.hg_ignore.push(hg_ignore.to_string()); - self.fossil_ignore.push(fossil_ignore.to_string()); - } - - /// Return the correctly formatted content of the ignore file for the given - /// version control system as `String`. - fn format_new(&self, vcs: VersionControl) -> String { - let ignore_items = match vcs { - VersionControl::Hg => &self.hg_ignore, - VersionControl::Fossil => &self.fossil_ignore, - _ => &self.ignore, - }; - - ignore_items.join("\n") + "\n" - } - - /// `format_existing` is used to format the `IgnoreList` when the ignore file - /// already exists. It reads the contents of the given `BufRead` and - /// checks if the contents of the ignore list are already existing in the - /// file. - fn format_existing(&self, existing: T, vcs: VersionControl) -> CargoResult { - let mut existing_items = Vec::new(); - for (i, item) in existing.lines().enumerate() { - match item { - Ok(s) => existing_items.push(s), - Err(err) => match err.kind() { - ErrorKind::InvalidData => { - return Err(anyhow!( - "Character at line {} is invalid. 
Cargo only supports UTF-8.", - i - )); - } - _ => return Err(anyhow!(err)), - }, - } - } - - let ignore_items = match vcs { - VersionControl::Hg => &self.hg_ignore, - VersionControl::Fossil => &self.fossil_ignore, - _ => &self.ignore, - }; - - let mut out = String::new(); - - // Fossil does not support `#` comments. - if vcs != VersionControl::Fossil { - out.push_str("\n\n# Added by cargo\n"); - if ignore_items - .iter() - .any(|item| existing_items.contains(item)) - { - out.push_str("#\n# already existing elements were commented out\n"); - } - out.push('\n'); - } - - for item in ignore_items { - if existing_items.contains(item) { - if vcs == VersionControl::Fossil { - // Just merge for Fossil. - continue; - } - out.push('#'); - } - out.push_str(item); - out.push('\n'); - } - - Ok(out) - } -} - -/// Writes the ignore file to the given directory. If the ignore file for the -/// given vcs system already exists, its content is read and duplicate ignore -/// file entries are filtered out. -fn write_ignore_file(base_path: &Path, list: &IgnoreList, vcs: VersionControl) -> CargoResult<()> { - // Fossil only supports project-level settings in a dedicated subdirectory. - if vcs == VersionControl::Fossil { - paths::create_dir_all(base_path.join(".fossil-settings"))?; - } - - for fp_ignore in match vcs { - VersionControl::Git => vec![base_path.join(".gitignore")], - VersionControl::Hg => vec![base_path.join(".hgignore")], - VersionControl::Pijul => vec![base_path.join(".ignore")], - // Fossil has a cleaning functionality configured in a separate file. 
- VersionControl::Fossil => vec![ - base_path.join(".fossil-settings/ignore-glob"), - base_path.join(".fossil-settings/clean-glob"), - ], - VersionControl::NoVcs => return Ok(()), - } { - let ignore: String = match paths::open(&fp_ignore) { - Err(err) => match err.downcast_ref::() { - Some(io_err) if io_err.kind() == ErrorKind::NotFound => list.format_new(vcs), - _ => return Err(err), - }, - Ok(file) => list.format_existing(BufReader::new(file), vcs)?, - }; - - paths::append(&fp_ignore, ignore.as_bytes())?; - } - - Ok(()) -} - -/// Initializes the correct VCS system based on the provided config. -fn init_vcs(path: &Path, vcs: VersionControl, gctx: &GlobalContext) -> CargoResult<()> { - match vcs { - VersionControl::Git => { - if !path.join(".git").exists() { - // Temporary fix to work around bug in libgit2 when creating a - // directory in the root of a posix filesystem. - // See: https://github.com/libgit2/libgit2/issues/5130 - paths::create_dir_all(path)?; - GitRepo::init(path, gctx.cwd())?; - } - } - VersionControl::Hg => { - if !path.join(".hg").exists() { - HgRepo::init(path, gctx.cwd())?; - } - } - VersionControl::Pijul => { - if !path.join(".pijul").exists() { - PijulRepo::init(path, gctx.cwd())?; - } - } - VersionControl::Fossil => { - if !path.join(".fossil").exists() { - FossilRepo::init(path, gctx.cwd())?; - } - } - VersionControl::NoVcs => { - paths::create_dir_all(path)?; - } - }; - - Ok(()) -} - fn mk(gctx: &GlobalContext, opts: &MkOptions<'_>) -> CargoResult<()> { let path = opts.path; let name = opts.name; let cfg = gctx.get::("cargo-new")?; - // Using the push method with multiple arguments ensures that the entries - // for all mutually-incompatible VCS in terms of syntax are in sync. 
- let mut ignore = IgnoreList::new(); - ignore.push("/target", "^target$", "target"); - - let vcs = opts.version_control.unwrap_or_else(|| { - let in_existing_vcs = existing_vcs_repo(path.parent().unwrap_or(path), gctx.cwd()); - match (cfg.version_control, in_existing_vcs) { - (None, false) => VersionControl::Git, - (Some(opt), false) => opt, - (_, true) => VersionControl::NoVcs, - } - }); - - init_vcs(path, vcs, gctx)?; - write_ignore_file(path, &ignore, vcs)?; + paths::create_dir_all(path)?; // Create `Cargo.toml` file with necessary `[lib]` and `[[bin]]` sections, if needed. let mut manifest = toml_edit::DocumentMut::new(); diff -urN cargo-0.96.0-orig/src/cargo/ops/mod.rs cargo-0.96.0/src/cargo/ops/mod.rs --- cargo-0.96.0-orig/src/cargo/ops/mod.rs 2006-07-24 10:21:28 +0900 +++ cargo-0.96.0/src/cargo/ops/mod.rs 2026-05-07 22:47:42 +0900 @@ -1,5 +1,3 @@ -use crate::sources::CRATES_IO_DOMAIN; - pub use self::cargo_clean::{CleanContext, CleanOptions, clean}; pub use self::cargo_compile::unit_generator::UnitGenerator; pub use self::cargo_compile::{CompileFilter, FilterRule, LibRule, Packages}; @@ -7,14 +5,9 @@ CompileOptions, compile, compile_with_exec, compile_ws, create_bcx, print, resolve_all_features, }; pub use self::cargo_doc::{DocOptions, OutputFormat, doc}; -pub use self::cargo_fetch::{FetchOptions, fetch}; pub use self::cargo_install::{install, install_list}; pub use self::cargo_new::{NewOptions, NewProjectKind, VersionControl, init, new}; pub use self::cargo_output_metadata::{ExportInfo, OutputMetadataOptions, output_metadata}; -pub use self::cargo_package::PackageMessageFormat; -pub use self::cargo_package::PackageOpts; -pub use self::cargo_package::check_yanked; -pub use self::cargo_package::package; pub use self::cargo_pkgid::pkgid; pub use self::cargo_read_manifest::read_package; pub use self::cargo_report::rebuilds::ReportRebuildsOptions; @@ -33,21 +26,13 @@ pub use self::cargo_update::upgrade_manifests; pub use 
self::cargo_update::write_manifest_upgrades; pub use self::common_for_install_and_uninstall::{InstallTracker, resolve_root}; -pub use self::fix::{ - EditionFixMode, FixOptions, fix, fix_edition, fix_exec_rustc, fix_get_proxy_lock_addr, -}; pub use self::lockfile::{load_pkg_lockfile, resolve_to_string, write_pkg_lockfile}; -pub use self::registry::OwnersOptions; -pub use self::registry::PublishOpts; -pub use self::registry::RegistryCredentialConfig; -pub use self::registry::RegistryOrIndex; -pub use self::registry::info; -pub use self::registry::modify_owners; -pub use self::registry::publish; -pub use self::registry::registry_login; -pub use self::registry::registry_logout; -pub use self::registry::search; -pub use self::registry::yank; +/// Represents either `--registry` or `--index` argument, which is mutually exclusive. +#[derive(Debug, Clone)] +pub enum RegistryOrIndex { + Registry(String), + Index(url::Url), +} pub use self::resolve::{ WorkspaceResolve, add_overrides, get_resolved_packages, resolve_with_previous, resolve_ws, resolve_ws_with_opts, @@ -59,11 +44,9 @@ pub(crate) mod cargo_compile; pub mod cargo_config; mod cargo_doc; -mod cargo_fetch; mod cargo_install; mod cargo_new; mod cargo_output_metadata; -mod cargo_package; mod cargo_pkgid; mod cargo_read_manifest; pub mod cargo_remove; @@ -73,9 +56,7 @@ mod cargo_uninstall; mod cargo_update; mod common_for_install_and_uninstall; -mod fix; pub(crate) mod lockfile; -pub(crate) mod registry; pub(crate) mod resolve; pub mod tree; mod vendor; @@ -93,6 +74,7 @@ }; if !dep.specified_req() && dep.is_transitive() { + const CRATES_IO_DOMAIN: &str = "crates.io"; let dep_version_source = dep.registry_id().map_or_else( || CRATES_IO_DOMAIN.to_string(), |registry_id| registry_id.display_registry_name(), diff -urN cargo-0.96.0-orig/src/cargo/ops/vendor.rs cargo-0.96.0/src/cargo/ops/vendor.rs --- cargo-0.96.0-orig/src/cargo/ops/vendor.rs 2006-07-24 10:21:28 +0900 +++ cargo-0.96.0/src/cargo/ops/vendor.rs 2026-05-07 
22:47:42 +0900 @@ -246,7 +246,7 @@ // we'll do a direct extraction into the vendor directory. let registry = match sid.kind() { SourceKind::Registry | SourceKind::SparseRegistry => { - RegistrySource::remote(sid, &Default::default(), gctx)? + bail!("functionality removed") } SourceKind::LocalRegistry => { let path = sid.url().to_file_path().expect("local path"); diff -urN cargo-0.96.0-orig/src/cargo/sources/mod.rs cargo-0.96.0/src/cargo/sources/mod.rs --- cargo-0.96.0-orig/src/cargo/sources/mod.rs 2006-07-24 10:21:28 +0900 +++ cargo-0.96.0/src/cargo/sources/mod.rs 2026-05-07 22:47:42 +0900 @@ -15,7 +15,6 @@ //! into this category. So do local registry and sparse registry. //! * [`DirectorySource`] --- Files are downloaded ahead of time. Primarily //! designed for crates generated from `cargo vendor`. -//! * [`GitSource`] --- This gets crate information from a git repository. //! * [`PathSource`] --- This gets crate information from a local path on the //! filesystem. //! * [`ReplacedSource`] --- This manages the [source replacement] feature, @@ -28,18 +27,16 @@ pub use self::config::SourceConfigMap; pub use self::directory::DirectorySource; -pub use self::git::GitSource; pub use self::path::PathEntry; pub use self::path::PathSource; pub use self::path::RecursivePathSource; pub use self::registry::{ - CRATES_IO_DOMAIN, CRATES_IO_INDEX, CRATES_IO_REGISTRY, IndexSummary, RegistrySource, + CRATES_IO_REGISTRY, IndexSummary, RegistrySource, }; pub use self::replaced::ReplacedSource; pub mod config; pub mod directory; -pub mod git; pub mod overlay; pub mod path; pub mod registry; diff -urN cargo-0.96.0-orig/src/cargo/sources/path.rs cargo-0.96.0/src/cargo/sources/path.rs --- cargo-0.96.0-orig/src/cargo/sources/path.rs 2006-07-24 10:21:28 +0900 +++ cargo-0.96.0/src/cargo/sources/path.rs 2026-05-07 22:47:42 +0900 @@ -19,9 +19,6 @@ use anyhow::Context as _; use cargo_util::paths; use filetime::FileTime; -use gix::bstr::{BString, ByteVec}; -use gix::dir::entry::Status; -use 
gix::index::entry::Stage; use ignore::gitignore::GitignoreBuilder; use tracing::{debug, info, trace, warn}; use walkdir::WalkDir; @@ -429,20 +426,6 @@ } } -impl From for FileType { - fn from(value: gix::dir::entry::Kind) -> Self { - use gix::dir::entry::Kind; - match value { - Kind::Untrackable => FileType::Other, - Kind::File => FileType::File { - maybe_symlink: false, - }, - Kind::Symlink => FileType::Symlink, - Kind::Directory | Kind::Repository => FileType::Dir, - } - } -} - /// [`PathBuf`] with extra metadata. #[derive(Clone, Debug)] pub struct PathEntry { @@ -569,14 +552,9 @@ fn _list_files(pkg: &Package, gctx: &GlobalContext) -> CargoResult> { let root = pkg.root(); let no_include_option = pkg.manifest().include().is_empty(); - let git_repo = if no_include_option { - discover_gix_repo(root)? - } else { - None - }; let mut exclude_builder = GitignoreBuilder::new(root); - if no_include_option && git_repo.is_none() { + if no_include_option { // no include option and not git repo discovered (see rust-lang/cargo#7183). exclude_builder.add_line(None, ".*")?; } @@ -623,232 +601,11 @@ ignore_should_package(relative_path, is_dir) }; - // Attempt Git-prepopulate only if no `include` (see rust-lang/cargo#4135). - if no_include_option { - if let Some(repo) = git_repo { - return list_files_gix(pkg, &repo, &filter, gctx); - } - } let mut ret = Vec::new(); list_files_walk(pkg.root(), &mut ret, true, &filter, gctx)?; Ok(ret) } -/// Returns [`Some(gix::Repository)`](gix::Repository) if the discovered repository -/// (searched upwards from `root`) contains a tracked `/Cargo.toml`. -/// Otherwise, the caller should fall back on full file list. 
-fn discover_gix_repo(root: &Path) -> CargoResult> { - let repo = match gix::ThreadSafeRepository::discover(root) { - Ok(repo) => repo.to_thread_local(), - Err(e) => { - tracing::debug!( - "could not discover git repo at or above {}: {}", - root.display(), - e - ); - return Ok(None); - } - }; - let index = repo - .index_or_empty() - .with_context(|| format!("failed to open git index at {}", repo.path().display()))?; - let repo_root = repo.workdir().ok_or_else(|| { - anyhow::format_err!( - "did not expect repo at {} to be bare", - repo.path().display() - ) - })?; - let repo_relative_path = match paths::strip_prefix_canonical(root, repo_root) { - Ok(p) => p, - Err(e) => { - warn!( - "cannot determine if path `{:?}` is in git repo `{:?}`: {:?}", - root, repo_root, e - ); - return Ok(None); - } - }; - let manifest_path = gix::path::join_bstr_unix_pathsep( - gix::path::to_unix_separators_on_windows(gix::path::into_bstr(repo_relative_path)), - "Cargo.toml", - ); - if index.entry_index_by_path(&manifest_path).is_ok() { - return Ok(Some(repo)); - } - // Package Cargo.toml is not in git, don't use git to guide our selection. - Ok(None) -} - -/// Lists files relevant to building this package inside this source by -/// traversing the git working tree, while avoiding ignored files. -/// -/// This looks into Git sub-repositories as well, resolving them to individual files. -/// Symlinks to directories will also be resolved, but walked as repositories if they -/// point to one to avoid picking up `.git` directories. -fn list_files_gix( - pkg: &Package, - repo: &gix::Repository, - filter: &dyn Fn(&Path, bool) -> bool, - gctx: &GlobalContext, -) -> CargoResult> { - debug!("list_files_gix {}", pkg.package_id()); - let options = repo - .dirwalk_options()? 
- .emit_untracked(gix::dir::walk::EmissionMode::Matching) - .emit_ignored(None) - .emit_tracked(true) - .recurse_repositories(false) - .symlinks_to_directories_are_ignored_like_directories(true) - .emit_empty_directories(false); - let index = repo.index_or_empty()?; - let root = repo - .workdir() - .ok_or_else(|| anyhow::format_err!("can't list files on a bare repository"))?; - assert!( - root.is_absolute(), - "BUG: paths used internally are absolute, and the repo inherits that" - ); - - let pkg_path = pkg.root(); - let repo_relative_pkg_path = pkg_path.strip_prefix(root).unwrap_or(Path::new("")); - let target_prefix = gix::path::to_unix_separators_on_windows(gix::path::into_bstr( - repo_relative_pkg_path.join("target/"), - )); - let package_prefix = - gix::path::to_unix_separators_on_windows(gix::path::into_bstr(repo_relative_pkg_path)); - - let pathspec = { - // Include the package root. - let mut include = BString::from(":(top)"); - include.push_str(package_prefix.as_ref()); - - // Exclude the target directory. - let mut exclude = BString::from(":!(exclude,top)"); - exclude.push_str(target_prefix.as_ref()); - - vec![include, exclude] - }; - - let mut files = Vec::::new(); - let mut subpackages_found = Vec::new(); - for item in repo - .dirwalk_iter(index.clone(), pathspec, Default::default(), options)? - .filter(|res| { - // Don't include Cargo.lock if it is untracked. Packaging will - // generate a new one as needed. - // Also don't include untrackable directory entries, like FIFOs. - res.as_ref().map_or(true, |item| { - item.entry.disk_kind != Some(gix::dir::entry::Kind::Untrackable) - && !(item.entry.status == Status::Untracked - && item.entry.rela_path == "Cargo.lock") - }) - }) - .map(|res| { - res.map(|item| { - // Assumption: if a file tracked as a symlink in Git index, and - // the actual file type on disk is file, then it might be a - // plain text file symlink. 
- // There are exceptions like the file has changed from a symlink - // to a real text file, but hasn't been committed to Git index. - // Exceptions may be rare so we're okay with this now. - let maybe_plain_text_symlink = item.entry.index_kind - == Some(gix::dir::entry::Kind::Symlink) - && item.entry.disk_kind == Some(gix::dir::entry::Kind::File); - ( - item.entry.rela_path, - item.entry.disk_kind, - maybe_plain_text_symlink, - ) - }) - }) - .chain( - // Append entries that might be tracked in `/target/`. - index - .prefixed_entries(target_prefix.as_ref()) - .unwrap_or_default() - .iter() - .filter(|entry| { - // probably not needed as conflicts prevent this to run, but let's be explicit. - entry.stage() == Stage::Unconflicted - }) - .map(|entry| { - ( - entry.path(&index).to_owned(), - // Do not trust what's recorded in the index, enforce checking the disk. - // This traversal is not part of a `status()`, and tracking things in `target/` - // is rare. - None, - false, - ) - }) - .map(Ok), - ) - { - let (rela_path, kind, maybe_plain_text_symlink) = item?; - let file_path = root.join(gix::path::from_bstr(rela_path)); - if file_path.file_name().and_then(|name| name.to_str()) == Some("Cargo.toml") { - // Keep track of all sub-packages found and also strip out all - // matches we've found so far. Note, though, that if we find - // our own `Cargo.toml`, we keep going. - let path = file_path.parent().unwrap(); - if path != pkg_path { - debug!("subpackage found: {}", path.display()); - files.retain(|p| !p.starts_with(path)); - subpackages_found.push(path.to_path_buf()); - continue; - } - } - - // If this file is part of any other sub-package we've found so far, - // skip it. - if subpackages_found.iter().any(|p| file_path.starts_with(p)) { - continue; - } - - let is_dir = kind.map_or(false, |kind| { - if kind == gix::dir::entry::Kind::Symlink { - // Symlinks must be checked to see if they point to a directory - // we should traverse. 
- file_path.is_dir() - } else { - kind.is_dir() - } - }); - if is_dir { - // This could be a submodule, or a sub-repository. In any case, we prefer to walk - // it with git-support to leverage ignored files and to avoid pulling in entire - // .git repositories. - match gix::open(&file_path) { - Ok(sub_repo) => { - files.extend(list_files_gix(pkg, &sub_repo, filter, gctx)?); - } - Err(_) => { - list_files_walk(&file_path, &mut files, false, filter, gctx)?; - } - } - } else if (filter)(&file_path, is_dir) { - assert!(!is_dir); - trace!(" found {}", file_path.display()); - let ty = match kind.map(Into::into) { - Some(FileType::File { .. }) => FileType::File { - maybe_symlink: maybe_plain_text_symlink, - }, - Some(ty) => ty, - None => FileType::Other, - }; - files.push(PathEntry { - path: file_path, - ty, - // Git index doesn't include files from symlink directory, - // symlink dirs are handled in `list_files_walk`. - under_symlink_dir: false, - }); - } - } - - return Ok(files); -} - /// Lists files relevant to building this package inside this source by /// walking the filesystem from the package root path. 
/// diff -urN cargo-0.96.0-orig/src/cargo/sources/registry/mod.rs cargo-0.96.0/src/cargo/sources/registry/mod.rs --- cargo-0.96.0-orig/src/cargo/sources/registry/mod.rs 2006-07-24 10:21:28 +0900 +++ cargo-0.96.0/src/cargo/sources/registry/mod.rs 2026-05-07 22:47:42 +0900 @@ -209,7 +209,18 @@ use crate::sources::source::Source; use crate::util::cache_lock::CacheLockMode; use crate::util::interning::InternedString; -use crate::util::network::PollExt; +trait PollExt { + fn expect(self, msg: &str) -> T; +} +impl PollExt for Poll { + #[track_caller] + fn expect(self, msg: &str) -> T { + match self { + Poll::Ready(val) => val, + Poll::Pending => panic!("{}", msg), + } + } +} use crate::util::{CargoResult, Filesystem, GlobalContext, LimitErrorReader, restricted_names}; use crate::util::{VersionExt, hex}; @@ -219,10 +230,7 @@ /// Not to be confused with `.cargo-ok` file in git sources. const PACKAGE_SOURCE_LOCK: &str = ".cargo-ok"; -pub const CRATES_IO_INDEX: &str = "https://github.com/rust-lang/crates.io-index"; -pub const CRATES_IO_HTTP_INDEX: &str = "sparse+https://index.crates.io/"; pub const CRATES_IO_REGISTRY: &str = "crates-io"; -pub const CRATES_IO_DOMAIN: &str = "crates.io"; /// The content inside `.cargo-ok`. /// See [`RegistrySource::unpack_package`] for more. @@ -449,12 +457,9 @@ }, } -mod download; -mod http_remote; pub(crate) mod index; pub use index::IndexSummary; mod local; -mod remote; /// Generates a unique name for [`SourceId`] to have a unique path to put their /// index files. @@ -474,39 +479,6 @@ } impl<'gctx> RegistrySource<'gctx> { - /// Creates a [`Source`] of a "remote" registry. - /// It could be either an HTTP-based [`http_remote::HttpRegistry`] or - /// a Git-based [`remote::RemoteRegistry`]. - /// - /// * `yanked_whitelist` --- Packages allowed to be used, even if they are yanked. 
- pub fn remote( - source_id: SourceId, - yanked_whitelist: &HashSet, - gctx: &'gctx GlobalContext, - ) -> CargoResult> { - assert!(source_id.is_remote_registry()); - let name = short_name( - source_id, - gctx.cli_unstable() - .git - .map_or(false, |features| features.shallow_index) - && !source_id.is_sparse(), - ); - let ops = if source_id.is_sparse() { - Box::new(http_remote::HttpRegistry::new(source_id, gctx, &name)?) as Box<_> - } else { - Box::new(remote::RemoteRegistry::new(source_id, gctx, &name)) as Box<_> - }; - - Ok(RegistrySource::new( - source_id, - gctx, - &name, - ops, - yanked_whitelist, - )) - } - /// Creates a [`Source`] of a local registry, with [`local::LocalRegistry`] under the hood. /// /// * `path` --- The root path of a local registry on the file system. diff -urN cargo-0.96.0-orig/src/cargo/util/command_prelude.rs cargo-0.96.0/src/cargo/util/command_prelude.rs --- cargo-0.96.0-orig/src/cargo/util/command_prelude.rs 2006-07-24 10:21:28 +0900 +++ cargo-0.96.0/src/cargo/util/command_prelude.rs 2026-05-07 22:50:56 +0900 @@ -3,7 +3,7 @@ use crate::core::compiler::{BuildConfig, CompileKind, MessageFormat, RustcTargetData}; use crate::core::resolver::{CliFeatures, ForceAllTargets, HasDevUnits}; use crate::core::{Edition, Package, TargetKind, Workspace, profiles::Profiles, shell}; -use crate::ops::registry::RegistryOrIndex; +use crate::ops::RegistryOrIndex; use crate::ops::{self, CompileFilter, CompileOptions, NewOptions, Packages, VersionControl}; use crate::util::important_paths::find_root_manifest_for_wd; use crate::util::interning::InternedString; diff -urN cargo-0.96.0-orig/src/cargo/util/context/mod.rs cargo-0.96.0/src/cargo/util/context/mod.rs --- cargo-0.96.0-orig/src/cargo/util/context/mod.rs 2006-07-24 10:21:28 +0900 +++ cargo-0.96.0/src/cargo/util/context/mod.rs 2026-05-07 22:47:42 +0900 @@ -80,24 +80,18 @@ use crate::core::global_cache_tracker::{DeferredGlobalLastUse, GlobalCacheTracker}; use crate::core::shell::Verbosity; use 
crate::core::{CliUnstable, Shell, SourceId, Workspace, WorkspaceRootConfig, features}; -use crate::ops::RegistryCredentialConfig; -use crate::sources::CRATES_IO_INDEX; -use crate::sources::CRATES_IO_REGISTRY; +use crate::sources::registry::CRATES_IO_REGISTRY; use crate::util::OnceExt as _; use crate::util::cache_lock::{CacheLock, CacheLockMode, CacheLocker}; use crate::util::errors::CargoResult; -use crate::util::network::http::configure_http_handle; -use crate::util::network::http::http_handle; use crate::util::restricted_names::is_glob_pattern; use crate::util::{CanonicalUrl, closest_msg, internal}; use crate::util::{Filesystem, IntoUrl, IntoUrlWithBase, Rustc}; use annotate_snippets::Level; use anyhow::{Context as _, anyhow, bail, format_err}; -use cargo_credential::Secret; use cargo_util::paths; use cargo_util_schemas::manifest::RegistryName; -use curl::easy::Easy; use itertools::Itertools; use serde::Deserialize; use serde::de::IntoDeserializer as _; @@ -136,8 +130,6 @@ mod schema; pub use schema::*; -use super::auth::RegistryConfig; - /// Helper macro for creating typed access methods. macro_rules! get_value_typed { ($name:ident, $ty:ty, $variant:ident, $expected:expr) => { @@ -197,14 +189,6 @@ FileDiscovery, } -/// A previously generated authentication token and the data needed to determine if it can be reused. -#[derive(Debug)] -pub struct CredentialCacheValue { - pub token_value: Secret, - pub expiration: Option, - pub operation_independent: bool, -} - /// Configuration information for cargo. This is not specific to a build, it is information /// relating to cargo itself. 
#[derive(Debug)] @@ -244,8 +228,6 @@ unstable_flags: CliUnstable, /// Cli flags of the form "-Z something" unstable_flags_cli: Option>, - /// A handle on curl easy mode for http calls - easy: OnceLock>, /// Cache of the `SourceId` for crates.io crates_io_source_id: OnceLock, /// If false, don't cache `rustc --version --verbose` invocations @@ -258,11 +240,6 @@ env: Env, /// Tracks which sources have been updated to avoid multiple updates. updated_sources: Mutex>, - /// Cache of credentials from configuration or credential providers. - /// Maps from url to credential value. - credential_cache: Mutex>, - /// Cache of registry config from the `[registries]` table. - registry_config: Mutex>>, /// Locks on the package and index caches. package_cache_lock: CacheLocker, /// Cached configuration parsed by Cargo @@ -350,15 +327,12 @@ }, unstable_flags: CliUnstable::default(), unstable_flags_cli: None, - easy: Default::default(), crates_io_source_id: Default::default(), cache_rustc_info, creation_time: Instant::now(), target_dir: None, env, updated_sources: Default::default(), - credential_cache: Default::default(), - registry_config: Default::default(), package_cache_lock: CacheLocker::new(), http_config: Default::default(), future_incompat_config: Default::default(), @@ -577,18 +551,6 @@ self.updated_sources.lock().unwrap() } - /// Cached credentials from credential providers or configuration. - pub fn credential_cache(&self) -> MutexGuard<'_, HashMap> { - self.credential_cache.lock().unwrap() - } - - /// Cache of already parsed registries from the `[registries]` table. - pub(crate) fn registry_config( - &self, - ) -> MutexGuard<'_, HashMap>> { - self.registry_config.lock().unwrap() - } - /// Gets all config values from disk. /// /// This will lazy-load the values as necessary. 
Callers are responsible @@ -1881,24 +1843,9 @@ self.jobserver.as_ref() } - pub fn http(&self) -> CargoResult<&Mutex> { - let http = self - .easy - .try_borrow_with(|| http_handle(self).map(Into::into))?; - { - let mut http = http.lock().unwrap(); - http.reset(); - let timeout = configure_http_handle(self, &mut http)?; - timeout.configure(&mut http)?; - } - Ok(http) - } - pub fn http_config(&self) -> CargoResult<&CargoHttpConfig> { self.http_config.try_borrow_with(|| { let mut http = self.get::("http")?; - let curl_v = curl::Version::get(); - disables_multiplexing_for_bad_curl(curl_v.version(), &mut http, self); Ok(http) }) } @@ -2015,6 +1962,9 @@ pub fn crates_io_source_id(&self) -> CargoResult { let source_id = self.crates_io_source_id.try_borrow_with(|| { self.check_registry_index_not_set()?; + // NOTE(achurch): this has to be the original github URL to + // avoid obsolete lockfile errors with --locked + const CRATES_IO_INDEX: &str = "https://github.com/rust-lang/crates.io-index"; let url = CRATES_IO_INDEX.into_url().unwrap(); SourceId::for_alt_registry(&url, CRATES_IO_REGISTRY) })?; @@ -2130,153 +2080,6 @@ ::home::cargo_home_with_cwd(cwd).ok() } -pub fn save_credentials( - gctx: &GlobalContext, - token: Option, - registry: &SourceId, -) -> CargoResult<()> { - let registry = if registry.is_crates_io() { - None - } else { - let name = registry - .alt_registry_key() - .ok_or_else(|| internal("can't save credentials for anonymous registry"))?; - Some(name) - }; - - // If 'credentials' exists, write to that for backward compatibility reasons. - // Otherwise write to 'credentials.toml'. There's no need to print the - // warning here, because it would already be printed at load time. - let home_path = gctx.home_path.clone().into_path_unlocked(); - let filename = match gctx.get_file_path(&home_path, "credentials", false)? 
{ - Some(path) => match path.file_name() { - Some(filename) => Path::new(filename).to_owned(), - None => Path::new("credentials.toml").to_owned(), - }, - None => Path::new("credentials.toml").to_owned(), - }; - - let mut file = { - gctx.home_path.create_dir()?; - gctx.home_path - .open_rw_exclusive_create(filename, gctx, "credentials' config file")? - }; - - let mut contents = String::new(); - file.read_to_string(&mut contents).with_context(|| { - format!( - "failed to read configuration file `{}`", - file.path().display() - ) - })?; - - let mut toml = parse_document(&contents, file.path(), gctx)?; - - // Move the old token location to the new one. - if let Some(token) = toml.remove("token") { - let map = HashMap::from([("token".to_string(), token)]); - toml.insert("registry".into(), map.into()); - } - - if let Some(token) = token { - // login - - let path_def = Definition::Path(file.path().to_path_buf()); - let (key, mut value) = match token { - RegistryCredentialConfig::Token(token) => { - // login with token - - let key = "token".to_string(); - let value = ConfigValue::String(token.expose(), path_def.clone()); - let map = HashMap::from([(key, value)]); - let table = CV::Table(map, path_def.clone()); - - if let Some(registry) = registry { - let map = HashMap::from([(registry.to_string(), table)]); - ("registries".into(), CV::Table(map, path_def.clone())) - } else { - ("registry".into(), table) - } - } - RegistryCredentialConfig::AsymmetricKey((secret_key, key_subject)) => { - // login with key - - let key = "secret-key".to_string(); - let value = ConfigValue::String(secret_key.expose(), path_def.clone()); - let mut map = HashMap::from([(key, value)]); - if let Some(key_subject) = key_subject { - let key = "secret-key-subject".to_string(); - let value = ConfigValue::String(key_subject, path_def.clone()); - map.insert(key, value); - } - let table = CV::Table(map, path_def.clone()); - - if let Some(registry) = registry { - let map = 
HashMap::from([(registry.to_string(), table)]); - ("registries".into(), CV::Table(map, path_def.clone())) - } else { - ("registry".into(), table) - } - } - _ => unreachable!(), - }; - - if registry.is_some() { - if let Some(table) = toml.remove("registries") { - let v = CV::from_toml(path_def, table)?; - value.merge(v, false)?; - } - } - toml.insert(key, value.into_toml()); - } else { - // logout - if let Some(registry) = registry { - if let Some(registries) = toml.get_mut("registries") { - if let Some(reg) = registries.get_mut(registry) { - let rtable = reg.as_table_mut().ok_or_else(|| { - format_err!("expected `[registries.{}]` to be a table", registry) - })?; - rtable.remove("token"); - rtable.remove("secret-key"); - rtable.remove("secret-key-subject"); - } - } - } else if let Some(registry) = toml.get_mut("registry") { - let reg_table = registry - .as_table_mut() - .ok_or_else(|| format_err!("expected `[registry]` to be a table"))?; - reg_table.remove("token"); - reg_table.remove("secret-key"); - reg_table.remove("secret-key-subject"); - } - } - - let contents = toml.to_string(); - file.seek(SeekFrom::Start(0))?; - file.write_all(contents.as_bytes()) - .with_context(|| format!("failed to write to `{}`", file.path().display()))?; - file.file().set_len(contents.len() as u64)?; - set_permissions(file.file(), 0o600) - .with_context(|| format!("failed to set permissions of `{}`", file.path().display()))?; - - return Ok(()); - - #[cfg(unix)] - fn set_permissions(file: &File, mode: u32) -> CargoResult<()> { - use std::os::unix::fs::PermissionsExt; - - let mut perms = file.metadata()?.permissions(); - perms.set_mode(mode); - file.set_permissions(perms)?; - Ok(()) - } - - #[cfg(not(unix))] - fn set_permissions(_file: &File, _mode: u32) -> CargoResult<()> { - Ok(()) - } -} - /// Represents a config-include value in the configuration. 
/// /// This intentionally doesn't derive serde deserialization @@ -2494,34 +2297,6 @@ } } } - -/// Disable HTTP/2 multiplexing for some broken versions of libcurl. -/// -/// In certain versions of libcurl when proxy is in use with HTTP/2 -/// multiplexing, connections will continue stacking up. This was -/// fixed in libcurl 8.0.0 in curl/curl@821f6e2a89de8aec1c7da3c0f381b92b2b801efc -/// -/// However, Cargo can still link against old system libcurl if it is from a -/// custom built one or on macOS. For those cases, multiplexing needs to be -/// disabled when those versions are detected. -fn disables_multiplexing_for_bad_curl( - curl_version: &str, - http: &mut CargoHttpConfig, - gctx: &GlobalContext, -) { - use crate::util::network; - - if network::proxy::http_proxy_exists(http, gctx) && http.multiplexing.is_none() { - let bad_curl_versions = ["7.87.0", "7.88.0", "7.88.1"]; - if bad_curl_versions - .iter() - .any(|v| curl_version.starts_with(v)) - { - tracing::info!("disabling multiplexing with proxy, curl version is {curl_version}"); - http.multiplexing = Some(false); - } - } -} #[cfg(test)] mod tests { diff -urN cargo-0.96.0-orig/src/cargo/util/errors.rs cargo-0.96.0/src/cargo/util/errors.rs --- cargo-0.96.0-orig/src/cargo/util/errors.rs 2006-07-24 10:21:28 +0900 +++ cargo-0.96.0/src/cargo/util/errors.rs 2026-05-07 22:47:42 +0900 @@ -1,5 +1,4 @@ use anyhow::Error; -use curl::easy::Easy; use std::fmt::{self, Write}; use std::path::PathBuf; @@ -7,110 +6,6 @@ pub type CargoResult = anyhow::Result; -/// These are headers that are included in error messages to help with -/// diagnosing issues. -pub const DEBUG_HEADERS: &[&str] = &[ - // This is the unique ID that identifies the request in CloudFront which - // can be used for looking at the AWS logs. - "x-amz-cf-id", - // This is the CloudFront POP (Point of Presence) that identifies the - // region where the request was routed. This can help identify if an issue - // is region-specific. 
- "x-amz-cf-pop", - // The unique token used for troubleshooting S3 requests via AWS logs or support. - "x-amz-request-id", - // Another token used in conjunction with x-amz-request-id. - "x-amz-id-2", - // Whether or not there was a cache hit or miss (both CloudFront and Fastly). - "x-cache", - // The cache server that processed the request (Fastly). - "x-served-by", -]; - -#[derive(Debug)] -pub struct HttpNotSuccessful { - pub code: u32, - pub url: String, - pub ip: Option, - pub body: Vec, - pub headers: Vec, -} - -impl HttpNotSuccessful { - pub fn new_from_handle( - handle: &mut Easy, - initial_url: &str, - body: Vec, - headers: Vec, - ) -> HttpNotSuccessful { - let ip = handle.primary_ip().ok().flatten().map(|s| s.to_string()); - let url = handle - .effective_url() - .ok() - .flatten() - .unwrap_or(initial_url) - .to_string(); - HttpNotSuccessful { - code: handle.response_code().unwrap_or(0), - url, - ip, - body, - headers, - } - } - - /// Renders the error in a compact form. - pub fn display_short(&self) -> String { - self.render(false) - } - - fn render(&self, show_headers: bool) -> String { - let mut result = String::new(); - let body = std::str::from_utf8(&self.body) - .map(|s| truncate_with_ellipsis(s, 512)) - .unwrap_or_else(|_| format!("[{} non-utf8 bytes]", self.body.len())); - - write!( - result, - "failed to get successful HTTP response from `{}`", - self.url - ) - .unwrap(); - if let Some(ip) = &self.ip { - write!(result, " ({ip})").unwrap(); - } - write!(result, ", got {}\n", self.code).unwrap(); - if show_headers { - let headers: Vec<_> = self - .headers - .iter() - .filter(|header| { - let Some((name, _)) = header.split_once(":") else { - return false; - }; - DEBUG_HEADERS.contains(&name.to_ascii_lowercase().trim()) - }) - .collect(); - if !headers.is_empty() { - writeln!(result, "debug headers:").unwrap(); - for header in headers { - writeln!(result, "{header}").unwrap(); - } - } - } - write!(result, "body:\n{body}").unwrap(); - result - } -} - 
-impl fmt::Display for HttpNotSuccessful { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(&self.render(true)) - } -} - -impl std::error::Error for HttpNotSuccessful {} - // ============================================================================= // Verbose error diff -urN cargo-0.96.0-orig/src/cargo/util/mod.rs cargo-0.96.0/src/cargo/util/mod.rs --- cargo-0.96.0-orig/src/cargo/util/mod.rs 2006-07-24 10:21:28 +0900 +++ cargo-0.96.0/src/cargo/util/mod.rs 2026-05-07 22:47:42 +0900 @@ -5,7 +5,6 @@ pub use self::context::{ConfigValue, GlobalContext, homedir}; pub(crate) use self::counter::MetricsCounter; pub use self::dependency_queue::DependencyQueue; -pub use self::diagnostic_server::RustfixDiagnosticServer; pub use self::edit_distance::{closest, closest_msg, edit_distance}; pub use self::errors::CliError; pub use self::errors::{CargoResult, CliResult, internal}; @@ -17,29 +16,24 @@ pub use self::into_url::IntoUrl; pub use self::into_url_with_base::IntoUrlWithBase; pub(crate) use self::io::LimitErrorReader; -pub use self::lockserver::{LockServer, LockServerClient, LockServerStarted}; pub use self::logger::BuildLogger; pub use self::once::OnceExt; pub use self::progress::{Progress, ProgressStyle}; pub use self::queue::Queue; pub use self::rustc::Rustc; pub use self::semver_ext::{OptVersionReq, VersionExt}; -pub use self::vcs::{FossilRepo, GitRepo, HgRepo, PijulRepo, existing_vcs_repo}; pub use self::workspace::{ add_path_args, path_args, print_available_benches, print_available_binaries, print_available_examples, print_available_packages, print_available_tests, }; -pub mod auth; pub mod cache_lock; mod canonical_url; pub mod command_prelude; pub mod context; mod counter; pub mod cpu; -pub mod credential; mod dependency_queue; -pub mod diagnostic_server; pub mod edit_distance; pub mod errors; pub mod flock; @@ -54,11 +48,9 @@ mod into_url_with_base; mod io; pub mod job; -mod lockserver; pub mod log_message; pub mod logger; pub mod 
machine_message; -pub mod network; mod once; pub mod open; mod progress; @@ -71,7 +63,6 @@ pub mod style; pub mod toml; pub mod toml_mut; -mod vcs; mod workspace; pub fn is_rustup() -> bool { diff -urN cargo-0.96.0-orig/src/cargo/util/toml/mod.rs cargo-0.96.0/src/cargo/util/toml/mod.rs --- cargo-0.96.0-orig/src/cargo/util/toml/mod.rs 2006-07-24 10:21:28 +0900 +++ cargo-0.96.0/src/cargo/util/toml/mod.rs 2026-05-07 22:47:42 +0900 @@ -33,7 +33,7 @@ use crate::core::{GitReference, PackageIdSpec, SourceId, WorkspaceConfig, WorkspaceRootConfig}; use crate::lints::get_key_value_span; use crate::lints::rel_cwd_manifest_path; -use crate::sources::{CRATES_IO_INDEX, CRATES_IO_REGISTRY}; +use crate::sources::{CRATES_IO_REGISTRY}; use crate::util::errors::{CargoResult, ManifestError}; use crate::util::interning::InternedString; use crate::util::{ @@ -2091,9 +2091,6 @@ spec ) })?; - if spec.url().is_none() { - spec.set_url(CRATES_IO_INDEX.parse().unwrap()); - } if replacement.is_version_specified() { bail!( @@ -2130,7 +2127,6 @@ let mut patch = HashMap::new(); for (toml_url, deps) in me.patch.iter().flatten() { let url = match &toml_url[..] { - CRATES_IO_REGISTRY => CRATES_IO_INDEX.parse().unwrap(), _ => manifest_ctx .gctx .get_registry_index(toml_url)