// Copyright (c) The cargo-guppy Contributors
// SPDX-License-Identifier: MIT OR Apache-2.0
use crate::{
explain::HakariExplain,
toml_name_map,
toml_out::{write_toml, HakariOutputOptions},
CargoTomlError, HakariCargoToml, TomlOutError,
};
use ahash::AHashMap;
use bimap::BiHashMap;
use debug_ignore::DebugIgnore;
use guppy::{
errors::TargetSpecError,
graph::{
cargo::{BuildPlatform, CargoOptions, CargoResolverVersion, CargoSet, InitialsPlatform},
feature::{named_feature_filter, FeatureId, FeatureLabel, FeatureSet, StandardFeatures},
DependencyDirection, PackageGraph, PackageMetadata,
},
platform::{Platform, PlatformSpec, TargetFeatures},
PackageId,
};
use rayon::prelude::*;
use std::{
borrow::Cow,
collections::{BTreeMap, BTreeSet, HashSet},
fmt,
sync::Arc,
};
/// Configures and constructs [`Hakari`](Hakari) instances.
///
/// This struct provides a number of options that determine how `Hakari` instances are generated.
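///
/// # Example
///
/// A minimal end-to-end sketch, assuming the current directory is a Cargo workspace,
/// that no workspace-hack package has been registered yet (so `None` is passed for the
/// Hakari package ID), and that the usual crate-root re-exports are in place:
///
/// ```rust,no_run
/// use guppy::MetadataCommand;
/// use hakari::{HakariBuilder, HakariOutputOptions};
///
/// // Build a `PackageGraph` from `cargo metadata` for the current workspace.
/// let graph = MetadataCommand::new().build_graph().unwrap();
/// // No workspace-hack package yet, so pass `None` for the Hakari package ID.
/// let builder = HakariBuilder::new(&graph, None).unwrap();
/// let hakari = builder.compute();
/// // Render the `Cargo.toml` lines for the workspace-hack package.
/// let toml = hakari.to_toml_string(&HakariOutputOptions::default()).unwrap();
/// println!("{}", toml);
/// ```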
#[derive(Clone, Debug)]
pub struct HakariBuilder<'g> {
graph: DebugIgnore<&'g PackageGraph>,
hakari_package: Option<PackageMetadata<'g>>,
pub(crate) platforms: Vec<Arc<Platform>>,
resolver: CargoResolverVersion,
pub(crate) verify_mode: bool,
pub(crate) traversal_excludes: HashSet<&'g PackageId>,
final_excludes: HashSet<&'g PackageId>,
pub(crate) registries: BiHashMap<String, String, ahash::RandomState, ahash::RandomState>,
unify_target_host: UnifyTargetHost,
output_single_feature: bool,
pub(crate) dep_format_version: DepFormatVersion,
pub(crate) workspace_hack_line_style: WorkspaceHackLineStyle,
}
impl<'g> HakariBuilder<'g> {
/// Creates a new `HakariBuilder` instance from a `PackageGraph`.
///
/// The Hakari package itself is usually present in the workspace. If so, specify its
/// package ID, otherwise pass in `None`.
///
/// Returns an error if a Hakari package ID is specified but it isn't known to the graph, or
/// isn't in the workspace.
pub fn new(
graph: &'g PackageGraph,
hakari_id: Option<&PackageId>,
) -> Result<Self, guppy::Error> {
let hakari_package = hakari_id
.map(|package_id| {
let package = graph.metadata(package_id)?;
if !package.in_workspace() {
return Err(guppy::Error::UnknownWorkspaceName(
package.name().to_string(),
));
}
Ok(package)
})
.transpose()?;
Ok(Self {
graph: DebugIgnore(graph),
hakari_package,
platforms: vec![],
resolver: CargoResolverVersion::V2,
verify_mode: false,
traversal_excludes: HashSet::new(),
final_excludes: HashSet::new(),
registries: BiHashMap::with_hashers(Default::default(), Default::default()),
unify_target_host: UnifyTargetHost::default(),
output_single_feature: false,
dep_format_version: DepFormatVersion::default(),
workspace_hack_line_style: WorkspaceHackLineStyle::default(),
})
}
/// Returns the `PackageGraph` used to construct this `HakariBuilder` instance.
pub fn graph(&self) -> &'g PackageGraph {
// This is a spurious clippy lint on Rust 1.65.0
#[allow(clippy::explicit_auto_deref)]
*self.graph
}
/// Returns the Hakari package, or `None` if it wasn't passed into [`new`](Self::new).
pub fn hakari_package(&self) -> Option<&PackageMetadata<'g>> {
self.hakari_package.as_ref()
}
/// Reads the existing TOML file for the Hakari package from disk, returning a
/// `HakariCargoToml`.
///
/// This can be used with [`Hakari::to_toml_string`](Hakari::to_toml_string) to manage the
/// contents of the Hakari package's TOML file on disk.
///
/// Returns an error if there was an issue reading the TOML file from disk, or `None` if
/// this builder was created without a Hakari package.
pub fn read_toml(&self) -> Option<Result<HakariCargoToml, CargoTomlError>> {
let hakari_package = self.hakari_package()?;
let workspace_path = hakari_package
.source()
.workspace_path()
.expect("hakari_package is in workspace");
Some(HakariCargoToml::new_relative(
self.graph.workspace().root(),
workspace_path,
))
}
/// Sets a list of platforms for `hakari` to use.
///
/// By default, `hakari` unifies features that are always enabled across all platforms. If
/// builds are commonly performed on a few platforms, `hakari` can output platform-specific
/// instructions for those builds.
///
/// This currently supports target triples only, without further customization around
/// target features or flags. In the future, this may support `cfg()` expressions using
/// an [SMT solver](https://en.wikipedia.org/wiki/Satisfiability_modulo_theories).
///
/// Call `set_platforms` with an empty list to reset to default behavior.
///
/// Returns an error if a platform wasn't known to [`target_spec`], the library `hakari` uses
/// to resolve platforms.
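///
/// # Example
///
/// A minimal sketch; the target triples below are illustrative and should be replaced
/// with the platforms your builds actually use:
///
/// ```rust,no_run
/// # use guppy::MetadataCommand;
/// # use hakari::HakariBuilder;
/// # let graph = MetadataCommand::new().build_graph().unwrap();
/// let mut builder = HakariBuilder::new(&graph, None).unwrap();
/// // Also produce platform-specific output for these targets.
/// builder
///     .set_platforms(["x86_64-unknown-linux-gnu", "x86_64-apple-darwin"])
///     .unwrap();
/// ```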
pub fn set_platforms(
&mut self,
platforms: impl IntoIterator<Item = impl Into<Cow<'static, str>>>,
) -> Result<&mut Self, TargetSpecError> {
self.platforms = platforms
.into_iter()
.map(|s| Ok(Arc::new(Platform::new(s.into(), TargetFeatures::Unknown)?)))
.collect::<Result<Vec<_>, _>>()?;
Ok(self)
}
/// Returns the platforms set through `set_platforms`, or an empty list if no platforms are
/// set.
pub fn platforms(&self) -> impl ExactSizeIterator<Item = &str> + '_ {
self.platforms.iter().map(|platform| platform.triple_str())
}
/// Sets the Cargo resolver version.
///
/// By default, `HakariBuilder` uses [version 2](CargoResolverVersion::V2) of the Cargo
/// resolver. For more about Cargo resolvers, see the documentation for
/// [`CargoResolverVersion`](CargoResolverVersion).
pub fn set_resolver(&mut self, resolver: CargoResolverVersion) -> &mut Self {
self.resolver = resolver;
self
}
/// Returns the current Cargo resolver version.
pub fn resolver(&self) -> CargoResolverVersion {
self.resolver
}
/// Pretends that the provided packages don't exist during graph traversals.
///
/// Users may wish to not consider certain packages while figuring out the unified feature set.
/// Setting this option prevents those packages from being considered.
///
/// Practically, this means that:
/// * If a workspace package is specified, Cargo build simulations for it will not be run.
/// * If a third-party package is specified, it will not be present in the output, nor will
/// any transitive dependencies or features enabled by it that aren't enabled any other way.
/// In other words, any packages excluded during traversal are also [excluded from the final
/// output](Self::add_final_excludes).
///
/// Returns an error if any package IDs specified aren't known to the graph.
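///
/// # Example
///
/// A minimal sketch, assuming the workspace has a member named `"my-test-fixtures"`
/// (a hypothetical package) that should not influence unification:
///
/// ```rust,no_run
/// # use guppy::MetadataCommand;
/// # use hakari::HakariBuilder;
/// # let graph = MetadataCommand::new().build_graph().unwrap();
/// let mut builder = HakariBuilder::new(&graph, None).unwrap();
/// // Look up the workspace member to exclude (hypothetical package name).
/// let excluded_id = graph.workspace().member_by_name("my-test-fixtures").unwrap().id();
/// builder.add_traversal_excludes([excluded_id]).unwrap();
/// ```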
pub fn add_traversal_excludes<'b>(
&mut self,
excludes: impl IntoIterator<Item = &'b PackageId>,
) -> Result<&mut Self, guppy::Error> {
let traversal_exclude: Vec<&'g PackageId> = excludes
.into_iter()
.map(|package_id| Ok(self.graph.metadata(package_id)?.id()))
.collect::<Result<_, _>>()?;
self.traversal_excludes.extend(traversal_exclude);
Ok(self)
}
/// Returns the packages currently excluded during graph traversals.
///
/// Also returns the Hakari package if specified. This is because the Hakari package is treated
/// as excluded while performing unification.
pub fn traversal_excludes<'b>(&'b self) -> impl Iterator<Item = &'g PackageId> + 'b {
let excludes = self.make_traversal_excludes();
excludes.iter()
}
/// Returns true if a package ID is currently excluded during traversal.
///
/// Also returns true for the Hakari package if specified. This is because the Hakari package is
/// treated as excluded by the algorithm.
///
/// Returns an error if this package ID isn't known to the underlying graph.
pub fn is_traversal_excluded(&self, package_id: &PackageId) -> Result<bool, guppy::Error> {
self.graph.metadata(package_id)?;
let excludes = self.make_traversal_excludes();
Ok(excludes.is_excluded(package_id))
}
/// Adds packages to be removed from the final output.
///
/// Unlike [`traversal_excludes`](Self::traversal_excludes), these packages are considered
/// during traversals, but removed at the end.
///
/// Returns an error if any package IDs specified aren't known to the graph.
pub fn add_final_excludes<'b>(
&mut self,
excludes: impl IntoIterator<Item = &'b PackageId>,
) -> Result<&mut Self, guppy::Error> {
let final_excludes: Vec<&'g PackageId> = excludes
.into_iter()
.map(|package_id| Ok(self.graph.metadata(package_id)?.id()))
.collect::<Result<_, _>>()?;
self.final_excludes.extend(final_excludes);
Ok(self)
}
/// Returns the packages to be removed from the final output.
pub fn final_excludes<'b>(&'b self) -> impl Iterator<Item = &'g PackageId> + 'b {
self.final_excludes.iter().copied()
}
/// Returns true if a package ID is currently excluded from the final output.
///
/// Returns an error if this package ID isn't known to the underlying graph.
pub fn is_final_excluded(&self, package_id: &PackageId) -> Result<bool, guppy::Error> {
self.graph.metadata(package_id)?;
Ok(self.final_excludes.contains(package_id))
}
/// Returns true if a package ID is excluded from either the traversal or the final output.
///
/// Also returns true for the Hakari package if specified. This is because the Hakari package is
/// treated as excluded by the algorithm.
///
/// Returns an error if this package ID isn't known to the underlying graph.
#[inline]
pub fn is_excluded(&self, package_id: &PackageId) -> Result<bool, guppy::Error> {
Ok(self.is_traversal_excluded(package_id)? || self.is_final_excluded(package_id)?)
}
/// Add alternate registries by (name, URL) pairs.
///
/// This is a temporary workaround until [Cargo issue #9052](https://github.com/rust-lang/cargo/issues/9052)
/// is resolved.
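///
/// # Example
///
/// A minimal sketch with a hypothetical registry name and index URL; use the values
/// from your `.cargo/config.toml`:
///
/// ```rust,no_run
/// # use guppy::MetadataCommand;
/// # use hakari::HakariBuilder;
/// # let graph = MetadataCommand::new().build_graph().unwrap();
/// let mut builder = HakariBuilder::new(&graph, None).unwrap();
/// builder.add_registries([("my-registry", "https://my-registry.example.com/index")]);
/// ```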
pub fn add_registries(
&mut self,
registries: impl IntoIterator<Item = (impl Into<String>, impl Into<String>)>,
) -> &mut Self {
self.registries.extend(
registries
.into_iter()
.map(|(name, url)| (name.into(), url.into())),
);
self
}
/// Whether and how to unify feature sets across target and host platforms.
///
/// This is an advanced feature that most users don't need to set. For more information about
/// this option, see the documentation for [`UnifyTargetHost`](UnifyTargetHost).
pub fn set_unify_target_host(&mut self, unify_target_host: UnifyTargetHost) -> &mut Self {
self.unify_target_host = unify_target_host;
self
}
/// Returns the current value of `unify_target_host`.
pub fn unify_target_host(&self) -> UnifyTargetHost {
self.unify_target_host
}
/// Whether to unify feature sets for all dependencies.
///
/// By default, Hakari only produces output for dependencies that are built with more
/// than one feature set. If set to true, Hakari will produce outputs for all dependencies,
/// including those that don't need to be unified.
///
/// This is rarely needed in production, and is most useful for testing and debugging scenarios.
pub fn set_output_single_feature(&mut self, output_single_feature: bool) -> &mut Self {
self.output_single_feature = output_single_feature;
self
}
/// Returns the current value of `output_single_feature`.
pub fn output_single_feature(&self) -> bool {
self.output_single_feature
}
/// Version of hakari data to output.
///
/// For more, see the documentation for [`DepFormatVersion`](DepFormatVersion).
pub fn set_dep_format_version(&mut self, dep_format_version: DepFormatVersion) -> &mut Self {
self.dep_format_version = dep_format_version;
self
}
/// Returns the current value of `dep_format_version`.
pub fn dep_format_version(&self) -> DepFormatVersion {
self.dep_format_version
}
/// Kind of `workspace-hack = ...` lines to output.
///
/// For more, see the documentation for [`WorkspaceHackLineStyle`].
pub fn set_workspace_hack_line_style(
&mut self,
line_style: WorkspaceHackLineStyle,
) -> &mut Self {
self.workspace_hack_line_style = line_style;
self
}
/// Returns the current value of `workspace_hack_line_style`.
pub fn workspace_hack_line_style(&self) -> WorkspaceHackLineStyle {
self.workspace_hack_line_style
}
/// Computes the `Hakari` for this builder.
pub fn compute(self) -> Hakari<'g> {
Hakari::build(self)
}
// ---
// Helper methods
// ---
#[cfg(feature = "cli-support")]
pub(crate) fn traversal_excludes_only<'b>(
&'b self,
) -> impl Iterator<Item = &'g PackageId> + 'b {
self.traversal_excludes.iter().copied()
}
fn make_traversal_excludes<'b>(&'b self) -> TraversalExcludes<'g, 'b> {
let hakari_package = if self.verify_mode {
None
} else {
self.hakari_package.map(|package| package.id())
};
TraversalExcludes {
excludes: &self.traversal_excludes,
hakari_package,
}
}
fn make_features_only<'b>(&'b self) -> FeatureSet<'g> {
if self.verify_mode {
match &self.hakari_package {
Some(package) => package.to_package_set(),
None => self.graph.resolve_none(),
}
.to_feature_set(StandardFeatures::Default)
} else {
self.graph.feature_graph().resolve_none()
}
}
}
#[cfg(feature = "cli-support")]
mod summaries {
use super::*;
use crate::summaries::HakariBuilderSummary;
use guppy::platform::TargetFeatures;
impl<'g> HakariBuilder<'g> {
/// Constructs a `HakariBuilder` from a `PackageGraph` and a serialized summary.
///
/// Requires the `cli-support` feature to be enabled.
///
/// Returns an error if the summary references a package that's not present, or if there was
/// some other issue while creating a `HakariBuilder` from the summary.
pub fn from_summary(
graph: &'g PackageGraph,
summary: &HakariBuilderSummary,
) -> Result<Self, guppy::Error> {
let hakari_package = summary
.hakari_package
.as_ref()
.map(|name| graph.workspace().member_by_name(name))
.transpose()?;
let platforms = summary
.platforms
.iter()
.map(|triple_str| {
let platform = Platform::new(triple_str.clone(), TargetFeatures::Unknown)
.map_err(|err| {
guppy::Error::TargetSpecError(
"while resolving hakari config or summary".to_owned(),
err,
)
})?;
Ok(platform.into())
})
.collect::<Result<Vec<_>, _>>()?;
let registries: BiHashMap<_, _, ahash::RandomState, ahash::RandomState> = summary
.registries
.iter()
.map(|(name, url)| (name.clone(), url.clone()))
.collect();
let traversal_excludes = summary
.traversal_excludes
.to_package_set_registry(
graph,
|name| registries.get_by_left(name).map(|s| s.as_str()),
"resolving hakari traversal-excludes",
)?
.package_ids(DependencyDirection::Forward)
.collect();
let final_excludes = summary
.final_excludes
.to_package_set_registry(
graph,
|name| registries.get_by_left(name).map(|s| s.as_str()),
"resolving hakari final-excludes",
)?
.package_ids(DependencyDirection::Forward)
.collect();
Ok(Self {
graph: DebugIgnore(graph),
hakari_package,
resolver: summary.resolver,
verify_mode: false,
unify_target_host: summary.unify_target_host,
output_single_feature: summary.output_single_feature,
dep_format_version: summary.dep_format_version,
workspace_hack_line_style: summary.workspace_hack_line_style,
platforms,
registries,
traversal_excludes,
final_excludes,
})
}
}
}
/// Whether to unify feature sets for a given dependency across target and host platforms.
///
/// Consider a dependency that is built as both normally (on the target platform) and in a build
/// script or proc macro. The normal dependency is considered to be built on the *target platform*,
/// and is represented in the `[dependencies]` section in the generated `Cargo.toml`.
/// The build dependency is built on the *host platform*, represented in the `[build-dependencies]`
/// section.
///
/// Now consider that the target and host platforms need two different sets of features:
///
/// ```toml
/// ## feature set on target platform
/// [dependencies]
/// my-dep = { version = "1.0", features = ["a", "b"] }
///
/// ## feature set on host platform
/// [build-dependencies]
/// my-dep = { version = "1.0", features = ["b", "c"] }
/// ```
///
/// Should hakari unify the feature sets across the `[dependencies]` and `[build-dependencies]`
/// feature sets?
///
/// Call `HakariBuilder::set_unify_target_host` to configure this option.
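///
/// # Example
///
/// A minimal sketch of setting the option on a builder (paths assume the usual
/// crate-root re-exports):
///
/// ```rust,no_run
/// # use guppy::MetadataCommand;
/// # use hakari::{HakariBuilder, UnifyTargetHost};
/// # let graph = MetadataCommand::new().build_graph().unwrap();
/// let mut builder = HakariBuilder::new(&graph, None).unwrap();
/// // Only unify when a dependency is built on both the target and the host.
/// builder.set_unify_target_host(UnifyTargetHost::UnifyIfBoth);
/// ```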
#[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
#[cfg_attr(feature = "proptest1", derive(proptest_derive::Arbitrary))]
#[cfg_attr(feature = "cli-support", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "cli-support", serde(rename_all = "kebab-case"))]
#[non_exhaustive]
pub enum UnifyTargetHost {
/// Perform no unification across the target and host feature sets.
///
/// This is the most conservative option, but it means that some dependencies may be built with
/// two different sets of features. In this mode, Hakari will likely be significantly less
/// efficient.
None,
/// Automatically choose between the [`UnifyIfBoth`](Self::UnifyIfBoth) and the
/// [`ReplicateTargetOnHost`](Self::ReplicateTargetOnHost) options:
/// * If the workspace contains proc macros, or crates that are build dependencies of other
/// crates, choose the `ReplicateTargetOnHost` strategy.
/// * Otherwise, choose the `UnifyIfBoth` strategy.
///
/// This is the default behavior.
Auto,
/// Perform unification across target and host feature sets, but only if a dependency is built
/// on both the target and the host.
///
/// This is useful if cross-compilations are uncommon and one wishes to avoid the same package
/// being built two different ways: once for the target and once for the host.
UnifyIfBoth,
/// Perform unification across target and host feature sets, and also replicate all target-only
/// lines to the host.
///
/// This is most useful if some workspace packages are proc macros or build dependencies
/// used by other packages.
ReplicateTargetOnHost,
}
/// The default for `UnifyTargetHost`: automatically choose unification strategy based on the
/// workspace.
impl Default for UnifyTargetHost {
#[inline]
fn default() -> Self {
UnifyTargetHost::Auto
}
}
/// Format version for hakari.
///
/// Older versions are kept around for backwards compatibility.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
#[cfg_attr(feature = "cli-support", derive(serde::Deserialize, serde::Serialize))]
#[cfg_attr(feature = "proptest1", derive(proptest_derive::Arbitrary))]
#[non_exhaustive]
#[derive(Default)]
pub enum DepFormatVersion {
/// `workspace-hack = { path = ...}`. (Note the lack of a trailing space.)
///
/// This was used until `cargo hakari 0.9.6`.
#[cfg_attr(feature = "cli-support", serde(rename = "1"))]
#[default]
V1,
/// `workspace-hack = { version = "0.1", path = ... }`. This was introduced in
/// `cargo hakari 0.9.8`.
#[cfg_attr(feature = "cli-support", serde(rename = "2"))]
V2,
/// Elides build metadata. This was introduced in `cargo hakari 0.9.18`.
#[cfg_attr(feature = "cli-support", serde(rename = "3"))]
V3,
/// Sorts dependency names alphabetically. This was introduced in `cargo hakari 0.9.22`.
///
/// (Dependency names were usually produced in sorted order before V4, but there are
/// some edge cases where they weren't: see [issue
/// #65](https://github.com/guppy-rs/guppy/issues/65).)
#[cfg_attr(feature = "cli-support", serde(rename = "4"))]
V4,
}
impl DepFormatVersion {
/// Returns the highest format version supported by this version of `cargo hakari`.
#[inline]
pub fn latest() -> Self {
DepFormatVersion::V4
}
}
impl fmt::Display for DepFormatVersion {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
DepFormatVersion::V1 => write!(f, "1"),
DepFormatVersion::V2 => write!(f, "2"),
DepFormatVersion::V3 => write!(f, "3"),
DepFormatVersion::V4 => write!(f, "4"),
}
}
}
/// Style of `workspace-hack = ...` lines to output.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
#[cfg_attr(feature = "cli-support", derive(serde::Deserialize, serde::Serialize))]
#[cfg_attr(feature = "cli-support", serde(rename_all = "kebab-case"))]
#[cfg_attr(feature = "proptest1", derive(proptest_derive::Arbitrary))]
#[non_exhaustive]
#[derive(Default)]
pub enum WorkspaceHackLineStyle {
/// `workspace-hack = { version = "0.1", path = ... }`.
#[default]
Full,
/// `workspace-hack = { version = "0.1" }`.
VersionOnly,
/// `workspace-hack.workspace = true`
WorkspaceDotted,
}
/// A key representing a platform and host/target. Returned by `Hakari`.
#[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct OutputKey {
/// The index of the platform (in the builder's list of platforms) this key corresponds to, or
/// `None` if the computation was done in a platform-independent manner.
pub platform_idx: Option<usize>,
/// The build platform: target or host.
pub build_platform: BuildPlatform,
}
/// The result of a Hakari computation.
///
/// This contains all the data required to generate a workspace package.
///
/// Produced by [`HakariBuilder::compute`](HakariBuilder::compute).
#[derive(Clone, Debug)]
#[non_exhaustive]
pub struct Hakari<'g> {
pub(crate) builder: HakariBuilder<'g>,
/// The map built by Hakari of dependencies that need to be unified.
///
/// This map is used to construct the TOML output. Public access is provided in case some
/// post-processing needs to be done.
pub output_map: OutputMap<'g>,
/// The complete map of dependency build results built by Hakari.
///
/// This map is not used to generate the TOML output.
pub computed_map: ComputedMap<'g>,
}
impl<'g> Hakari<'g> {
/// Returns the `HakariBuilder` used to create this instance.
pub fn builder(&self) -> &HakariBuilder<'g> {
&self.builder
}
/// Reads the existing TOML file for the Hakari package from disk, returning a
/// `HakariCargoToml`.
///
/// This can be used with [`to_toml_string`](Self::to_toml_string) to manage the contents of
/// the given TOML file on disk.
///
/// Returns an error if there was an issue reading the TOML file from disk, or `None` if
/// the builder's [`hakari_package`](HakariBuilder::hakari_package) is `None`.
pub fn read_toml(&self) -> Option<Result<HakariCargoToml, CargoTomlError>> {
self.builder.read_toml()
}
/// Writes `[dependencies]` and other `Cargo.toml` lines to the given `fmt::Write` instance.
///
/// `&mut String` and `fmt::Formatter` both implement `fmt::Write`.
pub fn write_toml(
&self,
options: &HakariOutputOptions,
out: impl fmt::Write,
) -> Result<(), TomlOutError> {
write_toml(
&self.builder,
&self.output_map,
options,
self.builder.dep_format_version,
out,
)
}
/// Returns a map of dependency names as present in the workspace-hack's `Cargo.toml` to their
/// corresponding [`PackageMetadata`].
///
/// Packages which have one version are present as their original names, while packages with
/// more than one version have a hash appended to them.
pub fn toml_name_map(&self) -> AHashMap<Cow<'g, str>, PackageMetadata<'g>> {
toml_name_map(&self.output_map, self.builder.dep_format_version)
}
/// Returns a `HakariExplain`, which can be used to print out why a specific package is
/// in the workspace-hack's `Cargo.toml`.
///
/// Returns an error if the package ID was not found in the output.
pub fn explain(
&self,
package_id: &'g PackageId,
) -> Result<HakariExplain<'g, '_>, guppy::Error> {
HakariExplain::new(self, package_id)
}
/// A convenience method around `write_toml` that returns a new string with `Cargo.toml` lines.
///
/// The returned string is guaranteed to be valid TOML, and can be provided to
/// a [`HakariCargoToml`](crate::HakariCargoToml) obtained from [`read_toml`](Self::read_toml).
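///
/// # Example
///
/// A minimal sketch of the read-then-generate workflow, assuming a workspace-hack
/// package named `"workspace-hack"` (a hypothetical name) exists in the workspace:
///
/// ```rust,no_run
/// # use guppy::MetadataCommand;
/// # use hakari::{HakariBuilder, HakariOutputOptions};
/// # let graph = MetadataCommand::new().build_graph().unwrap();
/// let hakari_id = graph.workspace().member_by_name("workspace-hack").unwrap().id();
/// let hakari = HakariBuilder::new(&graph, Some(hakari_id)).unwrap().compute();
/// // Generate the new contents...
/// let toml = hakari.to_toml_string(&HakariOutputOptions::default()).unwrap();
/// // ...and read the existing file; `read_toml` returns `None` only if no Hakari
/// // package was configured.
/// let existing = hakari.read_toml().expect("Hakari package was configured").unwrap();
/// // `existing` can now be used to apply `toml` to the file on disk; see `HakariCargoToml`.
/// ```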
pub fn to_toml_string(&self, options: &HakariOutputOptions) -> Result<String, TomlOutError> {
let mut out = String::new();
self.write_toml(options, &mut out)?;
Ok(out)
}
// ---
// Helper methods
// ---
fn build(builder: HakariBuilder<'g>) -> Self {
let graph = *builder.graph;
let mut computed_map_build = ComputedMapBuild::new(&builder);
let platform_specs: Vec<_> = builder
.platforms
.iter()
.map(|platform| PlatformSpec::Platform(platform.clone()))
.collect();
let unify_target_host = builder.unify_target_host.to_impl(graph);
// Collect all the dependencies that need to be unified, by platform and build type.
let mut map_build: OutputMapBuild<'g> = OutputMapBuild::new(graph);
map_build.insert_all(
computed_map_build.iter(),
builder.output_single_feature,
unify_target_host,
);
if !builder.output_single_feature {
// Adding packages might cause different feature sets for some dependencies. Simulate
// further builds with the given target and host features, and use that to add in any
// extra features that need to be considered.
loop {
let mut add_extra = HashSet::new();
for (output_key, features) in map_build.iter_feature_sets() {
let initials_platform = match output_key.build_platform {
BuildPlatform::Target => InitialsPlatform::Standard,
BuildPlatform::Host => InitialsPlatform::Host,
};
let mut cargo_opts = CargoOptions::new();
let platform_spec = match output_key.platform_idx {
Some(idx) => platform_specs[idx].clone(),
None => PlatformSpec::Always,
};
// Third-party dependencies are built without including dev.
cargo_opts
.set_include_dev(false)
.set_initials_platform(initials_platform)
.set_platform(platform_spec)
.set_resolver(builder.resolver)
.add_omitted_packages(computed_map_build.excludes.iter());
let cargo_set = features
.into_cargo_set(&cargo_opts)
.expect("into_cargo_set processed successfully");
// Check the features for the cargo set to see if any further dependencies were
// built with a different result and weren't included in the hakari map
// originally.
for &(build_platform, feature_set) in cargo_set.all_features().iter() {
for feature_list in
feature_set.packages_with_features(DependencyDirection::Forward)
{
let dep = feature_list.package();
let dep_id = dep.id();
// This is "get or insert" because we could be adding whole new
// dependencies here rather than just new features to existing
// dependencies.
let v_mut = computed_map_build
.get_or_insert_mut(output_key.platform_idx, dep_id);
// Is it already present in the output?
let new_key = OutputKey {
platform_idx: output_key.platform_idx,
build_platform,
};
if map_build.is_inserted(new_key, dep_id) {
continue;
}
let this_list: BTreeSet<_> = feature_list.named_features().collect();
let already_present = v_mut.contains(build_platform, &this_list);
if !already_present {
// The feature list added by this dependency is non-unique.
v_mut.mark_fixed_up(build_platform, this_list);
add_extra.insert((output_key.platform_idx, dep_id));
}
}
}
}
if add_extra.is_empty() {
break;
}
map_build.insert_all(
add_extra.iter().map(|&(platform_idx, dep_id)| {
let v = computed_map_build
.get(platform_idx, dep_id)
.expect("full value should be present");
(platform_idx, dep_id, v)
}),
builder.output_single_feature,
unify_target_host,
);
}
}
let computed_map = computed_map_build.computed_map;
let output_map = map_build.finish(
&builder.final_excludes,
builder.dep_format_version,
builder.output_single_feature,
);
Self {
builder,
output_map,
computed_map,
}
}
}
/// The map used by Hakari to generate output TOML.
///
/// This is a two-level `BTreeMap`, where:
/// * the top-level keys are [`OutputKey`](OutputKey) instances.
/// * the inner map is keyed by dependency [`PackageId`](PackageId) instances, and the values are
/// the corresponding [`PackageMetadata`](PackageMetadata) for this dependency, and the set of
/// features enabled for this package.
///
/// This is an alias for the type of [`Hakari::output_map`](Hakari::output_map).
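///
/// # Example
///
/// A minimal sketch of walking the two-level map, assuming a computed [`Hakari`] named
/// `hakari` is in scope:
///
/// ```rust,no_run
/// # use guppy::MetadataCommand;
/// # use hakari::HakariBuilder;
/// # let graph = MetadataCommand::new().build_graph().unwrap();
/// # let hakari = HakariBuilder::new(&graph, None).unwrap().compute();
/// for (output_key, deps) in &hakari.output_map {
///     // `OutputKey` identifies the platform index and target/host build platform.
///     println!("{:?}:", output_key);
///     for (package_id, (metadata, features)) in deps {
///         println!("  {} ({}): {:?}", metadata.name(), package_id, features);
///     }
/// }
/// ```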
pub type OutputMap<'g> =
BTreeMap<OutputKey, BTreeMap<&'g PackageId, (PackageMetadata<'g>, BTreeSet<&'g str>)>>;
/// The map of all build results computed by Hakari.
///
/// The keys are the platform index and the dependency's package ID, and the values are
/// [`ComputedValue`](ComputedValue) instances that represent the different feature sets this
/// dependency is built with on both the host and target platforms.
///
/// The most interesting values are those whose inner maps have two or more elements: they
/// indicate dependencies built with more than one feature set, which therefore need to be unified.
///
/// This is an alias for the type of [`Hakari::computed_map`](Hakari::computed_map).
pub type ComputedMap<'g> = BTreeMap<(Option<usize>, &'g PackageId), ComputedValue<'g>>;
/// The values of a [`ComputedMap`](ComputedMap).
///
/// This represents a pair of `ComputedInnerMap` instances: one for the target platform and one for
/// the host. For more about the values, see the documentation for
/// [`ComputedInnerMap`](ComputedInnerMap).
#[derive(Clone, Debug, Default)]
pub struct ComputedValue<'g> {
/// The feature sets built on the target platform.
pub target_inner: ComputedInnerMap<'g>,
/// The feature sets built on the host platform.
pub host_inner: ComputedInnerMap<'g>,
}
/// A target map or a host map in a [`ComputedValue`](ComputedValue).
///
/// * The keys are sets of feature names (or empty for no features).
/// * The values are [`ComputedInnerValue`] instances.
pub type ComputedInnerMap<'g> = BTreeMap<BTreeSet<&'g str>, ComputedInnerValue<'g>>;
/// The values of [`ComputedInnerMap`].
#[derive(Clone, Debug, Default)]
pub struct ComputedInnerValue<'g> {
/// The workspace packages, their selected features, and whether dev-dependencies were included,
/// that cause the dependency (the key in `ComputedMap`) to be built with the feature set that
/// forms the key of `ComputedInnerMap`.
/// The entries are not defined to be in any particular order.
pub workspace_packages: Vec<(PackageMetadata<'g>, StandardFeatures, bool)>,
/// Whether at least one post-computation fixup was performed with this feature set.
pub fixed_up: bool,
}
impl<'g> ComputedInnerValue<'g> {
fn extend(&mut self, other: ComputedInnerValue<'g>) {
self.workspace_packages.extend(other.workspace_packages);
self.fixed_up |= other.fixed_up;
}
#[inline]
fn push(
&mut self,
package: PackageMetadata<'g>,
features: StandardFeatures,
include_dev: bool,
) {
self.workspace_packages
.push((package, features, include_dev));
}
}
#[derive(Debug)]
struct TraversalExcludes<'g, 'b> {
excludes: &'b HashSet<&'g PackageId>,
hakari_package: Option<&'g PackageId>,
}
impl<'g, 'b> TraversalExcludes<'g, 'b> {
fn iter(&self) -> impl Iterator<Item = &'g PackageId> + 'b {
self.excludes.iter().copied().chain(self.hakari_package)
}
fn is_excluded(&self, package_id: &PackageId) -> bool {
self.hakari_package == Some(package_id) || self.excludes.contains(package_id)
}
}
/// Intermediate build state used by Hakari.
#[derive(Debug)]
struct ComputedMapBuild<'g, 'b> {
excludes: TraversalExcludes<'g, 'b>,
computed_map: ComputedMap<'g>,
}
impl<'g, 'b> ComputedMapBuild<'g, 'b> {
fn new(builder: &'b HakariBuilder<'g>) -> Self {
// This was just None or All for a bit under the theory that feature sets are additive only,
// but unfortunately we cannot exploit this property because it doesn't account for the fact
// that some dependencies might not be built *at all*, under certain feature combinations.
//
// That's also why we simulate builds with and without dev-only dependencies in all cases.
//
// For example, for:
//
// ```toml
// [dependencies]
// dep = { version = "1", optional = true }
//
// [dev-dependencies]
// dep = { version = "1", optional = true, features = ["dev-feature"] }
//
// [features]
// default = ["dep"]
// extra = ["dep/extra", "dep/dev-feature"]
// ```
//
// | feature set | include dev | dep status |
// | ----------- | ----------- | ------------------ |
// | none | no | not built |
// | none | yes | not built |
// | default | no | no features |
// | default | yes | dev-feature |
// | all | no | extra, dev-feature |
// | all | yes | extra, dev-feature |
//
// (And there's further complexity possible with transitive deps as well.)
let features_include_dev = [
(StandardFeatures::None, false),
(StandardFeatures::None, true),
(StandardFeatures::Default, false),
(StandardFeatures::Default, true),
(StandardFeatures::All, false),
(StandardFeatures::All, true),
];
// Features for the "always" platform spec.
let always_features = features_include_dev
.iter()
.map(|&(features, include_dev)| (None, PlatformSpec::Always, features, include_dev));
// Features for specified platforms.
let specified_features =
features_include_dev
.iter()
.flat_map(|&(features, include_dev)| {
builder
.platforms
.iter()
.enumerate()
.map(move |(idx, platform)| {
(
Some(idx),
PlatformSpec::Platform(platform.clone()),
features,
include_dev,
)
})
});
let platforms_features: Vec<_> = always_features.chain(specified_features).collect();
let workspace = builder.graph.workspace();
let excludes = builder.make_traversal_excludes();
let features_only = builder.make_features_only();
let excludes_ref = &excludes;
let features_only_ref = &features_only;
let computed_map: ComputedMap<'g> = platforms_features
.into_par_iter()
// The cargo_set computation in the inner iterator is the most expensive part of the
// process, so use flat_map instead of flat_map_iter.
.flat_map(|(idx, platform_spec, feature_filter, include_dev)| {
let mut cargo_options = CargoOptions::new();
cargo_options
.set_include_dev(include_dev)
.set_resolver(builder.resolver)
.set_platform(platform_spec)
.add_omitted_packages(excludes.iter());
workspace.par_iter().map(move |workspace_package| {
if excludes_ref.is_excluded(workspace_package.id()) {
// Skip this package since it was excluded during traversal.
return BTreeMap::new();
}