6 Commits

Author SHA1 Message Date
Martin Weinelt
7123dd8981 flake.nix: update to nixos-25.11
And squashes eval warnings from accessing pkgs.hostPlatform.
2025-12-05 15:46:05 -05:00
Martin Weinelt
39b2c7c0da package.nix: update postgresql to 17
NixOS 25.11 does not ship with PostgreSQL 13 any more.
2025-12-05 15:46:05 -05:00
John Ericson
20a0857660 Revert "Deduplicate protocol code more with ServeProto::BasicClientConnection"
This reverts commit 58846b0a1c.
2025-12-05 15:46:05 -05:00
Joshua Leivenzon
23c6a292be GithubRefs: Allow arbitrary ref types
GitHub's reference list API does not actually restrict the specified type, so don't artificially restrict it.

The API does not actually make a distinction between the "type" and "prefix" at all, but this is maintained for backwards compatibility. The two are simply concatenated.
2025-12-05 15:46:05 -05:00
John Ericson
9ebc15e709 Upgrade Nix to 2.32 2025-10-20 21:20:16 -04:00
John Ericson
dbae951443 Deduplicate protocol code more with ServeProto::BasicClientConnection
I did this in Nix for this purpose, but didn't get around to actually
taking advantage of it here, until now.
2025-10-20 21:20:16 -04:00
6 changed files with 114 additions and 98 deletions

24
flake.lock generated
View File

@@ -3,16 +3,16 @@
"nix": { "nix": {
"flake": false, "flake": false,
"locked": { "locked": {
"lastModified": 1759956402, "lastModified": 1760573252,
"narHash": "sha256-CM27YK+KMi3HLRXqjPaJwkTabmKW+CDXOE3kMMtXH3s=", "narHash": "sha256-mcvNeNdJP5R7huOc8Neg0qZESx/0DMg8Fq6lsdx0x8U=",
"owner": "NixOS", "owner": "NixOS",
"repo": "nix", "repo": "nix",
"rev": "3019db2c87006817b6201113ad4ceee0c53c3b62", "rev": "3c39583e5512729f9c5a44c3b03b6467a2acd963",
"type": "github" "type": "github"
}, },
"original": { "original": {
"owner": "NixOS", "owner": "NixOS",
"ref": "2.31-maintenance", "ref": "2.32-maintenance",
"repo": "nix", "repo": "nix",
"type": "github" "type": "github"
} }
@@ -20,32 +20,32 @@
"nix-eval-jobs": { "nix-eval-jobs": {
"flake": false, "flake": false,
"locked": { "locked": {
"lastModified": 1757626891, "lastModified": 1760478325,
"narHash": "sha256-VrHPtHxVIboqgnw+tlCQepgtBOhBvU5hxbMHsPo8LAc=", "narHash": "sha256-hA+NOH8KDcsuvH7vJqSwk74PyZP3MtvI/l+CggZcnTc=",
"owner": "nix-community", "owner": "nix-community",
"repo": "nix-eval-jobs", "repo": "nix-eval-jobs",
"rev": "c975efc5b2bec0c1ff93c67de4a03306af258ff7", "rev": "daa42f9e9c84aeff1e325dd50fda321f53dfd02c",
"type": "github" "type": "github"
}, },
"original": { "original": {
"owner": "nix-community", "owner": "nix-community",
"ref": "v2.31.0", "ref": "v2.32.1",
"repo": "nix-eval-jobs", "repo": "nix-eval-jobs",
"type": "github" "type": "github"
} }
}, },
"nixpkgs": { "nixpkgs": {
"locked": { "locked": {
"lastModified": 1759652726, "lastModified": 1764020296,
"narHash": "sha256-2VjnimOYDRb3DZHyQ2WH2KCouFqYm9h0Rr007Al/WSA=", "narHash": "sha256-6zddwDs2n+n01l+1TG6PlyokDdXzu/oBmEejcH5L5+A=",
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "06b2985f0cc9eb4318bf607168f4b15af1e5e81d", "rev": "a320ce8e6e2cc6b4397eef214d202a50a4583829",
"type": "github" "type": "github"
}, },
"original": { "original": {
"owner": "NixOS", "owner": "NixOS",
"ref": "nixos-25.05-small", "ref": "nixos-25.11-small",
"repo": "nixpkgs", "repo": "nixpkgs",
"type": "github" "type": "github"
} }

View File

@@ -1,16 +1,16 @@
{ {
description = "A Nix-based continuous build system"; description = "A Nix-based continuous build system";
inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05-small"; inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.11-small";
inputs.nix = { inputs.nix = {
url = "github:NixOS/nix/2.31-maintenance"; url = "github:NixOS/nix/2.32-maintenance";
# We want to control the deps precisely # We want to control the deps precisely
flake = false; flake = false;
}; };
inputs.nix-eval-jobs = { inputs.nix-eval-jobs = {
url = "github:nix-community/nix-eval-jobs/v2.31.0"; url = "github:nix-community/nix-eval-jobs/v2.32.1";
# We want to control the deps precisely # We want to control the deps precisely
flake = false; flake = false;
}; };
@@ -59,7 +59,7 @@
manual = forEachSystem (system: let manual = forEachSystem (system: let
pkgs = nixpkgs.legacyPackages.${system}; pkgs = nixpkgs.legacyPackages.${system};
hydra = self.packages.${pkgs.hostPlatform.system}.hydra; hydra = self.packages.${pkgs.stdenv.hostPlatform.system}.hydra;
in in
pkgs.runCommand "hydra-manual-${hydra.version}" { } pkgs.runCommand "hydra-manual-${hydra.version}" { }
'' ''

View File

@@ -4,7 +4,7 @@
hydra = { pkgs, lib,... }: { hydra = { pkgs, lib,... }: {
_file = ./default.nix; _file = ./default.nix;
imports = [ ./hydra.nix ]; imports = [ ./hydra.nix ];
services.hydra-dev.package = lib.mkDefault self.packages.${pkgs.hostPlatform.system}.hydra; services.hydra-dev.package = lib.mkDefault self.packages.${pkgs.stdenv.hostPlatform.system}.hydra;
}; };
hydraTest = { pkgs, ... }: { hydraTest = { pkgs, ... }: {

View File

@@ -31,7 +31,7 @@
, perl , perl
, pixz , pixz
, boost , boost
, postgresql_13 , postgresql_17
, nlohmann_json , nlohmann_json
, prometheus-cpp , prometheus-cpp
@@ -192,7 +192,7 @@ stdenv.mkDerivation (finalAttrs: {
subversion subversion
breezy breezy
openldap openldap
postgresql_13 postgresql_17
pixz pixz
nix-eval-jobs nix-eval-jobs
]; ];

View File

@@ -14,6 +14,7 @@
#include <nix/util/current-process.hh> #include <nix/util/current-process.hh>
#include <nix/util/processes.hh> #include <nix/util/processes.hh>
#include <nix/util/util.hh> #include <nix/util/util.hh>
#include <nix/store/export-import.hh>
#include <nix/store/serve-protocol.hh> #include <nix/store/serve-protocol.hh>
#include <nix/store/serve-protocol-impl.hh> #include <nix/store/serve-protocol-impl.hh>
#include <nix/store/ssh.hh> #include <nix/store/ssh.hh>
@@ -104,7 +105,7 @@ static void copyClosureTo(
std::chrono::seconds(600)); std::chrono::seconds(600));
conn.to << ServeProto::Command::ImportPaths; conn.to << ServeProto::Command::ImportPaths;
destStore.exportPaths(missing, conn.to); exportPaths(destStore, missing, conn.to);
conn.to.flush(); conn.to.flush();
if (readInt(conn.from) != 1) if (readInt(conn.from) != 1)
@@ -262,16 +263,18 @@ static BuildResult performBuild(
// Since this a `BasicDerivation`, `staticOutputHashes` will not // Since this a `BasicDerivation`, `staticOutputHashes` will not
// do any real work. // do any real work.
auto outputHashes = staticOutputHashes(localStore, drv); auto outputHashes = staticOutputHashes(localStore, drv);
for (auto & [outputName, output] : drvOutputs) { if (auto * successP = result.tryGetSuccess()) {
auto outputPath = output.second; for (auto & [outputName, output] : drvOutputs) {
// We've just asserted that the output paths of the derivation auto outputPath = output.second;
// were known // We've just asserted that the output paths of the derivation
assert(outputPath); // were known
auto outputHash = outputHashes.at(outputName); assert(outputPath);
auto drvOutput = DrvOutput { outputHash, outputName }; auto outputHash = outputHashes.at(outputName);
result.builtOutputs.insert_or_assign( auto drvOutput = DrvOutput { outputHash, outputName };
std::move(outputName), successP->builtOutputs.insert_or_assign(
Realisation { drvOutput, *outputPath }); std::move(outputName),
Realisation { drvOutput, *outputPath });
}
} }
} }
@@ -336,54 +339,68 @@ void RemoteResult::updateWithBuildResult(const nix::BuildResult & buildResult)
startTime = buildResult.startTime; startTime = buildResult.startTime;
stopTime = buildResult.stopTime; stopTime = buildResult.stopTime;
timesBuilt = buildResult.timesBuilt; timesBuilt = buildResult.timesBuilt;
errorMsg = buildResult.errorMsg;
isNonDeterministic = buildResult.isNonDeterministic;
switch ((BuildResult::Status) buildResult.status) { std::visit(overloaded{
case BuildResult::Built: [&](const BuildResult::Success & success) {
stepStatus = bsSuccess; stepStatus = bsSuccess;
break; switch (success.status) {
case BuildResult::Substituted: case BuildResult::Success::Built:
case BuildResult::AlreadyValid: break;
stepStatus = bsSuccess; case BuildResult::Success::Substituted:
isCached = true; case BuildResult::Success::AlreadyValid:
break; case BuildResult::Success::ResolvesToAlreadyValid:
case BuildResult::PermanentFailure: isCached = true;
stepStatus = bsFailed; break;
canCache = true; default:
errorMsg = ""; assert(false);
break; }
case BuildResult::InputRejected: },
case BuildResult::OutputRejected: [&](const BuildResult::Failure & failure) {
stepStatus = bsFailed; errorMsg = failure.errorMsg;
canCache = true; isNonDeterministic = failure.isNonDeterministic;
break; switch (failure.status) {
case BuildResult::TransientFailure: case BuildResult::Failure::PermanentFailure:
stepStatus = bsFailed; stepStatus = bsFailed;
canRetry = true; canCache = true;
errorMsg = ""; errorMsg = "";
break; break;
case BuildResult::TimedOut: case BuildResult::Failure::InputRejected:
stepStatus = bsTimedOut; case BuildResult::Failure::OutputRejected:
errorMsg = ""; stepStatus = bsFailed;
break; canCache = true;
case BuildResult::MiscFailure: break;
stepStatus = bsAborted; case BuildResult::Failure::TransientFailure:
canRetry = true; stepStatus = bsFailed;
break; canRetry = true;
case BuildResult::LogLimitExceeded: errorMsg = "";
stepStatus = bsLogLimitExceeded; break;
break; case BuildResult::Failure::TimedOut:
case BuildResult::NotDeterministic: stepStatus = bsTimedOut;
stepStatus = bsNotDeterministic; errorMsg = "";
canRetry = false; break;
canCache = true; case BuildResult::Failure::MiscFailure:
break; stepStatus = bsAborted;
default: canRetry = true;
stepStatus = bsAborted; break;
break; case BuildResult::Failure::LogLimitExceeded:
} stepStatus = bsLogLimitExceeded;
break;
case BuildResult::Failure::NotDeterministic:
stepStatus = bsNotDeterministic;
canRetry = false;
canCache = true;
break;
case BuildResult::Failure::CachedFailure:
case BuildResult::Failure::DependencyFailed:
case BuildResult::Failure::NoSubstituters:
case BuildResult::Failure::HashMismatch:
stepStatus = bsAborted;
break;
default:
assert(false);
}
},
}, buildResult.inner);
} }
/* Utility guard object to auto-release a semaphore on destruction. */ /* Utility guard object to auto-release a semaphore on destruction. */
@@ -405,7 +422,7 @@ void State::buildRemote(ref<Store> destStore,
std::function<void(StepState)> updateStep, std::function<void(StepState)> updateStep,
NarMemberDatas & narMembers) NarMemberDatas & narMembers)
{ {
assert(BuildResult::TimedOut == 8); assert(BuildResult::Failure::TimedOut == 8);
auto [logFile, logFD] = build_remote::openLogFile(logDir, step->drvPath); auto [logFile, logFD] = build_remote::openLogFile(logDir, step->drvPath);
AutoDelete logFileDel(logFile, false); AutoDelete logFileDel(logFile, false);
@@ -514,7 +531,7 @@ void State::buildRemote(ref<Store> destStore,
updateStep(ssBuilding); updateStep(ssBuilding);
BuildResult buildResult = build_remote::performBuild( auto buildResult = build_remote::performBuild(
conn, conn,
*localStore, *localStore,
step->drvPath, step->drvPath,
@@ -556,8 +573,9 @@ void State::buildRemote(ref<Store> destStore,
wakeDispatcher(); wakeDispatcher();
StorePathSet outputs; StorePathSet outputs;
for (auto & [_, realisation] : buildResult.builtOutputs) if (auto * successP = buildResult.tryGetSuccess())
outputs.insert(realisation.outPath); for (auto & [_, realisation] : successP->builtOutputs)
outputs.insert(realisation.outPath);
/* Copy the output paths. */ /* Copy the output paths. */
if (!machine->isLocalhost() || localStore != std::shared_ptr<Store>(destStore)) { if (!machine->isLocalhost() || localStore != std::shared_ptr<Store>(destStore)) {
@@ -590,15 +608,17 @@ void State::buildRemote(ref<Store> destStore,
/* Register the outputs of the newly built drv */ /* Register the outputs of the newly built drv */
if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) { if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) {
auto outputHashes = staticOutputHashes(*localStore, *step->drv); auto outputHashes = staticOutputHashes(*localStore, *step->drv);
for (auto & [outputName, realisation] : buildResult.builtOutputs) { if (auto * successP = buildResult.tryGetSuccess()) {
// Register the resolved drv output for (auto & [outputName, realisation] : successP->builtOutputs) {
destStore->registerDrvOutput(realisation); // Register the resolved drv output
destStore->registerDrvOutput(realisation);
// Also register the unresolved one // Also register the unresolved one
auto unresolvedRealisation = realisation; auto unresolvedRealisation = realisation;
unresolvedRealisation.signatures.clear(); unresolvedRealisation.signatures.clear();
unresolvedRealisation.id.drvHash = outputHashes.at(outputName); unresolvedRealisation.id.drvHash = outputHashes.at(outputName);
destStore->registerDrvOutput(unresolvedRealisation); destStore->registerDrvOutput(unresolvedRealisation);
}
} }
} }

View File

@@ -18,9 +18,8 @@ tags) from GitHub following a certain naming scheme
=head1 DESCRIPTION =head1 DESCRIPTION
This plugin reads the list of branches or tags using GitHub's REST API. The name This plugin reads the list of branches or tags using GitHub's REST API. This
of the reference must follow a particular prefix. This list is stored in the list is stored in the nix-store and used as an input to declarative jobsets.
nix-store and used as an input to declarative jobsets.
=head1 CONFIGURATION =head1 CONFIGURATION
@@ -34,7 +33,7 @@ The declarative project C<spec.json> file must contains an input such as
"pulls": { "pulls": {
"type": "github_refs", "type": "github_refs",
"value": "[owner] [repo] heads|tags - [prefix]", "value": "[owner] [repo] [type] - [prefix]",
"emailresponsible": false "emailresponsible": false
} }
@@ -42,12 +41,11 @@ In the above snippet, C<[owner]> is the repository owner and C<[repo]> is the
repository name. Also note a literal C<->, which is placed there for the future repository name. Also note a literal C<->, which is placed there for the future
use. use.
C<heads|tags> denotes that one of these two is allowed, that is, the third C<[type]> is the type of ref to list. Typical values are "heads", "tags", and
position should hold either the C<heads> or the C<tags> keyword. In case of the former, the plugin "pull". "." will include all types.
will fetch all branches, while in case of the latter, it will fetch the tags.
C<prefix> denotes the prefix the reference name must start with, in order to be C<prefix> denotes the prefix the reference name must start with, in order to be
included. included. "." will include all references.
For example, C<"value": "nixos hydra heads - release/"> refers to For example, C<"value": "nixos hydra heads - release/"> refers to
L<https://github.com/nixos/hydra> repository, and will fetch all branches that L<https://github.com/nixos/hydra> repository, and will fetch all branches that
@@ -102,8 +100,6 @@ sub fetchInput {
return undef if $input_type ne "github_refs"; return undef if $input_type ne "github_refs";
my ($owner, $repo, $type, $fut, $prefix) = split ' ', $value; my ($owner, $repo, $type, $fut, $prefix) = split ' ', $value;
die "type field is neither 'heads' nor 'tags', but '$type'"
unless $type eq 'heads' or $type eq 'tags';
my $auth = $self->{config}->{github_authorization}->{$owner}; my $auth = $self->{config}->{github_authorization}->{$owner};
my $githubEndpoint = $self->{config}->{github_endpoint} // "https://api.github.com"; my $githubEndpoint = $self->{config}->{github_endpoint} // "https://api.github.com";