From 4ea646130c5e637b883e9656ee5f6a71d1a728ab Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 8 Dec 2021 11:44:08 -0500 Subject: [PATCH 001/401] RunCommand: split out documentation, fixup the matcher syntax --- doc/manual/src/SUMMARY.md | 1 + doc/manual/src/plugins/README.md | 4 +++- doc/manual/src/plugins/RunCommand.md | 32 ++++++++++++++++++++++++++++ 3 files changed, 36 insertions(+), 1 deletion(-) create mode 100644 doc/manual/src/plugins/RunCommand.md diff --git a/doc/manual/src/SUMMARY.md b/doc/manual/src/SUMMARY.md index 80e73112..357e795a 100644 --- a/doc/manual/src/SUMMARY.md +++ b/doc/manual/src/SUMMARY.md @@ -7,6 +7,7 @@ - [Hydra jobs](./jobs.md) - [Plugins](./plugins/README.md) - [Declarative Projects](./plugins/declarative-projects.md) + - [RunCommand](./plugins/RunCommand.md) - [Using the external API](api.md) - [Webhooks](webhooks.md) - [Monitoring Hydra](./monitoring/README.md) diff --git a/doc/manual/src/plugins/README.md b/doc/manual/src/plugins/README.md index b5486fdb..26ee2649 100644 --- a/doc/manual/src/plugins/README.md +++ b/doc/manual/src/plugins/README.md @@ -192,10 +192,12 @@ Writes InfluxDB events when a builds finished. - `influxdb.url` - `influxdb.db` -## Run command +## RunCommand Runs a shell command when the build is finished. +See [The RunCommand Plugin](./RunCommand.md) for more information. + ### Configuration options: - `runcommand.[].job` diff --git a/doc/manual/src/plugins/RunCommand.md b/doc/manual/src/plugins/RunCommand.md new file mode 100644 index 00000000..8b1818cc --- /dev/null +++ b/doc/manual/src/plugins/RunCommand.md @@ -0,0 +1,32 @@ +## The RunCommand Plugin + +Hydra supports executing a program after certain builds finish. +This behavior is disabled by default. + +Hydra executes these commands under the `hydra-notify` service. + +### Static Commands + +Configure specific commands to execute after the specified matching job finishes. 
+ +#### Configuration + +- `runcommand.[].job` + +A matcher for jobs to match in the format `project:jobset:job`. Defaults to `*:*:*`. + +**Note:** This matcher format is not a regular expression. +The `*` is a wildcard for that entire section, partial matches are not supported. + +- `runcommand.[].command` + +Command to run. Can use the `$HYDRA_JSON` environment variable to access information about the build. + +### Example + +```xml + + job = myProject:*:* + command = cat $HYDRA_JSON > /tmp/hydra-output + +``` From 6ffc93c01a55c86b50d7a20dd5a474a9a5389be4 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 8 Dec 2021 12:37:13 -0500 Subject: [PATCH 002/401] RunCommand: write documentation for dynamic commands --- doc/manual/src/plugins/RunCommand.md | 50 ++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/doc/manual/src/plugins/RunCommand.md b/doc/manual/src/plugins/RunCommand.md index 8b1818cc..b186be80 100644 --- a/doc/manual/src/plugins/RunCommand.md +++ b/doc/manual/src/plugins/RunCommand.md @@ -30,3 +30,53 @@ Command to run. Can use the `$HYDRA_JSON` environment variable to access informa command = cat $HYDRA_JSON > /tmp/hydra-output ``` + +### Dynamic Commands + +Hydra can optionally run RunCommand hooks defined dynamically by the jobset. +This must be turned on explicitly in the `hydra.conf` and per jobset. + +#### Behavior + +Hydra will execute any program defined under the `runCommandHook` attribute set. These jobs must have a single output named `out`, and that output must be an executable file located directly at `$out`. + +#### Security Properties + +Safely deploying dynamic commands requires careful design of your Hydra jobs. Allowing arbitrary users to define attributes in your top level attribute set will allow that user to execute code on your Hydra. + +If a jobset has dynamic commands enabled, you must ensure only trusted users can define top level attributes. 
+ + +#### Configuration + +- `dynamicruncommand.enable` + +Set to 1 to enable dynamic RunCommand program execution. + +#### Example + +In your Hydra configuration, specify: + +```xml + + enable = 1 + +``` + +Then create a job named `runCommandHook.example` in your jobset: + +``` +{ pkgs, ... }: { + runCommandHook = { + recurseForDerivations = true; + + example = pkgs.writeScript "run-me" '' + #!${pkgs.runtimeShell} + + ${pkgs.jq}/bin/jq . "$HYDRA_JSON" + ''; + }; +} +``` + +After the `runcommandHook.example` build finishes that script will execute. From ea311a0eb4849cca49e328719e721aa33695e0b1 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 8 Dec 2021 11:38:14 -0500 Subject: [PATCH 003/401] RunCommand: enable the plugin if dynamicruncommand is set --- src/lib/Hydra/Plugin/RunCommand.pm | 24 +++++- t/Hydra/Plugin/RunCommand/matcher.t | 119 +++++++++++++++++++++++++++- 2 files changed, 140 insertions(+), 3 deletions(-) diff --git a/src/lib/Hydra/Plugin/RunCommand.pm b/src/lib/Hydra/Plugin/RunCommand.pm index 401942c7..8404f280 100644 --- a/src/lib/Hydra/Plugin/RunCommand.pm +++ b/src/lib/Hydra/Plugin/RunCommand.pm @@ -12,7 +12,29 @@ use Try::Tiny; sub isEnabled { my ($self) = @_; - return defined $self->{config}->{runcommand}; + + return areStaticCommandsEnabled($self->{config}) || areDynamicCommandsEnabled($self->{config}); +} + +sub areStaticCommandsEnabled { + my ($config) = @_; + + if (defined $config->{runcommand}) { + return 1; + } + + return 0; +} + +sub areDynamicCommandsEnabled { + my ($config) = @_; + + if ((defined $config->{dynamicruncommand}) + && $config->{dynamicruncommand}->{enable}) { + return 1; + } + + return 0; } sub configSectionMatches { diff --git a/t/Hydra/Plugin/RunCommand/matcher.t b/t/Hydra/Plugin/RunCommand/matcher.t index bc40ba77..9797f7e1 100644 --- a/t/Hydra/Plugin/RunCommand/matcher.t +++ b/t/Hydra/Plugin/RunCommand/matcher.t @@ -7,13 +7,13 @@ use Hydra::Plugin::RunCommand; subtest "isEnabled" => sub { is( 
Hydra::Plugin::RunCommand::isEnabled({}), - "", + 0, "Disabled by default." ); is( Hydra::Plugin::RunCommand::isEnabled({ config => {}}), - "", + 0, "Disabled by default." ); @@ -22,6 +22,121 @@ subtest "isEnabled" => sub { 1, "Enabled if any runcommand blocks exist." ); + + is( + Hydra::Plugin::RunCommand::isEnabled({ config => { dynamicruncommand => {}}}), + 0, + "Not enabled if an empty dynamicruncommand blocks exist." + ); + + is( + Hydra::Plugin::RunCommand::isEnabled({ config => { dynamicruncommand => { enable => 0 }}}), + 0, + "Not enabled if a dynamicruncommand blocks exist without enable being set to 1." + ); + + is( + Hydra::Plugin::RunCommand::isEnabled({ config => { dynamicruncommand => { enable => 1 }}}), + 1, + "Enabled if a dynamicruncommand blocks exist with enable being set to 1." + ); + + is( + Hydra::Plugin::RunCommand::isEnabled({ config => { + runcommand => {}, + dynamicruncommand => { enable => 0 } + }}), + 1, + "Enabled if a runcommand config block exists, even if a dynamicruncommand is explicitly disabled." + ); +}; + +subtest "areStaticCommandsEnabled" => sub { + is( + Hydra::Plugin::RunCommand::areStaticCommandsEnabled({}), + 0, + "Disabled by default." + ); + + is( + Hydra::Plugin::RunCommand::areStaticCommandsEnabled({}), + 0, + "Disabled by default." + ); + + is( + Hydra::Plugin::RunCommand::areStaticCommandsEnabled({ runcommand => {}}), + 1, + "Enabled if any runcommand blocks exist." + ); + + is( + Hydra::Plugin::RunCommand::areStaticCommandsEnabled({ dynamicruncommand => {}}), + 0, + "Not enabled by dynamicruncommand blocks." + ); + + is( + Hydra::Plugin::RunCommand::areStaticCommandsEnabled({ dynamicruncommand => { enable => 0 }}), + 0, + "Not enabled by dynamicruncommand blocks." + ); + + is( + Hydra::Plugin::RunCommand::areStaticCommandsEnabled({ dynamicruncommand => { enable => 1 }}), + 0, + "Not enabled by dynamicruncommand blocks." 
+ ); + + is( + Hydra::Plugin::RunCommand::areStaticCommandsEnabled({ + runcommand => {}, + dynamicruncommand => { enable => 0 } + }), + 1, + "Enabled if a runcommand config block exists, even if a dynamicruncommand is explicitly disabled." + ); +}; + +subtest "areDynamicCommandsEnabled" => sub { + is( + Hydra::Plugin::RunCommand::areDynamicCommandsEnabled({}), + 0, + "Disabled by default." + ); + + is( + Hydra::Plugin::RunCommand::areDynamicCommandsEnabled({ runcommand => {}}), + 0, + "Disabled even if any runcommand blocks exist." + ); + + is( + Hydra::Plugin::RunCommand::areDynamicCommandsEnabled({ dynamicruncommand => {}}), + 0, + "Not enabled if an empty dynamicruncommand blocks exist." + ); + + is( + Hydra::Plugin::RunCommand::areDynamicCommandsEnabled({ dynamicruncommand => { enable => 0 }}), + 0, + "Not enabled if a dynamicruncommand blocks exist without enable being set to 1." + ); + + is( + Hydra::Plugin::RunCommand::areDynamicCommandsEnabled({ dynamicruncommand => { enable => 1 }}), + 1, + "Enabled if a dynamicruncommand blocks exist with enable being set to 1." + ); + + is( + Hydra::Plugin::RunCommand::areDynamicCommandsEnabled({ + runcommand => {}, + dynamicruncommand => { enable => 0 } + }), + 0, + "Disabled if dynamicruncommand is explicitly disabled." + ); }; subtest "configSectionMatches" => sub { From e56c49333f7d31654661f0f2f3b3d86a6ccd31cb Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 8 Dec 2021 16:03:43 -0500 Subject: [PATCH 004/401] RunCommand: Add a WIP execution of dynamic commands This in-progress feature will run a dynamically generated set of buildFinished hooks, which must be nested under the `runCommandHook.*` attribute set. This implementation is not very good, with some to-dos: 1. Only run if the build succeeded 2. Verify the output is named $out and that it is an executable file (or a symlink to a file) 3. 
Require the jobset itself have a flag enabling the feature, since this feature can be a bit dangerous if various people of different trust levels can create the jobs. --- src/lib/Hydra/Plugin/RunCommand.pm | 33 +++++++-- t/Hydra/Plugin/RunCommand/fanout.t | 108 ++++++++++++++++++++++++++++ t/Hydra/Plugin/RunCommand/matcher.t | 40 ----------- t/jobs/runcommand-dynamic.nix | 27 +++++++ 4 files changed, 161 insertions(+), 47 deletions(-) create mode 100644 t/Hydra/Plugin/RunCommand/fanout.t create mode 100644 t/jobs/runcommand-dynamic.nix diff --git a/src/lib/Hydra/Plugin/RunCommand.pm b/src/lib/Hydra/Plugin/RunCommand.pm index 8404f280..92acc326 100644 --- a/src/lib/Hydra/Plugin/RunCommand.pm +++ b/src/lib/Hydra/Plugin/RunCommand.pm @@ -65,10 +65,11 @@ sub eventMatches { } sub fanoutToCommands { - my ($config, $event, $project, $jobset, $job) = @_; + my ($config, $event, $build) = @_; my @commands; + # Calculate all the statically defined commands to execute my $cfg = $config->{runcommand}; my @config = defined $cfg ? ref $cfg eq "ARRAY" ? @$cfg : ($cfg) : (); @@ -77,9 +78,10 @@ sub fanoutToCommands { next unless eventMatches($conf, $event); next unless configSectionMatches( $matcher, - $project, - $jobset, - $job); + $build->get_column('project'), + $build->get_column('jobset'), + $build->get_column('job') + ); if (!defined($conf->{command})) { warn " section for '$matcher' lacks a 'command' option"; @@ -92,6 +94,25 @@ sub fanoutToCommands { }) } + # Calculate all dynamically defined commands to execute + if (areDynamicCommandsEnabled($config)) { + # missing test cases: + # + # 1. is it enabled on the jobset? + # 2. what if the result is a directory? + # 3. what if the job doens't have an out? + # 4. what if the build failed? 
+ my $job = $build->get_column('job'); + + if ($job =~ "^runCommandHook\.") { + my $out = $build->buildoutputs->find({name => "out"}); + push(@commands, { + matcher => "DynamicRunCommand($job)", + command => $out->path + }) + } + } + return \@commands; } @@ -160,9 +181,7 @@ sub buildFinished { my $commandsToRun = fanoutToCommands( $self->{config}, $event, - $build->project->get_column('name'), - $build->jobset->get_column('name'), - $build->get_column('job') + $build ); if (@$commandsToRun == 0) { diff --git a/t/Hydra/Plugin/RunCommand/fanout.t b/t/Hydra/Plugin/RunCommand/fanout.t new file mode 100644 index 00000000..d3a7b98a --- /dev/null +++ b/t/Hydra/Plugin/RunCommand/fanout.t @@ -0,0 +1,108 @@ +use strict; +use warnings; +use Setup; + +my %ctx = test_init(); + +use Test2::V0; +use Hydra::Plugin::RunCommand; + +require Hydra::Schema; +require Hydra::Model::DB; + +use Test2::V0; + +my $db = Hydra::Model::DB->new; +hydra_setup($db); + +my $project = $db->resultset('Projects')->create({name => "tests", displayname => "", owner => "root"}); + +my $jobset = createBaseJobset("basic", "runcommand-dynamic.nix", $ctx{jobsdir}); + +ok(evalSucceeds($jobset), "Evaluating jobs/runcommand-dynamic.nix should exit with return code 0"); +is(nrQueuedBuildsForJobset($jobset), 1, "Evaluating jobs/runcommand-dynamic.nix should result in 1 build1"); + +(my $build) = queuedBuildsForJobset($jobset); + +is($build->job, "runCommandHook.example", "The only job should be runCommandHook.example"); +ok(runBuild($build), "Build should exit with return code 0"); +my $newbuild = $db->resultset('Builds')->find($build->id); +is($newbuild->finished, 1, "Build should be finished."); +is($newbuild->buildstatus, 0, "Build should have buildstatus 0."); + +subtest "fanoutToCommands" => sub { + my $config = { + runcommand => [ + { + job => "", + command => "foo" + }, + { + job => "tests:*:*", + command => "bar" + }, + { + job => "tests:basic:nomatch", + command => "baz" + } + ] + }; + + is( + 
Hydra::Plugin::RunCommand::fanoutToCommands( + $config, + "buildFinished", + $newbuild + ), + [ + { + matcher => "", + command => "foo" + }, + { + matcher => "tests:*:*", + command => "bar" + } + ], + "fanoutToCommands returns a command per matching job" + ); +}; + +subtest "fanoutToCommandsWithDynamicRunCommandSupport" => sub { + like( + $build->buildoutputs->find({name => "out"})->path, + qr/my-build-product$/, + "The way we find the out path is reasonable" + ); + + my $config = { + dynamicruncommand => { enable => 1 }, + runcommand => [ + { + job => "tests:basic:*", + command => "baz" + } + ] + }; + + is( + Hydra::Plugin::RunCommand::fanoutToCommands( + $config, + "buildFinished", + $build + ), + [ + { + matcher => "tests:basic:*", + command => "baz" + }, + { + matcher => "DynamicRunCommand(runCommandHook.example)", + command => $build->buildoutputs->find({name => "out"})->path + } + ], + "fanoutToCommands returns a command per matching job" + ); +}; + +done_testing; diff --git a/t/Hydra/Plugin/RunCommand/matcher.t b/t/Hydra/Plugin/RunCommand/matcher.t index 9797f7e1..ca74a84c 100644 --- a/t/Hydra/Plugin/RunCommand/matcher.t +++ b/t/Hydra/Plugin/RunCommand/matcher.t @@ -249,44 +249,4 @@ subtest "eventMatches" => sub { ); }; -subtest "fanoutToCommands" => sub { - my $config = { - runcommand => [ - { - job => "", - command => "foo" - }, - { - job => "project:*:*", - command => "bar" - }, - { - job => "project:jobset:nomatch", - command => "baz" - } - ] - }; - - is( - Hydra::Plugin::RunCommand::fanoutToCommands( - $config, - "buildFinished", - "project", - "jobset", - "job" - ), - [ - { - matcher => "", - command => "foo" - }, - { - matcher => "project:*:*", - command => "bar" - } - ], - "fanoutToCommands returns a command per matching job" - ); -}; - done_testing; diff --git a/t/jobs/runcommand-dynamic.nix b/t/jobs/runcommand-dynamic.nix new file mode 100644 index 00000000..cf231f8f --- /dev/null +++ b/t/jobs/runcommand-dynamic.nix @@ -0,0 +1,27 @@ +with import 
./config.nix; +{ + runCommandHook.example = mkDerivation + { + name = "my-build-product"; + builder = "/bin/sh"; + outputs = [ "out" "bin" ]; + args = [ + ( + builtins.toFile "builder.sh" '' + #! /bin/sh + + echo "$PATH" + + mkdir $bin + echo "foo" > $bin/bar + + metrics=$out/nix-support/hydra-metrics + mkdir -p "$(dirname "$metrics")" + echo "lineCoverage 18 %" >> "$metrics" + echo "maxResident 27 KiB" >> "$metrics" + '' + ) + ]; + }; + +} From e7f68045f445e4c32363d2e9f6cf9fa917fa67e1 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 14 Dec 2021 16:31:19 -0500 Subject: [PATCH 005/401] DynamicRunCommand: pull out the function determining if a build is eligible for execution under dynamic run commands. --- src/lib/Hydra/Plugin/RunCommand.pm | 15 ++++++++++--- t/Hydra/Plugin/RunCommand/fanout.t | 34 ++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 3 deletions(-) diff --git a/src/lib/Hydra/Plugin/RunCommand.pm b/src/lib/Hydra/Plugin/RunCommand.pm index 92acc326..8d3cf35a 100644 --- a/src/lib/Hydra/Plugin/RunCommand.pm +++ b/src/lib/Hydra/Plugin/RunCommand.pm @@ -37,6 +37,16 @@ sub areDynamicCommandsEnabled { return 0; } +sub isBuildEligibleForDynamicRunCommand { + my ($build) = @_; + + if ($build->get_column("job") =~ "^runCommandHook\..+") { + return 1; + } + + return 0; +} + sub configSectionMatches { my ($name, $project, $jobset, $job) = @_; @@ -102,9 +112,8 @@ sub fanoutToCommands { # 2. what if the result is a directory? # 3. what if the job doens't have an out? # 4. what if the build failed? 
- my $job = $build->get_column('job'); - - if ($job =~ "^runCommandHook\.") { + if (isBuildEligibleForDynamicRunCommand($build)) { + my $job = $build->get_column('job'); my $out = $build->buildoutputs->find({name => "out"}); push(@commands, { matcher => "DynamicRunCommand($job)", diff --git a/t/Hydra/Plugin/RunCommand/fanout.t b/t/Hydra/Plugin/RunCommand/fanout.t index d3a7b98a..2edcb390 100644 --- a/t/Hydra/Plugin/RunCommand/fanout.t +++ b/t/Hydra/Plugin/RunCommand/fanout.t @@ -105,4 +105,38 @@ subtest "fanoutToCommandsWithDynamicRunCommandSupport" => sub { ); }; +subtest "isBuildEligibleForDynamicRunCommand" => sub { + my $build = Hydra::Schema::Result::Builds->new({ + "job" => "foo bar baz" + }); + + is( + Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($build), + 0, + "The job name does not match" + ); + + $build->set_column("job", "runCommandHook"); + is( + Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($build), + 0, + "The job name does not match" + ); + + $build->set_column("job", "runCommandHook."); + is( + Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($build), + 0, + "The job name does not match" + ); + + $build->set_column("job", "runCommandHook.a"); + is( + Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($build), + 1, + "The job name does match" + ); +}; + + done_testing; From c2be27e82b7d31ff585a5cb90b4d777d3b9c3bf2 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 14 Dec 2021 21:39:13 -0500 Subject: [PATCH 006/401] fanout.t: switch to makeAndEvaluateJobset --- t/Hydra/Plugin/RunCommand/fanout.t | 39 ++++++++++-------------------- 1 file changed, 13 insertions(+), 26 deletions(-) diff --git a/t/Hydra/Plugin/RunCommand/fanout.t b/t/Hydra/Plugin/RunCommand/fanout.t index 2edcb390..d9f67f14 100644 --- a/t/Hydra/Plugin/RunCommand/fanout.t +++ b/t/Hydra/Plugin/RunCommand/fanout.t @@ -1,34 +1,21 @@ use strict; use warnings; use Setup; - -my %ctx = test_init(); - use Test2::V0; use 
Hydra::Plugin::RunCommand; -require Hydra::Schema; -require Hydra::Model::DB; +my $ctx = test_context(); -use Test2::V0; +my $builds = $ctx->makeAndEvaluateJobset( + expression => "runcommand-dynamic.nix", + build => 1 +); -my $db = Hydra::Model::DB->new; -hydra_setup($db); - -my $project = $db->resultset('Projects')->create({name => "tests", displayname => "", owner => "root"}); - -my $jobset = createBaseJobset("basic", "runcommand-dynamic.nix", $ctx{jobsdir}); - -ok(evalSucceeds($jobset), "Evaluating jobs/runcommand-dynamic.nix should exit with return code 0"); -is(nrQueuedBuildsForJobset($jobset), 1, "Evaluating jobs/runcommand-dynamic.nix should result in 1 build1"); - -(my $build) = queuedBuildsForJobset($jobset); +my $build = $builds->{"runCommandHook.example"}; is($build->job, "runCommandHook.example", "The only job should be runCommandHook.example"); -ok(runBuild($build), "Build should exit with return code 0"); -my $newbuild = $db->resultset('Builds')->find($build->id); -is($newbuild->finished, 1, "Build should be finished."); -is($newbuild->buildstatus, 0, "Build should have buildstatus 0."); +is($build->finished, 1, "Build should be finished."); +is($build->buildstatus, 0, "Build should have buildstatus 0."); subtest "fanoutToCommands" => sub { my $config = { @@ -38,7 +25,7 @@ subtest "fanoutToCommands" => sub { command => "foo" }, { - job => "tests:*:*", + job => "*:*:*", command => "bar" }, { @@ -52,7 +39,7 @@ subtest "fanoutToCommands" => sub { Hydra::Plugin::RunCommand::fanoutToCommands( $config, "buildFinished", - $newbuild + $build ), [ { @@ -60,7 +47,7 @@ subtest "fanoutToCommands" => sub { command => "foo" }, { - matcher => "tests:*:*", + matcher => "*:*:*", command => "bar" } ], @@ -79,7 +66,7 @@ subtest "fanoutToCommandsWithDynamicRunCommandSupport" => sub { dynamicruncommand => { enable => 1 }, runcommand => [ { - job => "tests:basic:*", + job => "*:*:*", command => "baz" } ] @@ -93,7 +80,7 @@ subtest 
"fanoutToCommandsWithDynamicRunCommandSupport" => sub { ), [ { - matcher => "tests:basic:*", + matcher => "*:*:*", command => "baz" }, { From 1a30a0c2f13ad9cc7a52480f918111f6278503cc Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 14 Dec 2021 22:07:15 -0500 Subject: [PATCH 007/401] Dynamic RunCommand: validate that the job's out exists, is a file (or points to a file) which is executable. --- src/lib/Hydra/Plugin/RunCommand.pm | 30 +++++- t/Hydra/Plugin/RunCommand/fanout.t | 88 +++++++++++------ t/jobs/runcommand-dynamic.nix | 145 ++++++++++++++++++++++++----- 3 files changed, 213 insertions(+), 50 deletions(-) diff --git a/src/lib/Hydra/Plugin/RunCommand.pm b/src/lib/Hydra/Plugin/RunCommand.pm index 8d3cf35a..8998fc39 100644 --- a/src/lib/Hydra/Plugin/RunCommand.pm +++ b/src/lib/Hydra/Plugin/RunCommand.pm @@ -41,6 +41,32 @@ sub isBuildEligibleForDynamicRunCommand { my ($build) = @_; if ($build->get_column("job") =~ "^runCommandHook\..+") { + my $out = $build->buildoutputs->find({name => "out"}); + if (!defined $out) { + warn "DynamicRunCommand hook on " . $build->job . " (" . $build->id . ") rejected: no output named 'out'."; + return 0; + } + + my $path = $out->path; + if (-l $path) { + $path = readlink($path); + } + + if (! -e $path) { + warn "DynamicRunCommand hook on " . $build->job . " (" . $build->id . ") rejected: The 'out' output doesn't exist locally. This is a bug."; + return 0; + } + + if (! -x $path) { + warn "DynamicRunCommand hook on " . $build->job . " (" . $build->id . ") rejected: The 'out' output is not executable."; + return 0; + } + + if (! -f $path) { + warn "DynamicRunCommand hook on " . $build->job . " (" . $build->id . ") rejected: The 'out' output is not a regular file or symlink."; + return 0; + } + return 1; } @@ -109,9 +135,7 @@ sub fanoutToCommands { # missing test cases: # # 1. is it enabled on the jobset? - # 2. what if the result is a directory? - # 3. what if the job doens't have an out? - # 4. 
what if the build failed? + # 2. what if the build failed? if (isBuildEligibleForDynamicRunCommand($build)) { my $job = $build->get_column('job'); my $out = $build->buildoutputs->find({name => "out"}); diff --git a/t/Hydra/Plugin/RunCommand/fanout.t b/t/Hydra/Plugin/RunCommand/fanout.t index d9f67f14..41236456 100644 --- a/t/Hydra/Plugin/RunCommand/fanout.t +++ b/t/Hydra/Plugin/RunCommand/fanout.t @@ -93,36 +93,72 @@ subtest "fanoutToCommandsWithDynamicRunCommandSupport" => sub { }; subtest "isBuildEligibleForDynamicRunCommand" => sub { - my $build = Hydra::Schema::Result::Builds->new({ - "job" => "foo bar baz" - }); + subtest "Non-matches based on name alone ..." => sub { + my $build = $builds->{"foo-bar-baz"}; + is( + Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($build), + 0, + "The job name does not match" + ); - is( - Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($build), - 0, - "The job name does not match" - ); + $build->set_column("job", "runCommandHook"); + is( + Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($build), + 0, + "The job name does not match" + ); - $build->set_column("job", "runCommandHook"); - is( - Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($build), - 0, - "The job name does not match" - ); + $build->set_column("job", "runCommandHook."); + is( + Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($build), + 0, + "The job name does not match" + ); + }; - $build->set_column("job", "runCommandHook."); - is( - Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($build), - 0, - "The job name does not match" - ); + subtest "On outputs ..." 
=> sub { + is( + Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.example"}), + 1, + "out is an executable file" + ); - $build->set_column("job", "runCommandHook.a"); - is( - Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($build), - 1, - "The job name does match" - ); + is( + Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.symlink"}), + 1, + "out is a symlink to an executable file" + ); + + is( + Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.no-out"}), + 0, + "No output named out" + ); + + is( + Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.out-is-directory"}), + 0, + "out is a directory" + ); + + is( + Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.out-is-not-executable-file"}), + 0, + "out is a file which is not not executable" + ); + + is( + Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.symlink-non-executable"}), + 0, + "out is a symlink to a non-executable file" + ); + + is( + Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.symlink-directory"}), + 0, + "out is a symlink to a directory" + ); + }; }; diff --git a/t/jobs/runcommand-dynamic.nix b/t/jobs/runcommand-dynamic.nix index cf231f8f..c0b005b7 100644 --- a/t/jobs/runcommand-dynamic.nix +++ b/t/jobs/runcommand-dynamic.nix @@ -1,27 +1,130 @@ with import ./config.nix; -{ - runCommandHook.example = mkDerivation - { - name = "my-build-product"; - builder = "/bin/sh"; - outputs = [ "out" "bin" ]; - args = [ - ( - builtins.toFile "builder.sh" '' - #! /bin/sh +rec { + foo-bar-baz = mkDerivation { + name = "foo-bar-baz"; + builder = "/bin/sh"; + outputs = [ "out" ]; + args = [ + ( + builtins.toFile "builder.sh" '' + #! 
/bin/sh - echo "$PATH" + touch $out + '' + ) + ]; + }; - mkdir $bin - echo "foo" > $bin/bar + runCommandHook.example = mkDerivation { + name = "my-build-product"; + builder = "/bin/sh"; + outputs = [ "out" ]; + args = [ + ( + builtins.toFile "builder.sh" '' + #! /bin/sh - metrics=$out/nix-support/hydra-metrics - mkdir -p "$(dirname "$metrics")" - echo "lineCoverage 18 %" >> "$metrics" - echo "maxResident 27 KiB" >> "$metrics" - '' - ) - ]; - }; + touch $out + chmod +x $out + # ... dunno ... + '' + ) + ]; + }; + + runCommandHook.symlink = mkDerivation { + name = "symlink-out"; + builder = "/bin/sh"; + outputs = [ "out" ]; + args = [ + ( + builtins.toFile "builder.sh" '' + #! /bin/sh + + ln -s $1 $out + '' + ) + + runCommandHook.example + ]; + }; + + runCommandHook.no-out = mkDerivation { + name = "no-out"; + builder = "/bin/sh"; + outputs = [ "bin" ]; + args = [ + ( + builtins.toFile "builder.sh" '' + #! /bin/sh + mkdir $bin + '' + ) + ]; + }; + + runCommandHook.out-is-directory = mkDerivation { + name = "out-is-directory"; + builder = "/bin/sh"; + outputs = [ "out" ]; + args = [ + ( + builtins.toFile "builder.sh" '' + #! /bin/sh + + mkdir $out + '' + ) + ]; + }; + + runCommandHook.out-is-not-executable-file = mkDerivation { + name = "out-is-directory"; + builder = "/bin/sh"; + outputs = [ "out" ]; + args = [ + ( + builtins.toFile "builder.sh" '' + #! /bin/sh + + touch $out + '' + ) + ]; + }; + + runCommandHook.symlink-non-executable = mkDerivation { + name = "symlink-out"; + builder = "/bin/sh"; + outputs = [ "out" ]; + args = [ + ( + builtins.toFile "builder.sh" '' + #! /bin/sh + + ln -s $1 $out + '' + ) + + runCommandHook.out-is-not-executable-file + ]; + }; + + runCommandHook.symlink-directory = mkDerivation { + name = "symlink-directory"; + builder = "/bin/sh"; + outputs = [ "out" ]; + args = [ + ( + builtins.toFile "builder.sh" '' + #! 
/bin/sh + + ln -s $1 $out + '' + ) + + runCommandHook.out-is-directory + ]; + }; } From 216d8bee3532d3d5a2326aff3f7c8bd8cff6c007 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 14 Dec 2021 22:10:02 -0500 Subject: [PATCH 008/401] DynamicRunCommand: don't run if the build failed --- src/lib/Hydra/Plugin/RunCommand.pm | 5 ++++- t/Hydra/Plugin/RunCommand/fanout.t | 8 ++++++++ t/jobs/runcommand-dynamic.nix | 18 ++++++++++++++++++ 3 files changed, 30 insertions(+), 1 deletion(-) diff --git a/src/lib/Hydra/Plugin/RunCommand.pm b/src/lib/Hydra/Plugin/RunCommand.pm index 8998fc39..2ab20274 100644 --- a/src/lib/Hydra/Plugin/RunCommand.pm +++ b/src/lib/Hydra/Plugin/RunCommand.pm @@ -40,6 +40,10 @@ sub areDynamicCommandsEnabled { sub isBuildEligibleForDynamicRunCommand { my ($build) = @_; + if ($build->get_column("buildstatus") != 0) { + return 0; + } + if ($build->get_column("job") =~ "^runCommandHook\..+") { my $out = $build->buildoutputs->find({name => "out"}); if (!defined $out) { @@ -135,7 +139,6 @@ sub fanoutToCommands { # missing test cases: # # 1. is it enabled on the jobset? - # 2. what if the build failed? if (isBuildEligibleForDynamicRunCommand($build)) { my $job = $build->get_column('job'); my $out = $build->buildoutputs->find({name => "out"}); diff --git a/t/Hydra/Plugin/RunCommand/fanout.t b/t/Hydra/Plugin/RunCommand/fanout.t index 41236456..8d34e582 100644 --- a/t/Hydra/Plugin/RunCommand/fanout.t +++ b/t/Hydra/Plugin/RunCommand/fanout.t @@ -159,6 +159,14 @@ subtest "isBuildEligibleForDynamicRunCommand" => sub { "out is a symlink to a directory" ); }; + + subtest "On build status ..." 
=> sub { + is( + Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.failed"}), + 0, + "Failed builds don't get run" + ); + }; }; diff --git a/t/jobs/runcommand-dynamic.nix b/t/jobs/runcommand-dynamic.nix index c0b005b7..1971bb82 100644 --- a/t/jobs/runcommand-dynamic.nix +++ b/t/jobs/runcommand-dynamic.nix @@ -127,4 +127,22 @@ rec { ]; }; + runCommandHook.failed = mkDerivation { + name = "failed"; + builder = "/bin/sh"; + outputs = [ "out" ]; + args = [ + ( + builtins.toFile "builder.sh" '' + #! /bin/sh + + touch $out + chmod +x $out + + exit 1 + '' + ) + ]; + }; + } From 97a1d2d1d488887c2f9a65a22197e0f1c46be58e Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 14 Dec 2021 22:12:03 -0500 Subject: [PATCH 009/401] Jobsets: add enable_dynamic_run_command --- src/lib/Hydra/Schema/Result/Jobsets.pm | 12 ++++++++++-- src/sql/hydra.sql | 1 + 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/src/lib/Hydra/Schema/Result/Jobsets.pm b/src/lib/Hydra/Schema/Result/Jobsets.pm index 13ac09e4..bd4b7165 100644 --- a/src/lib/Hydra/Schema/Result/Jobsets.pm +++ b/src/lib/Hydra/Schema/Result/Jobsets.pm @@ -155,6 +155,12 @@ __PACKAGE__->table("jobsets"); data_type: 'text' is_nullable: 1 +=head2 enable_dynamic_run_command + + data_type: 'boolean' + default_value: false + is_nullable: 0 + =cut __PACKAGE__->add_columns( @@ -207,6 +213,8 @@ __PACKAGE__->add_columns( { data_type => "integer", default_value => 0, is_nullable => 0 }, "flake", { data_type => "text", is_nullable => 1 }, + "enable_dynamic_run_command", + { data_type => "boolean", default_value => \"false", is_nullable => 0 }, ); =head1 PRIMARY KEY @@ -354,8 +362,8 @@ __PACKAGE__->has_many( ); -# Created by DBIx::Class::Schema::Loader v0.07049 @ 2022-01-08 22:24:10 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:cQOnMitrWGMoJX6kZGNW+w +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2022-01-24 14:17:33 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! 
md5sum:7wPE5ebeVTkenMCWG9Sgcg use JSON::MaybeXS; diff --git a/src/sql/hydra.sql b/src/sql/hydra.sql index 26617789..73802d75 100644 --- a/src/sql/hydra.sql +++ b/src/sql/hydra.sql @@ -88,6 +88,7 @@ create table Jobsets ( startTime integer, -- if jobset is currently running type integer not null default 0, -- 0 == legacy, 1 == flake flake text, + enable_dynamic_run_command boolean not null default false, constraint jobsets_schedulingshares_nonzero_check check (schedulingShares > 0), constraint jobsets_type_known_check check (type = 0 or type = 1), -- If the type is 0, then nixExprInput and nixExprPath should be non-null and other type-specific fields should be null From 3cce0c5ef6a928c7a7f7091c27691a25482dccc1 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 14 Dec 2021 22:15:50 -0500 Subject: [PATCH 010/401] Only run dynamic runcommand hooks if the jobset enables them --- src/lib/Hydra/Plugin/RunCommand.pm | 7 +++---- t/Hydra/Plugin/RunCommand/fanout.t | 13 +++++++++++++ 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/src/lib/Hydra/Plugin/RunCommand.pm b/src/lib/Hydra/Plugin/RunCommand.pm index 2ab20274..79bf72a8 100644 --- a/src/lib/Hydra/Plugin/RunCommand.pm +++ b/src/lib/Hydra/Plugin/RunCommand.pm @@ -71,7 +71,9 @@ sub isBuildEligibleForDynamicRunCommand { return 0; } - return 1; + if ($build->jobset->enable_dynamic_run_command) { + return 1; + } } return 0; @@ -136,9 +138,6 @@ sub fanoutToCommands { # Calculate all dynamically defined commands to execute if (areDynamicCommandsEnabled($config)) { - # missing test cases: - # - # 1. is it enabled on the jobset? 
if (isBuildEligibleForDynamicRunCommand($build)) { my $job = $build->get_column('job'); my $out = $build->buildoutputs->find({name => "out"}); diff --git a/t/Hydra/Plugin/RunCommand/fanout.t b/t/Hydra/Plugin/RunCommand/fanout.t index 8d34e582..72d58b3c 100644 --- a/t/Hydra/Plugin/RunCommand/fanout.t +++ b/t/Hydra/Plugin/RunCommand/fanout.t @@ -13,6 +13,9 @@ my $builds = $ctx->makeAndEvaluateJobset( my $build = $builds->{"runCommandHook.example"}; +# Enable dynamic runcommand on the jobset +$build->jobset->update({enable_dynamic_run_command => 1}); + is($build->job, "runCommandHook.example", "The only job should be runCommandHook.example"); is($build->finished, 1, "Build should be finished."); is($build->buildstatus, 0, "Build should have buildstatus 0."); @@ -167,6 +170,16 @@ subtest "isBuildEligibleForDynamicRunCommand" => sub { "Failed builds don't get run" ); }; + + subtest "With dynamic runcommand disabled ..." => sub { + $build->jobset->update({enable_dynamic_run_command => 0}); + + is( + Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.example"}), + 0, + "Builds don't run from a jobset with disabled dynamic runcommand" + ); + }; }; From a9bfabd6722bdfe8a1cd69d045a0bfabb0284e1d Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 15 Dec 2021 11:16:05 -0500 Subject: [PATCH 011/401] sql: add a migration for enable_dynamic_run_command --- src/sql/upgrade-82.sql | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 src/sql/upgrade-82.sql diff --git a/src/sql/upgrade-82.sql b/src/sql/upgrade-82.sql new file mode 100644 index 00000000..eb012762 --- /dev/null +++ b/src/sql/upgrade-82.sql @@ -0,0 +1,2 @@ +ALTER TABLE Jobsets + ADD COLUMN enable_dynamic_run_command boolean not null default false; From 85a53694c8a87544dc2192a1f8900dadca499a6f Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 15 Dec 2021 12:32:10 -0500 Subject: [PATCH 012/401] sql: add enable_dynamic_run_command to the Project as well --- 
src/sql/hydra.sql | 1 + src/sql/upgrade-82.sql | 2 ++ 2 files changed, 3 insertions(+) diff --git a/src/sql/hydra.sql b/src/sql/hydra.sql index 73802d75..eaae6da3 100644 --- a/src/sql/hydra.sql +++ b/src/sql/hydra.sql @@ -49,6 +49,7 @@ create table Projects ( declfile text, -- File containing declarative jobset specification decltype text, -- Type of the input containing declarative jobset specification declvalue text, -- Value of the input containing declarative jobset specification + enable_dynamic_run_command boolean not null default false, foreign key (owner) references Users(userName) on update cascade ); diff --git a/src/sql/upgrade-82.sql b/src/sql/upgrade-82.sql index eb012762..a619caf3 100644 --- a/src/sql/upgrade-82.sql +++ b/src/sql/upgrade-82.sql @@ -1,2 +1,4 @@ ALTER TABLE Jobsets ADD COLUMN enable_dynamic_run_command boolean not null default false; +ALTER TABLE Projects + ADD COLUMN enable_dynamic_run_command boolean not null default false; From 0c96172c2890d0043a6ba41bdbc8d4765c2d1ca4 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 15 Dec 2021 12:33:16 -0500 Subject: [PATCH 013/401] RunCommand: only run dynamic runcommand hooks if the project AND jobset agree they should be enabled --- t/Hydra/Plugin/RunCommand/fanout.t | 40 ++++++++++++++++++++++++------ 1 file changed, 33 insertions(+), 7 deletions(-) diff --git a/t/Hydra/Plugin/RunCommand/fanout.t b/t/Hydra/Plugin/RunCommand/fanout.t index 72d58b3c..90bf4a6f 100644 --- a/t/Hydra/Plugin/RunCommand/fanout.t +++ b/t/Hydra/Plugin/RunCommand/fanout.t @@ -13,7 +13,8 @@ my $builds = $ctx->makeAndEvaluateJobset( my $build = $builds->{"runCommandHook.example"}; -# Enable dynamic runcommand on the jobset +# Enable dynamic runcommand on the project and jobset +$build->project->update({enable_dynamic_run_command => 1}); $build->jobset->update({enable_dynamic_run_command => 1}); is($build->job, "runCommandHook.example", "The only job should be runCommandHook.example"); @@ -172,13 +173,38 @@ 
subtest "isBuildEligibleForDynamicRunCommand" => sub { }; subtest "With dynamic runcommand disabled ..." => sub { - $build->jobset->update({enable_dynamic_run_command => 0}); + subtest "disabled on the project, enabled on the jobset" => { + $build->project->update({enable_dynamic_run_command => 0}); + $build->jobset->update({enable_dynamic_run_command => 1}); - is( - Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.example"}), - 0, - "Builds don't run from a jobset with disabled dynamic runcommand" - ); + is( + Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.example"}), + 0, + "Builds don't run from a jobset with disabled dynamic runcommand" + ); + }; + + subtest "enabled on the project, disabled on the jobset" => { + $build->project->update({enable_dynamic_run_command => 1}); + $build->jobset->update({enable_dynamic_run_command => 0}); + + is( + Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.example"}), + 0, + "Builds don't run from a jobset with disabled dynamic runcommand" + ); + }; + + subtest "disabled on the project, disabled on the jobset" => { + $build->project->update({enable_dynamic_run_command => 0}); + $build->jobset->update({enable_dynamic_run_command => 0}); + + is( + Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.example"}), + 0, + "Builds don't run from a jobset with disabled dynamic runcommand" + ); + }; }; }; From aef11685a0c75b8e332b1c3d515e3bbb46206888 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 15 Dec 2021 12:34:29 -0500 Subject: [PATCH 014/401] regenerate schema files after adding the flag to the projects --- src/lib/Hydra/Schema/Result/Projects.pm | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/lib/Hydra/Schema/Result/Projects.pm b/src/lib/Hydra/Schema/Result/Projects.pm index 35c3eeab..9e630b16 100644 --- 
a/src/lib/Hydra/Schema/Result/Projects.pm +++ b/src/lib/Hydra/Schema/Result/Projects.pm @@ -88,6 +88,12 @@ __PACKAGE__->table("projects"); data_type: 'text' is_nullable: 1 +=head2 enable_dynamic_run_command + + data_type: 'boolean' + default_value: false + is_nullable: 0 + =cut __PACKAGE__->add_columns( @@ -111,6 +117,8 @@ __PACKAGE__->add_columns( { data_type => "text", is_nullable => 1 }, "declvalue", { data_type => "text", is_nullable => 1 }, + "enable_dynamic_run_command", + { data_type => "boolean", default_value => \"false", is_nullable => 0 }, ); =head1 PRIMARY KEY @@ -228,8 +236,8 @@ Composing rels: L -> username __PACKAGE__->many_to_many("usernames", "projectmembers", "username"); -# Created by DBIx::Class::Schema::Loader v0.07049 @ 2022-01-08 22:24:10 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:r/wbX3FAm5/OFrrwOQL5fA +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2022-01-24 14:20:32 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:PtXDyT8Pc7LYhhdEG39EKQ use JSON::MaybeXS; From 0810f5debcaa4372f78b225453b578ec09db097b Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 15 Dec 2021 12:36:19 -0500 Subject: [PATCH 015/401] finish making the dynamic hooks only run on project & jobset agreement --- src/lib/Hydra/Plugin/RunCommand.pm | 10 ++++++++-- t/Hydra/Plugin/RunCommand/fanout.t | 6 +++--- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/src/lib/Hydra/Plugin/RunCommand.pm b/src/lib/Hydra/Plugin/RunCommand.pm index 79bf72a8..6d099142 100644 --- a/src/lib/Hydra/Plugin/RunCommand.pm +++ b/src/lib/Hydra/Plugin/RunCommand.pm @@ -71,9 +71,15 @@ sub isBuildEligibleForDynamicRunCommand { return 0; } - if ($build->jobset->enable_dynamic_run_command) { - return 1; + if (! $build->jobset->enable_dynamic_run_command) { + return 0; } + + if (! 
$build->project->enable_dynamic_run_command) { + return 0; + } + + return 1; } return 0; diff --git a/t/Hydra/Plugin/RunCommand/fanout.t b/t/Hydra/Plugin/RunCommand/fanout.t index 90bf4a6f..bd2502ec 100644 --- a/t/Hydra/Plugin/RunCommand/fanout.t +++ b/t/Hydra/Plugin/RunCommand/fanout.t @@ -173,7 +173,7 @@ subtest "isBuildEligibleForDynamicRunCommand" => sub { }; subtest "With dynamic runcommand disabled ..." => sub { - subtest "disabled on the project, enabled on the jobset" => { + subtest "disabled on the project, enabled on the jobset" => sub { $build->project->update({enable_dynamic_run_command => 0}); $build->jobset->update({enable_dynamic_run_command => 1}); @@ -184,7 +184,7 @@ subtest "isBuildEligibleForDynamicRunCommand" => sub { ); }; - subtest "enabled on the project, disabled on the jobset" => { + subtest "enabled on the project, disabled on the jobset" => sub { $build->project->update({enable_dynamic_run_command => 1}); $build->jobset->update({enable_dynamic_run_command => 0}); @@ -195,7 +195,7 @@ subtest "isBuildEligibleForDynamicRunCommand" => sub { ); }; - subtest "disabled on the project, disabled on the jobset" => { + subtest "disabled on the project, disabled on the jobset" => sub { $build->project->update({enable_dynamic_run_command => 0}); $build->jobset->update({enable_dynamic_run_command => 0}); From 1802bd011338f227b2100b5947431d9b52c4b4fa Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 15 Dec 2021 12:37:01 -0500 Subject: [PATCH 016/401] Declarative Jobs: add support for the enable_dynamic_run_command flag --- doc/manual/src/plugins/declarative-projects.md | 3 +++ src/lib/Hydra/Helper/AddBuilds.pm | 1 + 2 files changed, 4 insertions(+) diff --git a/doc/manual/src/plugins/declarative-projects.md b/doc/manual/src/plugins/declarative-projects.md index 12dfed18..b72c6fd0 100644 --- a/doc/manual/src/plugins/declarative-projects.md +++ b/doc/manual/src/plugins/declarative-projects.md @@ -34,6 +34,7 @@ To configure a static declarative 
project, take the following steps: "checkinterval": 300, "schedulingshares": 100, "enableemail": false, + "enable_dynamic_run_command": false, "emailoverride": "", "keepnr": 3, "inputs": { @@ -53,6 +54,7 @@ To configure a static declarative project, take the following steps: "checkinterval": 300, "schedulingshares": 100, "enableemail": false, + "enable_dynamic_run_command": false, "emailoverride": "", "keepnr": 3, "inputs": { @@ -92,6 +94,7 @@ containing the configuration of the jobset, for example: "checkinterval": 300, "schedulingshares": 100, "enableemail": false, + "enable_dynamic_run_command": false, "emailoverride": "", "keepnr": 3, "inputs": { diff --git a/src/lib/Hydra/Helper/AddBuilds.pm b/src/lib/Hydra/Helper/AddBuilds.pm index 1e6d8944..f38737d3 100644 --- a/src/lib/Hydra/Helper/AddBuilds.pm +++ b/src/lib/Hydra/Helper/AddBuilds.pm @@ -39,6 +39,7 @@ sub updateDeclarativeJobset { checkinterval schedulingshares enableemail + enable_dynamic_run_command emailoverride keepnr ); From 726ea80e991aa540cf5ed5f1b66ca8a5c68efa00 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 15 Dec 2021 12:37:35 -0500 Subject: [PATCH 017/401] HTTP/Jobset: support setting / reading enable_dynamic_run_command --- hydra-api.yaml | 3 +++ src/lib/Hydra/Controller/Jobset.pm | 1 + src/root/edit-jobset.tt | 7 +++++++ src/root/jobset.tt | 4 ++++ t/Hydra/Controller/Jobset/http.t | 1 + 5 files changed, 16 insertions(+) diff --git a/hydra-api.yaml b/hydra-api.yaml index 7857162e..0d203a41 100644 --- a/hydra-api.yaml +++ b/hydra-api.yaml @@ -689,6 +689,9 @@ components: enableemail: description: when true the jobset sends emails when previously-successful builds fail type: boolean + enable_dynamic_run_command: + description: when true the jobset supports executing dynamically defined RunCommand hooks. Requires the server and project's configuration to also enable dynamic RunCommand. 
+ type: boolean visible: description: when true the jobset is visible in the web frontend type: boolean diff --git a/src/lib/Hydra/Controller/Jobset.pm b/src/lib/Hydra/Controller/Jobset.pm index b952031f..a2d48597 100644 --- a/src/lib/Hydra/Controller/Jobset.pm +++ b/src/lib/Hydra/Controller/Jobset.pm @@ -268,6 +268,7 @@ sub updateJobset { , nixexprinput => $nixExprInput , enabled => $enabled , enableemail => defined $c->stash->{params}->{enableemail} ? 1 : 0 + , enable_dynamic_run_command => defined $c->stash->{params}->{enable_dynamic_run_command} ? 1 : 0 , emailoverride => trim($c->stash->{params}->{emailoverride}) || "" , hidden => defined $c->stash->{params}->{visible} ? 0 : 1 , keepnr => int(trim($c->stash->{params}->{keepnr} // "0")) diff --git a/src/root/edit-jobset.tt b/src/root/edit-jobset.tt index dbd26dcc..40da8f61 100644 --- a/src/root/edit-jobset.tt +++ b/src/root/edit-jobset.tt @@ -157,6 +157,13 @@ +
+ +
+ +
+
+
diff --git a/src/root/jobset.tt b/src/root/jobset.tt index 4fb52517..3d6ca6ae 100644 --- a/src/root/jobset.tt +++ b/src/root/jobset.tt @@ -160,6 +160,10 @@ Scheduling shares: [% jobset.schedulingshares %] [% IF totalShares %] ([% f = format("%.2f"); f(jobset.schedulingshares / totalShares * 100) %]% out of [% totalShares %] shares)[% END %] + + Enable Dynamic RunCommand Hooks: + [% jobset.enable_dynamic_run_command ? "Yes" : "No" %] + [% IF emailNotification %] Enable email notification: diff --git a/t/Hydra/Controller/Jobset/http.t b/t/Hydra/Controller/Jobset/http.t index 32b3a681..4bca7c15 100644 --- a/t/Hydra/Controller/Jobset/http.t +++ b/t/Hydra/Controller/Jobset/http.t @@ -73,6 +73,7 @@ subtest 'Read newly-created jobset "job"' => sub { emailoverride => "", enabled => 2, enableemail => JSON::MaybeXS::false, + enable_dynamic_run_command => JSON::MaybeXS::false, errortime => undef, errormsg => "", fetcherrormsg => "", From 1affb1cfb198cb07b8f161d03a9696e308eddd90 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 15 Dec 2021 13:55:54 -0500 Subject: [PATCH 018/401] jobset API: expose and check the enable_dynamic_run_command --- src/lib/Hydra/Schema/Result/Jobsets.pm | 1 + t/Hydra/Controller/Jobset/http.t | 1 + 2 files changed, 2 insertions(+) diff --git a/src/lib/Hydra/Schema/Result/Jobsets.pm b/src/lib/Hydra/Schema/Result/Jobsets.pm index bd4b7165..7b96c472 100644 --- a/src/lib/Hydra/Schema/Result/Jobsets.pm +++ b/src/lib/Hydra/Schema/Result/Jobsets.pm @@ -414,6 +414,7 @@ sub as_json { # boolean_columns "enableemail" => $self->get_column("enableemail") ? JSON::MaybeXS::true : JSON::MaybeXS::false, + "enable_dynamic_run_command" => $self->get_column("enable_dynamic_run_command") ? JSON::MaybeXS::true : JSON::MaybeXS::false, "visible" => $self->get_column("hidden") ? 
JSON::MaybeXS::false : JSON::MaybeXS::true, "inputs" => { map { $_->name => $_ } $self->jobsetinputs } diff --git a/t/Hydra/Controller/Jobset/http.t b/t/Hydra/Controller/Jobset/http.t index 4bca7c15..4e53949d 100644 --- a/t/Hydra/Controller/Jobset/http.t +++ b/t/Hydra/Controller/Jobset/http.t @@ -132,6 +132,7 @@ subtest 'Update jobset "job" to legacy type' => sub { emailoverride => "", enabled => 3, enableemail => JSON::MaybeXS::false, + enable_dynamic_run_command => JSON::MaybeXS::false, errortime => undef, errormsg => "", fetcherrormsg => "", From 8a96f07f58bfd4c9e7da9c573718473e829bf104 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 15 Dec 2021 15:32:49 -0500 Subject: [PATCH 019/401] Project: enable enabling dynamic runcommand per project --- src/lib/Hydra/Controller/Project.pm | 1 + src/lib/Hydra/Schema/Result/Projects.pm | 1 + src/root/edit-project.tt | 8 ++++++++ src/root/project.tt | 4 ++++ 4 files changed, 14 insertions(+) diff --git a/src/lib/Hydra/Controller/Project.pm b/src/lib/Hydra/Controller/Project.pm index ed3c527c..98a8a6eb 100644 --- a/src/lib/Hydra/Controller/Project.pm +++ b/src/lib/Hydra/Controller/Project.pm @@ -157,6 +157,7 @@ sub updateProject { , enabled => defined $c->stash->{params}->{enabled} ? 1 : 0 , hidden => defined $c->stash->{params}->{visible} ? 0 : 1 , owner => $owner + , enable_dynamic_run_command => defined $c->stash->{params}->{enable_dynamic_run_command} ? 1 : 0 , declfile => trim($c->stash->{params}->{declarative}->{file}) , decltype => trim($c->stash->{params}->{declarative}->{type}) , declvalue => trim($c->stash->{params}->{declarative}->{value}) diff --git a/src/lib/Hydra/Schema/Result/Projects.pm b/src/lib/Hydra/Schema/Result/Projects.pm index 9e630b16..42ca22a4 100644 --- a/src/lib/Hydra/Schema/Result/Projects.pm +++ b/src/lib/Hydra/Schema/Result/Projects.pm @@ -259,6 +259,7 @@ sub as_json { # boolean_columns "enabled" => $self->get_column("enabled") ? 
JSON::MaybeXS::true : JSON::MaybeXS::false, + "enable_dynamic_run_command" => $self->get_column("enable_dynamic_run_command") ? JSON::MaybeXS::true : JSON::MaybeXS::false, "hidden" => $self->get_column("hidden") ? JSON::MaybeXS::true : JSON::MaybeXS::false, "jobsets" => [ map { $_->name } $self->jobsets ] diff --git a/src/root/edit-project.tt b/src/root/edit-project.tt index 6149ec1d..4b99f4ab 100644 --- a/src/root/edit-project.tt +++ b/src/root/edit-project.tt @@ -52,6 +52,14 @@
+ +
+ +
+ +
+
+
From 2635607b6e926ad6ac570763dcaaf5c4be8bbe7c Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 15 Dec 2021 15:41:55 -0500 Subject: [PATCH 020/401] whoops: add a test on the enable_dynamic_run_command field --- hydra-api.yaml | 3 +++ t/Hydra/Controller/projects.t | 3 +++ 2 files changed, 6 insertions(+) diff --git a/hydra-api.yaml b/hydra-api.yaml index 0d203a41..0fe0a130 100644 --- a/hydra-api.yaml +++ b/hydra-api.yaml @@ -607,6 +607,9 @@ components: enabled: description: when set to true the project gets scheduled for evaluation type: boolean + enable_dynamic_run_command: + description: when true the project's jobsets support executing dynamically defined RunCommand hooks. Requires the server and project's configuration to also enable dynamic RunCommand. + type: boolean declarative: description: declarative input configured for this project type: object diff --git a/t/Hydra/Controller/projects.t b/t/Hydra/Controller/projects.t index df1290aa..130724cf 100644 --- a/t/Hydra/Controller/projects.t +++ b/t/Hydra/Controller/projects.t @@ -46,6 +46,7 @@ subtest "Read project 'tests'" => sub { description => "", displayname => "Tests", enabled => JSON::MaybeXS::true, + enable_dynamic_run_command => JSON::MaybeXS::false, hidden => JSON::MaybeXS::false, homepage => "", jobsets => [], @@ -85,6 +86,7 @@ subtest "Transitioning from declarative project to normal" => sub { description => "", displayname => "Tests", enabled => JSON::MaybeXS::true, + enable_dynamic_run_command => JSON::MaybeXS::false, hidden => JSON::MaybeXS::false, homepage => "", jobsets => [".jobsets"], @@ -128,6 +130,7 @@ subtest "Transitioning from declarative project to normal" => sub { description => "", displayname => "Tests", enabled => JSON::MaybeXS::true, + enable_dynamic_run_command => JSON::MaybeXS::false, hidden => JSON::MaybeXS::false, homepage => "", jobsets => [], From bc1630bd27e6d489122e18b6c37a92ea18b83b0a Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Mon, 24 Jan 2022 
15:55:18 -0500 Subject: [PATCH 021/401] fixup! RunCommand: Add a WIP execution of dynamic commands --- src/lib/Hydra/Plugin/RunCommand.pm | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib/Hydra/Plugin/RunCommand.pm b/src/lib/Hydra/Plugin/RunCommand.pm index 6d099142..b55e96a9 100644 --- a/src/lib/Hydra/Plugin/RunCommand.pm +++ b/src/lib/Hydra/Plugin/RunCommand.pm @@ -126,8 +126,8 @@ sub fanoutToCommands { next unless eventMatches($conf, $event); next unless configSectionMatches( $matcher, - $build->get_column('project'), - $build->get_column('jobset'), + $build->jobset->get_column('project'), + $build->jobset->get_column('name'), $build->get_column('job') ); From 38514ae4940aaa9c0b499f0beed1ab8272dd8fa3 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Mon, 24 Jan 2022 16:07:42 -0500 Subject: [PATCH 022/401] fanout tests: capture warnings and test their relevance --- t/Hydra/Plugin/RunCommand/fanout.t | 84 +++++++++++++++++------------- 1 file changed, 49 insertions(+), 35 deletions(-) diff --git a/t/Hydra/Plugin/RunCommand/fanout.t b/t/Hydra/Plugin/RunCommand/fanout.t index bd2502ec..808f661c 100644 --- a/t/Hydra/Plugin/RunCommand/fanout.t +++ b/t/Hydra/Plugin/RunCommand/fanout.t @@ -121,47 +121,61 @@ subtest "isBuildEligibleForDynamicRunCommand" => sub { }; subtest "On outputs ..." 
=> sub { - is( - Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.example"}), - 1, - "out is an executable file" - ); + ok(!warns { + is( + Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.example"}), + 1, + "out is an executable file" + ); + }, "No warnings for an executable file."); - is( - Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.symlink"}), - 1, - "out is a symlink to an executable file" - ); + ok(!warns { + is( + Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.symlink"}), + 1, + "out is a symlink to an executable file" + ); + }, "No warnings for a symlink to an executable file."); - is( - Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.no-out"}), - 0, - "No output named out" - ); + like(warning { + is( + Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.no-out"}), + 0, + "No output named out" + ); + }, qr/rejected: no output named 'out'/, "A relevant warning is provided for a missing output"); - is( - Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.out-is-directory"}), - 0, - "out is a directory" - ); + like(warning { + is( + Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.out-is-directory"}), + 0, + "out is a directory" + ); + }, qr/output is not a regular file or symlink/, "A relevant warning is provided for a directory output"); - is( - Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.out-is-not-executable-file"}), - 0, - "out is a file which is not not executable" - ); + like(warning { + is( + Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.out-is-not-executable-file"}), + 0, + "out is a file which is not a regular file or symlink" + ); + }, qr/output 
is not executable/, "A relevant warning is provided if the file isn't executable"); - is( - Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.symlink-non-executable"}), - 0, - "out is a symlink to a non-executable file" - ); + like(warning { + is( + Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.symlink-non-executable"}), + 0, + "out is a symlink to a non-executable file" + ); + }, qr/output is not executable/, "A relevant warning is provided for symlinks to non-executables"); - is( - Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.symlink-directory"}), - 0, - "out is a symlink to a directory" - ); + like(warning { + is( + Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.symlink-directory"}), + 0, + "out is a symlink to a directory" + ); + }, qr/output is not a regular file or symlink/, "A relevant warning is provided for symlinks to directories"); }; subtest "On build status ..." => sub { From daa6864a58e0f29509c2718d6dc91dc84198808b Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Mon, 24 Jan 2022 16:09:45 -0500 Subject: [PATCH 023/401] Project result: add a supportsDynamicRunCommand helper --- src/lib/Hydra/Plugin/RunCommand.pm | 2 +- src/lib/Hydra/Schema/Result/Projects.pm | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/lib/Hydra/Plugin/RunCommand.pm b/src/lib/Hydra/Plugin/RunCommand.pm index b55e96a9..9e173278 100644 --- a/src/lib/Hydra/Plugin/RunCommand.pm +++ b/src/lib/Hydra/Plugin/RunCommand.pm @@ -75,7 +75,7 @@ sub isBuildEligibleForDynamicRunCommand { return 0; } - if (! $build->project->enable_dynamic_run_command) { + if (! 
$build->project->supportsDynamicRunCommand()) { return 0; } diff --git a/src/lib/Hydra/Schema/Result/Projects.pm b/src/lib/Hydra/Schema/Result/Projects.pm index 42ca22a4..d6e66bf7 100644 --- a/src/lib/Hydra/Schema/Result/Projects.pm +++ b/src/lib/Hydra/Schema/Result/Projects.pm @@ -246,6 +246,12 @@ sub builds { return $self->jobsets->related_resultset('builds'); }; +sub supportsDynamicRunCommand { + my ($self) = @_; + + return $self->get_column('enable_dynamic_run_command') == 1; +} + sub as_json { my $self = shift; From 3aa239309181d04dc6831b22c8abb0bbf85cb657 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Mon, 24 Jan 2022 16:11:52 -0500 Subject: [PATCH 024/401] Jobsets: add a supportsDynamicRunCommand which also checks the project's dynamic runcommand support --- src/lib/Hydra/Plugin/RunCommand.pm | 6 +----- src/lib/Hydra/Schema/Result/Jobsets.pm | 7 +++++++ 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/src/lib/Hydra/Plugin/RunCommand.pm b/src/lib/Hydra/Plugin/RunCommand.pm index 9e173278..2b3bb6f4 100644 --- a/src/lib/Hydra/Plugin/RunCommand.pm +++ b/src/lib/Hydra/Plugin/RunCommand.pm @@ -71,11 +71,7 @@ sub isBuildEligibleForDynamicRunCommand { return 0; } - if (! $build->jobset->enable_dynamic_run_command) { - return 0; - } - - if (! $build->project->supportsDynamicRunCommand()) { + if (! 
$build->jobset->supportsDynamicRunCommand()) { return 0; } diff --git a/src/lib/Hydra/Schema/Result/Jobsets.pm b/src/lib/Hydra/Schema/Result/Jobsets.pm index 7b96c472..cd704ac8 100644 --- a/src/lib/Hydra/Schema/Result/Jobsets.pm +++ b/src/lib/Hydra/Schema/Result/Jobsets.pm @@ -386,6 +386,13 @@ __PACKAGE__->add_column( "+id" => { retrieve_on_insert => 1 } ); +sub supportsDynamicRunCommand { + my ($self) = @_; + + return $self->get_column('enable_dynamic_run_command') == 1 + && $self->project->supportsDynamicRunCommand(); +} + sub as_json { my $self = shift; From d8b56f022d1acd2b9f08106ab502c0a51393e6a7 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Mon, 24 Jan 2022 16:16:58 -0500 Subject: [PATCH 025/401] RunCommand: print a warning if the hook isn't run because the project / jobset doens't have it enabled --- src/lib/Hydra/Plugin/RunCommand.pm | 1 + t/Hydra/Plugin/RunCommand/fanout.t | 37 ++++++++++++++++++------------ 2 files changed, 23 insertions(+), 15 deletions(-) diff --git a/src/lib/Hydra/Plugin/RunCommand.pm b/src/lib/Hydra/Plugin/RunCommand.pm index 2b3bb6f4..43163764 100644 --- a/src/lib/Hydra/Plugin/RunCommand.pm +++ b/src/lib/Hydra/Plugin/RunCommand.pm @@ -72,6 +72,7 @@ sub isBuildEligibleForDynamicRunCommand { } if (! $build->jobset->supportsDynamicRunCommand()) { + warn "DynamicRunCommand hook on " . $build->job . " (" . $build->id . 
") rejected: The project or jobset don't have dynamic runcommand enabled."; return 0; } diff --git a/t/Hydra/Plugin/RunCommand/fanout.t b/t/Hydra/Plugin/RunCommand/fanout.t index 808f661c..328824f9 100644 --- a/t/Hydra/Plugin/RunCommand/fanout.t +++ b/t/Hydra/Plugin/RunCommand/fanout.t @@ -191,33 +191,40 @@ subtest "isBuildEligibleForDynamicRunCommand" => sub { $build->project->update({enable_dynamic_run_command => 0}); $build->jobset->update({enable_dynamic_run_command => 1}); - is( - Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.example"}), - 0, - "Builds don't run from a jobset with disabled dynamic runcommand" - ); + + like(warning { + is( + Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.example"}), + 0, + "Builds don't run from a jobset with disabled dynamic runcommand" + ); + }, qr/project or jobset don't have dynamic runcommand enabled./, "A relevant warning is provided for a disabled runcommand support") }; subtest "enabled on the project, disabled on the jobset" => sub { $build->project->update({enable_dynamic_run_command => 1}); $build->jobset->update({enable_dynamic_run_command => 0}); - is( - Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.example"}), - 0, - "Builds don't run from a jobset with disabled dynamic runcommand" - ); + like(warning { + is( + Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.example"}), + 0, + "Builds don't run from a jobset with disabled dynamic runcommand" + ); + }, qr/project or jobset don't have dynamic runcommand enabled./, "A relevant warning is provided for a disabled runcommand support") }; subtest "disabled on the project, disabled on the jobset" => sub { $build->project->update({enable_dynamic_run_command => 0}); $build->jobset->update({enable_dynamic_run_command => 0}); - is( - 
Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.example"}), - 0, - "Builds don't run from a jobset with disabled dynamic runcommand" - ); + like(warning { + is( + Hydra::Plugin::RunCommand::isBuildEligibleForDynamicRunCommand($builds->{"runCommandHook.example"}), + 0, + "Builds don't run from a jobset with disabled dynamic runcommand" + ); + }, qr/project or jobset don't have dynamic runcommand enabled./, "A relevant warning is provided for a disabled runcommand support") }; }; }; From 5ae26aa7604f714dcc73edcb74fe71ddc8957f6c Mon Sep 17 00:00:00 2001 From: Maximilian Bosch Date: Sun, 6 Feb 2022 15:05:15 +0100 Subject: [PATCH 026/401] Update Nix to 2.6 --- flake.lock | 43 ++++++++++++++++++++++++++----------- flake.nix | 1 + t/jobs/empty-dir-builder.sh | 3 +++ 3 files changed, 34 insertions(+), 13 deletions(-) diff --git a/flake.lock b/flake.lock index fa71ceb5..e4bf8c71 100644 --- a/flake.lock +++ b/flake.lock @@ -3,16 +3,15 @@ "lowdown-src": { "flake": false, "locked": { - "lastModified": 1617481909, - "narHash": "sha256-SqnfOFuLuVRRNeVJr1yeEPJue/qWoCp5N6o5Kr///p4=", + "lastModified": 1633514407, + "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=", "owner": "kristapsdz", "repo": "lowdown", - "rev": "148f9b2f586c41b7e36e73009db43ea68c7a1a4d", + "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8", "type": "github" }, "original": { "owner": "kristapsdz", - "ref": "VERSION_0_8_4", "repo": "lowdown", "type": "github" } @@ -20,28 +19,31 @@ "nix": { "inputs": { "lowdown-src": "lowdown-src", - "nixpkgs": "nixpkgs" + "nixpkgs": "nixpkgs", + "nixpkgs-regression": "nixpkgs-regression" }, "locked": { - "lastModified": 1628586117, - "narHash": "sha256-8hS4xy7fq3z9XZIMYm4sQi9SzhcYqEJfdbwgDePoWuc=", + "lastModified": 1643066034, + "narHash": "sha256-xEPeMcNJVOeZtoN+d+aRwolpW8mFSEQx76HTRdlhPhg=", "owner": "NixOS", "repo": "nix", - "rev": "a6ba313a0aac3b6e2fef434cb42d190a0849238e", + "rev": 
"a1cd7e58606a41fcf62bf8637804cf8306f17f62", "type": "github" }, "original": { - "id": "nix", - "type": "indirect" + "owner": "NixOS", + "ref": "2.6.0", + "repo": "nix", + "type": "github" } }, "nixpkgs": { "locked": { - "lastModified": 1624862269, - "narHash": "sha256-JFcsh2+7QtfKdJFoPibLFPLgIW6Ycnv8Bts9a7RYme0=", + "lastModified": 1632864508, + "narHash": "sha256-d127FIvGR41XbVRDPVvozUPQ/uRHbHwvfyKHwEt5xFM=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "f77036342e2b690c61c97202bf48f2ce13acc022", + "rev": "82891b5e2c2359d7e58d08849e4c89511ab94234", "type": "github" }, "original": { @@ -50,6 +52,21 @@ "type": "indirect" } }, + "nixpkgs-regression": { + "locked": { + "lastModified": 1643052045, + "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + }, + "original": { + "id": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "indirect" + } + }, "root": { "inputs": { "nix": "nix", diff --git a/flake.nix b/flake.nix index 1b29fa72..cfeeba85 100644 --- a/flake.nix +++ b/flake.nix @@ -2,6 +2,7 @@ description = "A Nix-based continuous build system"; inputs.nixpkgs.follows = "nix/nixpkgs"; + inputs.nix.url = github:NixOS/nix/2.6.0; outputs = { self, nixpkgs, nix }: let diff --git a/t/jobs/empty-dir-builder.sh b/t/jobs/empty-dir-builder.sh index addc7ef6..949216e0 100755 --- a/t/jobs/empty-dir-builder.sh +++ b/t/jobs/empty-dir-builder.sh @@ -1,3 +1,6 @@ #! /bin/sh +# Workaround for https://github.com/NixOS/nix/pull/6051 +echo "some output" + mkdir $out From 76b4b43ac5e8a11c8d66e84895c3de0ec598965c Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 9 Feb 2022 15:01:42 -0500 Subject: [PATCH 027/401] Move ldap.t to a legacy-ldap.t, make ldap.t use the new format config. 
--- t/Hydra/Controller/User/ldap-legacy.t | 105 ++++++++++++++++++++++++++ t/Hydra/Controller/User/ldap.t | 85 ++++++++++++--------- 2 files changed, 155 insertions(+), 35 deletions(-) create mode 100644 t/Hydra/Controller/User/ldap-legacy.t diff --git a/t/Hydra/Controller/User/ldap-legacy.t b/t/Hydra/Controller/User/ldap-legacy.t new file mode 100644 index 00000000..64da6112 --- /dev/null +++ b/t/Hydra/Controller/User/ldap-legacy.t @@ -0,0 +1,105 @@ +use strict; +use warnings; +use Setup; +use LDAPContext; +use Test2::V0; +use Catalyst::Test (); +use HTTP::Request::Common; +use JSON::MaybeXS; + +my $ldap = LDAPContext->new(); +my $users = { + unrelated => $ldap->add_user("unrelated_user"), + admin => $ldap->add_user("admin_user"), + not_admin => $ldap->add_user("not_admin_user"), + many_roles => $ldap->add_user("many_roles"), +}; + +$ldap->add_group("hydra_admin", $users->{"admin"}->{"username"}); +$ldap->add_group("hydra-admin", $users->{"not_admin"}->{"username"}); + +$ldap->add_group("hydra_create-projects", $users->{"many_roles"}->{"username"}); +$ldap->add_group("hydra_restart-jobs", $users->{"many_roles"}->{"username"}); +$ldap->add_group("hydra_bump-to-front", $users->{"many_roles"}->{"username"}); +$ldap->add_group("hydra_cancel-build", $users->{"many_roles"}->{"username"}); + +my $hydra_ldap_config = "${\$ldap->tmpdir()}/hydra_ldap_config.yaml"; +LDAPContext::write_file($hydra_ldap_config, <server_url()}" + ldap_server_options: + timeout: 30 + debug: 0 + binddn: "cn=root,dc=example" + bindpw: notapassword + start_tls: 0 + start_tls_options: + verify: none + user_basedn: "ou=users,dc=example" + user_filter: "(&(objectClass=inetOrgPerson)(cn=%s))" + user_scope: one + user_field: cn + user_search_options: + deref: always + use_roles: 1 + role_basedn: "ou=groups,dc=example" + role_filter: "(&(objectClass=groupOfNames)(member=%s))" + role_scope: one + role_field: cn + role_value: dn + role_search_options: + deref: always +YAML + +$ENV{'HYDRA_LDAP_CONFIG'} = 
$hydra_ldap_config; +my $ctx = test_context(); + +Catalyst::Test->import('Hydra'); + +subtest "Valid login attempts" => sub { + my %users_to_roles = ( + unrelated => [], + admin => ["admin"], + not_admin => [], + many_roles => [ "create-projects", "restart-jobs", "bump-to-front", "cancel-build" ], + ); + for my $username (keys %users_to_roles) { + my $user = $users->{$username}; + my $roles = $users_to_roles{$username}; + + subtest "Verifying $username" => sub { + my $req = request(POST '/login', + Referer => 'http://localhost/', + Accept => 'application/json', + Content => { + username => $user->{"username"}, + password => $user->{"password"} + } + ); + + is($req->code, 302, "The login redirects"); + my $data = decode_json($req->content()); + is($data->{"username"}, $user->{"username"}, "Username matches"); + is($data->{"emailaddress"}, $user->{"email"}, "Email matches"); + is([sort @{$data->{"userroles"}}], [sort @$roles], "Roles match"); + }; + } +}; + +# Logging in with an invalid user is rejected +is(request(POST '/login', + Referer => 'http://localhost/', + Content => { + username => 'alice', + password => 'foobar' + } +)->code, 403, "Logging in with invalid credentials does not work"); + + + +done_testing; diff --git a/t/Hydra/Controller/User/ldap.t b/t/Hydra/Controller/User/ldap.t index 64da6112..caa3433c 100644 --- a/t/Hydra/Controller/User/ldap.t +++ b/t/Hydra/Controller/User/ldap.t @@ -23,41 +23,56 @@ $ldap->add_group("hydra_restart-jobs", $users->{"many_roles"}->{"username"}); $ldap->add_group("hydra_bump-to-front", $users->{"many_roles"}->{"username"}); $ldap->add_group("hydra_cancel-build", $users->{"many_roles"}->{"username"}); -my $hydra_ldap_config = "${\$ldap->tmpdir()}/hydra_ldap_config.yaml"; -LDAPContext::write_file($hydra_ldap_config, <server_url()}" - ldap_server_options: - timeout: 30 - debug: 0 - binddn: "cn=root,dc=example" - bindpw: notapassword - start_tls: 0 - start_tls_options: - verify: none - user_basedn: "ou=users,dc=example" - 
user_filter: "(&(objectClass=inetOrgPerson)(cn=%s))" - user_scope: one - user_field: cn - user_search_options: - deref: always - use_roles: 1 - role_basedn: "ou=groups,dc=example" - role_filter: "(&(objectClass=groupOfNames)(member=%s))" - role_scope: one - role_field: cn - role_value: dn - role_search_options: - deref: always -YAML - -$ENV{'HYDRA_LDAP_CONFIG'} = $hydra_ldap_config; -my $ctx = test_context(); +my $ctx = test_context( + hydra_config => < + + + class = Password + password_field = password + password_type = self_check + + + class = LDAP + ldap_server = ${\$ldap->server_url()} + + timeout = 30 + debug = 0 + + binddn = "cn=root,dc=example" + bindpw = notapassword + start_tls = 0 + + verify = none + + user_basedn = "ou=users,dc=example" + user_filter = "(&(objectClass=inetOrgPerson)(cn=%s))" + user_scope = one + user_field = cn + + deref = always + + use_roles = 1 + role_basedn = "ou=groups,dc=example" + role_filter = "(&(objectClass=groupOfNames)(member=%s))" + role_scope = one + role_field = cn + role_value = dn + + deref = always + + + + + hydra_admin = admin + hydra_create-projects = create-projects + hydra_cancel-build = cancel-build + hydra_bump-to-front = bump-to-front + hydra_restart-jobs = restart-jobs + + +CFG +); Catalyst::Test->import('Hydra'); From 61d74a71944513a99a095e08fa443211fb4bbf41 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Janne=20He=C3=9F?= Date: Fri, 21 Jan 2022 21:20:02 +0100 Subject: [PATCH 028/401] Redo LDAP config in the main configuration and add role mappings --- doc/manual/src/configuration.md | 101 +++++++++++++++++++------------ flake.nix | 1 - src/lib/Hydra.pm | 6 +- src/lib/Hydra/Controller/User.pm | 10 ++- 4 files changed, 71 insertions(+), 47 deletions(-) diff --git a/doc/manual/src/configuration.md b/doc/manual/src/configuration.md index e0a20d3c..fe1a8402 100644 --- a/doc/manual/src/configuration.md +++ b/doc/manual/src/configuration.md @@ -108,47 +108,70 @@ Using LDAP as authentication backend (optional) Instead of 
using Hydra\'s built-in user management you can optionally use LDAP to manage roles and users. -The `hydra-server` accepts the environment variable -*HYDRA\_LDAP\_CONFIG*. The value of the variable should point to a valid -YAML file containing the Catalyst LDAP configuration. The format of the -configuration file is describe in the -[*Catalyst::Authentication::Store::LDAP* -documentation](https://metacpan.org/pod/Catalyst::Authentication::Store::LDAP#CONFIGURATION-OPTIONS). -An example is given below. +This is configured by defining the `` block in the configuration file. +In this block it\'s possible to configure the authentication plugin in the +`` block, all options are directly passed to `Catalyst::Authentication +::Store::LDAP`. The documentation for the available settings can be found [here] +(https://metacpan.org/pod/Catalyst::Authentication::Store::LDAP#CONFIGURATION-OPTIONS). -Roles can be assigned to users based on their LDAP group membership -(*use\_roles: 1* in the below example). For a user to have the role -*admin* assigned to them they should be in the group *hydra\_admin*. In -general any LDAP group of the form *hydra\_some\_role* (notice the -*hydra\_* prefix) will work. +Note that the bind password (if needed) should be supplied as an included file to +prevent it from leaking to the Nix store. 
- credential: - class: Password - password_field: password - password_type: self_check - store: - class: LDAP - ldap_server: localhost - ldap_server_options.timeout: 30 - binddn: "cn=root,dc=example" - bindpw: notapassword - start_tls: 0 - start_tls_options: - verify: none - user_basedn: "ou=users,dc=example" - user_filter: "(&(objectClass=inetOrgPerson)(cn=%s))" - user_scope: one - user_field: cn - user_search_options: - deref: always - use_roles: 1 - role_basedn: "ou=groups,dc=example" - role_filter: "(&(objectClass=groupOfNames)(member=%s))" - role_scope: one - role_field: cn - role_value: dn - role_search_options: - deref: always +Roles can be assigned to users based on their LDAP group membership. For this +to work *use\_roles = 1* needs to be defined for the authentication plugin. +LDAP groups can then be mapped to Hydra roles using the `` block. + +Example configuration: +``` + + + + class = Password + password_field = password + password_type= self_check + + + class = LDAP + ldap_server = localhost + + timeout = 30 + debug = 2 + + binddn = "cn=root,dc=example" + bindpw = notapassword + start_tls = 0 + + verify = none + + user_basedn = "ou=users,dc=example" + user_filter = "(&(objectClass=inetOrgPerson)(cn=%s))" + user_scope = one + user_field = cn + + deref = always + + # Important for role mappings to work: + use_roles = 1 + role_basedn = "ou=groups,dc=example" + role_filter = "(&(objectClass=groupOfNames)(member=%s))" + role_scope = one + role_field = cn + role_value = dn + + deref = always + + + + # Make all users in the hydra_admin group Hydra admins + hydra_admin = admin + # Allow all users in the dev group to restart jobs + dev = restart-jobs + + +``` + +This example configuration also enables the (very verbose) LDAP debug logging +by setting `config.ldap_server_options.debug`. 
Embedding Extra HTML -------------------- diff --git a/flake.nix b/flake.nix index b61d7d0d..736cbd53 100644 --- a/flake.nix +++ b/flake.nix @@ -522,7 +522,6 @@ TextTable UUID4Tiny XMLSimple - YAML ]; }; diff --git a/src/lib/Hydra.pm b/src/lib/Hydra.pm index 07aec922..47ada081 100644 --- a/src/lib/Hydra.pm +++ b/src/lib/Hydra.pm @@ -6,6 +6,7 @@ use parent 'Catalyst'; use Moose; use Hydra::Plugin; use Hydra::Model::DB; +use Hydra::Helper::Nix qw(getHydraConfig); use Catalyst::Runtime '5.70'; use Catalyst qw/ConfigLoader Static::Simple @@ -19,7 +20,6 @@ use Catalyst qw/ConfigLoader PrometheusTiny/, '-Log=warn,fatal,error'; use CatalystX::RoleApplicator; -use YAML qw(LoadFile); use Path::Class 'file'; our $VERSION = '0.01'; @@ -43,9 +43,7 @@ __PACKAGE__->config( role_field => "role", }, }, - ldap => $ENV{'HYDRA_LDAP_CONFIG'} ? LoadFile( - file($ENV{'HYDRA_LDAP_CONFIG'}) - ) : undef + ldap => Hydra::Helper::Nix::getHydraConfig->{'ldap'}->{'config'} }, 'Plugin::ConfigLoader' => { driver => { diff --git a/src/lib/Hydra/Controller/User.pm b/src/lib/Hydra/Controller/User.pm index 01f59dee..08b2c91b 100644 --- a/src/lib/Hydra/Controller/User.pm +++ b/src/lib/Hydra/Controller/User.pm @@ -59,7 +59,9 @@ sub doLDAPLogin { my $user = $c->find_user({ username => $username }); my $LDAPUser = $c->find_user({ username => $username }, 'ldap'); - my @LDAPRoles = grep { (substr $_, 0, 6) eq "hydra_" } $LDAPUser->roles; + my @LDAPRoles = $LDAPUser->roles; + my %ldap_config = %{Hydra::Helper::Nix::getHydraConfig->{'ldap'}}; + my %role_mapping = $ldap_config{'role_mapping'} ? 
%{$ldap_config{'role_mapping'}} : (); if (!$user) { $c->model('DB::Users')->create( @@ -79,8 +81,10 @@ sub doLDAPLogin { }); } $user->userroles->delete; - if (@LDAPRoles) { - $user->userroles->create({ role => (substr $_, 6) }) for @LDAPRoles; + foreach my $ldap_role (@LDAPRoles) { + if (%role_mapping{$ldap_role}) { + $user->userroles->create({ role => $role_mapping{$ldap_role} }); + } } $c->set_authenticated($user); } From f07fb7d2799881fe422e6cf1c9c4811047c1239b Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 9 Feb 2022 21:06:28 -0500 Subject: [PATCH 029/401] LDAP support: include BC support for the YAML based loading Includes a refactoring of the configuration loader. --- doc/manual/src/configuration.md | 49 ++++- flake.nix | 1 + src/lib/Hydra.pm | 4 +- src/lib/Hydra/Config.pm | 158 ++++++++++++++ src/lib/Hydra/Controller/User.pm | 12 +- src/lib/Hydra/Helper/Nix.pm | 19 -- src/script/hydra-eval-jobset | 1 + src/script/hydra-notify | 1 + src/script/hydra-s3-backup-collect-garbage | 1 + src/script/hydra-send-stats | 1 + src/script/hydra-update-gc-roots | 1 + t/Hydra/Config/hydra-notify.t | 3 +- t/Hydra/Config/include.t | 7 +- t/Hydra/Config/ldap_role_map.t | 242 +++++++++++++++++++++ t/Hydra/Config/statsd.t | 3 +- t/Hydra/Controller/User/ldap.t | 13 +- 16 files changed, 475 insertions(+), 41 deletions(-) create mode 100644 t/Hydra/Config/ldap_role_map.t diff --git a/doc/manual/src/configuration.md b/doc/manual/src/configuration.md index fe1a8402..1a4db163 100644 --- a/doc/manual/src/configuration.md +++ b/doc/manual/src/configuration.md @@ -105,11 +105,11 @@ in the hydra configuration file, as below: Using LDAP as authentication backend (optional) ----------------------------------------------- -Instead of using Hydra\'s built-in user management you can optionally +Instead of using Hydra's built-in user management you can optionally use LDAP to manage roles and users. This is configured by defining the `` block in the configuration file. 
-In this block it\'s possible to configure the authentication plugin in the +In this block it's possible to configure the authentication plugin in the `` block, all options are directly passed to `Catalyst::Authentication ::Store::LDAP`. The documentation for the available settings can be found [here] (https://metacpan.org/pod/Catalyst::Authentication::Store::LDAP#CONFIGURATION-OPTIONS). @@ -135,7 +135,6 @@ Example configuration: ldap_server = localhost timeout = 30 - debug = 2 binddn = "cn=root,dc=example" bindpw = notapassword @@ -164,14 +163,52 @@ Example configuration: # Make all users in the hydra_admin group Hydra admins hydra_admin = admin - # Allow all users in the dev group to restart jobs + # Allow all users in the dev group to restart jobs and cancel builds dev = restart-jobs + dev = cancel-builds ``` -This example configuration also enables the (very verbose) LDAP debug logging -by setting `config.ldap_server_options.debug`. +### Debugging LDAP + +Set the `debug` parameter under `ldap.config.ldap_server_options.debug`: + +``` + + + + + debug = 2 + + + + +``` + +### Legacy LDAP Configuration + +Hydra used to load the LDAP configuration from a YAML file in the +`HYDRA_LDAP_CONFIG` environment variable. This behavior is deperecated +and will be removed. + +When Hydra uses the deprecated YAML file, Hydra applies the following +default role mapping: + +``` + + + hydra_admin = admin + hydra_bump-to-front = bump-to-front + hydra_cancel-build = cancel-build + hydra_create-projects = create-projects + hydra_restart-jobs = restart-jobs + + +``` + +Note that configuring both the LDAP parameters in the hydra.conf and via +the environment variable is a fatal error. 
Embedding Extra HTML -------------------- diff --git a/flake.nix b/flake.nix index 736cbd53..56cb2960 100644 --- a/flake.nix +++ b/flake.nix @@ -521,6 +521,7 @@ TextDiff TextTable UUID4Tiny + YAML XMLSimple ]; }; diff --git a/src/lib/Hydra.pm b/src/lib/Hydra.pm index 47ada081..910212ce 100644 --- a/src/lib/Hydra.pm +++ b/src/lib/Hydra.pm @@ -6,7 +6,7 @@ use parent 'Catalyst'; use Moose; use Hydra::Plugin; use Hydra::Model::DB; -use Hydra::Helper::Nix qw(getHydraConfig); +use Hydra::Config qw(getLDAPConfigAmbient); use Catalyst::Runtime '5.70'; use Catalyst qw/ConfigLoader Static::Simple @@ -43,7 +43,7 @@ __PACKAGE__->config( role_field => "role", }, }, - ldap => Hydra::Helper::Nix::getHydraConfig->{'ldap'}->{'config'} + ldap => getLDAPConfigAmbient()->{'config'} }, 'Plugin::ConfigLoader' => { driver => { diff --git a/src/lib/Hydra/Config.pm b/src/lib/Hydra/Config.pm index bb991822..d96292fe 100644 --- a/src/lib/Hydra/Config.pm +++ b/src/lib/Hydra/Config.pm @@ -2,7 +2,165 @@ package Hydra::Config; use strict; use warnings; +use Config::General; +use List::SomeUtils qw(none); +use YAML qw(LoadFile); + +our @ISA = qw(Exporter); +our @EXPORT = qw( + getHydraConfig + getLDAPConfig + getLDAPConfigAmbient +); our %configGeneralOpts = (-UseApacheInclude => 1, -IncludeAgain => 1, -IncludeRelative => 1); +my $hydraConfigCache; + +sub getHydraConfig { + return $hydraConfigCache if defined $hydraConfigCache; + + my $conf; + + if ($ENV{"HYDRA_CONFIG"}) { + $conf = $ENV{"HYDRA_CONFIG"}; + } else { + require Hydra::Model::DB; + $conf = Hydra::Model::DB::getHydraPath() . 
"/hydra.conf" + }; + + if (-f $conf) { + $hydraConfigCache = loadConfig($conf); + } else { + $hydraConfigCache = {}; + } + + return $hydraConfigCache; +} + +sub loadConfig { + my ($sourceFile) = @_; + + my %opts = (%configGeneralOpts, -ConfigFile => $sourceFile); + + return { Config::General->new(%opts)->getall }; +} + +sub is_ldap_in_legacy_mode { + my ($config, %env) = @_; + + my $legacy_defined = defined $env{"HYDRA_LDAP_CONFIG"}; + + if (defined $config->{"ldap"}) { + if ($legacy_defined) { + die "The legacy environment variable HYDRA_LDAP_CONFIG is set, but config is also specified in hydra.conf. Please unset the environment variable."; + } + + return 0; + } elsif ($legacy_defined) { + warn "Hydra is configured to use LDAP via the HYDRA_LDAP_CONFIG, a deprecated method. Please see the docs about configuring LDAP in the hydra.conf."; + return 1; + } else { + return 0; + } +} + +sub getLDAPConfigAmbient { + return getLDAPConfig(getHydraConfig(), %ENV); +} + +sub getLDAPConfig { + my ($config, %env) = @_; + + my $ldap_config; + + if (is_ldap_in_legacy_mode($config, %env)) { + $ldap_config = get_legacy_ldap_config($env{"HYDRA_LDAP_CONFIG"}); + } else { + $ldap_config = $config->{"ldap"}; + } + + $ldap_config->{"role_mapping"} = normalize_ldap_role_mappings($ldap_config->{"role_mapping"}); + + return $ldap_config; +} + +sub get_legacy_ldap_config { + my ($ldap_yaml_file) = @_; + + return { + config => LoadFile($ldap_yaml_file), + role_mapping => { + "hydra_admin" => [ "admin" ], + "hydra_bump-to-front" => [ "bump-to-front" ], + "hydra_cancel-build" => [ "cancel-build" ], + "hydra_create-projects" => [ "create-projects" ], + "hydra_restart-jobs" => [ "restart-jobs" ], + }, + }; +} + +sub normalize_ldap_role_mappings { + my ($input_map) = @_; + + my $mapping = {}; + + my @errors; + + for my $group (keys %{$input_map}) { + my $input = $input_map->{$group}; + + if (ref $input eq "ARRAY") { + $mapping->{$group} = $input; + } elsif (ref $input eq "") { + 
$mapping->{$group} = [ $input ]; + } else { + push @errors, "On group '$group': the value is of type ${\ref $input}. Only strings and lists are acceptable."; + $mapping->{$group} = [ ]; + } + + eval { + validate_roles($mapping->{$group}); + }; + if ($@) { + push @errors, "On group '$group': $@"; + } + } + + if (@errors) { + die join "\n", @errors; + } + + return $mapping; +} + +sub validate_roles { + my ($roles) = @_; + + my @invalid; + my $valid = valid_roles(); + + for my $role (@$roles) { + if (none { $_ eq $role } @$valid) { + push @invalid, "'$role'"; + } + } + + if (@invalid) { + die "Invalid roles: ${\join ', ', @invalid}. Valid roles are: ${\join ', ', @$valid}."; + } + + return 1; +} + +sub valid_roles { + return [ + "admin", + "bump-to-front", + "cancel-build", + "create-projects", + "restart-jobs", + ]; +} + 1; diff --git a/src/lib/Hydra/Controller/User.pm b/src/lib/Hydra/Controller/User.pm index 08b2c91b..2a8affae 100644 --- a/src/lib/Hydra/Controller/User.pm +++ b/src/lib/Hydra/Controller/User.pm @@ -7,6 +7,7 @@ use base 'Hydra::Base::Controller::REST'; use File::Slurper qw(read_text); use Crypt::RandPasswd; use Digest::SHA1 qw(sha1_hex); +use Hydra::Config qw(getLDAPConfigAmbient); use Hydra::Helper::Nix; use Hydra::Helper::CatalystUtils; use Hydra::Helper::Email; @@ -56,12 +57,10 @@ sub logout_POST { sub doLDAPLogin { my ($self, $c, $username) = @_; - my $user = $c->find_user({ username => $username }); my $LDAPUser = $c->find_user({ username => $username }, 'ldap'); my @LDAPRoles = $LDAPUser->roles; - my %ldap_config = %{Hydra::Helper::Nix::getHydraConfig->{'ldap'}}; - my %role_mapping = $ldap_config{'role_mapping'} ? 
%{$ldap_config{'role_mapping'}} : (); + my $role_mapping = getLDAPConfigAmbient()->{"role_mapping"}; if (!$user) { $c->model('DB::Users')->create( @@ -82,8 +81,11 @@ sub doLDAPLogin { } $user->userroles->delete; foreach my $ldap_role (@LDAPRoles) { - if (%role_mapping{$ldap_role}) { - $user->userroles->create({ role => $role_mapping{$ldap_role} }); + if (defined($role_mapping->{$ldap_role})) { + my $roles = $role_mapping->{$ldap_role}; + for my $mapped_role (@$roles) { + $user->userroles->create({ role => $mapped_role }); + } } } $c->set_authenticated($user); diff --git a/src/lib/Hydra/Helper/Nix.pm b/src/lib/Hydra/Helper/Nix.pm index 796d9844..514fb439 100644 --- a/src/lib/Hydra/Helper/Nix.pm +++ b/src/lib/Hydra/Helper/Nix.pm @@ -5,7 +5,6 @@ use warnings; use Exporter; use File::Path; use File::Basename; -use Config::General; use Hydra::Config; use Hydra::Helper::CatalystUtils; use Hydra::Model::DB; @@ -49,24 +48,6 @@ sub getHydraHome { return $dir; } - -my $hydraConfig; - -sub getHydraConfig { - return $hydraConfig if defined $hydraConfig; - my $conf = $ENV{"HYDRA_CONFIG"} || (Hydra::Model::DB::getHydraPath . 
"/hydra.conf"); - my %opts = (%Hydra::Config::configGeneralOpts, -ConfigFile => $conf); - if (-f $conf) { - my %h = Config::General->new(%opts)->getall; - - $hydraConfig = \%h; - } else { - $hydraConfig = {}; - } - return $hydraConfig; -} - - # Return hash of statsd configuration of the following shape: # ( # host => string, diff --git a/src/script/hydra-eval-jobset b/src/script/hydra-eval-jobset index 450c5f50..108c59c8 100755 --- a/src/script/hydra-eval-jobset +++ b/src/script/hydra-eval-jobset @@ -8,6 +8,7 @@ use Data::Dump qw(dump); use Digest::SHA qw(sha256_hex); use Encode; use File::Slurper qw(read_text); +use Hydra::Config; use Hydra::Helper::AddBuilds; use Hydra::Helper::CatalystUtils; use Hydra::Helper::Email; diff --git a/src/script/hydra-notify b/src/script/hydra-notify index 3b8ffe6d..1e666bf7 100755 --- a/src/script/hydra-notify +++ b/src/script/hydra-notify @@ -6,6 +6,7 @@ use utf8; use Getopt::Long; use Time::HiRes qw( gettimeofday tv_interval ); use HTTP::Server::PSGI; +use Hydra::Config; use Hydra::Event; use Hydra::Event::BuildFinished; use Hydra::Helper::AddBuilds; diff --git a/src/script/hydra-s3-backup-collect-garbage b/src/script/hydra-s3-backup-collect-garbage index 23b50c2f..bd8da2db 100755 --- a/src/script/hydra-s3-backup-collect-garbage +++ b/src/script/hydra-s3-backup-collect-garbage @@ -9,6 +9,7 @@ use Net::Amazon::S3; use Net::Amazon::S3::Client; use Nix::Config; use Nix::Store; +use Hydra::Config; use Hydra::Model::DB; use Hydra::Helper::Nix; diff --git a/src/script/hydra-send-stats b/src/script/hydra-send-stats index 596c622a..d8286c01 100755 --- a/src/script/hydra-send-stats +++ b/src/script/hydra-send-stats @@ -3,6 +3,7 @@ use strict; use warnings; use utf8; +use Hydra::Config; use Hydra::Helper::Nix; use Net::Statsd; use File::Slurper qw(read_text); diff --git a/src/script/hydra-update-gc-roots b/src/script/hydra-update-gc-roots index 91d6393b..fbb90488 100755 --- a/src/script/hydra-update-gc-roots +++ 
b/src/script/hydra-update-gc-roots @@ -6,6 +6,7 @@ use File::Path; use File::stat; use File::Basename; use Nix::Store; +use Hydra::Config; use Hydra::Schema; use Hydra::Helper::Nix; use Hydra::Model::DB; diff --git a/t/Hydra/Config/hydra-notify.t b/t/Hydra/Config/hydra-notify.t index fb050324..272f63b6 100644 --- a/t/Hydra/Config/hydra-notify.t +++ b/t/Hydra/Config/hydra-notify.t @@ -1,6 +1,7 @@ use strict; use warnings; use Setup; +use Hydra::Config; my %ctx = test_init(hydra_config => q| @@ -14,7 +15,7 @@ my %ctx = test_init(hydra_config => q| require Hydra::Helper::Nix; use Test2::V0; -is(Hydra::Helper::Nix::getHydraNotifyPrometheusConfig(Hydra::Helper::Nix::getHydraConfig()), { +is(Hydra::Helper::Nix::getHydraNotifyPrometheusConfig(getHydraConfig()), { 'listen_address' => "127.0.0.1", 'port' => 9199 }, "Reading specific configuration from the hydra.conf works"); diff --git a/t/Hydra/Config/include.t b/t/Hydra/Config/include.t index e161902e..fe2dd1ed 100644 --- a/t/Hydra/Config/include.t +++ b/t/Hydra/Config/include.t @@ -1,6 +1,8 @@ use strict; use warnings; use Setup; +use Hydra::Config; +use Test2::V0; my %ctx = test_init( use_external_destination_store => 0, @@ -17,10 +19,7 @@ write_file($ctx{'tmpdir'} . 
"/bar.conf", q| bar = baz |); -require Hydra::Helper::Nix; -use Test2::V0; - -is(Hydra::Helper::Nix::getHydraConfig(), { +is(getHydraConfig(), { foo => { bar => "baz" } }, "Nested includes work."); diff --git a/t/Hydra/Config/ldap_role_map.t b/t/Hydra/Config/ldap_role_map.t new file mode 100644 index 00000000..e56dd619 --- /dev/null +++ b/t/Hydra/Config/ldap_role_map.t @@ -0,0 +1,242 @@ + +use strict; +use warnings; +use Setup; +use Hydra::Config; +use Test2::V0; + +my $tmpdir = File::Temp->newdir(); +my $cfgfile = "$tmpdir/conf"; +my $scratchCfgFile = "$tmpdir/hydra.scratch.conf"; + +my $ldapInHydraConfFile = "$tmpdir/hydra.empty.conf"; +write_file($ldapInHydraConfFile, < + + + class = Password + + + + hydra_admin = admin + hydra_one_group_many_roles = create-projects + hydra_one_group_many_roles = cancel-build + + +CONF +my $ldapInHydraConf = Hydra::Config::loadConfig($ldapInHydraConfFile); + +my $emptyHydraConfFile = "$tmpdir/hydra.empty.conf"; +write_file($emptyHydraConfFile, ""); +my $emptyHydraConf = Hydra::Config::loadConfig($emptyHydraConfFile); + +my $ldapYamlFile = "$tmpdir/ldap.yaml"; +write_file($ldapYamlFile, < sub { + subtest "No ldap section and an env var gets us legacy data" => sub { + like( + warning { + is( + Hydra::Config::getLDAPConfig( + $emptyHydraConf, + ( HYDRA_LDAP_CONFIG => $ldapYamlFile ) + ), + { + config => { + credential => { + class => "Password", + }, + }, + role_mapping => { + "hydra_admin" => [ "admin" ], + "hydra_bump-to-front" => [ "bump-to-front" ], + "hydra_cancel-build" => [ "cancel-build" ], + "hydra_create-projects" => [ "create-projects" ], + "hydra_restart-jobs" => [ "restart-jobs" ], + } + }, + "The empty file and set env var make legacy mode active." + ); + }, + qr/configured to use LDAP via the HYDRA_LDAP_CONFIG/, + "Having the environment variable set warns." 
+ ); + }; + + subtest "An ldap section and no env var gets us normalized data" => sub { + is( + warns { + is( + Hydra::Config::getLDAPConfig( + $ldapInHydraConf, + () + ), + { + config => { + credential => { + class => "Password", + }, + }, + role_mapping => { + "hydra_admin" => [ "admin" ], + "hydra_one_group_many_roles" => [ "create-projects", "cancel-build" ], + } + }, + "The empty file and set env var make legacy mode active." + ); + }, + 0, + "No warnings are issued for non-legacy LDAP support." + ); + }; +}; + + +subtest "is_ldap_in_legacy_mode" => sub { + subtest "With the environment variable set and an empty hydra.conf" => sub { + like( + warning { + is( + Hydra::Config::is_ldap_in_legacy_mode( + $emptyHydraConf, + ( HYDRA_LDAP_CONFIG => $ldapYamlFile ) + ), + 1, + "The empty file and set env var make legacy mode active." + ); + }, + qr/configured to use LDAP via the HYDRA_LDAP_CONFIG/, + "Having the environment variable set warns." + ); + }; + + subtest "With the environment variable set and LDAP specified in hydra.conf" => sub { + like( + dies { + Hydra::Config::is_ldap_in_legacy_mode( + $ldapInHydraConf, + ( HYDRA_LDAP_CONFIG => $ldapYamlFile ) + ); + }, + qr/HYDRA_LDAP_CONFIG is set, but config is also specified in hydra\.conf/, + "Having the environment variable set dies to avoid misconfiguration." + ); + }; + + subtest "Without the environment variable set and an empty hydra.conf" => sub { + is( + warns { + is( + Hydra::Config::is_ldap_in_legacy_mode( + $emptyHydraConf, + () + ), + 0, + "The empty file and unset env var means non-legacy." + ); + }, + 0, + "We should receive zero warnings." + ); + }; + + subtest "Without the environment variable set and LDAP specified in hydra.conf" => sub { + is( + warns { + is( + Hydra::Config::is_ldap_in_legacy_mode( + $ldapInHydraConf, + () + ), + 0, + "The empty file and unset env var means non-legacy." + ); + }, + 0, + "We should receive zero warnings." 
+ ); + }; +}; + +subtest "get_legacy_ldap_config" => sub { + is( + Hydra::Config::get_legacy_ldap_config($ldapYamlFile), + { + config => { + credential => { + class => "Password", + }, + }, + role_mapping => { + "hydra_admin" => [ "admin" ], + "hydra_bump-to-front" => [ "bump-to-front" ], + "hydra_cancel-build" => [ "cancel-build" ], + "hydra_create-projects" => [ "create-projects" ], + "hydra_restart-jobs" => [ "restart-jobs" ], + } + }, + "Legacy, default role maps are applied." + ); +}; + +subtest "validate_roles" => sub { + ok(Hydra::Config::validate_roles([]), "An empty list is valid"); + ok(Hydra::Config::validate_roles(Hydra::Config::valid_roles()), "All current roles are valid."); + like( + dies { Hydra::Config::validate_roles([""]) }, + qr/Invalid roles: ''./, + "Invalid roles are failing" + ); + like( + dies { Hydra::Config::validate_roles(["foo", "bar"]) }, + qr/Invalid roles: 'foo', 'bar'./, + "All the invalid roles are present in the error" + ); +}; + +subtest "normalize_ldap_role_mappings" => sub { + is( + Hydra::Config::normalize_ldap_role_mappings({}), + {}, + "An empty input map is an empty output map." + ); + + is( + Hydra::Config::normalize_ldap_role_mappings({ + hydra_admin => "admin", + hydra_one_group_many_roles => [ "create-projects", "bump-to-front" ], + }), + { + hydra_admin => [ "admin" ], + hydra_one_group_many_roles => [ "create-projects", "bump-to-front" ], + }, + "Lists and plain strings normalize to lists" + ); + + like( + dies{ + Hydra::Config::normalize_ldap_role_mappings({ + "group" => "invalid-role", + }), + }, + qr/Invalid roles.*invalid-role/, + "Invalid roles fail to normalize." + ); + + + like( + dies{ + Hydra::Config::normalize_ldap_role_mappings({ + "group" => { "nested" => "data" }, + }), + }, + qr/On group 'group':.* Only strings/, + "Invalid nesting fail to normalize." 
+ ); +}; + +done_testing; diff --git a/t/Hydra/Config/statsd.t b/t/Hydra/Config/statsd.t index c50e8d99..ba23a28a 100644 --- a/t/Hydra/Config/statsd.t +++ b/t/Hydra/Config/statsd.t @@ -1,6 +1,7 @@ use strict; use warnings; use Setup; +use Hydra::Config; my %ctx = test_init(hydra_config => q| @@ -12,7 +13,7 @@ my %ctx = test_init(hydra_config => q| require Hydra::Helper::Nix; use Test2::V0; -is(Hydra::Helper::Nix::getStatsdConfig(Hydra::Helper::Nix::getHydraConfig()), { +is(Hydra::Helper::Nix::getStatsdConfig(getHydraConfig()), { 'host' => "foo.bar", 'port' => 18125 }, "Reading specific configuration from the hydra.conf works"); diff --git a/t/Hydra/Controller/User/ldap.t b/t/Hydra/Controller/User/ldap.t index caa3433c..19e7825a 100644 --- a/t/Hydra/Controller/User/ldap.t +++ b/t/Hydra/Controller/User/ldap.t @@ -13,10 +13,12 @@ my $users = { admin => $ldap->add_user("admin_user"), not_admin => $ldap->add_user("not_admin_user"), many_roles => $ldap->add_user("many_roles"), + many_roles_one_group => $ldap->add_user("many_roles_one_group"), }; $ldap->add_group("hydra_admin", $users->{"admin"}->{"username"}); $ldap->add_group("hydra-admin", $users->{"not_admin"}->{"username"}); +$ldap->add_group("hydra_one_group_many_roles", $users->{"many_roles_one_group"}->{"username"}); $ldap->add_group("hydra_create-projects", $users->{"many_roles"}->{"username"}); $ldap->add_group("hydra_restart-jobs", $users->{"many_roles"}->{"username"}); @@ -69,6 +71,10 @@ my $ctx = test_context( hydra_cancel-build = cancel-build hydra_bump-to-front = bump-to-front hydra_restart-jobs = restart-jobs + + hydra_one_group_many_roles = create-projects + hydra_one_group_many_roles = cancel-build + hydra_one_group_many_roles = bump-to-front CFG @@ -79,9 +85,10 @@ Catalyst::Test->import('Hydra'); subtest "Valid login attempts" => sub { my %users_to_roles = ( unrelated => [], - admin => ["admin"], - not_admin => [], - many_roles => [ "create-projects", "restart-jobs", "bump-to-front", "cancel-build" ], + 
admin => ["admin"], + not_admin => [], + many_roles => [ "create-projects", "restart-jobs", "bump-to-front", "cancel-build" ], + many_roles_one_group => [ "create-projects", "bump-to-front", "cancel-build" ], ); for my $username (keys %users_to_roles) { my $user = $users->{$username}; From 185100adb81fa5d61cb11ec5a48fedd539c5486c Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Fri, 11 Feb 2022 10:50:58 -0500 Subject: [PATCH 030/401] docs: fixup --- doc/manual/src/configuration.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/manual/src/configuration.md b/doc/manual/src/configuration.md index 1a4db163..b4ccff89 100644 --- a/doc/manual/src/configuration.md +++ b/doc/manual/src/configuration.md @@ -110,8 +110,8 @@ use LDAP to manage roles and users. This is configured by defining the `` block in the configuration file. In this block it's possible to configure the authentication plugin in the -`` block, all options are directly passed to `Catalyst::Authentication -::Store::LDAP`. The documentation for the available settings can be found [here] +`` block. All options are directly passed to `Catalyst::Authentication::Store::LDAP`. +The documentation for the available settings can be found [here] (https://metacpan.org/pod/Catalyst::Authentication::Store::LDAP#CONFIGURATION-OPTIONS). 
Note that the bind password (if needed) should be supplied as an included file to @@ -128,7 +128,7 @@ Example configuration: class = Password password_field = password - password_type= self_check + password_type = self_check class = LDAP From d6dea399128eda82e58743522cf0a42eb1571db8 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Fri, 11 Feb 2022 10:53:08 -0500 Subject: [PATCH 031/401] ldap_role_map.t: fixup indentation --- t/Hydra/Config/ldap_role_map.t | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/t/Hydra/Config/ldap_role_map.t b/t/Hydra/Config/ldap_role_map.t index e56dd619..6ee4c73b 100644 --- a/t/Hydra/Config/ldap_role_map.t +++ b/t/Hydra/Config/ldap_role_map.t @@ -37,7 +37,7 @@ credential: YAML subtest "getLDAPConfig" => sub { - subtest "No ldap section and an env var gets us legacy data" => sub { + subtest "No ldap section and an env var gets us legacy data" => sub { like( warning { is( @@ -65,7 +65,7 @@ subtest "getLDAPConfig" => sub { qr/configured to use LDAP via the HYDRA_LDAP_CONFIG/, "Having the environment variable set warns." ); - }; + }; subtest "An ldap section and no env var gets us normalized data" => sub { is( From 71c06f2ce7caab5186f43a06d05d62cc170d9c13 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Fri, 11 Feb 2022 10:55:27 -0500 Subject: [PATCH 032/401] LDAP normalization errors: note that the error came while normalizing the roles. --- src/lib/Hydra/Config.pm | 2 +- t/Hydra/Config/ldap_role_map.t | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib/Hydra/Config.pm b/src/lib/Hydra/Config.pm index d96292fe..af686fca 100644 --- a/src/lib/Hydra/Config.pm +++ b/src/lib/Hydra/Config.pm @@ -128,7 +128,7 @@ sub normalize_ldap_role_mappings { } if (@errors) { - die join "\n", @errors; + die "Failed to normalize LDAP role mappings:\n" . 
(join "\n", @errors); } return $mapping; diff --git a/t/Hydra/Config/ldap_role_map.t b/t/Hydra/Config/ldap_role_map.t index 6ee4c73b..1c54a67d 100644 --- a/t/Hydra/Config/ldap_role_map.t +++ b/t/Hydra/Config/ldap_role_map.t @@ -223,7 +223,7 @@ subtest "normalize_ldap_role_mappings" => sub { "group" => "invalid-role", }), }, - qr/Invalid roles.*invalid-role/, + qr/Failed to normalize.*Invalid roles.*invalid-role/, "Invalid roles fail to normalize." ); From 6637c039853b1a9cf5381ede57b47925fc96d809 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Fri, 11 Feb 2022 10:59:24 -0500 Subject: [PATCH 033/401] fixup normalization error regex --- t/Hydra/Config/ldap_role_map.t | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/t/Hydra/Config/ldap_role_map.t b/t/Hydra/Config/ldap_role_map.t index 1c54a67d..cb1adf46 100644 --- a/t/Hydra/Config/ldap_role_map.t +++ b/t/Hydra/Config/ldap_role_map.t @@ -223,7 +223,7 @@ subtest "normalize_ldap_role_mappings" => sub { "group" => "invalid-role", }), }, - qr/Failed to normalize.*Invalid roles.*invalid-role/, + qr/Failed to normalize.*Invalid roles.*invalid-role/s, "Invalid roles fail to normalize." 
); From 86bb16d07bceaec1f0c506646d0456278c610173 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Fri, 11 Feb 2022 11:01:25 -0500 Subject: [PATCH 034/401] LDAPContext: sort $self hash keys --- t/lib/LDAPContext.pm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/t/lib/LDAPContext.pm b/t/lib/LDAPContext.pm index e3928a51..47188c2b 100644 --- a/t/lib/LDAPContext.pm +++ b/t/lib/LDAPContext.pm @@ -30,12 +30,12 @@ sub new { my $socket = "$root/slapd.socket"; my $self = { - _tmpdir => $root, _db_dir => $db_dir, _openldap_source => $ENV{"OPENLDAP_ROOT"}, _pid_file => $pid_file, _slapd_dir => $slapd_dir, _socket => $socket, + _tmpdir => $root, }; my $blessed = bless $self, $class; From e13d80f5cf396946ab131cdf451c2be63d7dca1a Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Fri, 11 Feb 2022 11:02:04 -0500 Subject: [PATCH 035/401] LDAPContext: take a root_password argument or generate one --- t/lib/LDAPContext.pm | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/t/lib/LDAPContext.pm b/t/lib/LDAPContext.pm index 47188c2b..2cd1a19d 100644 --- a/t/lib/LDAPContext.pm +++ b/t/lib/LDAPContext.pm @@ -12,9 +12,15 @@ use Hydra::Helper::Exec; # It creates a top level organization and structure, and provides # add_user and add_group. # +# Hash Parameters: +# +# * root_password: The clear text password required for connecting to the LDAP server +# # The server is automatically terminated when the class is dropped. 
sub new { - my ($class) = @_; + my ($class, %opts) = @_; + + my $rootPassword = $opts{'root_password'} // rand_chars(); my $root = File::Temp->newdir(); mkdir $root; @@ -36,6 +42,7 @@ sub new { _slapd_dir => $slapd_dir, _socket => $socket, _tmpdir => $root, + root_password => $rootPassword, }; my $blessed = bless $self, $class; @@ -128,7 +135,7 @@ objectClass: olcMdbConfig olcDatabase: {1}mdb olcDbDirectory: ${\$self->{"_db_dir"}} olcRootDN: cn=root,dc=example -olcRootPW: notapassword +olcRootPW: ${\$self->{"root_password"}} olcSuffix: dc=example EOF } From 05ca71069ffc5d5b50971c77a6a8a64f6f491cc9 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Fri, 11 Feb 2022 11:24:28 -0500 Subject: [PATCH 036/401] ldap config: document putting the password in a separate file --- doc/manual/src/configuration.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/doc/manual/src/configuration.md b/doc/manual/src/configuration.md index b4ccff89..ec071f24 100644 --- a/doc/manual/src/configuration.md +++ b/doc/manual/src/configuration.md @@ -137,7 +137,7 @@ Example configuration: timeout = 30 binddn = "cn=root,dc=example" - bindpw = notapassword + include ldap-password.conf start_tls = 0 verify = none @@ -170,6 +170,12 @@ Example configuration: ``` +Then, place the password to your LDAP server in `/var/lib/hydra/ldap-password.conf`: + +``` +bindpw = the-ldap-password +``` + ### Debugging LDAP Set the `debug` parameter under `ldap.config.ldap_server_options.debug`: From 0bd4a7591832fbb144d6a1794ba0935a9f6d0e94 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Fri, 11 Feb 2022 11:26:27 -0500 Subject: [PATCH 037/401] HydraTestContext: support running a sub before running hydra-init. 
--- t/lib/HydraTestContext.pm | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/t/lib/HydraTestContext.pm b/t/lib/HydraTestContext.pm index b8c254d0..ade12280 100644 --- a/t/lib/HydraTestContext.pm +++ b/t/lib/HydraTestContext.pm @@ -6,6 +6,7 @@ use File::Path qw(make_path); use File::Basename; use Cwd qw(abs_path getcwd); use CliRunners; +use Hydra::Helper::Exec; # Set up the environment for running tests. # @@ -16,6 +17,9 @@ use CliRunners; # * use_external_destination_store: Boolean indicating whether hydra should # use a destination store different from the evaluation store. # True by default. +# * before_init: a sub which is called after the database is up, but before +# hydra-init is executed. It receives the HydraTestContext object as +# its argument. # # This clears several environment variables and sets them to ephemeral # values: a temporary database, temporary Nix store, temporary Hydra @@ -63,24 +67,28 @@ sub new { extra_initdb_args => "--locale C.UTF-8" ); $ENV{'HYDRA_DBI'} = $pgsql->dsn; - system("hydra-init") == 0 or die; - my $self = { + my $self = bless { _db => undef, db_handle => $pgsql, tmpdir => $dir, nix_state_dir => "$dir/nix/var/nix", testdir => abs_path(dirname(__FILE__) . "/.."), jobsdir => abs_path(dirname(__FILE__) . 
"/../jobs") - }; + }, $class; - return bless $self, $class; + if ($opts{'before_init'}) { + $opts{'before_init'}->($self); + } + + expectOkay(5, ("hydra-init")); + + return $self; } sub db { my ($self, $setup) = @_; - if (!defined $self->{_db}) { require Hydra::Schema; require Hydra::Model::DB; From 848fb3b2653e5a52a7928dafb211705b1f220df6 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Fri, 11 Feb 2022 11:26:56 -0500 Subject: [PATCH 038/401] ldap-legacy.t: specify the root password manually --- t/Hydra/Controller/User/ldap-legacy.t | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/t/Hydra/Controller/User/ldap-legacy.t b/t/Hydra/Controller/User/ldap-legacy.t index 64da6112..9cb197c0 100644 --- a/t/Hydra/Controller/User/ldap-legacy.t +++ b/t/Hydra/Controller/User/ldap-legacy.t @@ -7,7 +7,9 @@ use Catalyst::Test (); use HTTP::Request::Common; use JSON::MaybeXS; -my $ldap = LDAPContext->new(); +my $ldap = LDAPContext->new( + root_password => "the-root-password", +); my $users = { unrelated => $ldap->add_user("unrelated_user"), admin => $ldap->add_user("admin_user"), @@ -36,7 +38,7 @@ store: timeout: 30 debug: 0 binddn: "cn=root,dc=example" - bindpw: notapassword + bindpw: the-root-password start_tls: 0 start_tls_options: verify: none From 78e987225169ce6b497473eefb4b0aac6e53a305 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Fri, 11 Feb 2022 11:27:10 -0500 Subject: [PATCH 039/401] ldap.t: write the password to an external .conf file --- t/Hydra/Controller/User/ldap.t | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/t/Hydra/Controller/User/ldap.t b/t/Hydra/Controller/User/ldap.t index 19e7825a..175b66aa 100644 --- a/t/Hydra/Controller/User/ldap.t +++ b/t/Hydra/Controller/User/ldap.t @@ -25,7 +25,12 @@ $ldap->add_group("hydra_restart-jobs", $users->{"many_roles"}->{"username"}); $ldap->add_group("hydra_bump-to-front", $users->{"many_roles"}->{"username"}); $ldap->add_group("hydra_cancel-build", 
$users->{"many_roles"}->{"username"}); + my $ctx = test_context( + before_init => sub { + my ($ctx) = @_; + write_file($ctx->{"tmpdir"} . "/password.conf", "bindpw = ${\$ldap->{'root_password'}}"); + }, hydra_config => < @@ -42,7 +47,7 @@ my $ctx = test_context( debug = 0 binddn = "cn=root,dc=example" - bindpw = notapassword + include password.conf start_tls = 0 verify = none From 3b895aec54ba07d03feef7c151ccf980967d29cd Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Fri, 17 Dec 2021 10:29:47 -0800 Subject: [PATCH 040/401] DynamicRunCommand: needs to be enabled by server, project, and jobset --- doc/manual/src/plugins/RunCommand.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/doc/manual/src/plugins/RunCommand.md b/doc/manual/src/plugins/RunCommand.md index b186be80..652a171e 100644 --- a/doc/manual/src/plugins/RunCommand.md +++ b/doc/manual/src/plugins/RunCommand.md @@ -33,8 +33,9 @@ Command to run. Can use the `$HYDRA_JSON` environment variable to access informa ### Dynamic Commands -Hydra can optionally run RunCommand hooks defined dynamically by the jobset. -This must be turned on explicitly in the `hydra.conf` and per jobset. +Hydra can optionally run RunCommand hooks defined dynamically by the jobset. In +order to enable dynamic commands, you must enable this feature in your +`hydra.conf`, *as well as* in the parent project and jobset configuration. #### Behavior From 3f4f1837928add779759da21522b617b93748a14 Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Fri, 17 Dec 2021 10:31:03 -0800 Subject: [PATCH 041/401] jobset.tt: more info on why Dynamic RunCommand is disabled --- src/root/jobset.tt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/root/jobset.tt b/src/root/jobset.tt index 3d6ca6ae..56abdb50 100644 --- a/src/root/jobset.tt +++ b/src/root/jobset.tt @@ -162,7 +162,7 @@ Enable Dynamic RunCommand Hooks: - [% jobset.enable_dynamic_run_command ? "Yes" : "No" %] + [% c.config.dynamicruncommand.enable ? 
project.enable_dynamic_run_command ? jobset.enable_dynamic_run_command ? "Yes" : "No (not enabled by jobset)" : "No (not enabled by project)" : "No (not enabled by server)" %] [% IF emailNotification %] From dfd3a67424c0cd834d2a009d061336f3b61814ff Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Fri, 17 Dec 2021 10:31:46 -0800 Subject: [PATCH 042/401] project.tt: more info on why Dynamic RunCommand is disabled --- src/root/project.tt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/root/project.tt b/src/root/project.tt index f5a51e96..5e8ec0c8 100644 --- a/src/root/project.tt +++ b/src/root/project.tt @@ -94,7 +94,7 @@ Enable Dynamic RunCommand Hooks: - [% project.enable_dynamic_run_command ? "Yes" : "No" %] + [% c.config.dynamicruncommand.enable ? project.enable_dynamic_run_command ? "Yes" : "No (not enabled by project)" : "No (not enabled by server)" %] From 6053e5fd4b2e23aeb69b632417a4e42ca88fe168 Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Fri, 17 Dec 2021 11:02:59 -0800 Subject: [PATCH 043/401] edit-jobset.tt: disable when disabled by project and server Also add a tooltip describing why it's disabled, to make it easier to chase down. --- src/root/edit-jobset.tt | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/root/edit-jobset.tt b/src/root/edit-jobset.tt index 40da8f61..61e3636f 100644 --- a/src/root/edit-jobset.tt +++ b/src/root/edit-jobset.tt @@ -160,7 +160,15 @@
- +
From d680c209feffb3fe1087cd39e62545cd03cb22f8 Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Fri, 17 Dec 2021 11:03:55 -0800 Subject: [PATCH 044/401] edit-project.tt: disable when disabled by server Also add a tooltip describing why it's disabled, to make it easier to chase down. --- src/root/edit-project.tt | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/root/edit-project.tt b/src/root/edit-project.tt index 4b99f4ab..bb850e5c 100644 --- a/src/root/edit-project.tt +++ b/src/root/edit-project.tt @@ -56,7 +56,13 @@
- +
From 928ba9e854e8fb7ee88f352a32848f114004e70c Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Fri, 17 Dec 2021 12:34:19 -0800 Subject: [PATCH 045/401] Controller/{Jobset,Project}: error when enabling dynamic runcommand but it's disabled elsewhere --- src/lib/Hydra/Controller/Jobset.pm | 10 +- src/lib/Hydra/Controller/Project.pm | 7 +- t/Hydra/Plugin/RunCommand/dynamic-disabled.t | 110 +++++++++++++++++++ t/Hydra/Plugin/RunCommand/dynamic-enabled.t | 106 ++++++++++++++++++ 4 files changed, 231 insertions(+), 2 deletions(-) create mode 100644 t/Hydra/Plugin/RunCommand/dynamic-disabled.t create mode 100644 t/Hydra/Plugin/RunCommand/dynamic-enabled.t diff --git a/src/lib/Hydra/Controller/Jobset.pm b/src/lib/Hydra/Controller/Jobset.pm index a2d48597..eeb4232a 100644 --- a/src/lib/Hydra/Controller/Jobset.pm +++ b/src/lib/Hydra/Controller/Jobset.pm @@ -261,6 +261,14 @@ sub updateJobset { my $checkinterval = int(trim($c->stash->{params}->{checkinterval})); + my $enable_dynamic_run_command = defined $c->stash->{params}->{enable_dynamic_run_command} ? 1 : 0; + if ($enable_dynamic_run_command + && !($c->config->{dynamicruncommand}->{enable} + && $jobset->project->enable_dynamic_run_command)) + { + badRequest($c, "Dynamic RunCommand is not enabled by the server or the parent project."); + } + $jobset->update( { name => $jobsetName , description => trim($c->stash->{params}->{"description"}) @@ -268,7 +276,7 @@ sub updateJobset { , nixexprinput => $nixExprInput , enabled => $enabled , enableemail => defined $c->stash->{params}->{enableemail} ? 1 : 0 - , enable_dynamic_run_command => defined $c->stash->{params}->{enable_dynamic_run_command} ? 1 : 0 + , enable_dynamic_run_command => $enable_dynamic_run_command , emailoverride => trim($c->stash->{params}->{emailoverride}) || "" , hidden => defined $c->stash->{params}->{visible} ? 
0 : 1 , keepnr => int(trim($c->stash->{params}->{keepnr} // "0")) diff --git a/src/lib/Hydra/Controller/Project.pm b/src/lib/Hydra/Controller/Project.pm index 98a8a6eb..1141de4a 100644 --- a/src/lib/Hydra/Controller/Project.pm +++ b/src/lib/Hydra/Controller/Project.pm @@ -149,6 +149,11 @@ sub updateProject { my $displayName = trim $c->stash->{params}->{displayname}; error($c, "You must specify a display name.") if $displayName eq ""; + my $enable_dynamic_run_command = defined $c->stash->{params}->{enable_dynamic_run_command} ? 1 : 0; + if ($enable_dynamic_run_command && !$c->config->{dynamicruncommand}->{enable}) { + badRequest($c, "Dynamic RunCommand is not enabled by the server."); + } + $project->update( { name => $projectName , displayname => $displayName @@ -157,7 +162,7 @@ sub updateProject { , enabled => defined $c->stash->{params}->{enabled} ? 1 : 0 , hidden => defined $c->stash->{params}->{visible} ? 0 : 1 , owner => $owner - , enable_dynamic_run_command => defined $c->stash->{params}->{enable_dynamic_run_command} ? 1 : 0 + , enable_dynamic_run_command => $enable_dynamic_run_command , declfile => trim($c->stash->{params}->{declarative}->{file}) , decltype => trim($c->stash->{params}->{declarative}->{type}) , declvalue => trim($c->stash->{params}->{declarative}->{value}) diff --git a/t/Hydra/Plugin/RunCommand/dynamic-disabled.t b/t/Hydra/Plugin/RunCommand/dynamic-disabled.t new file mode 100644 index 00000000..ad2e9a4b --- /dev/null +++ b/t/Hydra/Plugin/RunCommand/dynamic-disabled.t @@ -0,0 +1,110 @@ +use strict; +use warnings; +use Setup; +use Test2::V0; + +require Catalyst::Test; +use HTTP::Request::Common qw(POST PUT GET DELETE); +use JSON::MaybeXS qw(decode_json encode_json); + +my $ctx = test_context(); +Catalyst::Test->import('Hydra'); + +# Create a user to log in to +my $user = $ctx->db->resultset('Users')->create({ username => 'alice', emailaddress => 'root@invalid.org', password => '!' 
}); +$user->setPassword('foobar'); +$user->userroles->update_or_create({ role => 'admin' }); + +subtest "can't enable dynamic RunCommand when disabled by server" => sub { + my $builds = $ctx->makeAndEvaluateJobset( + expression => "runcommand-dynamic.nix", + build => 1 + ); + + my $build = $builds->{"runCommandHook.example"}; + my $project = $build->project; + my $project_name = $project->name; + my $jobset = $build->jobset; + my $jobset_name = $jobset->name; + + is($project->enable_dynamic_run_command, 0, "dynamic RunCommand is disabled on projects by default"); + is($jobset->enable_dynamic_run_command, 0, "dynamic RunCommand is disabled on jobsets by default"); + + my $req = request(POST '/login', + Referer => 'http://localhost/', + Content => { + username => 'alice', + password => 'foobar' + } + ); + is($req->code, 302, "logged in successfully"); + my $cookie = $req->header("set-cookie"); + + subtest "can't enable dynamic RunCommand on project" => sub { + my $projectresponse = request(GET "/project/$project_name", + Accept => 'application/json', + Content_Type => 'application/json', + Cookie => $cookie, + ); + + my $projectjson = decode_json($projectresponse->content); + $projectjson->{enable_dynamic_run_command} = 1; + + my $projectupdate = request(PUT "/project/$project_name", + Accept => 'application/json', + Content_Type => 'application/json', + Cookie => $cookie, + Content => encode_json($projectjson) + ); + + $projectresponse = request(GET "/project/$project_name", + Accept => 'application/json', + Content_Type => 'application/json', + Cookie => $cookie, + ); + $projectjson = decode_json($projectresponse->content); + + is($projectupdate->code, 400); + like( + $projectupdate->content, + qr/Dynamic RunCommand is not/, + "failed to change enable_dynamic_run_command, not any other error" + ); + is($projectjson->{enable_dynamic_run_command}, JSON::MaybeXS::false); + }; + + subtest "can't enable dynamic RunCommand on jobset" => sub { + my $jobsetresponse = 
request(GET "/jobset/$project_name/$jobset_name", + Accept => 'application/json', + Content_Type => 'application/json', + Cookie => $cookie, + ); + + my $jobsetjson = decode_json($jobsetresponse->content); + $jobsetjson->{enable_dynamic_run_command} = 1; + + my $jobsetupdate = request(PUT "/jobset/$project_name/$jobset_name", + Accept => 'application/json', + Content_Type => 'application/json', + Cookie => $cookie, + Content => encode_json($jobsetjson) + ); + + $jobsetresponse = request(GET "/jobset/$project_name/$jobset_name", + Accept => 'application/json', + Content_Type => 'application/json', + Cookie => $cookie, + ); + $jobsetjson = decode_json($jobsetresponse->content); + + is($jobsetupdate->code, 400); + like( + $jobsetupdate->content, + qr/Dynamic RunCommand is not/, + "failed to change enable_dynamic_run_command, not any other error" + ); + is($jobsetjson->{enable_dynamic_run_command}, JSON::MaybeXS::false); + }; +}; + +done_testing; diff --git a/t/Hydra/Plugin/RunCommand/dynamic-enabled.t b/t/Hydra/Plugin/RunCommand/dynamic-enabled.t new file mode 100644 index 00000000..68c6d593 --- /dev/null +++ b/t/Hydra/Plugin/RunCommand/dynamic-enabled.t @@ -0,0 +1,106 @@ +use strict; +use warnings; +use Setup; +use Test2::V0; + +require Catalyst::Test; +use HTTP::Request::Common qw(POST PUT GET DELETE); +use JSON::MaybeXS qw(decode_json encode_json); + +my $ctx = test_context( + hydra_config => q| + + enable = 1 + + | +); +Catalyst::Test->import('Hydra'); + +# Create a user to log in to +my $user = $ctx->db->resultset('Users')->create({ username => 'alice', emailaddress => 'root@invalid.org', password => '!' 
}); +$user->setPassword('foobar'); +$user->userroles->update_or_create({ role => 'admin' }); + +subtest "can enable dynamic RunCommand when enabled by server" => sub { + my $builds = $ctx->makeAndEvaluateJobset( + expression => "runcommand-dynamic.nix", + build => 1 + ); + + my $build = $builds->{"runCommandHook.example"}; + my $project = $build->project; + my $project_name = $project->name; + my $jobset = $build->jobset; + my $jobset_name = $jobset->name; + + is($project->enable_dynamic_run_command, 0, "dynamic RunCommand is disabled on projects by default"); + is($jobset->enable_dynamic_run_command, 0, "dynamic RunCommand is disabled on jobsets by default"); + + my $req = request(POST '/login', + Referer => 'http://localhost/', + Content => { + username => 'alice', + password => 'foobar' + } + ); + is($req->code, 302, "logged in successfully"); + my $cookie = $req->header("set-cookie"); + + subtest "can enable dynamic RunCommand on project" => sub { + my $projectresponse = request(GET "/project/$project_name", + Accept => 'application/json', + Content_Type => 'application/json', + Cookie => $cookie, + ); + + my $projectjson = decode_json($projectresponse->content); + $projectjson->{enable_dynamic_run_command} = 1; + + my $projectupdate = request(PUT "/project/$project_name", + Accept => 'application/json', + Content_Type => 'application/json', + Cookie => $cookie, + Content => encode_json($projectjson) + ); + + $projectresponse = request(GET "/project/$project_name", + Accept => 'application/json', + Content_Type => 'application/json', + Cookie => $cookie, + ); + $projectjson = decode_json($projectresponse->content); + + is($projectupdate->code, 200); + is($projectjson->{enable_dynamic_run_command}, JSON::MaybeXS::true); + }; + + subtest "can enable dynamic RunCommand on jobset" => sub { + my $jobsetresponse = request(GET "/jobset/$project_name/$jobset_name", + Accept => 'application/json', + Content_Type => 'application/json', + Cookie => $cookie, + ); + + my 
$jobsetjson = decode_json($jobsetresponse->content); + $jobsetjson->{enable_dynamic_run_command} = 1; + + my $jobsetupdate = request(PUT "/jobset/$project_name/$jobset_name", + Accept => 'application/json', + Content_Type => 'application/json', + Cookie => $cookie, + Content => encode_json($jobsetjson) + ); + + $jobsetresponse = request(GET "/jobset/$project_name/$jobset_name", + Accept => 'application/json', + Content_Type => 'application/json', + Cookie => $cookie, + ); + $jobsetjson = decode_json($jobsetresponse->content); + + is($jobsetupdate->code, 200); + is($jobsetjson->{enable_dynamic_run_command}, JSON::MaybeXS::true); + }; +}; + +done_testing; From a22a8fa62d777950dca470b9791e5ac8cda2ddbf Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Mon, 20 Dec 2021 09:37:14 -0800 Subject: [PATCH 046/401] AddBuilds: reject declarative jobsets with dynamic runcommand enabled if disabled elsewhere --- src/lib/Hydra/Helper/AddBuilds.pm | 44 +++++++++++--- src/script/hydra-eval-jobset | 2 +- t/Helper/AddBuilds/dynamic-disabled.t | 85 ++++++++++++++++++++++++++ t/Helper/AddBuilds/dynamic-enabled.t | 88 +++++++++++++++++++++++++++ 4 files changed, 209 insertions(+), 10 deletions(-) create mode 100644 t/Helper/AddBuilds/dynamic-disabled.t create mode 100644 t/Helper/AddBuilds/dynamic-enabled.t diff --git a/src/lib/Hydra/Helper/AddBuilds.pm b/src/lib/Hydra/Helper/AddBuilds.pm index f38737d3..9e3ddfd2 100644 --- a/src/lib/Hydra/Helper/AddBuilds.pm +++ b/src/lib/Hydra/Helper/AddBuilds.pm @@ -19,14 +19,16 @@ use Hydra::Helper::CatalystUtils; our @ISA = qw(Exporter); our @EXPORT = qw( + validateDeclarativeJobset + createJobsetInputsRowAndData updateDeclarativeJobset handleDeclarativeJobsetBuild handleDeclarativeJobsetJson ); -sub updateDeclarativeJobset { - my ($db, $project, $jobsetName, $declSpec) = @_; +sub validateDeclarativeJobset { + my ($config, $project, $jobsetName, $declSpec) = @_; my @allowed_keys = qw( enabled @@ -62,16 +64,39 @@ sub updateDeclarativeJobset { } } + 
my $enable_dynamic_run_command = defined $update{enable_dynamic_run_command} ? 1 : 0; + if ($enable_dynamic_run_command + && !($config->{dynamicruncommand}->{enable} + && $project->{enable_dynamic_run_command})) + { + die "Dynamic RunCommand is not enabled by the server or the parent project."; + } + + return %update; +} + +sub createJobsetInputsRowAndData { + my ($name, $declSpec) = @_; + my $data = $declSpec->{"inputs"}->{$name}; + my $row = { + name => $name, + type => $data->{type} + }; + $row->{emailresponsible} = $data->{emailresponsible} // 0; + + return ($row, $data); +} + +sub updateDeclarativeJobset { + my ($config, $db, $project, $jobsetName, $declSpec) = @_; + + my %update = validateDeclarativeJobset($config, $project, $jobsetName, $declSpec); + $db->txn_do(sub { my $jobset = $project->jobsets->update_or_create(\%update); $jobset->jobsetinputs->delete; foreach my $name (keys %{$declSpec->{"inputs"}}) { - my $data = $declSpec->{"inputs"}->{$name}; - my $row = { - name => $name, - type => $data->{type} - }; - $row->{emailresponsible} = $data->{emailresponsible} // 0; + my ($row, $data) = createJobsetInputsRowAndData($name, $declSpec); my $input = $jobset->jobsetinputs->create($row); $input->jobsetinputalts->create({altnr => 0, value => $data->{value}}); } @@ -82,6 +107,7 @@ sub updateDeclarativeJobset { sub handleDeclarativeJobsetJson { my ($db, $project, $declSpec) = @_; + my $config = getHydraConfig(); $db->txn_do(sub { my @kept = keys %$declSpec; push @kept, ".jobsets"; @@ -89,7 +115,7 @@ sub handleDeclarativeJobsetJson { foreach my $jobsetName (keys %$declSpec) { my $spec = $declSpec->{$jobsetName}; eval { - updateDeclarativeJobset($db, $project, $jobsetName, $spec); + updateDeclarativeJobset($config, $db, $project, $jobsetName, $spec); 1; } or do { print STDERR "ERROR: failed to process declarative jobset ", $project->name, ":${jobsetName}, ", $@, "\n"; diff --git a/src/script/hydra-eval-jobset b/src/script/hydra-eval-jobset index de437ecd..a9bd7355 
100755 --- a/src/script/hydra-eval-jobset +++ b/src/script/hydra-eval-jobset @@ -617,7 +617,7 @@ sub checkJobsetWrapped { } else { # Update the jobset with the spec's inputs, and the continue # evaluating the .jobsets jobset. - updateDeclarativeJobset($db, $project, ".jobsets", $declSpec); + updateDeclarativeJobset($config, $db, $project, ".jobsets", $declSpec); $jobset->discard_changes; $inputInfo->{"declInput"} = [ $declInput ]; $inputInfo->{"projectName"} = [ fetchInput($plugins, $db, $project, $jobset, "projectName", "string", $project->name, 0) ]; diff --git a/t/Helper/AddBuilds/dynamic-disabled.t b/t/Helper/AddBuilds/dynamic-disabled.t new file mode 100644 index 00000000..0507b03e --- /dev/null +++ b/t/Helper/AddBuilds/dynamic-disabled.t @@ -0,0 +1,85 @@ +use strict; +use warnings; +use Setup; +use Test2::V0; + +require Catalyst::Test; +use HTTP::Request::Common qw(POST PUT GET DELETE); +use JSON::MaybeXS qw(decode_json encode_json); +use Hydra::Helper::AddBuilds qw(validateDeclarativeJobset); +use Hydra::Helper::Nix qw(getHydraConfig); + +my $ctx = test_context(); + +sub makeJobsetSpec { + my ($dynamic) = @_; + + return { + enabled => 2, + enable_dynamic_run_command => $dynamic ? 
JSON::MaybeXS::true : undef, + visible => JSON::MaybeXS::true, + name => "job", + type => 1, + description => "test jobset", + flake => "github:nixos/nix", + checkinterval => 0, + schedulingshares => 100, + keepnr => 3 + }; +}; + +subtest "validate declarative jobset with dynamic RunCommand disabled by server" => sub { + my $config = getHydraConfig(); + + subtest "project enabled dynamic runcommand, declarative jobset enabled dynamic runcommand" => sub { + like( + dies { + validateDeclarativeJobset( + $config, + { enable_dynamic_run_command => 1 }, + "test-jobset", + makeJobsetSpec(JSON::MaybeXS::true), + ), + }, + qr/Dynamic RunCommand is not enabled/, + ); + }; + + subtest "project enabled dynamic runcommand, declarative jobset disabled dynamic runcommand" => sub { + ok( + validateDeclarativeJobset( + $config, + { enable_dynamic_run_command => 1 }, + "test-jobset", + makeJobsetSpec(JSON::MaybeXS::false) + ), + ); + }; + + subtest "project disabled dynamic runcommand, declarative jobset enabled dynamic runcommand" => sub { + like( + dies { + validateDeclarativeJobset( + $config, + { enable_dynamic_run_command => 0 }, + "test-jobset", + makeJobsetSpec(JSON::MaybeXS::true), + ), + }, + qr/Dynamic RunCommand is not enabled/, + ); + }; + + subtest "project disabled dynamic runcommand, declarative jobset disabled dynamic runcommand" => sub { + ok( + validateDeclarativeJobset( + $config, + { enable_dynamic_run_command => 0 }, + "test-jobset", + makeJobsetSpec(JSON::MaybeXS::false) + ), + ); + }; +}; + +done_testing; diff --git a/t/Helper/AddBuilds/dynamic-enabled.t b/t/Helper/AddBuilds/dynamic-enabled.t new file mode 100644 index 00000000..d2f5a386 --- /dev/null +++ b/t/Helper/AddBuilds/dynamic-enabled.t @@ -0,0 +1,88 @@ +use strict; +use warnings; +use Setup; +use Test2::V0; + +require Catalyst::Test; +use HTTP::Request::Common qw(POST PUT GET DELETE); +use JSON::MaybeXS qw(decode_json encode_json); +use Hydra::Helper::AddBuilds qw(validateDeclarativeJobset); +use 
Hydra::Helper::Nix qw(getHydraConfig); + +my $ctx = test_context( + hydra_config => q| + + enable = 1 + + | +); + +sub makeJobsetSpec { + my ($dynamic) = @_; + + return { + enabled => 2, + enable_dynamic_run_command => $dynamic ? JSON::MaybeXS::true : undef, + visible => JSON::MaybeXS::true, + name => "job", + type => 1, + description => "test jobset", + flake => "github:nixos/nix", + checkinterval => 0, + schedulingshares => 100, + keepnr => 3 + }; +}; + +subtest "validate declarative jobset with dynamic RunCommand enabled by server" => sub { + my $config = getHydraConfig(); + + subtest "project enabled dynamic runcommand, declarative jobset enabled dynamic runcommand" => sub { + ok( + validateDeclarativeJobset( + $config, + { enable_dynamic_run_command => 1 }, + "test-jobset", + makeJobsetSpec(JSON::MaybeXS::true) + ), + ); + }; + + subtest "project enabled dynamic runcommand, declarative jobset disabled dynamic runcommand" => sub { + ok( + validateDeclarativeJobset( + $config, + { enable_dynamic_run_command => 1 }, + "test-jobset", + makeJobsetSpec(JSON::MaybeXS::false) + ), + ); + }; + + subtest "project disabled dynamic runcommand, declarative jobset enabled dynamic runcommand" => sub { + like( + dies { + validateDeclarativeJobset( + $config, + { enable_dynamic_run_command => 0 }, + "test-jobset", + makeJobsetSpec(JSON::MaybeXS::true), + ), + }, + qr/Dynamic RunCommand is not enabled/, + ); + }; + + subtest "project disabled dynamic runcommand, declarative jobset disabled dynamic runcommand" => sub { + ok( + validateDeclarativeJobset( + $config, + { enable_dynamic_run_command => 0 }, + "test-jobset", + makeJobsetSpec(JSON::MaybeXS::false) + ), + ); + }; +}; + +done_testing; From 8c3122cacd82d86467b1e69b58db14f058a7afe5 Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Mon, 20 Dec 2021 11:20:17 -0800 Subject: [PATCH 047/401] hydra-api: add enable_dynamic_run_command to Project PUT --- hydra-api.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git 
a/hydra-api.yaml b/hydra-api.yaml index 0fe0a130..ce7e0f9a 100644 --- a/hydra-api.yaml +++ b/hydra-api.yaml @@ -178,6 +178,9 @@ paths: enabled: description: when set to true the project gets scheduled for evaluation type: boolean + enable_dynamic_run_command: + description: when true the project's jobsets support executing dynamically defined RunCommand hooks. Requires the server and project's configuration to also enable dynamic RunCommand. + type: boolean visible: description: when set to true the project is displayed in the web interface type: boolean From 27ddde1e9eb95b694884dce5dfb24b67c02aefbd Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Fri, 11 Feb 2022 15:03:09 -0500 Subject: [PATCH 048/401] dynamic runcommand: print a notice on the build page if it is disabled --- src/lib/Hydra/Controller/Build.pm | 11 +++++++++++ src/root/build.tt | 15 ++++++++++++++- 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/src/lib/Hydra/Controller/Build.pm b/src/lib/Hydra/Controller/Build.pm index af648109..552f31af 100644 --- a/src/lib/Hydra/Controller/Build.pm +++ b/src/lib/Hydra/Controller/Build.pm @@ -38,6 +38,17 @@ sub buildChain :Chained('/') :PathPart('build') :CaptureArgs(1) { $c->stash->{jobset} = $c->stash->{build}->jobset; $c->stash->{job} = $c->stash->{build}->job; $c->stash->{runcommandlogs} = [$c->stash->{build}->runcommandlogs->search({}, {order_by => ["id DESC"]})]; + + $c->stash->{runcommandlogProblem} = undef; + if ($c->stash->{job} =~ qr/^runCommandHook\..*/) { + if (!$c->config->{dynamicruncommand}->{enable}) { + $c->stash->{runcommandlogProblem} = "disabled-server"; + } elsif (!$c->stash->{project}->enable_dynamic_run_command) { + $c->stash->{runcommandlogProblem} = "disabled-project"; + } elsif (!$c->stash->{jobset}->enable_dynamic_run_command) { + $c->stash->{runcommandlogProblem} = "disabled-jobset"; + } + } } diff --git a/src/root/build.tt b/src/root/build.tt index 0848da4a..027ce3e4 100644 --- a/src/root/build.tt +++ 
b/src/root/build.tt @@ -149,7 +149,7 @@ END; [% IF build.dependents %][% END%] [% IF drvAvailable %][% END %] [% IF localStore && available %][% END %] - [% IF runcommandlogs.size() > 0 %][% END %] + [% IF runcommandlogProblem || runcommandlogs.size() > 0 %][% END %]
@@ -489,6 +489,19 @@ END; [% END %]
+ [% IF runcommandlogProblem %] + + [% END %]
[% FOREACH runcommandlog IN runcommandlogs %]
From 6d146deaf0f9a45376c13d3714061e5435922a22 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Janne=20He=C3=9F?= Date: Sun, 13 Feb 2022 13:57:49 +0100 Subject: [PATCH 049/401] build-graphs: Fix readability in dark mode --- src/root/static/css/hydra.css | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/root/static/css/hydra.css b/src/root/static/css/hydra.css index 475d61c2..bbe0f1dd 100644 --- a/src/root/static/css/hydra.css +++ b/src/root/static/css/hydra.css @@ -361,4 +361,17 @@ td.step-status span.warn { div.modal-content { background-color: #1f1f1f; } + + /* + Graphs + */ + div.flot-tooltip { + border: solid 1px white; + background-color: #1f1f1f; + color: #fafafa !important; + } + + div.flot-text { + color: #fafafa !important; + } } From 1c846765273018ff9b97edbfcaf9e5884fd9e140 Mon Sep 17 00:00:00 2001 From: ajs124 Date: Mon, 10 May 2021 14:32:24 +0200 Subject: [PATCH 050/401] Fit more content on screen --- src/root/static/css/hydra.css | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/src/root/static/css/hydra.css b/src/root/static/css/hydra.css index 475d61c2..53304b19 100644 --- a/src/root/static/css/hydra.css +++ b/src/root/static/css/hydra.css @@ -1,5 +1,5 @@ div.skip-topbar { - padding-top: 40px; + padding-top: 20px; margin-bottom: 1.5em; } @@ -146,6 +146,26 @@ td.step-status span.warn { padding-top: 1.5rem; } +.container { + max-width: 80%; +} + +.tab-content { + margin-right: 0 !important; +} + +body { + line-height: 1; +} + +.navbar-nav { + line-height: 1.5; +} + +.dropdown-item { + line-height: 1.5; +} + @media (prefers-color-scheme: dark) { /* Prevent some flickering */ html { From f14c583ce5188903f7c9db6f99c8c3fb42c77416 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Thu, 20 Jan 2022 15:09:21 -0500 Subject: [PATCH 051/401] Use `copyClosure` instead of `computeFSClosure` + `copyPaths` It is more terse, and in the future it is possible `copyClosure` will become more sophisticated. 
--- src/hydra-queue-runner/build-remote.cc | 6 +++--- src/hydra-queue-runner/queue-monitor.cc | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/hydra-queue-runner/build-remote.cc b/src/hydra-queue-runner/build-remote.cc index 696077de..5d0f9a45 100644 --- a/src/hydra-queue-runner/build-remote.cc +++ b/src/hydra-queue-runner/build-remote.cc @@ -287,9 +287,9 @@ void State::buildRemote(ref destStore, this will copy the inputs to the binary cache from the local store. */ if (localStore != std::shared_ptr(destStore)) { - StorePathSet closure; - localStore->computeFSClosure(step->drv->inputSrcs, closure); - copyPaths(*localStore, *destStore, closure, NoRepair, NoCheckSigs, NoSubstitute); + copyClosure(*localStore, *destStore, + step->drv->inputSrcs, + NoRepair, NoCheckSigs, NoSubstitute); } { diff --git a/src/hydra-queue-runner/queue-monitor.cc b/src/hydra-queue-runner/queue-monitor.cc index 49caf8e3..3f19d36a 100644 --- a/src/hydra-queue-runner/queue-monitor.cc +++ b/src/hydra-queue-runner/queue-monitor.cc @@ -513,9 +513,9 @@ Step::ptr State::createStep(ref destStore, // FIXME: should copy directly from substituter to destStore. } - StorePathSet closure; - localStore->computeFSClosure({*path}, closure); - copyPaths(*localStore, *destStore, closure, NoRepair, CheckSigs, NoSubstitute); + copyClosure(*localStore, *destStore, + StorePathSet { *path }, + NoRepair, CheckSigs, NoSubstitute); time_t stopTime = time(0); From 445bba337b6a78dad9b72da878ae21aabf85af3d Mon Sep 17 00:00:00 2001 From: John Ericson Date: Sun, 20 Feb 2022 17:18:52 +0000 Subject: [PATCH 052/401] Make `copyClosureTo` take a regular C++ ref to the store This is syntactically lighter wait, and demonstates there are no weird dynamic lifetimes involved, just regular passing reference to callee which it only borrows for the duration of the call. 
--- src/hydra-queue-runner/build-remote.cc | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/hydra-queue-runner/build-remote.cc b/src/hydra-queue-runner/build-remote.cc index 696077de..464a35c8 100644 --- a/src/hydra-queue-runner/build-remote.cc +++ b/src/hydra-queue-runner/build-remote.cc @@ -104,12 +104,12 @@ static void openConnection(Machine::ptr machine, Path tmpDir, int stderrFD, Chil } -static void copyClosureTo(std::timed_mutex & sendMutex, ref destStore, +static void copyClosureTo(std::timed_mutex & sendMutex, Store & destStore, FdSource & from, FdSink & to, const StorePathSet & paths, bool useSubstitutes = false) { StorePathSet closure; - destStore->computeFSClosure(paths, closure); + destStore.computeFSClosure(paths, closure); /* Send the "query valid paths" command with the "lock" option enabled. This prevents a race where the remote host @@ -117,16 +117,16 @@ static void copyClosureTo(std::timed_mutex & sendMutex, ref destStore, the remote host to substitute missing paths. */ // FIXME: substitute output pollutes our build log to << cmdQueryValidPaths << 1 << useSubstitutes; - worker_proto::write(*destStore, to, closure); + worker_proto::write(destStore, to, closure); to.flush(); /* Get back the set of paths that are already valid on the remote host. 
*/ - auto present = worker_proto::read(*destStore, from, Phantom {}); + auto present = worker_proto::read(destStore, from, Phantom {}); if (present.size() == closure.size()) return; - auto sorted = destStore->topoSortPaths(closure); + auto sorted = destStore.topoSortPaths(closure); StorePathSet missing; for (auto i = sorted.rbegin(); i != sorted.rend(); ++i) @@ -138,7 +138,7 @@ static void copyClosureTo(std::timed_mutex & sendMutex, ref destStore, std::chrono::seconds(600)); to << cmdImportPaths; - destStore->exportPaths(missing, to); + destStore.exportPaths(missing, to); to.flush(); if (readInt(from) != 1) @@ -308,7 +308,7 @@ void State::buildRemote(ref destStore, destStore->computeFSClosure(inputs, closure); copyPaths(*destStore, *localStore, closure, NoRepair, NoCheckSigs, NoSubstitute); } else { - copyClosureTo(machine->state->sendLock, destStore, from, to, inputs, true); + copyClosureTo(machine->state->sendLock, *destStore, from, to, inputs, true); } auto now2 = std::chrono::steady_clock::now(); From 5d169e3a2eea27f80981ab419613d5c5e03880c8 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Sun, 20 Feb 2022 11:54:14 -0500 Subject: [PATCH 053/401] Add a test validating direct and indirect constituents --- t/jobs/constituents.nix | 40 +++++++++++++++++++ t/queue-runner/direct-indirect-constituents.t | 35 ++++++++++++++++ 2 files changed, 75 insertions(+) create mode 100644 t/jobs/constituents.nix create mode 100644 t/queue-runner/direct-indirect-constituents.t diff --git a/t/jobs/constituents.nix b/t/jobs/constituents.nix new file mode 100644 index 00000000..5b7106b9 --- /dev/null +++ b/t/jobs/constituents.nix @@ -0,0 +1,40 @@ +with import ./config.nix; +rec { + constituentA = mkDerivation { + name = "empty-dir-A"; + builder = ./empty-dir-builder.sh; + }; + + constituentB = mkDerivation { + name = "empty-dir-B"; + builder = ./empty-dir-builder.sh; + }; + + direct_aggregate = mkDerivation { + name = "direct_aggregate"; + _hydraAggregate = true; + 
constituents = [ + constituentA + ]; + builder = ./empty-dir-builder.sh; + }; + + indirect_aggregate = mkDerivation { + name = "indirect_aggregate"; + _hydraAggregate = true; + constituents = [ + "constituentA" + ]; + builder = ./empty-dir-builder.sh; + }; + + mixed_aggregate = mkDerivation { + name = "mixed_aggregate"; + _hydraAggregate = true; + constituents = [ + "constituentA" + constituentB + ]; + builder = ./empty-dir-builder.sh; + }; +} diff --git a/t/queue-runner/direct-indirect-constituents.t b/t/queue-runner/direct-indirect-constituents.t new file mode 100644 index 00000000..35370450 --- /dev/null +++ b/t/queue-runner/direct-indirect-constituents.t @@ -0,0 +1,35 @@ +use strict; +use warnings; +use Setup; +use Test2::V0; + +my $ctx = test_context(); + +my $builds = $ctx->makeAndEvaluateJobset( + expression => 'constituents.nix', +); + +my $constituentBuildA = $builds->{"constituentA"}; +my $constituentBuildB = $builds->{"constituentB"}; + +my $eval = $constituentBuildA->jobsetevals->first(); +is($eval->evaluationerror->errormsg, ""); + +subtest "Verifying the direct aggregate" => sub { + my $aggBuild = $builds->{"direct_aggregate"}; + is($aggBuild->constituents->first()->id, $constituentBuildA->id, "The ID of the constituent is correct"); +}; + +subtest "Verifying the indirect aggregate" => sub { + my $indirectBuild = $builds->{"indirect_aggregate"}; + is($indirectBuild->constituents->first()->id, $constituentBuildA->id, "The ID of the constituent is correct"); +}; + +subtest "Verifying a mix of direct and indirect aggregate references" => sub { + my $mixedBuild = $builds->{"mixed_aggregate"}; + my ($constituentA, $constituentB) = $mixedBuild->constituents()->search({}, {order_by => { -asc => "job"} }); + is($constituentA->id, $constituentBuildA->id, "The ID of the constituent is correct"); + is($constituentB->id, $constituentBuildB->id, "The ID of the constituent is correct"); +}; + +done_testing; From be46f0216442577718c8a9d640b4021ba1c8b515 Mon Sep 17 
00:00:00 2001 From: Graham Christensen Date: Sun, 20 Feb 2022 11:55:31 -0500 Subject: [PATCH 054/401] tests: relocate evaluator tests --- t/{ => evaluator}/evaluate-basic.t | 0 t/{ => evaluator}/evaluate-dependent-jobsets.t | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename t/{ => evaluator}/evaluate-basic.t (100%) rename t/{ => evaluator}/evaluate-dependent-jobsets.t (100%) diff --git a/t/evaluate-basic.t b/t/evaluator/evaluate-basic.t similarity index 100% rename from t/evaluate-basic.t rename to t/evaluator/evaluate-basic.t diff --git a/t/evaluate-dependent-jobsets.t b/t/evaluator/evaluate-dependent-jobsets.t similarity index 100% rename from t/evaluate-dependent-jobsets.t rename to t/evaluator/evaluate-dependent-jobsets.t From e0921eba0a938172c53128de8b91e8310a74a51e Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Sun, 20 Feb 2022 12:18:11 -0500 Subject: [PATCH 055/401] Create a basic test which verifies we can't delete the derivation of aggregate jobs --- t/evaluator/evaluate-constituents-gc.t | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 t/evaluator/evaluate-constituents-gc.t diff --git a/t/evaluator/evaluate-constituents-gc.t b/t/evaluator/evaluate-constituents-gc.t new file mode 100644 index 00000000..a9b23e6c --- /dev/null +++ b/t/evaluator/evaluate-constituents-gc.t @@ -0,0 +1,20 @@ +use strict; +use warnings; +use Setup; +use Test2::V0; + +my $ctx = test_context(); + +my $builds = $ctx->makeAndEvaluateJobset( + expression => 'constituents.nix', +); + +my $constituentA = $builds->{"constituentA"}; +my $directAggregate = $builds->{"direct_aggregate"}; +my $indirectAggregate = $builds->{"indirect_aggregate"}; + +is(system('nix-store', '--delete', $constituentA->drvpath), 256, "Deleting a constituent derivation fails"); +is(system('nix-store', '--delete', $directAggregate->drvpath), 256, "Deleting the direct aggregate derivation fails"); +is(system('nix-store', '--delete', $indirectAggregate->drvpath), 256, 
"Deleting the indirect aggregate derivation fails"); + +done_testing; From 290e0653ad0121b80e53dfc0a5f9eed7540049ff Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Sun, 20 Feb 2022 12:15:10 -0500 Subject: [PATCH 056/401] hydra-eval-jobs: GC root aggregate jobs --- src/hydra-eval-jobs/hydra-eval-jobs.cc | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/hydra-eval-jobs/hydra-eval-jobs.cc b/src/hydra-eval-jobs/hydra-eval-jobs.cc index acffe1d1..897956bf 100644 --- a/src/hydra-eval-jobs/hydra-eval-jobs.cc +++ b/src/hydra-eval-jobs/hydra-eval-jobs.cc @@ -504,6 +504,16 @@ int main(int argc, char * * argv) job.erase("namedConstituents"); + /* Register the derivation as a GC root. !!! This + registers roots for jobs that we may have already + done. */ + auto localStore = store.dynamic_pointer_cast(); + if (gcRootsDir != "" && localStore) { + Path root = gcRootsDir + "/" + std::string(baseNameOf((std::string) job["drvPath"])); + if (!pathExists(root)) + localStore->addPermRoot(localStore->parseStorePath((std::string) job["drvPath"]), root); + } + if (!brokenJobs.empty()) { std::stringstream ss; for (const auto& [jobName, error] : brokenJobs) { From 9316544abf53183fa7149b272aed054d0457697e Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Mon, 21 Feb 2022 12:41:21 -0500 Subject: [PATCH 057/401] src/hydra-eval-jobs/hydra-eval-jobs.cc: .get for drvPath Co-authored-by: Kayla Fire --- src/hydra-eval-jobs/hydra-eval-jobs.cc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/hydra-eval-jobs/hydra-eval-jobs.cc b/src/hydra-eval-jobs/hydra-eval-jobs.cc index 897956bf..44a273a9 100644 --- a/src/hydra-eval-jobs/hydra-eval-jobs.cc +++ b/src/hydra-eval-jobs/hydra-eval-jobs.cc @@ -509,9 +509,10 @@ int main(int argc, char * * argv) done. 
*/ auto localStore = store.dynamic_pointer_cast(); if (gcRootsDir != "" && localStore) { - Path root = gcRootsDir + "/" + std::string(baseNameOf((std::string) job["drvPath"])); + auto drvPath = job["drvPath"].get(); + Path root = gcRootsDir + "/" + std::string(baseNameOf(drvPath)); if (!pathExists(root)) - localStore->addPermRoot(localStore->parseStorePath((std::string) job["drvPath"]), root); + localStore->addPermRoot(localStore->parseStorePath(drvPath), root); } if (!brokenJobs.empty()) { From b9ec3a41d6abe7253dd8ef6a840d13f051861664 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Mar 2022 15:01:23 +0000 Subject: [PATCH 058/401] build(deps): bump actions/checkout from 2 to 3 Bumps [actions/checkout](https://github.com/actions/checkout) from 2 to 3. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v2...v3) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 5751d1d5..3ba4aba6 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -6,7 +6,7 @@ jobs: tests: runs-on: ubuntu-18.04 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: fetch-depth: 0 - uses: cachix/install-nix-action@v16 From f1f2fc742700c6f80f87758687afedefd9d08ac6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Mar 2022 19:07:06 +0000 Subject: [PATCH 059/401] build(deps): bump actions/checkout from 2 to 3 Bumps [actions/checkout](https://github.com/actions/checkout) from 2 to 3. 
- [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v2...v3) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 7d98e3ff..8d32f581 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -6,7 +6,7 @@ jobs: tests: runs-on: ubuntu-18.04 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: fetch-depth: 0 - uses: cachix/install-nix-action@v12 From 5bbaa18a8f0e4271a3ae8a1a8adda40142ec2b4b Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Thu, 10 Mar 2022 12:20:44 -0800 Subject: [PATCH 060/401] flake: fix foreman execution [vin@scadrial:~/workspace/vcs/hydra]$ foreman -h Warning: the running version of Bundler (2.1.4) is older than the version that created the lockfile (2.2.20). We suggest you to upgrade to the version that created the lockfile by running `gem install bundler:2.2.20`. Traceback (most recent call last): 2: from /nix/store/ycshcdssxcj9sjf6yzb1ydw4fcglf66y-foreman-0.87.2/bin/foreman:20:in `
' 1: from /nix/store/ggqacj06n6qfm1iww0bih9ph0j89wcna-bundler-2.1.4/lib/ruby/gems/2.7.0/gems/bundler-2.1.4/lib/bundler/rubygems_integration.rb:413:in `block in replace_bin_path' /nix/store/ggqacj06n6qfm1iww0bih9ph0j89wcna-bundler-2.1.4/lib/ruby/gems/2.7.0/gems/bundler-2.1.4/lib/bundler/rubygems_integration.rb:374:in `block in replace_bin_path': can't find executable foreman for gem foreman. foreman is not currently included in the bundle, perhaps you meant to add it to your Gemfile? (Gem::Exception) --- flake.lock | 17 +++++++++++++++++ flake.nix | 9 +++++++-- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/flake.lock b/flake.lock index e4bf8c71..684243af 100644 --- a/flake.lock +++ b/flake.lock @@ -16,6 +16,22 @@ "type": "github" } }, + "newNixpkgs": { + "locked": { + "lastModified": 1646588256, + "narHash": "sha256-ZHljmNlt19nSm0Mz8fx6QEhddKUkU4hhwFmfNmGn+EY=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "2ebb6c1e5ae402ba35cca5eec58385e5f1adea04", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-21.11", + "repo": "nixpkgs", + "type": "github" + } + }, "nix": { "inputs": { "lowdown-src": "lowdown-src", @@ -69,6 +85,7 @@ }, "root": { "inputs": { + "newNixpkgs": "newNixpkgs", "nix": "nix", "nixpkgs": [ "nix", diff --git a/flake.nix b/flake.nix index d69048ed..2c4a4ddb 100644 --- a/flake.nix +++ b/flake.nix @@ -1,10 +1,13 @@ { description = "A Nix-based continuous build system"; + # FIXME: All the pinned versions of nix/nixpkgs have a broken foreman (yes, + # even 2.7.0's Nixpkgs pin). 
+ inputs.newNixpkgs.url = "github:NixOS/nixpkgs/nixos-21.11"; inputs.nixpkgs.follows = "nix/nixpkgs"; inputs.nix.url = github:NixOS/nix/2.6.0; - outputs = { self, nixpkgs, nix }: + outputs = { self, newNixpkgs, nixpkgs, nix }: let version = "${builtins.readFile ./version.txt}.${builtins.substring 0 8 self.lastModifiedDate}.${self.shortRev or "DIRTY"}"; @@ -566,7 +569,9 @@ checkInputs = [ cacert - foreman + # FIXME: foreman is broken on all nix/nixpkgs pin, up to and + # including 2.7.0 + newNixpkgs.legacyPackages.${final.system}.foreman glibcLocales netcat-openbsd openldap From 3bf31bd6a6e27be79245496598b64fabb01bcf41 Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Thu, 10 Mar 2022 12:21:30 -0800 Subject: [PATCH 061/401] hydra-queue-runner: add simple "up" exporter There are probably better ways to achieve this (and will likely need to be refactored a bit to support further metrics). --- flake.nix | 1 + src/hydra-queue-runner/Makefile.am | 2 +- src/hydra-queue-runner/hydra-queue-runner.cc | 23 ++++++++++++++++++++ 3 files changed, 25 insertions(+), 1 deletion(-) diff --git a/flake.nix b/flake.nix index 2c4a4ddb..175aa406 100644 --- a/flake.nix +++ b/flake.nix @@ -565,6 +565,7 @@ (if lib.versionAtLeast lib.version "20.03pre" then nlohmann_json else nlohmann_json.override { multipleHeaders = true; }) + prometheus-cpp ]; checkInputs = [ diff --git a/src/hydra-queue-runner/Makefile.am b/src/hydra-queue-runner/Makefile.am index ea852334..6e01ef85 100644 --- a/src/hydra-queue-runner/Makefile.am +++ b/src/hydra-queue-runner/Makefile.am @@ -4,5 +4,5 @@ hydra_queue_runner_SOURCES = hydra-queue-runner.cc queue-monitor.cc dispatcher.c builder.cc build-result.cc build-remote.cc \ build-result.hh counter.hh state.hh db.hh \ nar-extractor.cc nar-extractor.hh -hydra_queue_runner_LDADD = $(NIX_LIBS) -lpqxx +hydra_queue_runner_LDADD = $(NIX_LIBS) -lpqxx -lprometheus-cpp-pull -lprometheus-cpp-core hydra_queue_runner_CXXFLAGS = $(NIX_CFLAGS) -Wall -I ../libhydra 
-Wno-deprecated-declarations diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index 3297730c..af7ec28d 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -6,6 +6,10 @@ #include #include +#include +#include +#include + #include "state.hh" #include "build-result.hh" #include "store-api.hh" @@ -854,6 +858,25 @@ int main(int argc, char * * argv) return handleExceptions(argv[0], [&]() { initNix(); + /* Export a simple "up" metric, to allow monitoring that we're + still alive. */ + std::thread([&]() { + prometheus::Exposer exposer{"127.0.0.1:8080"}; + + // @note it's the users responsibility to keep the object alive + auto registry = std::make_shared(); + + auto& running = prometheus::BuildGauge() + .Name("hydra_queue_runner_running") + .Help("Whether the queue runner is currently running") + .Register(*registry); + + exposer.RegisterCollectable(registry); + running.Add({}).Set(1); + + while (true) { } + }).detach(); + signal(SIGINT, SIG_DFL); signal(SIGTERM, SIG_DFL); signal(SIGHUP, SIG_DFL); From a0cb73579d48aca3431a9294088a3e37f2c52323 Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Fri, 11 Mar 2022 11:50:44 -0800 Subject: [PATCH 062/401] flake: update newNixpkgs for newer prometheus-cpp --- flake.lock | 8 ++++---- flake.nix | 9 ++++++++- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/flake.lock b/flake.lock index 684243af..586c08e0 100644 --- a/flake.lock +++ b/flake.lock @@ -18,17 +18,17 @@ }, "newNixpkgs": { "locked": { - "lastModified": 1646588256, - "narHash": "sha256-ZHljmNlt19nSm0Mz8fx6QEhddKUkU4hhwFmfNmGn+EY=", + "lastModified": 1647023429, + "narHash": "sha256-LdMTXEgW+G1LXrGrME1b1CpTC6/r+meFZDHeXR2Ps40=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "2ebb6c1e5ae402ba35cca5eec58385e5f1adea04", + "rev": "9b095223a5dc9a6bce6ec54477f31194871eca8e", "type": "github" }, "original": { "owner": "NixOS", - "ref": "nixos-21.11", 
"repo": "nixpkgs", + "rev": "9b095223a5dc9a6bce6ec54477f31194871eca8e", "type": "github" } }, diff --git a/flake.nix b/flake.nix index 175aa406..7ea0ce35 100644 --- a/flake.nix +++ b/flake.nix @@ -3,7 +3,8 @@ # FIXME: All the pinned versions of nix/nixpkgs have a broken foreman (yes, # even 2.7.0's Nixpkgs pin). - inputs.newNixpkgs.url = "github:NixOS/nixpkgs/nixos-21.11"; + # FIXME: has updated prometheus-cpp: https://github.com/NixOS/nixpkgs/pull/163695 + inputs.newNixpkgs.url = "github:NixOS/nixpkgs/9b095223a5dc9a6bce6ec54477f31194871eca8e"; inputs.nixpkgs.follows = "nix/nixpkgs"; inputs.nix.url = github:NixOS/nix/2.6.0; @@ -41,6 +42,12 @@ # A Nixpkgs overlay that provides a 'hydra' package. overlay = final: prev: { + # Overlay these packages to use dependencies from the Nixpkgs everything + # else uses, to side-step the version difference: glibc is 2.32 in the + # nix-pinned Nixpkgs, but 2.33 in the newNixpkgs commit. + civetweb = final.callPackage "${newNixpkgs}/pkgs/development/libraries/civetweb" { }; + prometheus-cpp = final.callPackage "${newNixpkgs}/pkgs/development/libraries/prometheus-cpp" { }; + # Add LDAP dependencies that aren't currently found within nixpkgs. perlPackages = prev.perlPackages // { TestPostgreSQL = final.perlPackages.buildPerlModule { From 6e6475d860269b5da31f83b4954432b2d071a8d8 Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Fri, 11 Mar 2022 11:51:26 -0800 Subject: [PATCH 063/401] flake: replace aliases with their proper names Newer Nixpkgs have added a throw for these aliases. 
--- flake.nix | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/flake.nix b/flake.nix index 7ea0ce35..a0c4ff74 100644 --- a/flake.nix +++ b/flake.nix @@ -71,7 +71,7 @@ }; }; - FunctionParameters = final.buildPerlPackage { + FunctionParameters = final.perlPackages.buildPerlPackage { pname = "Function-Parameters"; version = "2.001003"; src = final.fetchurl { @@ -85,7 +85,7 @@ }; }; - CatalystPluginPrometheusTiny = final.buildPerlPackage { + CatalystPluginPrometheusTiny = final.perlPackages.buildPerlPackage { pname = "Catalyst-Plugin-PrometheusTiny"; version = "0.005"; src = final.fetchurl { @@ -114,7 +114,7 @@ }; }; - CryptPassphrase = final.buildPerlPackage { + CryptPassphrase = final.perlPackages.buildPerlPackage { pname = "Crypt-Passphrase"; version = "0.003"; src = final.fetchurl { @@ -127,7 +127,7 @@ }; }; - CryptPassphraseArgon2 = final.buildPerlPackage { + CryptPassphraseArgon2 = final.perlPackages.buildPerlPackage { pname = "Crypt-Passphrase-Argon2"; version = "0.002"; src = final.fetchurl { @@ -141,7 +141,7 @@ }; }; - DataRandom = final.buildPerlPackage { + DataRandom = final.perlPackages.buildPerlPackage { pname = "Data-Random"; version = "0.13"; src = final.fetchurl { @@ -155,7 +155,7 @@ }; }; - DirSelf = final.buildPerlPackage { + DirSelf = final.perlPackages.buildPerlPackage { pname = "Dir-Self"; version = "0.11"; src = final.fetchurl { @@ -183,7 +183,7 @@ }; }; - PrometheusTiny = final.buildPerlPackage { + PrometheusTiny = final.perlPackages.buildPerlPackage { pname = "Prometheus-Tiny"; version = "0.007"; src = final.fetchurl { @@ -198,7 +198,7 @@ }; }; - PrometheusTinyShared = final.buildPerlPackage { + PrometheusTinyShared = final.perlPackages.buildPerlPackage { pname = "Prometheus-Tiny-Shared"; version = "0.023"; src = final.fetchurl { @@ -229,7 +229,7 @@ }; }; - TieHashMethod = final.buildPerlPackage { + TieHashMethod = final.perlPackages.buildPerlPackage { pname = "Tie-Hash-Method"; version = "0.02"; 
src = final.fetchurl { @@ -242,7 +242,7 @@ }; }; - Test2Harness = final.buildPerlPackage { + Test2Harness = final.perlPackages.buildPerlPackage { pname = "Test2-Harness"; version = "1.000042"; src = final.fetchurl { @@ -291,7 +291,7 @@ }; }; - LongJump = final.buildPerlPackage { + LongJump = final.perlPackages.buildPerlPackage { pname = "Long-Jump"; version = "0.000001"; src = final.fetchurl { @@ -305,7 +305,7 @@ }; }; - gotofile = final.buildPerlPackage { + gotofile = final.perlPackages.buildPerlPackage { pname = "goto-file"; version = "0.005"; src = final.fetchurl { @@ -435,7 +435,7 @@ }; }; - StringCompareConstantTime = final.buildPerlPackage { + StringCompareConstantTime = final.perlPackages.buildPerlPackage { pname = "String-Compare-ConstantTime"; version = "0.321"; src = final.fetchurl { @@ -448,7 +448,7 @@ }; }; - UUID4Tiny = final.buildPerlPackage { + UUID4Tiny = final.perlPackages.buildPerlPackage { pname = "UUID4-Tiny"; version = "0.002"; src = final.fetchurl { @@ -581,7 +581,7 @@ # including 2.7.0 newNixpkgs.legacyPackages.${final.system}.foreman glibcLocales - netcat-openbsd + libressl.nc openldap python3 ]; From 52a29d43e65d8d7078589269ab854faad0a3bc63 Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Fri, 11 Mar 2022 11:52:43 -0800 Subject: [PATCH 064/401] hydra-queue-runner: make registry member of State, configurable metrics port Thanks to the updated prometheus-cpp library, specifying a port of 0 will cause it to pick a random (available) port -- ideal for tests. 
--- src/hydra-queue-runner/hydra-queue-runner.cc | 46 ++++++++++---------- src/hydra-queue-runner/state.hh | 8 +++- 2 files changed, 31 insertions(+), 23 deletions(-) diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index af7ec28d..fd4ba50d 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -7,7 +7,6 @@ #include #include -#include #include #include "state.hh" @@ -41,7 +40,7 @@ std::string getEnvOrDie(const std::string & key) } -State::State() +State::State(uint16_t metricsPort) : config(std::make_unique()) , maxUnsupportedTime(config->getIntOption("max_unsupported_time", 0)) , dbPool(config->getIntOption("max_db_connections", 128)) @@ -49,6 +48,8 @@ State::State() , maxLogSize(config->getIntOption("max_log_size", 64ULL << 20)) , uploadLogsToBinaryCache(config->getBoolOption("upload_logs_to_binary_cache", false)) , rootsDir(config->getStrOption("gc_roots_dir", fmt("%s/gcroots/per-user/%s/hydra-roots", settings.nixStateDir, getEnvOrDie("LOGNAME")))) + , registry(std::make_shared()) + , metricsPort(metricsPort) { hydraData = getEnvOrDie("HYDRA_DATA"); @@ -758,6 +759,15 @@ void State::run(BuildID buildOne) if (!lock) throw Error("hydra-queue-runner is already running"); + /* Set up simple exporter, to show that we're still alive. */ + std::string metricsAddress{"127.0.0.1:" + std::to_string(metricsPort)}; + prometheus::Exposer exposer{metricsAddress}; + exposer.RegisterCollectable(registry); + + std::cout << "Starting the Prometheus exporter, listening on " + << "http://" << metricsAddress << "/metrics" + << std::endl; + Store::Params localParams; localParams["max-connections"] = "16"; localParams["max-connection-age"] = "600"; @@ -858,25 +868,6 @@ int main(int argc, char * * argv) return handleExceptions(argv[0], [&]() { initNix(); - /* Export a simple "up" metric, to allow monitoring that we're - still alive. 
*/ - std::thread([&]() { - prometheus::Exposer exposer{"127.0.0.1:8080"}; - - // @note it's the users responsibility to keep the object alive - auto registry = std::make_shared(); - - auto& running = prometheus::BuildGauge() - .Name("hydra_queue_runner_running") - .Help("Whether the queue runner is currently running") - .Register(*registry); - - exposer.RegisterCollectable(registry); - running.Add({}).Set(1); - - while (true) { } - }).detach(); - signal(SIGINT, SIG_DFL); signal(SIGTERM, SIG_DFL); signal(SIGHUP, SIG_DFL); @@ -887,6 +878,7 @@ int main(int argc, char * * argv) bool unlock = false; bool status = false; BuildID buildOne = 0; + uint16_t metricsPort = 0; parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) { if (*arg == "--unlock") @@ -898,6 +890,16 @@ int main(int argc, char * * argv) buildOne = *b; else throw Error("‘--build-one’ requires a build ID"); + } else if (*arg == "--port") { + if (auto p = string2Int(getArg(*arg, arg, end))) { + if (*p > std::numeric_limits::max()) { + throw Error("'--port' has a maximum of 65535"); + } else { + metricsPort = *p; + } + } else { + throw Error("'--port' requires a numeric port (0 for a random, usable port; max 65535)"); + } } else return false; return true; @@ -906,7 +908,7 @@ int main(int argc, char * * argv) settings.verboseBuild = true; settings.lockCPU = false; - State state; + State state{metricsPort}; if (status) state.showStatus(); else if (unlock) diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh index 8f303d28..4add0dbd 100644 --- a/src/hydra-queue-runner/state.hh +++ b/src/hydra-queue-runner/state.hh @@ -7,6 +7,8 @@ #include #include +#include + #include "db.hh" #include "parsed-derivations.hh" @@ -432,8 +434,12 @@ private: via gc_roots_dir. 
*/ nix::Path rootsDir; + std::shared_ptr registry; + + uint16_t metricsPort; + public: - State(); + State(uint16_t metricsPort); private: From c0f826b92d5cad9f7633c0a847a5e2c4a892c4dc Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Mon, 14 Mar 2022 08:41:45 -0700 Subject: [PATCH 065/401] hydra-queue-runner: get the listening port from the exposer itself Otherwise, when the port is randomly chosen (e.g. by specifying no port, or a port of 0), it will just show that the port is 0 and not the port that is actually serving the metrics. --- src/hydra-queue-runner/hydra-queue-runner.cc | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index fd4ba50d..727a728f 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -760,12 +760,13 @@ void State::run(BuildID buildOne) throw Error("hydra-queue-runner is already running"); /* Set up simple exporter, to show that we're still alive. */ - std::string metricsAddress{"127.0.0.1:" + std::to_string(metricsPort)}; - prometheus::Exposer exposer{metricsAddress}; + std::string metricsAddress{"127.0.0.1"}; + prometheus::Exposer exposer{metricsAddress + ":" + std::to_string(metricsPort)}; + auto exposerPort = exposer.GetListeningPorts().front(); exposer.RegisterCollectable(registry); std::cout << "Starting the Prometheus exporter, listening on " - << "http://" << metricsAddress << "/metrics" + << "http://" << metricsAddress << ":" << exposerPort << "/metrics" << std::endl; Store::Params localParams; From b0c17112c92c4dc3df688b77784468cc2f03c127 Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Fri, 18 Mar 2022 11:10:57 -0700 Subject: [PATCH 066/401] flake: update to nixos-unstable-small https://github.com/NixOS/nixpkgs/pull/163695 was merged, so no longer need to use my commit! 
--- flake.lock | 8 ++++---- flake.nix | 3 +-- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/flake.lock b/flake.lock index 586c08e0..4dc1b240 100644 --- a/flake.lock +++ b/flake.lock @@ -18,17 +18,17 @@ }, "newNixpkgs": { "locked": { - "lastModified": 1647023429, - "narHash": "sha256-LdMTXEgW+G1LXrGrME1b1CpTC6/r+meFZDHeXR2Ps40=", + "lastModified": 1647380550, + "narHash": "sha256-909TI9poX7CIUiFx203WL29YON6m/I6k0ExbZvR7bLM=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "9b095223a5dc9a6bce6ec54477f31194871eca8e", + "rev": "6e3ee8957637a60f5072e33d78e05c0f65c54366", "type": "github" }, "original": { "owner": "NixOS", + "ref": "nixos-unstable-small", "repo": "nixpkgs", - "rev": "9b095223a5dc9a6bce6ec54477f31194871eca8e", "type": "github" } }, diff --git a/flake.nix b/flake.nix index a0c4ff74..e9cc4d2b 100644 --- a/flake.nix +++ b/flake.nix @@ -3,8 +3,7 @@ # FIXME: All the pinned versions of nix/nixpkgs have a broken foreman (yes, # even 2.7.0's Nixpkgs pin). - # FIXME: has updated prometheus-cpp: https://github.com/NixOS/nixpkgs/pull/163695 - inputs.newNixpkgs.url = "github:NixOS/nixpkgs/9b095223a5dc9a6bce6ec54477f31194871eca8e"; + inputs.newNixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable-small"; inputs.nixpkgs.follows = "nix/nixpkgs"; inputs.nix.url = github:NixOS/nix/2.6.0; From 25f6bae84776538aef68235dbd1ba537fe6efce0 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Sat, 19 Mar 2022 14:34:43 -0400 Subject: [PATCH 067/401] HydraTestContext: make it easy to create a jobset without evaluating --- t/lib/HydraTestContext.pm | 61 ++++++++++++++++++++++++++++----------- 1 file changed, 44 insertions(+), 17 deletions(-) diff --git a/t/lib/HydraTestContext.pm b/t/lib/HydraTestContext.pm index ade12280..ce05b581 100644 --- a/t/lib/HydraTestContext.pm +++ b/t/lib/HydraTestContext.pm @@ -145,10 +145,47 @@ sub nix_state_dir { sub makeAndEvaluateJobset { my ($self, %opts) = @_; - my $expression = $opts{'expression'} || die "Mandatory 'expression' 
option not passed to makeAndEValuateJobset."; - my $should_build = $opts{'build'} // 0; + my $expression = $opts{'expression'} || die "Mandatory 'expression' option not passed to makeAndEvaluateJobset."; my $jobsdir = $opts{'jobsdir'} // $self->jobsdir; + my $should_build = $opts{'build'} // 0; + my $jobsetCtx = $self->makeJobset( + expression => $expression, + jobsdir => $jobsdir, + ); + my $jobset = $jobsetCtx->{"jobset"}; + + evalSucceeds($jobset) or die "Evaluating jobs/$expression should exit with return code 0"; + + my $builds = {}; + + for my $build ($jobset->builds) { + if ($should_build) { + runBuild($build) or die "Build '".$build->job."' from jobs/$expression should exit with return code 0"; + $build->discard_changes(); + } + + $builds->{$build->job} = $build; + } + + return $builds; +} + +# Create a jobset. +# +# In return, you get a hash of the user, project, and jobset records. +# +# This always uses an `expression` from the `jobsdir` directory. +# +# Hash Parameters: +# +# * expression: The file in the jobsdir directory to evaluate +# * jobsdir: An alternative jobsdir to source the expression from +sub makeJobset { + my ($self, %opts) = @_; + + my $expression = $opts{'expression'} || die "Mandatory 'expression' option not passed to makeJobset."; + my $jobsdir = $opts{'jobsdir'} // $self->jobsdir; # Create a new user for this test my $user = $self->db()->resultset('Users')->create({ @@ -174,23 +211,13 @@ sub makeAndEvaluateJobset { my $jobsetinput = $jobset->jobsetinputs->create({name => "jobs", type => "path"}); $jobsetinput->jobsetinputalts->create({altnr => 0, value => $jobsdir}); - evalSucceeds($jobset) or die "Evaluating jobs/$expression should exit with return code 0"; - - my $builds = {}; - - for my $build ($jobset->builds) { - if ($should_build) { - runBuild($build) or die "Build '".$build->job."' from jobs/$expression should exit with return code 0"; - $build->discard_changes(); - } - - $builds->{$build->job} = $build; - } - - return $builds; 
+ return { + user => $user, + project => $project, + jobset => $jobset, + }; } - sub DESTROY { my ($self) = @_; From 0c51de6334196683a039689af934344facadf8e2 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Sat, 19 Mar 2022 14:35:30 -0400 Subject: [PATCH 068/401] hydra-evaluate-jobset: assert it logs errored constituents properly --- t/evaluator/evaluate-constituents-broken.t | 32 ++++++++++++++++++++++ t/jobs/constituents-broken.nix | 19 +++++++++++++ 2 files changed, 51 insertions(+) create mode 100644 t/evaluator/evaluate-constituents-broken.t create mode 100644 t/jobs/constituents-broken.nix diff --git a/t/evaluator/evaluate-constituents-broken.t b/t/evaluator/evaluate-constituents-broken.t new file mode 100644 index 00000000..ed25d192 --- /dev/null +++ b/t/evaluator/evaluate-constituents-broken.t @@ -0,0 +1,32 @@ +use strict; +use warnings; +use Setup; +use Test2::V0; +use Hydra::Helper::Exec; + +my $ctx = test_context(); + +my $jobsetCtx = $ctx->makeJobset( + expression => 'constituents-broken.nix', +); +my $jobset = $jobsetCtx->{"jobset"}; + +my ($res, $stdout, $stderr) = captureStdoutStderr(60, + ("hydra-eval-jobset", $jobsetCtx->{"project"}->name, $jobset->name) +); +isnt($res, 0, "hydra-eval-jobset exits non-zero"); +ok(utf8::decode($stderr), "Stderr output is UTF8-clean"); +like( + $stderr, + qr/aggregate job ‘mixed_aggregate’ failed with the error: constituentA: does not exist/, + "The stderr record includes a relevant error message" +); + +$jobset->discard_changes; # refresh from DB +like( + $jobset->errormsg, + qr/aggregate job ‘mixed_aggregate’ failed with the error: constituentA: does not exist/, + "The jobset records a relevant error message" +); + +done_testing; diff --git a/t/jobs/constituents-broken.nix b/t/jobs/constituents-broken.nix new file mode 100644 index 00000000..0445a990 --- /dev/null +++ b/t/jobs/constituents-broken.nix @@ -0,0 +1,19 @@ +with import ./config.nix; +rec { + constituentA = null; + + constituentB = mkDerivation { + 
name = "empty-dir-B"; + builder = ./empty-dir-builder.sh; + }; + + mixed_aggregate = mkDerivation { + name = "mixed_aggregate"; + _hydraAggregate = true; + constituents = [ + "constituentA" + constituentB + ]; + builder = ./empty-dir-builder.sh; + }; +} From 074a2f96bf381b60c75c156db67836031f069b80 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Sat, 19 Mar 2022 14:37:12 -0400 Subject: [PATCH 069/401] hydra-eval-jobset: emit a useful error if constituents errored --- src/script/hydra-eval-jobset | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/script/hydra-eval-jobset b/src/script/hydra-eval-jobset index 108c59c8..99277fd4 100755 --- a/src/script/hydra-eval-jobset +++ b/src/script/hydra-eval-jobset @@ -799,7 +799,13 @@ sub checkJobsetWrapped { foreach my $job (values %{$jobs}) { next unless $job->{constituents}; - my $x = $drvPathToId{$job->{drvPath}} or die; + + if (defined $job->{error}) { + die "aggregate job ‘$job->{jobName}’ failed with the error: $job->{error}"; + } + + my $x = $drvPathToId{$job->{drvPath}} or + die "aggregate job ‘$job->{jobName}’ has no corresponding build record.\n"; foreach my $drvPath (@{$job->{constituents}}) { my $constituent = $drvPathToId{$drvPath}; if (defined $constituent) { From a582e4c485977d62c02b65e94d25eb5d2b283037 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Sat, 19 Mar 2022 14:46:53 -0400 Subject: [PATCH 070/401] HydraTestContext: add \n's to various dies --- src/script/hydra-eval-jobset | 2 +- t/lib/HydraTestContext.pm | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/script/hydra-eval-jobset b/src/script/hydra-eval-jobset index 99277fd4..8bafe07c 100755 --- a/src/script/hydra-eval-jobset +++ b/src/script/hydra-eval-jobset @@ -801,7 +801,7 @@ sub checkJobsetWrapped { next unless $job->{constituents}; if (defined $job->{error}) { - die "aggregate job ‘$job->{jobName}’ failed with the error: $job->{error}"; + die "aggregate job ‘$job->{jobName}’ 
failed with the error: $job->{error}\n"; } my $x = $drvPathToId{$job->{drvPath}} or diff --git a/t/lib/HydraTestContext.pm b/t/lib/HydraTestContext.pm index ce05b581..237fcbe4 100644 --- a/t/lib/HydraTestContext.pm +++ b/t/lib/HydraTestContext.pm @@ -145,7 +145,7 @@ sub nix_state_dir { sub makeAndEvaluateJobset { my ($self, %opts) = @_; - my $expression = $opts{'expression'} || die "Mandatory 'expression' option not passed to makeAndEvaluateJobset."; + my $expression = $opts{'expression'} || die "Mandatory 'expression' option not passed to makeAndEvaluateJobset.\n"; my $jobsdir = $opts{'jobsdir'} // $self->jobsdir; my $should_build = $opts{'build'} // 0; @@ -155,13 +155,13 @@ sub makeAndEvaluateJobset { ); my $jobset = $jobsetCtx->{"jobset"}; - evalSucceeds($jobset) or die "Evaluating jobs/$expression should exit with return code 0"; + evalSucceeds($jobset) or die "Evaluating jobs/$expression should exit with return code 0.\n"; my $builds = {}; for my $build ($jobset->builds) { if ($should_build) { - runBuild($build) or die "Build '".$build->job."' from jobs/$expression should exit with return code 0"; + runBuild($build) or die "Build '".$build->job."' from jobs/$expression should exit with return code 0.\n"; $build->discard_changes(); } @@ -184,7 +184,7 @@ sub makeAndEvaluateJobset { sub makeJobset { my ($self, %opts) = @_; - my $expression = $opts{'expression'} || die "Mandatory 'expression' option not passed to makeJobset."; + my $expression = $opts{'expression'} || die "Mandatory 'expression' option not passed to makeJobset.\n"; my $jobsdir = $opts{'jobsdir'} // $self->jobsdir; # Create a new user for this test @@ -227,7 +227,7 @@ sub DESTROY sub write_file { my ($path, $text) = @_; - open(my $fh, '>', $path) or die "Could not open file '$path' $!"; + open(my $fh, '>', $path) or die "Could not open file '$path' $!\n."; print $fh $text || ""; close $fh; } From 145667cb53aebac706eef7b50e42d366b72f53ac Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Sat, 
19 Mar 2022 22:43:19 -0400 Subject: [PATCH 071/401] hydra-update-gc-roots: allow cached refs to the build's jobset Re-executing this search_related on every access turned out to create very problematic performance. If a jobset had a lot of error output stored in the jobset, and there were many hundreds or thousands of active jobs, this could easily cause >1Gbps of network traffic. --- src/script/hydra-update-gc-roots | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/script/hydra-update-gc-roots b/src/script/hydra-update-gc-roots index fbb90488..f446cdf9 100755 --- a/src/script/hydra-update-gc-roots +++ b/src/script/hydra-update-gc-roots @@ -39,10 +39,7 @@ sub keepBuild { $build->finished; - # After #1093 merges this can become $build->jobset; - # However, with ->jobset being a column on master - # it seems DBIX gets a bit confused. - my ($jobset) = $build->search_related('jobset')->first; + my ($jobset) = $build->jobset; print STDERR " keeping ", ($build->finished ? "" : "scheduled "), "build ", $build->id, " (", $jobset->get_column('project'), ":", $jobset->get_column('name'), ":", $build->get_column('job'), "; ", From f353a7ac41933b376c282b6928b74d3c5e591add Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Sat, 19 Mar 2022 23:12:28 -0400 Subject: [PATCH 072/401] update-gc-roots: try subselecting the jobset table --- src/script/hydra-update-gc-roots | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/src/script/hydra-update-gc-roots b/src/script/hydra-update-gc-roots index f446cdf9..8cf24d5d 100755 --- a/src/script/hydra-update-gc-roots +++ b/src/script/hydra-update-gc-roots @@ -37,8 +37,6 @@ sub keepBuild { return if defined $seenBuilds{$build->id}; $seenBuilds{$build->id} = 1; - $build->finished; - my ($jobset) = $build->jobset; print STDERR " keeping ", ($build->finished ? 
"" : "scheduled "), "build ", $build->id, " (", @@ -76,13 +74,29 @@ closedir $dir; # For scheduled builds, we register the derivation as a GC root. print STDERR "*** looking for scheduled builds\n"; -keepBuild($_, 0) foreach $db->resultset('Builds')->search({ finished => 0 }, { columns => [ @columns ] }); +keepBuild($_, 0) foreach $db->resultset('Builds')->search( + { finished => 0 }, + { + columns => [ @columns ], + join => 'jobset', + '+select' => ['jobset.project', 'jobset.name'], + '+as' => ['jobset.project', 'jobset.name'], + } +); # Keep all builds that have been marked as "keep". print STDERR "*** looking for kept builds\n"; my @buildsToKeep = $db->resultset('Builds')->search( - { finished => 1, keep => 1 }, { order_by => ["jobset_id", "job", "id"], columns => [ @columns ] }); + { finished => 1, keep => 1 }, + { + order_by => ["jobset_id", "job", "id"], + columns => [ @columns ], + join => 'jobset', + '+select' => ['jobset.project', 'jobset.name'], + '+as' => ['jobset.project', 'jobset.name'], + } +); keepBuild($_, 0) foreach @buildsToKeep; From 137be3452e9a8140b373ae195ba396fa115576c4 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Sat, 19 Mar 2022 23:24:28 -0400 Subject: [PATCH 073/401] Reduce the jobset cols on the remaining two queries --- src/script/hydra-update-gc-roots | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/src/script/hydra-update-gc-roots b/src/script/hydra-update-gc-roots index 8cf24d5d..315d4ebb 100755 --- a/src/script/hydra-update-gc-roots +++ b/src/script/hydra-update-gc-roots @@ -138,7 +138,14 @@ foreach my $project ($db->resultset('Projects')->search({}, { order_by => ["name { id => { -in => $db->resultset('JobsetEvalMembers')->search({ eval => { -in => [@evals] } }, { select => "build" })->as_query } , finished => 1 }, - { order_by => ["job", "id"], columns => [ @columns ] }); + { + order_by => ["job", "id"], + columns => [ @columns ], + join => 'jobset', + '+select' => ['jobset.project', 
'jobset.name'], + '+as' => ['jobset.project', 'jobset.name'], + } + ); print STDERR "*** looking for the most recent successful builds of current jobs in ", $project->name, ":", $jobset->name, "\n"; @@ -158,7 +165,13 @@ foreach my $project ($db->resultset('Projects')->search({}, { order_by => ["name , select => [ { max => 'id', -as => 'm' } ] })->as_query } }, - { columns => [ @columns ] }); + { + columns => [ @columns ] + join => 'jobset', + '+select' => ['jobset.project', 'jobset.name'], + '+as' => ['jobset.project', 'jobset.name'], + } + ); } } From e5393c2cf87909b251d0ddcdc1ea254956d79660 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Sat, 19 Mar 2022 23:34:13 -0400 Subject: [PATCH 074/401] fixup: make id non-ambiguous --- src/script/hydra-update-gc-roots | 10 +++++----- t/scripts/hydra-update-gc-roots/update-gc-roots.t | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/script/hydra-update-gc-roots b/src/script/hydra-update-gc-roots index 315d4ebb..11eba7a6 100755 --- a/src/script/hydra-update-gc-roots +++ b/src/script/hydra-update-gc-roots @@ -135,11 +135,11 @@ foreach my $project ($db->resultset('Projects')->search({}, { order_by => ["name # Note: we also keep the derivations of failed builds so that # they can be restarted. keepBuild($_, 1) foreach $jobset->builds->search( - { id => { -in => $db->resultset('JobsetEvalMembers')->search({ eval => { -in => [@evals] } }, { select => "build" })->as_query } + { "me.id" => { -in => $db->resultset('JobsetEvalMembers')->search({ eval => { -in => [@evals] } }, { select => "build" })->as_query } , finished => 1 }, { - order_by => ["job", "id"], + order_by => ["job", "me.id"], columns => [ @columns ], join => 'jobset', '+select' => ['jobset.project', 'jobset.name'], @@ -153,7 +153,7 @@ foreach my $project ($db->resultset('Projects')->search({}, { order_by => ["name # Keep the most recently succeeded build of a current job. Oh # I really need to stop using DBIx::Class. 
keepBuild($_, 1) foreach $jobset->builds->search( - { id => { -in => $jobset->builds->search( + { "me.id" => { -in => $jobset->builds->search( { finished => 1 , buildstatus => [0, 6] , job => { -in => $jobset->builds->search( @@ -162,11 +162,11 @@ foreach my $project ($db->resultset('Projects')->search({}, { order_by => ["name )->as_query } }, { group_by => 'job' - , select => [ { max => 'id', -as => 'm' } ] + , select => [ { max => 'me.id', -as => 'm' } ] })->as_query } }, { - columns => [ @columns ] + columns => [ @columns ], join => 'jobset', '+select' => ['jobset.project', 'jobset.name'], '+as' => ['jobset.project', 'jobset.name'], diff --git a/t/scripts/hydra-update-gc-roots/update-gc-roots.t b/t/scripts/hydra-update-gc-roots/update-gc-roots.t index d47e36c1..3e019ece 100644 --- a/t/scripts/hydra-update-gc-roots/update-gc-roots.t +++ b/t/scripts/hydra-update-gc-roots/update-gc-roots.t @@ -16,7 +16,7 @@ subtest "Updating GC roots" => sub { is($res, 0, "hydra-update-gc-roots should exit zero"); if ($res != 0) { print "gc roots stdout: $stdout\n"; - print "gc roots stderr: $stderr"; + print "gc roots stderr: $stderr\n"; } }; From 8503a7917b65eb77c3267312dbbaae686f91a1ec Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Tue, 22 Mar 2022 13:38:13 -0700 Subject: [PATCH 075/401] fixup! 
hydra-queue-runner: make registry member of State, configurable metrics port --- src/hydra-queue-runner/hydra-queue-runner.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index 727a728f..f20c8e73 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -7,7 +7,6 @@ #include #include -#include #include "state.hh" #include "build-result.hh" From 8e3ada2afcc2dd5153d3ae162afbb0633a570285 Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Mon, 28 Mar 2022 09:26:28 -0700 Subject: [PATCH 076/401] Revert "Use `copyClosure` instead of `computeFSClosure` + `copyPaths`" This reverts commit f14c583ce5188903f7c9db6f99c8c3fb42c77416. --- src/hydra-queue-runner/build-remote.cc | 6 +++--- src/hydra-queue-runner/queue-monitor.cc | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/hydra-queue-runner/build-remote.cc b/src/hydra-queue-runner/build-remote.cc index 69c82e72..464a35c8 100644 --- a/src/hydra-queue-runner/build-remote.cc +++ b/src/hydra-queue-runner/build-remote.cc @@ -287,9 +287,9 @@ void State::buildRemote(ref destStore, this will copy the inputs to the binary cache from the local store. */ if (localStore != std::shared_ptr(destStore)) { - copyClosure(*localStore, *destStore, - step->drv->inputSrcs, - NoRepair, NoCheckSigs, NoSubstitute); + StorePathSet closure; + localStore->computeFSClosure(step->drv->inputSrcs, closure); + copyPaths(*localStore, *destStore, closure, NoRepair, NoCheckSigs, NoSubstitute); } { diff --git a/src/hydra-queue-runner/queue-monitor.cc b/src/hydra-queue-runner/queue-monitor.cc index 3f19d36a..49caf8e3 100644 --- a/src/hydra-queue-runner/queue-monitor.cc +++ b/src/hydra-queue-runner/queue-monitor.cc @@ -513,9 +513,9 @@ Step::ptr State::createStep(ref destStore, // FIXME: should copy directly from substituter to destStore. 
} - copyClosure(*localStore, *destStore, - StorePathSet { *path }, - NoRepair, CheckSigs, NoSubstitute); + StorePathSet closure; + localStore->computeFSClosure({*path}, closure); + copyPaths(*localStore, *destStore, closure, NoRepair, CheckSigs, NoSubstitute); time_t stopTime = time(0); From 127a64459577f470c0b981f22049735b402246f9 Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Mon, 28 Mar 2022 09:24:31 -0700 Subject: [PATCH 077/401] Revert "Update Nix to 2.6" This reverts commit 5ae26aa7604f714dcc73edcb74fe71ddc8957f6c. --- flake.lock | 43 +++++++++++-------------------------- flake.nix | 1 - t/jobs/empty-dir-builder.sh | 3 --- 3 files changed, 13 insertions(+), 34 deletions(-) diff --git a/flake.lock b/flake.lock index e4bf8c71..fa71ceb5 100644 --- a/flake.lock +++ b/flake.lock @@ -3,15 +3,16 @@ "lowdown-src": { "flake": false, "locked": { - "lastModified": 1633514407, - "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=", + "lastModified": 1617481909, + "narHash": "sha256-SqnfOFuLuVRRNeVJr1yeEPJue/qWoCp5N6o5Kr///p4=", "owner": "kristapsdz", "repo": "lowdown", - "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8", + "rev": "148f9b2f586c41b7e36e73009db43ea68c7a1a4d", "type": "github" }, "original": { "owner": "kristapsdz", + "ref": "VERSION_0_8_4", "repo": "lowdown", "type": "github" } @@ -19,31 +20,28 @@ "nix": { "inputs": { "lowdown-src": "lowdown-src", - "nixpkgs": "nixpkgs", - "nixpkgs-regression": "nixpkgs-regression" + "nixpkgs": "nixpkgs" }, "locked": { - "lastModified": 1643066034, - "narHash": "sha256-xEPeMcNJVOeZtoN+d+aRwolpW8mFSEQx76HTRdlhPhg=", + "lastModified": 1628586117, + "narHash": "sha256-8hS4xy7fq3z9XZIMYm4sQi9SzhcYqEJfdbwgDePoWuc=", "owner": "NixOS", "repo": "nix", - "rev": "a1cd7e58606a41fcf62bf8637804cf8306f17f62", + "rev": "a6ba313a0aac3b6e2fef434cb42d190a0849238e", "type": "github" }, "original": { - "owner": "NixOS", - "ref": "2.6.0", - "repo": "nix", - "type": "github" + "id": "nix", + "type": "indirect" } }, "nixpkgs": { 
"locked": { - "lastModified": 1632864508, - "narHash": "sha256-d127FIvGR41XbVRDPVvozUPQ/uRHbHwvfyKHwEt5xFM=", + "lastModified": 1624862269, + "narHash": "sha256-JFcsh2+7QtfKdJFoPibLFPLgIW6Ycnv8Bts9a7RYme0=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "82891b5e2c2359d7e58d08849e4c89511ab94234", + "rev": "f77036342e2b690c61c97202bf48f2ce13acc022", "type": "github" }, "original": { @@ -52,21 +50,6 @@ "type": "indirect" } }, - "nixpkgs-regression": { - "locked": { - "lastModified": 1643052045, - "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - }, - "original": { - "id": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "indirect" - } - }, "root": { "inputs": { "nix": "nix", diff --git a/flake.nix b/flake.nix index d69048ed..56cb2960 100644 --- a/flake.nix +++ b/flake.nix @@ -2,7 +2,6 @@ description = "A Nix-based continuous build system"; inputs.nixpkgs.follows = "nix/nixpkgs"; - inputs.nix.url = github:NixOS/nix/2.6.0; outputs = { self, nixpkgs, nix }: let diff --git a/t/jobs/empty-dir-builder.sh b/t/jobs/empty-dir-builder.sh index 949216e0..addc7ef6 100755 --- a/t/jobs/empty-dir-builder.sh +++ b/t/jobs/empty-dir-builder.sh @@ -1,6 +1,3 @@ #! /bin/sh -# Workaround for https://github.com/NixOS/nix/pull/6051 -echo "some output" - mkdir $out From 2ba83a5cba8db06cb26aa14e6ba99e6764682ace Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Mon, 28 Mar 2022 09:33:58 -0700 Subject: [PATCH 078/401] t/jobs/empty-dir-builder: provide output for `nix log` --- t/jobs/empty-dir-builder.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/t/jobs/empty-dir-builder.sh b/t/jobs/empty-dir-builder.sh index addc7ef6..949216e0 100755 --- a/t/jobs/empty-dir-builder.sh +++ b/t/jobs/empty-dir-builder.sh @@ -1,3 +1,6 @@ #! 
/bin/sh +# Workaround for https://github.com/NixOS/nix/pull/6051 +echo "some output" + mkdir $out From 921e27d6c0e1974adb53f8db4a2a0da120ff65c4 Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Mon, 28 Mar 2022 11:36:14 -0700 Subject: [PATCH 079/401] Build against Nix 2.5.1 --- flake.lock | 25 +++++++++++++------------ flake.nix | 1 + 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/flake.lock b/flake.lock index fa71ceb5..377aed12 100644 --- a/flake.lock +++ b/flake.lock @@ -3,16 +3,15 @@ "lowdown-src": { "flake": false, "locked": { - "lastModified": 1617481909, - "narHash": "sha256-SqnfOFuLuVRRNeVJr1yeEPJue/qWoCp5N6o5Kr///p4=", + "lastModified": 1633514407, + "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=", "owner": "kristapsdz", "repo": "lowdown", - "rev": "148f9b2f586c41b7e36e73009db43ea68c7a1a4d", + "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8", "type": "github" }, "original": { "owner": "kristapsdz", - "ref": "VERSION_0_8_4", "repo": "lowdown", "type": "github" } @@ -23,25 +22,27 @@ "nixpkgs": "nixpkgs" }, "locked": { - "lastModified": 1628586117, - "narHash": "sha256-8hS4xy7fq3z9XZIMYm4sQi9SzhcYqEJfdbwgDePoWuc=", + "lastModified": 1639739069, + "narHash": "sha256-GOsiqy9EaTwDn2PLZ4eFj1VkXcBUbqrqHehRE9GuGdU=", "owner": "NixOS", "repo": "nix", - "rev": "a6ba313a0aac3b6e2fef434cb42d190a0849238e", + "rev": "b4f250417ab64f237c8b51439fe1f427193ab23b", "type": "github" }, "original": { - "id": "nix", - "type": "indirect" + "owner": "NixOS", + "ref": "2.5.1", + "repo": "nix", + "type": "github" } }, "nixpkgs": { "locked": { - "lastModified": 1624862269, - "narHash": "sha256-JFcsh2+7QtfKdJFoPibLFPLgIW6Ycnv8Bts9a7RYme0=", + "lastModified": 1632864508, + "narHash": "sha256-d127FIvGR41XbVRDPVvozUPQ/uRHbHwvfyKHwEt5xFM=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "f77036342e2b690c61c97202bf48f2ce13acc022", + "rev": "82891b5e2c2359d7e58d08849e4c89511ab94234", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 
56cb2960..188f2bf9 100644 --- a/flake.nix +++ b/flake.nix @@ -2,6 +2,7 @@ description = "A Nix-based continuous build system"; inputs.nixpkgs.follows = "nix/nixpkgs"; + inputs.nix.url = "github:NixOS/nix/2.5.1"; outputs = { self, nixpkgs, nix }: let From dc709422a63df5a4714888762dc23bdd75cb5bb7 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 29 Mar 2022 09:24:16 -0400 Subject: [PATCH 080/401] Revert "Build against Nix 2.5.1" - build against nix-2.4pre20210810_a6ba313 This reverts commit 921e27d6c0e1974adb53f8db4a2a0da120ff65c4. --- flake.lock | 25 ++++++++++++------------- flake.nix | 1 - 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/flake.lock b/flake.lock index 377aed12..fa71ceb5 100644 --- a/flake.lock +++ b/flake.lock @@ -3,15 +3,16 @@ "lowdown-src": { "flake": false, "locked": { - "lastModified": 1633514407, - "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=", + "lastModified": 1617481909, + "narHash": "sha256-SqnfOFuLuVRRNeVJr1yeEPJue/qWoCp5N6o5Kr///p4=", "owner": "kristapsdz", "repo": "lowdown", - "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8", + "rev": "148f9b2f586c41b7e36e73009db43ea68c7a1a4d", "type": "github" }, "original": { "owner": "kristapsdz", + "ref": "VERSION_0_8_4", "repo": "lowdown", "type": "github" } @@ -22,27 +23,25 @@ "nixpkgs": "nixpkgs" }, "locked": { - "lastModified": 1639739069, - "narHash": "sha256-GOsiqy9EaTwDn2PLZ4eFj1VkXcBUbqrqHehRE9GuGdU=", + "lastModified": 1628586117, + "narHash": "sha256-8hS4xy7fq3z9XZIMYm4sQi9SzhcYqEJfdbwgDePoWuc=", "owner": "NixOS", "repo": "nix", - "rev": "b4f250417ab64f237c8b51439fe1f427193ab23b", + "rev": "a6ba313a0aac3b6e2fef434cb42d190a0849238e", "type": "github" }, "original": { - "owner": "NixOS", - "ref": "2.5.1", - "repo": "nix", - "type": "github" + "id": "nix", + "type": "indirect" } }, "nixpkgs": { "locked": { - "lastModified": 1632864508, - "narHash": "sha256-d127FIvGR41XbVRDPVvozUPQ/uRHbHwvfyKHwEt5xFM=", + "lastModified": 1624862269, + "narHash": 
"sha256-JFcsh2+7QtfKdJFoPibLFPLgIW6Ycnv8Bts9a7RYme0=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "82891b5e2c2359d7e58d08849e4c89511ab94234", + "rev": "f77036342e2b690c61c97202bf48f2ce13acc022", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 188f2bf9..56cb2960 100644 --- a/flake.nix +++ b/flake.nix @@ -2,7 +2,6 @@ description = "A Nix-based continuous build system"; inputs.nixpkgs.follows = "nix/nixpkgs"; - inputs.nix.url = "github:NixOS/nix/2.5.1"; outputs = { self, nixpkgs, nix }: let From 5db8642224e996d0ebe3492cd55dfef6276d5a15 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Th=C3=A9ophane=20Hufschmitt?= Date: Mon, 21 Mar 2022 16:02:51 +0100 Subject: [PATCH 081/401] Factor out a struct representing a connection to a machine --- src/hydra-queue-runner/state.hh | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh index 8f303d28..059b03a1 100644 --- a/src/hydra-queue-runner/state.hh +++ b/src/hydra-queue-runner/state.hh @@ -290,6 +290,16 @@ struct Machine { return sshName == "localhost"; } + + // A connection to a machine + struct Connection { + nix::FdSink to; + nix::FdSource from; + unsigned int remoteVersion; + + // Backpointer to the machine + ptr machine; + }; }; From 2f494b783425d6703f23bc4f2cdbc70592a31990 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Th=C3=A9ophane=20Hufschmitt?= Date: Mon, 21 Mar 2022 10:42:44 +0100 Subject: [PATCH 082/401] Factor out the creation of the log file --- src/hydra-queue-runner/build-remote.cc | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/src/hydra-queue-runner/build-remote.cc b/src/hydra-queue-runner/build-remote.cc index 464a35c8..2e258484 100644 --- a/src/hydra-queue-runner/build-remote.cc +++ b/src/hydra-queue-runner/build-remote.cc @@ -175,6 +175,18 @@ StorePaths reverseTopoSortPaths(const std::map & paths return sorted; } +std::pair openLogFile(const std::string & logDir, const 
StorePath & drvPath) +{ + string base(drvPath.to_string()); + auto logFile = logDir + "/" + string(base, 0, 2) + "/" + string(base, 2); + + createDirs(dirOf(logFile)); + + AutoCloseFD logFD = open(logFile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666); + if (!logFD) throw SysError("creating log file ‘%s’", logFile); + + return {std::move(logFile), std::move(logFD)}; +} void State::buildRemote(ref destStore, Machine::ptr machine, Step::ptr step, @@ -185,14 +197,9 @@ void State::buildRemote(ref destStore, { assert(BuildResult::TimedOut == 8); - string base(step->drvPath.to_string()); - result.logFile = logDir + "/" + string(base, 0, 2) + "/" + string(base, 2); - AutoDelete autoDelete(result.logFile, false); - - createDirs(dirOf(result.logFile)); - - AutoCloseFD logFD = open(result.logFile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666); - if (!logFD) throw SysError("creating log file ‘%s’", result.logFile); + auto [logFile, logFD] = openLogFile(logDir, step->drvPath); + AutoDelete logFileDel(logFile, false); + result.logFile = logFile; nix::Path tmpDir = createTempDir(); AutoDelete tmpDirDel(tmpDir, true); @@ -316,7 +323,7 @@ void State::buildRemote(ref destStore, result.overhead += std::chrono::duration_cast(now2 - now1).count(); } - autoDelete.cancel(); + logFileDel.cancel(); /* Truncate the log to get rid of messages about substitutions etc. on the remote system. 
*/ From 9f1b911625cbc92023b6aec936a505a46af3172f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Th=C3=A9ophane=20Hufschmitt?= Date: Mon, 21 Mar 2022 11:35:38 +0100 Subject: [PATCH 083/401] Factor more stuff out --- src/hydra-queue-runner/build-remote.cc | 224 ++++++++++++++----------- 1 file changed, 122 insertions(+), 102 deletions(-) diff --git a/src/hydra-queue-runner/build-remote.cc b/src/hydra-queue-runner/build-remote.cc index 2e258484..360a8ef7 100644 --- a/src/hydra-queue-runner/build-remote.cc +++ b/src/hydra-queue-runner/build-remote.cc @@ -188,6 +188,87 @@ std::pair openLogFile(const std::string & logDir, const Store return {std::move(logFile), std::move(logFD)}; } +void handshake(Machine::Connection & conn, unsigned int repeats) +{ + conn.to << SERVE_MAGIC_1 << 0x204; + conn.to.flush(); + + unsigned int magic = readInt(conn.from); + if (magic != SERVE_MAGIC_2) + throw Error("protocol mismatch with ‘nix-store --serve’ on ‘%1%’", conn.machine->sshName); + conn.remoteVersion = readInt(conn.from); + if (GET_PROTOCOL_MAJOR(conn.remoteVersion) != 0x200) + throw Error("unsupported ‘nix-store --serve’ protocol version on ‘%1%’", conn.machine->sshName); + if (GET_PROTOCOL_MINOR(conn.remoteVersion) < 3 && repeats > 0) + throw Error("machine ‘%1%’ does not support repeating a build; please upgrade it to Nix 1.12", conn.machine->sshName); +} + +StorePathSet sendInputs( + State & state, + Step & step, + Store & localStore, + Store & destStore, + Machine::Connection & conn, + unsigned int & overhead, + counter & nrStepsWaiting, + counter & nrStepsCopyingTo +) +{ + + StorePathSet inputs; + BasicDerivation basicDrv(*step.drv); + + for (auto & p : step.drv->inputSrcs) + inputs.insert(p); + + for (auto & input : step.drv->inputDrvs) { + auto drv2 = localStore.readDerivation(input.first); + for (auto & name : input.second) { + if (auto i = get(drv2.outputs, name)) { + auto outPath = i->path(localStore, drv2.name, name); + inputs.insert(*outPath); + 
basicDrv.inputSrcs.insert(*outPath); + } + } + } + + /* Ensure that the inputs exist in the destination store. This is + a no-op for regular stores, but for the binary cache store, + this will copy the inputs to the binary cache from the local + store. */ + if (localStore.getUri() != destStore.getUri()) { + StorePathSet closure; + localStore.computeFSClosure(step.drv->inputSrcs, closure); + copyPaths(localStore, destStore, closure, NoRepair, NoCheckSigs, NoSubstitute); + } + + { + auto mc1 = std::make_shared>(nrStepsWaiting); + mc1.reset(); + MaintainCount mc2(nrStepsCopyingTo); + + printMsg(lvlDebug, "sending closure of ‘%s’ to ‘%s’", + localStore.printStorePath(step.drvPath), conn.machine->sshName); + + auto now1 = std::chrono::steady_clock::now(); + + /* Copy the input closure. */ + if (conn.machine->isLocalhost()) { + StorePathSet closure; + destStore.computeFSClosure(inputs, closure); + copyPaths(destStore, localStore, closure, NoRepair, NoCheckSigs, NoSubstitute); + } else { + copyClosureTo(conn.machine->state->sendLock, destStore, conn.from, conn.to, inputs, true); + } + + auto now2 = std::chrono::steady_clock::now(); + + overhead += std::chrono::duration_cast(now2 - now1).count(); + } + + return inputs; +} + void State::buildRemote(ref destStore, Machine::ptr machine, Step::ptr step, unsigned int maxSilentTime, unsigned int buildTimeout, unsigned int repeats, @@ -230,33 +311,21 @@ void State::buildRemote(ref destStore, process. Meh. */ }); - FdSource from(child.from.get()); - FdSink to(child.to.get()); + Machine::Connection conn; + conn.from = child.from.get(); + conn.to = child.to.get(); + conn.machine = machine; Finally updateStats([&]() { - bytesReceived += from.read; - bytesSent += to.written; + bytesReceived += conn.from.read; + bytesSent += conn.to.written; }); - /* Handshake. 
*/ - unsigned int remoteVersion; - try { - to << SERVE_MAGIC_1 << 0x204; - to.flush(); - - unsigned int magic = readInt(from); - if (magic != SERVE_MAGIC_2) - throw Error("protocol mismatch with ‘nix-store --serve’ on ‘%1%’", machine->sshName); - remoteVersion = readInt(from); - if (GET_PROTOCOL_MAJOR(remoteVersion) != 0x200) - throw Error("unsupported ‘nix-store --serve’ protocol version on ‘%1%’", machine->sshName); - if (GET_PROTOCOL_MINOR(remoteVersion) < 3 && repeats > 0) - throw Error("machine ‘%1%’ does not support repeating a build; please upgrade it to Nix 1.12", machine->sshName); - + handshake(conn, repeats); } catch (EndOfFile & e) { child.pid.wait(); - string s = chomp(readFile(result.logFile)); + std::string s = chomp(readFile(result.logFile)); throw Error("cannot connect to ‘%1%’: %2%", machine->sshName, s); } @@ -272,61 +341,12 @@ void State::buildRemote(ref destStore, outputs of the input derivations. */ updateStep(ssSendingInputs); - StorePathSet inputs; - BasicDerivation basicDrv(*step->drv); - - for (auto & p : step->drv->inputSrcs) - inputs.insert(p); - - for (auto & input : step->drv->inputDrvs) { - auto drv2 = localStore->readDerivation(input.first); - for (auto & name : input.second) { - if (auto i = get(drv2.outputs, name)) { - auto outPath = i->path(*localStore, drv2.name, name); - inputs.insert(*outPath); - basicDrv.inputSrcs.insert(*outPath); - } - } - } - - /* Ensure that the inputs exist in the destination store. This is - a no-op for regular stores, but for the binary cache store, - this will copy the inputs to the binary cache from the local - store. 
*/ - if (localStore != std::shared_ptr(destStore)) { - StorePathSet closure; - localStore->computeFSClosure(step->drv->inputSrcs, closure); - copyPaths(*localStore, *destStore, closure, NoRepair, NoCheckSigs, NoSubstitute); - } - - { - auto mc1 = std::make_shared>(nrStepsWaiting); - mc1.reset(); - MaintainCount mc2(nrStepsCopyingTo); - - printMsg(lvlDebug, "sending closure of ‘%s’ to ‘%s’", - localStore->printStorePath(step->drvPath), machine->sshName); - - auto now1 = std::chrono::steady_clock::now(); - - /* Copy the input closure. */ - if (machine->isLocalhost()) { - StorePathSet closure; - destStore->computeFSClosure(inputs, closure); - copyPaths(*destStore, *localStore, closure, NoRepair, NoCheckSigs, NoSubstitute); - } else { - copyClosureTo(machine->state->sendLock, *destStore, from, to, inputs, true); - } - - auto now2 = std::chrono::steady_clock::now(); - - result.overhead += std::chrono::duration_cast(now2 - now1).count(); - } + StorePathSet inputs = sendInputs(*this, *step, *localStore, *destStore, conn, result.overhead, nrStepsWaiting, nrStepsCopyingTo); logFileDel.cancel(); /* Truncate the log to get rid of messages about substitutions - etc. on the remote system. */ + etc. on the remote system. 
*/ if (lseek(logFD.get(), SEEK_SET, 0) != 0) throw SysError("seeking to the start of log file ‘%s’", result.logFile); @@ -342,31 +362,31 @@ void State::buildRemote(ref destStore, updateStep(ssBuilding); - to << cmdBuildDerivation << localStore->printStorePath(step->drvPath); - writeDerivation(to, *localStore, basicDrv); - to << maxSilentTime << buildTimeout; - if (GET_PROTOCOL_MINOR(remoteVersion) >= 2) - to << maxLogSize; - if (GET_PROTOCOL_MINOR(remoteVersion) >= 3) { - to << repeats // == build-repeat + conn.to << cmdBuildDerivation << localStore->printStorePath(step->drvPath); + writeDerivation(conn.to, *localStore, BasicDerivation(*step->drv)); + conn.to << maxSilentTime << buildTimeout; + if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 2) + conn.to << maxLogSize; + if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 3) { + conn.to << repeats // == build-repeat << step->isDeterministic; // == enforce-determinism } - to.flush(); + conn.to.flush(); result.startTime = time(0); int res; { MaintainCount mc(nrStepsBuilding); - res = readInt(from); + res = readInt(conn.from); } result.stopTime = time(0); - result.errorMsg = readString(from); - if (GET_PROTOCOL_MINOR(remoteVersion) >= 3) { - result.timesBuilt = readInt(from); - result.isNonDeterministic = readInt(from); - auto start = readInt(from); - auto stop = readInt(from); + result.errorMsg = readString(conn.from); + if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 3) { + result.timesBuilt = readInt(conn.from); + result.isNonDeterministic = readInt(conn.from); + auto start = readInt(conn.from); + auto stop = readInt(conn.from); if (start && start) { /* Note: this represents the duration of a single round, rather than all rounds. 
*/ @@ -374,8 +394,8 @@ void State::buildRemote(ref destStore, result.stopTime = stop; } } - if (GET_PROTOCOL_MINOR(remoteVersion) >= 6) { - worker_proto::read(*localStore, from, Phantom {}); + if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 6) { + worker_proto::read(*localStore, conn.from, Phantom {}); } switch ((BuildResult::Status) res) { case BuildResult::Built: @@ -451,19 +471,19 @@ void State::buildRemote(ref destStore, /* Get info about each output path. */ std::map infos; size_t totalNarSize = 0; - to << cmdQueryPathInfos; - worker_proto::write(*localStore, to, outputs); - to.flush(); + conn.to << cmdQueryPathInfos; + worker_proto::write(*localStore, conn.to, outputs); + conn.to.flush(); while (true) { - auto storePathS = readString(from); + auto storePathS = readString(conn.from); if (storePathS == "") break; - auto deriver = readString(from); // deriver - auto references = worker_proto::read(*localStore, from, Phantom {}); - readLongLong(from); // download size - auto narSize = readLongLong(from); - auto narHash = Hash::parseAny(readString(from), htSHA256); - auto ca = parseContentAddressOpt(readString(from)); - readStrings(from); // sigs + auto deriver = readString(conn.from); // deriver + auto references = worker_proto::read(*localStore, conn.from, Phantom {}); + readLongLong(conn.from); // download size + auto narSize = readLongLong(conn.from); + auto narHash = Hash::parseAny(readString(conn.from), htSHA256); + auto ca = parseContentAddressOpt(readString(conn.from)); + readStrings(conn.from); // sigs ValidPathInfo info(localStore->parseStorePath(storePathS), narHash); assert(outputs.count(info.path)); info.references = references; @@ -502,10 +522,10 @@ void State::buildRemote(ref destStore, lambda function only gets executed if someone tries to read from source2, we will send the command from here rather than outside the lambda. 
*/ - to << cmdDumpStorePath << localStore->printStorePath(path); - to.flush(); + conn.to << cmdDumpStorePath << localStore->printStorePath(path); + conn.to.flush(); - TeeSource tee(from, sink); + TeeSource tee(conn.from, sink); extractNarData(tee, localStore->printStorePath(path), narMembers); }); From 365776f5d77112842d4a094a596cb725123b535d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Th=C3=A9ophane=20Hufschmitt?= Date: Mon, 21 Mar 2022 12:14:37 +0100 Subject: [PATCH 084/401] Factor out the building part --- src/hydra-queue-runner/build-remote.cc | 207 ++++++++++++++++--------- src/hydra-queue-runner/state.hh | 2 + 2 files changed, 132 insertions(+), 77 deletions(-) diff --git a/src/hydra-queue-runner/build-remote.cc b/src/hydra-queue-runner/build-remote.cc index 360a8ef7..7934d401 100644 --- a/src/hydra-queue-runner/build-remote.cc +++ b/src/hydra-queue-runner/build-remote.cc @@ -269,6 +269,121 @@ StorePathSet sendInputs( return inputs; } +struct BuildOptions { + unsigned int maxSilentTime, buildTimeout, repeats; + size_t maxLogSize; + bool enforceDeterminism; +}; + +void RemoteResult::updateWithBuildResult(const nix::BuildResult & buildResult) +{ + RemoteResult thisArrow; + + startTime = buildResult.startTime; + stopTime = buildResult.stopTime; + timesBuilt = buildResult.timesBuilt; + errorMsg = buildResult.errorMsg; + isNonDeterministic = buildResult.isNonDeterministic; + + switch ((BuildResult::Status) buildResult.status) { + case BuildResult::Built: + stepStatus = bsSuccess; + break; + case BuildResult::Substituted: + case BuildResult::AlreadyValid: + stepStatus = bsSuccess; + isCached = true; + break; + case BuildResult::PermanentFailure: + stepStatus = bsFailed; + canCache = true; + errorMsg = ""; + break; + case BuildResult::InputRejected: + case BuildResult::OutputRejected: + stepStatus = bsFailed; + canCache = true; + break; + case BuildResult::TransientFailure: + stepStatus = bsFailed; + canRetry = true; + errorMsg = ""; + break; + case 
BuildResult::TimedOut: + stepStatus = bsTimedOut; + errorMsg = ""; + break; + case BuildResult::MiscFailure: + stepStatus = bsAborted; + canRetry = true; + break; + case BuildResult::LogLimitExceeded: + stepStatus = bsLogLimitExceeded; + break; + case BuildResult::NotDeterministic: + stepStatus = bsNotDeterministic; + canRetry = false; + canCache = true; + break; + default: + stepStatus = bsAborted; + break; + } + +} + +BuildResult performBuild( + Machine::Connection & conn, + Store & localStore, + StorePath drvPath, + const BasicDerivation & drv, + const BuildOptions & options, + counter & nrStepsBuilding +) +{ + + BuildResult result; + + conn.to << cmdBuildDerivation << localStore.printStorePath(drvPath); + writeDerivation(conn.to, localStore, drv); + conn.to << options.maxSilentTime << options.buildTimeout; + if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 2) + conn.to << options.maxLogSize; + if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 3) { + conn.to << options.repeats // == build-repeat + << options.enforceDeterminism; + } + conn.to.flush(); + + result.startTime = time(0); + + { + MaintainCount mc(nrStepsBuilding); + result.status = (BuildResult::Status)readInt(conn.from); + } + result.stopTime = time(0); + + + result.errorMsg = readString(conn.from); + if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 3) { + result.timesBuilt = readInt(conn.from); + result.isNonDeterministic = readInt(conn.from); + auto start = readInt(conn.from); + auto stop = readInt(conn.from); + if (start && start) { + /* Note: this represents the duration of a single + round, rather than all rounds. 
*/ + result.startTime = start; + result.stopTime = stop; + } + } + if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 6) { + result.builtOutputs = worker_proto::read(localStore, conn.from, Phantom {}); + } + + return result; +} + void State::buildRemote(ref destStore, Machine::ptr machine, Step::ptr step, unsigned int maxSilentTime, unsigned int buildTimeout, unsigned int repeats, @@ -362,85 +477,23 @@ void State::buildRemote(ref destStore, updateStep(ssBuilding); - conn.to << cmdBuildDerivation << localStore->printStorePath(step->drvPath); - writeDerivation(conn.to, *localStore, BasicDerivation(*step->drv)); - conn.to << maxSilentTime << buildTimeout; - if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 2) - conn.to << maxLogSize; - if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 3) { - conn.to << repeats // == build-repeat - << step->isDeterministic; // == enforce-determinism - } - conn.to.flush(); + BuildResult buildResult = performBuild( + conn, + *localStore, + step->drvPath, + BasicDerivation(*step->drv), + { + .maxSilentTime = maxSilentTime, + .buildTimeout = buildTimeout, + .repeats = repeats, + .maxLogSize = maxLogSize, + .enforceDeterminism = step->isDeterministic, + }, + nrStepsBuilding + ); - result.startTime = time(0); - int res; - { - MaintainCount mc(nrStepsBuilding); - res = readInt(conn.from); - } - result.stopTime = time(0); + result.updateWithBuildResult(buildResult); - result.errorMsg = readString(conn.from); - if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 3) { - result.timesBuilt = readInt(conn.from); - result.isNonDeterministic = readInt(conn.from); - auto start = readInt(conn.from); - auto stop = readInt(conn.from); - if (start && start) { - /* Note: this represents the duration of a single - round, rather than all rounds. 
*/ - result.startTime = start; - result.stopTime = stop; - } - } - if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 6) { - worker_proto::read(*localStore, conn.from, Phantom {}); - } - switch ((BuildResult::Status) res) { - case BuildResult::Built: - result.stepStatus = bsSuccess; - break; - case BuildResult::Substituted: - case BuildResult::AlreadyValid: - result.stepStatus = bsSuccess; - result.isCached = true; - break; - case BuildResult::PermanentFailure: - result.stepStatus = bsFailed; - result.canCache = true; - result.errorMsg = ""; - break; - case BuildResult::InputRejected: - case BuildResult::OutputRejected: - result.stepStatus = bsFailed; - result.canCache = true; - break; - case BuildResult::TransientFailure: - result.stepStatus = bsFailed; - result.canRetry = true; - result.errorMsg = ""; - break; - case BuildResult::TimedOut: - result.stepStatus = bsTimedOut; - result.errorMsg = ""; - break; - case BuildResult::MiscFailure: - result.stepStatus = bsAborted; - result.canRetry = true; - break; - case BuildResult::LogLimitExceeded: - result.stepStatus = bsLogLimitExceeded; - break; - case BuildResult::NotDeterministic: - result.stepStatus = bsNotDeterministic; - result.canRetry = false; - result.canCache = true; - break; - default: - result.stepStatus = bsAborted; - break; - } if (result.stepStatus != bsSuccess) return; result.errorMsg = ""; diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh index 059b03a1..6292a2db 100644 --- a/src/hydra-queue-runner/state.hh +++ b/src/hydra-queue-runner/state.hh @@ -72,6 +72,8 @@ struct RemoteResult { return stepStatus == bsCachedFailure ? 
bsFailed : stepStatus; } + + void updateWithBuildResult(const nix::BuildResult &); }; From a778a89f0424b5bffa66818cf0b46ffa945403c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Th=C3=A9ophane=20Hufschmitt?= Date: Mon, 21 Mar 2022 15:16:32 +0100 Subject: [PATCH 085/401] Factor out the `queryPathInfos` part of the build --- src/hydra-queue-runner/build-remote.cc | 65 +++++++++++++++----------- 1 file changed, 39 insertions(+), 26 deletions(-) diff --git a/src/hydra-queue-runner/build-remote.cc b/src/hydra-queue-runner/build-remote.cc index 7934d401..0451ccb7 100644 --- a/src/hydra-queue-runner/build-remote.cc +++ b/src/hydra-queue-runner/build-remote.cc @@ -384,6 +384,44 @@ BuildResult performBuild( return result; } +std::map queryPathInfos( + Machine::Connection & conn, + Store & localStore, + StorePathSet & outputs, + size_t & totalNarSize +) +{ + + /* Get info about each output path. */ + std::map infos; + conn.to << cmdQueryPathInfos; + worker_proto::write(localStore, conn.to, outputs); + conn.to.flush(); + while (true) { + auto storePathS = readString(conn.from); + if (storePathS == "") break; + auto deriver = readString(conn.from); // deriver + auto references = worker_proto::read(localStore, conn.from, Phantom {}); + readLongLong(conn.from); // download size + auto narSize = readLongLong(conn.from); + auto narHash = Hash::parseAny(readString(conn.from), htSHA256); + auto ca = parseContentAddressOpt(readString(conn.from)); + readStrings(conn.from); // sigs + ValidPathInfo info(localStore.parseStorePath(storePathS), narHash); + assert(outputs.count(info.path)); + info.references = references; + info.narSize = narSize; + totalNarSize += info.narSize; + info.narHash = narHash; + info.ca = ca; + if (deriver != "") + info.deriver = localStore.parseStorePath(deriver); + infos.insert_or_assign(info.path, info); + } + + return infos; +} + void State::buildRemote(ref destStore, Machine::ptr machine, Step::ptr step, unsigned int maxSilentTime, unsigned int buildTimeout, 
unsigned int repeats, @@ -521,33 +559,8 @@ void State::buildRemote(ref destStore, outputs.insert(*i.second.second); } - /* Get info about each output path. */ - std::map infos; size_t totalNarSize = 0; - conn.to << cmdQueryPathInfos; - worker_proto::write(*localStore, conn.to, outputs); - conn.to.flush(); - while (true) { - auto storePathS = readString(conn.from); - if (storePathS == "") break; - auto deriver = readString(conn.from); // deriver - auto references = worker_proto::read(*localStore, conn.from, Phantom {}); - readLongLong(conn.from); // download size - auto narSize = readLongLong(conn.from); - auto narHash = Hash::parseAny(readString(conn.from), htSHA256); - auto ca = parseContentAddressOpt(readString(conn.from)); - readStrings(conn.from); // sigs - ValidPathInfo info(localStore->parseStorePath(storePathS), narHash); - assert(outputs.count(info.path)); - info.references = references; - info.narSize = narSize; - totalNarSize += info.narSize; - info.narHash = narHash; - info.ca = ca; - if (deriver != "") - info.deriver = localStore->parseStorePath(deriver); - infos.insert_or_assign(info.path, info); - } + auto infos = queryPathInfos(conn, *localStore, outputs, totalNarSize); if (totalNarSize > maxOutputSize) { result.stepStatus = bsNarSizeLimitExceeded; From fd0ae78eba058ab456590da698b45082fcafb519 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Th=C3=A9ophane=20Hufschmitt?= Date: Mon, 21 Mar 2022 15:26:31 +0100 Subject: [PATCH 086/401] Factor out the copying from the build store --- src/hydra-queue-runner/build-remote.cc | 76 +++++++++++++++++--------- 1 file changed, 49 insertions(+), 27 deletions(-) diff --git a/src/hydra-queue-runner/build-remote.cc b/src/hydra-queue-runner/build-remote.cc index 0451ccb7..79e5a231 100644 --- a/src/hydra-queue-runner/build-remote.cc +++ b/src/hydra-queue-runner/build-remote.cc @@ -422,6 +422,54 @@ std::map queryPathInfos( return infos; } +void copyPathFromRemote( + Machine::Connection & conn, + NarMemberDatas & narMembers, + 
Store & localStore, + Store & destStore, + const ValidPathInfo & info +) +{ + /* Receive the NAR from the remote and add it to the + destination store. Meanwhile, extract all the info from the + NAR that getBuildOutput() needs. */ + auto source2 = sinkToSource([&](Sink & sink) + { + /* Note: we should only send the command to dump the store + path to the remote if the NAR is actually going to get read + by the destination store, which won't happen if this path + is already valid on the destination store. Since this + lambda function only gets executed if someone tries to read + from source2, we will send the command from here rather + than outside the lambda. */ + conn.to << cmdDumpStorePath << localStore.printStorePath(info.path); + conn.to.flush(); + + TeeSource tee(conn.from, sink); + extractNarData(tee, localStore.printStorePath(info.path), narMembers); + }); + + destStore.addToStore(info, *source2, NoRepair, NoCheckSigs); +} + +void copyPathsFromRemote( + Machine::Connection & conn, + NarMemberDatas & narMembers, + Store & localStore, + Store & destStore, + const std::map & infos +) +{ + auto pathsSorted = reverseTopoSortPaths(infos); + + for (auto & path : pathsSorted) { + auto & info = infos.find(path)->second; + copyPathFromRemote(conn, narMembers, localStore, destStore, info); + } + +} + + void State::buildRemote(ref destStore, Machine::ptr machine, Step::ptr step, unsigned int maxSilentTime, unsigned int buildTimeout, unsigned int repeats, @@ -571,33 +619,7 @@ void State::buildRemote(ref destStore, printMsg(lvlDebug, "copying outputs of ‘%s’ from ‘%s’ (%d bytes)", localStore->printStorePath(step->drvPath), machine->sshName, totalNarSize); - auto pathsSorted = reverseTopoSortPaths(infos); - - for (auto & path : pathsSorted) { - auto & info = infos.find(path)->second; - - /* Receive the NAR from the remote and add it to the - destination store. Meanwhile, extract all the info from the - NAR that getBuildOutput() needs. 
*/ - auto source2 = sinkToSource([&](Sink & sink) - { - /* Note: we should only send the command to dump the store - path to the remote if the NAR is actually going to get read - by the destination store, which won't happen if this path - is already valid on the destination store. Since this - lambda function only gets executed if someone tries to read - from source2, we will send the command from here rather - than outside the lambda. */ - conn.to << cmdDumpStorePath << localStore->printStorePath(path); - conn.to.flush(); - - TeeSource tee(conn.from, sink); - extractNarData(tee, localStore->printStorePath(path), narMembers); - }); - - destStore->addToStore(info, *source2, NoRepair, NoCheckSigs); - } - + copyPathsFromRemote(conn, narMembers, *localStore, *destStore, infos); auto now2 = std::chrono::steady_clock::now(); result.overhead += std::chrono::duration_cast(now2 - now1).count(); From b430d41afd6a4ca0e38343ae2eee4aa6307cf980 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Th=C3=A9ophane=20Hufschmitt?= Date: Mon, 21 Mar 2022 16:33:25 +0100 Subject: [PATCH 087/401] Use the `BuildOptions` more eagerly --- src/hydra-queue-runner/build-remote.cc | 20 ++++---------------- src/hydra-queue-runner/builder.cc | 16 +++++++++------- src/hydra-queue-runner/state.hh | 9 +++++++-- 3 files changed, 20 insertions(+), 25 deletions(-) diff --git a/src/hydra-queue-runner/build-remote.cc b/src/hydra-queue-runner/build-remote.cc index 79e5a231..1e06e501 100644 --- a/src/hydra-queue-runner/build-remote.cc +++ b/src/hydra-queue-runner/build-remote.cc @@ -269,12 +269,6 @@ StorePathSet sendInputs( return inputs; } -struct BuildOptions { - unsigned int maxSilentTime, buildTimeout, repeats; - size_t maxLogSize; - bool enforceDeterminism; -}; - void RemoteResult::updateWithBuildResult(const nix::BuildResult & buildResult) { RemoteResult thisArrow; @@ -337,7 +331,7 @@ BuildResult performBuild( Store & localStore, StorePath drvPath, const BasicDerivation & drv, - const BuildOptions & options, + 
const State::BuildOptions & options, counter & nrStepsBuilding ) { @@ -472,7 +466,7 @@ void copyPathsFromRemote( void State::buildRemote(ref destStore, Machine::ptr machine, Step::ptr step, - unsigned int maxSilentTime, unsigned int buildTimeout, unsigned int repeats, + const BuildOptions & buildOptions, RemoteResult & result, std::shared_ptr activeStep, std::function updateStep, NarMemberDatas & narMembers) @@ -523,7 +517,7 @@ void State::buildRemote(ref destStore, }); try { - handshake(conn, repeats); + handshake(conn, buildOptions.repeats); } catch (EndOfFile & e) { child.pid.wait(); std::string s = chomp(readFile(result.logFile)); @@ -568,13 +562,7 @@ void State::buildRemote(ref destStore, *localStore, step->drvPath, BasicDerivation(*step->drv), - { - .maxSilentTime = maxSilentTime, - .buildTimeout = buildTimeout, - .repeats = repeats, - .maxLogSize = maxLogSize, - .enforceDeterminism = step->isDeterministic, - }, + buildOptions, nrStepsBuilding ); diff --git a/src/hydra-queue-runner/builder.cc b/src/hydra-queue-runner/builder.cc index 89aa7d15..b25b4e63 100644 --- a/src/hydra-queue-runner/builder.cc +++ b/src/hydra-queue-runner/builder.cc @@ -98,8 +98,10 @@ State::StepResult State::doBuildStep(nix::ref destStore, it). */ BuildID buildId; std::optional buildDrvPath; - unsigned int maxSilentTime, buildTimeout; - unsigned int repeats = step->isDeterministic ? 1 : 0; + BuildOptions buildOptions; + buildOptions.repeats = step->isDeterministic ? 
1 : 0; + buildOptions.maxLogSize = maxLogSize; + buildOptions.enforceDeterminism = step->isDeterministic; auto conn(dbPool.get()); @@ -134,18 +136,18 @@ State::StepResult State::doBuildStep(nix::ref destStore, { auto i = jobsetRepeats.find(std::make_pair(build2->projectName, build2->jobsetName)); if (i != jobsetRepeats.end()) - repeats = std::max(repeats, i->second); + buildOptions.repeats = std::max(buildOptions.repeats, i->second); } } if (!build) build = *dependents.begin(); buildId = build->id; buildDrvPath = build->drvPath; - maxSilentTime = build->maxSilentTime; - buildTimeout = build->buildTimeout; + buildOptions.maxSilentTime = build->maxSilentTime; + buildOptions.buildTimeout = build->buildTimeout; printInfo("performing step ‘%s’ %d times on ‘%s’ (needed by build %d and %d others)", - localStore->printStorePath(step->drvPath), repeats + 1, machine->sshName, buildId, (dependents.size() - 1)); + localStore->printStorePath(step->drvPath), buildOptions.repeats + 1, machine->sshName, buildId, (dependents.size() - 1)); } if (!buildOneDone) @@ -206,7 +208,7 @@ State::StepResult State::doBuildStep(nix::ref destStore, try { /* FIXME: referring builds may have conflicting timeouts. 
*/ - buildRemote(destStore, machine, step, maxSilentTime, buildTimeout, repeats, result, activeStep, updateStep, narMembers); + buildRemote(destStore, machine, step, buildOptions, result, activeStep, updateStep, narMembers); } catch (Error & e) { if (activeStep->state_.lock()->cancelled) { printInfo("marking step %d of build %d as cancelled", stepNr, buildId); diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh index 6292a2db..f4d8ccce 100644 --- a/src/hydra-queue-runner/state.hh +++ b/src/hydra-queue-runner/state.hh @@ -447,6 +447,12 @@ private: public: State(); + struct BuildOptions { + unsigned int maxSilentTime, buildTimeout, repeats; + size_t maxLogSize; + bool enforceDeterminism; + }; + private: nix::MaintainCount startDbUpdate(); @@ -531,8 +537,7 @@ private: void buildRemote(nix::ref destStore, Machine::ptr machine, Step::ptr step, - unsigned int maxSilentTime, unsigned int buildTimeout, - unsigned int repeats, + const BuildOptions & buildOptions, RemoteResult & result, std::shared_ptr activeStep, std::function updateStep, NarMemberDatas & narMembers); From 92b627ac1b3e0f53f6b35fc5940406c12fa977da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Th=C3=A9ophane=20Hufschmitt?= <7226587+thufschmitt@users.noreply.github.com> Date: Thu, 24 Mar 2022 09:39:24 +0100 Subject: [PATCH 088/401] Remove an accidental re-indenting of a comment Co-authored-by: Eelco Dolstra --- src/hydra-queue-runner/build-remote.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/hydra-queue-runner/build-remote.cc b/src/hydra-queue-runner/build-remote.cc index 1e06e501..901bbc89 100644 --- a/src/hydra-queue-runner/build-remote.cc +++ b/src/hydra-queue-runner/build-remote.cc @@ -233,9 +233,9 @@ StorePathSet sendInputs( } /* Ensure that the inputs exist in the destination store. This is - a no-op for regular stores, but for the binary cache store, - this will copy the inputs to the binary cache from the local - store. 
*/ + a no-op for regular stores, but for the binary cache store, + this will copy the inputs to the binary cache from the local + store. */ if (localStore.getUri() != destStore.getUri()) { StorePathSet closure; localStore.computeFSClosure(step.drv->inputSrcs, closure); From 6e571e26ff068386bc949eaab6646e90613b23c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Th=C3=A9ophane=20Hufschmitt?= Date: Thu, 24 Mar 2022 14:27:45 +0100 Subject: [PATCH 089/401] Build the resolved derivation and not the original one --- src/hydra-queue-runner/build-remote.cc | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/src/hydra-queue-runner/build-remote.cc b/src/hydra-queue-runner/build-remote.cc index 901bbc89..62461a65 100644 --- a/src/hydra-queue-runner/build-remote.cc +++ b/src/hydra-queue-runner/build-remote.cc @@ -203,7 +203,7 @@ void handshake(Machine::Connection & conn, unsigned int repeats) throw Error("machine ‘%1%’ does not support repeating a build; please upgrade it to Nix 1.12", conn.machine->sshName); } -StorePathSet sendInputs( +BasicDerivation sendInputs( State & state, Step & step, Store & localStore, @@ -214,19 +214,13 @@ StorePathSet sendInputs( counter & nrStepsCopyingTo ) { - - StorePathSet inputs; BasicDerivation basicDrv(*step.drv); - for (auto & p : step.drv->inputSrcs) - inputs.insert(p); - for (auto & input : step.drv->inputDrvs) { auto drv2 = localStore.readDerivation(input.first); for (auto & name : input.second) { if (auto i = get(drv2.outputs, name)) { auto outPath = i->path(localStore, drv2.name, name); - inputs.insert(*outPath); basicDrv.inputSrcs.insert(*outPath); } } @@ -255,10 +249,10 @@ StorePathSet sendInputs( /* Copy the input closure. 
*/ if (conn.machine->isLocalhost()) { StorePathSet closure; - destStore.computeFSClosure(inputs, closure); + destStore.computeFSClosure(basicDrv.inputSrcs, closure); copyPaths(destStore, localStore, closure, NoRepair, NoCheckSigs, NoSubstitute); } else { - copyClosureTo(conn.machine->state->sendLock, destStore, conn.from, conn.to, inputs, true); + copyClosureTo(conn.machine->state->sendLock, destStore, conn.from, conn.to, basicDrv.inputSrcs, true); } auto now2 = std::chrono::steady_clock::now(); @@ -266,7 +260,7 @@ StorePathSet sendInputs( overhead += std::chrono::duration_cast(now2 - now1).count(); } - return inputs; + return basicDrv; } void RemoteResult::updateWithBuildResult(const nix::BuildResult & buildResult) @@ -535,8 +529,7 @@ void State::buildRemote(ref destStore, copy the immediate sources of the derivation and the required outputs of the input derivations. */ updateStep(ssSendingInputs); - - StorePathSet inputs = sendInputs(*this, *step, *localStore, *destStore, conn, result.overhead, nrStepsWaiting, nrStepsCopyingTo); + BasicDerivation resolvedDrv = sendInputs(*this, *step, *localStore, *destStore, conn, result.overhead, nrStepsWaiting, nrStepsCopyingTo); logFileDel.cancel(); @@ -561,7 +554,7 @@ void State::buildRemote(ref destStore, conn, *localStore, step->drvPath, - BasicDerivation(*step->drv), + resolvedDrv, buildOptions, nrStepsBuilding ); From 9cdc5aceed59d96a41ab866d2e8f575bdca64d70 Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Tue, 29 Mar 2022 08:41:19 -0700 Subject: [PATCH 090/401] hydra-queue-runner: log message before and after exporter is started This way, if something goes wrong between the two, it's easier to narrow down where the issue lies. 
--- src/hydra-queue-runner/hydra-queue-runner.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index f20c8e73..062b0644 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -758,13 +758,15 @@ void State::run(BuildID buildOne) if (!lock) throw Error("hydra-queue-runner is already running"); + std::cout << "Starting the Prometheus exporter on port " << exposerPort << std::endl; + /* Set up simple exporter, to show that we're still alive. */ std::string metricsAddress{"127.0.0.1"}; prometheus::Exposer exposer{metricsAddress + ":" + std::to_string(metricsPort)}; auto exposerPort = exposer.GetListeningPorts().front(); exposer.RegisterCollectable(registry); - std::cout << "Starting the Prometheus exporter, listening on " + std::cout << "Started the Prometheus exporter, listening on " << "http://" << metricsAddress << ":" << exposerPort << "/metrics" << std::endl; From 905a7a7bebb1be02dd0b86735407912b54bfcda1 Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Tue, 29 Mar 2022 08:46:43 -0700 Subject: [PATCH 091/401] hydra-queue-runner: read metrics port from `queue_runner_metrics_port` config --- src/hydra-queue-runner/hydra-queue-runner.cc | 2 +- src/hydra-queue-runner/state.hh | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index 062b0644..40babf33 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -47,8 +47,8 @@ State::State(uint16_t metricsPort) , maxLogSize(config->getIntOption("max_log_size", 64ULL << 20)) , uploadLogsToBinaryCache(config->getBoolOption("upload_logs_to_binary_cache", false)) , rootsDir(config->getStrOption("gc_roots_dir", fmt("%s/gcroots/per-user/%s/hydra-roots", settings.nixStateDir, 
getEnvOrDie("LOGNAME")))) + , metricsPort(config->getIntOption("queue_runner_metrics_port", metricsPort)) , registry(std::make_shared()) - , metricsPort(metricsPort) { hydraData = getEnvOrDie("HYDRA_DATA"); diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh index 4add0dbd..5299edea 100644 --- a/src/hydra-queue-runner/state.hh +++ b/src/hydra-queue-runner/state.hh @@ -434,10 +434,10 @@ private: via gc_roots_dir. */ nix::Path rootsDir; - std::shared_ptr registry; - uint16_t metricsPort; + std::shared_ptr registry; + public: State(uint16_t metricsPort); From 5ddb9a98ca674195d53c6461fe4b165692588ad8 Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Tue, 29 Mar 2022 08:47:41 -0700 Subject: [PATCH 092/401] fixup! hydra-queue-runner: log message before and after exporter is started --- src/hydra-queue-runner/hydra-queue-runner.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index 40babf33..c7d3b9b2 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -758,7 +758,7 @@ void State::run(BuildID buildOne) if (!lock) throw Error("hydra-queue-runner is already running"); - std::cout << "Starting the Prometheus exporter on port " << exposerPort << std::endl; + std::cout << "Starting the Prometheus exporter on port " << metricsPort << std::endl; /* Set up simple exporter, to show that we're still alive. 
*/ std::string metricsAddress{"127.0.0.1"}; From 928b3b8268fc2f1d2534f1cf7e524d05803105fd Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Tue, 29 Mar 2022 10:42:07 -0700 Subject: [PATCH 093/401] hydra-queue-runner: fix priority of flag over config file --- src/hydra-queue-runner/hydra-queue-runner.cc | 14 +++++++++----- src/hydra-queue-runner/state.hh | 2 +- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index c7d3b9b2..3834f625 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -39,7 +39,7 @@ std::string getEnvOrDie(const std::string & key) } -State::State(uint16_t metricsPort) +State::State(std::optional metricsPortOpt) : config(std::make_unique()) , maxUnsupportedTime(config->getIntOption("max_unsupported_time", 0)) , dbPool(config->getIntOption("max_db_connections", 128)) @@ -47,13 +47,17 @@ State::State(uint16_t metricsPort) , maxLogSize(config->getIntOption("max_log_size", 64ULL << 20)) , uploadLogsToBinaryCache(config->getBoolOption("upload_logs_to_binary_cache", false)) , rootsDir(config->getStrOption("gc_roots_dir", fmt("%s/gcroots/per-user/%s/hydra-roots", settings.nixStateDir, getEnvOrDie("LOGNAME")))) - , metricsPort(config->getIntOption("queue_runner_metrics_port", metricsPort)) + , metricsPort(config->getIntOption("queue_runner_metrics_port", 9099)) , registry(std::make_shared()) { hydraData = getEnvOrDie("HYDRA_DATA"); logDir = canonPath(hydraData + "/build-logs"); + if (metricsPortOpt.has_value()) { + metricsPort = metricsPortOpt.value(); + } + /* handle deprecated store specification */ if (config->getStrOption("store_mode") != "") throw Error("store_mode in hydra.conf is deprecated, please use store_uri"); @@ -880,7 +884,7 @@ int main(int argc, char * * argv) bool unlock = false; bool status = false; BuildID buildOne = 0; - uint16_t metricsPort = 0; + std::optional 
metricsPortOpt = std::nullopt; parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) { if (*arg == "--unlock") @@ -897,7 +901,7 @@ int main(int argc, char * * argv) if (*p > std::numeric_limits::max()) { throw Error("'--port' has a maximum of 65535"); } else { - metricsPort = *p; + metricsPortOpt = *p; } } else { throw Error("'--port' requires a numeric port (0 for a random, usable port; max 65535)"); @@ -910,7 +914,7 @@ int main(int argc, char * * argv) settings.verboseBuild = true; settings.lockCPU = false; - State state{metricsPort}; + State state{metricsPortOpt}; if (status) state.showStatus(); else if (unlock) diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh index 5299edea..63112c16 100644 --- a/src/hydra-queue-runner/state.hh +++ b/src/hydra-queue-runner/state.hh @@ -439,7 +439,7 @@ private: std::shared_ptr registry; public: - State(uint16_t metricsPort); + State(std::optional metricsPortOpt); private: From 4789eba92c65df52a204e4d7a4d3ee6f512df28e Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Tue, 29 Mar 2022 10:55:28 -0700 Subject: [PATCH 094/401] hydra-queue-runer: split metrics functionality into its own function --- src/hydra-queue-runner/hydra-queue-runner.cc | 30 +++++++++++++------- src/hydra-queue-runner/state.hh | 2 ++ 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index 3834f625..992b9995 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -750,6 +750,24 @@ void State::unlock() } +void State::runMetricsExporter() +{ + std::cout << "Starting the Prometheus exporter on port " << metricsPort << std::endl; + + /* Set up simple exporter, to show that we're still alive. 
*/ + std::string metricsAddress{"127.0.0.1"}; + prometheus::Exposer exposer{metricsAddress + ":" + std::to_string(metricsPort)}; + auto exposerPort = exposer.GetListeningPorts().front(); + exposer.RegisterCollectable(registry); + + std::cout << "Started the Prometheus exporter, listening on " + << "http://" << metricsAddress << ":" << exposerPort << "/metrics" + << std::endl; + + while (true) {}; +} + + void State::run(BuildID buildOne) { /* Can't be bothered to shut down cleanly. Goodbye! */ @@ -762,17 +780,7 @@ void State::run(BuildID buildOne) if (!lock) throw Error("hydra-queue-runner is already running"); - std::cout << "Starting the Prometheus exporter on port " << metricsPort << std::endl; - - /* Set up simple exporter, to show that we're still alive. */ - std::string metricsAddress{"127.0.0.1"}; - prometheus::Exposer exposer{metricsAddress + ":" + std::to_string(metricsPort)}; - auto exposerPort = exposer.GetListeningPorts().front(); - exposer.RegisterCollectable(registry); - - std::cout << "Started the Prometheus exporter, listening on " - << "http://" << metricsAddress << ":" << exposerPort << "/metrics" - << std::endl; + std::thread(&State::runMetricsExporter, this).detach(); Store::Params localParams; localParams["max-connections"] = "16"; diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh index 63112c16..fb533559 100644 --- a/src/hydra-queue-runner/state.hh +++ b/src/hydra-queue-runner/state.hh @@ -549,6 +549,8 @@ private: void addRoot(const nix::StorePath & storePath); + void runMetricsExporter(); + public: void showStatus(); From 9c1f36c47c2ff19ab8d47760f33cd9d5c32cb229 Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Tue, 29 Mar 2022 11:33:40 -0700 Subject: [PATCH 095/401] t/lib/HydraTestContext: set queue runner port to 0 This makes the exposer choose a random, available port. 
--- t/Hydra/Config/include.t | 1 + t/lib/HydraTestContext.pm | 1 + 2 files changed, 2 insertions(+) diff --git a/t/Hydra/Config/include.t b/t/Hydra/Config/include.t index fe2dd1ed..63186f87 100644 --- a/t/Hydra/Config/include.t +++ b/t/Hydra/Config/include.t @@ -20,6 +20,7 @@ write_file($ctx{'tmpdir'} . "/bar.conf", q| |); is(getHydraConfig(), { + queue_runner_metrics_port => 0, foo => { bar => "baz" } }, "Nested includes work."); diff --git a/t/lib/HydraTestContext.pm b/t/lib/HydraTestContext.pm index ade12280..ce933c09 100644 --- a/t/lib/HydraTestContext.pm +++ b/t/lib/HydraTestContext.pm @@ -51,6 +51,7 @@ sub new { $ENV{'HYDRA_CONFIG'} = "$dir/hydra.conf"; my $hydra_config = $opts{'hydra_config'} || ""; + $hydra_config = "queue_runner_metrics_port = 0\n" . $hydra_config; if ($opts{'use_external_destination_store'} // 1) { $hydra_config = "store_uri = file:$dir/nix/dest-store\n" . $hydra_config; } From 3b048ed136f36d516ea43e2b3af86025cb6f47c8 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 29 Mar 2022 15:28:47 -0400 Subject: [PATCH 096/401] Revert "Revert "Use `copyClosure` instead of `computeFSClosure` + `copyPaths`"" This reverts commit 8e3ada2afcc2dd5153d3ae162afbb0633a570285. --- src/hydra-queue-runner/build-remote.cc | 6 +++--- src/hydra-queue-runner/queue-monitor.cc | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/hydra-queue-runner/build-remote.cc b/src/hydra-queue-runner/build-remote.cc index 464a35c8..69c82e72 100644 --- a/src/hydra-queue-runner/build-remote.cc +++ b/src/hydra-queue-runner/build-remote.cc @@ -287,9 +287,9 @@ void State::buildRemote(ref destStore, this will copy the inputs to the binary cache from the local store. 
*/ if (localStore != std::shared_ptr(destStore)) { - StorePathSet closure; - localStore->computeFSClosure(step->drv->inputSrcs, closure); - copyPaths(*localStore, *destStore, closure, NoRepair, NoCheckSigs, NoSubstitute); + copyClosure(*localStore, *destStore, + step->drv->inputSrcs, + NoRepair, NoCheckSigs, NoSubstitute); } { diff --git a/src/hydra-queue-runner/queue-monitor.cc b/src/hydra-queue-runner/queue-monitor.cc index 49caf8e3..3f19d36a 100644 --- a/src/hydra-queue-runner/queue-monitor.cc +++ b/src/hydra-queue-runner/queue-monitor.cc @@ -513,9 +513,9 @@ Step::ptr State::createStep(ref destStore, // FIXME: should copy directly from substituter to destStore. } - StorePathSet closure; - localStore->computeFSClosure({*path}, closure); - copyPaths(*localStore, *destStore, closure, NoRepair, CheckSigs, NoSubstitute); + copyClosure(*localStore, *destStore, + StorePathSet { *path }, + NoRepair, CheckSigs, NoSubstitute); time_t stopTime = time(0); From fd3690a0c13285925ac763886360cf51dbec0d49 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 29 Mar 2022 15:29:23 -0400 Subject: [PATCH 097/401] flake.lock: Update MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Flake lock file updates: • Updated input 'nix': 'github:NixOS/nix/a6ba313a0aac3b6e2fef434cb42d190a0849238e' (2021-08-10) → 'github:NixOS/nix/a1cd7e58606a41fcf62bf8637804cf8306f17f62' (2022-01-24) • Updated input 'nix/lowdown-src': 'github:kristapsdz/lowdown/148f9b2f586c41b7e36e73009db43ea68c7a1a4d' (2021-04-03) → 'github:kristapsdz/lowdown/d2c2b44ff6c27b936ec27358a2653caaef8f73b8' (2021-10-06) • Updated input 'nix/nixpkgs': 'github:NixOS/nixpkgs/f77036342e2b690c61c97202bf48f2ce13acc022' (2021-06-28) → 'github:NixOS/nixpkgs/82891b5e2c2359d7e58d08849e4c89511ab94234' (2021-09-28) • Added input 'nix/nixpkgs-regression': 'github:NixOS/nixpkgs/215d4d0fd80ca5163643b03a33fde804a29cc1e2' (2022-01-24) --- flake.lock | 43 ++++++++++++++++++++++++++++++------------- 1 
file changed, 30 insertions(+), 13 deletions(-) diff --git a/flake.lock b/flake.lock index fa71ceb5..e4bf8c71 100644 --- a/flake.lock +++ b/flake.lock @@ -3,16 +3,15 @@ "lowdown-src": { "flake": false, "locked": { - "lastModified": 1617481909, - "narHash": "sha256-SqnfOFuLuVRRNeVJr1yeEPJue/qWoCp5N6o5Kr///p4=", + "lastModified": 1633514407, + "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=", "owner": "kristapsdz", "repo": "lowdown", - "rev": "148f9b2f586c41b7e36e73009db43ea68c7a1a4d", + "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8", "type": "github" }, "original": { "owner": "kristapsdz", - "ref": "VERSION_0_8_4", "repo": "lowdown", "type": "github" } @@ -20,28 +19,31 @@ "nix": { "inputs": { "lowdown-src": "lowdown-src", - "nixpkgs": "nixpkgs" + "nixpkgs": "nixpkgs", + "nixpkgs-regression": "nixpkgs-regression" }, "locked": { - "lastModified": 1628586117, - "narHash": "sha256-8hS4xy7fq3z9XZIMYm4sQi9SzhcYqEJfdbwgDePoWuc=", + "lastModified": 1643066034, + "narHash": "sha256-xEPeMcNJVOeZtoN+d+aRwolpW8mFSEQx76HTRdlhPhg=", "owner": "NixOS", "repo": "nix", - "rev": "a6ba313a0aac3b6e2fef434cb42d190a0849238e", + "rev": "a1cd7e58606a41fcf62bf8637804cf8306f17f62", "type": "github" }, "original": { - "id": "nix", - "type": "indirect" + "owner": "NixOS", + "ref": "2.6.0", + "repo": "nix", + "type": "github" } }, "nixpkgs": { "locked": { - "lastModified": 1624862269, - "narHash": "sha256-JFcsh2+7QtfKdJFoPibLFPLgIW6Ycnv8Bts9a7RYme0=", + "lastModified": 1632864508, + "narHash": "sha256-d127FIvGR41XbVRDPVvozUPQ/uRHbHwvfyKHwEt5xFM=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "f77036342e2b690c61c97202bf48f2ce13acc022", + "rev": "82891b5e2c2359d7e58d08849e4c89511ab94234", "type": "github" }, "original": { @@ -50,6 +52,21 @@ "type": "indirect" } }, + "nixpkgs-regression": { + "locked": { + "lastModified": 1643052045, + "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": 
"215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + }, + "original": { + "id": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "indirect" + } + }, "root": { "inputs": { "nix": "nix", From 20a8437094c32776752c218bc50996de07dcceb1 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 29 Mar 2022 15:29:33 -0400 Subject: [PATCH 098/401] flake.nix: set nix to 2.6.0 --- flake.nix | 1 + 1 file changed, 1 insertion(+) diff --git a/flake.nix b/flake.nix index 56cb2960..d69048ed 100644 --- a/flake.nix +++ b/flake.nix @@ -2,6 +2,7 @@ description = "A Nix-based continuous build system"; inputs.nixpkgs.follows = "nix/nixpkgs"; + inputs.nix.url = github:NixOS/nix/2.6.0; outputs = { self, nixpkgs, nix }: let From 98da457e16664ba9fc8d07d8a54d1cd49d4ba4d5 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 29 Mar 2022 15:31:11 -0400 Subject: [PATCH 099/401] nix: 2.7.0 --- flake.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flake.nix b/flake.nix index d69048ed..7c21b68d 100644 --- a/flake.nix +++ b/flake.nix @@ -2,7 +2,7 @@ description = "A Nix-based continuous build system"; inputs.nixpkgs.follows = "nix/nixpkgs"; - inputs.nix.url = github:NixOS/nix/2.6.0; + inputs.nix.url = github:NixOS/nix/2.7.0; outputs = { self, nixpkgs, nix }: let From 4368ff5d5bfe546952ac3aa218575691919a7566 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 29 Mar 2022 15:33:08 -0400 Subject: [PATCH 100/401] flake.lock: Add MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Flake lock file updates: • Added input 'nix': 'github:NixOS/nix/ffe155abd36366a870482625543f9bf924a58281' (2022-03-07) • Added input 'nix/lowdown-src': 'github:kristapsdz/lowdown/d2c2b44ff6c27b936ec27358a2653caaef8f73b8' (2021-10-06) • Added input 'nix/nixpkgs': 'github:NixOS/nixpkgs/82891b5e2c2359d7e58d08849e4c89511ab94234' (2021-09-28) • Added input 'nix/nixpkgs-regression': 
'github:NixOS/nixpkgs/215d4d0fd80ca5163643b03a33fde804a29cc1e2' (2022-01-24) • Added input 'nixpkgs': follows 'nix/nixpkgs' --- flake.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/flake.lock b/flake.lock index e4bf8c71..d5c5e613 100644 --- a/flake.lock +++ b/flake.lock @@ -23,16 +23,16 @@ "nixpkgs-regression": "nixpkgs-regression" }, "locked": { - "lastModified": 1643066034, - "narHash": "sha256-xEPeMcNJVOeZtoN+d+aRwolpW8mFSEQx76HTRdlhPhg=", + "lastModified": 1646680282, + "narHash": "sha256-m8tqCS6uHveDon5GSro5yZor9H+sHeh+v/veF1IGw24=", "owner": "NixOS", "repo": "nix", - "rev": "a1cd7e58606a41fcf62bf8637804cf8306f17f62", + "rev": "ffe155abd36366a870482625543f9bf924a58281", "type": "github" }, "original": { "owner": "NixOS", - "ref": "2.6.0", + "ref": "2.7.0", "repo": "nix", "type": "github" } From c64c5f0a7e63a74672f1ea66e476ca56abe1b10a Mon Sep 17 00:00:00 2001 From: ajs124 Date: Thu, 10 Mar 2022 02:01:48 +0100 Subject: [PATCH 101/401] hydra-queue-runner: rename build-result.hh to hydra-build-result.hh --- src/hydra-queue-runner/Makefile.am | 2 +- src/hydra-queue-runner/build-result.cc | 2 +- src/hydra-queue-runner/builder.cc | 2 +- .../{build-result.hh => hydra-build-result.hh} | 0 src/hydra-queue-runner/hydra-queue-runner.cc | 2 +- src/hydra-queue-runner/queue-monitor.cc | 2 +- 6 files changed, 5 insertions(+), 5 deletions(-) rename src/hydra-queue-runner/{build-result.hh => hydra-build-result.hh} (100%) diff --git a/src/hydra-queue-runner/Makefile.am b/src/hydra-queue-runner/Makefile.am index ea852334..57808608 100644 --- a/src/hydra-queue-runner/Makefile.am +++ b/src/hydra-queue-runner/Makefile.am @@ -2,7 +2,7 @@ bin_PROGRAMS = hydra-queue-runner hydra_queue_runner_SOURCES = hydra-queue-runner.cc queue-monitor.cc dispatcher.cc \ builder.cc build-result.cc build-remote.cc \ - build-result.hh counter.hh state.hh db.hh \ + hydra-build-result.hh counter.hh state.hh db.hh \ nar-extractor.cc nar-extractor.hh hydra_queue_runner_LDADD 
= $(NIX_LIBS) -lpqxx hydra_queue_runner_CXXFLAGS = $(NIX_CFLAGS) -Wall -I ../libhydra -Wno-deprecated-declarations diff --git a/src/hydra-queue-runner/build-result.cc b/src/hydra-queue-runner/build-result.cc index f69bf0df..492e5c0f 100644 --- a/src/hydra-queue-runner/build-result.cc +++ b/src/hydra-queue-runner/build-result.cc @@ -1,4 +1,4 @@ -#include "build-result.hh" +#include "hydra-build-result.hh" #include "store-api.hh" #include "util.hh" #include "fs-accessor.hh" diff --git a/src/hydra-queue-runner/builder.cc b/src/hydra-queue-runner/builder.cc index 89aa7d15..37022522 100644 --- a/src/hydra-queue-runner/builder.cc +++ b/src/hydra-queue-runner/builder.cc @@ -1,7 +1,7 @@ #include #include "state.hh" -#include "build-result.hh" +#include "hydra-build-result.hh" #include "finally.hh" #include "binary-cache-store.hh" diff --git a/src/hydra-queue-runner/build-result.hh b/src/hydra-queue-runner/hydra-build-result.hh similarity index 100% rename from src/hydra-queue-runner/build-result.hh rename to src/hydra-queue-runner/hydra-build-result.hh diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index 3297730c..d6f83674 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -7,7 +7,7 @@ #include #include "state.hh" -#include "build-result.hh" +#include "hydra-build-result.hh" #include "store-api.hh" #include "remote-store.hh" diff --git a/src/hydra-queue-runner/queue-monitor.cc b/src/hydra-queue-runner/queue-monitor.cc index 3f19d36a..d42fcfd9 100644 --- a/src/hydra-queue-runner/queue-monitor.cc +++ b/src/hydra-queue-runner/queue-monitor.cc @@ -1,5 +1,5 @@ #include "state.hh" -#include "build-result.hh" +#include "hydra-build-result.hh" #include "globals.hh" #include From 089da272c76a8e562239b64cb71fb5b43716efa5 Mon Sep 17 00:00:00 2001 From: ajs124 Date: Wed, 9 Mar 2022 23:50:30 +0100 Subject: [PATCH 102/401] fix build against nix 2.7.0 fix build after 
such commits as df552ff53e68dff8ca360adbdbea214ece1d08ee and e862833ec662c1bffbe31b9a229147de391e801a --- src/hydra-eval-jobs/hydra-eval-jobs.cc | 16 ++++++++-------- src/hydra-queue-runner/build-remote.cc | 11 ++++++----- src/hydra-queue-runner/build-result.cc | 2 +- src/hydra-queue-runner/hydra-queue-runner.cc | 16 ++++++++-------- src/hydra-queue-runner/nar-extractor.cc | 2 +- src/hydra-queue-runner/queue-monitor.cc | 10 +++++----- src/hydra-queue-runner/state.hh | 1 + src/libhydra/db.hh | 2 +- src/libhydra/hydra-config.hh | 2 +- 9 files changed, 32 insertions(+), 30 deletions(-) diff --git a/src/hydra-eval-jobs/hydra-eval-jobs.cc b/src/hydra-eval-jobs/hydra-eval-jobs.cc index 44a273a9..ba92113e 100644 --- a/src/hydra-eval-jobs/hydra-eval-jobs.cc +++ b/src/hydra-eval-jobs/hydra-eval-jobs.cc @@ -63,13 +63,13 @@ struct MyArgs : MixEvalArgs, MixCommonArgs static MyArgs myArgs; -static std::string queryMetaStrings(EvalState & state, DrvInfo & drv, const string & name, const string & subAttribute) +static std::string queryMetaStrings(EvalState & state, DrvInfo & drv, const std::string & name, const std::string & subAttribute) { Strings res; std::function rec; rec = [&](Value & v) { - state.forceValue(v); + state.forceValue(v, noPos); if (v.type() == nString) res.push_back(v.string.s); else if (v.isList()) @@ -78,7 +78,7 @@ static std::string queryMetaStrings(EvalState & state, DrvInfo & drv, const stri else if (v.type() == nAttrs) { auto a = v.attrs->find(state.symbols.create(subAttribute)); if (a != v.attrs->end()) - res.push_back(state.forceString(*a->value)); + res.push_back(std::string(state.forceString(*a->value))); } }; @@ -113,7 +113,7 @@ static void worker( callFlake(state, lockedFlake, *vFlake); auto vOutputs = vFlake->attrs->get(state.symbols.create("outputs"))->value; - state.forceValue(*vOutputs); + state.forceValue(*vOutputs, noPos); auto aHydraJobs = vOutputs->attrs->get(state.symbols.create("hydraJobs")); if (!aHydraJobs) @@ -157,7 +157,7 @@ static 
void worker( if (drv->querySystem() == "unknown") throw EvalError("derivation must have a 'system' attribute"); - auto drvPath = drv->queryDrvPath(); + auto drvPath = state.store->printStorePath(drv->requireDrvPath()); nlohmann::json job; @@ -186,13 +186,13 @@ static void worker( for (auto & i : context) if (i.at(0) == '!') { size_t index = i.find("!", 1); - job["constituents"].push_back(string(i, index + 1)); + job["constituents"].push_back(std::string(i, index + 1)); } state.forceList(*a->value, *a->pos); for (unsigned int n = 0; n < a->value->listSize(); ++n) { auto v = a->value->listElems()[n]; - state.forceValue(*v); + state.forceValue(*v, noPos); if (v->type() == nString) job["namedConstituents"].push_back(state.forceStringNoCtx(*v)); } @@ -210,7 +210,7 @@ static void worker( nlohmann::json out; for (auto & j : outputs) - out[j.first] = j.second; + out[j.first] = state.store->printStorePath(j.second); job["outputs"] = std::move(out); reply["job"] = std::move(job); diff --git a/src/hydra-queue-runner/build-remote.cc b/src/hydra-queue-runner/build-remote.cc index 69c82e72..bdbd44b9 100644 --- a/src/hydra-queue-runner/build-remote.cc +++ b/src/hydra-queue-runner/build-remote.cc @@ -5,6 +5,7 @@ #include #include +#include "build-result.hh" #include "serve-protocol.hh" #include "state.hh" #include "util.hh" @@ -49,7 +50,7 @@ static Strings extraStoreArgs(std::string & machine) static void openConnection(Machine::ptr machine, Path tmpDir, int stderrFD, Child & child) { - string pgmName; + std::string pgmName; Pipe to, from; to.create(); from.create(); @@ -81,7 +82,7 @@ static void openConnection(Machine::ptr machine, Path tmpDir, int stderrFD, Chil if (machine->sshPublicHostKey != "") { Path fileName = tmpDir + "/host-key"; auto p = machine->sshName.find("@"); - string host = p != string::npos ? string(machine->sshName, p + 1) : machine->sshName; + std::string host = p != std::string::npos ? 
std::string(machine->sshName, p + 1) : machine->sshName; writeFile(fileName, host + " " + machine->sshPublicHostKey + "\n"); append(argv, {"-oUserKnownHostsFile=" + fileName}); } @@ -185,8 +186,8 @@ void State::buildRemote(ref destStore, { assert(BuildResult::TimedOut == 8); - string base(step->drvPath.to_string()); - result.logFile = logDir + "/" + string(base, 0, 2) + "/" + string(base, 2); + std::string base(step->drvPath.to_string()); + result.logFile = logDir + "/" + std::string(base, 0, 2) + "/" + std::string(base, 2); AutoDelete autoDelete(result.logFile, false); createDirs(dirOf(result.logFile)); @@ -249,7 +250,7 @@ void State::buildRemote(ref destStore, } catch (EndOfFile & e) { child.pid.wait(); - string s = chomp(readFile(result.logFile)); + std::string s = chomp(readFile(result.logFile)); throw Error("cannot connect to ‘%1%’: %2%", machine->sshName, s); } diff --git a/src/hydra-queue-runner/build-result.cc b/src/hydra-queue-runner/build-result.cc index 492e5c0f..ea8b4a6a 100644 --- a/src/hydra-queue-runner/build-result.cc +++ b/src/hydra-queue-runner/build-result.cc @@ -78,7 +78,7 @@ BuildOutput getBuildOutput( product.type = match[1]; product.subtype = match[2]; std::string s(match[3]); - product.path = s[0] == '"' ? string(s, 1, s.size() - 2) : s; + product.path = s[0] == '"' ? 
std::string(s, 1, s.size() - 2) : s; product.defaultPath = match[5]; /* Ensure that the path exists and points into the Nix diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index d6f83674..615e470b 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -87,7 +87,7 @@ void State::parseMachines(const std::string & contents) } for (auto line : tokenizeString(contents, "\n")) { - line = trim(string(line, 0, line.find('#'))); + line = trim(std::string(line, 0, line.find('#'))); auto tokens = tokenizeString>(line); if (tokens.size() < 3) continue; tokens.resize(8); @@ -95,7 +95,7 @@ void State::parseMachines(const std::string & contents) auto machine = std::make_shared(); machine->sshName = tokens[0]; machine->systemTypes = tokenizeString(tokens[1], ","); - machine->sshKey = tokens[2] == "-" ? string("") : tokens[2]; + machine->sshKey = tokens[2] == "-" ? std::string("") : tokens[2]; if (tokens[3] != "") machine->maxJobs = string2IntmaxJobs)>(tokens[3]).value(); else @@ -149,7 +149,7 @@ void State::parseMachines(const std::string & contents) void State::monitorMachinesFile() { - string defaultMachinesFile = "/etc/nix/machines"; + std::string defaultMachinesFile = "/etc/nix/machines"; auto machinesFiles = tokenizeString>( getEnv("NIX_REMOTE_SYSTEMS").value_or(pathExists(defaultMachinesFile) ? 
defaultMachinesFile : ""), ":"); @@ -191,7 +191,7 @@ void State::monitorMachinesFile() debug("reloading machines files"); - string contents; + std::string contents; for (auto & machinesFile : machinesFiles) { try { contents += readFile(machinesFile); @@ -308,7 +308,7 @@ void State::finishBuildStep(pqxx::work & txn, const RemoteResult & result, int State::createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime, - Build::ptr build, const StorePath & drvPath, const string & outputName, const StorePath & storePath) + Build::ptr build, const StorePath & drvPath, const std::string & outputName, const StorePath & storePath) { restart: auto stepNr = allocBuildStep(txn, build->id); @@ -683,14 +683,14 @@ void State::showStatus() auto conn(dbPool.get()); receiver statusDumped(*conn, "status_dumped"); - string status; + std::string status; bool barf = false; /* Get the last JSON status dump from the database. */ { pqxx::work txn(*conn); auto res = txn.exec("select status from SystemStatus where what = 'queue-runner'"); - if (res.size()) status = res[0][0].as(); + if (res.size()) status = res[0][0].as(); } if (status != "") { @@ -710,7 +710,7 @@ void State::showStatus() { pqxx::work txn(*conn); auto res = txn.exec("select status from SystemStatus where what = 'queue-runner'"); - if (res.size()) status = res[0][0].as(); + if (res.size()) status = res[0][0].as(); } } diff --git a/src/hydra-queue-runner/nar-extractor.cc b/src/hydra-queue-runner/nar-extractor.cc index 260296c9..9f0eb431 100644 --- a/src/hydra-queue-runner/nar-extractor.cc +++ b/src/hydra-queue-runner/nar-extractor.cc @@ -64,7 +64,7 @@ struct Extractor : ParseSink } } - void createSymlink(const Path & path, const string & target) override + void createSymlink(const Path & path, const std::string & target) override { members.insert_or_assign(prefix + path, NarMemberData { .type = FSAccessor::Type::tSymlink }); } diff --git a/src/hydra-queue-runner/queue-monitor.cc 
b/src/hydra-queue-runner/queue-monitor.cc index d42fcfd9..6a5a82db 100644 --- a/src/hydra-queue-runner/queue-monitor.cc +++ b/src/hydra-queue-runner/queue-monitor.cc @@ -111,12 +111,12 @@ bool State::getQueuedBuilds(Connection & conn, if (builds_->count(id)) continue; auto build = std::make_shared( - localStore->parseStorePath(row["drvPath"].as())); + localStore->parseStorePath(row["drvPath"].as())); build->id = id; build->jobsetId = row["jobset_id"].as(); - build->projectName = row["project"].as(); - build->jobsetName = row["jobset"].as(); - build->jobName = row["job"].as(); + build->projectName = row["project"].as(); + build->jobsetName = row["jobset"].as(); + build->jobName = row["job"].as(); build->maxSilentTime = row["maxsilent"].as(); build->buildTimeout = row["timeout"].as(); build->timestamp = row["timestamp"].as(); @@ -620,7 +620,7 @@ void State::processJobsetSharesChange(Connection & conn) auto res = txn.exec("select project, name, schedulingShares from Jobsets"); for (auto const & row : res) { auto jobsets_(jobsets.lock()); - auto i = jobsets_->find(std::make_pair(row["project"].as(), row["name"].as())); + auto i = jobsets_->find(std::make_pair(row["project"].as(), row["name"].as())); if (i == jobsets_->end()) continue; i->second->setShares(row["schedulingShares"].as()); } diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh index 8f303d28..7c375cb9 100644 --- a/src/hydra-queue-runner/state.hh +++ b/src/hydra-queue-runner/state.hh @@ -12,6 +12,7 @@ #include "parsed-derivations.hh" #include "pathlocks.hh" #include "pool.hh" +#include "build-result.hh" #include "store-api.hh" #include "sync.hh" #include "nar-extractor.hh" diff --git a/src/libhydra/db.hh b/src/libhydra/db.hh index 7d5bdc58..00e8f406 100644 --- a/src/libhydra/db.hh +++ b/src/libhydra/db.hh @@ -18,7 +18,7 @@ struct Connection : pqxx::connection std::string upper_prefix = "DBI:Pg:"; if (hasPrefix(s, lower_prefix) || hasPrefix(s, upper_prefix)) { - return 
concatStringsSep(" ", tokenizeString(string(s, lower_prefix.size()), ";")); + return concatStringsSep(" ", tokenizeString(std::string(s, lower_prefix.size()), ";")); } throw Error("$HYDRA_DBI does not denote a PostgreSQL database"); diff --git a/src/libhydra/hydra-config.hh b/src/libhydra/hydra-config.hh index bc989f74..1688c278 100644 --- a/src/libhydra/hydra-config.hh +++ b/src/libhydra/hydra-config.hh @@ -17,7 +17,7 @@ struct HydraConfig if (hydraConfigFile && pathExists(*hydraConfigFile)) { for (auto line : tokenizeString(readFile(*hydraConfigFile), "\n")) { - line = trim(string(line, 0, line.find('#'))); + line = trim(std::string(line, 0, line.find('#'))); auto eq = line.find('='); if (eq == std::string::npos) continue; From bcaad1c934fd3bc5d93557c52697a9379acfbaea Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 30 Mar 2022 22:39:48 +0200 Subject: [PATCH 103/401] openConnection(): Don't throw exceptions in forked child On hydra.nixos.org the queue runner had child processes that were stuck handling an exception: Thread 1 (Thread 0x7f501f7fe640 (LWP 1413473) "bld~v54h5zkhmb3"): #0 futex_wait (private=0, expected=2, futex_word=0x7f50c27969b0 <_rtld_local+2480>) at ../sysdeps/nptl/futex-internal.h:146 #1 __lll_lock_wait (futex=0x7f50c27969b0 <_rtld_local+2480>, private=0) at lowlevellock.c:52 #2 0x00007f50c21eaee4 in __GI___pthread_mutex_lock (mutex=0x7f50c27969b0 <_rtld_local+2480>) at ../nptl/pthread_mutex_lock.c:115 #3 0x00007f50c1854bef in __GI___dl_iterate_phdr (callback=0x7f50c190c020 <_Unwind_IteratePhdrCallback>, data=0x7f501f7fb040) at dl-iteratephdr.c:40 #4 0x00007f50c190d2d1 in _Unwind_Find_FDE () from /nix/store/65hafbsx91127farbmyyv4r5ifgjdg43-glibc-2.33-117/lib/libgcc_s.so.1 #5 0x00007f50c19099b3 in uw_frame_state_for () from /nix/store/65hafbsx91127farbmyyv4r5ifgjdg43-glibc-2.33-117/lib/libgcc_s.so.1 #6 0x00007f50c190ab90 in uw_init_context_1 () from /nix/store/65hafbsx91127farbmyyv4r5ifgjdg43-glibc-2.33-117/lib/libgcc_s.so.1 #7 
0x00007f50c190b08e in _Unwind_RaiseException () from /nix/store/65hafbsx91127farbmyyv4r5ifgjdg43-glibc-2.33-117/lib/libgcc_s.so.1 #8 0x00007f50c1b02ab7 in __cxa_throw () from /nix/store/dd8swlwhpdhn6bv219562vyxhi8278hs-gcc-10.3.0-lib/lib/libstdc++.so.6 #9 0x00007f50c1d01abe in nix::parseURL (url="root@cb893012.packethost.net") at src/libutil/url.cc:53 #10 0x0000000000484f55 in extraStoreArgs (machine="root@cb893012.packethost.net") at build-remote.cc:35 #11 operator() (__closure=0x7f4fe9fe0420) at build-remote.cc:79 ... Maybe the fork happened while another thread was holding some global stack unwinding lock (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=71744). Anyway, since the hanging child inherits all file descriptors to SSH clients, shutting down remote builds (via 'child.to = -1' in State::buildRemote()) doesn't work and 'child.pid.wait()' hangs forever. So let's not do any significant work between fork and exec. --- src/hydra-queue-runner/build-remote.cc | 48 ++++++++++++-------------- 1 file changed, 23 insertions(+), 25 deletions(-) diff --git a/src/hydra-queue-runner/build-remote.cc b/src/hydra-queue-runner/build-remote.cc index bdbd44b9..57a5f0df 100644 --- a/src/hydra-queue-runner/build-remote.cc +++ b/src/hydra-queue-runner/build-remote.cc @@ -55,8 +55,30 @@ static void openConnection(Machine::ptr machine, Path tmpDir, int stderrFD, Chil to.create(); from.create(); - child.pid = startProcess([&]() { + Strings argv; + if (machine->isLocalhost()) { + pgmName = "nix-store"; + argv = {"nix-store", "--builders", "", "--serve", "--write"}; + } else { + pgmName = "ssh"; + auto sshName = machine->sshName; + Strings extraArgs = extraStoreArgs(sshName); + argv = {"ssh", sshName}; + if (machine->sshKey != "") append(argv, {"-i", machine->sshKey}); + if (machine->sshPublicHostKey != "") { + Path fileName = tmpDir + "/host-key"; + auto p = machine->sshName.find("@"); + std::string host = p != std::string::npos ? 
std::string(machine->sshName, p + 1) : machine->sshName; + writeFile(fileName, host + " " + machine->sshPublicHostKey + "\n"); + append(argv, {"-oUserKnownHostsFile=" + fileName}); + } + append(argv, + { "-x", "-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes" + , "--", "nix-store", "--serve", "--write" }); + append(argv, extraArgs); + } + child.pid = startProcess([&]() { restoreProcessContext(); if (dup2(to.readSide.get(), STDIN_FILENO) == -1) @@ -68,30 +90,6 @@ static void openConnection(Machine::ptr machine, Path tmpDir, int stderrFD, Chil if (dup2(stderrFD, STDERR_FILENO) == -1) throw SysError("cannot dup stderr"); - Strings argv; - if (machine->isLocalhost()) { - pgmName = "nix-store"; - argv = {"nix-store", "--builders", "", "--serve", "--write"}; - } - else { - pgmName = "ssh"; - auto sshName = machine->sshName; - Strings extraArgs = extraStoreArgs(sshName); - argv = {"ssh", sshName}; - if (machine->sshKey != "") append(argv, {"-i", machine->sshKey}); - if (machine->sshPublicHostKey != "") { - Path fileName = tmpDir + "/host-key"; - auto p = machine->sshName.find("@"); - std::string host = p != std::string::npos ? 
std::string(machine->sshName, p + 1) : machine->sshName; - writeFile(fileName, host + " " + machine->sshPublicHostKey + "\n"); - append(argv, {"-oUserKnownHostsFile=" + fileName}); - } - append(argv, - { "-x", "-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes" - , "--", "nix-store", "--serve", "--write" }); - append(argv, extraArgs); - } - execvp(argv.front().c_str(), (char * *) stringsToCharPtrs(argv).data()); // FIXME: remove cast throw SysError("cannot start %s", pgmName); From 5e3374cb86ffe65bd07c9a46a160260e1228ea61 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Na=C3=AFm=20Favier?= Date: Thu, 31 Mar 2022 12:55:15 +0200 Subject: [PATCH 104/401] Prepare for nixos-search integration --- flake.nix | 2 +- hydra-module.nix | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/flake.nix b/flake.nix index 7c21b68d..aad9072d 100644 --- a/flake.nix +++ b/flake.nix @@ -7,7 +7,7 @@ outputs = { self, nixpkgs, nix }: let - version = "${builtins.readFile ./version.txt}.${builtins.substring 0 8 self.lastModifiedDate}.${self.shortRev or "DIRTY"}"; + version = "${builtins.readFile ./version.txt}.${builtins.substring 0 8 (self.lastModifiedDate or "19700101")}.${self.shortRev or "DIRTY"}"; pkgs = import nixpkgs { system = "x86_64-linux"; diff --git a/hydra-module.nix b/hydra-module.nix index 6cfa6aa3..0df5e690 100644 --- a/hydra-module.nix +++ b/hydra-module.nix @@ -69,6 +69,7 @@ in package = mkOption { type = types.path; default = pkgs.hydra; + defaultText = literalExpression "pkgs.hydra"; description = "The Hydra package."; }; @@ -171,6 +172,7 @@ in buildMachinesFiles = mkOption { type = types.listOf types.path; default = optional (config.nix.buildMachines != []) "/etc/nix/machines"; + defaultText = literalExpression ''optional (config.nix.buildMachines != []) "/etc/nix/machines"''; example = [ "/etc/nix/machines" "/var/lib/hydra/provisioner/machines" ]; description = "List of files containing build machines."; }; From 
8c5636fe187ae29f962a5a27eeefbb5bd34ef91f Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Sat, 2 Apr 2022 17:32:14 -0700 Subject: [PATCH 105/401] hydra-queue-runner: use port 9198 by default Co-authored-by: Graham Christensen --- src/hydra-queue-runner/hydra-queue-runner.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index 992b9995..87361aca 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -47,7 +47,7 @@ State::State(std::optional metricsPortOpt) , maxLogSize(config->getIntOption("max_log_size", 64ULL << 20)) , uploadLogsToBinaryCache(config->getBoolOption("upload_logs_to_binary_cache", false)) , rootsDir(config->getStrOption("gc_roots_dir", fmt("%s/gcroots/per-user/%s/hydra-roots", settings.nixStateDir, getEnvOrDie("LOGNAME")))) - , metricsPort(config->getIntOption("queue_runner_metrics_port", 9099)) + , metricsPort(config->getIntOption("queue_runner_metrics_port", 9198)) , registry(std::make_shared()) { hydraData = getEnvOrDie("HYDRA_DATA"); From 71a036ed00d7e45c75b8b03c54656de43fa9fe29 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 4 Apr 2022 16:31:36 +0200 Subject: [PATCH 106/401] Update to Nix master MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Flake lock file updates: • Updated input 'nix': 'github:NixOS/nix/ec90fc4d1f42db3c5e3c74dc186487d10a28c221' (2022-04-05) → 'github:NixOS/nix/5fe4fe823c193cbb7bfa05a468de91eeab09058d' (2022-04-05) • Updated input 'nix/nixpkgs': 'github:NixOS/nixpkgs/82891b5e2c2359d7e58d08849e4c89511ab94234' (2021-09-28) → 'github:NixOS/nixpkgs/530a53dcbc9437363471167a5e4762c5fcfa34a1' (2022-02-19) --- flake.lock | 18 ++++++++---------- flake.nix | 2 +- src/hydra-eval-jobs/hydra-eval-jobs.cc | 14 ++++++++++---- 3 files changed, 19 insertions(+), 15 deletions(-) diff --git a/flake.lock b/flake.lock index 
d5c5e613..b9dd6995 100644 --- a/flake.lock +++ b/flake.lock @@ -23,27 +23,25 @@ "nixpkgs-regression": "nixpkgs-regression" }, "locked": { - "lastModified": 1646680282, - "narHash": "sha256-m8tqCS6uHveDon5GSro5yZor9H+sHeh+v/veF1IGw24=", + "lastModified": 1649172203, + "narHash": "sha256-Q3nYaXqbseDOvZrlePKeIrx0/KzqyrtNpxHIUbtFHuI=", "owner": "NixOS", "repo": "nix", - "rev": "ffe155abd36366a870482625543f9bf924a58281", + "rev": "5fe4fe823c193cbb7bfa05a468de91eeab09058d", "type": "github" }, "original": { - "owner": "NixOS", - "ref": "2.7.0", - "repo": "nix", - "type": "github" + "id": "nix", + "type": "indirect" } }, "nixpkgs": { "locked": { - "lastModified": 1632864508, - "narHash": "sha256-d127FIvGR41XbVRDPVvozUPQ/uRHbHwvfyKHwEt5xFM=", + "lastModified": 1645296114, + "narHash": "sha256-y53N7TyIkXsjMpOG7RhvqJFGDacLs9HlyHeSTBioqYU=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "82891b5e2c2359d7e58d08849e4c89511ab94234", + "rev": "530a53dcbc9437363471167a5e4762c5fcfa34a1", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index aad9072d..d2b71724 100644 --- a/flake.nix +++ b/flake.nix @@ -2,7 +2,7 @@ description = "A Nix-based continuous build system"; inputs.nixpkgs.follows = "nix/nixpkgs"; - inputs.nix.url = github:NixOS/nix/2.7.0; + #inputs.nix.url = github:NixOS/nix/2.7.0; outputs = { self, nixpkgs, nix }: let diff --git a/src/hydra-eval-jobs/hydra-eval-jobs.cc b/src/hydra-eval-jobs/hydra-eval-jobs.cc index ba92113e..7485b297 100644 --- a/src/hydra-eval-jobs/hydra-eval-jobs.cc +++ b/src/hydra-eval-jobs/hydra-eval-jobs.cc @@ -210,7 +210,9 @@ static void worker( nlohmann::json out; for (auto & j : outputs) - out[j.first] = state.store->printStorePath(j.second); + // FIXME: handle CA/impure builds. 
+ if (j.second) + out[j.first] = state.store->printStorePath(*j.second); job["outputs"] = std::move(out); reply["job"] = std::move(job); @@ -489,10 +491,14 @@ int main(int argc, char * * argv) std::string drvName(drvPath.name()); assert(hasSuffix(drvName, drvExtension)); drvName.resize(drvName.size() - drvExtension.size()); - auto h = std::get(hashDerivationModulo(*store, drv, true)); - auto outPath = store->makeOutputPath("out", h, drvName); + + auto hashModulo = hashDerivationModulo(*store, drv, true); + if (hashModulo.kind != DrvHash::Kind::Regular) continue; + auto h = hashModulo.hashes.find("out"); + if (h == hashModulo.hashes.end()) continue; + auto outPath = store->makeOutputPath("out", h->second, drvName); drv.env["out"] = store->printStorePath(outPath); - drv.outputs.insert_or_assign("out", DerivationOutput { .output = DerivationOutputInputAddressed { .path = outPath } }); + drv.outputs.insert_or_assign("out", DerivationOutput::InputAddressed { .path = outPath }); auto newDrvPath = store->printStorePath(writeDerivation(*store, drv)); debug("rewrote aggregate derivation %s -> %s", store->printStorePath(drvPath), newDrvPath); From 0803634a417cd2a99b654b85e1563b50cb1bf14a Mon Sep 17 00:00:00 2001 From: fricklerhandwerk Date: Fri, 1 Apr 2022 16:19:23 +0200 Subject: [PATCH 107/401] add architecture notes meeting notes from @edolstra giving a one-hour tour of the code --- doc/architecture.md | 129 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 129 insertions(+) create mode 100644 doc/architecture.md diff --git a/doc/architecture.md b/doc/architecture.md new file mode 100644 index 00000000..ec67bd37 --- /dev/null +++ b/doc/architecture.md @@ -0,0 +1,129 @@ +This is a rough overview from informal discussions and explanations of inner workings of Hydra. +You can use it as a guide to navigate the codebase or ask questions. 
+ +## Architecture + +### Components + +- Postgres database + - configuration + - build queue + - what is already built + - what is going to build +- `hydra-server` + - Perl, Catalyst + - web frontend +- `hydra-evaluator` + - Perl, C++ + - fetches repositories + - evaluates job sets + - pointers to a repository + - adds builds to the queue +- `hydra-queue-runner` + - C++ + - monitors the queue + - executes build steps + - uploads build results + - copy to a Nix store +- Nix store + - contains `.drv`s + - populated by `hydra-evaluator` + - read by `hydra-queue-runner` +- destination Nix store + - can be a binary cache + - e.g. `[cache.nixos.org](http://cache.nixos.org)` or the same store again (for small Hydra instances) +- plugin architecture + - extend evaluator for new kinds of repositories + - e.g. fetch from `git` + +### Database Schema + +[https://github.com/NixOS/hydra/blob/master/src/sql/hydra.sql](https://github.com/NixOS/hydra/blob/master/src/sql/hydra.sql) + +- `Jobsets` + - populated by calling Nix evaluator + - every Nix derivation in `release.nix` is a Job + - `flake` + - URL to flake, if job is from a flake + - single-point of configuration for flake builds + - flake itself contains pointers to dependencies + - for other builds we need more configuration data +- `JobsetInputs` + - more configuration for a Job +- `JobsetInputAlts` + - historical, where you could have more than one alternative for each input + - it would have done the cross product of all possibilities + - not used any more, as now every input is unique + - originally that was to have alternative values for the system parameter + - `x86-linux`, `x86_64-darwin` + - turned out not to be a good idea, as job set names did not uniquely identify output +- `Builds` + - queue: scheduled and finished builds + - instance of a Job + - corresponds to a top-level derivation + - can have many dependencies that don’t have a corresponding build + - dependencies represented as `BuildSteps` + - a Job is 
all the builds with a particular name, e.g. + - `git.x86_64-linux` is a job + - there may be multiple builds for that job + - build ID: just an auto-increment number + - building one thing can actually cause many (hundreds of) derivations to be built + - for queued builds, the `drv` has to be present in the store + - otherwise build will fail, e.g. after garbage collection +- `BuildSteps` + - corresponds to a derivation or substitution + - are reused through the Nix store + - may be duplicated for unique derivations due to how they relate to `Jobs` +- `BuildStepOutputs` + - corresponds directly to derivation outputs + - `out`, `dev`, ... +- `BuildProducts` + - not a Nix concept + - populated from a special file `$out/nix-support/hydra-build-products` + - used to scrape parts of build results out to the web frontend + - e.g. manuals, ISO images, etc. +- `BuildMetrics` + - scrapes data from magic location, similar to `BuildProducts` to show fancy graphs + - e.g. test coverage, build times, CPU utilization for build + - `$out/nix-support/hydra-metrics` +- `BuildInputs` + - probably obsolete +- `JobsetEvalMembers` + - joins evaluations with jobs + - huge table, 10k’s of entries for one `nixpkgs` evaluation + - can be imagined as a subset of the eval cache + - could in principle use the eval cache + +### `release.nix` + +- hydra-specific convention to describe the build +- should evaluate to an attribute set that contains derivations +- hydra considers every attribute in that set a job +- every job needs a unique name + - if you want to build for multiple platforms, you need to reflect that in the name +- hydra does a deep traversal of the attribute set + - just evaluating the names may take half an hour + +## FAQ + +Can we imagine Hydra to be a persistence layer for the build graph? + +- partially, it lacks a lot of information + - does not keep edges of the build graph + +How does Hydra relate to `nix build`? 
+ +- reimplements the top level Nix build loop, scheduling, etc. +- Hydra has to persist build results +- Hydra has more sophisticated remote build execution and scheduling than Nix + +Is it conceptually possible to unify Hydra’s capabilities with regular Nix? + +- Nix does not have any scheduling, it just traverses the build graph +- Hydra has scheduling in terms of job set priorities, tracks how much of a job set it has worked on + - makes sure jobs don’t starve each other +- Nix cannot dynamically add build jobs at runtime + - [RFC 92](https://github.com/NixOS/rfcs/blob/master/rfcs/0092-plan-dynamism.md) should enable that + - internally it is already possible, but there is no interface to do that +- Hydra queue runner is a long running process + - Nix takes a static set of jobs, working it off at once From 33bc60b83c1d74da1ccbb6691135ff5176e4af7d Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Wed, 6 Apr 2022 10:46:56 -0700 Subject: [PATCH 108/401] hydra-queue-runner: move exporter back to State::run It's (arguably) better than risking pinning the thread at 100% due to the busy `while` loop. --- src/hydra-queue-runner/hydra-queue-runner.cc | 30 +++++++------------- 1 file changed, 11 insertions(+), 19 deletions(-) diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index 87361aca..6a84749f 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -750,24 +750,6 @@ void State::unlock() } -void State::runMetricsExporter() -{ - std::cout << "Starting the Prometheus exporter on port " << metricsPort << std::endl; - - /* Set up simple exporter, to show that we're still alive. 
*/ - std::string metricsAddress{"127.0.0.1"}; - prometheus::Exposer exposer{metricsAddress + ":" + std::to_string(metricsPort)}; - auto exposerPort = exposer.GetListeningPorts().front(); - exposer.RegisterCollectable(registry); - - std::cout << "Started the Prometheus exporter, listening on " - << "http://" << metricsAddress << ":" << exposerPort << "/metrics" - << std::endl; - - while (true) {}; -} - - void State::run(BuildID buildOne) { /* Can't be bothered to shut down cleanly. Goodbye! */ @@ -780,7 +762,17 @@ void State::run(BuildID buildOne) if (!lock) throw Error("hydra-queue-runner is already running"); - std::thread(&State::runMetricsExporter, this).detach(); + std::cout << "Starting the Prometheus exporter on port " << metricsPort << std::endl; + + /* Set up simple exporter, to show that we're still alive. */ + std::string metricsAddress{"127.0.0.1"}; // FIXME: configurable + prometheus::Exposer promExposer{metricsAddress + ":" + std::to_string(metricsPort)}; + auto exposerPort = promExposer.GetListeningPorts().front(); + promExposer.RegisterCollectable(registry); + + std::cout << "Started the Prometheus exporter, listening on " + << "http://" << metricsAddress << ":" << exposerPort << "/metrics" + << std::endl; Store::Params localParams; localParams["max-connections"] = "16"; From edf3c348f2156827a43639c8acac7051b87dec98 Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Wed, 6 Apr 2022 10:58:57 -0700 Subject: [PATCH 109/401] hydra-queue-runner: make entire address configurable --- src/hydra-queue-runner/hydra-queue-runner.cc | 31 +++++++------------- src/hydra-queue-runner/state.hh | 4 +-- t/Hydra/Config/include.t | 2 +- t/lib/HydraTestContext.pm | 2 +- 4 files changed, 15 insertions(+), 24 deletions(-) diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index 6a84749f..5ad1a9d9 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -39,7 +39,7 
@@ std::string getEnvOrDie(const std::string & key) } -State::State(std::optional metricsPortOpt) +State::State(std::optional metricsAddrOpt) : config(std::make_unique()) , maxUnsupportedTime(config->getIntOption("max_unsupported_time", 0)) , dbPool(config->getIntOption("max_db_connections", 128)) @@ -47,15 +47,15 @@ State::State(std::optional metricsPortOpt) , maxLogSize(config->getIntOption("max_log_size", 64ULL << 20)) , uploadLogsToBinaryCache(config->getBoolOption("upload_logs_to_binary_cache", false)) , rootsDir(config->getStrOption("gc_roots_dir", fmt("%s/gcroots/per-user/%s/hydra-roots", settings.nixStateDir, getEnvOrDie("LOGNAME")))) - , metricsPort(config->getIntOption("queue_runner_metrics_port", 9198)) + , metricsAddr(config->getStrOption("queue_runner_metrics_address", std::string{"127.0.0.1:9198"})) , registry(std::make_shared()) { hydraData = getEnvOrDie("HYDRA_DATA"); logDir = canonPath(hydraData + "/build-logs"); - if (metricsPortOpt.has_value()) { - metricsPort = metricsPortOpt.value(); + if (metricsAddrOpt.has_value()) { + metricsAddr = metricsAddrOpt.value(); } /* handle deprecated store specification */ @@ -762,16 +762,15 @@ void State::run(BuildID buildOne) if (!lock) throw Error("hydra-queue-runner is already running"); - std::cout << "Starting the Prometheus exporter on port " << metricsPort << std::endl; + std::cout << "Starting the Prometheus exporter on " << metricsAddr << std::endl; /* Set up simple exporter, to show that we're still alive. 
*/ - std::string metricsAddress{"127.0.0.1"}; // FIXME: configurable - prometheus::Exposer promExposer{metricsAddress + ":" + std::to_string(metricsPort)}; + prometheus::Exposer promExposer{metricsAddr}; auto exposerPort = promExposer.GetListeningPorts().front(); promExposer.RegisterCollectable(registry); std::cout << "Started the Prometheus exporter, listening on " - << "http://" << metricsAddress << ":" << exposerPort << "/metrics" + << metricsAddr << "/metrics (port " << exposerPort << ")" << std::endl; Store::Params localParams; @@ -884,7 +883,7 @@ int main(int argc, char * * argv) bool unlock = false; bool status = false; BuildID buildOne = 0; - std::optional metricsPortOpt = std::nullopt; + std::optional metricsAddrOpt = std::nullopt; parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) { if (*arg == "--unlock") @@ -896,16 +895,8 @@ int main(int argc, char * * argv) buildOne = *b; else throw Error("‘--build-one’ requires a build ID"); - } else if (*arg == "--port") { - if (auto p = string2Int(getArg(*arg, arg, end))) { - if (*p > std::numeric_limits::max()) { - throw Error("'--port' has a maximum of 65535"); - } else { - metricsPortOpt = *p; - } - } else { - throw Error("'--port' requires a numeric port (0 for a random, usable port; max 65535)"); - } + } else if (*arg == "--prometheus-address") { + metricsAddrOpt = getArg(*arg, arg, end); } else return false; return true; @@ -914,7 +905,7 @@ int main(int argc, char * * argv) settings.verboseBuild = true; settings.lockCPU = false; - State state{metricsPortOpt}; + State state{metricsAddrOpt}; if (status) state.showStatus(); else if (unlock) diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh index fb533559..a37548a3 100644 --- a/src/hydra-queue-runner/state.hh +++ b/src/hydra-queue-runner/state.hh @@ -434,12 +434,12 @@ private: via gc_roots_dir. 
*/ nix::Path rootsDir; - uint16_t metricsPort; + std::string metricsAddr;; std::shared_ptr registry; public: - State(std::optional metricsPortOpt); + State(std::optional metricsAddrOpt); private: diff --git a/t/Hydra/Config/include.t b/t/Hydra/Config/include.t index 63186f87..14f657ff 100644 --- a/t/Hydra/Config/include.t +++ b/t/Hydra/Config/include.t @@ -20,7 +20,7 @@ write_file($ctx{'tmpdir'} . "/bar.conf", q| |); is(getHydraConfig(), { - queue_runner_metrics_port => 0, + queue_runner_metrics_address => "127.0.0.1:0", foo => { bar => "baz" } }, "Nested includes work."); diff --git a/t/lib/HydraTestContext.pm b/t/lib/HydraTestContext.pm index ce933c09..2bb1478c 100644 --- a/t/lib/HydraTestContext.pm +++ b/t/lib/HydraTestContext.pm @@ -51,7 +51,7 @@ sub new { $ENV{'HYDRA_CONFIG'} = "$dir/hydra.conf"; my $hydra_config = $opts{'hydra_config'} || ""; - $hydra_config = "queue_runner_metrics_port = 0\n" . $hydra_config; + $hydra_config = "queue_runner_metrics_address = 127.0.0.1:0\n" . $hydra_config; if ($opts{'use_external_destination_store'} // 1) { $hydra_config = "store_uri = file:$dir/nix/dest-store\n" . $hydra_config; } From 15e8fa8aff1421c99218da58597cd84d0c67afc7 Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Wed, 6 Apr 2022 11:41:18 -0700 Subject: [PATCH 110/401] doc/manual: document queue-runner prometheus exporter configuration --- doc/manual/src/configuration.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/doc/manual/src/configuration.md b/doc/manual/src/configuration.md index ec071f24..7aca17ef 100644 --- a/doc/manual/src/configuration.md +++ b/doc/manual/src/configuration.md @@ -102,6 +102,22 @@ in the hydra configuration file, as below: ``` +hydra-queue-runner's Prometheus service +--------------------------------------- + +hydra-queue-runner supports running a Prometheus webserver for metrics. 
The +exporter's address defaults to exposing on `127.0.0.1:9198`, but is also +configurable through the hydra configuration file and a command line argument, +as below. A port of `:0` will make the exposer choose a random, available port. + +```conf +queue_runner_metrics_address = 127.0.0.1:9198 +``` + +```shell +$ hydra-queue-runner --prometheus-address 127.0.0.1:9198 +``` + Using LDAP as authentication backend (optional) ----------------------------------------------- From 5bff730f2c4378bda6bbb46b5274dd4004a28b97 Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Wed, 6 Apr 2022 11:41:04 -0700 Subject: [PATCH 111/401] WIP: I love it when they delete the assignment operator :) --- src/hydra-queue-runner/hydra-queue-runner.cc | 15 +++++++++++++++ src/hydra-queue-runner/queue-monitor.cc | 2 ++ src/hydra-queue-runner/state.hh | 6 +++++- 3 files changed, 22 insertions(+), 1 deletion(-) diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index 5ad1a9d9..f00049a1 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -49,7 +49,20 @@ State::State(std::optional metricsAddrOpt) , rootsDir(config->getStrOption("gc_roots_dir", fmt("%s/gcroots/per-user/%s/hydra-roots", settings.nixStateDir, getEnvOrDie("LOGNAME")))) , metricsAddr(config->getStrOption("queue_runner_metrics_address", std::string{"127.0.0.1:9198"})) , registry(std::make_shared()) + // , call_ctr_family(prometheus::BuildCounter().Name("queue_queued_builds_calls_total").Help("Number of times State::getQueuedBuilds() was called").Register(*registry)) + // , call_ctr(call_ctr_family.Add({})) { + // call_ctr_family(prometheus::BuildCounter().Name("queue_queued_builds_calls_total").Help("Number of times State::getQueuedBuilds() was called").Register(*registry)); + // call_ctr(call_ctr_family.Add({})); + auto& fam = prometheus::BuildCounter() + .Name("queue_queued_builds_calls_total") + .Help("Number of times 
State::getQueuedBuilds() was called") + .Register(*registry) + .Add({}); + + // call_ctr_family(fam); + // call_ctr(call_ctr_family.Add({})); + hydraData = getEnvOrDie("HYDRA_DATA"); logDir = canonPath(hydraData + "/build-logs"); @@ -58,6 +71,7 @@ State::State(std::optional metricsAddrOpt) metricsAddr = metricsAddrOpt.value(); } + /* handle deprecated store specification */ if (config->getStrOption("store_mode") != "") throw Error("store_mode in hydra.conf is deprecated, please use store_uri"); @@ -767,6 +781,7 @@ void State::run(BuildID buildOne) /* Set up simple exporter, to show that we're still alive. */ prometheus::Exposer promExposer{metricsAddr}; auto exposerPort = promExposer.GetListeningPorts().front(); + promExposer.RegisterCollectable(registry); std::cout << "Started the Prometheus exporter, listening on " diff --git a/src/hydra-queue-runner/queue-monitor.cc b/src/hydra-queue-runner/queue-monitor.cc index 49caf8e3..8fb06f45 100644 --- a/src/hydra-queue-runner/queue-monitor.cc +++ b/src/hydra-queue-runner/queue-monitor.cc @@ -82,6 +82,8 @@ struct PreviousFailure : public std::exception { bool State::getQueuedBuilds(Connection & conn, ref destStore, unsigned int & lastBuildId) { + call_ctr.Increment(); + printInfo("checking the queue for builds > %d...", lastBuildId); /* Grab the queued builds from the database, but don't process diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh index a37548a3..9e89df52 100644 --- a/src/hydra-queue-runner/state.hh +++ b/src/hydra-queue-runner/state.hh @@ -7,6 +7,7 @@ #include #include +#include #include #include "db.hh" @@ -434,10 +435,13 @@ private: via gc_roots_dir. 
*/ nix::Path rootsDir; - std::string metricsAddr;; + std::string metricsAddr; std::shared_ptr registry; + // prometheus::Family& call_ctr_family; + prometheus::Counter& call_ctr; + public: State(std::optional metricsAddrOpt); From 46f52b4c4e94ae9d8699f8f857e6e8fcc4c39336 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 6 Apr 2022 15:49:38 -0400 Subject: [PATCH 112/401] bring back the working version Cole made --- src/hydra-queue-runner/hydra-queue-runner.cc | 18 +++++------------- src/hydra-queue-runner/queue-monitor.cc | 2 +- src/hydra-queue-runner/state.hh | 4 ++-- 3 files changed, 8 insertions(+), 16 deletions(-) diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index f00049a1..0400b61b 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -49,20 +49,12 @@ State::State(std::optional metricsAddrOpt) , rootsDir(config->getStrOption("gc_roots_dir", fmt("%s/gcroots/per-user/%s/hydra-roots", settings.nixStateDir, getEnvOrDie("LOGNAME")))) , metricsAddr(config->getStrOption("queue_runner_metrics_address", std::string{"127.0.0.1:9198"})) , registry(std::make_shared()) - // , call_ctr_family(prometheus::BuildCounter().Name("queue_queued_builds_calls_total").Help("Number of times State::getQueuedBuilds() was called").Register(*registry)) - // , call_ctr(call_ctr_family.Add({})) + , call_ctr(prometheus::BuildCounter() + .Name("queue_queued_builds_calls_total") + .Help("Number of times State::getQueuedBuilds() was called") + .Register(*registry)) + , queue_queued_builds_calls(call_ctr.Add({})) // FIXME: add the proper arguments { - // call_ctr_family(prometheus::BuildCounter().Name("queue_queued_builds_calls_total").Help("Number of times State::getQueuedBuilds() was called").Register(*registry)); - // call_ctr(call_ctr_family.Add({})); - auto& fam = prometheus::BuildCounter() - .Name("queue_queued_builds_calls_total") - .Help("Number of times 
State::getQueuedBuilds() was called") - .Register(*registry) - .Add({}); - - // call_ctr_family(fam); - // call_ctr(call_ctr_family.Add({})); - hydraData = getEnvOrDie("HYDRA_DATA"); logDir = canonPath(hydraData + "/build-logs"); diff --git a/src/hydra-queue-runner/queue-monitor.cc b/src/hydra-queue-runner/queue-monitor.cc index 8fb06f45..bcef4e2f 100644 --- a/src/hydra-queue-runner/queue-monitor.cc +++ b/src/hydra-queue-runner/queue-monitor.cc @@ -82,7 +82,7 @@ struct PreviousFailure : public std::exception { bool State::getQueuedBuilds(Connection & conn, ref destStore, unsigned int & lastBuildId) { - call_ctr.Increment(); + queue_queued_builds_calls.Increment(); printInfo("checking the queue for builds > %d...", lastBuildId); diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh index 9e89df52..56e01a0e 100644 --- a/src/hydra-queue-runner/state.hh +++ b/src/hydra-queue-runner/state.hh @@ -439,8 +439,8 @@ private: std::shared_ptr registry; - // prometheus::Family& call_ctr_family; - prometheus::Counter& call_ctr; + prometheus::Family& call_ctr; + prometheus::Counter& queue_queued_builds_calls; public: State(std::optional metricsAddrOpt); From 5de08d412ea1642be71849d3cd52410c64084958 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 6 Apr 2022 19:59:53 -0400 Subject: [PATCH 113/401] queue metrics: refactor the metrics into a struct --- src/hydra-queue-runner/hydra-queue-runner.cc | 20 +++++++++++++------- src/hydra-queue-runner/queue-monitor.cc | 2 +- src/hydra-queue-runner/state.hh | 11 ++++++++--- 3 files changed, 22 insertions(+), 11 deletions(-) diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index 0400b61b..b540bfe6 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -38,6 +38,18 @@ std::string getEnvOrDie(const std::string & key) return *value; } +State::PromMetrics::PromMetrics() + : 
registry(std::make_shared()) + , queue_checks_started( + prometheus::BuildCounter() + .Name("hydraqueuerunner_queue_checks_started_total") + .Help("Number of times State::getQueuedBuilds() was started") + .Register(*registry) + .Add({}) + ) +{ + +} State::State(std::optional metricsAddrOpt) : config(std::make_unique()) @@ -48,12 +60,6 @@ State::State(std::optional metricsAddrOpt) , uploadLogsToBinaryCache(config->getBoolOption("upload_logs_to_binary_cache", false)) , rootsDir(config->getStrOption("gc_roots_dir", fmt("%s/gcroots/per-user/%s/hydra-roots", settings.nixStateDir, getEnvOrDie("LOGNAME")))) , metricsAddr(config->getStrOption("queue_runner_metrics_address", std::string{"127.0.0.1:9198"})) - , registry(std::make_shared()) - , call_ctr(prometheus::BuildCounter() - .Name("queue_queued_builds_calls_total") - .Help("Number of times State::getQueuedBuilds() was called") - .Register(*registry)) - , queue_queued_builds_calls(call_ctr.Add({})) // FIXME: add the proper arguments { hydraData = getEnvOrDie("HYDRA_DATA"); @@ -774,7 +780,7 @@ void State::run(BuildID buildOne) prometheus::Exposer promExposer{metricsAddr}; auto exposerPort = promExposer.GetListeningPorts().front(); - promExposer.RegisterCollectable(registry); + promExposer.RegisterCollectable(prom.registry); std::cout << "Started the Prometheus exporter, listening on " << metricsAddr << "/metrics (port " << exposerPort << ")" diff --git a/src/hydra-queue-runner/queue-monitor.cc b/src/hydra-queue-runner/queue-monitor.cc index bcef4e2f..c4873cc5 100644 --- a/src/hydra-queue-runner/queue-monitor.cc +++ b/src/hydra-queue-runner/queue-monitor.cc @@ -82,7 +82,7 @@ struct PreviousFailure : public std::exception { bool State::getQueuedBuilds(Connection & conn, ref destStore, unsigned int & lastBuildId) { - queue_queued_builds_calls.Increment(); + prom.queue_checks_started.Increment(); printInfo("checking the queue for builds > %d...", lastBuildId); diff --git a/src/hydra-queue-runner/state.hh 
b/src/hydra-queue-runner/state.hh index 56e01a0e..6d1e45c4 100644 --- a/src/hydra-queue-runner/state.hh +++ b/src/hydra-queue-runner/state.hh @@ -437,10 +437,15 @@ private: std::string metricsAddr; - std::shared_ptr registry; + struct PromMetrics + { + std::shared_ptr registry; - prometheus::Family& call_ctr; - prometheus::Counter& queue_queued_builds_calls; + prometheus::Counter& queue_checks_started; + + PromMetrics(); + }; + PromMetrics prom; public: State(std::optional metricsAddrOpt); From 1c12c5882f61cc490a589a02475cbf304a58048a Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 6 Apr 2022 20:18:29 -0400 Subject: [PATCH 114/401] hydra queue runner: instrument the process of loading new builds with prom --- src/hydra-queue-runner/hydra-queue-runner.cc | 28 ++++++++++++++++++++ src/hydra-queue-runner/queue-monitor.cc | 13 +++++++-- src/hydra-queue-runner/state.hh | 5 ++++ 3 files changed, 44 insertions(+), 2 deletions(-) diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index b540bfe6..bf25258d 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -47,6 +47,34 @@ State::PromMetrics::PromMetrics() .Register(*registry) .Add({}) ) + , queue_build_loads( + prometheus::BuildCounter() + .Name("hydraqueuerunner_queue_build_loads_total") + .Help("Number of builds loaded") + .Register(*registry) + .Add({}) + ) + , queue_checks_early_exits( + prometheus::BuildCounter() + .Name("hydraqueuerunner_queue_checks_early_exits_total") + .Help("Number of times State::getQueuedBuilds() yielded to potential bumps") + .Register(*registry) + .Add({}) + ) + , queue_checks_finished( + prometheus::BuildCounter() + .Name("hydraqueuerunner_queue_checks_finished_total") + .Help("Number of times State::getQueuedBuilds() was completed") + .Register(*registry) + .Add({}) + ) + , queue_max_id( + prometheus::BuildGauge() + .Name("hydraqueuerunner_queue_max_build_id_info") + 
.Help("Maximum build record ID in the queue") + .Register(*registry) + .Add({}) + ) { } diff --git a/src/hydra-queue-runner/queue-monitor.cc b/src/hydra-queue-runner/queue-monitor.cc index c4873cc5..f2f3d59f 100644 --- a/src/hydra-queue-runner/queue-monitor.cc +++ b/src/hydra-queue-runner/queue-monitor.cc @@ -109,7 +109,10 @@ bool State::getQueuedBuilds(Connection & conn, auto builds_(builds.lock()); BuildID id = row["id"].as(); if (buildOne && id != buildOne) continue; - if (id > newLastBuildId) newLastBuildId = id; + if (id > newLastBuildId) { + newLastBuildId = id; + prom.queue_max_id.Set(id); + } if (builds_->count(id)) continue; auto build = std::make_shared( @@ -138,6 +141,7 @@ bool State::getQueuedBuilds(Connection & conn, std::set finishedDrvs; createBuild = [&](Build::ptr build) { + prom.queue_build_loads.Increment(); printMsg(lvlTalkative, format("loading build %1% (%2%)") % build->id % build->fullJobName()); nrAdded++; newBuildsByID.erase(build->id); @@ -308,9 +312,14 @@ bool State::getQueuedBuilds(Connection & conn, /* Stop after a certain time to allow priority bumps to be processed. */ - if (std::chrono::system_clock::now() > start + std::chrono::seconds(600)) break; + if (std::chrono::system_clock::now() > start + std::chrono::seconds(600)) { + prom.queue_checks_early_exits.Increment(); + break; + } } + prom.queue_checks_finished.Increment(); + lastBuildId = newBuildsByID.empty() ? 
newLastBuildId : newBuildsByID.begin()->first - 1; return newBuildsByID.empty(); } diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh index 6d1e45c4..61954848 100644 --- a/src/hydra-queue-runner/state.hh +++ b/src/hydra-queue-runner/state.hh @@ -8,6 +8,7 @@ #include #include +#include #include #include "db.hh" @@ -442,6 +443,10 @@ private: std::shared_ptr registry; prometheus::Counter& queue_checks_started; + prometheus::Counter& queue_build_loads; + prometheus::Counter& queue_checks_early_exits; + prometheus::Counter& queue_checks_finished; + prometheus::Gauge& queue_max_id; PromMetrics(); }; From 59ac96a99c25726092bef4b17d3ef284266b8c66 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 6 Apr 2022 20:23:02 -0400 Subject: [PATCH 115/401] Track the number of steps created --- src/hydra-queue-runner/hydra-queue-runner.cc | 7 +++++++ src/hydra-queue-runner/queue-monitor.cc | 2 ++ src/hydra-queue-runner/state.hh | 1 + 3 files changed, 10 insertions(+) diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index bf25258d..96dacbd4 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -54,6 +54,13 @@ State::PromMetrics::PromMetrics() .Register(*registry) .Add({}) ) + , queue_steps_created( + prometheus::BuildCounter() + .Name("hydraqueuerunner_queue_steps_created_total") + .Help("Number of steps created") + .Register(*registry) + .Add({}) + ) , queue_checks_early_exits( prometheus::BuildCounter() .Name("hydraqueuerunner_queue_checks_early_exits_total") diff --git a/src/hydra-queue-runner/queue-monitor.cc b/src/hydra-queue-runner/queue-monitor.cc index f2f3d59f..748df37f 100644 --- a/src/hydra-queue-runner/queue-monitor.cc +++ b/src/hydra-queue-runner/queue-monitor.cc @@ -448,6 +448,8 @@ Step::ptr State::createStep(ref destStore, if (!isNew) return step; + prom.queue_steps_created.Increment(); + printMsg(lvlDebug, "considering 
derivation ‘%1%’", localStore->printStorePath(drvPath)); /* Initialize the step. Note that the step may be visible in diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh index 61954848..a8f64b4d 100644 --- a/src/hydra-queue-runner/state.hh +++ b/src/hydra-queue-runner/state.hh @@ -444,6 +444,7 @@ private: prometheus::Counter& queue_checks_started; prometheus::Counter& queue_build_loads; + prometheus::Counter& queue_steps_created; prometheus::Counter& queue_checks_early_exits; prometheus::Counter& queue_checks_finished; prometheus::Gauge& queue_max_id; From f8dc48f171b34b7b03bd08331c03f8e2c5a91991 Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Wed, 6 Apr 2022 17:53:11 -0700 Subject: [PATCH 116/401] hydra-queue-runner: fixup: remove extraneous newline --- src/hydra-queue-runner/hydra-queue-runner.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index 96dacbd4..e3f5b772 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -104,7 +104,6 @@ State::State(std::optional metricsAddrOpt) metricsAddr = metricsAddrOpt.value(); } - /* handle deprecated store specification */ if (config->getStrOption("store_mode") != "") throw Error("store_mode in hydra.conf is deprecated, please use store_uri"); From ae690d6602c6917eb9d79c1e219cb15c880ff5e4 Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Thu, 7 Apr 2022 10:40:50 -0700 Subject: [PATCH 117/401] doc/manual: fixup configuration option name Oops. --- doc/manual/src/configuration.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/manual/src/configuration.md b/doc/manual/src/configuration.md index 7aca17ef..bac93c33 100644 --- a/doc/manual/src/configuration.md +++ b/doc/manual/src/configuration.md @@ -111,7 +111,7 @@ configurable through the hydra configuration file and a command line argument, as below. 
A port of `:0` will make the exposer choose a random, available port. ```conf -queue_runner_exporter_address = 127.0.0.1:9198 +queue_runner_metrics_address = 127.0.0.1:9198 ``` ```shell From 3f303b479cf4d04f8210938e032ceb5acec015ef Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Thu, 7 Apr 2022 11:26:45 -0700 Subject: [PATCH 118/401] flake: add ipv6 support to civetweb --- flake.nix | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/flake.nix b/flake.nix index b41dc8e0..01b0c988 100644 --- a/flake.nix +++ b/flake.nix @@ -44,7 +44,14 @@ # Overlay these packages to use dependencies from the Nixpkgs everything # else uses, to side-step the version difference: glibc is 2.32 in the # nix-pinned Nixpkgs, but 2.33 in the newNixpkgs commit. - civetweb = final.callPackage "${newNixpkgs}/pkgs/development/libraries/civetweb" { }; + civetweb = (final.callPackage "${newNixpkgs}/pkgs/development/libraries/civetweb" { }).overrideAttrs + # Can be dropped once newNixpkgs points to a revision containing + # https://github.com/NixOS/nixpkgs/pull/167751 + ({ cmakeFlags ? [ ], ... }: { + cmakeFlags = cmakeFlags ++ [ + "-DCIVETWEB_ENABLE_IPV6=1" + ]; + }); prometheus-cpp = final.callPackage "${newNixpkgs}/pkgs/development/libraries/prometheus-cpp" { }; # Add LDAP dependencies that aren't currently found within nixpkgs. From be6077d2bbdc468588059ba7cb07a85664db2d1e Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Thu, 7 Apr 2022 11:28:21 -0700 Subject: [PATCH 119/401] doc/manual: demonstrate ipv6 metrics address for queue-runner --- doc/manual/src/configuration.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/manual/src/configuration.md b/doc/manual/src/configuration.md index bac93c33..2700625d 100644 --- a/doc/manual/src/configuration.md +++ b/doc/manual/src/configuration.md @@ -112,10 +112,14 @@ as below. A port of `:0` will make the exposer choose a random, available port. 
```conf queue_runner_metrics_address = 127.0.0.1:9198 +# or +queue_runner_metrics_address = [::]:9198 ``` ```shell $ hydra-queue-runner --prometheus-address 127.0.0.1:9198 +# or +$ hydra-queue-runner --prometheus-address [::]:9198 ``` Using LDAP as authentication backend (optional) From a179f0be610f37635c981de441c73d246b88f1fb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Apr 2022 15:26:03 +0000 Subject: [PATCH 120/401] build(deps): bump cachix/install-nix-action from 16 to 17 Bumps [cachix/install-nix-action](https://github.com/cachix/install-nix-action) from 16 to 17. - [Release notes](https://github.com/cachix/install-nix-action/releases) - [Commits](https://github.com/cachix/install-nix-action/compare/v16...v17) --- updated-dependencies: - dependency-name: cachix/install-nix-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 3ba4aba6..0f5f43da 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -9,6 +9,6 @@ jobs: - uses: actions/checkout@v3 with: fetch-depth: 0 - - uses: cachix/install-nix-action@v16 + - uses: cachix/install-nix-action@v17 #- run: nix flake check - run: nix-build -A checks.x86_64-linux.build -A checks.x86_64-linux.validate-openapi From cb4fa0000ff4f884bb0064243d99ba4133ee0ae7 Mon Sep 17 00:00:00 2001 From: Kayla Firestack Date: Thu, 14 Apr 2022 11:03:10 -0400 Subject: [PATCH 121/401] fix(hydra-eval-jobs.cc): add function to report pid status --- src/hydra-eval-jobs/hydra-eval-jobs.cc | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/src/hydra-eval-jobs/hydra-eval-jobs.cc b/src/hydra-eval-jobs/hydra-eval-jobs.cc index 7485b297..bbc55a2b 100644 --- a/src/hydra-eval-jobs/hydra-eval-jobs.cc +++ 
b/src/hydra-eval-jobs/hydra-eval-jobs.cc @@ -25,6 +25,28 @@ #include +void check_pid_status_quick(pid_t check_pid) { + // Only check 'initialized' and known PID's + if (check_pid <= 0) { return; } + + int wstatus = 0; + pid_t pid = waitpid(check_pid, &wstatus, WNOHANG); + // -1 = failiure, WNOHANG: 0 = no change + if (pid <= 0) { return; } + + std::cerr << "child process (" << pid << ") "; + + if (WIFEXITED(wstatus)) { + std::cerr << "exited with status=" << WEXITSTATUS(wstatus) << std::endl; + } else if (WIFSIGNALED(wstatus)) { + std::cerr << "killed by signal=" << WTERMSIG(wstatus) << std::endl; + } else if (WIFSTOPPED(wstatus)) { + std::cerr << "stopped by signal=" << WSTOPSIG(wstatus) << std::endl; + } else if (WIFCONTINUED(wstatus)) { + std::cerr << "continued" << std::endl; + } +} + using namespace nix; static Path gcRootsDir; From 62cdbc41389757322ddd0255038de2d6c57d197d Mon Sep 17 00:00:00 2001 From: Kayla Firestack Date: Thu, 14 Apr 2022 11:18:29 -0400 Subject: [PATCH 122/401] feat(hydra-eval-jobs.cc): add check_pid_status_nonblocking to catch handler --- src/hydra-eval-jobs/hydra-eval-jobs.cc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/hydra-eval-jobs/hydra-eval-jobs.cc b/src/hydra-eval-jobs/hydra-eval-jobs.cc index bbc55a2b..f1cc1434 100644 --- a/src/hydra-eval-jobs/hydra-eval-jobs.cc +++ b/src/hydra-eval-jobs/hydra-eval-jobs.cc @@ -25,7 +25,7 @@ #include -void check_pid_status_quick(pid_t check_pid) { +void check_pid_status_nonblocking(pid_t check_pid) { // Only check 'initialized' and known PID's if (check_pid <= 0) { return; } @@ -333,8 +333,8 @@ int main(int argc, char * * argv) /* Start a handler thread per worker process. */ auto handler = [&]() { + pid_t pid = -1; try { - pid_t pid = -1; AutoCloseFD from, to; while (true) { @@ -436,6 +436,7 @@ int main(int argc, char * * argv) } } } catch (...) 
{ + check_pid_status_nonblocking(pid); auto state(state_.lock()); state->exc = std::current_exception(); wakeup.notify_all(); From 2cdd7974de5e8ed2360caabe586510f0b512feec Mon Sep 17 00:00:00 2001 From: Kayla Firestack Date: Fri, 29 Apr 2022 13:06:16 -0400 Subject: [PATCH 123/401] fix(hydra-eval-jobs): fix typo --- src/hydra-eval-jobs/hydra-eval-jobs.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/hydra-eval-jobs/hydra-eval-jobs.cc b/src/hydra-eval-jobs/hydra-eval-jobs.cc index f1cc1434..918bd451 100644 --- a/src/hydra-eval-jobs/hydra-eval-jobs.cc +++ b/src/hydra-eval-jobs/hydra-eval-jobs.cc @@ -31,7 +31,7 @@ void check_pid_status_nonblocking(pid_t check_pid) { int wstatus = 0; pid_t pid = waitpid(check_pid, &wstatus, WNOHANG); - // -1 = failiure, WNOHANG: 0 = no change + // -1 = failure, WNOHANG: 0 = no change if (pid <= 0) { return; } std::cerr << "child process (" << pid << ") "; From 90769ab5adcb6191e149c65aec72643c89b2a233 Mon Sep 17 00:00:00 2001 From: Kayla Firestack Date: Mon, 2 May 2022 13:49:32 -0400 Subject: [PATCH 124/401] feat(t/jobs): add test job to cause an OOM --- t/jobs/oom.nix | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 t/jobs/oom.nix diff --git a/t/jobs/oom.nix b/t/jobs/oom.nix new file mode 100644 index 00000000..abbd0c0d --- /dev/null +++ b/t/jobs/oom.nix @@ -0,0 +1,3 @@ +{ + oom = builtins.readFile "/dev/zero"; +} From 2c909c038fad7cd6107706915e819a12e88fe425 Mon Sep 17 00:00:00 2001 From: Kayla Firestack Date: Mon, 2 May 2022 13:50:57 -0400 Subject: [PATCH 125/401] feat(t/evaluator/hydra-eval-jobs): add basic evaluation test for hydra-eval-jobs --- t/evaluator/evaluate-oom-job.t | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 t/evaluator/evaluate-oom-job.t diff --git a/t/evaluator/evaluate-oom-job.t b/t/evaluator/evaluate-oom-job.t new file mode 100644 index 00000000..dd494c03 --- /dev/null +++ b/t/evaluator/evaluate-oom-job.t @@ -0,0 +1,25 @@ +use strict; +use 
warnings; +use Setup; +use Test2::V0; +use Hydra::Helper::Exec; + +my ($res, $stdout, $stderr) = captureStdoutStderr(60, + ( + "systemd-run", "--user", "--collect", "--scope", "--property", "MemoryMax=25M", "--", + "hydra-eval-jobs", + "-I", "/dev/zero", + "-I", "./t/jobs", + "./t/jobs/oom.nix" + ) +); + +isnt($res, 0, "hydra-eval-jobs exits non-zero"); +ok(utf8::decode($stderr), "Stderr output is UTF8-clean"); +like( + $stderr, + qr/^child process \(\d+?\) killed by signal=9$/m, + "The stderr record includes a relevant error message" +); + +done_testing; From 01ec004108177c0077f83ac824f92672fd871678 Mon Sep 17 00:00:00 2001 From: Kayla Firestack Date: Mon, 2 May 2022 14:08:50 -0400 Subject: [PATCH 126/401] feat(t/evaluator/evaluate-oom-job): skip test if systemd-run is not present --- t/evaluator/evaluate-oom-job.t | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/t/evaluator/evaluate-oom-job.t b/t/evaluator/evaluate-oom-job.t index dd494c03..bf8f214f 100644 --- a/t/evaluator/evaluate-oom-job.t +++ b/t/evaluator/evaluate-oom-job.t @@ -4,6 +4,14 @@ use Setup; use Test2::V0; use Hydra::Helper::Exec; +my ($systemdrRes) = captureStdoutStderr(3, ( + "systemd-run", "--user", "--collect", "--scope", "--property", "MemoryMax=25M", "--", + "true" +)); + +skip_all("systemd-run does not work in this environment") if($systemdrRes != 0); + + my ($res, $stdout, $stderr) = captureStdoutStderr(60, ( "systemd-run", "--user", "--collect", "--scope", "--property", "MemoryMax=25M", "--", From e917d9e54662da32d5b4d9a1ff950a05cb7da5c1 Mon Sep 17 00:00:00 2001 From: Kayla Firestack Date: Mon, 2 May 2022 14:40:13 -0400 Subject: [PATCH 127/401] fix(t/evaluator/evaluate-oom): convert systemd-run presence check to eval, fix indentaion, show relationships between flags and commands with indentation --- t/evaluator/evaluate-oom-job.t | 49 +++++++++++++++++++++------------- 1 file changed, 30 insertions(+), 19 deletions(-) diff --git a/t/evaluator/evaluate-oom-job.t 
b/t/evaluator/evaluate-oom-job.t index bf8f214f..8d2264c0 100644 --- a/t/evaluator/evaluate-oom-job.t +++ b/t/evaluator/evaluate-oom-job.t @@ -4,30 +4,41 @@ use Setup; use Test2::V0; use Hydra::Helper::Exec; -my ($systemdrRes) = captureStdoutStderr(3, ( - "systemd-run", "--user", "--collect", "--scope", "--property", "MemoryMax=25M", "--", - "true" +eval { + captureStdoutStderr(3, ( + "systemd-run", + "--user", + "--collect", + "--scope", + "--property", + "MemoryMax=25M", + "--", + "true" + )); +} or do { + skip_all("systemd-run does not work in this environment"); +}; + +my ($res, $stdout, $stderr) = captureStdoutStderr(60, ( + "systemd-run", + "--user", + "--collect", + "--scope", + "--property", + "MemoryMax=25M", + "--", + "hydra-eval-jobs", + "-I", "/dev/zero", + "-I", "./t/jobs", + "./t/jobs/oom.nix" )); -skip_all("systemd-run does not work in this environment") if($systemdrRes != 0); - - -my ($res, $stdout, $stderr) = captureStdoutStderr(60, - ( - "systemd-run", "--user", "--collect", "--scope", "--property", "MemoryMax=25M", "--", - "hydra-eval-jobs", - "-I", "/dev/zero", - "-I", "./t/jobs", - "./t/jobs/oom.nix" - ) -); - isnt($res, 0, "hydra-eval-jobs exits non-zero"); ok(utf8::decode($stderr), "Stderr output is UTF8-clean"); like( - $stderr, - qr/^child process \(\d+?\) killed by signal=9$/m, - "The stderr record includes a relevant error message" + $stderr, + qr/^child process \(\d+?\) killed by signal=9$/m, + "The stderr record includes a relevant error message" ); done_testing; From 013a1dcabc7d4bf06340ab23d6dbfd14a783fdcb Mon Sep 17 00:00:00 2001 From: Kayla Firestack Date: Mon, 2 May 2022 15:13:59 -0400 Subject: [PATCH 128/401] fix(t/evaluator/evaluate-oom): check that the exit value of the `systemd-run` check is zero. 
Rework skip messages --- t/evaluator/evaluate-oom-job.t | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/t/evaluator/evaluate-oom-job.t b/t/evaluator/evaluate-oom-job.t index 8d2264c0..7a527825 100644 --- a/t/evaluator/evaluate-oom-job.t +++ b/t/evaluator/evaluate-oom-job.t @@ -4,8 +4,9 @@ use Setup; use Test2::V0; use Hydra::Helper::Exec; +my $sd_res; eval { - captureStdoutStderr(3, ( + ($sd_res) = captureStdoutStderr(3, ( "systemd-run", "--user", "--collect", @@ -16,8 +17,9 @@ eval { "true" )); } or do { - skip_all("systemd-run does not work in this environment"); + skip_all("`systemd-run` failed when invoked in this environment"); }; +if ($sd_res != 0) { skip_all("`systemd-run` returned non-zero when executing `true` (expected 0)"); } my ($res, $stdout, $stderr) = captureStdoutStderr(60, ( "systemd-run", From 87f610e7c18b85c77098b1530b1d015b935710ab Mon Sep 17 00:00:00 2001 From: Kayla Firestack Date: Mon, 2 May 2022 15:14:46 -0400 Subject: [PATCH 129/401] fix(t/evaluator/evaluate-oom): use `test_context` to get path to ./t/jobs instead of relative paths --- t/evaluator/evaluate-oom-job.t | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/t/evaluator/evaluate-oom-job.t b/t/evaluator/evaluate-oom-job.t index 7a527825..8c8c5f60 100644 --- a/t/evaluator/evaluate-oom-job.t +++ b/t/evaluator/evaluate-oom-job.t @@ -21,6 +21,8 @@ eval { }; if ($sd_res != 0) { skip_all("`systemd-run` returned non-zero when executing `true` (expected 0)"); } +my $ctx = test_context(); + my ($res, $stdout, $stderr) = captureStdoutStderr(60, ( "systemd-run", "--user", @@ -31,11 +33,11 @@ my ($res, $stdout, $stderr) = captureStdoutStderr(60, ( "--", "hydra-eval-jobs", "-I", "/dev/zero", - "-I", "./t/jobs", - "./t/jobs/oom.nix" + "-I", $ctx->jobsdir, + ($ctx->jobsdir . 
"/oom.nix") )); -isnt($res, 0, "hydra-eval-jobs exits non-zero"); +isnt($res, 0, "`hydra-eval-jobs` exits non-zero"); ok(utf8::decode($stderr), "Stderr output is UTF8-clean"); like( $stderr, From 065039beba4aa8fc998762b145aa6176daa44522 Mon Sep 17 00:00:00 2001 From: Kayla Firestack Date: Mon, 2 May 2022 15:26:26 -0400 Subject: [PATCH 130/401] feat(t/evaluator/evaluate-oom): comment intentions --- t/evaluator/evaluate-oom-job.t | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/t/evaluator/evaluate-oom-job.t b/t/evaluator/evaluate-oom-job.t index 8c8c5f60..6c17d4e4 100644 --- a/t/evaluator/evaluate-oom-job.t +++ b/t/evaluator/evaluate-oom-job.t @@ -4,6 +4,10 @@ use Setup; use Test2::V0; use Hydra::Helper::Exec; +# Ensure that `systemd-run` is +# - Available in the PATH/envionment +# - Accessable to the user executing it +# - Capable of using the command switches we use in our test my $sd_res; eval { ($sd_res) = captureStdoutStderr(3, ( @@ -17,12 +21,21 @@ eval { "true" )); } or do { + # The command failed to execute, likely because `systemd-run` is not present + # in `PATH` skip_all("`systemd-run` failed when invoked in this environment"); }; -if ($sd_res != 0) { skip_all("`systemd-run` returned non-zero when executing `true` (expected 0)"); } +if ($sd_res != 0) { + # `systemd-run` executed but `sytemd-run` failed to call `true` and return + # successfully + skip_all("`systemd-run` returned non-zero when executing `true` (expected 0)"); +} my $ctx = test_context(); +# Contain the memory usage to 25 MegaBytes using `systemd-run` +# Run `hydra-eval-jobs` on test job that will purposefully consume all memory +# available my ($res, $stdout, $stderr) = captureStdoutStderr(60, ( "systemd-run", "--user", @@ -41,6 +54,8 @@ isnt($res, 0, "`hydra-eval-jobs` exits non-zero"); ok(utf8::decode($stderr), "Stderr output is UTF8-clean"); like( $stderr, + # Assert error log contains messages added in PR + # https://github.com/NixOS/hydra/pull/1203 
qr/^child process \(\d+?\) killed by signal=9$/m, "The stderr record includes a relevant error message" ); From 3c71be5b5b9b8fd4739c925ab46642f6121ec218 Mon Sep 17 00:00:00 2001 From: Ulrik Strid Date: Wed, 18 May 2022 08:14:00 +0200 Subject: [PATCH 131/401] GithubPulls: Don't fail on missing `Link` --- src/lib/Hydra/Plugin/GithubPulls.pm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib/Hydra/Plugin/GithubPulls.pm b/src/lib/Hydra/Plugin/GithubPulls.pm index db0e8d25..9d8412c3 100644 --- a/src/lib/Hydra/Plugin/GithubPulls.pm +++ b/src/lib/Hydra/Plugin/GithubPulls.pm @@ -30,7 +30,7 @@ sub _iterate { $pulls->{$pull->{number}} = $pull; } # TODO Make Link header parsing more robust!!! - my @links = split ',', $res->header("Link"); + my @links = split ',', ($res->header("Link") // ""); my $next = ""; foreach my $link (@links) { my ($url, $rel) = split ";", $link; From a8b590014b4b3f59aaf8eec3465f600b5899b2fd Mon Sep 17 00:00:00 2001 From: Maximilian Bosch Date: Sun, 22 May 2022 14:14:14 +0200 Subject: [PATCH 132/401] Fix email notifications for jobsets w/git-inputs I started to wonder quite recently why Hydra doesn't send email notifications anymore to me. 
I saw the following issue in the log of `hydra-notify.service`: May 22 11:57:29 hydra 9bik0bxyxbrklhx6lqwifd6af8kj84va-hydra-notify[1887289]: fatal: unsafe repository ('/var/lib/hydra/scm/git/3e70c16c266ef70dc4198705a688acccf71e932878f178277c9ac47d133cc663' is owned by someone else) May 22 11:57:29 hydra 9bik0bxyxbrklhx6lqwifd6af8kj84va-hydra-notify[1887289]: To add an exception for this directory, call: May 22 11:57:29 hydra 9bik0bxyxbrklhx6lqwifd6af8kj84va-hydra-notify[1887289]: git config --global --add safe.directory /var/lib/hydra/scm/git/3e70c16c266ef70dc4198705a688acccf71e932878f178277c9ac47d133cc663 May 22 11:57:29 hydra 9bik0bxyxbrklhx6lqwifd6af8kj84va-hydra-notify[1886654]: error running build_finished hooks: command `git log --pretty=format:%H%x09%an%x09%ae%x09%at b0c30a7557685d25a8ab3f34fdb775e66db0bc4c..eaf28389fcebc2beca13a802f79b2cca6e9ca309 --git-dir=.git' failed with e> This is also a problem because of Git's fix for CVE-2022-24765[1], so I applied the same fix as for Nix[2], by using `--git-dir` which skips the code-path for the ownership-check[3]. [1] https://lore.kernel.org/git/xmqqv8veb5i6.fsf@gitster.g/ [2] https://github.com/NixOS/nix/pull/6440 [3] To quote `git(1)`: > Specifying the location of the ".git" directory using this option > (or GIT_DIR environment variable) turns off the repository > discovery that tries to find a directory with ".git" subdirectory --- src/lib/Hydra/Plugin/GitInput.pm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib/Hydra/Plugin/GitInput.pm b/src/lib/Hydra/Plugin/GitInput.pm index aca35c30..e5fc7de9 100644 --- a/src/lib/Hydra/Plugin/GitInput.pm +++ b/src/lib/Hydra/Plugin/GitInput.pm @@ -261,7 +261,7 @@ sub getCommits { my $clonePath = getSCMCacheDir . "/git/" . 
sha256_hex($uri); - my $out = grab(cmd => ["git", "log", "--pretty=format:%H%x09%an%x09%ae%x09%at", "$rev1..$rev2"], dir => $clonePath); + my $out = grab(cmd => ["git", "--git-dir=.git", "log", "--pretty=format:%H%x09%an%x09%ae%x09%at", "$rev1..$rev2"], dir => $clonePath); my $res = []; foreach my $line (split /\n/, $out) { From b6ea85a601ddac9cb0716d8cb4d446439fa0778f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Josef=20Kemetm=C3=BCller?= Date: Fri, 27 May 2022 11:40:49 +0200 Subject: [PATCH 133/401] scmdiff: Hardcode `--git-dir` The newest version of git refuses to work on repositories not owned by the current user. This leads to issues with the /api/scmdiff endpoint: May 27 11:16:05 myhydra hydra-server[923698]: fatal: unsafe repository ('/var/lib/hydra/scm/git/57ea036ec7ecd85c8dd085e02ecc6f12dd5c079a6203d16aea49f586cadfb2be' is owned by someone else) May 27 11:16:05 myhydra hydra-server[923698]: To add an exception for this directory, call: May 27 11:16:05 myhydra hydra-server[923698]: git config --global --add safe.directory /var/lib/hydra/scm/git/57ea036ec7ecd85c8dd085e02ecc6f12dd5c079a6203d16aea49f586cadfb2be May 27 11:16:05 myhydra hydra-server[923701]: warning: Not a git repository. Use --no-index to compare two paths outside a working tree May 27 11:16:05 myhydra hydra-server[923701]: usage: git diff --no-index [] I used the same solution that was used in NixOS/nix#6440. Fixes #1214 --- src/lib/Hydra/Controller/API.pm | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib/Hydra/Controller/API.pm b/src/lib/Hydra/Controller/API.pm index 6f10ef57..8ebed599 100644 --- a/src/lib/Hydra/Controller/API.pm +++ b/src/lib/Hydra/Controller/API.pm @@ -216,8 +216,8 @@ sub scmdiff : Path('/api/scmdiff') Args(0) { } elsif ($type eq "git") { my $clonePath = getSCMCacheDir . "/git/" . sha256_hex($uri); die if ! 
-d $clonePath; - $diff .= `(cd $clonePath; git log $rev1..$rev2)`; - $diff .= `(cd $clonePath; git diff $rev1..$rev2)`; + $diff .= `(cd $clonePath; git --git-dir .git log $rev1..$rev2)`; + $diff .= `(cd $clonePath; git --git-dir .git diff $rev1..$rev2)`; } $c->stash->{'plain'} = { data => (scalar $diff) || " " }; From 5c01800fbe49939c53566457047283eb5c93f51b Mon Sep 17 00:00:00 2001 From: Maximilian Bosch Date: Thu, 16 Jun 2022 14:54:57 +0200 Subject: [PATCH 134/401] flake: Update Nix to 2.9.1 NOTE: I'm well-aware that we have to be careful with this to avoid new regressions on hydra.nixos.org, so this should only be merged after extensive testing from more people. Motivation: I updated Nix in my deployment to 2.9.1 and decided to also update Hydra in one go (and compile it against the newer Nix). Given that this also updates the C++ code in `hydra-{queue-runner,eval-jobs}` this patch might become useful in the future though. --- flake.lock | 12 +++++++----- flake.nix | 2 +- src/hydra-eval-jobs/Makefile.am | 2 +- src/hydra-eval-jobs/hydra-eval-jobs.cc | 10 +++++----- src/hydra-queue-runner/queue-monitor.cc | 2 +- 5 files changed, 15 insertions(+), 13 deletions(-) diff --git a/flake.lock b/flake.lock index 5c726a24..1310c53d 100644 --- a/flake.lock +++ b/flake.lock @@ -39,16 +39,18 @@ "nixpkgs-regression": "nixpkgs-regression" }, "locked": { - "lastModified": 1649172203, - "narHash": "sha256-Q3nYaXqbseDOvZrlePKeIrx0/KzqyrtNpxHIUbtFHuI=", + "lastModified": 1654014617, + "narHash": "sha256-qNL3lQPBsnStkru3j1ajN/H+knXI+X3dku8/dBfSw3g=", "owner": "NixOS", "repo": "nix", - "rev": "5fe4fe823c193cbb7bfa05a468de91eeab09058d", + "rev": "624e38aa43f304fbb78b4779172809add042b513", "type": "github" }, "original": { - "id": "nix", - "type": "indirect" + "owner": "NixOS", + "ref": "2.9.1", + "repo": "nix", + "type": "github" } }, "nixpkgs": { diff --git a/flake.nix b/flake.nix index 01b0c988..794555ec 100644 --- a/flake.nix +++ b/flake.nix @@ -5,7 +5,7 @@ # even 2.7.0's Nixpkgs 
pin). inputs.newNixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable-small"; inputs.nixpkgs.follows = "nix/nixpkgs"; - #inputs.nix.url = github:NixOS/nix/2.7.0; + inputs.nix.url = github:NixOS/nix/2.9.1; outputs = { self, newNixpkgs, nixpkgs, nix }: let diff --git a/src/hydra-eval-jobs/Makefile.am b/src/hydra-eval-jobs/Makefile.am index 7a4e9c91..90742a30 100644 --- a/src/hydra-eval-jobs/Makefile.am +++ b/src/hydra-eval-jobs/Makefile.am @@ -1,5 +1,5 @@ bin_PROGRAMS = hydra-eval-jobs hydra_eval_jobs_SOURCES = hydra-eval-jobs.cc -hydra_eval_jobs_LDADD = $(NIX_LIBS) +hydra_eval_jobs_LDADD = $(NIX_LIBS) -lnixcmd hydra_eval_jobs_CXXFLAGS = $(NIX_CFLAGS) -I ../libhydra diff --git a/src/hydra-eval-jobs/hydra-eval-jobs.cc b/src/hydra-eval-jobs/hydra-eval-jobs.cc index 918bd451..18d39620 100644 --- a/src/hydra-eval-jobs/hydra-eval-jobs.cc +++ b/src/hydra-eval-jobs/hydra-eval-jobs.cc @@ -197,21 +197,21 @@ static void worker( /* If this is an aggregate, then get its constituents. */ auto a = v->attrs->get(state.symbols.create("_hydraAggregate")); - if (a && state.forceBool(*a->value, *a->pos)) { + if (a && state.forceBool(*a->value, a->pos)) { auto a = v->attrs->get(state.symbols.create("constituents")); if (!a) throw EvalError("derivation must have a ‘constituents’ attribute"); PathSet context; - state.coerceToString(*a->pos, *a->value, context, true, false); + state.coerceToString(a->pos, *a->value, context, true, false); for (auto & i : context) if (i.at(0) == '!') { size_t index = i.find("!", 1); job["constituents"].push_back(std::string(i, index + 1)); } - state.forceList(*a->value, *a->pos); + state.forceList(*a->value, a->pos); for (unsigned int n = 0; n < a->value->listSize(); ++n) { auto v = a->value->listElems()[n]; state.forceValue(*v, noPos); @@ -243,8 +243,8 @@ static void worker( else if (v->type() == nAttrs) { auto attrs = nlohmann::json::array(); StringSet ss; - for (auto & i : v->attrs->lexicographicOrder()) { - std::string name(i->name); + for (auto & i : 
v->attrs->lexicographicOrder(state.symbols)) { + std::string name(state.symbols[i->name]); if (name.find('.') != std::string::npos || name.find(' ') != std::string::npos) { printError("skipping job with illegal name '%s'", name); continue; diff --git a/src/hydra-queue-runner/queue-monitor.cc b/src/hydra-queue-runner/queue-monitor.cc index 3bde0d99..12d55b79 100644 --- a/src/hydra-queue-runner/queue-monitor.cc +++ b/src/hydra-queue-runner/queue-monitor.cc @@ -460,7 +460,7 @@ Step::ptr State::createStep(ref destStore, step->parsedDrv = std::make_unique(drvPath, *step->drv); step->preferLocalBuild = step->parsedDrv->willBuildLocally(*localStore); - step->isDeterministic = get(step->drv->env, "isDetermistic").value_or("0") == "1"; + step->isDeterministic = getOr(step->drv->env, "isDetermistic", "0") == "1"; step->systemType = step->drv->platform; { From 183f58ca9f10a0a6a18e42c75168145a1de864df Mon Sep 17 00:00:00 2001 From: Sandro Date: Thu, 16 Jun 2022 22:48:39 +0200 Subject: [PATCH 135/401] Remove url literal --- flake.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flake.nix b/flake.nix index 794555ec..2e891364 100644 --- a/flake.nix +++ b/flake.nix @@ -5,7 +5,7 @@ # even 2.7.0's Nixpkgs pin). 
inputs.newNixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable-small"; inputs.nixpkgs.follows = "nix/nixpkgs"; - inputs.nix.url = github:NixOS/nix/2.9.1; + inputs.nix.url = "github:NixOS/nix/2.9.1"; outputs = { self, newNixpkgs, nixpkgs, nix }: let From 750978a19232583e17620a1bd80435e957e7213a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sandro=20J=C3=A4ckel?= Date: Sat, 18 Jun 2022 13:22:42 +0200 Subject: [PATCH 136/401] Add gitea push hook --- doc/manual/src/webhooks.md | 20 +++++++++++++++++--- src/lib/Hydra/Controller/API.pm | 16 ++++++++++++++++ src/lib/Hydra/Controller/Root.pm | 3 ++- 3 files changed, 35 insertions(+), 4 deletions(-) diff --git a/doc/manual/src/webhooks.md b/doc/manual/src/webhooks.md index 2b26cd61..674e1064 100644 --- a/doc/manual/src/webhooks.md +++ b/doc/manual/src/webhooks.md @@ -1,9 +1,12 @@ # Webhooks -Hydra can be notified by github's webhook to trigger a new evaluation when a +Hydra can be notified by github or gitea with webhooks to trigger a new evaluation when a jobset has a github repo in its input. -To set up a github webhook go to `https://github.com///settings` and in the `Webhooks` tab -click on `Add webhook`. + +## GitHub + +To set up a webhook for a GitHub repository go to `https://github.com///settings` +and in the `Webhooks` tab click on `Add webhook`. - In `Payload URL` fill in `https:///api/push-github`. - In `Content type` switch to `application/json`. @@ -11,3 +14,14 @@ click on `Add webhook`. - For `Which events would you like to trigger this webhook?` keep the default option for events on `Just the push event.`. Then add the hook with `Add webhook`. + +## Gitea + +To set up a webhook for a Gitea repository go to the settings of the repository in your Gitea instance +and in the `Webhooks` tab click on `Add Webhook` and choose `Gitea` in the drop down. + +- In `Target URL` fill in `https:///api/push-gitea`. +- Keep HTTP method `POST`, POST Content Type `application/json` and Trigger On `Push Events`. 
+- Change the branch filter to match the git branch hydra builds. + +Then add the hook with `Add webhook`. diff --git a/src/lib/Hydra/Controller/API.pm b/src/lib/Hydra/Controller/API.pm index 6f10ef57..12073595 100644 --- a/src/lib/Hydra/Controller/API.pm +++ b/src/lib/Hydra/Controller/API.pm @@ -285,6 +285,22 @@ sub push_github : Chained('api') PathPart('push-github') Args(0) { $c->response->body(""); } +sub push_gitea : Chained('api') PathPart('push-gitea') Args(0) { + my ($self, $c) = @_; + + $c->{stash}->{json}->{jobsetsTriggered} = []; + + my $in = $c->request->{data}; + my $url = $in->{repository}->{clone_url} or die; + print STDERR "got push from Gitea repository $url\n"; + + triggerJobset($self, $c, $_, 0) foreach $c->model('DB::Jobsets')->search( + { 'project.enabled' => 1, 'me.enabled' => 1 }, + { join => 'project' + , where => \ [ 'me.flake like ? or exists (select 1 from JobsetInputAlts where project = me.project and jobset = me.name and value like ?)', [ 'flake', "%$url%"], [ 'value', "%$url%" ] ] + }); + $c->response->body(""); +} 1; diff --git a/src/lib/Hydra/Controller/Root.pm b/src/lib/Hydra/Controller/Root.pm index c6843d29..1b33db2a 100644 --- a/src/lib/Hydra/Controller/Root.pm +++ b/src/lib/Hydra/Controller/Root.pm @@ -32,6 +32,7 @@ sub noLoginNeeded { return $whitelisted || $c->request->path eq "api/push-github" || + $c->request->path eq "api/push-gitea" || $c->request->path eq "google-login" || $c->request->path eq "github-redirect" || $c->request->path eq "github-login" || @@ -77,7 +78,7 @@ sub begin :Private { $_->supportedInputTypes($c->stash->{inputTypes}) foreach @{$c->hydra_plugins}; # XSRF protection: require POST requests to have the same origin. 
- if ($c->req->method eq "POST" && $c->req->path ne "api/push-github") { + if ($c->req->method eq "POST" && $c->req->path ne "api/push-github" && $c->req->path ne "api/push-gitea") { my $referer = $c->req->header('Referer'); $referer //= $c->req->header('Origin'); my $base = $c->req->base; From bab671124df0bc43a521d708bf86eece908d33af Mon Sep 17 00:00:00 2001 From: ajs124 Date: Thu, 30 Jun 2022 00:24:09 +0200 Subject: [PATCH 137/401] replace nix cat-store with nix store cat the former was deprecated in favor of the latter --- src/lib/Hydra/Controller/Build.pm | 4 ++-- src/lib/Hydra/Helper/Nix.pm | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/lib/Hydra/Controller/Build.pm b/src/lib/Hydra/Controller/Build.pm index 552f31af..c7811c62 100644 --- a/src/lib/Hydra/Controller/Build.pm +++ b/src/lib/Hydra/Controller/Build.pm @@ -234,7 +234,7 @@ sub serveFile { elsif ($ls->{type} eq "regular") { $c->stash->{'plain'} = { data => grab(cmd => ["nix", "--experimental-features", "nix-command", - "cat-store", "--store", getStoreUri(), "$path"]) }; + "store", "cat", "--store", getStoreUri(), "$path"]) }; # Detect MIME type. Borrowed from Catalyst::Plugin::Static::Simple. my $type = "text/plain"; @@ -366,7 +366,7 @@ sub contents : Chained('buildChain') PathPart Args(1) { # FIXME: don't use shell invocations below. 
- # FIXME: use nix cat-store + # FIXME: use nix store cat my $res; diff --git a/src/lib/Hydra/Helper/Nix.pm b/src/lib/Hydra/Helper/Nix.pm index 514fb439..71a8a7d7 100644 --- a/src/lib/Hydra/Helper/Nix.pm +++ b/src/lib/Hydra/Helper/Nix.pm @@ -537,7 +537,7 @@ sub getStoreUri { sub readNixFile { my ($path) = @_; return grab(cmd => ["nix", "--experimental-features", "nix-command", - "cat-store", "--store", getStoreUri(), "$path"]); + "store", "cat", "--store", getStoreUri(), "$path"]); } From bb1f04ed8669cc012fc552a40a376f3b93228833 Mon Sep 17 00:00:00 2001 From: ajs124 Date: Thu, 30 Jun 2022 00:32:31 +0200 Subject: [PATCH 138/401] AddBuilds: fix declarative jobsets with dynamic runcommand enabled $project->{enable_dynamic_run_command} is undefined --- src/lib/Hydra/Helper/AddBuilds.pm | 2 +- t/Helper/AddBuilds/dynamic-disabled.t | 36 +++++++++++++++++++++------ t/Helper/AddBuilds/dynamic-enabled.t | 36 +++++++++++++++++++++------ 3 files changed, 59 insertions(+), 15 deletions(-) diff --git a/src/lib/Hydra/Helper/AddBuilds.pm b/src/lib/Hydra/Helper/AddBuilds.pm index 9e3ddfd2..a6373be5 100644 --- a/src/lib/Hydra/Helper/AddBuilds.pm +++ b/src/lib/Hydra/Helper/AddBuilds.pm @@ -67,7 +67,7 @@ sub validateDeclarativeJobset { my $enable_dynamic_run_command = defined $update{enable_dynamic_run_command} ? 
1 : 0; if ($enable_dynamic_run_command && !($config->{dynamicruncommand}->{enable} - && $project->{enable_dynamic_run_command})) + && $project->enable_dynamic_run_command)) { die "Dynamic RunCommand is not enabled by the server or the parent project."; } diff --git a/t/Helper/AddBuilds/dynamic-disabled.t b/t/Helper/AddBuilds/dynamic-disabled.t index 0507b03e..0c91f382 100644 --- a/t/Helper/AddBuilds/dynamic-disabled.t +++ b/t/Helper/AddBuilds/dynamic-disabled.t @@ -6,11 +6,31 @@ use Test2::V0; require Catalyst::Test; use HTTP::Request::Common qw(POST PUT GET DELETE); use JSON::MaybeXS qw(decode_json encode_json); -use Hydra::Helper::AddBuilds qw(validateDeclarativeJobset); -use Hydra::Helper::Nix qw(getHydraConfig); my $ctx = test_context(); +Catalyst::Test->import('Hydra'); + +my $db = Hydra::Model::DB->new; +hydra_setup($db); + +my $user = $db->resultset('Users')->create({ username => 'alice', emailaddress => 'root@invalid.org', password => '!' }); +$user->setPassword('foobar'); +$user->userroles->update_or_create({ role => 'admin' }); + +my $project_with_dynamic_run_command = $db->resultset('Projects')->create({ + name => 'tests_with_dynamic_runcommand', + displayname => 'Tests with dynamic runcommand', + owner => 'alice', + enable_dynamic_run_command => 1, +}); +my $project_without_dynamic_run_command = $db->resultset('Projects')->create({ + name => 'tests_without_dynamic_runcommand', + displayname => 'Tests without dynamic runcommand', + owner => 'alice', + enable_dynamic_run_command => 0, +}); + sub makeJobsetSpec { my ($dynamic) = @_; @@ -29,14 +49,16 @@ sub makeJobsetSpec { }; subtest "validate declarative jobset with dynamic RunCommand disabled by server" => sub { - my $config = getHydraConfig(); + my $config = Hydra::Helper::Nix->getHydraConfig(); + require Hydra::Helper::AddBuilds; + Hydra::Helper::AddBuilds->import( qw(validateDeclarativeJobset) ); subtest "project enabled dynamic runcommand, declarative jobset enabled dynamic runcommand" => sub { like( 
dies { validateDeclarativeJobset( $config, - { enable_dynamic_run_command => 1 }, + $project_with_dynamic_run_command, "test-jobset", makeJobsetSpec(JSON::MaybeXS::true), ), @@ -49,7 +71,7 @@ subtest "validate declarative jobset with dynamic RunCommand disabled by server" ok( validateDeclarativeJobset( $config, - { enable_dynamic_run_command => 1 }, + $project_with_dynamic_run_command, "test-jobset", makeJobsetSpec(JSON::MaybeXS::false) ), @@ -61,7 +83,7 @@ subtest "validate declarative jobset with dynamic RunCommand disabled by server" dies { validateDeclarativeJobset( $config, - { enable_dynamic_run_command => 0 }, + $project_without_dynamic_run_command, "test-jobset", makeJobsetSpec(JSON::MaybeXS::true), ), @@ -74,7 +96,7 @@ subtest "validate declarative jobset with dynamic RunCommand disabled by server" ok( validateDeclarativeJobset( $config, - { enable_dynamic_run_command => 0 }, + $project_without_dynamic_run_command, "test-jobset", makeJobsetSpec(JSON::MaybeXS::false) ), diff --git a/t/Helper/AddBuilds/dynamic-enabled.t b/t/Helper/AddBuilds/dynamic-enabled.t index d2f5a386..46497bed 100644 --- a/t/Helper/AddBuilds/dynamic-enabled.t +++ b/t/Helper/AddBuilds/dynamic-enabled.t @@ -6,8 +6,6 @@ use Test2::V0; require Catalyst::Test; use HTTP::Request::Common qw(POST PUT GET DELETE); use JSON::MaybeXS qw(decode_json encode_json); -use Hydra::Helper::AddBuilds qw(validateDeclarativeJobset); -use Hydra::Helper::Nix qw(getHydraConfig); my $ctx = test_context( hydra_config => q| @@ -17,6 +15,28 @@ my $ctx = test_context( | ); +Catalyst::Test->import('Hydra'); + +my $db = Hydra::Model::DB->new; +hydra_setup($db); + +my $user = $db->resultset('Users')->create({ username => 'alice', emailaddress => 'root@invalid.org', password => '!' 
}); +$user->setPassword('foobar'); +$user->userroles->update_or_create({ role => 'admin' }); + +my $project_with_dynamic_run_command = $db->resultset('Projects')->create({ + name => 'tests_with_dynamic_runcommand', + displayname => 'Tests with dynamic runcommand', + owner => 'alice', + enable_dynamic_run_command => 1, +}); +my $project_without_dynamic_run_command = $db->resultset('Projects')->create({ + name => 'tests_without_dynamic_runcommand', + displayname => 'Tests without dynamic runcommand', + owner => 'alice', + enable_dynamic_run_command => 0, +}); + sub makeJobsetSpec { my ($dynamic) = @_; @@ -35,13 +55,15 @@ sub makeJobsetSpec { }; subtest "validate declarative jobset with dynamic RunCommand enabled by server" => sub { - my $config = getHydraConfig(); + my $config = Hydra::Helper::Nix->getHydraConfig(); + require Hydra::Helper::AddBuilds; + Hydra::Helper::AddBuilds->import( qw(validateDeclarativeJobset) ); subtest "project enabled dynamic runcommand, declarative jobset enabled dynamic runcommand" => sub { ok( validateDeclarativeJobset( $config, - { enable_dynamic_run_command => 1 }, + $project_with_dynamic_run_command, "test-jobset", makeJobsetSpec(JSON::MaybeXS::true) ), @@ -52,7 +74,7 @@ subtest "validate declarative jobset with dynamic RunCommand enabled by server" ok( validateDeclarativeJobset( $config, - { enable_dynamic_run_command => 1 }, + $project_with_dynamic_run_command, "test-jobset", makeJobsetSpec(JSON::MaybeXS::false) ), @@ -64,7 +86,7 @@ subtest "validate declarative jobset with dynamic RunCommand enabled by server" dies { validateDeclarativeJobset( $config, - { enable_dynamic_run_command => 0 }, + $project_without_dynamic_run_command, "test-jobset", makeJobsetSpec(JSON::MaybeXS::true), ), @@ -77,7 +99,7 @@ subtest "validate declarative jobset with dynamic RunCommand enabled by server" ok( validateDeclarativeJobset( $config, - { enable_dynamic_run_command => 0 }, + $project_without_dynamic_run_command, "test-jobset", 
makeJobsetSpec(JSON::MaybeXS::false) ), From a81c6a3a80d1055aa80934ab229e2dc49594edd2 Mon Sep 17 00:00:00 2001 From: Sandro Date: Fri, 1 Jul 2022 22:21:32 +0200 Subject: [PATCH 139/401] Match URIs that don't end in .git Co-authored-by: Charlotte --- src/lib/Hydra/Controller/API.pm | 1 + 1 file changed, 1 insertion(+) diff --git a/src/lib/Hydra/Controller/API.pm b/src/lib/Hydra/Controller/API.pm index 12073595..5eeb0c04 100644 --- a/src/lib/Hydra/Controller/API.pm +++ b/src/lib/Hydra/Controller/API.pm @@ -292,6 +292,7 @@ sub push_gitea : Chained('api') PathPart('push-gitea') Args(0) { my $in = $c->request->{data}; my $url = $in->{repository}->{clone_url} or die; + $url =~ s/.git$//; print STDERR "got push from Gitea repository $url\n"; triggerJobset($self, $c, $_, 0) foreach $c->model('DB::Jobsets')->search( From 38e033e7ceb8fecea9fe3d8c621e77d1af884948 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Janne=20He=C3=9F?= Date: Sun, 10 Jul 2022 13:31:21 +0200 Subject: [PATCH 140/401] Remove yet another URL literal --- shell.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shell.nix b/shell.nix index 9e967032..1ad58f49 100644 --- a/shell.nix +++ b/shell.nix @@ -1,6 +1,6 @@ # The `default.nix` in flake-compat reads `flake.nix` and `flake.lock` from `src` and # returns an attribute set of the shape `{ defaultNix, shellNix }` -(import (fetchTarball https://github.com/edolstra/flake-compat/archive/master.tar.gz) { +(import (fetchTarball "https://github.com/edolstra/flake-compat/archive/master.tar.gz") { src = ./.; }).shellNix From 8dd1daac8a9bd1a5b4d8f37969c344d5ef593c6e Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 12 Jul 2022 12:14:50 +0200 Subject: [PATCH 141/401] Update to Nix 2.10 --- flake.nix | 28 +++++++++++++--------------- hydra-module.nix | 2 +- 2 files changed, 14 insertions(+), 16 deletions(-) diff --git a/flake.nix b/flake.nix index 2e891364..36863913 100644 --- a/flake.nix +++ b/flake.nix @@ -5,7 +5,7 @@ # even 2.7.0's Nixpkgs pin). 
inputs.newNixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable-small"; inputs.nixpkgs.follows = "nix/nixpkgs"; - inputs.nix.url = "github:NixOS/nix/2.9.1"; + inputs.nix.url = "github:NixOS/nix/2.10.0"; outputs = { self, newNixpkgs, nixpkgs, nix }: let @@ -14,7 +14,7 @@ pkgs = import nixpkgs { system = "x86_64-linux"; - overlays = [ self.overlay nix.overlay ]; + overlays = [ self.overlay nix.overlays.default ]; }; # NixOS configuration used for VM tests. @@ -332,7 +332,7 @@ url = "mirror://cpan/authors/id/A/AA/AAR/Net-LDAP-Server-0.43.tar.gz"; sha256 = "0qmh3cri3fpccmwz6bhwp78yskrb3qmalzvqn0a23hqbsfs4qv6x"; }; - propagatedBuildInputs = with final.perlPackages; [ NetLDAP ConvertASN1 ]; + propagatedBuildInputs = with final.perlPackages; [ perlldap ConvertASN1 ]; meta = { description = "LDAP server side protocol handling"; license = with final.lib.licenses; [ artistic1 ]; @@ -359,7 +359,7 @@ url = "mirror://cpan/authors/id/K/KA/KARMAN/Net-LDAP-Server-Test-0.22.tar.gz"; sha256 = "13idip7jky92v4adw60jn2gcc3zf339gsdqlnc9nnvqzbxxp285i"; }; - propagatedBuildInputs = with final.perlPackages; [ NetLDAP NetLDAPServer TestMore DataDump NetLDAPSID ]; + propagatedBuildInputs = with final.perlPackages; [ perlldap NetLDAPServer DataDump NetLDAPSID ]; meta = { description = "test Net::LDAP code"; license = with final.lib.licenses; [ artistic1 ]; @@ -373,8 +373,8 @@ url = "mirror://cpan/authors/id/I/IL/ILMARI/Catalyst-Authentication-Store-LDAP-1.016.tar.gz"; sha256 = "0cm399vxqqf05cjgs1j5v3sk4qc6nmws5nfhf52qvpbwc4m82mq8"; }; - propagatedBuildInputs = with final.perlPackages; [ NetLDAP CatalystPluginAuthentication ClassAccessorFast ]; - buildInputs = with final.perlPackages; [ TestMore TestMockObject TestException NetLDAPServerTest ]; + propagatedBuildInputs = with final.perlPackages; [ perlldap CatalystPluginAuthentication ClassAccessor ]; + buildInputs = with final.perlPackages; [ TestMockObject TestException NetLDAPServerTest ]; meta = { description = "Authentication from an LDAP 
Directory"; license = with final.lib.licenses; [ artistic1 ]; @@ -486,7 +486,6 @@ CatalystPluginSessionStateCookie CatalystPluginSessionStoreFastMmap CatalystPluginStackTrace - CatalystPluginUnicodeEncoding CatalystTraitForRequestProxyBase CatalystViewDownload CatalystViewJSON @@ -533,7 +532,6 @@ TermSizeAny TermReadKey Test2Harness - TestMore TestPostgreSQL TextDiff TextTable @@ -558,9 +556,9 @@ libtool unzip nukeReferences - pkgconfig + pkg-config libpqxx - gitAndTools.topGit + top-git mercurial darcs subversion @@ -585,7 +583,7 @@ cacert # FIXME: foreman is broken on all nix/nixpkgs pin, up to and # including 2.7.0 - newNixpkgs.legacyPackages.${final.system}.foreman + newNixpkgs.legacyPackages.${final.stdenv.system}.foreman glibcLocales libressl.nc openldap @@ -602,11 +600,11 @@ pixz gzip bzip2 - lzma + xz gnutar unzip git - gitAndTools.topGit + top-git mercurial darcs gnused @@ -661,7 +659,7 @@ dontStrip = true; - meta.description = "Build of Hydra on ${system}"; + meta.description = "Build of Hydra on ${final.stdenv.system}"; passthru = { inherit perlDeps; inherit (final) nix; }; }; }; @@ -966,7 +964,7 @@ nixosModules.hydra = { imports = [ ./hydra-module.nix ]; - nixpkgs.overlays = [ self.overlay nix.overlay ]; + nixpkgs.overlays = [ self.overlay nix.overlays.default ]; }; nixosModules.hydraTest = { diff --git a/hydra-module.nix b/hydra-module.nix index 0df5e690..f826ef36 100644 --- a/hydra-module.nix +++ b/hydra-module.nix @@ -268,7 +268,7 @@ in environment = env // { HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-init"; }; - path = [ pkgs.utillinux ]; + path = [ pkgs.util-linux ]; preStart = '' ln -sf ${hydraConf} ${baseDir}/hydra.conf From e06c480fd67d71fe1ab175c3f14e694f08a0cae0 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 12 Jul 2022 11:58:38 +0200 Subject: [PATCH 142/401] flake.lock: Update MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Flake lock file updates: • Updated input 'newNixpkgs': 
'github:NixOS/nixpkgs/6e3ee8957637a60f5072e33d78e05c0f65c54366' (2022-03-15) → 'github:NixOS/nixpkgs/de5b3dd17034e6106e75746e81618e5bd408de8a' (2022-07-10) • Updated input 'nix': 'github:NixOS/nix/624e38aa43f304fbb78b4779172809add042b513' (2022-05-31) → 'github:NixOS/nix/b9cf655150b52d071c85a337cb5db96e735fa64a' (2022-07-11) • Updated input 'nix/nixpkgs': 'github:NixOS/nixpkgs/530a53dcbc9437363471167a5e4762c5fcfa34a1' (2022-02-19) → 'github:NixOS/nixpkgs/2fa57ed190fd6c7c746319444f34b5917666e5c1' (2022-05-31) --- flake.lock | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/flake.lock b/flake.lock index 1310c53d..befc8e5b 100644 --- a/flake.lock +++ b/flake.lock @@ -18,11 +18,11 @@ }, "newNixpkgs": { "locked": { - "lastModified": 1647380550, - "narHash": "sha256-909TI9poX7CIUiFx203WL29YON6m/I6k0ExbZvR7bLM=", + "lastModified": 1657425264, + "narHash": "sha256-3aHvoI2e8vJKw3hvnHECaBpSsL5mxVsVtaLCnTdNcH8=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "6e3ee8957637a60f5072e33d78e05c0f65c54366", + "rev": "de5b3dd17034e6106e75746e81618e5bd408de8a", "type": "github" }, "original": { @@ -39,33 +39,34 @@ "nixpkgs-regression": "nixpkgs-regression" }, "locked": { - "lastModified": 1654014617, - "narHash": "sha256-qNL3lQPBsnStkru3j1ajN/H+knXI+X3dku8/dBfSw3g=", + "lastModified": 1657569404, + "narHash": "sha256-zJONRtGALmYifrWKzcH6MMfSKLxeuW2iqG13500OrY4=", "owner": "NixOS", "repo": "nix", - "rev": "624e38aa43f304fbb78b4779172809add042b513", + "rev": "b9cf655150b52d071c85a337cb5db96e735fa64a", "type": "github" }, "original": { "owner": "NixOS", - "ref": "2.9.1", + "ref": "2.10.0", "repo": "nix", "type": "github" } }, "nixpkgs": { "locked": { - "lastModified": 1645296114, - "narHash": "sha256-y53N7TyIkXsjMpOG7RhvqJFGDacLs9HlyHeSTBioqYU=", + "lastModified": 1653988320, + "narHash": "sha256-ZaqFFsSDipZ6KVqriwM34T739+KLYJvNmCWzErjAg7c=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "530a53dcbc9437363471167a5e4762c5fcfa34a1", + 
"rev": "2fa57ed190fd6c7c746319444f34b5917666e5c1", "type": "github" }, "original": { - "id": "nixpkgs", - "ref": "nixos-21.05-small", - "type": "indirect" + "owner": "NixOS", + "ref": "nixos-22.05-small", + "repo": "nixpkgs", + "type": "github" } }, "nixpkgs-regression": { @@ -78,9 +79,10 @@ "type": "github" }, "original": { - "id": "nixpkgs", + "owner": "NixOS", + "repo": "nixpkgs", "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "indirect" + "type": "github" } }, "root": { From c72bed5cb4f24853ebb7daaf26bb67d3cb4397f7 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 12 Jul 2022 14:28:55 +0200 Subject: [PATCH 143/401] Fix tests Use $NIX_REMOTE instead of the legacy environment variables. --- t/lib/HydraTestContext.pm | 16 ++++++++++------ t/queue-runner/notifications.t | 4 +--- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/t/lib/HydraTestContext.pm b/t/lib/HydraTestContext.pm index badb3728..53eaa0f7 100644 --- a/t/lib/HydraTestContext.pm +++ b/t/lib/HydraTestContext.pm @@ -53,16 +53,19 @@ sub new { my $hydra_config = $opts{'hydra_config'} || ""; $hydra_config = "queue_runner_metrics_address = 127.0.0.1:0\n" . $hydra_config; if ($opts{'use_external_destination_store'} // 1) { - $hydra_config = "store_uri = file:$dir/nix/dest-store\n" . $hydra_config; + $hydra_config = "store_uri = file://$dir/nix/dest-store\n" . 
$hydra_config; } write_file($ENV{'HYDRA_CONFIG'}, $hydra_config); - $ENV{'NIX_LOG_DIR'} = "$dir/nix/var/log/nix"; + my $nix_store_dir = "$dir/nix/store"; + my $nix_state_dir = "$dir/nix/var/nix"; + my $nix_log_dir = "$dir/nix/var/log/nix"; + $ENV{'NIX_REMOTE_SYSTEMS'} = ''; - $ENV{'NIX_REMOTE'} = ''; - $ENV{'NIX_STATE_DIR'} = "$dir/nix/var/nix"; - $ENV{'NIX_STORE_DIR'} = "$dir/nix/store"; + $ENV{'NIX_REMOTE'} = "local?store=$nix_store_dir&state=$nix_state_dir&log=$nix_log_dir"; + $ENV{'NIX_STATE_DIR'} = $nix_state_dir; # FIXME: remove + $ENV{'NIX_STORE_DIR'} = $nix_store_dir; # FIXME: remove my $pgsql = Test::PostgreSQL->new( extra_initdb_args => "--locale C.UTF-8" @@ -73,7 +76,8 @@ sub new { _db => undef, db_handle => $pgsql, tmpdir => $dir, - nix_state_dir => "$dir/nix/var/nix", + nix_state_dir => $nix_state_dir, + nix_log_dir => $nix_log_dir, testdir => abs_path(dirname(__FILE__) . "/.."), jobsdir => abs_path(dirname(__FILE__) . "/../jobs") }, $class; diff --git a/t/queue-runner/notifications.t b/t/queue-runner/notifications.t index b35b2b2f..1966cde1 100644 --- a/t/queue-runner/notifications.t +++ b/t/queue-runner/notifications.t @@ -33,9 +33,6 @@ my $ctx = test_context( # the build locally. subtest "Pre-build the job, upload to the cache, and then delete locally" => sub { - my $scratchlogdir = File::Temp->newdir(); - $ENV{'NIX_LOG_DIR'} = "$scratchlogdir"; - my $outlink = $ctx->tmpdir . "/basic-canbesubstituted"; is(system('nix-build', $ctx->jobsdir . 
'/notifications.nix', '-A', 'canbesubstituted', '--out-link', $outlink), 0, "Building notifications.nix succeeded"); is(system('nix', 'copy', '--to', "file://${binarycachedir}", $outlink), 0, "Copying the closure to the binary cache succeeded"); @@ -46,6 +43,7 @@ subtest "Pre-build the job, upload to the cache, and then delete locally" => sub is(system('nix', 'log', $outpath), 0, "Reading the output's log succeeds"); is(system('nix-store', '--delete', $outpath), 0, "Deleting the notifications.nix output succeeded"); is(system("nix-collect-garbage"), 0, "Delete all the system's garbage"); + File::Path::rmtree($ctx->{nix_log_dir}); }; subtest "Ensure substituting the job works, but reading the log fails" => sub { From d5ba1bba50a7fa2f343d9ad284e293e13bb13f85 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 12 Jul 2022 14:46:48 +0200 Subject: [PATCH 144/401] Fix deprecation warning --- flake.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/flake.nix b/flake.nix index 36863913..db4eb854 100644 --- a/flake.nix +++ b/flake.nix @@ -681,7 +681,7 @@ tests.install.x86_64-linux = with import (nixpkgs + "/nixos/lib/testing-python.nix") { system = "x86_64-linux"; }; simpleTest { - machine = hydraServer; + nodes.machine = hydraServer; testScript = '' machine.wait_for_job("hydra-init") @@ -696,7 +696,7 @@ tests.notifications.x86_64-linux = with import (nixpkgs + "/nixos/lib/testing-python.nix") { system = "x86_64-linux"; }; simpleTest { - machine = { pkgs, ... }: { + nodes.machine = { pkgs, ... }: { imports = [ hydraServer ]; services.hydra-dev.extraConfig = '' @@ -753,7 +753,7 @@ tests.gitea.x86_64-linux = with import (nixpkgs + "/nixos/lib/testing-python.nix") { system = "x86_64-linux"; }; makeTest { - machine = { pkgs, ... }: { + nodes.machine = { pkgs, ... 
}: { imports = [ hydraServer ]; services.hydra-dev.extraConfig = '' From 3e001a8f05cbd0f7f5a8c0fc2d8da8f839d7b3f8 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 12 Jul 2022 14:58:25 +0200 Subject: [PATCH 145/401] Remove newNixpkgs and a lot of packages that are in Nixpkgs 22.05 --- flake.lock | 17 --- flake.nix | 419 +---------------------------------------------------- 2 files changed, 3 insertions(+), 433 deletions(-) diff --git a/flake.lock b/flake.lock index befc8e5b..012d2407 100644 --- a/flake.lock +++ b/flake.lock @@ -16,22 +16,6 @@ "type": "github" } }, - "newNixpkgs": { - "locked": { - "lastModified": 1657425264, - "narHash": "sha256-3aHvoI2e8vJKw3hvnHECaBpSsL5mxVsVtaLCnTdNcH8=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "de5b3dd17034e6106e75746e81618e5bd408de8a", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixos-unstable-small", - "repo": "nixpkgs", - "type": "github" - } - }, "nix": { "inputs": { "lowdown-src": "lowdown-src", @@ -87,7 +71,6 @@ }, "root": { "inputs": { - "newNixpkgs": "newNixpkgs", "nix": "nix", "nixpkgs": [ "nix", diff --git a/flake.nix b/flake.nix index db4eb854..cb1d9aea 100644 --- a/flake.nix +++ b/flake.nix @@ -1,13 +1,10 @@ { description = "A Nix-based continuous build system"; - # FIXME: All the pinned versions of nix/nixpkgs have a broken foreman (yes, - # even 2.7.0's Nixpkgs pin). - inputs.newNixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable-small"; inputs.nixpkgs.follows = "nix/nixpkgs"; inputs.nix.url = "github:NixOS/nix/2.10.0"; - outputs = { self, newNixpkgs, nixpkgs, nix }: + outputs = { self, nixpkgs, nix }: let version = "${builtins.readFile ./version.txt}.${builtins.substring 0 8 (self.lastModifiedDate or "19700101")}.${self.shortRev or "DIRTY"}"; @@ -41,153 +38,8 @@ # A Nixpkgs overlay that provides a 'hydra' package. 
overlay = final: prev: { - # Overlay these packages to use dependencies from the Nixpkgs everything - # else uses, to side-step the version difference: glibc is 2.32 in the - # nix-pinned Nixpkgs, but 2.33 in the newNixpkgs commit. - civetweb = (final.callPackage "${newNixpkgs}/pkgs/development/libraries/civetweb" { }).overrideAttrs - # Can be dropped once newNixpkgs points to a revision containing - # https://github.com/NixOS/nixpkgs/pull/167751 - ({ cmakeFlags ? [ ], ... }: { - cmakeFlags = cmakeFlags ++ [ - "-DCIVETWEB_ENABLE_IPV6=1" - ]; - }); - prometheus-cpp = final.callPackage "${newNixpkgs}/pkgs/development/libraries/prometheus-cpp" { }; - # Add LDAP dependencies that aren't currently found within nixpkgs. perlPackages = prev.perlPackages // { - TestPostgreSQL = final.perlPackages.buildPerlModule { - pname = "Test-PostgreSQL"; - version = "1.28-1"; - src = final.fetchFromGitHub { - owner = "grahamc"; - repo = "Test-postgresql"; - rev = "release-1.28-1"; - hash = "sha256-SFC1C3q3dbcBos18CYd/s0TIcfJW4g04ld0+XQXVToQ="; - }; - buildInputs = with final.perlPackages; [ ModuleBuildTiny TestSharedFork pkgs.postgresql ]; - propagatedBuildInputs = with final.perlPackages; [ DBDPg DBI FileWhich FunctionParameters Moo TieHashMethod TryTiny TypeTiny ]; - - makeMakerFlags = "POSTGRES_HOME=${final.postgresql}"; - - meta = { - homepage = "https://github.com/grahamc/Test-postgresql/releases/tag/release-1.28-1"; - description = "PostgreSQL runner for tests"; - license = with final.lib.licenses; [ artistic2 ]; - }; - }; - - FunctionParameters = final.perlPackages.buildPerlPackage { - pname = "Function-Parameters"; - version = "2.001003"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/M/MA/MAUKE/Function-Parameters-2.001003.tar.gz"; - sha256 = "eaa22c6b43c02499ec7db0758c2dd218a3b2ab47a714b2bdf8010b5ee113c242"; - }; - buildInputs = with final.perlPackages; [ DirSelf TestFatal ]; - meta = { - description = "Define functions and methods with parameter lists 
(\"subroutine signatures\")"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - - CatalystPluginPrometheusTiny = final.perlPackages.buildPerlPackage { - pname = "Catalyst-Plugin-PrometheusTiny"; - version = "0.005"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/S/SY/SYSPETE/Catalyst-Plugin-PrometheusTiny-0.005.tar.gz"; - sha256 = "a42ef09efdc3053899ae007c41220d3ed7207582cc86e491b4f534539c992c5a"; - }; - buildInputs = with final.perlPackages; [ HTTPMessage Plack SubOverride TestDeep ]; - propagatedBuildInputs = with final.perlPackages; [ CatalystRuntime Moose PrometheusTiny PrometheusTinyShared ]; - meta = { - description = "Prometheus metrics for Catalyst"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - - CryptArgon2 = final.perlPackages.buildPerlModule { - pname = "Crypt-Argon2"; - version = "0.010"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/L/LE/LEONT/Crypt-Argon2-0.010.tar.gz"; - sha256 = "3ea1c006f10ef66fd417e502a569df15c4cc1c776b084e35639751c41ce6671a"; - }; - nativeBuildInputs = [ pkgs.ld-is-cc-hook ]; - meta = { - description = "Perl interface to the Argon2 key derivation functions"; - license = final.lib.licenses.cc0; - }; - }; - - CryptPassphrase = final.perlPackages.buildPerlPackage { - pname = "Crypt-Passphrase"; - version = "0.003"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/L/LE/LEONT/Crypt-Passphrase-0.003.tar.gz"; - sha256 = "685aa090f8179a86d6896212ccf8ccfde7a79cce857199bb14e2277a10d240ad"; - }; - meta = { - description = "A module for managing passwords in a cryptographically agile manner"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - - CryptPassphraseArgon2 = final.perlPackages.buildPerlPackage { - pname = "Crypt-Passphrase-Argon2"; - version = "0.002"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/L/LE/LEONT/Crypt-Passphrase-Argon2-0.002.tar.gz"; - sha256 = 
"3906ff81697d13804ee21bd5ab78ffb1c4408b4822ce020e92ecf4737ba1f3a8"; - }; - propagatedBuildInputs = with final.perlPackages; [ CryptArgon2 CryptPassphrase ]; - meta = { - description = "An Argon2 encoder for Crypt::Passphrase"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - - DataRandom = final.perlPackages.buildPerlPackage { - pname = "Data-Random"; - version = "0.13"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/B/BA/BAREFOOT/Data-Random-0.13.tar.gz"; - sha256 = "eb590184a8db28a7e49eab09e25f8650c33f1f668b6a472829de74a53256bfc0"; - }; - buildInputs = with final.perlPackages; [ FileShareDirInstall TestMockTime ]; - meta = { - description = "Perl module to generate random data"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - - DirSelf = final.perlPackages.buildPerlPackage { - pname = "Dir-Self"; - version = "0.11"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/M/MA/MAUKE/Dir-Self-0.11.tar.gz"; - sha256 = "e251a51abc7d9ba3e708f73c2aa208e09d47a0c528d6254710fa78cc8d6885b5"; - }; - meta = { - homepage = "https://github.com/mauke/Dir-Self"; - description = "A __DIR__ constant for the directory your source file is in"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - - HashSharedMem = final.perlPackages.buildPerlModule { - pname = "Hash-SharedMem"; - version = "0.005"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/Z/ZE/ZEFRAM/Hash-SharedMem-0.005.tar.gz"; - sha256 = "324776808602f7bdc44adaa937895365454029a926fa611f321c9bf6b940bb5e"; - }; - buildInputs = with final.perlPackages; [ ScalarString ]; - meta = { - description = "Efficient shared mutable hash"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; PrometheusTiny = final.perlPackages.buildPerlPackage { pname = "Prometheus-Tiny"; @@ -204,269 +56,6 @@ }; }; - PrometheusTinyShared = final.perlPackages.buildPerlPackage { - pname = "Prometheus-Tiny-Shared"; - version = "0.023"; - src = 
final.fetchurl { - url = "mirror://cpan/authors/id/R/RO/ROBN/Prometheus-Tiny-Shared-0.023.tar.gz"; - sha256 = "7c2c72397be5d8e4839d1bf4033c1800f467f2509689673c6419df48794f2abe"; - }; - buildInputs = with final.perlPackages; [ DataRandom HTTPMessage Plack TestDifferences TestException ]; - propagatedBuildInputs = with final.perlPackages; [ HashSharedMem JSONXS PrometheusTiny ]; - meta = { - homepage = "https://github.com/robn/Prometheus-Tiny-Shared"; - description = "A tiny Prometheus client with a shared database behind it"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - - ReadonlyX = final.perlPackages.buildPerlModule { - pname = "ReadonlyX"; - version = "1.04"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/S/SA/SANKO/ReadonlyX-1.04.tar.gz"; - sha256 = "81bb97dba93ac6b5ccbce04a42c3590eb04557d75018773ee18d5a30fcf48188"; - }; - buildInputs = with final.perlPackages; [ ModuleBuildTiny TestFatal ]; - meta = { - homepage = "https://github.com/sanko/readonly"; - description = "Faster facility for creating read-only scalars, arrays, hashes"; - license = final.lib.licenses.artistic2; - }; - }; - - TieHashMethod = final.perlPackages.buildPerlPackage { - pname = "Tie-Hash-Method"; - version = "0.02"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/Y/YV/YVES/Tie-Hash-Method-0.02.tar.gz"; - sha256 = "d513fbb51413f7ca1e64a1bdce6194df7ec6076dea55066d67b950191eec32a9"; - }; - meta = { - description = "Tied hash with specific methods overriden by callbacks"; - license = with final.lib.licenses; [ artistic1 ]; - }; - }; - - Test2Harness = final.perlPackages.buildPerlPackage { - pname = "Test2-Harness"; - version = "1.000042"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/E/EX/EXODIST/Test2-Harness-1.000042.tar.gz"; - sha256 = "aaf231a68af1a6ffd6a11188875fcf572e373e43c8285945227b9d687b43db2d"; - }; - - checkPhase = '' - patchShebangs ./t ./scripts/yath - ./scripts/yath test -j $NIX_BUILD_CORES - ''; - - 
propagatedBuildInputs = with final.perlPackages; [ DataUUID Importer LongJump ScopeGuard TermTable Test2PluginMemUsage Test2PluginUUID Test2Suite gotofile ]; - meta = { - description = "A new and improved test harness with better Test2 integration"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - - Test2PluginMemUsage = prev.perlPackages.buildPerlPackage { - pname = "Test2-Plugin-MemUsage"; - version = "0.002003"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/E/EX/EXODIST/Test2-Plugin-MemUsage-0.002003.tar.gz"; - sha256 = "5e0662d5a823ae081641f5ce82843111eec1831cd31f883a6c6de54afdf87c25"; - }; - buildInputs = with final.perlPackages; [ Test2Suite ]; - meta = { - description = "Collect and display memory usage information"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - - Test2PluginUUID = prev.perlPackages.buildPerlPackage { - pname = "Test2-Plugin-UUID"; - version = "0.002001"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/E/EX/EXODIST/Test2-Plugin-UUID-0.002001.tar.gz"; - sha256 = "4c6c8d484d7153d8779dc155a992b203095b5c5aa1cfb1ee8bcedcd0601878c9"; - }; - buildInputs = with final.perlPackages;[ Test2Suite ]; - propagatedBuildInputs = with final.perlPackages; [ DataUUID ]; - meta = { - description = "Use REAL UUIDs in Test2"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - - LongJump = final.perlPackages.buildPerlPackage { - pname = "Long-Jump"; - version = "0.000001"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/E/EX/EXODIST/Long-Jump-0.000001.tar.gz"; - sha256 = "d5d6456d86992b559d8f66fc90960f919292cd3803c13403faac575762c77af4"; - }; - buildInputs = with final.perlPackages; [ Test2Suite ]; - meta = { - description = "Mechanism for returning to a specific point from a deeply nested stack"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - - gotofile = final.perlPackages.buildPerlPackage { - pname = "goto-file"; - version = 
"0.005"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/E/EX/EXODIST/goto-file-0.005.tar.gz"; - sha256 = "c6cdd5ee4a6cdcbdbf314d92a4f9985dbcdf9e4258048cae76125c052aa31f77"; - }; - buildInputs = with final.perlPackages; [ Test2Suite ]; - meta = { - description = "Stop parsing the current file and move on to a different one"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - - NetLDAPServer = prev.perlPackages.buildPerlPackage { - pname = "Net-LDAP-Server"; - version = "0.43"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/A/AA/AAR/Net-LDAP-Server-0.43.tar.gz"; - sha256 = "0qmh3cri3fpccmwz6bhwp78yskrb3qmalzvqn0a23hqbsfs4qv6x"; - }; - propagatedBuildInputs = with final.perlPackages; [ perlldap ConvertASN1 ]; - meta = { - description = "LDAP server side protocol handling"; - license = with final.lib.licenses; [ artistic1 ]; - }; - }; - - NetLDAPSID = prev.perlPackages.buildPerlPackage { - pname = "Net-LDAP-SID"; - version = "0.0001"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/K/KA/KARMAN/Net-LDAP-SID-0.001.tar.gz"; - sha256 = "1mnnpkmj8kpb7qw50sm8h4sd8py37ssy2xi5hhxzr5whcx0cvhm8"; - }; - meta = { - description = "Active Directory Security Identifier manipulation"; - license = with final.lib.licenses; [ artistic2 ]; - }; - }; - - NetLDAPServerTest = prev.perlPackages.buildPerlPackage { - pname = "Net-LDAP-Server-Test"; - version = "0.22"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/K/KA/KARMAN/Net-LDAP-Server-Test-0.22.tar.gz"; - sha256 = "13idip7jky92v4adw60jn2gcc3zf339gsdqlnc9nnvqzbxxp285i"; - }; - propagatedBuildInputs = with final.perlPackages; [ perlldap NetLDAPServer DataDump NetLDAPSID ]; - meta = { - description = "test Net::LDAP code"; - license = with final.lib.licenses; [ artistic1 ]; - }; - }; - - CatalystAuthenticationStoreLDAP = prev.perlPackages.buildPerlPackage { - pname = "Catalyst-Authentication-Store-LDAP"; - version = "1.016"; - src = final.fetchurl { - url = 
"mirror://cpan/authors/id/I/IL/ILMARI/Catalyst-Authentication-Store-LDAP-1.016.tar.gz"; - sha256 = "0cm399vxqqf05cjgs1j5v3sk4qc6nmws5nfhf52qvpbwc4m82mq8"; - }; - propagatedBuildInputs = with final.perlPackages; [ perlldap CatalystPluginAuthentication ClassAccessor ]; - buildInputs = with final.perlPackages; [ TestMockObject TestException NetLDAPServerTest ]; - meta = { - description = "Authentication from an LDAP Directory"; - license = with final.lib.licenses; [ artistic1 ]; - }; - }; - - PerlCriticCommunity = prev.perlPackages.buildPerlModule { - pname = "Perl-Critic-Community"; - version = "1.0.0"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/D/DB/DBOOK/Perl-Critic-Community-v1.0.0.tar.gz"; - sha256 = "311b775da4193e9de94cf5225e993cc54dd096ae1e7ef60738cdae1d9b8854e7"; - }; - buildInputs = with final.perlPackages; [ ModuleBuildTiny ]; - propagatedBuildInputs = with final.perlPackages; [ PPI PathTiny PerlCritic PerlCriticPolicyVariablesProhibitLoopOnHash PerlCriticPulp ]; - meta = { - homepage = "https://github.com/Grinnz/Perl-Critic-Freenode"; - description = "Community-inspired Perl::Critic policies"; - license = final.lib.licenses.artistic2; - }; - }; - - PerlCriticPolicyVariablesProhibitLoopOnHash = prev.perlPackages.buildPerlPackage { - pname = "Perl-Critic-Policy-Variables-ProhibitLoopOnHash"; - version = "0.008"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/X/XS/XSAWYERX/Perl-Critic-Policy-Variables-ProhibitLoopOnHash-0.008.tar.gz"; - sha256 = "12f5f0be96ea1bdc7828058577bd1c5c63ca23c17fac9c3709452b3dff5b84e0"; - }; - propagatedBuildInputs = with final.perlPackages; [ PerlCritic ]; - meta = { - description = "Don't write loops on hashes, only on keys and values of hashes"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - - PerlCriticPulp = prev.perlPackages.buildPerlPackage { - pname = "Perl-Critic-Pulp"; - version = "99"; - src = final.fetchurl { - url = 
"mirror://cpan/authors/id/K/KR/KRYDE/Perl-Critic-Pulp-99.tar.gz"; - sha256 = "b8fda842fcbed74d210257c0a284b6dc7b1d0554a47a3de5d97e7d542e23e7fe"; - }; - propagatedBuildInputs = with final.perlPackages; [ IOString ListMoreUtils PPI PerlCritic PodMinimumVersion ]; - meta = { - homepage = "http://user42.tuxfamily.org/perl-critic-pulp/index.html"; - description = "Some add-on policies for Perl::Critic"; - license = final.lib.licenses.gpl3Plus; - }; - }; - - PodMinimumVersion = prev.perlPackages.buildPerlPackage { - pname = "Pod-MinimumVersion"; - version = "50"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/K/KR/KRYDE/Pod-MinimumVersion-50.tar.gz"; - sha256 = "0bd2812d9aacbd99bb71fa103a4bb129e955c138ba7598734207dc9fb67b5a6f"; - }; - propagatedBuildInputs = with final.perlPackages; [ IOString PodParser ]; - meta = { - homepage = "http://user42.tuxfamily.org/pod-minimumversion/index.html"; - description = "Determine minimum Perl version of POD directives"; - license = final.lib.licenses.free; - }; - }; - - StringCompareConstantTime = final.perlPackages.buildPerlPackage { - pname = "String-Compare-ConstantTime"; - version = "0.321"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/F/FR/FRACTAL/String-Compare-ConstantTime-0.321.tar.gz"; - sha256 = "0b26ba2b121d8004425d4485d1d46f59001c83763aa26624dff6220d7735d7f7"; - }; - meta = { - description = "Timing side-channel protected string compare"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - - UUID4Tiny = final.perlPackages.buildPerlPackage { - pname = "UUID4-Tiny"; - version = "0.002"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/C/CV/CVLIBRARY/UUID4-Tiny-0.002.tar.gz"; - sha256 = "e7535b31e386d432dec7adde214348389e1d5cf753e7ed07f1ae04c4360840cf"; - }; - meta = { - description = "Cryptographically secure v4 UUIDs for Linux x64"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - }; hydra = with final; let @@ -581,9 +170,7 @@ checkInputs = [ 
cacert - # FIXME: foreman is broken on all nix/nixpkgs pin, up to and - # including 2.7.0 - newNixpkgs.legacyPackages.${final.stdenv.system}.foreman + foreman glibcLocales libressl.nc openldap @@ -1015,7 +602,7 @@ self.nixosModules.hydraTest self.nixosModules.hydraProxy { - system.configurationRevision = self.rev; + system.configurationRevision = self.lastModifiedDate; boot.isContainer = true; networking.useDHCP = false; From 481ca71d6e7a391b7c93da7149f50ada02e9303f Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 12 Jul 2022 15:03:27 +0200 Subject: [PATCH 146/401] Use new flake output naming convention --- flake.nix | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/flake.nix b/flake.nix index cb1d9aea..c5d4b547 100644 --- a/flake.nix +++ b/flake.nix @@ -11,7 +11,7 @@ pkgs = import nixpkgs { system = "x86_64-linux"; - overlays = [ self.overlay nix.overlays.default ]; + overlays = [ self.overlays.default nix.overlays.default ]; }; # NixOS configuration used for VM tests. @@ -36,7 +36,7 @@ rec { # A Nixpkgs overlay that provides a 'hydra' package. - overlay = final: prev: { + overlays.default = final: prev: { # Add LDAP dependencies that aren't currently found within nixpkgs. 
perlPackages = prev.perlPackages // { @@ -547,11 +547,11 @@ checks.x86_64-linux.validate-openapi = hydraJobs.tests.validate-openapi; packages.x86_64-linux.hydra = pkgs.hydra; - defaultPackage.x86_64-linux = pkgs.hydra; + packages.x86_64-linux.default = pkgs.hydra; nixosModules.hydra = { imports = [ ./hydra-module.nix ]; - nixpkgs.overlays = [ self.overlay nix.overlays.default ]; + nixpkgs.overlays = [ self.overlays.default nix.overlays.default ]; }; nixosModules.hydraTest = { From 9656f16509665a2d49ff7de2905d7115b5fe4440 Mon Sep 17 00:00:00 2001 From: Amanda Cameron Date: Fri, 22 Jul 2022 10:35:38 -0400 Subject: [PATCH 147/401] Update hydra-module.nix to use newer nixos options nix.trustedUsers is deprecated as of 22.05, and since the nix.extraOptions config is just doing something similar, I moved that to the new nix.settings as well --- hydra-module.nix | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/hydra-module.nix b/hydra-module.nix index 0df5e690..1f46a3ca 100644 --- a/hydra-module.nix +++ b/hydra-module.nix @@ -228,8 +228,12 @@ in useDefaultShell = true; }; - nix.trustedUsers = [ "hydra-queue-runner" ]; - + nix.settings = { + trusted-users = [ "hydra-queue-runner" ]; + gc-keep-outputs = true; + gc-keep-derivations = true; + }; + services.hydra-dev.extraConfig = '' using_frontend_proxy = 1 @@ -256,11 +260,6 @@ in environment.variables = hydraEnv; - nix.extraOptions = '' - gc-keep-outputs = true - gc-keep-derivations = true - ''; - systemd.services.hydra-init = { wantedBy = [ "multi-user.target" ]; requires = optional haveLocalDB "postgresql.service"; From a58e2f1a642233672f9941a77c0f4bd5c3597878 Mon Sep 17 00:00:00 2001 From: Marco Rebhan Date: Thu, 4 Aug 2022 21:18:55 +0200 Subject: [PATCH 148/401] Use libmagic for better output MIME detection --- flake.nix | 1 + src/lib/Hydra/Controller/Build.pm | 39 ++++++------------------------- 2 files changed, 8 insertions(+), 32 deletions(-) diff --git a/flake.nix b/flake.nix index 
2e891364..93186ee0 100644 --- a/flake.nix +++ b/flake.nix @@ -503,6 +503,7 @@ DigestSHA1 EmailMIME EmailSender + FileLibMagic FileSlurper FileWhich final.nix.perl-bindings diff --git a/src/lib/Hydra/Controller/Build.pm b/src/lib/Hydra/Controller/Build.pm index c7811c62..18a0eba3 100644 --- a/src/lib/Hydra/Controller/Build.pm +++ b/src/lib/Hydra/Controller/Build.pm @@ -7,15 +7,16 @@ use base 'Hydra::Base::Controller::NixChannel'; use Hydra::Helper::Nix; use Hydra::Helper::CatalystUtils; use File::Basename; +use File::LibMagic; use File::stat; use Data::Dump qw(dump); use Nix::Store; use Nix::Config; use List::SomeUtils qw(all); use Encode; -use MIME::Types; use JSON::PP; +use feature 'state'; sub buildChain :Chained('/') :PathPart('build') :CaptureArgs(1) { my ($self, $c, $id) = @_; @@ -236,14 +237,10 @@ sub serveFile { $c->stash->{'plain'} = { data => grab(cmd => ["nix", "--experimental-features", "nix-command", "store", "cat", "--store", getStoreUri(), "$path"]) }; - # Detect MIME type. Borrowed from Catalyst::Plugin::Static::Simple. - my $type = "text/plain"; - if ($path =~ /.*\.(\S{1,})$/xms) { - my $ext = $1; - my $mimeTypes = MIME::Types->new(only_complete => 1); - my $t = $mimeTypes->mimeTypeOf($ext); - $type = ref $t ? $t->type : $t if $t; - } + # Detect MIME type. + state $magic = File::LibMagic->new(follow_symlinks => 1); + my $info = $magic->info_from_filename($path); + my $type = $info->{mime_with_encoding}; $c->response->content_type($type); $c->forward('Hydra::View::Plain'); } @@ -288,29 +285,7 @@ sub download : Chained('buildChain') PathPart { my $path = $product->path; $path .= "/" . join("/", @path) if scalar @path > 0; - if (isLocalStore) { - - notFound($c, "File '" . $product->path . "' does not exist.") unless -e $product->path; - - # Make sure the file is in the Nix store. - $path = checkPath($self, $c, $path); - - # If this is a directory but no "/" is attached, then redirect. 
- if (-d $path && substr($c->request->uri, -1) ne "/") { - return $c->res->redirect($c->request->uri . "/"); - } - - $path = "$path/index.html" if -d $path && -e "$path/index.html"; - - notFound($c, "File '$path' does not exist.") if !-e $path; - - notFound($c, "Path '$path' is a directory.") if -d $path; - - $c->serve_static_file($path); - - } else { - serveFile($c, $path); - } + serveFile($c, $path); $c->response->headers->last_modified($c->stash->{build}->stoptime); } From f6d45b0f0cdedf940df67a935f4eb337b43300e7 Mon Sep 17 00:00:00 2001 From: Maximilian Bosch Date: Mon, 8 Aug 2022 13:35:56 +0200 Subject: [PATCH 149/401] doc/configuration: fix ldap role mapping example To group is called `cancel-build`, not `cancel-builds` (note the trailing `s`). --- doc/manual/src/configuration.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/manual/src/configuration.md b/doc/manual/src/configuration.md index 2700625d..ab68df43 100644 --- a/doc/manual/src/configuration.md +++ b/doc/manual/src/configuration.md @@ -185,7 +185,7 @@ Example configuration: hydra_admin = admin # Allow all users in the dev group to restart jobs and cancel builds dev = restart-jobs - dev = cancel-builds + dev = cancel-build ``` From 74caaa696e5732313a621ada9a0d9283a57fa764 Mon Sep 17 00:00:00 2001 From: K900 Date: Thu, 11 Aug 2022 13:30:19 +0300 Subject: [PATCH 150/401] Run the JS code to make build trees collapsible at the right time --- src/root/build.tt | 4 ++-- src/root/common.tt | 6 +++++- src/root/static/js/common.js | 27 ++++++++++++++++----------- 3 files changed, 23 insertions(+), 14 deletions(-) diff --git a/src/root/build.tt b/src/root/build.tt index 027ce3e4..93a02e0f 100644 --- a/src/root/build.tt +++ b/src/root/build.tt @@ -481,11 +481,11 @@ END; [% END %] [% IF drvAvailable %] - [% INCLUDE makeLazyTab tabName="tabs-build-deps" uri=c.uri_for('/build' build.id 'build-deps') %] + [% INCLUDE makeLazyTab tabName="tabs-build-deps" uri=c.uri_for('/build' build.id 
'build-deps') callback="makeTreeCollapsible" %] [% END %] [% IF available %] - [% INCLUDE makeLazyTab tabName="tabs-runtime-deps" uri=c.uri_for('/build' build.id 'runtime-deps') %] + [% INCLUDE makeLazyTab tabName="tabs-runtime-deps" uri=c.uri_for('/build' build.id 'runtime-deps') callback="makeTreeCollapsible" %] [% END %]
diff --git a/src/root/common.tt b/src/root/common.tt index 6b4e406e..32d6c8cc 100644 --- a/src/root/common.tt +++ b/src/root/common.tt @@ -520,7 +520,11 @@ BLOCK makeLazyTab %]
[% END; diff --git a/src/root/static/js/common.js b/src/root/static/js/common.js index 433c06d7..c51f769a 100644 --- a/src/root/static/js/common.js +++ b/src/root/static/js/common.js @@ -1,10 +1,9 @@ -$(document).ready(function() { - +function makeTreeCollapsible(tab) { /*** Tree toggles in logfiles. ***/ /* Set the appearance of the toggle depending on whether the corresponding subtree is initially shown or hidden. */ - $(".tree-toggle").map(function() { + tab.find(".tree-toggle").map(function() { if ($(this).siblings("ul:hidden").length == 0) { $(this).text("-"); } else { @@ -13,7 +12,7 @@ $(document).ready(function() { }); /* When a toggle is clicked, show or hide the subtree. */ - $(".tree-toggle").click(function() { + tab.find(".tree-toggle").click(function() { if ($(this).siblings("ul:hidden").length != 0) { $(this).siblings("ul").show(); $(this).text("-"); @@ -24,21 +23,23 @@ $(document).ready(function() { }); /* Implementation of the expand all link. */ - $(".tree-expand-all").click(function() { - $(".tree-toggle", $(this).parent().siblings(".tree")).map(function() { + tab.find(".tree-expand-all").click(function() { + tab.find(".tree-toggle", $(this).parent().siblings(".tree")).map(function() { $(this).siblings("ul").show(); $(this).text("-"); }); }); /* Implementation of the collapse all link. 
*/ - $(".tree-collapse-all").click(function() { - $(".tree-toggle", $(this).parent().siblings(".tree")).map(function() { + tab.find(".tree-collapse-all").click(function() { + tab.find(".tree-toggle", $(this).parent().siblings(".tree")).map(function() { $(this).siblings("ul").hide(); $(this).text("+"); }); }); +} +$(document).ready(function() { $("table.clickable-rows").click(function(event) { if ($(event.target).closest("a").length) return; link = $(event.target).parents("tr").find("a.row-link"); @@ -132,7 +133,7 @@ $(document).ready(function() { var tabsLoaded = {}; -function makeLazyTab(tabName, uri) { +function makeLazyTab(tabName, uri, callback) { $('.nav-tabs').bind('show.bs.tab', function(e) { var pattern = /#.+/gi; var id = e.target.toString().match(pattern)[0]; @@ -140,11 +141,15 @@ function makeLazyTab(tabName, uri) { tabsLoaded[id] = 1; $('#' + tabName).load(uri, function(response, status, xhr) { var lazy = xhr.getResponseHeader("X-Hydra-Lazy") === "Yes"; + var tab = $('#' + tabName); if (status == "error" && !lazy) { - $('#' + tabName).html("
Error loading tab: " + xhr.status + " " + xhr.statusText + "
"); + tab.html("
Error loading tab: " + xhr.status + " " + xhr.statusText + "
"); } else { - $('#' + tabName).html(response); + tab.html(response); + if (callback) { + callback(tab); + } } }); } From 93bbd6925b8c08d8080d0a317b5f30694da1e67c Mon Sep 17 00:00:00 2001 From: K900 Date: Fri, 12 Aug 2022 09:46:17 +0300 Subject: [PATCH 151/401] Also restore the "expand all" and "collapse all" buttons --- src/root/build-deps.tt | 2 +- src/root/deps.tt | 9 ++++++++- src/root/static/css/hydra.css | 5 +++++ 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/src/root/build-deps.tt b/src/root/build-deps.tt index 157b113e..f15342fa 100644 --- a/src/root/build-deps.tt +++ b/src/root/build-deps.tt @@ -4,6 +4,6 @@
    - [% INCLUDE renderNode node=buildTimeGraph %] + [% INCLUDE renderNode node=buildTimeGraph isRoot=1 %]
diff --git a/src/root/deps.tt b/src/root/deps.tt index a2c1fbba..6daa9725 100644 --- a/src/root/deps.tt +++ b/src/root/deps.tt @@ -19,9 +19,16 @@ [% node.name %] (no info) [% END %] + [% IF isRoot %] + + (collapse all + – + expand all) + + [% END %] [% IF node.refs.size > 0 %]
    - [% FOREACH ref IN node.refs; INCLUDE renderNode node=ref; END %] + [% FOREACH ref IN node.refs; INCLUDE renderNode node=ref isRoot=0; END %]
[% END %] [% END %] diff --git a/src/root/static/css/hydra.css b/src/root/static/css/hydra.css index 3b50d246..aac3f1ea 100644 --- a/src/root/static/css/hydra.css +++ b/src/root/static/css/hydra.css @@ -33,6 +33,11 @@ span:target > span.dep-tree-line { font-weight: bold; } +span.dep-tree-buttons { + font-style: italic; + padding-left: 10px; +} + span.disabled-project, span.disabled-jobset, span.disabled-job { text-decoration: line-through; } From 2b8a8fdd9cd7c6ea7d2d892fa9c93acdfef0c25d Mon Sep 17 00:00:00 2001 From: K900 Date: Fri, 12 Aug 2022 09:46:32 +0300 Subject: [PATCH 152/401] Make the tree a little less dense --- src/root/static/css/tree.css | 1 + 1 file changed, 1 insertion(+) diff --git a/src/root/static/css/tree.css b/src/root/static/css/tree.css index 71df2f12..e80f6871 100644 --- a/src/root/static/css/tree.css +++ b/src/root/static/css/tree.css @@ -9,6 +9,7 @@ ul.tree, ul.subtree { ul.subtree > li { position: relative; padding-left: 2.0em; + line-height: 140%; border-left: 0.1em solid #6185a0; } From 9addaeb17fc9663f706b58b33e0de8ec57f4bf2b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Janne=20He=C3=9F?= Date: Fri, 8 Jul 2022 22:23:35 +0200 Subject: [PATCH 153/401] Add a squiggly line to the Hydra link on hover The effect is the same as the one on links in mail bodys on https://lists.apache.org/ --- src/root/layout.tt | 2 +- src/root/static/css/hydra.css | 10 ++++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/src/root/layout.tt b/src/root/layout.tt index 8eb1f119..d67ff1b8 100644 --- a/src/root/layout.tt +++ b/src/root/layout.tt @@ -93,7 +93,7 @@