From d9515b208c17f9c892220166ecc9c6a8166bea06 Mon Sep 17 00:00:00 2001 From: Kevin Quick Date: Sun, 9 Sep 2018 22:08:06 -0700 Subject: [PATCH 001/965] Update prompt for Local path input to indicate a URL is also valid. The PathInput input for local paths was previously enhanced to allow URLs for which it would use a nix-prefetch-url operation. This change updates the prompt for the declarative input type to indicate this capability. --- src/lib/Hydra/Plugin/PathInput.pm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib/Hydra/Plugin/PathInput.pm b/src/lib/Hydra/Plugin/PathInput.pm index 551fc94a..7aa7c764 100644 --- a/src/lib/Hydra/Plugin/PathInput.pm +++ b/src/lib/Hydra/Plugin/PathInput.pm @@ -8,7 +8,7 @@ use Nix::Store; sub supportedInputTypes { my ($self, $inputTypes) = @_; - $inputTypes->{'path'} = 'Local path'; + $inputTypes->{'path'} = 'Local path or URL'; } sub fetchInput { From 68e689cacecf8e94153dc9462778eb18888781ca Mon Sep 17 00:00:00 2001 From: Samuel Dionne-Riel Date: Sun, 25 Oct 2020 18:50:54 -0400 Subject: [PATCH 002/965] hydra-eval-jobs: Identify unexpected errors in handling aggregate jobs The vague "[json.exception.type_error.302] type must be string, but is null" is **absolutely** unhelpful in the way Hydra currently handles it on evaluation. This is handling *unexpected* errors only; the following commit will handle the specific instance of the previously mentioned error. --- src/hydra-eval-jobs/hydra-eval-jobs.cc | 78 +++++++++++++++----------- 1 file changed, 45 insertions(+), 33 deletions(-) diff --git a/src/hydra-eval-jobs/hydra-eval-jobs.cc b/src/hydra-eval-jobs/hydra-eval-jobs.cc index 600d4670..1cbc6fd8 100644 --- a/src/hydra-eval-jobs/hydra-eval-jobs.cc +++ b/src/hydra-eval-jobs/hydra-eval-jobs.cc @@ -442,45 +442,57 @@ int main(int argc, char * * argv) for (auto i = state->jobs.begin(); i != state->jobs.end(); ++i) { auto jobName = i.key(); auto & job = i.value(); + // For the error message + std::string lastTriedJobName = i.key(); auto named = job.find("namedConstituents"); if (named == job.end()) continue; - if (myArgs.dryRun) { - for (std::string jobName2 : *named) { - auto job2 = state->jobs.find(jobName2); - if (job2 == state->jobs.end()) - throw Error("aggregate job '%s' references non-existent job '%s'", jobName, jobName2); - std::string drvPath2 = (*job2)["drvPath"]; - job["constituents"].push_back(drvPath2); + try { + if (myArgs.dryRun) { + for (std::string jobName2 : *named) { + lastTriedJobName = jobName2; + auto job2 = state->jobs.find(jobName2); + if (job2 == state->jobs.end()) + throw Error("aggregate job '%s' references non-existent job '%s'", jobName, jobName2); + std::string drvPath2 = (*job2)["drvPath"]; + job["constituents"].push_back(drvPath2); + } + } else { + auto drvPath = store->parseStorePath((std::string) job["drvPath"]); + auto drv = store->readDerivation(drvPath); + + for (std::string jobName2 : *named) { + lastTriedJobName = jobName2; + auto job2 = state->jobs.find(jobName2); + if (job2 == state->jobs.end()) + throw Error("aggregate job '%s' references non-existent job '%s'", jobName, jobName2); + auto drvPath2 = store->parseStorePath((std::string) (*job2)["drvPath"]); + auto drv2 = store->readDerivation(drvPath2); + job["constituents"].push_back(store->printStorePath(drvPath2)); + drv.inputDrvs[drvPath2] = {drv2.outputs.begin()->first}; + } + + std::string drvName(drvPath.name()); + assert(hasSuffix(drvName, drvExtension)); + drvName.resize(drvName.size() - drvExtension.size()); + auto h = 
std::get(hashDerivationModulo(*store, drv, true)); + auto outPath = store->makeOutputPath("out", h, drvName); + drv.env["out"] = store->printStorePath(outPath); + drv.outputs.insert_or_assign("out", DerivationOutput { .output = DerivationOutputInputAddressed { .path = outPath } }); + auto newDrvPath = store->printStorePath(writeDerivation(*store, drv)); + + debug("rewrote aggregate derivation %s -> %s", store->printStorePath(drvPath), newDrvPath); + + job["drvPath"] = newDrvPath; + job["outputs"]["out"] = store->printStorePath(outPath); } - } else { - auto drvPath = store->parseStorePath((std::string) job["drvPath"]); - auto drv = store->readDerivation(drvPath); + } catch (std::exception & e) { + // Print more information to help debugging. + printError("Unexpected error in hydra-eval-jobs when handling job '%s', when producing aggregate job '%s':", lastTriedJobName, jobName); - for (std::string jobName2 : *named) { - auto job2 = state->jobs.find(jobName2); - if (job2 == state->jobs.end()) - throw Error("aggregate job '%s' references non-existent job '%s'", jobName, jobName2); - auto drvPath2 = store->parseStorePath((std::string) (*job2)["drvPath"]); - auto drv2 = store->readDerivation(drvPath2); - job["constituents"].push_back(store->printStorePath(drvPath2)); - drv.inputDrvs[drvPath2] = {drv2.outputs.begin()->first}; - } - - std::string drvName(drvPath.name()); - assert(hasSuffix(drvName, drvExtension)); - drvName.resize(drvName.size() - drvExtension.size()); - auto h = std::get(hashDerivationModulo(*store, drv, true)); - auto outPath = store->makeOutputPath("out", h, drvName); - drv.env["out"] = store->printStorePath(outPath); - drv.outputs.insert_or_assign("out", DerivationOutput { .output = DerivationOutputInputAddressed { .path = outPath } }); - auto newDrvPath = store->printStorePath(writeDerivation(*store, drv)); - - debug("rewrote aggregate derivation %s -> %s", store->printStorePath(drvPath), newDrvPath); - - job["drvPath"] = newDrvPath; - job["outputs"]["out"] = store->printStorePath(outPath); + // And throw the original exception! + throw; } job.erase("namedConstituents"); From b5140c1da17b1af8ca531d84c0e7f3b05b7cbe60 Mon Sep 17 00:00:00 2001 From: Samuel Dionne-Riel Date: Sun, 25 Oct 2020 19:15:16 -0400 Subject: [PATCH 003/965] hydra-eval-jobs: Transmit original Nix error when handling aggregate jobs It might happen that a job from the aggregate returned an error! This is what the vague "[json.exception.type_error.302] type must be string, but is null" was all about in this instance; there was no `drvPath` to stringify! So we now actively watch for errors and copy them to the aggregate job. 
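To make that failure mode concrete: nlohmann::json throws exactly this
exception when a null value is cast to a string. A minimal standalone
sketch (not Hydra code, just the library behaviour this commit guards
against):

    #include <nlohmann/json.hpp>
    #include <iostream>

    int main()
    {
        nlohmann::json job2;
        job2["error"] = "evaluation failed"; // a failed job has no "drvPath"

        try {
            // "drvPath" is null here, so the cast throws:
            // [json.exception.type_error.302] type must be string, but is null
            std::cout << (std::string) job2["drvPath"] << "\n";
        } catch (nlohmann::json::exception & e) {
            std::cerr << e.what() << "\n";
        }

        // This commit checks for a recorded error before touching
        // "drvPath", avoiding the cast entirely.
        if (job2.find("error") != job2.end())
            std::cerr << "constituent failed: " << job2["error"] << "\n";
        return 0;
    }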
--- src/hydra-eval-jobs/hydra-eval-jobs.cc | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/src/hydra-eval-jobs/hydra-eval-jobs.cc b/src/hydra-eval-jobs/hydra-eval-jobs.cc index 1cbc6fd8..54936da9 100644 --- a/src/hydra-eval-jobs/hydra-eval-jobs.cc +++ b/src/hydra-eval-jobs/hydra-eval-jobs.cc @@ -467,10 +467,18 @@ int main(int argc, char * * argv) auto job2 = state->jobs.find(jobName2); if (job2 == state->jobs.end()) throw Error("aggregate job '%s' references non-existent job '%s'", jobName, jobName2); - auto drvPath2 = store->parseStorePath((std::string) (*job2)["drvPath"]); - auto drv2 = store->readDerivation(drvPath2); - job["constituents"].push_back(store->printStorePath(drvPath2)); - drv.inputDrvs[drvPath2] = {drv2.outputs.begin()->first}; + + if ((*job2).find("error") != (*job2).end()) { + if (job.find("error") == job.end()) { + job["error"] = fmt("Errors aggregating aggregate job '%1%'.\n", jobName); + } + job["error"] = fmt("While handling '%1%': %2%\n", jobName2, (std::string) (*job2)["error"]); + } else { + auto drvPath2 = store->parseStorePath((std::string) (*job2)["drvPath"]); + auto drv2 = store->readDerivation(drvPath2); + job["constituents"].push_back(store->printStorePath(drvPath2)); + drv.inputDrvs[drvPath2] = {drv2.outputs.begin()->first}; + } } std::string drvName(drvPath.name()); From f47749a62d9495279e9e43a868ea63a637019ec9 Mon Sep 17 00:00:00 2001 From: "Ricardo M. Correia" Date: Tue, 10 Nov 2020 04:05:59 +0100 Subject: [PATCH 004/965] Fix persistent hash mismatch errors when importing This would start happening if the network connection between the Hydra server and the remote build server breaks after sucessfully importing at least one output of a derivation, but before having finished importing all outputs. Fixes #816. --- src/hydra-queue-runner/build-remote.cc | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/hydra-queue-runner/build-remote.cc b/src/hydra-queue-runner/build-remote.cc index 2f9df16f..4565b82e 100644 --- a/src/hydra-queue-runner/build-remote.cc +++ b/src/hydra-queue-runner/build-remote.cc @@ -481,14 +481,22 @@ void State::buildRemote(ref destStore, for (auto & path : pathsSorted) { auto & info = infos.find(path)->second; - to << cmdDumpStorePath << localStore->printStorePath(path); - to.flush(); /* Receive the NAR from the remote and add it to the destination store. Meanwhile, extract all the info from the NAR that getBuildOutput() needs. */ auto source2 = sinkToSource([&](Sink & sink) { + /* Note: we should only send the command to dump the store + path to the remote if the NAR is actually going to get read + by the destination store, which won't happen if this path + is already valid on the destination store. Since this + lambda function only gets executed if someone tries to read + from source2, we will send the command from here rather + than outside the lambda. */ + to << cmdDumpStorePath << localStore->printStorePath(path); + to.flush(); + TeeSource tee(from, sink); extractNarData(tee, localStore->printStorePath(path), narMembers); }); From 58dd7f9ed3c5a387c2f47be70c1d1892afa5d223 Mon Sep 17 00:00:00 2001 From: Silvan Mosberger Date: Sat, 6 Feb 2021 00:02:30 +0100 Subject: [PATCH 005/965] Fix Github status plugin for flakes If the root flake is a github: one, github status notifications are sent to it. The githubstatus->inputs configuration isn't used for flakes. 
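For reference, a standalone Perl sketch of the parse this commit adds
(the regex and the statuses URL are the ones used in the patch; the
revision is just an illustrative value):

    use strict;
    use warnings;

    my $flake = "github:NixOS/hydra/48d6f0de2ab94f728d287b9c9670c4d237e7c0f6";
    if ($flake =~ m!github:([^/]+)/([^/]+)/(.+)$!) {
        my ($owner, $repo, $rev) = ($1, $2, $3);
        # The plugin then POSTs the status to:
        # https://api.github.com/repos/$owner/$repo/statuses/$rev
        print "owner=$owner repo=$repo rev=$rev\n";
    }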
--- src/lib/Hydra/Plugin/GithubStatus.pm | 36 ++++++++++++++++++++-------- 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/src/lib/Hydra/Plugin/GithubStatus.pm b/src/lib/Hydra/Plugin/GithubStatus.pm index 299a38e2..e2e4d8ed 100644 --- a/src/lib/Hydra/Plugin/GithubStatus.pm +++ b/src/lib/Hydra/Plugin/GithubStatus.pm @@ -57,17 +57,14 @@ sub common { my @inputs = defined $inputs_cfg ? ref $inputs_cfg eq "ARRAY" ? @$inputs_cfg : ($inputs_cfg) : (); my %seen = map { $_ => {} } @inputs; while (my $eval = $evals->next) { - foreach my $input (@inputs) { - my $i = $eval->jobsetevalinputs->find({ name => $input, altnr => 0 }); - next unless defined $i; - my $uri = $i->uri; - my $rev = $i->revision; - my $key = $uri . "-" . $rev; - next if exists $seen{$input}->{$key}; + + my $sendStatus = sub { + my ($input, $owner, $repo, $rev) = @_; + + my $key = $owner . "-" . $repo . "-" . $rev; + return if exists $seen{$input}->{$key}; $seen{$input}->{$key} = 1; - $uri =~ m![:/]([^/]+)/([^/]+?)(?:.git)?$!; - my $owner = $1; - my $repo = $2; + my $url = "https://api.github.com/repos/$owner/$repo/statuses/$rev"; my $req = HTTP::Request->new('POST', $url); $req->header('Content-Type' => 'application/json'); @@ -91,6 +88,25 @@ sub common { } else { print STDERR "GithubStatus ratelimit $limitRemaining/$limit, resets in $diff\n"; } + }; + + if (defined $eval->flake) { + my $fl = $eval->flake; + print STDERR "Flake is $fl\n"; + $eval->flake =~ m!github:([^/]+)/([^/]+)/(.+)$!; + $sendStatus->("src", $1, $2, $3); + } else { + foreach my $input (@inputs) { + my $i = $eval->jobsetevalinputs->find({ name => $input, altnr => 0 }); + if (! defined $i) { + print STDERR "Evaluation $eval doesn't have input $input\n"; + } + next unless defined $i; + my $uri = $i->uri; + my $rev = $i->revision; + $uri =~ m![:/]([^/]+)/([^/]+?)(?:.git)?$!; + $sendStatus->($input, $1, $2, $rev); + } } } } From 150213cbb3ca6a8f5f7b8e5f1b60e0b245fac125 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6gler?= Date: Sun, 7 Feb 2021 19:18:29 +0100 Subject: [PATCH 006/965] Fix login if Hydra runs behind HTTP proxy with sub-path location --- src/lib/Hydra/Controller/Root.pm | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib/Hydra/Controller/Root.pm b/src/lib/Hydra/Controller/Root.pm index 66aba9e5..24fddbf2 100644 --- a/src/lib/Hydra/Controller/Root.pm +++ b/src/lib/Hydra/Controller/Root.pm @@ -76,8 +76,8 @@ sub begin :Private { # XSRF protection: require POST requests to have the same origin. if ($c->req->method eq "POST" && $c->req->path ne "api/push-github") { - my $referer = $c->req->header('Origin'); - $referer //= $c->req->header('Referer'); + my $referer = $c->req->header('Referer'); + $referer //= $c->req->header('Origin'); my $base = $c->req->base; die unless $base =~ /\/$/; $referer .= "/"; From 2240035e20a5f4e6c621720fd8cd9536b584b338 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Fri, 19 Feb 2021 17:04:19 -0500 Subject: [PATCH 007/965] Run tests with yath This will let us run tests in parallel, and creates a more Perl-standard test development experience. 
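As an illustration of the workflow this enables, a hypothetical
tests/example.t (not part of this patch) in the Test2 style that
`yath test` discovers and runs in parallel with the other *.t files:

    use strict;
    use warnings;
    use Test2::V0;

    ok(1 + 1 == 2, 'the harness runs plain Test2 assertions');
    is('hydra', 'hydra', 'string comparisons work too');

    done_testing;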
--- flake.nix | 2 ++ tests/Makefile.am | 2 +- tests/{evaluation-tests.pl => evaluation.t} | 0 tests/{ => lib}/Setup.pm | 0 tests/test.pl | 24 +++++++++++++++++++++ 5 files changed, 27 insertions(+), 1 deletion(-) rename tests/{evaluation-tests.pl => evaluation.t} (100%) rename tests/{ => lib}/Setup.pm (100%) create mode 100644 tests/test.pl diff --git a/flake.nix b/flake.nix index de4f29c5..08783bbc 100644 --- a/flake.nix +++ b/flake.nix @@ -126,6 +126,7 @@ EmailMIME EmailSender FileSlurp + FileWhich IOCompress IPCRun JSON @@ -145,6 +146,7 @@ TermSizeAny TestMore TextDiff + Test2Harness TextTable XMLSimple YAML diff --git a/tests/Makefile.am b/tests/Makefile.am index b0881bce..f5a92239 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -26,7 +26,7 @@ EXTRA_DIST = \ TESTS = \ set-up.pl \ - evaluation-tests.pl \ + test.pl \ tear-down.pl check_SCRIPTS = repos diff --git a/tests/evaluation-tests.pl b/tests/evaluation.t similarity index 100% rename from tests/evaluation-tests.pl rename to tests/evaluation.t diff --git a/tests/Setup.pm b/tests/lib/Setup.pm similarity index 100% rename from tests/Setup.pm rename to tests/lib/Setup.pm diff --git a/tests/test.pl b/tests/test.pl new file mode 100644 index 00000000..fc5b215a --- /dev/null +++ b/tests/test.pl @@ -0,0 +1,24 @@ +#!/usr/bin/env perl +# HARNESS-NO-PRELOAD +# HARNESS-CAT-LONG +# THIS IS A GENERATED YATH RUNNER TEST +use strict; +use warnings; + +use lib 'lib'; +BEGIN { + use File::Which qw(which); + $App::Yath::Script::SCRIPT = which 'yath'; +} +use App::Yath::Util qw/find_yath/; + +system($^X, find_yath(), '-D', 'test', '--default-search' => './', @ARGV); +my $exit = $?; + +# This makes sure it works with prove. +print "1..1\n"; +print "not " if $exit; +print "ok 1 - Passed tests when run by yath\n"; +print STDERR "yath exited with $exit" if $exit; + +exit($exit ? 255 : 0); From 45d9a22d7393c3f1f6ef60ea7c3abcbd3757e99c Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Fri, 19 Feb 2021 17:06:49 -0500 Subject: [PATCH 008/965] flake.nix: add perlPackages until they're available from nixpkgs These packages were added to Nixpkgs in https://github.com/NixOS/nixpkgs/pull/113702. --- flake.nix | 77 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) diff --git a/flake.nix b/flake.nix index 08783bbc..198f9b8b 100644 --- a/flake.nix +++ b/flake.nix @@ -37,6 +37,83 @@ # Add LDAP dependencies that aren't currently found within nixpkgs. 
perlPackages = prev.perlPackages // { + Test2Harness = final.buildPerlPackage { + pname = "Test2-Harness"; + version = "1.000042"; + src = final.fetchurl { + url = "mirror://cpan/authors/id/E/EX/EXODIST/Test2-Harness-1.000042.tar.gz"; + sha256 = "aaf231a68af1a6ffd6a11188875fcf572e373e43c8285945227b9d687b43db2d"; + }; + + checkPhase = '' + patchShebangs ./t ./scripts/yath + ./scripts/yath test -j $NIX_BUILD_CORES + ''; + + propagatedBuildInputs = with final.perlPackages; [ DataUUID Importer LongJump ScopeGuard TermTable Test2PluginMemUsage Test2PluginUUID Test2Suite gotofile ]; + meta = { + description = "A new and improved test harness with better Test2 integration"; + license = with final.lib.licenses; [ artistic1 gpl1Plus ]; + }; + }; + + Test2PluginMemUsage = prev.perlPackages.buildPerlPackage { + pname = "Test2-Plugin-MemUsage"; + version = "0.002003"; + src = final.fetchurl { + url = "mirror://cpan/authors/id/E/EX/EXODIST/Test2-Plugin-MemUsage-0.002003.tar.gz"; + sha256 = "5e0662d5a823ae081641f5ce82843111eec1831cd31f883a6c6de54afdf87c25"; + }; + buildInputs = with final.perlPackages; [ Test2Suite ]; + meta = { + description = "Collect and display memory usage information"; + license = with final.lib.licenses; [ artistic1 gpl1Plus ]; + }; + }; + + Test2PluginUUID = prev.perlPackages.buildPerlPackage { + pname = "Test2-Plugin-UUID"; + version = "0.002001"; + src = final.fetchurl { + url = "mirror://cpan/authors/id/E/EX/EXODIST/Test2-Plugin-UUID-0.002001.tar.gz"; + sha256 = "4c6c8d484d7153d8779dc155a992b203095b5c5aa1cfb1ee8bcedcd0601878c9"; + }; + buildInputs = with final.perlPackages;[ Test2Suite ]; + propagatedBuildInputs = with final.perlPackages; [ DataUUID ]; + meta = { + description = "Use REAL UUIDs in Test2"; + license = with final.lib.licenses; [ artistic1 gpl1Plus ]; + }; + }; + + LongJump = final.buildPerlPackage { + pname = "Long-Jump"; + version = "0.000001"; + src = final.fetchurl { + url = "mirror://cpan/authors/id/E/EX/EXODIST/Long-Jump-0.000001.tar.gz"; + sha256 = "d5d6456d86992b559d8f66fc90960f919292cd3803c13403faac575762c77af4"; + }; + buildInputs = with final.perlPackages; [ Test2Suite ]; + meta = { + description = "Mechanism for returning to a specific point from a deeply nested stack"; + license = with final.lib.licenses; [ artistic1 gpl1Plus ]; + }; + }; + + gotofile = final.buildPerlPackage { + pname = "goto-file"; + version = "0.005"; + src = final.fetchurl { + url = "mirror://cpan/authors/id/E/EX/EXODIST/goto-file-0.005.tar.gz"; + sha256 = "c6cdd5ee4a6cdcbdbf314d92a4f9985dbcdf9e4258048cae76125c052aa31f77"; + }; + buildInputs = with final.perlPackages; [ Test2Suite ]; + meta = { + description = "Stop parsing the current file and move on to a different one"; + license = with final.lib.licenses; [ artistic1 gpl1Plus ]; + }; + }; + NetLDAPServer = prev.perlPackages.buildPerlPackage { pname = "Net-LDAP-Server"; version = "0.43"; From 34b438ab6ed66c91d9ec99654bc0f5e6df8caac2 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 22 Feb 2021 15:03:20 +0100 Subject: [PATCH 009/965] flake.lock: Update Flake input changes: * Updated 'nix': 'github:NixOS/nix/8a2ce0f455da32bc20978e68c0aad9efb4560abc' -> 'github:NixOS/nix/548437c2347159c4c79352283dd12ce58324f1d6' * Removed 'nix/lowdown-src' --- flake.lock | 23 +++-------------------- 1 file changed, 3 insertions(+), 20 deletions(-) diff --git a/flake.lock b/flake.lock index e7ec0397..82427e9f 100644 --- a/flake.lock +++ b/flake.lock @@ -1,32 +1,15 @@ { "nodes": { - "lowdown-src": { - "flake": false, - "locked": { - 
"lastModified": 1598695561, - "narHash": "sha256-gyH/5j+h/nWw0W8AcR2WKvNBUsiQ7QuxqSJNXAwV+8E=", - "owner": "kristapsdz", - "repo": "lowdown", - "rev": "1705b4a26fbf065d9574dce47a94e8c7c79e052f", - "type": "github" - }, - "original": { - "owner": "kristapsdz", - "repo": "lowdown", - "type": "github" - } - }, "nix": { "inputs": { - "lowdown-src": "lowdown-src", "nixpkgs": "nixpkgs" }, "locked": { - "lastModified": 1609520816, - "narHash": "sha256-IGO7tfJXsv9u2wpW76VCzOsHYapRZqH9pHGVsoffPrI=", + "lastModified": 1613747933, + "narHash": "sha256-Q6VuNRdr87B4F3ILiM6IlQ+bkIYbQTs6EEAtwNrvl1Y=", "owner": "NixOS", "repo": "nix", - "rev": "8a2ce0f455da32bc20978e68c0aad9efb4560abc", + "rev": "548437c2347159c4c79352283dd12ce58324f1d6", "type": "github" }, "original": { From a7d8ee98da37ffc6d4178fcbe60802d7d87e0dd4 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 22 Feb 2021 15:10:24 +0100 Subject: [PATCH 010/965] Fix build --- src/hydra-eval-jobs/hydra-eval-jobs.cc | 9 --------- src/hydra-queue-runner/hydra-queue-runner.cc | 6 ++++-- 2 files changed, 4 insertions(+), 11 deletions(-) diff --git a/src/hydra-eval-jobs/hydra-eval-jobs.cc b/src/hydra-eval-jobs/hydra-eval-jobs.cc index e6f9a4d4..1cea8390 100644 --- a/src/hydra-eval-jobs/hydra-eval-jobs.cc +++ b/src/hydra-eval-jobs/hydra-eval-jobs.cc @@ -37,15 +37,6 @@ struct MyArgs : MixEvalArgs, MixCommonArgs MyArgs() : MixCommonArgs("hydra-eval-jobs") { - addFlag({ - .longName = "help", - .description = "show usage information", - .handler = {[&]() { - printHelp(programName, std::cout); - throw Exit(); - }} - }); - addFlag({ .longName = "gc-roots-dir", .description = "garbage collector roots directory", diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index f50c00e5..3b9ae480 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -97,7 +97,7 @@ void State::parseMachines(const std::string & contents) machine->systemTypes = tokenizeString(tokens[1], ","); machine->sshKey = tokens[2] == "-" ? string("") : tokens[2]; if (tokens[3] != "") - string2Int(tokens[3], machine->maxJobs); + machine->maxJobs = string2IntmaxJobs)>(tokens[3]).value(); else machine->maxJobs = 1; machine->speedFactor = atof(tokens[4].c_str()); @@ -862,7 +862,9 @@ int main(int argc, char * * argv) else if (*arg == "--status") status = true; else if (*arg == "--build-one") { - if (!string2Int(getArg(*arg, arg, end), buildOne)) + if (auto b = string2Int(getArg(*arg, arg, end))) + buildOne = *b; + else throw Error("‘--build-one’ requires a build ID"); } else return false; From 107d60027fa22c3b919cce3e8565affbbd8aed3b Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 22 Feb 2021 16:29:07 +0100 Subject: [PATCH 011/965] hydra-eval-jobs: Fix unexpected EOF when a top-level attr fails --- src/hydra-eval-jobs/hydra-eval-jobs.cc | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/src/hydra-eval-jobs/hydra-eval-jobs.cc b/src/hydra-eval-jobs/hydra-eval-jobs.cc index 1cea8390..934bf42e 100644 --- a/src/hydra-eval-jobs/hydra-eval-jobs.cc +++ b/src/hydra-eval-jobs/hydra-eval-jobs.cc @@ -235,12 +235,13 @@ static void worker( else throw TypeError("attribute '%s' is %s, which is not supported", attrPath, showType(*v)); } catch (EvalError & e) { + auto msg = e.msg(); // Transmits the error we got from the previous evaluation // in the JSON output. 
- reply["error"] = filterANSIEscapes(e.msg(), true); + reply["error"] = filterANSIEscapes(msg, true); // Don't forget to print it into the STDERR log, this is // what's shown in the Hydra UI. - printError("error: %s", reply["error"]); + printError(msg); } writeLine(to.get(), reply.dump()); @@ -326,13 +327,15 @@ int main(int argc, char * * argv) EvalState state(myArgs.searchPath, openStore()); Bindings & autoArgs = *myArgs.getAutoArgs(state); worker(state, autoArgs, *to, *from); - } catch (std::exception & e) { + } catch (Error & e) { nlohmann::json err; - err["error"] = e.what(); + auto msg = e.msg(); + err["error"] = filterANSIEscapes(msg, true); + printError(msg); writeLine(to->get(), err.dump()); // Don't forget to print it into the STDERR log, this is // what's shown in the Hydra UI. - printError("error: %s", err["error"]); + writeLine(to->get(), "restart"); } }, ProcessOptions { .allowVfork = false }); From f602ed0d86a661d6f668c8b0f99c6ce7ca417e97 Mon Sep 17 00:00:00 2001 From: regnat Date: Tue, 23 Feb 2021 09:50:15 +0100 Subject: [PATCH 012/965] Remove the `sendDerivation` logic from the builder MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The queue runner used to special-case `localhost` as a remote builder: Rather than using the normal remote-build (using the `cmdBuildDerivation` command), it was using the (generally less efficient, except when running against localhost) `cmdBuildPaths` command because the latter didn't require a privileged Nix user (so made testing easier − allowing to run hydra in a container in particular). However: 1. this means that the build loop can follow two discint code paths depending on the setup, the irony being that the most commonly used one in production (the “non-localhost” case) isn't the one used in the testsuite (because all the tests run against a local store); 2. It turns out that the “localhost” version is buggy in relatively obvious ways − in particular a failure in a fixed-output derivation or a hash mismatch isn't reported properly; 3. If the “run in a container” use-case is indeed that important, it can be (partially) restored using a chroot store (which wouldn't behave excactly the same way of course, but would be more than good-enough for testing) --- src/hydra-queue-runner/build-remote.cc | 154 ++++++++++--------------- 1 file changed, 60 insertions(+), 94 deletions(-) diff --git a/src/hydra-queue-runner/build-remote.cc b/src/hydra-queue-runner/build-remote.cc index 2f9df16f..a68575c7 100644 --- a/src/hydra-queue-runner/build-remote.cc +++ b/src/hydra-queue-runner/build-remote.cc @@ -210,7 +210,6 @@ void State::buildRemote(ref destStore, }); /* Handshake. */ - bool sendDerivation = true; unsigned int remoteVersion; try { @@ -223,12 +222,6 @@ void State::buildRemote(ref destStore, remoteVersion = readInt(from); if (GET_PROTOCOL_MAJOR(remoteVersion) != 0x200) throw Error("unsupported ‘nix-store --serve’ protocol version on ‘%1%’", machine->sshName); - // Always send the derivation to localhost, since it's a - // no-op anyway but we might not be privileged to use - // cmdBuildDerivation (e.g. if we're running in a NixOS - // container). 
- if (GET_PROTOCOL_MINOR(remoteVersion) >= 1 && !machine->isLocalhost()) - sendDerivation = false; if (GET_PROTOCOL_MINOR(remoteVersion) < 3 && repeats > 0) throw Error("machine ‘%1%’ does not support repeating a build; please upgrade it to Nix 1.12", machine->sshName); @@ -253,11 +246,8 @@ void State::buildRemote(ref destStore, StorePathSet inputs; BasicDerivation basicDrv(*step->drv); - if (sendDerivation) - inputs.insert(step->drvPath); - else - for (auto & p : step->drv->inputSrcs) - inputs.insert(p); + for (auto & p : step->drv->inputSrcs) + inputs.insert(p); for (auto & input : step->drv->inputDrvs) { auto drv2 = localStore->readDerivation(input.first); @@ -313,13 +303,8 @@ void State::buildRemote(ref destStore, updateStep(ssBuilding); - if (sendDerivation) { - to << cmdBuildPaths; - worker_proto::write(*localStore, to, StorePathSet{step->drvPath}); - } else { - to << cmdBuildDerivation << localStore->printStorePath(step->drvPath); - writeDerivation(to, *localStore, basicDrv); - } + to << cmdBuildDerivation << localStore->printStorePath(step->drvPath); + writeDerivation(to, *localStore, basicDrv); to << maxSilentTime << buildTimeout; if (GET_PROTOCOL_MINOR(remoteVersion) >= 2) to << maxLogSize; @@ -337,83 +322,64 @@ void State::buildRemote(ref destStore, } result.stopTime = time(0); - if (sendDerivation) { - if (res) { - result.errorMsg = fmt("%s on ‘%s’", readString(from), machine->sshName); - if (res == 100) { - result.stepStatus = bsFailed; - result.canCache = true; - } - else if (res == 101) { - result.stepStatus = bsTimedOut; - } - else { - result.stepStatus = bsAborted; - result.canRetry = true; - } - return; + result.errorMsg = readString(from); + if (GET_PROTOCOL_MINOR(remoteVersion) >= 3) { + result.timesBuilt = readInt(from); + result.isNonDeterministic = readInt(from); + auto start = readInt(from); + auto stop = readInt(from); + if (start && start) { + /* Note: this represents the duration of a single + round, rather than all rounds. */ + result.startTime = start; + result.stopTime = stop; } - result.stepStatus = bsSuccess; - } else { - result.errorMsg = readString(from); - if (GET_PROTOCOL_MINOR(remoteVersion) >= 3) { - result.timesBuilt = readInt(from); - result.isNonDeterministic = readInt(from); - auto start = readInt(from); - auto stop = readInt(from); - if (start && start) { - /* Note: this represents the duration of a single - round, rather than all rounds. 
*/ - result.startTime = start; - result.stopTime = stop; - } - } - switch ((BuildResult::Status) res) { - case BuildResult::Built: - result.stepStatus = bsSuccess; - break; - case BuildResult::Substituted: - case BuildResult::AlreadyValid: - result.stepStatus = bsSuccess; - result.isCached = true; - break; - case BuildResult::PermanentFailure: - result.stepStatus = bsFailed; - result.canCache = true; - result.errorMsg = ""; - break; - case BuildResult::InputRejected: - case BuildResult::OutputRejected: - result.stepStatus = bsFailed; - result.canCache = true; - break; - case BuildResult::TransientFailure: - result.stepStatus = bsFailed; - result.canRetry = true; - result.errorMsg = ""; - break; - case BuildResult::TimedOut: - result.stepStatus = bsTimedOut; - result.errorMsg = ""; - break; - case BuildResult::MiscFailure: - result.stepStatus = bsAborted; - result.canRetry = true; - break; - case BuildResult::LogLimitExceeded: - result.stepStatus = bsLogLimitExceeded; - break; - case BuildResult::NotDeterministic: - result.stepStatus = bsNotDeterministic; - result.canRetry = false; - result.canCache = true; - break; - default: - result.stepStatus = bsAborted; - break; - } - if (result.stepStatus != bsSuccess) return; } + switch ((BuildResult::Status) res) { + case BuildResult::Built: + result.stepStatus = bsSuccess; + break; + case BuildResult::Substituted: + case BuildResult::AlreadyValid: + result.stepStatus = bsSuccess; + result.isCached = true; + break; + case BuildResult::PermanentFailure: + result.stepStatus = bsFailed; + result.canCache = true; + result.errorMsg = ""; + break; + case BuildResult::InputRejected: + case BuildResult::OutputRejected: + result.stepStatus = bsFailed; + result.canCache = true; + break; + case BuildResult::TransientFailure: + result.stepStatus = bsFailed; + result.canRetry = true; + result.errorMsg = ""; + break; + case BuildResult::TimedOut: + result.stepStatus = bsTimedOut; + result.errorMsg = ""; + break; + case BuildResult::MiscFailure: + result.stepStatus = bsAborted; + result.canRetry = true; + break; + case BuildResult::LogLimitExceeded: + result.stepStatus = bsLogLimitExceeded; + break; + case BuildResult::NotDeterministic: + result.stepStatus = bsNotDeterministic; + result.canRetry = false; + result.canCache = true; + break; + default: + result.stepStatus = bsAborted; + break; + } + if (result.stepStatus != bsSuccess) return; result.errorMsg = ""; From 3ebcaef1273eeece1ddf6a73417923540b1e653f Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 23 Feb 2021 17:25:29 -0500 Subject: [PATCH 013/965] README: update with instructions on running tests --- README.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/README.md b/README.md index cbc50650..6139edad 100644 --- a/README.md +++ b/README.md @@ -106,6 +106,15 @@ conflicts with services that might be running on your host, hydra and postgress Note that this is only ever meant as an ad-hoc way of executing Hydra during development. Please make use of the NixOS module for actually running Hydra in production. +### Running Tests + +After making your changes, verify the test suite still passes. After following the steps in [Development Environment](#development-environment), run: + +``` +$ nix-shell +$ make check +``` + ### JSON API You can also interface with Hydra through a JSON API. 
The API is defined in [hydra-api.yaml](./hydra-api.yaml) and you can test and explore via the [swagger editor](https://editor.swagger.io/?url=https://raw.githubusercontent.com/NixOS/hydra/master/hydra-api.yaml) From 9d916877fb7c501c138cdc8d1abb2f1250f4a46a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Isma=C3=ABl=20Bouya?= Date: Wed, 24 Feb 2021 00:25:56 +0100 Subject: [PATCH 014/965] Add markdown files for documentation projects.xml and declarative-projects.xml were merged with xmllint, and then I ran that to convert files for i in *.xml; do pandoc -s -f docbook -t markdown $i -o ${i/xml/md}; done --- doc/manual/Makefile.am | 35 +-- doc/manual/src/SUMMARY.md | 9 + doc/manual/src/about.md | 6 + doc/manual/src/api.md | 249 ++++++++++++++++++ doc/manual/src/hacking.md | 28 ++ doc/manual/src/installation.md | 237 +++++++++++++++++ doc/manual/src/introduction.md | 173 ++++++++++++ doc/manual/src/projects.md | 463 +++++++++++++++++++++++++++++++++ flake.nix | 4 +- 9 files changed, 1170 insertions(+), 34 deletions(-) create mode 100644 doc/manual/src/SUMMARY.md create mode 100644 doc/manual/src/about.md create mode 100644 doc/manual/src/api.md create mode 100644 doc/manual/src/hacking.md create mode 100644 doc/manual/src/installation.md create mode 100644 doc/manual/src/introduction.md create mode 100644 doc/manual/src/projects.md diff --git a/doc/manual/Makefile.am b/doc/manual/Makefile.am index 10c8f6ee..ec732166 100644 --- a/doc/manual/Makefile.am +++ b/doc/manual/Makefile.am @@ -1,33 +1,6 @@ -DOCBOOK_FILES = installation.xml introduction.xml manual.xml projects.xml hacking.xml +MD_FILES = src/*.md -EXTRA_DIST = $(DOCBOOK_FILES) +EXTRA_DIST = $(MD_FILES) -xsltproc_opts = \ - --param callout.graphics.extension \'.gif\' \ - --param section.autolabel 1 \ - --param section.label.includes.component.label 1 - - -# Include the manual in the tarball. -dist_html_DATA = manual.html - -# Embed Docbook's callout images in the distribution. -EXTRA_DIST += images - -manual.html: $(DOCBOOK_FILES) - $(XSLTPROC) $(xsltproc_opts) --nonet --xinclude \ - --output manual.html \ - $(docbookxsl)/xhtml/docbook.xsl manual.xml - -images: - $(MKDIR_P) images/callouts - cp $(docbookxsl)/images/callouts/*.gif images/callouts - chmod +wx images images/callouts - -install-data-hook: images - $(INSTALL) -d $(DESTDIR)$(htmldir)/images/callouts - $(INSTALL_DATA) images/callouts/* $(DESTDIR)$(htmldir)/images/callouts - ln -sfn manual.html $(DESTDIR)$(htmldir)/index.html - -distclean-hook: - -rm -rf images +install: $(MD_FILES) + mdbook build . 
-d $(docdir) diff --git a/doc/manual/src/SUMMARY.md b/doc/manual/src/SUMMARY.md new file mode 100644 index 00000000..f0dc77a4 --- /dev/null +++ b/doc/manual/src/SUMMARY.md @@ -0,0 +1,9 @@ +# Hydra User's Guide + +- [Introduction](introduction.md) +- [Installation](installation.md) +- [Creating and Managing Projects](projects.md) +- [Using the external API](api.md) +----------- +[About](about.md) +[Hacking](hacking.md) diff --git a/doc/manual/src/about.md b/doc/manual/src/about.md new file mode 100644 index 00000000..6e65c55c --- /dev/null +++ b/doc/manual/src/about.md @@ -0,0 +1,6 @@ +# Authors + +* Eelco Dolstra, Delft University of Technology, Department of Software Technology +* Rob Vermaas, Delft University of Technology, Department of Software Technology +* Eelco Visser, Delft University of Technology, Department of Software Technology +* Ludovic Courtès diff --git a/doc/manual/src/api.md b/doc/manual/src/api.md new file mode 100644 index 00000000..1e27c644 --- /dev/null +++ b/doc/manual/src/api.md @@ -0,0 +1,249 @@ +Using the external API +====================== + +To be able to create integrations with other services, Hydra exposes an +external API that you can manage projects with. + +The API is accessed over HTTP(s) where all data is sent and received as +JSON. + +Creating resources requires the caller to be authenticated, while +retrieving resources does not. + +The API does not have a separate URL structure for it\'s endpoints. +Instead you request the pages of the web interface as `application/json` +to use the API. + +List projects +------------- + +To list all the `projects` of the Hydra install: + + GET / + Accept: application/json + +This will give you a list of `projects`, where each `project` contains +general information and a list of its `job sets`. 
+ +**Example** + + curl -i -H 'Accept: application/json' \ + https://hydra.nixos.org + +**Note:** this response is truncated + + GET https://hydra.nixos.org/ + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "displayname": "Acoda", + "name": "acoda", + "description": "Acoda is a tool set for automatic data migration along an evolving data model", + "enabled": 0, + "owner": "sander", + "hidden": 1, + "jobsets": [ + "trunk" + ] + }, + { + "displayname": "cabal2nix", + "name": "cabal2nix", + "description": "Convert Cabal files into Nix build instructions", + "enabled": 0, + "owner": "simons@cryp.to", + "hidden": 1, + "jobsets": [ + "master" + ] + } + ] + +Get a single project +-------------------- + +To get a single `project` by identifier: + + GET /project/:project-identifier + Accept: application/json + +**Example** + + curl -i -H 'Accept: application/json' \ + https://hydra.nixos.org/project/hydra + + GET https://hydra.nixos.org/project/hydra + HTTP/1.1 200 OK + Content-Type: application/json + + { + "description": "Hydra, the Nix-based continuous build system", + "hidden": 0, + "displayname": "Hydra", + "jobsets": [ + "hydra-master", + "hydra-ant-logger-trunk", + "master", + "build-ng" + ], + "name": "hydra", + "enabled": 1, + "owner": "eelco" + } + +Get a single job set +-------------------- + +To get a single `job set` by identifier: + + GET /jobset/:project-identifier/:jobset-identifier + Content-Type: application/json + +**Example** + + curl -i -H 'Accept: application/json' \ + https://hydra.nixos.org/jobset/hydra/build-ng + + GET https://hydra.nixos.org/jobset/hydra/build-ng + HTTP/1.1 200 OK + Content-Type: application/json + + { + "errormsg": "evaluation failed due to signal 9 (Killed)", + "fetcherrormsg": null, + "nixexprpath": "release.nix", + "nixexprinput": "hydraSrc", + "emailoverride": "rob.vermaas@gmail.com, eelco.dolstra@logicblox.com", + "jobsetinputs": { + "officialRelease": { + "jobsetinputalts": [ + "false" + ] + }, + "hydraSrc": { + "jobsetinputalts": [ + "https://github.com/NixOS/hydra.git build-ng" + ] + }, + "nixpkgs": { + "jobsetinputalts": [ + "https://github.com/NixOS/nixpkgs.git release-14.12" + ] + } + }, + "enabled": 0 + } + +List evaluations +---------------- + +To list the `evaluations` of a `job set` by identifier: + + GET /jobset/:project-identifier/:jobset-identifier/evals + Content-Type: application/json + +**Example** + + curl -i -H 'Accept: application/json' \ + https://hydra.nixos.org/jobset/hydra/build-ng/evals + +**Note:** this response is truncated + + GET https://hydra.nixos.org/jobset/hydra/build-ng/evals + HTTP/1.1 200 OK + Content-Type: application/json + + { + "evals": [ + { + "jobsetevalinputs": { + "nixpkgs": { + "dependency": null, + "type": "git", + "value": null, + "uri": "https://github.com/NixOS/nixpkgs.git", + "revision": "f60e48ce81b6f428d072d3c148f6f2e59f1dfd7a" + }, + "hydraSrc": { + "dependency": null, + "type": "git", + "value": null, + "uri": "https://github.com/NixOS/hydra.git", + "revision": "48d6f0de2ab94f728d287b9c9670c4d237e7c0f6" + }, + "officialRelease": { + "dependency": null, + "value": "false", + "type": "boolean", + "uri": null, + "revision": null + } + }, + "hasnewbuilds": 1, + "builds": [ + 24670686, + 24670684, + 24670685, + 24670687 + ], + "id": 1213758 + } + ], + "first": "?page=1", + "last": "?page=1" + } + +Get a single build +------------------ + +To get a single `build` by its id: + + GET /build/:build-id + Content-Type: application/json + +**Example** + + curl -i -H 'Accept: application/json' \ + 
https://hydra.nixos.org/build/24670686 + + GET /build/24670686 + HTTP/1.1 200 OK + Content-Type: application/json + + { + "job": "tests.api.x86_64-linux", + "jobsetevals": [ + 1213758 + ], + "buildstatus": 0, + "buildmetrics": null, + "project": "hydra", + "system": "x86_64-linux", + "priority": 100, + "releasename": null, + "starttime": 1439402853, + "nixname": "vm-test-run-unnamed", + "timestamp": 1439388618, + "id": 24670686, + "stoptime": 1439403403, + "jobset": "build-ng", + "buildoutputs": { + "out": { + "path": "/nix/store/lzrxkjc35mhp8w7r8h82g0ljyizfchma-vm-test-run-unnamed" + } + }, + "buildproducts": { + "1": { + "path": "/nix/store/lzrxkjc35mhp8w7r8h82g0ljyizfchma-vm-test-run-unnamed", + "defaultpath": "log.html", + "type": "report", + "sha256hash": null, + "filesize": null, + "name": "", + "subtype": "testlog" + } + }, + "finished": 1 + } diff --git a/doc/manual/src/hacking.md b/doc/manual/src/hacking.md new file mode 100644 index 00000000..6bf447f4 --- /dev/null +++ b/doc/manual/src/hacking.md @@ -0,0 +1,28 @@ +Hacking +======= + +This section provides some notes on how to hack on Hydra. To get the +latest version of Hydra from GitHub: + + $ git clone git://github.com/NixOS/hydra.git + $ cd hydra + +To build it and its dependencies: + + $ nix-build release.nix -A build.x86_64-linux + +To build all dependencies and start a shell in which all environment +variables (such as PERL5LIB) are set up so that those dependencies can +be found: + + $ nix-shell + +To build Hydra, you should then do: + + [nix-shell]$ ./bootstrap + [nix-shell]$ configurePhase + [nix-shell]$ make + +You can run the Hydra web server in your source tree as follows: + + $ ./src/script/hydra-server diff --git a/doc/manual/src/installation.md b/doc/manual/src/installation.md new file mode 100644 index 00000000..c038a450 --- /dev/null +++ b/doc/manual/src/installation.md @@ -0,0 +1,237 @@ +Installation +============ + +This chapter explains how to install Hydra on your own build farm +server. + +Prerequisites +------------- + +To install and use Hydra you need to have installed the following +dependencies: + +- Nix + +- PostgreSQL + +- many Perl packages, notably Catalyst, EmailSender, and NixPerl (see + the [Hydra expression in + Nixpkgs](https://github.com/NixOS/hydra/blob/master/release.nix) for + the complete list) + +At the moment, Hydra runs only on GNU/Linux (*i686-linux* and +*x86\_64\_linux*). + +For small projects, Hydra can be run on any reasonably modern machine. +For individual projects you can even run Hydra on a laptop. However, the +charm of a buildfarm server is usually that it operates without +disturbing the developer\'s working environment and can serve releases +over the internet. In conjunction you should typically have your source +code administered in a version management system, such as subversion. +Therefore, you will probably want to install a server that is connected +to the internet. To scale up to large and/or many projects, you will +need at least a considerable amount of diskspace to store builds. Since +Hydra can schedule multiple simultaneous build jobs, it can be useful to +have a multi-core machine, and/or attach multiple build machines in a +network to the central Hydra server. + +Of course we think it is a good idea to use the +[NixOS](http://nixos.org/nixos) GNU/Linux distribution for your +buildfarm server. But this is not a requirement. The Nix software +deployment system can be installed on any GNU/Linux distribution in +parallel to the regular package management system. 
Thus, you can use +Hydra on a Debian, Fedora, SuSE, or Ubuntu system. + +Getting Nix +----------- + +If your server runs NixOS you are all set to continue with installation +of Hydra. Otherwise you first need to install Nix. The latest stable +version can be found one [the Nix web +site](http://nixos.org/nix/download.html), along with a manual, which +includes installation instructions. + +Installation +------------ + +The latest development snapshot of Hydra can be installed by visiting +the URL +[`http://hydra.nixos.org/view/hydra/unstable`](http://hydra.nixos.org/view/hydra/unstable) +and using the one-click install available at one of the build pages. You +can also install Hydra through the channel by performing the following +commands: + + nix-channel --add http://hydra.nixos.org/jobset/hydra/master/channel/latest + nix-channel --update + nix-env -i hydra + +Command completion should reveal a number of command-line tools from +Hydra, such as `hydra-queue-runner`. + +Creating the database +--------------------- + +Hydra stores its results in a PostgreSQL database. + +To setup a PostgreSQL database with *hydra* as database name and user +name, issue the following commands on the PostgreSQL server: + + createuser -S -D -R -P hydra + createdb -O hydra hydra + +Note that *\$prefix* is the location of Hydra in the nix store. + +Hydra uses an environment variable to know which database should be +used, and a variable which point to a location that holds some state. To +set these variables for a PostgreSQL database, add the following to the +file `~/.profile` of the user running the Hydra services. + + export HYDRA_DBI="dbi:Pg:dbname=hydra;host=dbserver.example.org;user=hydra;" + export HYDRA_DATA=/var/lib/hydra + +You can provide the username and password in the file `~/.pgpass`, e.g. + + dbserver.example.org:*:hydra:hydra:password + +Make sure that the *HYDRA\_DATA* directory exists and is writable for +the user which will run the Hydra services. + +Having set these environment variables, you can now initialise the +database by doing: + + hydra-init + +To create projects, you need to create a user with *admin* privileges. +This can be done using the command `hydra-create-user`: + + $ hydra-create-user alice --full-name 'Alice Q. User' \ + --email-address 'alice@example.org' --password foobar --role admin + +Additional users can be created through the web interface. + +Upgrading +--------- + +If you\'re upgrading Hydra from a previous version, you should do the +following to perform any necessary database schema migrations: + + hydra-init + +Getting Started +--------------- + +To start the Hydra web server, execute: + + hydra-server + +When the server is started, you can browse to [http://localhost:3000/]() +to start configuring your Hydra instance. + +The `hydra-server` command launches the web server. There are two other +processes that come into play: + +- The + evaluator + is responsible for periodically evaluating job sets, checking out + their dependencies off their version control systems (VCS), and + queueing new builds if the result of the evaluation changed. It is + launched by the + hydra-evaluator + command. +- The + queue runner + launches builds (using Nix) as they are queued by the evaluator, + scheduling them onto the configured Nix hosts. It is launched using + the + hydra-queue-runner + command. + +All three processes must be running for Hydra to be fully functional, +though it\'s possible to temporarily stop any one of them for +maintenance purposes, for instance. 
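For a first local test drive, the three daemons can simply be started
side by side from a shell (a minimal foreground setup, assuming the
*HYDRA\_DBI* and *HYDRA\_DATA* variables are exported as described
above):

    hydra-server &
    hydra-evaluator &
    hydra-queue-runner &

In production you would normally run them under a process supervisor
instead, for instance via the Hydra NixOS module.
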
+ +Serving behind reverse proxy +---------------------------- + +To serve hydra web server behind reverse proxy like *nginx* or *httpd* +some additional configuration must be made. + +Edit your `hydra.conf` file in a similar way to this example: + + using_frontend_proxy 1 + base_uri example.com + +`base_uri` should be your hydra servers proxied URL. If you are using +Hydra nixos module then setting `hydraURL` option should be enough. + +If you want to serve Hydra with a prefix path, for example +[http://example.com/hydra]() then you need to configure your reverse +proxy to pass `X-Request-Base` to hydra, with prefix path as value. For +example if you are using nginx, then use configuration similar to +following: + + server { + listen 433 ssl; + server_name example.com; + .. other configuration .. + location /hydra/ { + + proxy_pass http://127.0.0.1:3000; + proxy_redirect http://127.0.0.1:3000 https://example.com/hydra; + + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Request-Base /hydra; + } + } + +Using LDAP as authentication backend (optional) +----------------------------------------------- + +Instead of using Hydra\'s built-in user management you can optionally +use LDAP to manage roles and users. + +The `hydra-server` accepts the environment variable +*HYDRA\_LDAP\_CONFIG*. The value of the variable should point to a valid +YAML file containing the Catalyst LDAP configuration. The format of the +configuration file is describe in the +[*Catalyst::Authentication::Store::LDAP* +documentation](https://metacpan.org/pod/Catalyst::Authentication::Store::LDAP#CONFIGURATION-OPTIONS). +An example is given below. + +Roles can be assigned to users based on their LDAP group membership +(*use\_roles: 1* in the below example). For a user to have the role +*admin* assigned to them they should be in the group *hydra\_admin*. In +general any LDAP group of the form *hydra\_some\_role* (notice the +*hydra\_* prefix) will work. + + credential: + class: Password + password_field: password + password_type: self_check + store: + class: LDAP + ldap_server: localhost + ldap_server_options.timeout: 30 + binddn: "cn=root,dc=example" + bindpw: notapassword + start_tls: 0 + start_tls_options + verify: none + user_basedn: "ou=users,dc=example" + user_filter: "(&(objectClass=inetOrgPerson)(cn=%s))" + user_scope: one + user_field: cn + user_search_options: + deref: always + use_roles: 1 + role_basedn: "ou=groups,dc=example" + role_filter: "(&(objectClass=groupOfNames)(member=%s))" + role_scope: one + role_field: cn + role_value: dn + role_search_options: + deref: always + diff --git a/doc/manual/src/introduction.md b/doc/manual/src/introduction.md new file mode 100644 index 00000000..b88f9b0f --- /dev/null +++ b/doc/manual/src/introduction.md @@ -0,0 +1,173 @@ +Introduction +============ + +About Hydra +----------- + +Hydra is a tool for continuous integration testing and software release +that uses a purely functional language to describe build jobs and their +dependencies. Continuous integration is a simple technique to improve +the quality of the software development process. An automated system +continuously or periodically checks out the source code of a project, +builds it, runs tests, and produces reports for the developers. Thus, +various errors that might accidentally be committed into the code base +are automatically caught. 
Such a system allows more in-depth testing +than what developers could feasibly do manually: + +- Portability testing + : The software may need to be built and tested on many different + platforms. It is infeasible for each developer to do this before + every commit. +- Likewise, many projects have very large test sets (e.g., regression + tests in a compiler, or stress tests in a DBMS) that can take hours + or days to run to completion. +- Many kinds of static and dynamic analyses can be performed as part + of the tests, such as code coverage runs and static analyses. +- It may also be necessary to build many different + variants + of the software. For instance, it may be necessary to verify that + the component builds with various versions of a compiler. +- Developers typically use incremental building to test their changes + (since a full build may take too long), but this is unreliable with + many build management tools (such as Make), i.e., the result of the + incremental build might differ from a full build. +- It ensures that the software can be built from the sources under + revision control. Users of version management systems such as CVS + and Subversion often forget to place source files under revision + control. +- The machines on which the continuous integration system runs ideally + provides a clean, well-defined build environment. If this + environment is administered through proper SCM techniques, then + builds produced by the system can be reproduced. In contrast, + developer work environments are typically not under any kind of SCM + control. +- In large projects, developers often work on a particular component + of the project, and do not build and test the composition of those + components (again since this is likely to take too long). To prevent + the phenomenon of \`\`big bang integration\'\', where components are + only tested together near the end of the development process, it is + important to test components together as soon as possible (hence + continuous integration + ). +- It allows software to be + released + by automatically creating packages that users can download and + install. To do this manually represents an often prohibitive amount + of work, as one may want to produce releases for many different + platforms: e.g., installers for Windows and Mac OS X, RPM or Debian + packages for certain Linux distributions, and so on. + +In its simplest form, a continuous integration tool sits in a loop +building and releasing software components from a version management +system. For each component, it performs the following tasks: + +- It obtains the latest version of the component\'s source code from + the version management system. +- It runs the component\'s build process (which presumably includes + the execution of the component\'s test set). +- It presents the results of the build (such as error logs and + releases) to the developers, e.g., by producing a web page. + +Examples of continuous integration tools include Jenkins, CruiseControl +Tinderbox, Sisyphus, Anthill and BuildBot. These tools have various +limitations. + +- They do not manage the + build environment + . The build environment consists of the dependencies necessary to + perform a build action, e.g., compilers, libraries, etc. Setting up + the environment is typically done manually, and without proper SCM + control (so it may be hard to reproduce a build at a later time). + Manual management of the environment scales poorly in the number of + configurations that must be supported. 
For instance, suppose that we + want to build a component that requires a certain compiler X. We + then have to go to each machine and install X. If we later need a + newer version of X, the process must be repeated all over again. An + ever worse problem occurs if there are conflicting, mutually + exclusive versions of the dependencies. Thus, simply installing the + latest version is not an option. Of course, we can install these + components in different directories and manually pass the + appropriate paths to the build processes of the various components. + But this is a rather tiresome and error-prone process. +- They do not easily support + variability in software systems + . A system may have a great deal of build-time variability: optional + functionality, whether to build a debug or production version, + different versions of dependencies, and so on. (For instance, the + Linux kernel now has over 2,600 build-time configuration switches.) + It is therefore important that a continuous integration tool can + easily select and test different instances from the configuration + space of the system to reveal problems, such as erroneous + interactions between features. In a continuous integration setting, + it is also useful to test different combinations of versions of + subsystems, e.g., the head revision of a component against stable + releases of its dependencies, and vice versa, as this can reveal + various integration problems. + +*Hydra*, is a continuous integration tool that solves these problems. It +is built on top of the [Nix package manager](http://nixos.org/nix/), +which has a purely functional language for describing package build +actions and their dependencies. This allows the build environment for +projects to be produced automatically and deterministically, and +variability in components to be expressed naturally using functions; and +as such is an ideal fit for a continuous build system. + +About Us +-------- + +Hydra is the successor of the Nix Buildfarm, which was developed in +tandem with the Nix software deployment system. Nix was originally +developed at the Department of Information and Computing Sciences, +Utrecht University by the TraCE project (2003-2008). The project was +funded by the Software Engineering Research Program Jacquard to improve +the support for variability in software systems. Funding for the +development of Nix and Hydra is now provided by the NIRICT LaQuSo Build +Farm project. + +About this Manual +----------------- + +This manual tells you how to install the Hydra buildfarm software on +your own server and how to operate that server using its web interface. + +License +------- + +Hydra is free software: you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation, either version 3 of the License, or (at your +option) any later version. + +Hydra is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the [GNU General Public +License](http://www.gnu.org/licenses/) for more details. + +Hydra at `nixos.org` +-------------------- + +The `nixos.org` installation of Hydra runs at +[`http://hydra.nixos.org/`](http://hydra.nixos.org/). That installation +is used to build software components from the [Nix](http://nixos.org), +[NixOS](http://nixos.org/nixos), [GNU](http://www.gnu.org/), +[Stratego/XT](http://strategoxt.org), and related projects. 
+ +If you are one of the developers on those projects, it is likely that +you will be using the NixOS Hydra server in some way. If you need to +administer automatic builds for your project, you should pull the right +strings to get an account on the server. This manual will tell you how +to set up new projects and build jobs within those projects and write a +release.nix file to describe the build process of your project to Hydra. +You can skip the next chapter. + +If your project does not yet have automatic builds within the NixOS +Hydra server, it may actually be eligible. We are in the process of +setting up a large buildfarm that should be able to support open source +and academic software projects. Get in touch. + +Hydra on your own buildfarm +--------------------------- + +If you need to run your own Hydra installation, +[installation chapter](installation.md) explains how to download and install the +system on your own server. diff --git a/doc/manual/src/projects.md b/doc/manual/src/projects.md new file mode 100644 index 00000000..b144f1d9 --- /dev/null +++ b/doc/manual/src/projects.md @@ -0,0 +1,463 @@ +Creating and Managing Projects +============================== + +Once Hydra is installed and running, the next step is to add projects to +the build farm. We follow the example of the [Patchelf +project](http://nixos.org/patchelf.html), a software tool written in C +and using the GNU Build System (GNU Autoconf and GNU Automake). + +Log in to the web interface of your Hydra installation using the user +name and password you inserted in the database (by default, Hydra\'s web +server listens on [`localhost:3000`](http://localhost:3000/)). Then +follow the \"Create Project\" link to create a new project. + +Project Information +------------------- + +A project definition consists of some general information and a set of +job sets. The general information identifies a project, its owner, and +current state of activity. Here\'s what we fill in for the patchelf +project: + + Identifier: patchelf + +The *identifier* is the identity of the project. It is used in URLs and +in the names of build results. + +The identifier should be a unique name (it is the primary database key +for the project table in the database). If you try to create a project +with an already existing identifier you\'d get an error message from the +database. So try to create the project after entering just the general +information to figure out if you have chosen a unique name. Job sets can +be added once the project has been created. + + Display name: Patchelf + +The *display name* is used in menus. + + Description: A tool for modifying ELF binaries + +The *description* is used as short documentation of the nature of the +project. + + Owner: eelco + +The *owner* of a project can create and edit job sets. + + Enabled: Yes + +Only if the project is *enabled* are builds performed. + +Once created there should be an entry for the project in the sidebar. Go +to the project page for the +[Patchelf](http://localhost:3000/project/patchelf) project. + +Job Sets +-------- + +A project can consist of multiple *job sets* (hereafter *jobsets*), +separate tasks that can be built separately, but may depend on each +other (without cyclic dependencies, of course). 
+[Edit](http://localhost:3000/project/patchelf/edit) page of the Patchelf
+project and \"Add a new jobset\" by providing the following
+\"Information\":
+
+    Identifier: trunk
+    Description: Trunk
+    Nix expression: release.nix in input patchelfSrc
+
+This states that in order to build the `trunk` jobset, the Nix
+expression in the file `release.nix`, which can be obtained from input
+`patchelfSrc`, should be evaluated. (We\'ll have a look at `release.nix`
+later.)
+
+To realize a job we will probably need a number of inputs, which can be
+declared in the table below. As many inputs as required can be added.
+For patchelf we declare the following inputs.
+
+    patchelfSrc     'Git checkout' https://github.com/NixOS/patchelf
+
+    nixpkgs         'Git checkout' https://github.com/NixOS/nixpkgs
+
+    officialRelease Boolean false
+
+    system          String value "i686-linux"
+
+Building Jobs
+-------------
+
+Build Recipes
+-------------
+
+Build jobs and *build recipes* for a jobset are specified in a text file
+written in the [Nix language](http://nixos.org/nix/). The recipe is
+actually called a *Nix expression* in Nix parlance. By convention this
+file is often called `release.nix`.
+
+The `release.nix` file is typically kept under version control, and the
+repository that contains it is one of the build inputs of the
+corresponding jobset--often called `hydraConfig` by convention. The
+repository for that file and the actual file name are specified on the
+web interface of Hydra under the `Setup` tab of the jobset\'s overview
+page, under the `Nix expression` heading. See, for example, the [jobset
+overview page](http://hydra.nixos.org/jobset/patchelf/trunk) of the
+PatchELF project, and [the corresponding Nix
+file](https://github.com/NixOS/patchelf/blob/master/release.nix).
+
+Knowledge of the Nix language is recommended, but the example below
+should already give a good idea of how it works:
+
+    let
+      pkgs = import <nixpkgs> {}; ①
+
+      jobs = rec { ②
+
+        tarball = ③
+          pkgs.releaseTools.sourceTarball { ④
+            name = "hello-tarball";
+            src = <hello>; ⑤
+            buildInputs = (with pkgs; [ gettext texLive texinfo ]);
+          };
+
+        build = ⑥
+          { system ? builtins.currentSystem }: ⑦
+
+          let pkgs = import <nixpkgs> { inherit system; }; in
+          pkgs.releaseTools.nixBuild { ⑧
+            name = "hello";
+            src = jobs.tarball;
+            configureFlags = [ "--disable-silent-rules" ];
+          };
+      };
+    in
+      jobs ⑨
+
+
+This file shows what a `release.nix` file for
+[GNU Hello](http://www.gnu.org/software/hello/) would look like.
+GNU Hello is representative of many GNU and non-GNU free software
+projects:
+
+- it uses the GNU Build System, namely GNU Autoconf and GNU Automake;
+  for users, it means it can be installed using the usual
+  `./configure && make install` procedure;
+- it uses Gettext for internationalization;
+- it has a Texinfo manual, which can be rendered as PDF with TeX.
+
+The file defines a jobset consisting of two jobs: `tarball` and
+`build`. It contains the following elements (referenced from the figure
+by numbers):
+
+1.  This defines a variable `pkgs` holding the set of packages provided
+    by [Nixpkgs](http://nixos.org/nixpkgs/).
+
+    Since `nixpkgs` appears in angle brackets, there must be a build
+    input of that name in the Nix search path. In this case, the web
+    interface should show a `nixpkgs` build input, which is a checkout
+    of the Nixpkgs source code repository; Hydra then adds this and
+    other build inputs to the Nix search path when evaluating
+    `release.nix`.
+
+2.  This defines a variable holding the two Hydra jobs--an *attribute
+    set* in Nix.
+
+3.  This is the definition of the first job, named `tarball`. The
+    purpose of this job is to produce a usable source code tarball.
+
+4.  The `tarball` job calls the `sourceTarball` function, which
+    (roughly) runs `autoreconf && ./configure &&
+    make dist` on the checkout. The `buildInputs` attribute
+    specifies additional software dependencies for the job.
+
+    > The package names used in `buildInputs`--e.g., `texLive`--are the
+    > names of the *attributes* corresponding to these packages in
+    > Nixpkgs, specifically in the
+    > [`all-packages.nix`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/top-level/all-packages.nix)
+    > file. See the section entitled "Package Naming" in the Nixpkgs
+    > manual for more information.
+
+5.  The `tarball` job expects a `hello` build input to be available in
+    the Nix search path. Again, this input is passed by Hydra and is
+    meant to be a checkout of GNU Hello\'s source code repository.
+
+6.  This is the definition of the `build` job, whose purpose is to build
+    Hello from the tarball produced above.
+
+7.  The `build` function takes one parameter, `system`, which should be
+    a string defining the Nix system type--e.g., `"x86_64-linux"`.
+    Additionally, it refers to `jobs.tarball`, seen above.
+
+    Hydra inspects the formal argument list of the function (here, the
+    `system` argument) and passes it the corresponding parameter
+    specified as a build input on Hydra\'s web interface. Here, `system`
+    is passed by Hydra when it calls `build`. Thus, it must be defined
+    as a build input of type string in Hydra, which could take one of
+    several values.
+
+    The question mark after `system` defines the default value for this
+    argument, and is only useful when debugging locally.
+
+8.  The `build` job calls the `nixBuild` function, which unpacks the
+    tarball, then runs `./configure && make
+    && make check && make install`.
+
+9.  Finally, the set of jobs is returned to Hydra, as a Nix attribute
+    set.
+
+Building from the Command Line
+------------------------------
+
+It is often useful to test a build recipe, for instance before it is
+actually used by Hydra, when testing changes, or when debugging a build
+issue. Since build recipes for Hydra jobsets are just plain Nix
+expressions, they can be evaluated using the standard Nix tools.
+
+To evaluate the `tarball` job of the above example, just run:
+
+    $ nix-build release.nix -A tarball
+
+However, doing this with the example as is will probably
+yield an error like this:
+
+    error: user-thrown exception: file `hello' was not found in the Nix search path (add it using $NIX_PATH or -I)
+
+The error is self-explanatory. Assuming `$HOME/src/hello` points to a
+checkout of Hello, this can be fixed this way:
+
+    $ nix-build -I ~/src release.nix -A tarball
+
+Similarly, the `build` job can be evaluated:
+
+    $ nix-build -I ~/src release.nix -A build
+
+The `build` job reuses the result of the `tarball` job, rebuilding it
+only if it needs to.
+
+Adding More Jobs
+----------------
+
+The example illustrates how to write the most basic
+jobs, `tarball` and `build`. In practice, much more can be done by using
+features readily provided by Nixpkgs or by creating new jobs as
+customizations of existing jobs.
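+
+Before turning to the Nixpkgs helpers, note that a new job can be as
+small as one extra attribute in the `jobs` set. The following sketch
+repeats the GNU Hello example from above and adds a hypothetical
+`build64` job (an illustration only, not part of the actual Hello
+recipe) that simply pins the parameterized `build` job to one platform:
+
+    let
+      pkgs = import <nixpkgs> {};
+
+      jobs = rec {
+        tarball =
+          pkgs.releaseTools.sourceTarball {
+            name = "hello-tarball";
+            src = <hello>;
+            buildInputs = (with pkgs; [ gettext texLive texinfo ]);
+          };
+
+        build =
+          { system ? builtins.currentSystem }:
+
+          let pkgs = import <nixpkgs> { inherit system; }; in
+          pkgs.releaseTools.nixBuild {
+            name = "hello";
+            src = jobs.tarball;
+            configureFlags = [ "--disable-silent-rules" ];
+          };
+
+        # The new job: `build` pinned to a fixed platform, so that
+        # evaluating it does not require a `system` build input.
+        build64 = jobs.build { system = "x86_64-linux"; };
+      };
+    in
+      jobs
+
+Such an alias-style job is handy for, e.g., giving a stable job name to
+the one platform that releases are cut from.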
+
+For instance, test coverage reports for projects compiled with GCC can
+be generated automatically using the `coverageAnalysis` function
+provided by Nixpkgs instead of `nixBuild`. Back to our GNU Hello
+example, we can define a `coverage` job that produces an HTML code
+coverage report directly readable from the corresponding Hydra build
+page:
+
+    coverage =
+      { system ? builtins.currentSystem }:
+
+      let pkgs = import <nixpkgs> { inherit system; }; in
+      pkgs.releaseTools.coverageAnalysis {
+        name = "hello";
+        src = jobs.tarball;
+        configureFlags = [ "--disable-silent-rules" ];
+      };
+
+As can be seen, the only difference compared to `build` is the use of
+`coverageAnalysis`.
+
+Nixpkgs provides many more build tools, including the ability to run
+builds in virtual machines--which can themselves run another GNU/Linux
+distribution--allowing the creation of packages for these
+distributions. Please see [the `pkgs/build-support/release`
+directory](https://github.com/NixOS/nixpkgs/tree/master/pkgs/build-support/release)
+of Nixpkgs for more. The NixOS manual also contains information about
+whole-system testing in virtual machines.
+
+Now, assume we want to build Hello with an old version of GCC, and with
+different `configure` flags. A new `build_exotic` job can be written
+that simply *overrides* the relevant arguments passed to `nixBuild`:
+
+    build_exotic =
+      { system ? builtins.currentSystem }:
+
+      let
+        pkgs = import <nixpkgs> { inherit system; };
+        build = jobs.build { inherit system; };
+      in
+      pkgs.lib.overrideDerivation build (attrs: {
+        buildInputs = [ pkgs.gcc33 ];
+        preConfigure = "gcc --version";
+        configureFlags =
+          attrs.configureFlags ++ [ "--disable-nls" ];
+      });
+
+The `build_exotic` job reuses `build` and overrides some of its
+arguments: it adds a dependency on GCC 3.3, a pre-configure phase that
+runs `gcc --version`, and adds the `--disable-nls` configure flag.
+
+This customization mechanism is very powerful. For instance, it can be
+used to change the way Hello and *all* its dependencies--including the C
+library and compiler used to build it--are built. See the Nixpkgs manual
+for more.
+
+Declarative projects
+--------------------
+
+Hydra supports declaratively configuring a project\'s jobsets. This
+configuration can be done statically, or generated by a build job.
+
+> **Note**
+>
+> Hydra will treat the project\'s declarative input as a static definition
+> if and only if the spec file contains a dictionary of dictionaries. If
+> the value of any key in the spec is not a dictionary, it will treat the
+> spec as a generated declarative spec.
+
+### Static, Declarative Projects
+
+Hydra supports declarative projects, where jobsets are configured from a
+static JSON document in a repository.
+
+To configure a static declarative project, take the following steps:
+
+1.  Create a Hydra-fetchable source like a Git repository or local path.
+
+2.  In that source, create a file called `spec.json`, and add the
+    specification for all of the jobsets. Each key is a jobset name and
+    each value is the jobset\'s specification. For example:
+
+    ``` {.json}
+    {
+        "nixpkgs": {
+            "enabled": 1,
+            "hidden": false,
+            "description": "Nixpkgs",
+            "nixexprinput": "nixpkgs",
+            "nixexprpath": "pkgs/top-level/release.nix",
+            "checkinterval": 300,
+            "schedulingshares": 100,
+            "enableemail": false,
+            "emailoverride": "",
+            "keepnr": 3,
+            "inputs": {
+                "nixpkgs": {
+                    "type": "git",
+                    "value": "git://github.com/NixOS/nixpkgs.git master",
+                    "emailresponsible": false
+                }
+            }
+        },
+        "nixos": {
+            "enabled": 1,
+            "hidden": false,
+            "description": "NixOS: Small Evaluation",
+            "nixexprinput": "nixpkgs",
+            "nixexprpath": "nixos/release-small.nix",
+            "checkinterval": 300,
+            "schedulingshares": 100,
+            "enableemail": false,
+            "emailoverride": "",
+            "keepnr": 3,
+            "inputs": {
+                "nixpkgs": {
+                    "type": "git",
+                    "value": "git://github.com/NixOS/nixpkgs.git master",
+                    "emailresponsible": false
+                }
+            }
+        }
+    }
+    ```
+
+3.  Create a new project, and set the project\'s declarative input type,
+    declarative input value, and declarative spec file to point to the
+    source and JSON file you created in step 2.
+
+Hydra will create a special jobset named `.jobsets`. When the `.jobsets`
+jobset is evaluated, this static specification will be used for
+configuring the rest of the project\'s jobsets.
+
+### Generated, Declarative Projects
+
+Hydra also supports generated declarative projects, where jobsets are
+configured automatically from specification files instead of being
+managed through the UI. A jobset specification is a JSON object
+containing the configuration of the jobset, for example:
+
+``` {.json}
+{
+    "enabled": 1,
+    "hidden": false,
+    "description": "js",
+    "nixexprinput": "src",
+    "nixexprpath": "release.nix",
+    "checkinterval": 300,
+    "schedulingshares": 100,
+    "enableemail": false,
+    "emailoverride": "",
+    "keepnr": 3,
+    "inputs": {
+        "src": { "type": "git", "value": "git://github.com/shlevy/declarative-hydra-example.git", "emailresponsible": false },
+        "nixpkgs": { "type": "git", "value": "git://github.com/NixOS/nixpkgs.git release-16.03", "emailresponsible": false }
+    }
+}
+```
+
+To configure a declarative project, take the following steps:
+
+1.  Create a jobset repository in the normal way (e.g. a git repo with a
+    `release.nix` file, any other needed helper files, and taking any
+    kind of Hydra input), but without adding it to the UI. The Nix
+    expression of this repository should contain a single job, named
+    `jobsets`. The output of the `jobsets` job should be a JSON file
+    containing an object of jobset specifications. Each member of the
+    object will become a jobset of the project, configured by the
+    corresponding jobset specification. (A sketch of such an expression
+    is given at the end of this section.)
+
+2.  In some Hydra-fetchable source (potentially, but not necessarily,
+    the same repo you created in step 1), create a JSON file containing
+    a jobset specification that points to the jobset repository you
+    created in the first step, specifying any needed inputs
+    (e.g. nixpkgs).
+
+3.  In the project creation/edit page, set declarative input type,
+    declarative input value, and declarative spec file to point to the
+    source and JSON file you created in step 2.
+
+Hydra will create a special jobset named `.jobsets`, which whenever
+evaluated will go through the steps above in reverse order:
+
+1.  Hydra will fetch the input specified by the declarative input type
+    and value.
+
+2.  Hydra will use the configuration given in the declarative spec file
+    as the jobset configuration for this evaluation. In addition to any
+    inputs specified in the spec file, Hydra will also pass the
+    `declInput` argument corresponding to the input fetched in step 1.
+
+3.  As normal, Hydra will build the jobs specified in the jobset
+    repository, which in this case is the single `jobsets` job. When
+    that job completes, Hydra will read the created jobset
+    specifications and create corresponding jobsets in the project,
+    disabling any jobsets that used to exist but are not present in the
+    current spec.
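+
+To make step 1 above concrete, the following is a minimal sketch of
+what the `release.nix` of such a jobset repository could look like. It
+is an illustration only: the input names `src` and `nixpkgs` and the
+generated jobset name `main` are assumptions of this example, while the
+`jobsets` job name and the `declInput` argument are the parts fixed by
+the mechanism described above:
+
+    { nixpkgs, declInput }:
+
+    # `declInput` describes the input fetched in step 1; this minimal
+    # sketch does not use it, but it could be consulted to parameterize
+    # the generated specifications.
+
+    let pkgs = import nixpkgs {}; in
+
+    {
+      # The single job Hydra expects. Its output must be a JSON file
+      # mapping jobset names to jobset specifications.
+      jobsets = pkgs.writeText "spec.json" (builtins.toJSON {
+        main = {
+          enabled = 1;
+          hidden = false;
+          description = "jobset generated from the declarative spec";
+          nixexprinput = "src";
+          nixexprpath = "release.nix";
+          checkinterval = 300;
+          schedulingshares = 100;
+          enableemail = false;
+          emailoverride = "";
+          keepnr = 3;
+          inputs = {
+            src = {
+              type = "git";
+              value = "https://github.com/NixOS/patchelf master";
+              emailresponsible = false;
+            };
+          };
+        };
+      });
+    }
+
+Generating the JSON with `builtins.toJSON` keeps the specification in
+Nix syntax and avoids maintaining escaped JSON text by hand.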
+
+Email Notifications
+-------------------
+
+Hydra can send email notifications when the status of a build changes.
+This provides immediate feedback to maintainers or committers when a
+change causes build failures.
+
+The simplest approach to enable email notifications is to use the ssmtp
+package, which simply hands off the emails to another SMTP server. For
+details on how to configure ssmtp, see the documentation for the
+`networking.defaultMailServer` option. To use ssmtp for the Hydra email
+notifications, add it to the `path` option of the Hydra services in your
+`/etc/nixos/configuration.nix` file:
+
+    systemd.services.hydra-queue-runner.path = [ pkgs.ssmtp ];
+    systemd.services.hydra-server.path = [ pkgs.ssmtp ];
+
diff --git a/flake.nix b/flake.nix
index de4f29c5..71add436 100644
--- a/flake.nix
+++ b/flake.nix
@@ -162,7 +162,7 @@
         buildInputs =
           [ makeWrapper autoconf automake libtool unzip nukeReferences pkgconfig libpqxx
             gitAndTools.topGit mercurial darcs subversion breezy openssl bzip2 libxslt
-            final.nix perlDeps perl
+            final.nix perlDeps perl mdbook
             boost
             postgresql_11
             (if lib.versionAtLeast lib.version "20.03pre"
@@ -179,8 +179,6 @@
               gzip bzip2 lzma gnutar unzip git gitAndTools.topGit mercurial darcs gnused breezy
             ] ++ lib.optionals stdenv.isLinux [ rpm dpkg cdrkit ] );

-      configureFlags = [ "--with-docbook-xsl=${docbook_xsl}/xml/xsl/docbook" ];
-
       shellHook = ''
         PATH=$(pwd)/src/hydra-evaluator:$(pwd)/src/script:$(pwd)/src/hydra-eval-jobs:$(pwd)/src/hydra-queue-runner:$PATH
         PERL5LIB=$(pwd)/src/lib:$PERL5LIB

From bd64b2481d2e1cd4f4ca11dddfbe95e098dc447b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Isma=C3=ABl=20Bouya?=
Date: Wed, 24 Feb 2021 01:07:11 +0100
Subject: [PATCH 015/965] Remove old files

---
 doc/manual/api.xml                  | 334 -------------------
 doc/manual/declarative-projects.xml | 196 -----------
 doc/manual/hacking.xml              |  39 ---
 doc/manual/installation.xml         | 338 -------------------
 doc/manual/introduction.xml         | 267 ---------------
 doc/manual/manual.xml               |  70 ----
 doc/manual/projects.xml             | 496 ----------------------------
 7 files changed, 1740 deletions(-)
 delete mode 100644 doc/manual/api.xml
 delete mode 100644 doc/manual/declarative-projects.xml
 delete mode 100644 doc/manual/hacking.xml
 delete mode 100644 doc/manual/installation.xml
 delete mode 100644 doc/manual/introduction.xml
 delete mode 100644 doc/manual/manual.xml
 delete mode 100644 doc/manual/projects.xml

diff --git a/doc/manual/api.xml b/doc/manual/api.xml
deleted file mode 100644
index db5ba07e..00000000
--- a/doc/manual/api.xml
+++ /dev/null
@@ -1,334 +0,0 @@
-
-
- Using the external API
-
- 
- To be able to create integrations with other services, Hydra exposes
- an external API that you can manage projects with.
- 
-
- 
- The API is accessed over HTTP(s) where all data is sent and received
- as JSON.
- 
-
- 
- Creating resources requires the caller to be authenticated, while
- retrieving resources does not.
- 
-
- 
- The API does not have a separate URL structure for it's endpoints.
- Instead you request the pages of the web interface as - application/json to use the API. - - -
- List projects - - - To list all the projects of the Hydra install: - - - -GET / -Accept: application/json - - - - This will give you a list of projects, where each - project contains general information and a list - of its job sets. - - - - Example - - - -curl -i -H 'Accept: application/json' \ - https://hydra.nixos.org - - - - Note: this response is truncated - - - -GET https://hydra.nixos.org/ -HTTP/1.1 200 OK -Content-Type: application/json - -[ - { - "displayname": "Acoda", - "name": "acoda", - "description": "Acoda is a tool set for automatic data migration along an evolving data model", - "enabled": 0, - "owner": "sander", - "hidden": 1, - "jobsets": [ - "trunk" - ] - }, - { - "displayname": "cabal2nix", - "name": "cabal2nix", - "description": "Convert Cabal files into Nix build instructions", - "enabled": 0, - "owner": "simons@cryp.to", - "hidden": 1, - "jobsets": [ - "master" - ] - } -] - -
- -
- Get a single project - - - To get a single project by identifier: - - - -GET /project/:project-identifier -Accept: application/json - - - - Example - - - -curl -i -H 'Accept: application/json' \ - https://hydra.nixos.org/project/hydra - - - -GET https://hydra.nixos.org/project/hydra -HTTP/1.1 200 OK -Content-Type: application/json - -{ - "description": "Hydra, the Nix-based continuous build system", - "hidden": 0, - "displayname": "Hydra", - "jobsets": [ - "hydra-master", - "hydra-ant-logger-trunk", - "master", - "build-ng" - ], - "name": "hydra", - "enabled": 1, - "owner": "eelco" -} - - -
- -
- Get a single job set - - - To get a single job set by identifier: - - - -GET /jobset/:project-identifier/:jobset-identifier -Content-Type: application/json - - - - Example - - - -curl -i -H 'Accept: application/json' \ - https://hydra.nixos.org/jobset/hydra/build-ng - - - -GET https://hydra.nixos.org/jobset/hydra/build-ng -HTTP/1.1 200 OK -Content-Type: application/json - -{ - "errormsg": "evaluation failed due to signal 9 (Killed)", - "fetcherrormsg": null, - "nixexprpath": "release.nix", - "nixexprinput": "hydraSrc", - "emailoverride": "rob.vermaas@gmail.com, eelco.dolstra@logicblox.com", - "jobsetinputs": { - "officialRelease": { - "jobsetinputalts": [ - "false" - ] - }, - "hydraSrc": { - "jobsetinputalts": [ - "https://github.com/NixOS/hydra.git build-ng" - ] - }, - "nixpkgs": { - "jobsetinputalts": [ - "https://github.com/NixOS/nixpkgs.git release-14.12" - ] - } - }, - "enabled": 0 -} - -
- -
- List evaluations - - - To list the evaluations of a - job set by identifier: - - - -GET /jobset/:project-identifier/:jobset-identifier/evals -Content-Type: application/json - - - - Example - - - -curl -i -H 'Accept: application/json' \ - https://hydra.nixos.org/jobset/hydra/build-ng/evals - - - - Note: this response is truncated - - - -GET https://hydra.nixos.org/jobset/hydra/build-ng/evals -HTTP/1.1 200 OK -Content-Type: application/json - -{ - "evals": [ - { - "jobsetevalinputs": { - "nixpkgs": { - "dependency": null, - "type": "git", - "value": null, - "uri": "https://github.com/NixOS/nixpkgs.git", - "revision": "f60e48ce81b6f428d072d3c148f6f2e59f1dfd7a" - }, - "hydraSrc": { - "dependency": null, - "type": "git", - "value": null, - "uri": "https://github.com/NixOS/hydra.git", - "revision": "48d6f0de2ab94f728d287b9c9670c4d237e7c0f6" - }, - "officialRelease": { - "dependency": null, - "value": "false", - "type": "boolean", - "uri": null, - "revision": null - } - }, - "hasnewbuilds": 1, - "builds": [ - 24670686, - 24670684, - 24670685, - 24670687 - ], - "id": 1213758 - } - ], - "first": "?page=1", - "last": "?page=1" -} - -
- -
- Get a single build - - - To get a single build by its id: - - - -GET /build/:build-id -Content-Type: application/json - - - - Example - - - -curl -i -H 'Accept: application/json' \ - https://hydra.nixos.org/build/24670686 - - - -GET /build/24670686 -HTTP/1.1 200 OK -Content-Type: application/json - -{ - "job": "tests.api.x86_64-linux", - "jobsetevals": [ - 1213758 - ], - "buildstatus": 0, - "buildmetrics": null, - "project": "hydra", - "system": "x86_64-linux", - "priority": 100, - "releasename": null, - "starttime": 1439402853, - "nixname": "vm-test-run-unnamed", - "timestamp": 1439388618, - "id": 24670686, - "stoptime": 1439403403, - "jobset": "build-ng", - "buildoutputs": { - "out": { - "path": "/nix/store/lzrxkjc35mhp8w7r8h82g0ljyizfchma-vm-test-run-unnamed" - } - }, - "buildproducts": { - "1": { - "path": "/nix/store/lzrxkjc35mhp8w7r8h82g0ljyizfchma-vm-test-run-unnamed", - "defaultpath": "log.html", - "type": "report", - "sha256hash": null, - "filesize": null, - "name": "", - "subtype": "testlog" - } - }, - "finished": 1 -} - -
- -
- - diff --git a/doc/manual/declarative-projects.xml b/doc/manual/declarative-projects.xml deleted file mode 100644 index 59178e23..00000000 --- a/doc/manual/declarative-projects.xml +++ /dev/null @@ -1,196 +0,0 @@ -
- -Declarative projects - - - Hydra supports declaratively configuring a project's jobsets. This - configuration can be done statically, or generated by a build job. - - - - Hydra will treat the project's declarative input as a static definition - if and only if the spec file contains a dictionary of dictionaries. - If the value of any key in the spec is not a dictionary, it will - treat the spec as a generated declarative spec. - - -
- - Static, Declarative Projects - - Hydra supports declarative projects, where jobsets are configured - from a static JSON document in a repository. - - - - To configure a static declarative project, take the following steps: - - - - - Create a Hydra-fetchable source like a Git repository or local path. - - - - - In that source, create a file called spec.json, - and add the specification for all of the jobsets. Each key is jobset - and each value is a jobset's specification. For example: - - -{ - "nixpkgs": { - "enabled": 1, - "hidden": false, - "description": "Nixpkgs", - "nixexprinput": "nixpkgs", - "nixexprpath": "pkgs/top-level/release.nix", - "checkinterval": 300, - "schedulingshares": 100, - "enableemail": false, - "emailoverride": "", - "keepnr": 3, - "inputs": { - "nixpkgs": { - "type": "git", - "value": "git://github.com/NixOS/nixpkgs.git master", - "emailresponsible": false - } - } - }, - "nixos": { - "enabled": 1, - "hidden": false, - "description": "NixOS: Small Evaluation", - "nixexprinput": "nixpkgs", - "nixexprpath": "nixos/release-small.nix", - "checkinterval": 300, - "schedulingshares": 100, - "enableemail": false, - "emailoverride": "", - "keepnr": 3, - "inputs": { - "nixpkgs": { - "type": "git", - "value": "git://github.com/NixOS/nixpkgs.git master", - "emailresponsible": false - } - } - } -} - - - - - - Create a new project, and set the project's declarative input type, - declarative input value, and declarative spec file to point to the - source and JSON file you created in step 2. - - - - - Hydra will create a special jobset named .jobsets. - When the .jobsets jobset is evaluated, this static - specification will be used for configuring the rest of the project's - jobsets. - -
- -
- - Generated, Declarative Projects - - Hydra also supports generated declarative projects, where jobsets are - configured automatically from specification files instead of being - managed through the UI. A jobset specification is a JSON object - containing the configuration of the jobset, for example: - - - { - "enabled": 1, - "hidden": false, - "description": "js", - "nixexprinput": "src", - "nixexprpath": "release.nix", - "checkinterval": 300, - "schedulingshares": 100, - "enableemail": false, - "emailoverride": "", - "keepnr": 3, - "inputs": { - "src": { "type": "git", "value": "git://github.com/shlevy/declarative-hydra-example.git", "emailresponsible": false }, - "nixpkgs": { "type": "git", "value": "git://github.com/NixOS/nixpkgs.git release-16.03", "emailresponsible": false } - } - } - - - To configure a declarative project, take the following steps: - - - - - Create a jobset repository in the normal way (e.g. a git repo with - a release.nix file, any other needed helper - files, and taking any kind of hydra input), but without adding it - to the UI. The nix expression of this repository should contain a - single job, named jobsets. The output of the - jobsets job should be a JSON file containing an - object of jobset specifications. Each member of the object will - become a jobset of the project, configured by the corresponding - jobset specification. - - - - - In some hydra-fetchable source (potentially, but not necessarily, - the same repo you created in step 1), create a JSON file - containing a jobset specification that points to the jobset - repository you created in the first step, specifying any needed - inputs (e.g. nixpkgs) as necessary. - - - - - In the project creation/edit page, set declarative input type, - declarative input value, and declarative spec file to point to the - source and JSON file you created in step 2. - - - - - Hydra will create a special jobset named .jobsets, - which whenever evaluated will go through the steps above in reverse - order: - - - - - Hydra will fetch the input specified by the declarative input type - and value. - - - - - Hydra will use the configuration given in the declarative spec - file as the jobset configuration for this evaluation. In addition - to any inputs specified in the spec file, hydra will also pass the - declInput argument corresponding to the input - fetched in step 1. - - - - - As normal, hydra will build the jobs specified in the jobset - repository, which in this case is the single - jobsets job. When that job completes, hydra - will read the created jobset specifications and create - corresponding jobsets in the project, disabling any jobsets that - used to exist but are not present in the current spec. - - - -
-
diff --git a/doc/manual/hacking.xml b/doc/manual/hacking.xml deleted file mode 100644 index 20cac842..00000000 --- a/doc/manual/hacking.xml +++ /dev/null @@ -1,39 +0,0 @@ - - -Hacking - -This section provides some notes on how to hack on Hydra. To -get the latest version of Hydra from GitHub: - -$ git clone git://github.com/NixOS/hydra.git -$ cd hydra - - - -To build it and its dependencies: - -$ nix-build release.nix -A build.x86_64-linux - - - -To build all dependencies and start a shell in which all -environment variables (such as PERL5LIB) are set up so -that those dependencies can be found: - -$ nix-shell - -To build Hydra, you should then do: - -[nix-shell]$ ./bootstrap -[nix-shell]$ configurePhase -[nix-shell]$ make - -You can run the Hydra web server in your source tree as follows: - -$ ./src/script/hydra-server - - - - diff --git a/doc/manual/installation.xml b/doc/manual/installation.xml deleted file mode 100644 index c9bb0291..00000000 --- a/doc/manual/installation.xml +++ /dev/null @@ -1,338 +0,0 @@ - - - Installation - - - This chapter explains how to install Hydra on your own build farm server. - - -
- Prerequisites - - To install and use Hydra you need to have installed the following dependencies: - - - Nix - PostgreSQL - many Perl packages, notably Catalyst, EmailSender, - and NixPerl (see the Hydra - expression in Nixpkgs for the complete - list) - - - At the moment, Hydra runs only on GNU/Linux - (i686-linux and - x86_64_linux). - - - - For small projects, Hydra can be run on any reasonably modern - machine. For individual projects you can even run Hydra on a - laptop. However, the charm of a buildfarm server is usually that - it operates without disturbing the developer's working - environment and can serve releases over the internet. In - conjunction you should typically have your source code - administered in a version management system, such as - subversion. Therefore, you will probably want to install a - server that is connected to the internet. To scale up to large - and/or many projects, you will need at least a considerable - amount of diskspace to store builds. Since Hydra can schedule - multiple simultaneous build jobs, it can be useful to have a - multi-core machine, and/or attach multiple build machines in a - network to the central Hydra server. - - - - Of course we think it is a good idea to use the NixOS GNU/Linux - distribution for your buildfarm server. But this is not a - requirement. The Nix software deployment system can be - installed on any GNU/Linux distribution in parallel to the - regular package management system. Thus, you can use Hydra on a - Debian, Fedora, SuSE, or Ubuntu system. - - -
- -
- Getting Nix - - - If your server runs NixOS you are all set to continue with - installation of Hydra. Otherwise you first need to install Nix. - The latest stable version can be found one the Nix web - site, along with a manual, which includes installation - instructions. - -
- -
- Installation - - - - - The latest development snapshot of Hydra can be installed - by visiting the URL http://hydra.nixos.org/view/hydra/unstable - and using the one-click install available at one of the build - pages. You can also install Hydra through the channel by - performing the following commands: - - -nix-channel --add http://hydra.nixos.org/jobset/hydra/master/channel/latest -nix-channel --update -nix-env -i hydra - - - - Command completion should reveal a number of command-line tools - from Hydra, such as hydra-queue-runner. - -
- -
- Creating the database - - Hydra stores its results in a PostgreSQL database. - - - - To setup a PostgreSQL database with hydra - as database name and user name, issue the following commands on - the PostgreSQL server: - - -createuser -S -D -R -P hydra -createdb -O hydra hydra - - Note that $prefix is the location of Hydra - in the nix store. - - - - Hydra uses an environment variable to know which database should - be used, and a variable which point to a location that holds - some state. To set these variables for a PostgreSQL database, - add the following to the file ~/.profile of - the user running the Hydra services. - - -export HYDRA_DBI="dbi:Pg:dbname=hydra;host=dbserver.example.org;user=hydra;" -export HYDRA_DATA=/var/lib/hydra - - You can provide the username and password in the file - ~/.pgpass, e.g. - - -dbserver.example.org:*:hydra:hydra:password - - Make sure that the HYDRA_DATA directory - exists and is writable for the user which will run the Hydra - services. - - - - Having set these environment variables, you can now initialise - the database by doing: - -hydra-init - - - - To create projects, you need to create a user with - admin privileges. This can be done using - the command hydra-create-user: - - -$ hydra-create-user alice --full-name 'Alice Q. User' \ - --email-address 'alice@example.org' --password foobar --role admin - - - Additional users can be created through the web interface. - - -
- -
- Upgrading - - If you're upgrading Hydra from a previous version, you - should do the following to perform any necessary database schema migrations: - -hydra-init - - -
- -
- Getting Started - - - To start the Hydra web server, execute: - -hydra-server - - When the server is started, you can browse to - http://localhost:3000/ to start configuring - your Hydra instance. - - - - The hydra-server command launches the web - server. There are two other processes that come into play: - - - - The evaluator is responsible for - periodically evaluating job sets, checking out their - dependencies off their version control systems (VCS), and - queueing new builds if the result of the evaluation changed. - It is launched by the hydra-evaluator - command. - - - The queue runner launches builds (using - Nix) as they are queued by the evaluator, scheduling them - onto the configured Nix hosts. It is launched using the - hydra-queue-runner command. - - - - All three processes must be running for Hydra to be fully - functional, though it's possible to temporarily stop any one of - them for maintenance purposes, for instance. - - -
- -
- Serving behind reverse proxy - - - To serve hydra web server behind reverse proxy like - nginx or httpd some - additional configuration must be made. - - - - Edit your hydra.conf file in a similar way to - this example: - - -using_frontend_proxy 1 -base_uri example.com - - base_uri should be your hydra servers proxied URL. - - If you are using Hydra nixos module then setting hydraURL - option should be enough. - - - - - - If you want to serve Hydra with a prefix path, for example - http://example.com/hydra then you need to configure your - reverse proxy to pass X-Request-Base to hydra, with - prefix path as value. - - For example if you are using nginx, then use configuration similar to following: - -server { - listen 433 ssl; - server_name example.com; - .. other configuration .. - location /hydra/ { - - proxy_pass http://127.0.0.1:3000; - proxy_redirect http://127.0.0.1:3000 https://example.com/hydra; - - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-Request-Base /hydra; - } -} - - -
-
- Using LDAP as authentication backend (optional) - - Instead of using Hydra's built-in user management you can optionally use LDAP to manage roles and users. - - - - The hydra-server accepts the environment - variable HYDRA_LDAP_CONFIG. The value of - the variable should point to a valid YAML file containing the - Catalyst LDAP configuration. The format of the configuration - file is describe in the - - Catalyst::Authentication::Store::LDAP documentation. - An example is given below. - - - - Roles can be assigned to users based on their LDAP group membership - (use_roles: 1 in the below example). - For a user to have the role admin assigned to them - they should be in the group hydra_admin. In general - any LDAP group of the form hydra_some_role - (notice the hydra_ prefix) will work. - - - -credential: - class: Password - password_field: password - password_type: self_check -store: - class: LDAP - ldap_server: localhost - ldap_server_options.timeout: 30 - binddn: "cn=root,dc=example" - bindpw: notapassword - start_tls: 0 - start_tls_options - verify: none - user_basedn: "ou=users,dc=example" - user_filter: "(&(objectClass=inetOrgPerson)(cn=%s))" - user_scope: one - user_field: cn - user_search_options: - deref: always - use_roles: 1 - role_basedn: "ou=groups,dc=example" - role_filter: "(&(objectClass=groupOfNames)(member=%s))" - role_scope: one - role_field: cn - role_value: dn - role_search_options: - deref: always - -
-
- - diff --git a/doc/manual/introduction.xml b/doc/manual/introduction.xml deleted file mode 100644 index 956732f1..00000000 --- a/doc/manual/introduction.xml +++ /dev/null @@ -1,267 +0,0 @@ - - - Introduction - -
- About Hydra - - - Hydra is a tool for continuous integration testing and software - release that uses a purely functional language to describe build jobs - and their dependencies. Continuous integration is a simple technique - to improve the quality of the software development process. An - automated system continuously or periodically checks out the source - code of a project, builds it, runs tests, and produces reports for the - developers. Thus, various errors that might accidentally be committed - into the code base are automatically caught. Such a system allows - more in-depth testing than what developers could feasibly do manually: - - - Portability testing: The - software may need to be built and tested on many different - platforms. It is infeasible for each developer to do this - before every commit. - - - Likewise, many projects have very large test sets - (e.g., regression tests in a compiler, or stress tests in a - DBMS) that can take hours or days to run to completion. - - - Many kinds of static and dynamic analyses can be - performed as part of the tests, such as code coverage runs and - static analyses. - - - It may also be necessary to build many different - variants of the software. For instance, - it may be necessary to verify that the component builds with - various versions of a compiler. - - - Developers typically use incremental building to - test their changes (since a full build may take too long), but - this is unreliable with many build management tools (such as - Make), i.e., the result of the incremental build might differ - from a full build. - - - It ensures that the software can be built from the - sources under revision control. Users of version management - systems such as CVS and Subversion often forget to place - source files under revision control. - - - The machines on which the continuous integration - system runs ideally provides a clean, well-defined build - environment. If this environment is administered through - proper SCM techniques, then builds produced by the system can - be reproduced. In contrast, developer work environments are - typically not under any kind of SCM control. - - - In large projects, developers often work on a - particular component of the project, and do not build and test - the composition of those components (again since this is - likely to take too long). To prevent the phenomenon of ``big - bang integration'', where components are only tested together - near the end of the development process, it is important to - test components together as soon as possible (hence - continuous integration). - - - It allows software to be - released by automatically creating - packages that users can download and install. To do this - manually represents an often prohibitive amount of work, as - one may want to produce releases for many different platforms: - e.g., installers for Windows and Mac OS X, RPM or Debian - packages for certain Linux distributions, and so on. - - - - - - - In its simplest form, a continuous integration tool sits in a - loop building and releasing software components from a version - management system. For each component, it performs the - following tasks: - - - - It obtains the latest version of the component's - source code from the version management system. - - - It runs the component's build process (which - presumably includes the execution of the component's test - set). - - - It presents the results of the build (such as error - logs and releases) to the developers, e.g., by producing a web - page. 
- - - - - Examples of continuous integration tools include Jenkins, - CruiseControl Tinderbox, Sisyphus, Anthill and BuildBot. These - tools have various limitations. - - - - They do not manage the build - environment. The build environment consists of the - dependencies necessary to perform a build action, e.g., - compilers, libraries, etc. Setting up the environment is - typically done manually, and without proper SCM control (so it - may be hard to reproduce a build at a later time). Manual - management of the environment scales poorly in the number of - configurations that must be supported. For instance, suppose - that we want to build a component that requires a certain - compiler X. We then have to go to each machine and install X. - If we later need a newer version of X, the process must be - repeated all over again. An ever worse problem occurs if - there are conflicting, mutually exclusive versions of the - dependencies. Thus, simply installing the latest version is - not an option. Of course, we can install these components in - different directories and manually pass the appropriate paths - to the build processes of the various components. But this is - a rather tiresome and error-prone process. - - - They do not easily support variability in software - systems. A system may have a great deal of build-time - variability: optional functionality, whether to build a debug or - production version, different versions of dependencies, and so on. - (For instance, the Linux kernel now has over 2,600 build-time - configuration switches.) It is therefore important that a continuous - integration tool can easily select and test different instances from - the configuration space of the system to reveal problems, such as - erroneous interactions between features. In a continuous integration - setting, it is also useful to test different combinations of versions - of subsystems, e.g., the head revision of a component against stable - releases of its dependencies, and vice versa, as this can reveal - various integration problems. - - - - - - - Hydra, is a continuous integration tool - that solves these problems. It is built on top of the Nix package manager, - which has a purely functional language for describing package - build actions and their dependencies. This allows the build - environment for projects to be produced automatically and - deterministically, and variability in components to be expressed - naturally using functions; and as such is an ideal fit for a - continuous build system. - - -
- -
- About Us - - - Hydra is the successor of the Nix Buildfarm, which was developed - in tandem with the Nix software deployment system. Nix was - originally developed at the Department of Information and - Computing Sciences, Utrecht University by the TraCE project - (2003-2008). The project was funded by the Software Engineering - Research Program Jacquard to improve the support for variability - in software systems. Funding for the development of Nix and - Hydra is now provided by the NIRICT LaQuSo Build Farm project. - -
- -
- About this Manual - - - This manual tells you how to install the Hydra buildfarm - software on your own server and how to operate that server using - its web interface. - -
- - -
- License - - - Hydra is free software: you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation, either version 3 of - the License, or (at your option) any later version. - - - - Hydra is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General - Public License for more details. - -
- -
- Hydra at <literal>nixos.org</literal> - - - The nixos.org installation of Hydra runs at - http://hydra.nixos.org/. - - That installation is used to build software components from the - Nix, - NixOS, - GNU, - Stratego/XT, - and related projects. - - - - If you are one of the developers on those projects, it is likely - that you will be using the NixOS Hydra server in some way. If - you need to administer automatic builds for your project, you - should pull the right strings to get an account on the - server. This manual will tell you how to set up new projects and - build jobs within those projects and write a release.nix file to - describe the build process of your project to Hydra. You can - skip the next chapter. - - - - If your project does not yet have automatic builds within the - NixOS Hydra server, it may actually be eligible. We are in the - process of setting up a large buildfarm that should be able to - support open source and academic software projects. Get in - touch. - -
- -
- Hydra on your own buildfarm - - - If you need to run your own Hydra installation, explains how to download and - install the system on your own server. - -
- -
- - diff --git a/doc/manual/manual.xml b/doc/manual/manual.xml deleted file mode 100644 index 75e53152..00000000 --- a/doc/manual/manual.xml +++ /dev/null @@ -1,70 +0,0 @@ - - - - - Hydra User's Guide - - Draft - - - - - Eelco - Dolstra - - - Delft University of Technology - Department of Software Technology - - Author - - - - Rob - Vermaas - - - Delft University of Technology - Department of Software Technology - - Author - - - - Eelco - Visser - - - Delft University of Technology - Department of Software Technology - - Author - - - - Ludovic - Courtès - - Author - - - - - - 2009-2013 - Eelco Dolstra - - - March 2010 - - - - - - - - - - - diff --git a/doc/manual/projects.xml b/doc/manual/projects.xml deleted file mode 100644 index 05791765..00000000 --- a/doc/manual/projects.xml +++ /dev/null @@ -1,496 +0,0 @@ - - - Creating and Managing Projects - - - Once Hydra is installed and running, the next step is to add - projects to the build farm. We follow the example of the Patchelf - project, a software tool written in C and using the GNU - Build System (GNU Autoconf and GNU Automake). - - - - Log in to the web interface of your Hydra installation using the - user name and password you inserted in the database (by default, - Hydra's web server listens on localhost:3000). - Then follow the "Create Project" link to create a new project. - - -
- Project Information - - - A project definition consists of some general information and a - set of job sets. The general information identifies a project, - its owner, and current state of activity. - - Here's what we fill in for the patchelf project: - - -Identifier: patchelf - - - The identifier is the identity of the - project. It is used in URLs and in the names of build results. - - - - The identifier should be a unique name (it is the primary - database key for the project table in the database). If you try - to create a project with an already existing identifier you'd - get an error message from the database. - - So try to create the project after entering just the general - information to figure out if you have chosen a unique name. - Job sets can be added once the project has been created. - - -Display name: Patchelf - - - The display name is used in menus. - - -Description: A tool for modifying ELF binaries - - - The description is used as short - documentation of the nature of the project. - - -Owner: eelco - - - The owner of a project can create and edit - job sets. - - -Enabled: Yes - - - Only if the project is enabled are builds - performed. - - - - Once created there should be an entry for the project in the - sidebar. Go to the project page for the Patchelf - project. - -
- -
- Job Sets - - - A project can consist of multiple job sets - (hereafter jobsets), separate tasks that - can be built separately, but may depend on each other (without - cyclic dependencies, of course). Go to the Edit - page of the Patchelf project and "Add a new jobset" by providing - the following "Information": - - -Identifier: trunk -Description: Trunk -Nix expression: release.nix in input patchelfSrc - - - This states that in order to build the trunk - jobset, the Nix expression in the file - release.nix, which can be obtained from - input patchelfSrc, should be - evaluated. (We'll have a look at - release.nix later.) - - - - - To realize a job we probably need a number of inputs, which can - be declared in the table below. As many inputs as required can - be added. For patchelf we declare the following inputs. - - -patchelfSrc -'Git checkout' https://github.com/NixOS/patchelf - -nixpkgs 'Git checkout' https://github.com/NixOS/nixpkgs - -officialRelease Boolean false - -system String value "i686-linux" - - -
- -
- Building Jobs -
- -
- Build Recipes - - - Build jobs and build recipes for a jobset are - specified in a text file written in the Nix language. The - recipe is actually called a Nix expression in - Nix parlance. By convention this file is often called - release.nix. - - - - The release.nix file is typically kept under - version control, and the repository that contains it one of the - build inputs of the corresponding–often called - hydraConfig by convention. The repository for - that file and the actual file name are specified on the web - interface of Hydra under the Setup tab of the - jobset's overview page, under the Nix - expression heading. See, for example, the jobset - overview page of the PatchELF project, and - the corresponding Nix file. - - - - Knowledge of the Nix language is recommended, but the example - below should already give a good idea of how it works: - - - - <filename>release.nix</filename> file for GNU Hello - -let - pkgs = import <nixpkgs> {}; - - jobs = rec { - - tarball = - pkgs.releaseTools.sourceTarball { - name = "hello-tarball"; - src = <hello>; - buildInputs = (with pkgs; [ gettext texLive texinfo ]); - }; - - build = - { system ? builtins.currentSystem }: - - let pkgs = import <nixpkgs> { inherit system; }; in - pkgs.releaseTools.nixBuild { - name = "hello"; - src = jobs.tarball; - configureFlags = [ "--disable-silent-rules" ]; - }; - }; -in - jobs - - - - - shows what a - release.nix file for GNU Hello - would look like. GNU Hello is representative of many GNU - and non-GNU free software projects: - - - it uses the GNU Build System, namely GNU Autoconf, - and GNU Automake; for users, it means it can be installed - using the usual - ./configure && make install - procedure; - - it uses Gettext for internationalization; - it has a Texinfo manual, which can be rendered as PDF - with TeX. - - - The file defines a jobset consisting of two jobs: - tarball, and build. It - contains the following elements (referenced from the figure by - numbers): - - - - - - This defines a variable pkgs holding - the set of packages provided by Nixpkgs. - - - Since nixpkgs appears in angle brackets, - there must be a build input of that name in the Nix search - path. In this case, the web interface should show a - nixpkgs build input, which is a checkout - of the Nixpkgs source code repository; Hydra then adds this - and other build inputs to the Nix search path when - evaluating release.nix. - - - - - - This defines a variable holding the two Hydra - jobs–an attribute set in Nix. - - - - - - This is the definition of the first job, named - tarball. The purpose of this job is to - produce a usable source code tarball. - - - - - The tarball job calls the - sourceTarball function, which (roughly) - runs autoreconf && ./configure && - make dist on the checkout. The - buildInputs attribute specifies - additional software dependencies for the - jobThe package names used in - buildInputs–e.g., - texLive–are the names of the - attributes corresponding to these - packages in Nixpkgs, specifically in the all-packages.nix - file. See the section entitled “Package Naming” in the - Nixpkgs manual for more information.. - - - - - The tarball jobs expects a - hello build input to be available in the - Nix search path. Again, this input is passed by Hydra and - is meant to be a checkout of GNU Hello's source code - repository. - - - - - - This is the definition of the build - job, whose purpose is to build Hello from the tarball - produced above. 
- - - - - The build function takes one - parameter, system, which should be a string - defining the Nix system type–e.g., - "x86_64-linux". Additionally, it refers - to jobs.tarball, seen above. - - - Hydra inspects the formal argument list of the function - (here, the system argument) and passes it - the corresponding parameter specified as a build input on - Hydra's web interface. Here, system is - passed by Hydra when it calls build. - Thus, it must be defined as a build input of type string in - Hydra, which could take one of several values. - - - The question mark after system defines - the default value for this argument, and is only useful when - debugging locally. - - - - - The build job calls the - nixBuild function, which unpacks the - tarball, then runs ./configure && make - && make check && make install. - - - - - - Finally, the set of jobs is returned to Hydra, as a Nix - attribute set. - - - - -
- -
- Building from the Command Line - - - It is often useful to test a build recipe, for instance before - it is actually used by Hydra, when testing changes, or when - debugging a build issue. Since build recipes for Hydra jobsets - are just plain Nix expressions, they can be evaluated using the - standard Nix tools. - - - - To evaluate the tarball jobset of , just run: - - -$ nix-build release.nix -A tarball - - - However, doing this with as is will - probably yield an error like this: - - -error: user-thrown exception: file `hello' was not found in the Nix search path (add it using $NIX_PATH or -I) - - - The error is self-explanatory. Assuming - $HOME/src/hello points to a checkout of - Hello, this can be fixed this way: - - -$ nix-build -I ~/src release.nix -A tarball - - - Similarly, the build jobset can be evaluated: - - -$ nix-build -I ~/src release.nix -A build - - - The build job reuses the result of the - tarball job, rebuilding it only if it needs to. - - -
- -
- Adding More Jobs - - - illustrates how to write the most - basic jobs, tarball and - build. In practice, much more can be done by - using features readily provided by Nixpkgs or by creating new jobs - as customizations of existing jobs. - - - - For instance, test coverage report for projects compiled with GCC - can be automatically generated using the - coverageAnalysis function provided by Nixpkgs - instead of nixBuild. Back to our GNU Hello - example, we can define a coverage job that - produces an HTML code coverage report directly readable from the - corresponding Hydra build page: - - -coverage = - { system ? builtins.currentSystem }: - - let pkgs = import nixpkgs { inherit system; }; in - pkgs.releaseTools.coverageAnalysis { - name = "hello"; - src = jobs.tarball; - configureFlags = [ "--disable-silent-rules" ]; - }; - - - As can be seen, the only difference compared to - build is the use of - coverageAnalysis. - - - - Nixpkgs provides many more build tools, including the ability to - run build in virtual machines, which can themselves run another - GNU/Linux distribution, which allows for the creation of packages - for these distributions. Please see the - pkgs/build-support/release directory - of Nixpkgs for more. The NixOS manual also contains information - about whole-system testing in virtual machine. - - - - Now, assume we want to build Hello with an old version of GCC, and - with different configure flags. A new - build_exotic job can be written that simply - overrides the relevant arguments passed to - nixBuild: - - -build_exotic = - { system ? builtins.currentSystem }: - - let - pkgs = import nixpkgs { inherit system; }; - build = jobs.build { inherit system; }; - in - pkgs.lib.overrideDerivation build (attrs: { - buildInputs = [ pkgs.gcc33 ]; - preConfigure = "gcc --version"; - configureFlags = - attrs.configureFlags ++ [ "--disable-nls" ]; - }); - - - The build_exotic job reuses - build and overrides some of its arguments: it - adds a dependency on GCC 3.3, a pre-configure phase that runs - gcc --version, and adds the - --disable-nls configure flags. - - - - This customization mechanism is very powerful. For instance, it - can be used to change the way Hello and all - its dependencies–including the C library and compiler used to - build it–are built. See the Nixpkgs manual for more. - - -
- - - -
- Email Notifications - - Hydra can send email notifications when the status of a build changes. This provides - immediate feedback to maintainers or committers when a change causes build failures. - - - - The simplest approach to enable Email Notifications is to use the ssmtp package, which - simply hands off the emails to another SMTP server. For details on how to configure ssmtp, - see the documentation for the networking.defaultMailServer option. - To use ssmtp for the Hydra email notifications, add it to the path option of the Hydra services - in your /etc/nixos/configuration.nix file: - -systemd.services.hydra-queue-runner.path = [ pkgs.ssmtp ]; -systemd.services.hydra-server.path = [ pkgs.ssmtp ]; - - -
- -
- - From 9ddc6e355f69b0cdb1424e3e48ab5bae7e1d7be7 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Mon, 22 Feb 2021 13:29:52 -0500 Subject: [PATCH 016/965] flake: add TestPostgreSQL for per-test DBs --- flake.nix | 61 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) diff --git a/flake.nix b/flake.nix index baa6c1e8..e7bfb65b 100644 --- a/flake.nix +++ b/flake.nix @@ -37,6 +37,66 @@ # Add LDAP dependencies that aren't currently found within nixpkgs. perlPackages = prev.perlPackages // { + TestPostgreSQL = final.perlPackages.buildPerlModule { + pname = "Test-PostgreSQL"; + version = "1.27"; + src = final.fetchurl { + url = "mirror://cpan/authors/id/T/TJ/TJC/Test-PostgreSQL-1.27.tar.gz"; + sha256 = "b1bd231693100cc40905fb0ba3173173201621de9c8301f21c5b593b0a46f907"; + }; + buildInputs = with final.perlPackages; [ ModuleBuildTiny TestSharedFork pkgs.postgresql ]; + propagatedBuildInputs = with final.perlPackages; [ DBDPg DBI FileWhich FunctionParameters Moo TieHashMethod TryTiny TypeTiny ]; + + makeMakerFlags = "POSTGRES_HOME=${final.postgresql}"; + + meta = { + homepage = https://github.com/TJC/Test-postgresql; + description = "PostgreSQL runner for tests"; + license = with final.lib.licenses; [ artistic2 ]; + }; + }; + + FunctionParameters = final.buildPerlPackage { + pname = "Function-Parameters"; + version = "2.001003"; + src = final.fetchurl { + url = "mirror://cpan/authors/id/M/MA/MAUKE/Function-Parameters-2.001003.tar.gz"; + sha256 = "eaa22c6b43c02499ec7db0758c2dd218a3b2ab47a714b2bdf8010b5ee113c242"; + }; + buildInputs = with final.perlPackages; [ DirSelf TestFatal ]; + meta = { + description = "Define functions and methods with parameter lists (\"subroutine signatures\")"; + license = with final.lib.licenses; [ artistic1 gpl1Plus ]; + }; + }; + + DirSelf = final.buildPerlPackage { + pname = "Dir-Self"; + version = "0.11"; + src = final.fetchurl { + url = "mirror://cpan/authors/id/M/MA/MAUKE/Dir-Self-0.11.tar.gz"; + sha256 = "e251a51abc7d9ba3e708f73c2aa208e09d47a0c528d6254710fa78cc8d6885b5"; + }; + meta = { + homepage = "https://github.com/mauke/Dir-Self"; + description = "A __DIR__ constant for the directory your source file is in"; + license = with final.lib.licenses; [ artistic1 gpl1Plus ]; + }; + }; + + TieHashMethod = final.buildPerlPackage { + pname = "Tie-Hash-Method"; + version = "0.02"; + src = final.fetchurl { + url = "mirror://cpan/authors/id/Y/YV/YVES/Tie-Hash-Method-0.02.tar.gz"; + sha256 = "d513fbb51413f7ca1e64a1bdce6194df7ec6076dea55066d67b950191eec32a9"; + }; + meta = { + description = "Tied hash with specific methods overriden by callbacks"; + license = with final.lib.licenses; [ artistic1 ]; + }; + }; + Test2Harness = final.buildPerlPackage { pname = "Test2-Harness"; version = "1.000042"; @@ -222,6 +282,7 @@ SysHostnameLong TermSizeAny TestMore + TestPostgreSQL TextDiff Test2Harness TextTable From b15d8edab17478290bb92fba0da988eb0fd23788 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Mon, 22 Feb 2021 13:33:25 -0500 Subject: [PATCH 017/965] tests: delete set-up.pl / tear-down.pl We'll set these up on a per-test basis. 
--- tests/Makefile.am | 4 +--- tests/set-up.pl | 5 ----- tests/tear-down.pl | 12 ------------ 3 files changed, 1 insertion(+), 20 deletions(-) delete mode 100644 tests/set-up.pl delete mode 100644 tests/tear-down.pl diff --git a/tests/Makefile.am b/tests/Makefile.am index f5a92239..4f84154c 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -25,9 +25,7 @@ EXTRA_DIST = \ $(TESTS) TESTS = \ - set-up.pl \ - test.pl \ - tear-down.pl + test.pl check_SCRIPTS = repos diff --git a/tests/set-up.pl b/tests/set-up.pl deleted file mode 100644 index 4fb99a49..00000000 --- a/tests/set-up.pl +++ /dev/null @@ -1,5 +0,0 @@ -use strict; -system("initdb -D postgres --locale C.UTF-8 ") == 0 or die; -system("pg_ctl -D postgres -o \"-F -p 6433 -h '' -k /tmp \" -w start") == 0 or die; -system("createdb -l C.UTF-8 -p 6433 hydra-test-suite") == 0 or die; -system("hydra-init") == 0 or die; diff --git a/tests/tear-down.pl b/tests/tear-down.pl deleted file mode 100644 index f30bb278..00000000 --- a/tests/tear-down.pl +++ /dev/null @@ -1,12 +0,0 @@ -use strict; - -my $fail = 0; - -system("dropdb -p 6433 hydra-test-suite") == 0 or $fail = 1; -system("pg_ctl -D postgres -w stop") == 0 or $fail = 1; - -system("chmod -R a+w nix") == 0 or $fail = 1; -system("rm -rf postgres data nix git-repo hg-repo svn-repo svn-checkout svn-checkout-repo bzr-repo bzr-checkout-repo darcs-repo") == 0 or $fail = 1; -system("rm -f .*-state") == 0 or $fail = 1; - -exit $fail; From fe1f2f0806f8ff3134d09141ee28f17093ad85ce Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Mon, 22 Feb 2021 13:47:41 -0500 Subject: [PATCH 018/965] Create an ephemeral PostgreSQL database per test --- src/libhydra/db.hh | 14 ++++++++++---- tests/evaluation.t | 9 ++++++--- tests/lib/Setup.pm | 21 ++++++++++++++++++--- 3 files changed, 34 insertions(+), 10 deletions(-) diff --git a/src/libhydra/db.hh b/src/libhydra/db.hh index 35d78edf..ec9a024b 100644 --- a/src/libhydra/db.hh +++ b/src/libhydra/db.hh @@ -13,10 +13,16 @@ struct Connection : pqxx::connection { using namespace nix; auto s = getEnv("HYDRA_DBI").value_or("dbi:Pg:dbname=hydra;"); - std::string prefix = "dbi:Pg:"; - if (std::string(s, 0, prefix.size()) != prefix) - throw Error("$HYDRA_DBI does not denote a PostgreSQL database"); - return concatStringsSep(" ", tokenizeString(string(s, prefix.size()), ";")); + + std::string lower_prefix = "dbi:Pg:"; + std::string upper_prefix = "DBI:Pg:"; + + if ((std::string(s, 0, lower_prefix.size()) == lower_prefix) || + (std::string(s, 0, upper_prefix.size()) == upper_prefix)) { + return concatStringsSep(" ", tokenizeString(string(s, lower_prefix.size()), ";")); + } + + throw Error("$HYDRA_DBI does not denote a PostgreSQL database"); } }; diff --git a/tests/evaluation.t b/tests/evaluation.t index 1d4c37a5..04dc144e 100644 --- a/tests/evaluation.t +++ b/tests/evaluation.t @@ -1,13 +1,16 @@ use strict; -use Hydra::Schema; -use Hydra::Model::DB; use Cwd; use Setup; -my $db = Hydra::Model::DB->new; +my $pgsql = dbinit(); +my $dsn = $pgsql->dsn; + +require Hydra::Schema; +require Hydra::Model::DB; use Test::Simple tests => 76; +my $db = Hydra::Model::DB->new; hydra_setup($db); my $res; diff --git a/tests/lib/Setup.pm b/tests/lib/Setup.pm index 6ddd0162..2989120b 100644 --- a/tests/lib/Setup.pm +++ b/tests/lib/Setup.pm @@ -2,12 +2,27 @@ package Setup; use strict; use Exporter; -use Hydra::Helper::Nix; -use Hydra::Model::DB; +use Test::PostgreSQL; use Cwd; our @ISA = qw(Exporter); -our @EXPORT = qw(hydra_setup nrBuildsForJobset queuedBuildsForJobset 
nrQueuedBuildsForJobset createBaseJobset createJobsetWithOneInput evalSucceeds runBuild updateRepository); +our @EXPORT = qw(dbinit hydra_setup nrBuildsForJobset queuedBuildsForJobset nrQueuedBuildsForJobset createBaseJobset createJobsetWithOneInput evalSucceeds runBuild updateRepository); + +sub dbinit() { + my $pgsql = Test::PostgreSQL->new(); + $ENV{'HYDRA_DBI'} = $pgsql->dsn; + system("hydra-init") == 0 or die; + return $pgsql; +} + +sub captureStdoutStderr { + # "Lazy"-load Hydra::Helper::Nix to avoid the compile-time + # import of Hydra::Model::DB. Early loading of the DB class + # causes fixation of the DSN, and we need to fixate it after + # the temporary DB is setup. + require Hydra::Helper::Nix; + return Hydra::Helper::Nix::captureStdoutStderr(@_) +} sub hydra_setup { my ($db) = @_; From 62b2880dfc893381d594b32e977fc7344ca80849 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Mon, 22 Feb 2021 16:52:38 -0500 Subject: [PATCH 019/965] Give each test its own Nix directories Otherwise we risk tripping over confusing statuses where a build is "done" and "Cached", but we were expecting to run it. --- tests/evaluate-basic.t | 32 ++++++++++++++++++++++++++++++++ tests/evaluation.t | 17 ++--------------- tests/lib/Setup.pm | 29 +++++++++++++++++++++++++---- 3 files changed, 59 insertions(+), 19 deletions(-) create mode 100644 tests/evaluate-basic.t diff --git a/tests/evaluate-basic.t b/tests/evaluate-basic.t new file mode 100644 index 00000000..0d58d08f --- /dev/null +++ b/tests/evaluate-basic.t @@ -0,0 +1,32 @@ +use feature 'unicode_strings'; +use strict; +use Cwd; +use Setup; + +(my $datadir, my $pgsql) = test_init(); + +require Hydra::Schema; +require Hydra::Model::DB; + +use Test2::V0; + +my $db = Hydra::Model::DB->new; +hydra_setup($db); + +my $project = $db->resultset('Projects')->create({name => "tests", displayname => "", owner => "root"}); + +# Most basic test case, no parameters +my $jobset = createBaseJobset("basic", "basic.nix"); + +ok(evalSucceeds($jobset), "Evaluating jobs/basic.nix should exit with return code 0"); +is(nrQueuedBuildsForJobset($jobset), 3, "Evaluating jobs/basic.nix should result in 3 builds"); + +for my $build (queuedBuildsForJobset($jobset)) { + ok(runBuild($build), "Build '".$build->job."' from jobs/basic.nix should exit with code 0"); + my $newbuild = $db->resultset('Builds')->find($build->id); + is($newbuild->finished, 1, "Build '".$build->job."' from jobs/basic.nix should be finished."); + my $expected = $build->job eq "fails" ? 1 : $build->job =~ /with_failed/ ? 
6 : 0; + is($newbuild->buildstatus, $expected, "Build '".$build->job."' from jobs/basic.nix should have buildstatus $expected."); +} + +done_testing; \ No newline at end of file diff --git a/tests/evaluation.t b/tests/evaluation.t index 04dc144e..56a7c0a0 100644 --- a/tests/evaluation.t +++ b/tests/evaluation.t @@ -2,13 +2,12 @@ use strict; use Cwd; use Setup; -my $pgsql = dbinit(); -my $dsn = $pgsql->dsn; +(my $datadir, my $pgsql) = test_init(); require Hydra::Schema; require Hydra::Model::DB; -use Test::Simple tests => 76; +use Test::Simple tests => 68; my $db = Hydra::Model::DB->new; hydra_setup($db); @@ -21,18 +20,6 @@ my $jobsBaseUri = "file://".getcwd; my $project = $db->resultset('Projects')->create({name => "tests", displayname => "", owner => "root"}); my $jobset; -# Most basic test case, no parameters -$jobset = createBaseJobset("basic", "basic.nix"); - -ok(evalSucceeds($jobset), "Evaluating jobs/basic.nix should exit with return code 0"); -ok(nrQueuedBuildsForJobset($jobset) == 3 , "Evaluating jobs/basic.nix should result in 3 builds"); - -for my $build (queuedBuildsForJobset($jobset)) { - ok(runBuild($build), "Build '".$build->job."' from jobs/basic.nix should exit with code 0"); - my $newbuild = $db->resultset('Builds')->find($build->id); - my $expected = $build->job eq "fails" ? 1 : $build->job =~ /with_failed/ ? 6 : 0; - ok($newbuild->finished == 1 && $newbuild->buildstatus == $expected, "Build '".$build->job."' from jobs/basic.nix should have buildstatus $expected"); -} # Test jobset with 2 jobs, one has parameter of succeeded build of the other $jobset = createJobsetWithOneInput("build-output-as-input", "build-output-as-input.nix", "build1", "build", "build1"); diff --git a/tests/lib/Setup.pm b/tests/lib/Setup.pm index 2989120b..03c7046a 100644 --- a/tests/lib/Setup.pm +++ b/tests/lib/Setup.pm @@ -3,16 +3,37 @@ package Setup; use strict; use Exporter; use Test::PostgreSQL; +use File::Temp; +use File::Path qw(make_path); use Cwd; our @ISA = qw(Exporter); -our @EXPORT = qw(dbinit hydra_setup nrBuildsForJobset queuedBuildsForJobset nrQueuedBuildsForJobset createBaseJobset createJobsetWithOneInput evalSucceeds runBuild updateRepository); +our @EXPORT = qw(test_init hydra_setup nrBuildsForJobset queuedBuildsForJobset nrQueuedBuildsForJobset createBaseJobset createJobsetWithOneInput evalSucceeds runBuild updateRepository); -sub dbinit() { - my $pgsql = Test::PostgreSQL->new(); +sub test_init() { + my $dir = File::Temp->newdir(); + + $ENV{'HYDRA_DATA'} = "$dir/hydra-data"; + mkdir $ENV{'HYDRA_DATA'}; + $ENV{'NIX_CONF_DIR'} = "$dir/nix/etc/nix"; + make_path($ENV{'NIX_CONF_DIR'}); + my $nixconf = "$ENV{'NIX_CONF_DIR'}/nix.conf"; + open(my $fh, '>', $nixconf) or die "Could not open file '$nixconf' $!"; + print $fh "sandbox = false\n"; + close $fh; + + $ENV{'NIX_STATE_DIR'} = "$dir/nix/var/nix"; + + $ENV{'NIX_MANIFESTS_DIR'} = "$dir/nix/var/nix/manifests"; + $ENV{'NIX_STORE_DIR'} = "$dir/nix/store"; + $ENV{'NIX_LOG_DIR'} = "$dir/nix/var/log/nix"; + + my $pgsql = Test::PostgreSQL->new( + extra_initdb_args => "--locale C.UTF-8" + ); $ENV{'HYDRA_DBI'} = $pgsql->dsn; system("hydra-init") == 0 or die; - return $pgsql; + return ($dir, $pgsql); } sub captureStdoutStderr { From e4cda87b5a31e572cb52d7aa644234d2f4e07689 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 24 Feb 2021 07:00:26 -0500 Subject: [PATCH 020/965] db.hh: use hasPrefix for prefix comparisons Co-authored-by: Eelco Dolstra --- src/libhydra/db.hh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git 
a/src/libhydra/db.hh b/src/libhydra/db.hh index ec9a024b..7d5bdc58 100644 --- a/src/libhydra/db.hh +++ b/src/libhydra/db.hh @@ -17,8 +17,7 @@ struct Connection : pqxx::connection std::string lower_prefix = "dbi:Pg:"; std::string upper_prefix = "DBI:Pg:"; - if ((std::string(s, 0, lower_prefix.size()) == lower_prefix) || - (std::string(s, 0, upper_prefix.size()) == upper_prefix)) { + if (hasPrefix(s, lower_prefix) || hasPrefix(s, upper_prefix)) { return concatStringsSep(" ", tokenizeString(string(s, lower_prefix.size()), ";")); } From 9590bababc6c0ba92c36c4ba235d21dc90467f6f Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 23 Feb 2021 12:55:52 -0500 Subject: [PATCH 021/965] Split out dependent tests in to its own .t --- tests/evaluate-dependent-jobsets.t | 34 ++++++++++++++++++++++++++++++ tests/evaluation.t | 22 +------------------ 2 files changed, 35 insertions(+), 21 deletions(-) create mode 100644 tests/evaluate-dependent-jobsets.t diff --git a/tests/evaluate-dependent-jobsets.t b/tests/evaluate-dependent-jobsets.t new file mode 100644 index 00000000..7797b486 --- /dev/null +++ b/tests/evaluate-dependent-jobsets.t @@ -0,0 +1,34 @@ +use strict; +use Cwd; +use Setup; + +(my $datadir, my $pgsql) = test_init(); + +require Hydra::Schema; +require Hydra::Model::DB; + +use Test2::V0; + +my $db = Hydra::Model::DB->new; +hydra_setup($db); + +# Test jobset with 2 jobs, one has parameter of succeeded build of the other +my $jobset = createJobsetWithOneInput("build-output-as-input", "build-output-as-input.nix", "build1", "build", "build1"); + +ok(evalSucceeds($jobset), "Evaluating jobs/build-output-as-input.nix should exit with return code 0"); +ok(nrQueuedBuildsForJobset($jobset) == 1 , "Evaluating jobs/build-output-as-input.nix for first time should result in 1 build in queue"); +for my $build (queuedBuildsForJobset($jobset)) { + ok(runBuild($build), "Build '".$build->job."' from jobs/basic.nix should exit with code 0"); + my $newbuild = $db->resultset('Builds')->find($build->id); + ok($newbuild->finished == 1 && $newbuild->buildstatus == 0, "Build '".$build->job."' from jobs/basic.nix should have buildstatus 0"); +} + +ok(evalSucceeds($jobset), "Evaluating jobs/build-output-as-input.nix for second time should exit with return code 0"); +ok(nrQueuedBuildsForJobset($jobset) == 1 , "Evaluating jobs/build-output-as-input.nix for second time after building build1 should result in 1 build in queue"); +for my $build (queuedBuildsForJobset($jobset)) { + ok(runBuild($build), "Build '".$build->job."' from jobs/basic.nix should exit with code 0"); + my $newbuild = $db->resultset('Builds')->find($build->id); + ok($newbuild->finished == 1 && $newbuild->buildstatus == 0, "Build '".$build->job."' from jobs/basic.nix should have buildstatus 0"); +} + +done_testing; diff --git a/tests/evaluation.t b/tests/evaluation.t index 56a7c0a0..9a47d44b 100644 --- a/tests/evaluation.t +++ b/tests/evaluation.t @@ -7,7 +7,7 @@ use Setup; require Hydra::Schema; require Hydra::Model::DB; -use Test::Simple tests => 68; +use Test::Simple tests => 60; my $db = Hydra::Model::DB->new; hydra_setup($db); @@ -21,26 +21,6 @@ my $project = $db->resultset('Projects')->create({name => "tests", displayname = my $jobset; -# Test jobset with 2 jobs, one has parameter of succeeded build of the other -$jobset = createJobsetWithOneInput("build-output-as-input", "build-output-as-input.nix", "build1", "build", "build1"); - -ok(evalSucceeds($jobset), "Evaluating jobs/build-output-as-input.nix should exit with return code 0"); 
-ok(nrQueuedBuildsForJobset($jobset) == 1 , "Evaluating jobs/build-output-as-input.nix for first time should result in 1 build in queue"); -for my $build (queuedBuildsForJobset($jobset)) { - ok(runBuild($build), "Build '".$build->job."' from jobs/basic.nix should exit with code 0"); - my $newbuild = $db->resultset('Builds')->find($build->id); - ok($newbuild->finished == 1 && $newbuild->buildstatus == 0, "Build '".$build->job."' from jobs/basic.nix should have buildstatus 0"); -} - -ok(evalSucceeds($jobset), "Evaluating jobs/build-output-as-input.nix for second time should exit with return code 0"); -ok(nrQueuedBuildsForJobset($jobset) == 1 , "Evaluating jobs/build-output-as-input.nix for second time after building build1 should result in 1 build in queue"); -for my $build (queuedBuildsForJobset($jobset)) { - ok(runBuild($build), "Build '".$build->job."' from jobs/basic.nix should exit with code 0"); - my $newbuild = $db->resultset('Builds')->find($build->id); - ok($newbuild->finished == 1 && $newbuild->buildstatus == 0, "Build '".$build->job."' from jobs/basic.nix should have buildstatus 0"); -} - - # Test scm inputs my @scminputs = ( { From 0b693ad8e85aad2390890b7025eb5f0c1acb6cf2 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 23 Feb 2021 13:23:18 -0500 Subject: [PATCH 022/965] Use is in evaluate-dependent-jobsets --- tests/evaluate-dependent-jobsets.t | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/tests/evaluate-dependent-jobsets.t b/tests/evaluate-dependent-jobsets.t index 7797b486..7180fbc8 100644 --- a/tests/evaluate-dependent-jobsets.t +++ b/tests/evaluate-dependent-jobsets.t @@ -16,19 +16,21 @@ hydra_setup($db); my $jobset = createJobsetWithOneInput("build-output-as-input", "build-output-as-input.nix", "build1", "build", "build1"); ok(evalSucceeds($jobset), "Evaluating jobs/build-output-as-input.nix should exit with return code 0"); -ok(nrQueuedBuildsForJobset($jobset) == 1 , "Evaluating jobs/build-output-as-input.nix for first time should result in 1 build in queue"); +is(nrQueuedBuildsForJobset($jobset), 1 , "Evaluating jobs/build-output-as-input.nix for first time should result in 1 build in queue"); for my $build (queuedBuildsForJobset($jobset)) { - ok(runBuild($build), "Build '".$build->job."' from jobs/basic.nix should exit with code 0"); + ok(runBuild($build), "Build '".$build->job."' from jobs/build-output-as-input.nix should exit with code 0"); my $newbuild = $db->resultset('Builds')->find($build->id); - ok($newbuild->finished == 1 && $newbuild->buildstatus == 0, "Build '".$build->job."' from jobs/basic.nix should have buildstatus 0"); + is($newbuild->finished, 1, "Build '".$build->job."' from jobs/build-output-as-input.nix should be finished."); + is($newbuild->buildstatus, 0, "Build '".$build->job."' from jobs/build-output-as-input.nix should have buildstatus 0."); } ok(evalSucceeds($jobset), "Evaluating jobs/build-output-as-input.nix for second time should exit with return code 0"); -ok(nrQueuedBuildsForJobset($jobset) == 1 , "Evaluating jobs/build-output-as-input.nix for second time after building build1 should result in 1 build in queue"); +is(nrQueuedBuildsForJobset($jobset), 1 , "Evaluating jobs/build-output-as-input.nix for second time after building build1 should result in 1 build in queue"); for my $build (queuedBuildsForJobset($jobset)) { ok(runBuild($build), "Build '".$build->job."' from jobs/basic.nix should exit with code 0"); my $newbuild = $db->resultset('Builds')->find($build->id); - ok($newbuild->finished == 1 
&& $newbuild->buildstatus == 0, "Build '".$build->job."' from jobs/basic.nix should have buildstatus 0"); + is($newbuild->finished, 1, "Build '".$build->job."' from jobs/build-output-as-input.nix should be finished."); + is($newbuild->buildstatus, 0, "Build '".$build->job."' from jobs/build-output-as-input.nix should have buildstatus 0."); } done_testing; From c8df544046b4d730ccbb333e73f0b6827ef29e94 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 23 Feb 2021 14:16:03 -0500 Subject: [PATCH 023/965] evaluate-dependent-jobsets: clean up test to be more clear --- tests/evaluate-dependent-jobsets.t | 37 ++++++++++++++++++------------ 1 file changed, 22 insertions(+), 15 deletions(-) diff --git a/tests/evaluate-dependent-jobsets.t b/tests/evaluate-dependent-jobsets.t index 7180fbc8..279e1f33 100644 --- a/tests/evaluate-dependent-jobsets.t +++ b/tests/evaluate-dependent-jobsets.t @@ -15,22 +15,29 @@ hydra_setup($db); # Test jobset with 2 jobs, one has parameter of succeeded build of the other my $jobset = createJobsetWithOneInput("build-output-as-input", "build-output-as-input.nix", "build1", "build", "build1"); -ok(evalSucceeds($jobset), "Evaluating jobs/build-output-as-input.nix should exit with return code 0"); -is(nrQueuedBuildsForJobset($jobset), 1 , "Evaluating jobs/build-output-as-input.nix for first time should result in 1 build in queue"); -for my $build (queuedBuildsForJobset($jobset)) { - ok(runBuild($build), "Build '".$build->job."' from jobs/build-output-as-input.nix should exit with code 0"); - my $newbuild = $db->resultset('Builds')->find($build->id); - is($newbuild->finished, 1, "Build '".$build->job."' from jobs/build-output-as-input.nix should be finished."); - is($newbuild->buildstatus, 0, "Build '".$build->job."' from jobs/build-output-as-input.nix should have buildstatus 0."); -} +ok(evalSucceeds($jobset), "Evaluating jobs/build-output-as-input.nix should exit with return code 0"); +is(nrQueuedBuildsForJobset($jobset), 1 , "Evaluation should result in 1 build in queue"); -ok(evalSucceeds($jobset), "Evaluating jobs/build-output-as-input.nix for second time should exit with return code 0"); -is(nrQueuedBuildsForJobset($jobset), 1 , "Evaluating jobs/build-output-as-input.nix for second time after building build1 should result in 1 build in queue"); -for my $build (queuedBuildsForJobset($jobset)) { - ok(runBuild($build), "Build '".$build->job."' from jobs/basic.nix should exit with code 0"); +subtest "For the 'build1' job" => sub { + my ($build) = queuedBuildsForJobset($jobset); + is($build->job, "build1", "Verify the only job we got is for 'build1'"); + + ok(runBuild($build), "Build should exit with code 0"); my $newbuild = $db->resultset('Builds')->find($build->id); - is($newbuild->finished, 1, "Build '".$build->job."' from jobs/build-output-as-input.nix should be finished."); - is($newbuild->buildstatus, 0, "Build '".$build->job."' from jobs/build-output-as-input.nix should have buildstatus 0."); -} + is($newbuild->finished, 1, "Build should be finished."); + is($newbuild->buildstatus, 0, "Build should have buildstatus 0."); +}; + +ok(evalSucceeds($jobset), "Evaluating jobs/build-output-as-input.nix for second time should exit with return code 0"); +is(nrQueuedBuildsForJobset($jobset), 1 , "The second evaluation should result in 1 new build in queue: build2"); +subtest "For the 'build2' job" => sub { + my ($build) = queuedBuildsForJobset($jobset); + is($build->job, "build2", "Verify the only job we got is for 'build2'"); + + ok(runBuild($build), "Build should 
exit with code 0"); + my $newbuild = $db->resultset('Builds')->find($build->id); + is($newbuild->finished, 1, "Build should be finished."); + is($newbuild->buildstatus, 0, "Build should have buildstatus 0."); +}; done_testing; From 2776ae6c785276724254f53462733736a277ce7a Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 23 Feb 2021 14:25:40 -0500 Subject: [PATCH 024/965] Move tests for SCM inputs in to its own .t --- tests/evaluation.t | 104 +-------------------------------------- tests/input-types.t | 116 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 118 insertions(+), 102 deletions(-) create mode 100644 tests/input-types.t diff --git a/tests/evaluation.t b/tests/evaluation.t index 9a47d44b..1368b0f9 100644 --- a/tests/evaluation.t +++ b/tests/evaluation.t @@ -7,7 +7,7 @@ use Setup; require Hydra::Schema; require Hydra::Model::DB; -use Test::Simple tests => 60; +use Test::Simple tests => 8; my $db = Hydra::Model::DB->new; hydra_setup($db); @@ -20,109 +20,9 @@ my $jobsBaseUri = "file://".getcwd; my $project = $db->resultset('Projects')->create({name => "tests", displayname => "", owner => "root"}); my $jobset; - -# Test scm inputs -my @scminputs = ( - { - name => "svn", - nixexpr => "svn-input.nix", - type => "svn", - uri => "$jobsBaseUri/svn-repo", - update => getcwd . "/jobs/svn-update.sh" - }, - { - name => "svn-checkout", - nixexpr => "svn-checkout-input.nix", - type => "svn-checkout", - uri => "$jobsBaseUri/svn-checkout-repo", - update => getcwd . "/jobs/svn-checkout-update.sh" - }, - { - name => "git", - nixexpr => "git-input.nix", - type => "git", - uri => "$jobsBaseUri/git-repo", - update => getcwd . "/jobs/git-update.sh" - }, - { - name => "git-rev", - nixexpr => "git-rev-input.nix", - type => "git", - uri => "$jobsBaseUri/git-repo 7f60df502b96fd54bbfa64dd94b56d936a407701", - update => getcwd . "/jobs/git-rev-update.sh" - }, - { - name => "deepgit", - nixexpr => "deepgit-input.nix", - type => "git", - uri => "$jobsBaseUri/git-repo master 1", - update => getcwd . "/jobs/git-update.sh" - }, - { - name => "bzr", - nixexpr => "bzr-input.nix", - type => "bzr", - uri => "$jobsBaseUri/bzr-repo", - update => getcwd . "/jobs/bzr-update.sh" - }, - { - name => "bzr-checkout", - nixexpr => "bzr-checkout-input.nix", - type => "bzr-checkout", - uri => "$jobsBaseUri/bzr-checkout-repo", - update => getcwd . "/jobs/bzr-checkout-update.sh" - }, - { - name => "hg", - nixexpr => "hg-input.nix", - type => "hg", - uri => "$jobsBaseUri/hg-repo", - update => getcwd . "/jobs/hg-update.sh" - }, - { - name => "darcs", - nixexpr => "darcs-input.nix", - type => "darcs", - uri => "$jobsBaseUri/darcs-repo", - update => getcwd . "/jobs/darcs-update.sh" - } -); - -foreach my $scm ( @scminputs ) { - my $scmName = $scm->{"name"}; - my $nixexpr = $scm->{"nixexpr"}; - my $type = $scm->{"type"}; - my $uri = $scm->{"uri"}; - my $update = $scm->{"update"}; - $jobset = createJobsetWithOneInput($scmName, $nixexpr, "src", $type, $uri); - - my $state = 0; - my $q = 0; - my ($loop, $updated) = updateRepository($scmName, $update); - while($loop) { - my $c = 0; - - # Verify that it can be fetched and possibly queued. - ok(evalSucceeds($jobset), "$scmName:$state.$c: Evaluating nix-expression."); $c++; - - # Verify that the evaluation has queued a new job and evaluate again to ... 
- if ($updated) { - $q++; - ok(nrQueuedBuildsForJobset($jobset) == $q, "$scmName:$state.$c: Expect $q jobs in the queue."); $c++; - ok(evalSucceeds($jobset), "$scmName:$state.$c: Evaluating nix-expression again."); $c++; - } - - # ... check that it is deterministic and not queued again. - ok(nrQueuedBuildsForJobset($jobset) == $q, "$scmName:$state.$c: Expect $q jobs in the queue."); $c++; - - $state++; - ($loop, $updated) = updateRepository($scmName, $update, getcwd . "/$scmName-repo/"); - } -} - # Test build products -$jobset = createBaseJobset("build-products", "build-products.nix"); +my $jobset = createBaseJobset("build-products", "build-products.nix"); ok(evalSucceeds($jobset), "Evaluating jobs/build-products.nix should exit with return code 0"); ok(nrQueuedBuildsForJobset($jobset) == 2 , "Evaluating jobs/build-products.nix should result in 2 builds"); diff --git a/tests/input-types.t b/tests/input-types.t new file mode 100644 index 00000000..c41b8e55 --- /dev/null +++ b/tests/input-types.t @@ -0,0 +1,116 @@ +use strict; +use Cwd; +use Setup; + +(my $datadir, my $pgsql) = test_init(); + +require Hydra::Schema; +require Hydra::Model::DB; + +use Test2::V0; + +my $db = Hydra::Model::DB->new; +hydra_setup($db); + +my $jobsBaseUri = "file://".getcwd; + +# Test scm inputs +my @scminputs = ( + { + name => "svn", + nixexpr => "svn-input.nix", + type => "svn", + uri => "$jobsBaseUri/svn-repo", + update => getcwd . "/jobs/svn-update.sh" + }, + { + name => "svn-checkout", + nixexpr => "svn-checkout-input.nix", + type => "svn-checkout", + uri => "$jobsBaseUri/svn-checkout-repo", + update => getcwd . "/jobs/svn-checkout-update.sh" + }, + { + name => "git", + nixexpr => "git-input.nix", + type => "git", + uri => "$jobsBaseUri/git-repo", + update => getcwd . "/jobs/git-update.sh" + }, + { + name => "git-rev", + nixexpr => "git-rev-input.nix", + type => "git", + uri => "$jobsBaseUri/git-repo 7f60df502b96fd54bbfa64dd94b56d936a407701", + update => getcwd . "/jobs/git-rev-update.sh" + }, + { + name => "deepgit", + nixexpr => "deepgit-input.nix", + type => "git", + uri => "$jobsBaseUri/git-repo master 1", + update => getcwd . "/jobs/git-update.sh" + }, + { + name => "bzr", + nixexpr => "bzr-input.nix", + type => "bzr", + uri => "$jobsBaseUri/bzr-repo", + update => getcwd . "/jobs/bzr-update.sh" + }, + { + name => "bzr-checkout", + nixexpr => "bzr-checkout-input.nix", + type => "bzr-checkout", + uri => "$jobsBaseUri/bzr-checkout-repo", + update => getcwd . "/jobs/bzr-checkout-update.sh" + }, + { + name => "hg", + nixexpr => "hg-input.nix", + type => "hg", + uri => "$jobsBaseUri/hg-repo", + update => getcwd . "/jobs/hg-update.sh" + }, + { + name => "darcs", + nixexpr => "darcs-input.nix", + type => "darcs", + uri => "$jobsBaseUri/darcs-repo", + update => getcwd . "/jobs/darcs-update.sh" + } +); + +foreach my $scm ( @scminputs ) { + my $scmName = $scm->{"name"}; + my $nixexpr = $scm->{"nixexpr"}; + my $type = $scm->{"type"}; + my $uri = $scm->{"uri"}; + my $update = $scm->{"update"}; + my $jobset = createJobsetWithOneInput($scmName, $nixexpr, "src", $type, $uri); + + my $state = 0; + my $q = 0; + my ($loop, $updated) = updateRepository($scmName, $update); + while($loop) { + my $c = 0; + + # Verify that it can be fetched and possibly queued. + ok(evalSucceeds($jobset), "$scmName:$state.$c: Evaluating nix-expression."); $c++; + + # Verify that the evaluation has queued a new job and evaluate again to ... 
+ if ($updated) { + $q++; + ok(nrQueuedBuildsForJobset($jobset) == $q, "$scmName:$state.$c: Expect $q jobs in the queue."); $c++; + ok(evalSucceeds($jobset), "$scmName:$state.$c: Evaluating nix-expression again."); $c++; + } + + # ... check that it is deterministic and not queued again. + ok(nrQueuedBuildsForJobset($jobset) == $q, "$scmName:$state.$c: Expect $q jobs in the queue."); $c++; + + $state++; + ($loop, $updated) = updateRepository($scmName, $update, getcwd . "/$scmName-repo/"); + } +} + +done_testing; From 0df9c68422dc1a696190e14ca843f58413314c76 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 23 Feb 2021 14:28:57 -0500 Subject: [PATCH 025/965] Relocate the final evalutation tests to a build-products specific test. --- tests/{evaluation.t => build-products.t} | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) rename tests/{evaluation.t => build-products.t} (86%) diff --git a/tests/evaluation.t b/tests/build-products.t similarity index 86% rename from tests/evaluation.t rename to tests/build-products.t index 1368b0f9..bda1d90d 100644 --- a/tests/evaluation.t +++ b/tests/build-products.t @@ -7,18 +7,11 @@ use Setup; require Hydra::Schema; require Hydra::Model::DB; -use Test::Simple tests => 8; +use Test2::V0; my $db = Hydra::Model::DB->new; hydra_setup($db); -my $res; -my $stdout; -my $stderr; - -my $jobsBaseUri = "file://".getcwd; -my $project = $db->resultset('Projects')->create({name => "tests", displayname => "", owner => "root"}); -my $jobset; # Test build products @@ -41,3 +34,5 @@ for my $build (queuedBuildsForJobset($jobset)) { ok($buildproduct->name eq "some text.txt", "We should have: \"some text.txt\", but found: ".$buildproduct->name."\n"); } } + +done_testing; From 371826f93113a79fffff757c8bfaab28bc63f6c7 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 23 Feb 2021 14:44:23 -0500 Subject: [PATCH 026/965] Tests: build-products: use `is` for good errors on failures --- tests/build-products.t | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/tests/build-products.t b/tests/build-products.t index bda1d90d..3431324c 100644 --- a/tests/build-products.t +++ b/tests/build-products.t @@ -17,22 +17,27 @@ hydra_setup($db); my $jobset = createBaseJobset("build-products", "build-products.nix"); -ok(evalSucceeds($jobset), "Evaluating jobs/build-products.nix should exit with return code 0"); -ok(nrQueuedBuildsForJobset($jobset) == 2 , "Evaluating jobs/build-products.nix should result in 2 builds"); +ok(evalSucceeds($jobset), "Evaluating jobs/build-products.nix should exit with return code 0"); +is(nrQueuedBuildsForJobset($jobset), 2, "Evaluating jobs/build-products.nix should result in 2 builds"); for my $build (queuedBuildsForJobset($jobset)) { - ok(runBuild($build), "Build '".$build->job."' from jobs/build-products.nix should exit with code 0"); - my $newbuild = $db->resultset('Builds')->find($build->id); - ok($newbuild->finished == 1 && $newbuild->buildstatus == 0, "Build '".$build->job."' from jobs/build-products.nix should have buildstatus 0"); + subtest "For the build job '" . $build->job . 
"'" => sub { + ok(runBuild($build), "Build should exit with code 0"); + my $newbuild = $db->resultset('Builds')->find($build->id); - my $buildproducts = $db->resultset('BuildProducts')->search({ build => $build->id }); - my $buildproduct = $buildproducts->next; + is($newbuild->finished, 1, "Build should have finished"); + is($newbuild->buildstatus, 0, "Build should have buildstatus 0"); + + my $buildproducts = $db->resultset('BuildProducts')->search({ build => $build->id }); + my $buildproduct = $buildproducts->next; + + if($build->job eq "simple") { + is($buildproduct->name, "text.txt", "We should have \"text.txt\""); + } elsif ($build->job eq "with_spaces") { + is($buildproduct->name, "some text.txt", "We should have: \"some text.txt\""); + } + }; - if($build->job eq "simple") { - ok($buildproduct->name eq "text.txt", "We should have text.txt, but found: ".$buildproduct->name."\n"); - } elsif ($build->job eq "with_spaces") { - ok($buildproduct->name eq "some text.txt", "We should have: \"some text.txt\", but found: ".$buildproduct->name."\n"); - } } done_testing; From 611d7b71f230eef1e92efdc09e9a28706b9ff8d3 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 23 Feb 2021 14:52:37 -0500 Subject: [PATCH 027/965] input-types: use is() for test comparisons --- tests/input-types.t | 51 ++++++++++++++++++++++++--------------------- 1 file changed, 27 insertions(+), 24 deletions(-) diff --git a/tests/input-types.t b/tests/input-types.t index c41b8e55..68035519 100644 --- a/tests/input-types.t +++ b/tests/input-types.t @@ -83,34 +83,37 @@ my @scminputs = ( foreach my $scm ( @scminputs ) { my $scmName = $scm->{"name"}; - my $nixexpr = $scm->{"nixexpr"}; - my $type = $scm->{"type"}; - my $uri = $scm->{"uri"}; - my $update = $scm->{"update"}; - my $jobset = createJobsetWithOneInput($scmName, $nixexpr, "src", $type, $uri); - my $state = 0; - my $q = 0; - my ($loop, $updated) = updateRepository($scmName, $update); - while($loop) { - my $c = 0; + subtest "With the SCM input named $scmName" => sub { + my $nixexpr = $scm->{"nixexpr"}; + my $type = $scm->{"type"}; + my $uri = $scm->{"uri"}; + my $update = $scm->{"update"}; + my $jobset = createJobsetWithOneInput($scmName, $nixexpr, "src", $type, $uri); - # Verify that it can be fetched and possibly queued. - ok(evalSucceeds($jobset), "$scmName:$state.$c: Evaluating nix-expression."); $c++; + my $state = 0; + my $q = 0; + my ($loop, $updated) = updateRepository($scmName, $update); + while($loop) { + subtest "Mutation number $state" => sub { + # Verify that it can be fetched and possibly queued. + ok(evalSucceeds($jobset), "Evaluating nix-expression."); - # Verify that the evaluation has queued a new job and evaluate again to ... - if ($updated) { - $q++; - ok(nrQueuedBuildsForJobset($jobset) == $q, "$scmName:$state.$c: Expect $q jobs in the queue."); $c++; - ok(evalSucceeds($jobset), "$scmName:$state.$c: Evaluating nix-expression again."); $c++; + # Verify that the evaluation has queued a new job and evaluate again to ... + if ($updated) { + $q++; + is(nrQueuedBuildsForJobset($jobset), $q, "Expect $q jobs in the queue."); + ok(evalSucceeds($jobset), "Evaluating nix-expression again."); + } + + # ... check that it is deterministic and not queued again. + is(nrQueuedBuildsForJobset($jobset), $q, "Expect deterministic evaluation."); + + $state++; + ($loop, $updated) = updateRepository($scmName, $update, getcwd . "/$scmName-repo/"); + }; } - - # ... check that it is deterministic and not queued again. 
- ok(nrQueuedBuildsForJobset($jobset) == $q, "$scmName:$state.$c: Expect $q jobs in the queue."); $c++; - - $state++; - ($loop, $updated) = updateRepository($scmName, $update, getcwd . "/$scmName-repo/"); - } + }; } done_testing; From cccdc701627e0300920c6ace1dd65c89b6a08cad Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 23 Feb 2021 21:42:59 -0500 Subject: [PATCH 028/965] input-types.t: don't litter ./tests/ --- tests/input-types.t | 27 +++++++++++++++------------ tests/lib/Setup.pm | 5 ++++- 2 files changed, 19 insertions(+), 13 deletions(-) diff --git a/tests/input-types.t b/tests/input-types.t index 68035519..bda15b96 100644 --- a/tests/input-types.t +++ b/tests/input-types.t @@ -12,7 +12,10 @@ use Test2::V0; my $db = Hydra::Model::DB->new; hydra_setup($db); -my $jobsBaseUri = "file://".getcwd; +my $testdir = getcwd; +my $scratchdir = "$datadir/scratch"; +mkdir $scratchdir; +my $jobsBaseUri = "file://".$scratchdir; # Test scm inputs my @scminputs = ( @@ -21,63 +24,63 @@ my @scminputs = ( nixexpr => "svn-input.nix", type => "svn", uri => "$jobsBaseUri/svn-repo", - update => getcwd . "/jobs/svn-update.sh" + update => $testdir . "/jobs/svn-update.sh" }, { name => "svn-checkout", nixexpr => "svn-checkout-input.nix", type => "svn-checkout", uri => "$jobsBaseUri/svn-checkout-repo", - update => getcwd . "/jobs/svn-checkout-update.sh" + update => $testdir . "/jobs/svn-checkout-update.sh" }, { name => "git", nixexpr => "git-input.nix", type => "git", uri => "$jobsBaseUri/git-repo", - update => getcwd . "/jobs/git-update.sh" + update => $testdir . "/jobs/git-update.sh" }, { name => "git-rev", nixexpr => "git-rev-input.nix", type => "git", uri => "$jobsBaseUri/git-repo 7f60df502b96fd54bbfa64dd94b56d936a407701", - update => getcwd . "/jobs/git-rev-update.sh" + update => $testdir . "/jobs/git-rev-update.sh" }, { name => "deepgit", nixexpr => "deepgit-input.nix", type => "git", uri => "$jobsBaseUri/git-repo master 1", - update => getcwd . "/jobs/git-update.sh" + update => $testdir . "/jobs/git-update.sh" }, { name => "bzr", nixexpr => "bzr-input.nix", type => "bzr", uri => "$jobsBaseUri/bzr-repo", - update => getcwd . "/jobs/bzr-update.sh" + update => $testdir . "/jobs/bzr-update.sh" }, { name => "bzr-checkout", nixexpr => "bzr-checkout-input.nix", type => "bzr-checkout", uri => "$jobsBaseUri/bzr-checkout-repo", - update => getcwd . "/jobs/bzr-checkout-update.sh" + update => $testdir . "/jobs/bzr-checkout-update.sh" }, { name => "hg", nixexpr => "hg-input.nix", type => "hg", uri => "$jobsBaseUri/hg-repo", - update => getcwd . "/jobs/hg-update.sh" + update => $testdir . "/jobs/hg-update.sh" }, { name => "darcs", nixexpr => "darcs-input.nix", type => "darcs", uri => "$jobsBaseUri/darcs-repo", - update => getcwd . "/jobs/darcs-update.sh" + update => $testdir . "/jobs/darcs-update.sh" } ); @@ -93,7 +96,7 @@ foreach my $scm ( @scminputs ) { my $state = 0; my $q = 0; - my ($loop, $updated) = updateRepository($scmName, $update); + my ($loop, $updated) = updateRepository($scmName, $update, $scratchdir); while($loop) { subtest "Mutation number $state" => sub { # Verify that it can be fetched and possibly queued. @@ -110,7 +113,7 @@ foreach my $scm ( @scminputs ) { is(nrQueuedBuildsForJobset($jobset), $q, "Expect deterministic evaluation."); $state++; - ($loop, $updated) = updateRepository($scmName, $update, getcwd . 
"/$scmName-repo/"); + ($loop, $updated) = updateRepository($scmName, $update, $scratchdir); }; } }; diff --git a/tests/lib/Setup.pm b/tests/lib/Setup.pm index 03c7046a..eba61aef 100644 --- a/tests/lib/Setup.pm +++ b/tests/lib/Setup.pm @@ -115,8 +115,11 @@ sub runBuild { } sub updateRepository { - my ($scm, $update) = @_; + my ($scm, $update, $scratchdir) = @_; + my $curdir = getcwd; + chdir "$scratchdir"; my ($res, $stdout, $stderr) = captureStdoutStderr(60, ($update, $scm)); + chdir "$curdir"; die "unexpected update error with $scm: $stderr\n" if $res; my ($message, $loop, $status) = $stdout =~ m/::(.*) -- (.*) -- (.*)::/; print STDOUT "Update $scm repository: $message\n"; From b2520267a91f0e2284e8dce766ebc64a423d977d Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 23 Feb 2021 16:08:25 -0500 Subject: [PATCH 029/965] Test setup: support arbitrary hydra config --- tests/lib/Setup.pm | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/tests/lib/Setup.pm b/tests/lib/Setup.pm index eba61aef..5e2bc91e 100644 --- a/tests/lib/Setup.pm +++ b/tests/lib/Setup.pm @@ -10,7 +10,30 @@ use Cwd; our @ISA = qw(Exporter); our @EXPORT = qw(test_init hydra_setup nrBuildsForJobset queuedBuildsForJobset nrQueuedBuildsForJobset createBaseJobset createJobsetWithOneInput evalSucceeds runBuild updateRepository); -sub test_init() { +# Set up the environment for running tests. +# +# Hash Parameters: +# +# * hydra_config: configuration for the Hydra processes for your test. +# +# This clears several environment variables and sets them to ephemeral +# values: a temporary database, temporary Nix store, temporary Hydra +# data directory, etc. +# +# Note: This function must run _very_ early, before nearly any Hydra +# libraries are loaded. To use this, you very likely need to `use Setup` +# and then run `test_init`, and then `require` the Hydra libraries you +# need. +# +# It returns a tuple: a handle to a temporary directory and a handle to +# the postgres service. If either of these variables go out of scope, +# those resources are released and the test environment becomes invalid. +# +# Look at the top of an existing `.t` file to see how this should be used +# in practice. +sub test_init { + my %opts = @_; + my $dir = File::Temp->newdir(); $ENV{'HYDRA_DATA'} = "$dir/hydra-data"; @@ -22,6 +45,12 @@ sub test_init() { print $fh "sandbox = false\n"; close $fh; + $ENV{'HYDRA_CONFIG'} = "$dir/hydra.conf"; + + open(my $fh, '>', $ENV{'HYDRA_CONFIG'}) or die "Could not open file '" . $ENV{'HYDRA_CONFIG'}. 
" $!"; + print $fh $opts{'hydra_config'} || ""; + close $fh; + $ENV{'NIX_STATE_DIR'} = "$dir/nix/var/nix"; $ENV{'NIX_MANIFESTS_DIR'} = "$dir/nix/var/nix/manifests"; From 3fda37f65a6252bb7289fdb3c6aa362719d329c7 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 23 Feb 2021 16:10:34 -0500 Subject: [PATCH 030/965] RunCommand: Test --- src/script/hydra-notify | 10 ++- tests/jobs/runcommand.nix | 25 +++++++ tests/lib/Setup.pm | 11 ++- tests/plugins/runcommand.t | 136 +++++++++++++++++++++++++++++++++++++ 4 files changed, 180 insertions(+), 2 deletions(-) create mode 100644 tests/jobs/runcommand.nix create mode 100644 tests/plugins/runcommand.t diff --git a/src/script/hydra-notify b/src/script/hydra-notify index 6677667c..037bf409 100755 --- a/src/script/hydra-notify +++ b/src/script/hydra-notify @@ -6,11 +6,18 @@ use Hydra::Plugin; use Hydra::Helper::Nix; use Hydra::Helper::AddBuilds; use IO::Select; +use Getopt::Long; STDERR->autoflush(1); STDOUT->autoflush(1); binmode STDERR, ":encoding(utf8)"; +my $queued_only; + +GetOptions( + "queued-only" => \$queued_only +) or exit 1; + my $config = getHydraConfig(); my $db = Hydra::Model::DB->new(); @@ -103,11 +110,12 @@ for my $build ($db->resultset('Builds')->search( buildFinished($build); } + # Process incoming notifications. my $fd = $dbh->func("getfd"); my $sel = IO::Select->new($fd); -while (1) { +while (!$queued_only) { $sel->can_read; while (my $notify = $dbh->func("pg_notifies")) { diff --git a/tests/jobs/runcommand.nix b/tests/jobs/runcommand.nix new file mode 100644 index 00000000..3fc42e04 --- /dev/null +++ b/tests/jobs/runcommand.nix @@ -0,0 +1,25 @@ +with import ./config.nix; +{ + metrics = mkDerivation { + name = "my-build-product"; + builder = "/bin/sh"; + outputs = [ "out" "bin" ]; + args = [ + ( + builtins.toFile "builder.sh" '' + #! /bin/sh + + echo "$PATH" + + mkdir $bin + echo "foo" > $bin/bar + + metrics=$out/nix-support/hydra-metrics + mkdir -p "$(dirname "$metrics")" + echo "lineCoverage 18 %" >> "$metrics" + echo "maxResident 27 KiB" >> "$metrics" + '' + ) + ]; + }; +} diff --git a/tests/lib/Setup.pm b/tests/lib/Setup.pm index 5e2bc91e..c697a258 100644 --- a/tests/lib/Setup.pm +++ b/tests/lib/Setup.pm @@ -8,7 +8,7 @@ use File::Path qw(make_path); use Cwd; our @ISA = qw(Exporter); -our @EXPORT = qw(test_init hydra_setup nrBuildsForJobset queuedBuildsForJobset nrQueuedBuildsForJobset createBaseJobset createJobsetWithOneInput evalSucceeds runBuild updateRepository); +our @EXPORT = qw(test_init hydra_setup nrBuildsForJobset queuedBuildsForJobset nrQueuedBuildsForJobset createBaseJobset createJobsetWithOneInput evalSucceeds runBuild sendNotifications updateRepository); # Set up the environment for running tests. 
 #
@@ -143,6 +143,15 @@ sub runBuild {
     return !$res;
 }
 
+sub sendNotifications() {
+    my ($res, $stdout, $stderr) = captureStdoutStderr(60, ("hydra-notify", "--queued-only"));
+    if ($res) {
+        print STDERR "hydra notify stdout: $stdout\n" if $stdout ne "";
+        print STDERR "hydra notify stderr: $stderr\n" if $stderr ne "";
+    }
+    return !$res;
+}
+
 sub updateRepository {
     my ($scm, $update, $scratchdir) = @_;
     my $curdir = getcwd;
diff --git a/tests/plugins/runcommand.t b/tests/plugins/runcommand.t
new file mode 100644
index 00000000..620328fc
--- /dev/null
+++ b/tests/plugins/runcommand.t
@@ -0,0 +1,136 @@
+use feature 'unicode_strings';
+use strict;
+use warnings;
+use Cwd;
+use JSON;
+use Setup;
+
+(my $datadir, my $pgsql) = test_init(
+    hydra_config => q|
+    <runcommand>
+      command = cp "$HYDRA_JSON" "$HYDRA_DATA/joboutput.json"
+    </runcommand>
+|);
+
+require Hydra::Schema;
+require Hydra::Model::DB;
+
+use Test2::V0;
+
+my $db = Hydra::Model::DB->new;
+hydra_setup($db);
+
+my $project = $db->resultset('Projects')->create({name => "tests", displayname => "", owner => "root"});
+
+# Most basic test case, no parameters
+my $jobset = createBaseJobset("basic", "runcommand.nix");
+
+ok(evalSucceeds($jobset), "Evaluating jobs/runcommand.nix should exit with return code 0");
+is(nrQueuedBuildsForJobset($jobset), 1, "Evaluating jobs/runcommand.nix should result in 1 build");
+
+(my $build) = queuedBuildsForJobset($jobset);
+
+is($build->job, "metrics", "The only job should be metrics");
+ok(runBuild($build), "Build should exit with code 0");
+my $newbuild = $db->resultset('Builds')->find($build->id);
+is($newbuild->finished, 1, "Build should be finished.");
+is($newbuild->buildstatus, 0, "Build should have buildstatus 0.");
+
+ok(sendNotifications(), "Notifications execute successfully.");
+
+my $dat = do {
+    my $filename = $ENV{'HYDRA_DATA'} . "/joboutput.json";
+    open(my $json_fh, "<", $filename)
+        or die("Can't open \"$filename\": $!\n");
+    local $/;
+    my $json = JSON->new;
+    $json->decode(<$json_fh>)
+};
+
+use Data::Dumper;
+print Dumper($dat);
+
+subtest "Validate the top level fields match" => sub {
+    is($dat->{build}, $newbuild->id, "The build event matches our expected ID.");
+    is($dat->{buildStatus}, 0, "The build status matches.");
+    is($dat->{event}, "buildFinished", "The build event matches.");
+    is($dat->{finished}, 1, "The build finished.");
+    is($dat->{project}, "tests", "The project matches.");
+    is($dat->{jobset}, "basic", "The jobset matches.");
+    is($dat->{job}, "metrics", "The job matches.");
+    is($dat->{drvPath}, $newbuild->drvpath, "The derivation path matches.");
+    is($dat->{timestamp}, $newbuild->timestamp, "The result has a timestamp field.");
+    is($dat->{startTime}, $newbuild->starttime, "The result has a startTime field.");
+    is($dat->{stopTime}, $newbuild->stoptime, "The result has a stopTime field.");
+};
+
+subtest "Validate the outputs match" => sub {
+    is(scalar(@{$dat->{outputs}}), 2, "There are exactly two outputs");
+
+    subtest "output: out" => sub {
+        my ($output) = grep { $_->{name} eq "out" } @{$dat->{outputs}};
+        my $expectedoutput = $newbuild->buildoutputs->find({name => "out"});
+
+        is($output->{name}, "out", "Output is named correctly");
+        is($output->{path}, $expectedoutput->path, "The output path matches the database's path.");
+    };
+
+    subtest "output: bin" => sub {
+        my ($output) = grep { $_->{name} eq "bin" } @{$dat->{outputs}};
+        my $expectedoutput = $newbuild->buildoutputs->find({name => "bin"});
+
+        is($output->{name}, "bin", "Output is named correctly");
+        is($output->{path}, $expectedoutput->path, "The output path matches the database's path.");
+    };
+};
+
+subtest "Validate the metrics match" => sub {
+    is(scalar(@{$dat->{metrics}}), 2, "There are exactly two metrics");
+
+    my ($lineCoverage) = grep { $_->{name} eq "lineCoverage" } @{$dat->{metrics}};
+    my ($maxResident) = grep { $_->{name} eq "maxResident" } @{$dat->{metrics}};
+
+    subtest "verifying the lineCoverage metric" => sub {
+        is($lineCoverage->{name}, "lineCoverage", "The name matches.");
+        is($lineCoverage->{value}, 18, "The value matches.");
+        is($lineCoverage->{unit}, "%", "The unit matches.");
+    };
+
+    subtest "verifying the maxResident metric" => sub {
+        is($maxResident->{name}, "maxResident", "The name matches.");
+        is($maxResident->{value}, 27, "The value matches.");
+        is($maxResident->{unit}, "KiB", "The unit matches.");
+    };
+};
+
+subtest "Validate the products match" => sub {
+    is(scalar(@{$dat->{products}}), 2, "There are exactly two products");
+
+    subtest "product: out" => sub {
+        my ($product) = grep { $_->{name} eq "my-build-product" } @{$dat->{products}};
+        my $expectedproduct = $newbuild->buildproducts->find({name => "my-build-product"});
+
+        is($product->{name}, "my-build-product", "The build product is named correctly.");
+        is($product->{subtype}, "", "The subtype is empty.");
+        is($product->{productNr}, $expectedproduct->productnr, "The product number matches.");
+        is($product->{defaultPath}, "", "The default path matches.");
+        is($product->{path}, $expectedproduct->path, "The path matches the output.");
+        is($product->{fileSize}, undef, "The fileSize is undefined for the nix-build output type.");
+        is($product->{sha256hash}, undef, "The sha256hash is undefined for the nix-build output type.");
+    };
+
+    subtest "product: bin" => sub {
+        my ($product) = grep { $_->{name} eq "my-build-product-bin" } 
@{$dat->{products}}; + my $expectedproduct = $newbuild->buildproducts->find({name => "my-build-product-bin"}); + + is($product->{name}, "my-build-product-bin", "The build product is named correctly."); + is($product->{subtype}, "bin", "The subtype matches the output name"); + is($product->{productNr}, $expectedproduct->productnr, "The product number matches."); + is($product->{defaultPath}, "", "The default path matches."); + is($product->{path}, $expectedproduct->path, "The path matches the output."); + is($product->{fileSize}, undef, "The fileSize is undefined for the nix-build output type."); + is($product->{sha256hash}, undef, "The sha256hash is undefined for the nix-build output type."); + }; +}; + +done_testing; \ No newline at end of file From a756614fa1593810746ffdd9213786d46e74e9c0 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 24 Feb 2021 11:30:33 -0500 Subject: [PATCH 031/965] RunCommand: pass homepage, description, license, system, and nixname --- src/lib/Hydra/Plugin/RunCommand.pm | 5 ++++ tests/jobs/runcommand.nix | 44 ++++++++++++++++++------------ tests/plugins/runcommand.t | 5 ++++ 3 files changed, 36 insertions(+), 18 deletions(-) diff --git a/src/lib/Hydra/Plugin/RunCommand.pm b/src/lib/Hydra/Plugin/RunCommand.pm index 8ae70c6f..fd095222 100644 --- a/src/lib/Hydra/Plugin/RunCommand.pm +++ b/src/lib/Hydra/Plugin/RunCommand.pm @@ -71,6 +71,11 @@ sub buildFinished { startTime => $build->get_column('starttime'), stopTime => $build->get_column('stoptime'), buildStatus => $build->get_column('buildstatus'), + nixName => $build->get_column('nixname'), + system => $build->get_column('system'), + homepage => $build->get_column('homepage'), + description => $build->get_column('description'), + license => $build->get_column('license'), outputs => [], products => [], metrics => [], diff --git a/tests/jobs/runcommand.nix b/tests/jobs/runcommand.nix index 3fc42e04..f4b8b0fc 100644 --- a/tests/jobs/runcommand.nix +++ b/tests/jobs/runcommand.nix @@ -1,25 +1,33 @@ with import ./config.nix; { - metrics = mkDerivation { - name = "my-build-product"; - builder = "/bin/sh"; - outputs = [ "out" "bin" ]; - args = [ - ( - builtins.toFile "builder.sh" '' - #! /bin/sh + metrics = ( + mkDerivation { + name = "my-build-product"; + builder = "/bin/sh"; + outputs = [ "out" "bin" ]; + args = [ + ( + builtins.toFile "builder.sh" '' + #! 
/bin/sh - echo "$PATH" + echo "$PATH" - mkdir $bin - echo "foo" > $bin/bar + mkdir $bin + echo "foo" > $bin/bar - metrics=$out/nix-support/hydra-metrics - mkdir -p "$(dirname "$metrics")" - echo "lineCoverage 18 %" >> "$metrics" - echo "maxResident 27 KiB" >> "$metrics" - '' - ) - ]; + metrics=$out/nix-support/hydra-metrics + mkdir -p "$(dirname "$metrics")" + echo "lineCoverage 18 %" >> "$metrics" + echo "maxResident 27 KiB" >> "$metrics" + '' + ) + ]; + } + ) // { + meta = { + license = "GPL"; + description = "An example meta property."; + homepage = "https://github.com/NixOS/hydra"; + }; }; } diff --git a/tests/plugins/runcommand.t b/tests/plugins/runcommand.t index 620328fc..de78cb83 100644 --- a/tests/plugins/runcommand.t +++ b/tests/plugins/runcommand.t @@ -58,10 +58,15 @@ subtest "Validate the top level fields match" => sub { is($dat->{project}, "tests", "The project matches."); is($dat->{jobset}, "basic", "The jobset matches."); is($dat->{job}, "metrics", "The job matches."); + is($dat->{nixName}, "my-build-product", "The nixName matches."); + is($dat->{system}, $newbuild->system, "The system matches."); is($dat->{drvPath}, $newbuild->drvpath, "The derivation path matches."); is($dat->{timestamp}, $newbuild->timestamp, "The result has a timestamp field."); is($dat->{startTime}, $newbuild->starttime, "The result has a startTime field."); is($dat->{stopTime}, $newbuild->stoptime, "The result has a stopTime field."); + is($dat->{homepage}, "https://github.com/NixOS/hydra", "The homepage is passed."); + is($dat->{description}, "An example meta property.", "The description is passed."); + is($dat->{license}, "GPL", "The license is passed."); }; subtest "Validate the outputs match" => sub { From fe70160008fad745b597f7ddbec19eaef9d4f149 Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Wed, 3 Mar 2021 15:23:30 -0800 Subject: [PATCH 032/965] module: append `application_name` to HYDRA_DBI This will make it easier to track specifically where queries are being made from (assuming a `log_line_prefix` that includes `%a` in the postgres configuration). --- hydra-module.nix | 27 ++++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/hydra-module.nix b/hydra-module.nix index 08d623a0..f9afd446 100644 --- a/hydra-module.nix +++ b/hydra-module.nix @@ -58,6 +58,11 @@ in example = "dbi:Pg:dbname=hydra;host=postgres.example.org;user=foo;"; description = '' The DBI string for Hydra database connection. + + NOTE: Attempts to set `application_name` will be overridden by + `hydra-TYPE` (where TYPE is e.g. `evaluator`, `queue-runner`, + etc.) in all hydra services to more easily distinguish where + queries are coming from. 
''; }; @@ -248,7 +253,9 @@ in { wantedBy = [ "multi-user.target" ]; requires = optional haveLocalDB "postgresql.service"; after = optional haveLocalDB "postgresql.service"; - environment = env; + environment = env // { + HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-init"; + }; path = [ pkgs.utillinux ]; preStart = '' mkdir -p ${baseDir} @@ -304,7 +311,9 @@ in { wantedBy = [ "multi-user.target" ]; requires = [ "hydra-init.service" ]; after = [ "hydra-init.service" ]; - environment = serverEnv; + environment = serverEnv // { + HYDRA_DBI = "${serverEnv.HYDRA_DBI};application_name=hydra-server"; + }; restartTriggers = [ hydraConf ]; serviceConfig = { ExecStart = @@ -326,6 +335,7 @@ in environment = env // { PGPASSFILE = "${baseDir}/pgpass-queue-runner"; # grrr IN_SYSTEMD = "1"; # to get log severity levels + HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-queue-runner"; }; serviceConfig = { ExecStart = "@${cfg.package}/bin/hydra-queue-runner hydra-queue-runner -v"; @@ -345,7 +355,9 @@ in restartTriggers = [ hydraConf ]; after = [ "hydra-init.service" "network.target" ]; path = with pkgs; [ nettools cfg.package jq ]; - environment = env; + environment = env // { + HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-evaluator"; + }; serviceConfig = { ExecStart = "@${cfg.package}/bin/hydra-evaluator hydra-evaluator"; ExecStopPost = "${cfg.package}/bin/hydra-evaluator --unlock"; @@ -358,7 +370,9 @@ in systemd.services.hydra-update-gc-roots = { requires = [ "hydra-init.service" ]; after = [ "hydra-init.service" ]; - environment = env; + environment = env // { + HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-update-gc-roots"; + }; serviceConfig = { ExecStart = "@${cfg.package}/bin/hydra-update-gc-roots hydra-update-gc-roots"; User = "hydra"; @@ -369,7 +383,9 @@ in systemd.services.hydra-send-stats = { wantedBy = [ "multi-user.target" ]; after = [ "hydra-init.service" ]; - environment = env; + environment = env // { + HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-send-stats"; + }; serviceConfig = { ExecStart = "@${cfg.package}/bin/hydra-send-stats hydra-send-stats"; User = "hydra"; @@ -383,6 +399,7 @@ in restartTriggers = [ hydraConf ]; environment = env // { PGPASSFILE = "${baseDir}/pgpass-queue-runner"; # grrr + HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-notify"; }; serviceConfig = { ExecStart = "@${cfg.package}/bin/hydra-notify hydra-notify"; From 387fe8005a2fb813d4ac842ea2cbaa23ba48e9a0 Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Thu, 4 Mar 2021 12:38:56 -0800 Subject: [PATCH 033/965] tests/input-types: split out scminputs into individual tests This makes the test faster (by removing it and replacing it with a `TestScmInput` module that exports the `testScmInput` subroutine). Now, all the input tests can be run in parallel. Some of the `tests/jobs/*-update.sh` scripts were "broken" (e.g. tests failed for various reasons on my machine), so I fixed those up as well. 
Co-authored-by: gustavderdrache --- .gitignore | 1 + tests/input-types.t | 122 ------------------------------ tests/input-types/bzr-checkout.t | 28 +++++++ tests/input-types/bzr.t | 28 +++++++ tests/input-types/darcs.t | 28 +++++++ tests/input-types/deepgit.t | 29 +++++++ tests/input-types/git-rev.t | 29 +++++++ tests/input-types/git.t | 28 +++++++ tests/input-types/hg.t | 28 +++++++ tests/input-types/svn-checkout.t | 28 +++++++ tests/input-types/svn.t | 28 +++++++ tests/jobs/bzr-checkout-update.sh | 6 ++ tests/jobs/git-rev-update.sh | 1 + tests/jobs/git-update.sh | 1 + tests/jobs/svn-checkout-update.sh | 5 ++ tests/lib/TestScmInput.pm | 74 ++++++++++++++++++ 16 files changed, 342 insertions(+), 122 deletions(-) delete mode 100644 tests/input-types.t create mode 100644 tests/input-types/bzr-checkout.t create mode 100644 tests/input-types/bzr.t create mode 100644 tests/input-types/darcs.t create mode 100644 tests/input-types/deepgit.t create mode 100644 tests/input-types/git-rev.t create mode 100644 tests/input-types/git.t create mode 100644 tests/input-types/hg.t create mode 100644 tests/input-types/svn-checkout.t create mode 100644 tests/input-types/svn.t create mode 100644 tests/lib/TestScmInput.pm diff --git a/.gitignore b/.gitignore index 5a0ab4a2..4cfd7cae 100644 --- a/.gitignore +++ b/.gitignore @@ -29,6 +29,7 @@ Makefile.in /tests/.git* /tests/.hg* /tests/nix +/tests/data /inst hydra-config.h hydra-config.h.in diff --git a/tests/input-types.t b/tests/input-types.t deleted file mode 100644 index bda15b96..00000000 --- a/tests/input-types.t +++ /dev/null @@ -1,122 +0,0 @@ -use strict; -use Cwd; -use Setup; - -(my $datadir, my $pgsql) = test_init(); - -require Hydra::Schema; -require Hydra::Model::DB; - -use Test2::V0; - -my $db = Hydra::Model::DB->new; -hydra_setup($db); - -my $testdir = getcwd; -my $scratchdir = "$datadir/scratch"; -mkdir $scratchdir; -my $jobsBaseUri = "file://".$scratchdir; - -# Test scm inputs -my @scminputs = ( - { - name => "svn", - nixexpr => "svn-input.nix", - type => "svn", - uri => "$jobsBaseUri/svn-repo", - update => $testdir . "/jobs/svn-update.sh" - }, - { - name => "svn-checkout", - nixexpr => "svn-checkout-input.nix", - type => "svn-checkout", - uri => "$jobsBaseUri/svn-checkout-repo", - update => $testdir . "/jobs/svn-checkout-update.sh" - }, - { - name => "git", - nixexpr => "git-input.nix", - type => "git", - uri => "$jobsBaseUri/git-repo", - update => $testdir . "/jobs/git-update.sh" - }, - { - name => "git-rev", - nixexpr => "git-rev-input.nix", - type => "git", - uri => "$jobsBaseUri/git-repo 7f60df502b96fd54bbfa64dd94b56d936a407701", - update => $testdir . "/jobs/git-rev-update.sh" - }, - { - name => "deepgit", - nixexpr => "deepgit-input.nix", - type => "git", - uri => "$jobsBaseUri/git-repo master 1", - update => $testdir . "/jobs/git-update.sh" - }, - { - name => "bzr", - nixexpr => "bzr-input.nix", - type => "bzr", - uri => "$jobsBaseUri/bzr-repo", - update => $testdir . "/jobs/bzr-update.sh" - }, - { - name => "bzr-checkout", - nixexpr => "bzr-checkout-input.nix", - type => "bzr-checkout", - uri => "$jobsBaseUri/bzr-checkout-repo", - update => $testdir . "/jobs/bzr-checkout-update.sh" - }, - { - name => "hg", - nixexpr => "hg-input.nix", - type => "hg", - uri => "$jobsBaseUri/hg-repo", - update => $testdir . "/jobs/hg-update.sh" - }, - { - name => "darcs", - nixexpr => "darcs-input.nix", - type => "darcs", - uri => "$jobsBaseUri/darcs-repo", - update => $testdir . 
"/jobs/darcs-update.sh" - } -); - -foreach my $scm ( @scminputs ) { - my $scmName = $scm->{"name"}; - - subtest "With the SCM input named $scmName" => sub { - my $nixexpr = $scm->{"nixexpr"}; - my $type = $scm->{"type"}; - my $uri = $scm->{"uri"}; - my $update = $scm->{"update"}; - my $jobset = createJobsetWithOneInput($scmName, $nixexpr, "src", $type, $uri); - - my $state = 0; - my $q = 0; - my ($loop, $updated) = updateRepository($scmName, $update, $scratchdir); - while($loop) { - subtest "Mutation number $state" => sub { - # Verify that it can be fetched and possibly queued. - ok(evalSucceeds($jobset), "Evaluating nix-expression."); - - # Verify that the evaluation has queued a new job and evaluate again to ... - if ($updated) { - $q++; - is(nrQueuedBuildsForJobset($jobset), $q, "Expect $q jobs in the queue."); - ok(evalSucceeds($jobset), "Evaluating nix-expression again."); - } - - # ... check that it is deterministic and not queued again. - is(nrQueuedBuildsForJobset($jobset), $q, "Expect deterministic evaluation."); - - $state++; - ($loop, $updated) = updateRepository($scmName, $update, $scratchdir); - }; - } - }; -} - -done_testing; diff --git a/tests/input-types/bzr-checkout.t b/tests/input-types/bzr-checkout.t new file mode 100644 index 00000000..2bcf556f --- /dev/null +++ b/tests/input-types/bzr-checkout.t @@ -0,0 +1,28 @@ +use strict; +use Cwd; +use Setup; +use TestScmInput; + +(my $datadir, my $pgsql) = test_init(); + +require Hydra::Schema; +require Hydra::Model::DB; + +use Test2::V0; + +my $db = Hydra::Model::DB->new; +hydra_setup($db); + +# Tests the creation of a Hydra jobset using a bzr checkout as input. +testScmInput( + type => 'bzr-checkout', + expr => 'bzr-checkout-input.nix', + uri => 'bzr-checkout-repo', + update => 'jobs/bzr-checkout-update.sh', + + # directories + datadir => $datadir, + testdir => getcwd, +); + +done_testing; diff --git a/tests/input-types/bzr.t b/tests/input-types/bzr.t new file mode 100644 index 00000000..345f5aca --- /dev/null +++ b/tests/input-types/bzr.t @@ -0,0 +1,28 @@ +use strict; +use Cwd; +use Setup; +use TestScmInput; + +(my $datadir, my $pgsql) = test_init(); + +require Hydra::Schema; +require Hydra::Model::DB; + +use Test2::V0; + +my $db = Hydra::Model::DB->new; +hydra_setup($db); + +# Tests the creation of a Hydra jobset using a bzr repo as input. +testScmInput( + type => 'bzr', + expr => 'bzr-input.nix', + uri => 'bzr-repo', + update => 'jobs/bzr-update.sh', + + # directories + datadir => $datadir, + testdir => getcwd, +); + +done_testing; diff --git a/tests/input-types/darcs.t b/tests/input-types/darcs.t new file mode 100644 index 00000000..afbbd630 --- /dev/null +++ b/tests/input-types/darcs.t @@ -0,0 +1,28 @@ +use strict; +use Cwd; +use Setup; +use TestScmInput; + +(my $datadir, my $pgsql) = test_init(); + +require Hydra::Schema; +require Hydra::Model::DB; + +use Test2::V0; + +my $db = Hydra::Model::DB->new; +hydra_setup($db); + +# Tests the creation of a Hydra jobset using a darcs repo as input. 
+testScmInput( + type => 'darcs', + expr => 'darcs-input.nix', + uri => 'darcs-repo', + update => 'jobs/darcs-update.sh', + + # directories + datadir => $datadir, + testdir => getcwd, +); + +done_testing; diff --git a/tests/input-types/deepgit.t b/tests/input-types/deepgit.t new file mode 100644 index 00000000..7db0e2ce --- /dev/null +++ b/tests/input-types/deepgit.t @@ -0,0 +1,29 @@ +use strict; +use Cwd; +use Setup; +use TestScmInput; + +(my $datadir, my $pgsql) = test_init(); + +require Hydra::Schema; +require Hydra::Model::DB; + +use Test2::V0; + +my $db = Hydra::Model::DB->new; +hydra_setup($db); + +# Tests the creation of a Hydra jobset using a deep git clone as input. +testScmInput( + type => 'git', + name => 'deepgit', + expr => 'deepgit-input.nix', + uri => 'git-repo master 1', + update => 'jobs/git-update.sh', + + # directories + datadir => $datadir, + testdir => getcwd, +); + +done_testing; diff --git a/tests/input-types/git-rev.t b/tests/input-types/git-rev.t new file mode 100644 index 00000000..58d23a99 --- /dev/null +++ b/tests/input-types/git-rev.t @@ -0,0 +1,29 @@ +use strict; +use Cwd; +use Setup; +use TestScmInput; + +(my $datadir, my $pgsql) = test_init(); + +require Hydra::Schema; +require Hydra::Model::DB; + +use Test2::V0; + +my $db = Hydra::Model::DB->new; +hydra_setup($db); + +# Tests the creation of a Hydra jobset using a git revision as input. +testScmInput( + type => 'git', + name => 'git-rev', + expr => 'git-rev-input.nix', + uri => 'git-repo 7f60df502b96fd54bbfa64dd94b56d936a407701', + update => 'jobs/git-rev-update.sh', + + # directories + datadir => $datadir, + testdir => getcwd, +); + +done_testing; diff --git a/tests/input-types/git.t b/tests/input-types/git.t new file mode 100644 index 00000000..82eafa97 --- /dev/null +++ b/tests/input-types/git.t @@ -0,0 +1,28 @@ +use strict; +use Cwd; +use Setup; +use TestScmInput; + +(my $datadir, my $pgsql) = test_init(); + +require Hydra::Schema; +require Hydra::Model::DB; + +use Test2::V0; + +my $db = Hydra::Model::DB->new; +hydra_setup($db); + +# Tests the creation of a Hydra jobset using a git repo as input. +testScmInput( + type => 'git', + expr => 'git-input.nix', + uri => 'git-repo', + update => 'jobs/git-update.sh', + + # directories + datadir => $datadir, + testdir => getcwd, +); + +done_testing; diff --git a/tests/input-types/hg.t b/tests/input-types/hg.t new file mode 100644 index 00000000..d0bdc8d8 --- /dev/null +++ b/tests/input-types/hg.t @@ -0,0 +1,28 @@ +use strict; +use Cwd; +use Setup; +use TestScmInput; + +(my $datadir, my $pgsql) = test_init(); + +require Hydra::Schema; +require Hydra::Model::DB; + +use Test2::V0; + +my $db = Hydra::Model::DB->new; +hydra_setup($db); + +# Tests the creation of a Hydra jobset using a hg repo as input. +testScmInput( + type => 'hg', + expr => 'hg-input.nix', + uri => 'hg-repo', + update => 'jobs/hg-update.sh', + + # directories + datadir => $datadir, + testdir => getcwd, +); + +done_testing; diff --git a/tests/input-types/svn-checkout.t b/tests/input-types/svn-checkout.t new file mode 100644 index 00000000..7c8543c3 --- /dev/null +++ b/tests/input-types/svn-checkout.t @@ -0,0 +1,28 @@ +use strict; +use Cwd; +use Setup; +use TestScmInput; + +(my $datadir, my $pgsql) = test_init(); + +require Hydra::Schema; +require Hydra::Model::DB; + +use Test2::V0; + +my $db = Hydra::Model::DB->new; +hydra_setup($db); + +# Tests the creation of a Hydra jobset using a svn checkout as input. 
+testScmInput( + type => 'svn-checkout', + expr => 'svn-checkout-input.nix', + uri => 'svn-checkout-repo', + update => 'jobs/svn-checkout-update.sh', + + # directories + datadir => $datadir, + testdir => getcwd, +); + +done_testing; diff --git a/tests/input-types/svn.t b/tests/input-types/svn.t new file mode 100644 index 00000000..091a6c74 --- /dev/null +++ b/tests/input-types/svn.t @@ -0,0 +1,28 @@ +use strict; +use Cwd; +use Setup; +use TestScmInput; + +(my $datadir, my $pgsql) = test_init(); + +require Hydra::Schema; +require Hydra::Model::DB; + +use Test2::V0; + +my $db = Hydra::Model::DB->new; +hydra_setup($db); + +# Tests the creation of a Hydra jobset using a svn repo as input. +testScmInput( + type => 'svn', + expr => 'svn-input.nix', + uri => 'svn-repo', + update => 'jobs/svn-update.sh', + + # directories + datadir => $datadir, + testdir => getcwd, +); + +done_testing; diff --git a/tests/jobs/bzr-checkout-update.sh b/tests/jobs/bzr-checkout-update.sh index 0aaeca14..7eb56466 100755 --- a/tests/jobs/bzr-checkout-update.sh +++ b/tests/jobs/bzr-checkout-update.sh @@ -9,8 +9,14 @@ else state=0; fi +export BZR_HOME; # Set by the Makefile case $state in (0) echo "::Create repo. -- continue -- updated::" + bzr init bzr-repo + bzr whoami "build " -d bzr-repo + touch bzr-repo/bzr-file + bzr add bzr-repo/bzr-file + bzr commit -m "add bzr-file" bzr-repo/bzr-file ln -s bzr-repo bzr-checkout-repo ;; (*) echo "::End. -- stop -- nothing::" ;; diff --git a/tests/jobs/git-rev-update.sh b/tests/jobs/git-rev-update.sh index d48268f4..e91af1f9 100755 --- a/tests/jobs/git-rev-update.sh +++ b/tests/jobs/git-rev-update.sh @@ -3,6 +3,7 @@ set -e repo=git-repo export HOME=$(pwd) +export XDG_CONFIG_HOME=$(pwd)/.config STATE_FILE=$(pwd)/.git-rev-state if test -e $STATE_FILE; then state=1 diff --git a/tests/jobs/git-update.sh b/tests/jobs/git-update.sh index afe59717..7c983ccf 100755 --- a/tests/jobs/git-update.sh +++ b/tests/jobs/git-update.sh @@ -4,6 +4,7 @@ set -e repo=git-repo export HOME=$(pwd) +export XDG_CONFIG_HOME=$(pwd)/.config STATE_FILE=$(pwd)/.git-state if test -e $STATE_FILE; then state=$(cat $STATE_FILE) diff --git a/tests/jobs/svn-checkout-update.sh b/tests/jobs/svn-checkout-update.sh index 7aa6c868..01c6c8e9 100755 --- a/tests/jobs/svn-checkout-update.sh +++ b/tests/jobs/svn-checkout-update.sh @@ -11,6 +11,11 @@ fi case $state in (0) echo "::Create repo. -- continue -- updated::" + svnadmin create svn-repo + svn co file://$PWD/$repo svn-checkout + touch svn-checkout/svn-file + svn add svn-checkout/svn-file + svn commit -m "add svn file" svn-checkout/svn-file ln -s svn-repo svn-checkout-repo ;; (*) echo "::End. -- stop -- nothing::" ;; diff --git a/tests/lib/TestScmInput.pm b/tests/lib/TestScmInput.pm new file mode 100644 index 00000000..3aa720d0 --- /dev/null +++ b/tests/lib/TestScmInput.pm @@ -0,0 +1,74 @@ +package TestScmInput; +use warnings; +use strict; + +use Exporter; +use Test2::V0; + +use Setup; + +our @ISA = qw(Exporter); +our @EXPORT = qw(testScmInput); + +# Generic test for the various SCM types Hydra supports. +# +# Takes input in the form of: +# +# ( +# type => "input type", +# name => "jobset name", # defaults to the input's type +# uri => "uri", +# update => "script for updating the input", +# datadir => "data dir", # returned from `test_init()` subroutine +# testdir => "the hydra tests directory", # usually just `getcwd` +# ) +# +# and runs a test that constructs a jobset from the specified input. 
+sub testScmInput { + # Collect named args, dying if a required arg is missing + my %args = @_; + my $type = $args{type} // die "required arg 'type' missing"; + my $expr = $args{expr} // die "required arg 'expr' missing"; + + # $name is optional and defaults to $type + my $name = $args{name} // $type; + + # Get directories + my $testdir = $args{testdir} // die "required arg 'testdir' missing"; + my $datadir = $args{datadir} // die "required arg 'datadir' missing"; + + my $update = $args{update} // die "required arg 'update' missing"; + $update = "$testdir/$update"; + + # Create scratch locations + my $scratchdir = "$datadir/scratch"; + mkdir $scratchdir or die "mkdir($scratchdir): $!\n"; + + # $uri and $update are constructed from the directories + my $uri = $args{uri} // die "required arg 'uri' missing"; + $uri = "file://$scratchdir/$uri"; + + subtest "With the SCM input named $name" => sub { + my $jobset = createJobsetWithOneInput($name, $expr, 'src', $type, $uri); + + my ($mutations, $queueSize) = (0, 0); + + my ($loop, $updated) = updateRepository($name, $update, $scratchdir); + while ($loop) { + subtest "Mutation number $mutations" => sub { + ok(evalSucceeds($jobset), "Evaluating nix-expression."); + + if ($updated) { + $queueSize++; + is(nrQueuedBuildsForJobset($jobset), $queueSize, "Expect $queueSize jobs in the queue."); + ok(evalSucceeds($jobset), "Evaluating nix-expression again."); + } + + is(nrQueuedBuildsForJobset($jobset), $queueSize, "Expect deterministic evaluation."); + + $mutations++; + ($loop, $updated) = updateRepository($name, $update, $scratchdir); + }; + } + }; +} From f08d0be1bd63c91526affac99e764b1dd0453c3a Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Thu, 25 Feb 2021 19:26:38 -0500 Subject: [PATCH 034/965] tests: replace the flat list of contexts with a hash This way we can return more values without breaking callers. 
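A rough sketch of the pattern this change adopts (the subroutine and values below are illustrative placeholders, not the exact Hydra helpers): returning named values as a hash lets a setup routine grow new fields later without breaking existing callers, which a positional list cannot do.

```perl
use strict;
use warnings;

# Hash-style return: callers unpack only the keys they need, so a new
# key (say, testdir or jobsdir) can be added without touching them.
sub test_init_sketch {
    my $dir   = "/tmp/hydra-test-data";    # placeholder path
    my $pgsql = "dbi:Pg:dbname=hydra";     # placeholder DSN
    return (
        tmpdir => $dir,
        db     => $pgsql,
    );
}

my %ctx = test_init_sketch();
print "data dir: $ctx{tmpdir}\n";
```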
--- tests/build-products.t | 2 +- tests/evaluate-basic.t | 2 +- tests/evaluate-dependent-jobsets.t | 2 +- tests/lib/Setup.pm | 5 ++++- tests/plugins/runcommand.t | 2 +- 5 files changed, 8 insertions(+), 5 deletions(-) diff --git a/tests/build-products.t b/tests/build-products.t index 3431324c..5f1610f9 100644 --- a/tests/build-products.t +++ b/tests/build-products.t @@ -2,7 +2,7 @@ use strict; use Cwd; use Setup; -(my $datadir, my $pgsql) = test_init(); +my %ctx = test_init(); require Hydra::Schema; require Hydra::Model::DB; diff --git a/tests/evaluate-basic.t b/tests/evaluate-basic.t index 0d58d08f..d143b425 100644 --- a/tests/evaluate-basic.t +++ b/tests/evaluate-basic.t @@ -3,7 +3,7 @@ use strict; use Cwd; use Setup; -(my $datadir, my $pgsql) = test_init(); +my %ctx = test_init(); require Hydra::Schema; require Hydra::Model::DB; diff --git a/tests/evaluate-dependent-jobsets.t b/tests/evaluate-dependent-jobsets.t index 279e1f33..fa849258 100644 --- a/tests/evaluate-dependent-jobsets.t +++ b/tests/evaluate-dependent-jobsets.t @@ -2,7 +2,7 @@ use strict; use Cwd; use Setup; -(my $datadir, my $pgsql) = test_init(); +my %ctx = test_init(); require Hydra::Schema; require Hydra::Model::DB; diff --git a/tests/lib/Setup.pm b/tests/lib/Setup.pm index c697a258..3d1ef0fd 100644 --- a/tests/lib/Setup.pm +++ b/tests/lib/Setup.pm @@ -62,7 +62,10 @@ sub test_init { ); $ENV{'HYDRA_DBI'} = $pgsql->dsn; system("hydra-init") == 0 or die; - return ($dir, $pgsql); + return ( + tmpdir => $dir, + db => $pgsql + ); } sub captureStdoutStderr { diff --git a/tests/plugins/runcommand.t b/tests/plugins/runcommand.t index de78cb83..7788d5c5 100644 --- a/tests/plugins/runcommand.t +++ b/tests/plugins/runcommand.t @@ -5,7 +5,7 @@ use Cwd; use JSON; use Setup; -(my $datadir, my $pgsql) = test_init( +my %ctx = test_init( hydra_config => q| command = cp "$HYDRA_JSON" "$HYDRA_DATA/joboutput.json" From 9a3af13c514698e48cf7de8d424b05b498353a43 Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Thu, 4 Mar 2021 17:22:37 -0800 Subject: [PATCH 035/965] tests: add jobsdir and testdir to ctx hash This allows us to calculate those directories only once, and just pass them around as necessary. 
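The diff below computes the directories relative to the test library's own location rather than the caller's working directory. A minimal sketch of that technique, assuming the same layout as Setup.pm (a lib/ directory with jobs/ as a sibling of its parent):

```perl
use strict;
use warnings;
use File::Basename qw(dirname);
use Cwd qw(abs_path);

# __FILE__ is this module's own path, so the results are stable no
# matter where the test runner happens to be started from.
my $testdir = abs_path(dirname(__FILE__) . "/..");
my $jobsdir = abs_path(dirname(__FILE__) . "/../jobs");

print "testdir=$testdir\njobsdir=$jobsdir\n";
```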
--- tests/build-products.t | 2 +- tests/evaluate-basic.t | 4 ++-- tests/evaluate-dependent-jobsets.t | 2 +- tests/input-types/bzr-checkout.t | 7 ++++--- tests/input-types/bzr.t | 7 ++++--- tests/input-types/darcs.t | 7 ++++--- tests/input-types/deepgit.t | 7 ++++--- tests/input-types/git-rev.t | 7 ++++--- tests/input-types/git.t | 7 ++++--- tests/input-types/hg.t | 7 ++++--- tests/input-types/svn-checkout.t | 7 ++++--- tests/input-types/svn.t | 7 ++++--- tests/lib/Setup.pm | 15 +++++++++------ tests/lib/TestScmInput.pm | 3 ++- tests/plugins/runcommand.t | 4 ++-- 15 files changed, 53 insertions(+), 40 deletions(-) diff --git a/tests/build-products.t b/tests/build-products.t index 5f1610f9..740f5b17 100644 --- a/tests/build-products.t +++ b/tests/build-products.t @@ -15,7 +15,7 @@ hydra_setup($db); # Test build products -my $jobset = createBaseJobset("build-products", "build-products.nix"); +my $jobset = createBaseJobset("build-products", "build-products.nix", $ctx{jobsdir}); ok(evalSucceeds($jobset), "Evaluating jobs/build-products.nix should exit with return code 0"); is(nrQueuedBuildsForJobset($jobset), 2, "Evaluating jobs/build-products.nix should result in 2 builds"); diff --git a/tests/evaluate-basic.t b/tests/evaluate-basic.t index d143b425..73d81b4a 100644 --- a/tests/evaluate-basic.t +++ b/tests/evaluate-basic.t @@ -16,7 +16,7 @@ hydra_setup($db); my $project = $db->resultset('Projects')->create({name => "tests", displayname => "", owner => "root"}); # Most basic test case, no parameters -my $jobset = createBaseJobset("basic", "basic.nix"); +my $jobset = createBaseJobset("basic", "basic.nix", $ctx{jobsdir}); ok(evalSucceeds($jobset), "Evaluating jobs/basic.nix should exit with return code 0"); is(nrQueuedBuildsForJobset($jobset), 3, "Evaluating jobs/basic.nix should result in 3 builds"); @@ -29,4 +29,4 @@ for my $build (queuedBuildsForJobset($jobset)) { is($newbuild->buildstatus, $expected, "Build '".$build->job."' from jobs/basic.nix should have buildstatus $expected."); } -done_testing; \ No newline at end of file +done_testing; diff --git a/tests/evaluate-dependent-jobsets.t b/tests/evaluate-dependent-jobsets.t index fa849258..3b2ac8ec 100644 --- a/tests/evaluate-dependent-jobsets.t +++ b/tests/evaluate-dependent-jobsets.t @@ -13,7 +13,7 @@ my $db = Hydra::Model::DB->new; hydra_setup($db); # Test jobset with 2 jobs, one has parameter of succeeded build of the other -my $jobset = createJobsetWithOneInput("build-output-as-input", "build-output-as-input.nix", "build1", "build", "build1"); +my $jobset = createJobsetWithOneInput("build-output-as-input", "build-output-as-input.nix", "build1", "build", "build1", $ctx{jobsdir}); ok(evalSucceeds($jobset), "Evaluating jobs/build-output-as-input.nix should exit with return code 0"); is(nrQueuedBuildsForJobset($jobset), 1 , "Evaluation should result in 1 build in queue"); diff --git a/tests/input-types/bzr-checkout.t b/tests/input-types/bzr-checkout.t index 2bcf556f..c21151f3 100644 --- a/tests/input-types/bzr-checkout.t +++ b/tests/input-types/bzr-checkout.t @@ -3,7 +3,7 @@ use Cwd; use Setup; use TestScmInput; -(my $datadir, my $pgsql) = test_init(); +my %ctx = test_init(); require Hydra::Schema; require Hydra::Model::DB; @@ -21,8 +21,9 @@ testScmInput( update => 'jobs/bzr-checkout-update.sh', # directories - datadir => $datadir, - testdir => getcwd, + datadir => $ctx{tmpdir}, + testdir => $ctx{testdir}, + jobsdir => $ctx{jobsdir}, ); done_testing; diff --git a/tests/input-types/bzr.t b/tests/input-types/bzr.t index 345f5aca..99f005bc 
100644 --- a/tests/input-types/bzr.t +++ b/tests/input-types/bzr.t @@ -3,7 +3,7 @@ use Cwd; use Setup; use TestScmInput; -(my $datadir, my $pgsql) = test_init(); +my %ctx = test_init(); require Hydra::Schema; require Hydra::Model::DB; @@ -21,8 +21,9 @@ testScmInput( update => 'jobs/bzr-update.sh', # directories - datadir => $datadir, - testdir => getcwd, + datadir => $ctx{tmpdir}, + testdir => $ctx{testdir}, + jobsdir => $ctx{jobsdir}, ); done_testing; diff --git a/tests/input-types/darcs.t b/tests/input-types/darcs.t index afbbd630..8a6f02dc 100644 --- a/tests/input-types/darcs.t +++ b/tests/input-types/darcs.t @@ -3,7 +3,7 @@ use Cwd; use Setup; use TestScmInput; -(my $datadir, my $pgsql) = test_init(); +my %ctx = test_init(); require Hydra::Schema; require Hydra::Model::DB; @@ -21,8 +21,9 @@ testScmInput( update => 'jobs/darcs-update.sh', # directories - datadir => $datadir, - testdir => getcwd, + datadir => $ctx{tmpdir}, + testdir => $ctx{testdir}, + jobsdir => $ctx{jobsdir}, ); done_testing; diff --git a/tests/input-types/deepgit.t b/tests/input-types/deepgit.t index 7db0e2ce..d3dfa646 100644 --- a/tests/input-types/deepgit.t +++ b/tests/input-types/deepgit.t @@ -3,7 +3,7 @@ use Cwd; use Setup; use TestScmInput; -(my $datadir, my $pgsql) = test_init(); +my %ctx = test_init(); require Hydra::Schema; require Hydra::Model::DB; @@ -22,8 +22,9 @@ testScmInput( update => 'jobs/git-update.sh', # directories - datadir => $datadir, - testdir => getcwd, + datadir => $ctx{tmpdir}, + testdir => $ctx{testdir}, + jobsdir => $ctx{jobsdir}, ); done_testing; diff --git a/tests/input-types/git-rev.t b/tests/input-types/git-rev.t index 58d23a99..665e742b 100644 --- a/tests/input-types/git-rev.t +++ b/tests/input-types/git-rev.t @@ -3,7 +3,7 @@ use Cwd; use Setup; use TestScmInput; -(my $datadir, my $pgsql) = test_init(); +my %ctx = test_init(); require Hydra::Schema; require Hydra::Model::DB; @@ -22,8 +22,9 @@ testScmInput( update => 'jobs/git-rev-update.sh', # directories - datadir => $datadir, - testdir => getcwd, + datadir => $ctx{tmpdir}, + testdir => $ctx{testdir}, + jobsdir => $ctx{jobsdir}, ); done_testing; diff --git a/tests/input-types/git.t b/tests/input-types/git.t index 82eafa97..eba7c2b2 100644 --- a/tests/input-types/git.t +++ b/tests/input-types/git.t @@ -3,7 +3,7 @@ use Cwd; use Setup; use TestScmInput; -(my $datadir, my $pgsql) = test_init(); +my %ctx = test_init(); require Hydra::Schema; require Hydra::Model::DB; @@ -21,8 +21,9 @@ testScmInput( update => 'jobs/git-update.sh', # directories - datadir => $datadir, - testdir => getcwd, + datadir => $ctx{tmpdir}, + testdir => $ctx{testdir}, + jobsdir => $ctx{jobsdir}, ); done_testing; diff --git a/tests/input-types/hg.t b/tests/input-types/hg.t index d0bdc8d8..756e9653 100644 --- a/tests/input-types/hg.t +++ b/tests/input-types/hg.t @@ -3,7 +3,7 @@ use Cwd; use Setup; use TestScmInput; -(my $datadir, my $pgsql) = test_init(); +my %ctx = test_init(); require Hydra::Schema; require Hydra::Model::DB; @@ -21,8 +21,9 @@ testScmInput( update => 'jobs/hg-update.sh', # directories - datadir => $datadir, - testdir => getcwd, + datadir => $ctx{tmpdir}, + testdir => $ctx{testdir}, + jobsdir => $ctx{jobsdir}, ); done_testing; diff --git a/tests/input-types/svn-checkout.t b/tests/input-types/svn-checkout.t index 7c8543c3..e8dd7d95 100644 --- a/tests/input-types/svn-checkout.t +++ b/tests/input-types/svn-checkout.t @@ -3,7 +3,7 @@ use Cwd; use Setup; use TestScmInput; -(my $datadir, my $pgsql) = test_init(); +my %ctx = test_init(); require 
Hydra::Schema; require Hydra::Model::DB; @@ -21,8 +21,9 @@ testScmInput( update => 'jobs/svn-checkout-update.sh', # directories - datadir => $datadir, - testdir => getcwd, + datadir => $ctx{tmpdir}, + testdir => $ctx{testdir}, + jobsdir => $ctx{jobsdir}, ); done_testing; diff --git a/tests/input-types/svn.t b/tests/input-types/svn.t index 091a6c74..64e677b6 100644 --- a/tests/input-types/svn.t +++ b/tests/input-types/svn.t @@ -3,7 +3,7 @@ use Cwd; use Setup; use TestScmInput; -(my $datadir, my $pgsql) = test_init(); +my %ctx = test_init(); require Hydra::Schema; require Hydra::Model::DB; @@ -21,8 +21,9 @@ testScmInput( update => 'jobs/svn-update.sh', # directories - datadir => $datadir, - testdir => getcwd, + datadir => $ctx{tmpdir}, + testdir => $ctx{testdir}, + jobsdir => $ctx{jobsdir}, ); done_testing; diff --git a/tests/lib/Setup.pm b/tests/lib/Setup.pm index 3d1ef0fd..3cc9828e 100644 --- a/tests/lib/Setup.pm +++ b/tests/lib/Setup.pm @@ -5,7 +5,8 @@ use Exporter; use Test::PostgreSQL; use File::Temp; use File::Path qw(make_path); -use Cwd; +use File::Basename; +use Cwd qw(abs_path getcwd); our @ISA = qw(Exporter); our @EXPORT = qw(test_init hydra_setup nrBuildsForJobset queuedBuildsForJobset nrQueuedBuildsForJobset createBaseJobset createJobsetWithOneInput evalSucceeds runBuild sendNotifications updateRepository); @@ -63,8 +64,10 @@ sub test_init { $ENV{'HYDRA_DBI'} = $pgsql->dsn; system("hydra-init") == 0 or die; return ( + db => $pgsql, tmpdir => $dir, - db => $pgsql + testdir => abs_path(dirname(__FILE__) . "/.."), + jobsdir => abs_path(dirname(__FILE__) . "/../jobs") ); } @@ -98,7 +101,7 @@ sub nrQueuedBuildsForJobset { } sub createBaseJobset { - my ($jobsetName, $nixexprpath) = @_; + my ($jobsetName, $nixexprpath, $jobspath) = @_; my $db = Hydra::Model::DB->new; my $project = $db->resultset('Projects')->update_or_create({name => "tests", displayname => "", owner => "root"}); @@ -108,14 +111,14 @@ sub createBaseJobset { my $jobsetinputals; $jobsetinput = $jobset->jobsetinputs->create({name => "jobs", type => "path"}); - $jobsetinputals = $jobsetinput->jobsetinputalts->create({altnr => 0, value => getcwd."/jobs"}); + $jobsetinputals = $jobsetinput->jobsetinputalts->create({altnr => 0, value => $jobspath}); return $jobset; } sub createJobsetWithOneInput { - my ($jobsetName, $nixexprpath, $name, $type, $uri) = @_; - my $jobset = createBaseJobset($jobsetName, $nixexprpath); + my ($jobsetName, $nixexprpath, $name, $type, $uri, $jobspath) = @_; + my $jobset = createBaseJobset($jobsetName, $nixexprpath, $jobspath); my $jobsetinput; my $jobsetinputals; diff --git a/tests/lib/TestScmInput.pm b/tests/lib/TestScmInput.pm index 3aa720d0..acf7fa27 100644 --- a/tests/lib/TestScmInput.pm +++ b/tests/lib/TestScmInput.pm @@ -36,6 +36,7 @@ sub testScmInput { # Get directories my $testdir = $args{testdir} // die "required arg 'testdir' missing"; my $datadir = $args{datadir} // die "required arg 'datadir' missing"; + my $jobsdir = $args{jobsdir} // die "required arg 'jobsdir' missing"; my $update = $args{update} // die "required arg 'update' missing"; $update = "$testdir/$update"; @@ -49,7 +50,7 @@ sub testScmInput { $uri = "file://$scratchdir/$uri"; subtest "With the SCM input named $name" => sub { - my $jobset = createJobsetWithOneInput($name, $expr, 'src', $type, $uri); + my $jobset = createJobsetWithOneInput($name, $expr, 'src', $type, $uri, $jobsdir); my ($mutations, $queueSize) = (0, 0); diff --git a/tests/plugins/runcommand.t b/tests/plugins/runcommand.t index 7788d5c5..9dd1fbfe 100644 --- 
a/tests/plugins/runcommand.t +++ b/tests/plugins/runcommand.t @@ -23,7 +23,7 @@ hydra_setup($db); my $project = $db->resultset('Projects')->create({name => "tests", displayname => "", owner => "root"}); # Most basic test case, no parameters -my $jobset = createBaseJobset("basic", "runcommand.nix"); +my $jobset = createBaseJobset("basic", "runcommand.nix", $ctx{jobsdir}); ok(evalSucceeds($jobset), "Evaluating jobs/runcommand.nix should exit with return code 0"); is(nrQueuedBuildsForJobset($jobset), 1, "Evaluating jobs/runcommand.nix should result in 1 build1"); @@ -138,4 +138,4 @@ subtest "Validate the products match" => sub { }; }; -done_testing; \ No newline at end of file +done_testing; From 014778344c7aeea6b73d17802229e5011d2ac1e0 Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Thu, 4 Mar 2021 23:42:56 -0800 Subject: [PATCH 036/965] tests: remove unnecessary Cwd imports --- tests/build-products.t | 1 - tests/evaluate-basic.t | 1 - tests/evaluate-dependent-jobsets.t | 1 - tests/input-types/bzr-checkout.t | 1 - tests/input-types/bzr.t | 1 - tests/input-types/darcs.t | 1 - tests/input-types/deepgit.t | 1 - tests/input-types/git-rev.t | 1 - tests/input-types/git.t | 1 - tests/input-types/hg.t | 1 - tests/input-types/svn-checkout.t | 1 - tests/input-types/svn.t | 1 - tests/plugins/runcommand.t | 1 - 13 files changed, 13 deletions(-) diff --git a/tests/build-products.t b/tests/build-products.t index 740f5b17..0f558d86 100644 --- a/tests/build-products.t +++ b/tests/build-products.t @@ -1,5 +1,4 @@ use strict; -use Cwd; use Setup; my %ctx = test_init(); diff --git a/tests/evaluate-basic.t b/tests/evaluate-basic.t index 73d81b4a..85d5547b 100644 --- a/tests/evaluate-basic.t +++ b/tests/evaluate-basic.t @@ -1,6 +1,5 @@ use feature 'unicode_strings'; use strict; -use Cwd; use Setup; my %ctx = test_init(); diff --git a/tests/evaluate-dependent-jobsets.t b/tests/evaluate-dependent-jobsets.t index 3b2ac8ec..0bf3a2f0 100644 --- a/tests/evaluate-dependent-jobsets.t +++ b/tests/evaluate-dependent-jobsets.t @@ -1,5 +1,4 @@ use strict; -use Cwd; use Setup; my %ctx = test_init(); diff --git a/tests/input-types/bzr-checkout.t b/tests/input-types/bzr-checkout.t index c21151f3..ee1f90a0 100644 --- a/tests/input-types/bzr-checkout.t +++ b/tests/input-types/bzr-checkout.t @@ -1,5 +1,4 @@ use strict; -use Cwd; use Setup; use TestScmInput; diff --git a/tests/input-types/bzr.t b/tests/input-types/bzr.t index 99f005bc..0cc0a23d 100644 --- a/tests/input-types/bzr.t +++ b/tests/input-types/bzr.t @@ -1,5 +1,4 @@ use strict; -use Cwd; use Setup; use TestScmInput; diff --git a/tests/input-types/darcs.t b/tests/input-types/darcs.t index 8a6f02dc..aa95a8e6 100644 --- a/tests/input-types/darcs.t +++ b/tests/input-types/darcs.t @@ -1,5 +1,4 @@ use strict; -use Cwd; use Setup; use TestScmInput; diff --git a/tests/input-types/deepgit.t b/tests/input-types/deepgit.t index d3dfa646..4d418148 100644 --- a/tests/input-types/deepgit.t +++ b/tests/input-types/deepgit.t @@ -1,5 +1,4 @@ use strict; -use Cwd; use Setup; use TestScmInput; diff --git a/tests/input-types/git-rev.t b/tests/input-types/git-rev.t index 665e742b..866370d2 100644 --- a/tests/input-types/git-rev.t +++ b/tests/input-types/git-rev.t @@ -1,5 +1,4 @@ use strict; -use Cwd; use Setup; use TestScmInput; diff --git a/tests/input-types/git.t b/tests/input-types/git.t index eba7c2b2..0804cc1a 100644 --- a/tests/input-types/git.t +++ b/tests/input-types/git.t @@ -1,5 +1,4 @@ use strict; -use Cwd; use Setup; use TestScmInput; diff --git a/tests/input-types/hg.t 
b/tests/input-types/hg.t index 756e9653..28552288 100644 --- a/tests/input-types/hg.t +++ b/tests/input-types/hg.t @@ -1,5 +1,4 @@ use strict; -use Cwd; use Setup; use TestScmInput; diff --git a/tests/input-types/svn-checkout.t b/tests/input-types/svn-checkout.t index e8dd7d95..48f07f09 100644 --- a/tests/input-types/svn-checkout.t +++ b/tests/input-types/svn-checkout.t @@ -1,5 +1,4 @@ use strict; -use Cwd; use Setup; use TestScmInput; diff --git a/tests/input-types/svn.t b/tests/input-types/svn.t index 64e677b6..c2eb8cec 100644 --- a/tests/input-types/svn.t +++ b/tests/input-types/svn.t @@ -1,5 +1,4 @@ use strict; -use Cwd; use Setup; use TestScmInput; diff --git a/tests/plugins/runcommand.t b/tests/plugins/runcommand.t index 9dd1fbfe..1945e1dd 100644 --- a/tests/plugins/runcommand.t +++ b/tests/plugins/runcommand.t @@ -1,7 +1,6 @@ use feature 'unicode_strings'; use strict; use warnings; -use Cwd; use JSON; use Setup; From 025be052b7d3e118df8c4e55eeb910ab22d3ecb7 Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Thu, 4 Mar 2021 23:35:57 -0800 Subject: [PATCH 037/965] tests: move to t, allow `yath test` from root By moving the tests subdirectory to t, we gain the ability to run `yath test` with no arguments from inside `nix develop` in the root of the repo. (`nix develop` is necessary in order to set the proper env vars for `yath` to find our test libraries.) --- .gitignore | 12 ++++++------ .yath.rc | 2 ++ Makefile.am | 2 +- configure.ac | 4 ++-- flake.nix | 14 +++++++++----- {tests => t}/Makefile.am | 0 {tests => t}/api-test.nix | 0 {tests => t}/api-test.pl | 0 {tests => t}/build-products.t | 0 {tests => t}/evaluate-basic.t | 0 {tests => t}/evaluate-dependent-jobsets.t | 0 {tests => t}/input-types/bzr-checkout.t | 0 {tests => t}/input-types/bzr.t | 0 {tests => t}/input-types/darcs.t | 0 {tests => t}/input-types/deepgit.t | 0 {tests => t}/input-types/git-rev.t | 0 {tests => t}/input-types/git.t | 0 {tests => t}/input-types/hg.t | 0 {tests => t}/input-types/svn-checkout.t | 0 {tests => t}/input-types/svn.t | 0 {tests => t}/jobs/basic.nix | 0 {tests => t}/jobs/build-output-as-input.nix | 0 {tests => t}/jobs/build-product-simple.sh | 0 {tests => t}/jobs/build-product-with-spaces.sh | 0 {tests => t}/jobs/build-products.nix | 0 {tests => t}/jobs/bzr-checkout-input.nix | 0 {tests => t}/jobs/bzr-checkout-update.sh | 0 {tests => t}/jobs/bzr-input.nix | 0 {tests => t}/jobs/bzr-update.sh | 0 {tests => t}/jobs/config.nix.in | 0 {tests => t}/jobs/darcs-input.nix | 0 {tests => t}/jobs/darcs-update.sh | 0 {tests => t}/jobs/deepgit-builder.sh | 0 {tests => t}/jobs/deepgit-input.nix | 0 {tests => t}/jobs/empty-dir-builder.sh | 0 {tests => t}/jobs/fail.sh | 0 {tests => t}/jobs/git-input.nix | 0 {tests => t}/jobs/git-rev-input.nix | 0 {tests => t}/jobs/git-rev-update.sh | 0 {tests => t}/jobs/git-update.sh | 0 {tests => t}/jobs/hg-input.nix | 0 {tests => t}/jobs/hg-update.sh | 0 {tests => t}/jobs/runcommand.nix | 0 {tests => t}/jobs/scm-builder.sh | 0 {tests => t}/jobs/succeed-with-failed.sh | 0 {tests => t}/jobs/svn-checkout-input.nix | 0 {tests => t}/jobs/svn-checkout-update.sh | 0 {tests => t}/jobs/svn-input.nix | 0 {tests => t}/jobs/svn-update.sh | 0 {tests => t}/lib/Setup.pm | 0 {tests => t}/lib/TestScmInput.pm | 0 {tests => t}/plugins/runcommand.t | 0 {tests => t}/s3-backup-test.config | 0 {tests => t}/s3-backup-test.pl | 0 {tests => t}/setup-notifications-jobset.pl | 0 {tests => t}/test.pl | 0 56 files changed, 20 insertions(+), 14 deletions(-) create mode 100644 .yath.rc rename {tests =>
t}/Makefile.am (100%) rename {tests => t}/api-test.nix (100%) rename {tests => t}/api-test.pl (100%) rename {tests => t}/build-products.t (100%) rename {tests => t}/evaluate-basic.t (100%) rename {tests => t}/evaluate-dependent-jobsets.t (100%) rename {tests => t}/input-types/bzr-checkout.t (100%) rename {tests => t}/input-types/bzr.t (100%) rename {tests => t}/input-types/darcs.t (100%) rename {tests => t}/input-types/deepgit.t (100%) rename {tests => t}/input-types/git-rev.t (100%) rename {tests => t}/input-types/git.t (100%) rename {tests => t}/input-types/hg.t (100%) rename {tests => t}/input-types/svn-checkout.t (100%) rename {tests => t}/input-types/svn.t (100%) rename {tests => t}/jobs/basic.nix (100%) rename {tests => t}/jobs/build-output-as-input.nix (100%) rename {tests => t}/jobs/build-product-simple.sh (100%) rename {tests => t}/jobs/build-product-with-spaces.sh (100%) rename {tests => t}/jobs/build-products.nix (100%) rename {tests => t}/jobs/bzr-checkout-input.nix (100%) rename {tests => t}/jobs/bzr-checkout-update.sh (100%) rename {tests => t}/jobs/bzr-input.nix (100%) rename {tests => t}/jobs/bzr-update.sh (100%) rename {tests => t}/jobs/config.nix.in (100%) rename {tests => t}/jobs/darcs-input.nix (100%) rename {tests => t}/jobs/darcs-update.sh (100%) rename {tests => t}/jobs/deepgit-builder.sh (100%) rename {tests => t}/jobs/deepgit-input.nix (100%) rename {tests => t}/jobs/empty-dir-builder.sh (100%) rename {tests => t}/jobs/fail.sh (100%) rename {tests => t}/jobs/git-input.nix (100%) rename {tests => t}/jobs/git-rev-input.nix (100%) rename {tests => t}/jobs/git-rev-update.sh (100%) rename {tests => t}/jobs/git-update.sh (100%) rename {tests => t}/jobs/hg-input.nix (100%) rename {tests => t}/jobs/hg-update.sh (100%) rename {tests => t}/jobs/runcommand.nix (100%) rename {tests => t}/jobs/scm-builder.sh (100%) rename {tests => t}/jobs/succeed-with-failed.sh (100%) rename {tests => t}/jobs/svn-checkout-input.nix (100%) rename {tests => t}/jobs/svn-checkout-update.sh (100%) rename {tests => t}/jobs/svn-input.nix (100%) rename {tests => t}/jobs/svn-update.sh (100%) rename {tests => t}/lib/Setup.pm (100%) rename {tests => t}/lib/TestScmInput.pm (100%) rename {tests => t}/plugins/runcommand.t (100%) rename {tests => t}/s3-backup-test.config (100%) rename {tests => t}/s3-backup-test.pl (100%) rename {tests => t}/setup-notifications-jobset.pl (100%) rename {tests => t}/test.pl (100%) diff --git a/.gitignore b/.gitignore index 4cfd7cae..9855f3d1 100644 --- a/.gitignore +++ b/.gitignore @@ -25,16 +25,16 @@ Makefile.in /doc/manual/images /doc/manual/manual.html /doc/manual/manual.pdf -/tests/.bzr* -/tests/.git* -/tests/.hg* -/tests/nix -/tests/data +/t/.bzr* +/t/.git* +/t/.hg* +/t/nix +/t/data +/t/jobs/config.nix /inst hydra-config.h hydra-config.h.in result -tests/jobs/config.nix outputs config stamp-h1 diff --git a/.yath.rc b/.yath.rc new file mode 100644 index 00000000..19bb35af --- /dev/null +++ b/.yath.rc @@ -0,0 +1,2 @@ +[test] +-I=rel(t/lib) diff --git a/Makefile.am b/Makefile.am index ad911b4d..e744cc33 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,4 +1,4 @@ -SUBDIRS = src tests doc +SUBDIRS = src t doc BOOTCLEAN_SUBDIRS = $(SUBDIRS) DIST_SUBDIRS = $(SUBDIRS) EXTRA_DIST = hydra-module.nix diff --git a/configure.ac b/configure.ac index baff26c2..2fb175c8 100644 --- a/configure.ac +++ b/configure.ac @@ -70,8 +70,8 @@ AC_CONFIG_FILES([ src/lib/Makefile src/root/Makefile src/script/Makefile - tests/Makefile - tests/jobs/config.nix + t/Makefile + t/jobs/config.nix ]) 
AC_CONFIG_COMMANDS([executable-scripts], []) diff --git a/flake.nix b/flake.nix index e7bfb65b..2e2a6574 100644 --- a/flake.nix +++ b/flake.nix @@ -320,12 +320,16 @@ ] ++ lib.optionals stdenv.isLinux [ rpm dpkg cdrkit ] ); shellHook = '' + pushd $(git rev-parse --show-toplevel) >/dev/null + PATH=$(pwd)/src/hydra-evaluator:$(pwd)/src/script:$(pwd)/src/hydra-eval-jobs:$(pwd)/src/hydra-queue-runner:$PATH PERL5LIB=$(pwd)/src/lib:$PERL5LIB - export HYDRA_HOME="src/" + export HYDRA_HOME="$(pwd)/src/" mkdir -p .hydra-data export HYDRA_DATA="$(pwd)/.hydra-data" export HYDRA_DBI='dbi:Pg:dbname=hydra;host=localhost;port=64444' + + popd >/dev/null ''; preConfigure = "autoreconf -vfi"; @@ -416,7 +420,7 @@ su - hydra -c "hydra-create-user root --email-address 'alice@example.org' --password foobar --role admin" mkdir /run/jobset /tmp/nix chmod 755 /run/jobset /tmp/nix - cp ${./tests/api-test.nix} /run/jobset/default.nix + cp ${./t/api-test.nix} /run/jobset/default.nix chmod 644 /run/jobset/default.nix chown -R hydra /run/jobset /tmp/nix """ @@ -428,7 +432,7 @@ # Run the API tests. machine.succeed( - "su - hydra -c 'perl -I ${pkgs.hydra.perlDeps}/lib/perl5/site_perl ${./tests/api-test.pl}' >&2" + "su - hydra -c 'perl -I ${pkgs.hydra.perlDeps}/lib/perl5/site_perl ${./t/api-test.pl}' >&2" ) ''; }; @@ -455,7 +459,7 @@ su - hydra -c "hydra-create-user root --email-address 'alice@example.org' --password foobar --role admin" mkdir /run/jobset chmod 755 /run/jobset - cp ${./tests/api-test.nix} /run/jobset/default.nix + cp ${./t/api-test.nix} /run/jobset/default.nix chmod 644 /run/jobset/default.nix chown -R hydra /run/jobset """ @@ -477,7 +481,7 @@ # Setup the project and jobset machine.succeed( - "su - hydra -c 'perl -I ${pkgs.hydra.perlDeps}/lib/perl5/site_perl ${./tests/setup-notifications-jobset.pl}' >&2" + "su - hydra -c 'perl -I ${pkgs.hydra.perlDeps}/lib/perl5/site_perl ${./t/setup-notifications-jobset.pl}' >&2" ) # Wait until hydra has build the job and diff --git a/tests/Makefile.am b/t/Makefile.am similarity index 100% rename from tests/Makefile.am rename to t/Makefile.am diff --git a/tests/api-test.nix b/t/api-test.nix similarity index 100% rename from tests/api-test.nix rename to t/api-test.nix diff --git a/tests/api-test.pl b/t/api-test.pl similarity index 100% rename from tests/api-test.pl rename to t/api-test.pl diff --git a/tests/build-products.t b/t/build-products.t similarity index 100% rename from tests/build-products.t rename to t/build-products.t diff --git a/tests/evaluate-basic.t b/t/evaluate-basic.t similarity index 100% rename from tests/evaluate-basic.t rename to t/evaluate-basic.t diff --git a/tests/evaluate-dependent-jobsets.t b/t/evaluate-dependent-jobsets.t similarity index 100% rename from tests/evaluate-dependent-jobsets.t rename to t/evaluate-dependent-jobsets.t diff --git a/tests/input-types/bzr-checkout.t b/t/input-types/bzr-checkout.t similarity index 100% rename from tests/input-types/bzr-checkout.t rename to t/input-types/bzr-checkout.t diff --git a/tests/input-types/bzr.t b/t/input-types/bzr.t similarity index 100% rename from tests/input-types/bzr.t rename to t/input-types/bzr.t diff --git a/tests/input-types/darcs.t b/t/input-types/darcs.t similarity index 100% rename from tests/input-types/darcs.t rename to t/input-types/darcs.t diff --git a/tests/input-types/deepgit.t b/t/input-types/deepgit.t similarity index 100% rename from tests/input-types/deepgit.t rename to t/input-types/deepgit.t diff --git a/tests/input-types/git-rev.t b/t/input-types/git-rev.t similarity 
index 100% rename from tests/input-types/git-rev.t rename to t/input-types/git-rev.t diff --git a/tests/input-types/git.t b/t/input-types/git.t similarity index 100% rename from tests/input-types/git.t rename to t/input-types/git.t diff --git a/tests/input-types/hg.t b/t/input-types/hg.t similarity index 100% rename from tests/input-types/hg.t rename to t/input-types/hg.t diff --git a/tests/input-types/svn-checkout.t b/t/input-types/svn-checkout.t similarity index 100% rename from tests/input-types/svn-checkout.t rename to t/input-types/svn-checkout.t diff --git a/tests/input-types/svn.t b/t/input-types/svn.t similarity index 100% rename from tests/input-types/svn.t rename to t/input-types/svn.t diff --git a/tests/jobs/basic.nix b/t/jobs/basic.nix similarity index 100% rename from tests/jobs/basic.nix rename to t/jobs/basic.nix diff --git a/tests/jobs/build-output-as-input.nix b/t/jobs/build-output-as-input.nix similarity index 100% rename from tests/jobs/build-output-as-input.nix rename to t/jobs/build-output-as-input.nix diff --git a/tests/jobs/build-product-simple.sh b/t/jobs/build-product-simple.sh similarity index 100% rename from tests/jobs/build-product-simple.sh rename to t/jobs/build-product-simple.sh diff --git a/tests/jobs/build-product-with-spaces.sh b/t/jobs/build-product-with-spaces.sh similarity index 100% rename from tests/jobs/build-product-with-spaces.sh rename to t/jobs/build-product-with-spaces.sh diff --git a/tests/jobs/build-products.nix b/t/jobs/build-products.nix similarity index 100% rename from tests/jobs/build-products.nix rename to t/jobs/build-products.nix diff --git a/tests/jobs/bzr-checkout-input.nix b/t/jobs/bzr-checkout-input.nix similarity index 100% rename from tests/jobs/bzr-checkout-input.nix rename to t/jobs/bzr-checkout-input.nix diff --git a/tests/jobs/bzr-checkout-update.sh b/t/jobs/bzr-checkout-update.sh similarity index 100% rename from tests/jobs/bzr-checkout-update.sh rename to t/jobs/bzr-checkout-update.sh diff --git a/tests/jobs/bzr-input.nix b/t/jobs/bzr-input.nix similarity index 100% rename from tests/jobs/bzr-input.nix rename to t/jobs/bzr-input.nix diff --git a/tests/jobs/bzr-update.sh b/t/jobs/bzr-update.sh similarity index 100% rename from tests/jobs/bzr-update.sh rename to t/jobs/bzr-update.sh diff --git a/tests/jobs/config.nix.in b/t/jobs/config.nix.in similarity index 100% rename from tests/jobs/config.nix.in rename to t/jobs/config.nix.in diff --git a/tests/jobs/darcs-input.nix b/t/jobs/darcs-input.nix similarity index 100% rename from tests/jobs/darcs-input.nix rename to t/jobs/darcs-input.nix diff --git a/tests/jobs/darcs-update.sh b/t/jobs/darcs-update.sh similarity index 100% rename from tests/jobs/darcs-update.sh rename to t/jobs/darcs-update.sh diff --git a/tests/jobs/deepgit-builder.sh b/t/jobs/deepgit-builder.sh similarity index 100% rename from tests/jobs/deepgit-builder.sh rename to t/jobs/deepgit-builder.sh diff --git a/tests/jobs/deepgit-input.nix b/t/jobs/deepgit-input.nix similarity index 100% rename from tests/jobs/deepgit-input.nix rename to t/jobs/deepgit-input.nix diff --git a/tests/jobs/empty-dir-builder.sh b/t/jobs/empty-dir-builder.sh similarity index 100% rename from tests/jobs/empty-dir-builder.sh rename to t/jobs/empty-dir-builder.sh diff --git a/tests/jobs/fail.sh b/t/jobs/fail.sh similarity index 100% rename from tests/jobs/fail.sh rename to t/jobs/fail.sh diff --git a/tests/jobs/git-input.nix b/t/jobs/git-input.nix similarity index 100% rename from tests/jobs/git-input.nix rename to t/jobs/git-input.nix 
diff --git a/tests/jobs/git-rev-input.nix b/t/jobs/git-rev-input.nix similarity index 100% rename from tests/jobs/git-rev-input.nix rename to t/jobs/git-rev-input.nix diff --git a/tests/jobs/git-rev-update.sh b/t/jobs/git-rev-update.sh similarity index 100% rename from tests/jobs/git-rev-update.sh rename to t/jobs/git-rev-update.sh diff --git a/tests/jobs/git-update.sh b/t/jobs/git-update.sh similarity index 100% rename from tests/jobs/git-update.sh rename to t/jobs/git-update.sh diff --git a/tests/jobs/hg-input.nix b/t/jobs/hg-input.nix similarity index 100% rename from tests/jobs/hg-input.nix rename to t/jobs/hg-input.nix diff --git a/tests/jobs/hg-update.sh b/t/jobs/hg-update.sh similarity index 100% rename from tests/jobs/hg-update.sh rename to t/jobs/hg-update.sh diff --git a/tests/jobs/runcommand.nix b/t/jobs/runcommand.nix similarity index 100% rename from tests/jobs/runcommand.nix rename to t/jobs/runcommand.nix diff --git a/tests/jobs/scm-builder.sh b/t/jobs/scm-builder.sh similarity index 100% rename from tests/jobs/scm-builder.sh rename to t/jobs/scm-builder.sh diff --git a/tests/jobs/succeed-with-failed.sh b/t/jobs/succeed-with-failed.sh similarity index 100% rename from tests/jobs/succeed-with-failed.sh rename to t/jobs/succeed-with-failed.sh diff --git a/tests/jobs/svn-checkout-input.nix b/t/jobs/svn-checkout-input.nix similarity index 100% rename from tests/jobs/svn-checkout-input.nix rename to t/jobs/svn-checkout-input.nix diff --git a/tests/jobs/svn-checkout-update.sh b/t/jobs/svn-checkout-update.sh similarity index 100% rename from tests/jobs/svn-checkout-update.sh rename to t/jobs/svn-checkout-update.sh diff --git a/tests/jobs/svn-input.nix b/t/jobs/svn-input.nix similarity index 100% rename from tests/jobs/svn-input.nix rename to t/jobs/svn-input.nix diff --git a/tests/jobs/svn-update.sh b/t/jobs/svn-update.sh similarity index 100% rename from tests/jobs/svn-update.sh rename to t/jobs/svn-update.sh diff --git a/tests/lib/Setup.pm b/t/lib/Setup.pm similarity index 100% rename from tests/lib/Setup.pm rename to t/lib/Setup.pm diff --git a/tests/lib/TestScmInput.pm b/t/lib/TestScmInput.pm similarity index 100% rename from tests/lib/TestScmInput.pm rename to t/lib/TestScmInput.pm diff --git a/tests/plugins/runcommand.t b/t/plugins/runcommand.t similarity index 100% rename from tests/plugins/runcommand.t rename to t/plugins/runcommand.t diff --git a/tests/s3-backup-test.config b/t/s3-backup-test.config similarity index 100% rename from tests/s3-backup-test.config rename to t/s3-backup-test.config diff --git a/tests/s3-backup-test.pl b/t/s3-backup-test.pl similarity index 100% rename from tests/s3-backup-test.pl rename to t/s3-backup-test.pl diff --git a/tests/setup-notifications-jobset.pl b/t/setup-notifications-jobset.pl similarity index 100% rename from tests/setup-notifications-jobset.pl rename to t/setup-notifications-jobset.pl diff --git a/tests/test.pl b/t/test.pl similarity index 100% rename from tests/test.pl rename to t/test.pl From 2befb2c1e11810948783dfed01c6adb8462fb10e Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Fri, 5 Mar 2021 00:01:49 -0800 Subject: [PATCH 038/965] doc: document how to run tests Both `make check` and `make && yath test` are now documented ways to run the test suite. 
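For orientation, a minimal sketch of the kind of t/*.t file that both `make check` and `yath test` will discover, following the conventions used throughout this suite (Setup, test_init, and hydra_setup are the repository's own test helpers; the trivial assertion is just a placeholder):

```perl
use strict;
use Setup;

# Creates a scratch data directory and a throwaway PostgreSQL instance.
my %ctx = test_init();

require Hydra::Schema;
require Hydra::Model::DB;

use Test2::V0;

my $db = Hydra::Model::DB->new;
hydra_setup($db);

ok(1, "the harness found and ran this test");

done_testing;
```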
--- doc/manual/src/hacking.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/doc/manual/src/hacking.md b/doc/manual/src/hacking.md index 6bf447f4..a7904959 100644 --- a/doc/manual/src/hacking.md +++ b/doc/manual/src/hacking.md @@ -26,3 +26,21 @@ To build Hydra, you should then do: You can run the Hydra web server in your source tree as follows: $ ./src/script/hydra-server + +You can run Hydra's test suite with the following: + + [nix-shell]$ make check + [nix-shell]$ # to run as many tests as you have cores: + [nix-shell]$ make check YATH_JOB_COUNT=$NIX_BUILD_CORES + [nix-shell]$ # or run yath directly: + [nix-shell]$ yath test + [nix-shell]$ # to run as many tests as you have cores: + [nix-shell]$ yath test -j $NIX_BUILD_CORES + +When using `yath` instead of `make check`, ensure you have run `make` +in the root of the repository at least once. + +**Warning**: Currently, the tests can fail +if run with high parallelism [due to an issue in +`Test::PostgreSQL`](https://github.com/TJC/Test-postgresql/issues/40) +causing database ports to collide. From e0d3a1c1a5e9990f056a982533c42fa39a66a958 Mon Sep 17 00:00:00 2001 From: Josh McSavaney Date: Sat, 6 Mar 2021 23:25:26 -0500 Subject: [PATCH 039/965] Make nix-build args copy-pastable via `set -x` A reproduce script includes a logline that may resemble: > using these flags: --arg nixpkgs { outPath = /tmp/build-137689173/nixpkgs/source; rev = "fdc872fa200a32456f12cc849d33b1fdbd6a933c"; shortRev = "fdc872f"; revCount = 273100; } -I nixpkgs=/tmp/build-137689173/nixpkgs/source --arg officialRelease false --option extra-binary-caches https://hydra.nixos.org/ --option system x86_64-linux /tmp/build-137689173/nixpkgs/source/pkgs/top-level/release.nix -A These are passed along to nix-build and that's fine and dandy, but you can't just copy-paste this as is, as the `{}` introduces a syntax error and the value accompanying `-A` is `''`. A very naive approach is to just `printf "%q"` the individual args, which makes them safe to copy-paste. Unfortunately, this looks awful due to the liberal usage of slashes: ``` $ printf "%q" '{ outPath = /tmp/build-137689173/nixpkgs/source; rev = "fdc872fa200a32456f12cc849d33b1fdbd6a933c"; shortRev = "fdc872f"; revCount = 273100; }' \{\ outPath\ =\ /tmp/build-137689173/nixpkgs/source\;\ rev\ =\ \"fdc872fa200a32456f12cc849d33b1fdbd6a933c\"\;\ shortRev\ =\ \"fdc872f\"\;\ revCount\ =\ 273100\;\ \} ``` Alternatively, if we just use `set -x` before we execute nix-build, we'll get the whole invocation in a friendly, copy-pastable format that nicely displays `{}`-enclosed content and preserves the empty arg following `-A`: ``` running nix-build... using this invocation: + nix-build --arg nixpkgs '{ outPath = /tmp/build-138165173/nixpkgs/source; rev = "e0e4484f2c028d2269f5ebad0660a51bbe46caa4"; shortRev = "e0e4484"; revCount = 274008; }' -I nixpkgs=/tmp/build-138165173/nixpkgs/source --arg officialRelease false --option extra-binary-caches https://hydra.nixos.org/ --option system x86_64-linux /tmp/build-138165173/nixpkgs/source/pkgs/top-level/release.nix -A '' ``` --- src/root/reproduce.tt | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/root/reproduce.tt b/src/root/reproduce.tt index 11eaccc9..dc88edfa 100644 --- a/src/root/reproduce.tt +++ b/src/root/reproduce.tt @@ -210,10 +210,9 @@ if [ -n "$printFlags" ]; then fi info "running nix-build..." 
-echo "using these flags: ${args[@]}" >&2 - -exec nix-build "${args[@]}" "${extraArgs[@]}" - +echo "using the following invocation:" >&2 +set -x +nix-build "${args[@]}" "${extraArgs[@]}" } main "$@" From a551fba34670e9d40ee7d023a95c484f2695252a Mon Sep 17 00:00:00 2001 From: Matej Cotman Date: Sat, 6 Feb 2016 03:44:16 +0100 Subject: [PATCH 040/965] statsd: add a chance to set hostname and port in hydra.conf Co-authored-by: Graham Christensen --- doc/manual/src/installation.md | 14 +++++++- src/lib/Hydra/Helper/Nix.pm | 19 ++++++++++- src/script/hydra-eval-jobset | 4 +++ src/script/hydra-send-stats | 5 +++ t/Config/statsd.t | 61 ++++++++++++++++++++++++++++++++++ 5 files changed, 101 insertions(+), 2 deletions(-) create mode 100644 t/Config/statsd.t diff --git a/doc/manual/src/installation.md b/doc/manual/src/installation.md index c038a450..94a12041 100644 --- a/doc/manual/src/installation.md +++ b/doc/manual/src/installation.md @@ -187,6 +187,18 @@ following: } } +Statsd Configuration +-------------------- + +By default, Hydra will send stats to statsd at `localhost:8125`. Point Hydra to a different server via: + +``` + + host = alternative.host + port = 18125 + +``` + Using LDAP as authentication backend (optional) ----------------------------------------------- @@ -234,4 +246,4 @@ general any LDAP group of the form *hydra\_some\_role* (notice the role_value: dn role_search_options: deref: always - + diff --git a/src/lib/Hydra/Helper/Nix.pm b/src/lib/Hydra/Helper/Nix.pm index e50723bf..c1f395c9 100644 --- a/src/lib/Hydra/Helper/Nix.pm +++ b/src/lib/Hydra/Helper/Nix.pm @@ -15,7 +15,7 @@ use IPC::Run; our @ISA = qw(Exporter); our @EXPORT = qw( getHydraHome getHydraConfig getBaseUrl - getSCMCacheDir + getSCMCacheDir getStatsdConfig registerRoot getGCRootsDir gcRootFor jobsetOverview jobsetOverview_ getDrvLogPath findLog @@ -55,6 +55,23 @@ sub getHydraConfig { } +# Return hash of statsd configuration of the following shape: +# ( +# host => string, +# port => digit +# ) +sub getStatsdConfig { + my ($config) = @_; + my $cfg = $config->{statsd}; + my %statsd = defined $cfg ? ref $cfg eq "HASH" ? %$cfg : ($cfg) : (); + + return { + "host" => %statsd{'host'} // 'localhost', + "port" => %statsd{'port'} // 8125, + } +} + + sub getBaseUrl { my ($config) = @_; return $config->{'base_uri'} // "http://" . hostname_long . 
":3000"; diff --git a/src/script/hydra-eval-jobset b/src/script/hydra-eval-jobset index a608dc91..f4a044eb 100755 --- a/src/script/hydra-eval-jobset +++ b/src/script/hydra-eval-jobset @@ -33,6 +33,10 @@ my $plugins = [Hydra::Plugin->instantiate(db => $db, config => $config)]; my $dryRun = defined $ENV{'HYDRA_DRY_RUN'}; +my $statsdConfig = Hydra::Helper::Nix::getStatsdConfig($config); +$Net::Statsd::HOST = $statsdConfig->{'host'}; +$Net::Statsd::PORT = $statsdConfig->{'port'}; + alarm 3600; # FIXME: make configurable diff --git a/src/script/hydra-send-stats b/src/script/hydra-send-stats index 110a8809..8c7d2cd5 100755 --- a/src/script/hydra-send-stats +++ b/src/script/hydra-send-stats @@ -9,6 +9,11 @@ use JSON; STDERR->autoflush(1); binmode STDERR, ":encoding(utf8)"; +my $config = getHydraConfig(); +my $statsdConfig = Hydra::Helper::Nix::getStatsdConfig($config); +$Net::Statsd::HOST = $statsdConfig->{'host'}; +$Net::Statsd::PORT = $statsdConfig->{'port'}; + sub gauge { my ($name, $val) = @_; die unless defined $val; diff --git a/t/Config/statsd.t b/t/Config/statsd.t new file mode 100644 index 00000000..be042127 --- /dev/null +++ b/t/Config/statsd.t @@ -0,0 +1,61 @@ +use strict; +use Setup; + +my %ctx = test_init(hydra_config => q| + + host = foo.bar + port = 18125 + +|); + +require Hydra::Helper::Nix; +use Test2::V0; + +is(Hydra::Helper::Nix::getStatsdConfig(Hydra::Helper::Nix::getHydraConfig()), { + 'host' => "foo.bar", + 'port' => 18125 +}, "Reading specific configuration from the hydra.conf works"); + +is(Hydra::Helper::Nix::getStatsdConfig(), { + 'host' => "localhost", + 'port' => 8125 +}, "A totally empty configuration yields default options"); + +is(Hydra::Helper::Nix::getStatsdConfig({ + "statsd" => { + + } +}), { + 'host' => "localhost", + 'port' => 8125 +}, "A empty statsd block yields default options"); + +is(Hydra::Helper::Nix::getStatsdConfig({ + "statsd" => { + 'host' => "statsdhost" + } +}), { + 'host' => "statsdhost", + 'port' => 8125 +}, "An overridden statsd host propogates, but the other defaults are returned"); + +is(Hydra::Helper::Nix::getStatsdConfig({ + "statsd" => { + 'port' => 5218 + } +}), { + 'host' => "localhost", + 'port' => 5218 +}, "An overridden statsd port propogates, but the other defaults are returned"); + +is(Hydra::Helper::Nix::getStatsdConfig({ + "statsd" => { + 'host' => 'my.statsd.host', + 'port' => 5218 + } +}), { + 'host' => "my.statsd.host", + 'port' => 5218 +}, "An overridden statsd port and host propogate"); + +done_testing; From 9e018d54431b5f23fc9c8c4823c210cc1a90bc40 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Janne=20He=C3=9F?= Date: Tue, 18 Feb 2020 02:16:18 +0100 Subject: [PATCH 041/965] Add the project name to declarative inputs This allows for more generic declarative configurations which can be shared between projects. --- doc/manual/src/projects.md | 3 ++- src/script/hydra-eval-jobset | 4 +++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/doc/manual/src/projects.md b/doc/manual/src/projects.md index b144f1d9..c4ea8f55 100644 --- a/doc/manual/src/projects.md +++ b/doc/manual/src/projects.md @@ -435,7 +435,8 @@ evaluated will go through the steps above in reverse order: 2. Hydra will use the configuration given in the declarative spec file as the jobset configuration for this evaluation. In addition to any inputs specified in the spec file, hydra will also pass the - `declInput` argument corresponding to the input fetched in step 1. 
+ `declInput` argument corresponding to the input fetched in step 1 and + the `projectName` argument containing the project\'s name. 3. As normal, hydra will build the jobs specified in the jobset repository, which in this case is the single `jobsets` job. When diff --git a/src/script/hydra-eval-jobset b/src/script/hydra-eval-jobset index f4a044eb..5e56b20d 100755 --- a/src/script/hydra-eval-jobset +++ b/src/script/hydra-eval-jobset @@ -565,7 +565,7 @@ sub checkJobsetWrapped { if ($jobsetsJobset) { my @declInputs = fetchInput($plugins, $db, $project, $jobset, "decl", $project->decltype, $project->declvalue, 0); my $declInput = @declInputs[0] or die "cannot find the input containing the declarative project specification\n"; - die "multiple alternatives for the input containing the declarative project specificaiton are not supported\n" + die "multiple alternatives for the input containing the declarative project specification are not supported\n" if scalar @declInputs != 1; my $declFile = $declInput->{storePath} . "/" . $project->declfile; my $declText = read_file($declFile) @@ -599,6 +599,8 @@ sub checkJobsetWrapped { updateDeclarativeJobset($db, $project, ".jobsets", $declSpec); $jobset->discard_changes; $inputInfo->{"declInput"} = [ $declInput ]; + $inputInfo ->{"projectName"} = [ { "value" => $project->name, } ]; + } } else { die "Declarative specification file $declFile is not a dictionary" From 2179b4b4b01d3b24b70f1dba39e1bb3bb891dd76 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Thu, 25 Feb 2021 12:47:56 -0500 Subject: [PATCH 042/965] RunCommand: emit the `finished` field as a boolean --- src/lib/Hydra/Plugin/RunCommand.pm | 2 +- t/plugins/runcommand.t | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib/Hydra/Plugin/RunCommand.pm b/src/lib/Hydra/Plugin/RunCommand.pm index fd095222..7cfb63c5 100644 --- a/src/lib/Hydra/Plugin/RunCommand.pm +++ b/src/lib/Hydra/Plugin/RunCommand.pm @@ -62,7 +62,7 @@ sub buildFinished { my $json = { event => $event, build => $build->id, - finished => $build->get_column('finished'), + finished => $build->get_column('finished') ? 
JSON::true : JSON::false,
         timestamp => $build->get_column('timestamp'),
         project => $build->get_column('project'),
         jobset => $build->get_column('jobset'),
diff --git a/t/plugins/runcommand.t b/t/plugins/runcommand.t
index 1945e1dd..7f5f4cb1 100644
--- a/t/plugins/runcommand.t
+++ b/t/plugins/runcommand.t
@@ -53,7 +53,7 @@ subtest "Validate the top level fields match" => sub {
     is($dat->{build}, $newbuild->id, "The build event matches our expected ID.");
     is($dat->{buildStatus}, 0, "The build status matches.");
     is($dat->{event}, "buildFinished", "The build event matches.");
-    is($dat->{finished}, 1, "The build finished.");
+    is($dat->{finished}, JSON::true, "The build finished.");
     is($dat->{project}, "tests", "The project matches.");
     is($dat->{jobset}, "basic", "The jobset matches.");
     is($dat->{job}, "metrics", "The job matches.");

From 930f05c38eeac63ad6c3e3250de2667e2df2e96e Mon Sep 17 00:00:00 2001
From: Shea Levy
Date: Wed, 10 Mar 2021 12:42:19 -0500
Subject: [PATCH 043/965] Bump Nix version

---
 flake.lock                              | 12 ++++++------
 src/hydra-queue-runner/build-remote.cc  |  7 +++++--
 src/hydra-queue-runner/queue-monitor.cc |  4 +++-
 3 files changed, 14 insertions(+), 9 deletions(-)

diff --git a/flake.lock b/flake.lock
index 82427e9f..45995506 100644
--- a/flake.lock
+++ b/flake.lock
@@ -5,11 +5,11 @@
       "nixpkgs": "nixpkgs"
     },
     "locked": {
-      "lastModified": 1613747933,
-      "narHash": "sha256-Q6VuNRdr87B4F3ILiM6IlQ+bkIYbQTs6EEAtwNrvl1Y=",
       "owner": "NixOS",
       "repo": "nix",
-      "rev": "548437c2347159c4c79352283dd12ce58324f1d6",
+      "lastModified": 1615194819,
+      "narHash": "sha256-LfPUsgeFRBzRgTp+828E7UhiVItSYH+CK6IJcjmlcZ4=",
+      "rev": "1c0e3e453d41b869e4ac7e25dc1c00c349a7c411",
       "type": "github"
     },
     "original": {
@@ -19,11 +19,11 @@
   },
   "nixpkgs": {
     "locked": {
-      "lastModified": 1602702596,
-      "narHash": "sha256-fqJ4UgOb4ZUnCDIapDb4gCrtAah5Rnr2/At3IzMitig=",
       "owner": "NixOS",
       "repo": "nixpkgs",
-      "rev": "ad0d20345219790533ebe06571f82ed6b034db31",
+      "lastModified": 1614309161,
+      "narHash": "sha256-93kRxDPyEW9QIpxU71kCaV1r+hgOgP6/aVgC7vvO8IU=",
+      "rev": "0e499fde7af3c28d63e9b13636716b86c3162b93",
      "type": "github"
     },
     "original": {
diff --git a/src/hydra-queue-runner/build-remote.cc b/src/hydra-queue-runner/build-remote.cc
index 51f96db2..cce46a05 100644
--- a/src/hydra-queue-runner/build-remote.cc
+++ b/src/hydra-queue-runner/build-remote.cc
@@ -264,8 +264,11 @@ void State::buildRemote(ref<Store> destStore,
        a no-op for regular stores, but for the binary cache store,
        this will copy the inputs to the binary cache from the local
        store. */
-    if (localStore != std::shared_ptr<Store>(destStore))
-        copyClosure(ref<Store>(localStore), destStore, step->drv->inputSrcs, NoRepair, NoCheckSigs);
+    if (localStore != std::shared_ptr<Store>(destStore)) {
+        StorePathSet closure;
+        localStore->computeFSClosure(step->drv->inputSrcs, closure);
+        copyPaths(ref<Store>(localStore), destStore, closure, NoRepair, NoCheckSigs, NoSubstitute);
+    }
 
     /* Copy the input closure. */
     if (!machine->isLocalhost()) {
diff --git a/src/hydra-queue-runner/queue-monitor.cc b/src/hydra-queue-runner/queue-monitor.cc
index 53d00f99..9c92f635 100644
--- a/src/hydra-queue-runner/queue-monitor.cc
+++ b/src/hydra-queue-runner/queue-monitor.cc
@@ -500,7 +500,9 @@ Step::ptr State::createStep(ref<Store> destStore,
                     // FIXME: should copy directly from substituter to destStore.
                }

-                copyClosure(ref<Store>(localStore), destStore, {*path});
+                StorePathSet closure;
+                localStore->computeFSClosure({*path}, closure);
+                copyPaths(ref<Store>(localStore), destStore, closure, NoRepair, CheckSigs, NoSubstitute);

                 time_t stopTime = time(0);

From 3c86083d211e5f5395d927c0a27c428fccb15181 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Janne=20He=C3=9F?=
Date: Sun, 13 Sep 2020 17:46:53 +0200
Subject: [PATCH 044/965] Fixup #717 "Add the project name to declarative
 inputs"
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

```
Mar 10 16:22:35 hydra-b hydra-evaluator[41419]: DBIx::Class::Storage::DBI::_dbh_execute(): DBI Exception: DBD::Pg::st execute failed: ERROR:  null value in column "type" violates not-null constraint
Mar 10 16:22:35 hydra-b hydra-evaluator[41419]: DETAIL:  Failing row contains (62358, projectName, 0, null, null, null, hackworthltd, null, , null). [for Statement "INSERT INTO jobsetevalinputs ( altnr, dependency, eval, name, path, revision, sha256hash, type, uri, value) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )" with ParamValues: 1='0', 2=undef, 3='62358', 4='projectName', 5='', 6=undef, 7=undef, 8=undef, 9=undef, 10='hackworthltd'] at /nix/store/cmqblv437mp57yz5lwvkzcqca4ldf3r5-hydra-0.1.20210308.ebf1cd2/bin/.hydra-eval-jobset-wrapped line 793
Mar 10 16:22:35 hydra-b hydra-evaluator[25828]: evaluation of jobset ‘hackworthltd:.jobsets (jobset#1)’ failed with exit code 1
```

Use the abstraction for creating inputs to simulate the project name
input.

Co-authored-by: Graham Christensen
---
 src/script/hydra-eval-jobset | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/script/hydra-eval-jobset b/src/script/hydra-eval-jobset
index 5e56b20d..e6189b3b 100755
--- a/src/script/hydra-eval-jobset
+++ b/src/script/hydra-eval-jobset
@@ -599,7 +599,7 @@ sub checkJobsetWrapped {
                 updateDeclarativeJobset($db, $project, ".jobsets", $declSpec);
                 $jobset->discard_changes;
                 $inputInfo->{"declInput"} = [ $declInput ];
-                $inputInfo ->{"projectName"} = [ { "value" => $project->name, } ];
+                $inputInfo->{"projectName"} = [ fetchInput($plugins, $db, $project, $jobset, "", "string", $project->name, 0) ];

             }
         } else {

From 87d46ad5d66c080023d89a27decfa0eadb6c14eb Mon Sep 17 00:00:00 2001
From: Graham Christensen
Date: Tue, 16 Mar 2021 16:09:36 -0400
Subject: [PATCH 045/965] hydra-queue-runner: --build-one: correctly handle a
 cached build

Previously, the build ID would never flow through channels which
exited. This patch tracks the buildOne state as part of State and
exits instead of waiting forever for new work.

The code around buildOne is a bit rough, making this a bit weird to
implement, but since it is only used for testing, the value of
improving it on its own is a bit questionable.
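
The pattern, distilled (a minimal sketch for illustration only, not the
actual queue-runner code; `finishBuild` and `monitorLoop` are
hypothetical stand-ins for the real builder and queue-monitor paths):

```
// Illustrative sketch: the "done" flag for the single requested build
// lives on the shared State object instead of a local `quit` variable,
// so every code path that finishes that build (success, failure, or a
// cached result that never reaches a builder channel) can flip it, and
// the monitor loop can exit instead of blocking on new work forever.
#include <cstdlib>

struct State {
    unsigned int buildOne = 0;   // ID requested via --build-one (0 = unset)
    bool buildOneDone = false;   // set once that build reaches a terminal state

    // Called from any path that finishes a build.
    void finishBuild(unsigned int buildId) {
        if (!buildOneDone && buildId == buildOne) buildOneDone = true;
    }

    void monitorLoop() {
        bool quit = false;
        while (!quit) {
            // ... process queued builds; helpers call finishBuild() ...
            finishBuild(buildOne); // stand-in for real work
            if (buildOne && buildOneDone) quit = true;
        }
        std::exit(0); // testing hack, as in the patch: skip waiting for notifications
    }
};

int main() {
    State state;
    state.buildOne = 42; // pretend --build-one 42 was passed
    state.monitorLoop(); // exits as soon as build 42 is marked done
}
```

Running this exits as soon as the single requested build is marked
done, which is the behavior the real loop previously lacked for cached
builds.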
---
 src/hydra-queue-runner/builder.cc       | 14 +++++++-------
 src/hydra-queue-runner/dispatcher.cc    |  5 ++---
 src/hydra-queue-runner/queue-monitor.cc | 13 +++++++++++--
 src/hydra-queue-runner/state.hh         |  4 ++--
 4 files changed, 22 insertions(+), 14 deletions(-)

diff --git a/src/hydra-queue-runner/builder.cc b/src/hydra-queue-runner/builder.cc
index 7f8830f9..89aa7d15 100644
--- a/src/hydra-queue-runner/builder.cc
+++ b/src/hydra-queue-runner/builder.cc
@@ -148,7 +148,8 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
             localStore->printStorePath(step->drvPath), repeats + 1, machine->sshName, buildId, (dependents.size() - 1));
     }

-    bool quit = buildId == buildOne && step->drvPath == *buildDrvPath;
+    if (!buildOneDone)
+        buildOneDone = buildId == buildOne && step->drvPath == *buildDrvPath;

     RemoteResult result;
     BuildOutput res;
@@ -265,7 +266,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
                 if (retry) {
                     auto mc = startDbUpdate();
                     stepFinished = true;
-                    if (quit) exit(1);
+                    if (buildOneDone) exit(1);
                     return sRetry;
                 }
             }
@@ -376,7 +377,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
             }

         } else
-            failStep(*conn, step, buildId, result, machine, stepFinished, quit);
+            failStep(*conn, step, buildId, result, machine, stepFinished);

         // FIXME: keep stats about aborted steps?
         nrStepsDone++;
@@ -386,7 +387,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
         machine->state->totalStepTime += stepStopTime - stepStartTime;
         machine->state->totalStepBuildTime += result.stopTime - result.startTime;

-    if (quit) exit(0); // testing hack; FIXME: this won't run plugins
+    if (buildOneDone) exit(0); // testing hack; FIXME: this won't run plugins

     return sDone;
 }
@@ -398,8 +399,7 @@ void State::failStep(
     BuildID buildId,
     const RemoteResult & result,
     Machine::ptr machine,
-    bool & stepFinished,
-    bool & quit)
+    bool & stepFinished)
 {
     /* Register failure in the database for all Build objects that
        directly or indirectly depend on this step. */
@@ -481,7 +481,7 @@ void State::failStep(
                 b->finishedInDB = true;
                 builds_->erase(b->id);
                 dependentIDs.push_back(b->id);
-                if (buildOne == b->id) quit = true;
+                if (!buildOneDone && buildOne == b->id) buildOneDone = true;
             }
         }

diff --git a/src/hydra-queue-runner/dispatcher.cc b/src/hydra-queue-runner/dispatcher.cc
index 6dc7f700..8c497a66 100644
--- a/src/hydra-queue-runner/dispatcher.cc
+++ b/src/hydra-queue-runner/dispatcher.cc
@@ -374,7 +374,6 @@ void State::abortUnsupported()
             if (!build) build = *dependents.begin();

             bool stepFinished = false;
-            bool quit = false;

             failStep(
                 *conn, step, build->id,
@@ -385,9 +384,9 @@ void State::abortUnsupported()
                     .startTime = now2,
                     .stopTime = now2,
                 },
-                nullptr, stepFinished, quit);
+                nullptr, stepFinished);

-            if (quit) exit(1);
+            if (buildOneDone) exit(1);
         }
     }

diff --git a/src/hydra-queue-runner/queue-monitor.cc b/src/hydra-queue-runner/queue-monitor.cc
index 9c92f635..26d780b2 100644
--- a/src/hydra-queue-runner/queue-monitor.cc
+++ b/src/hydra-queue-runner/queue-monitor.cc
@@ -35,14 +35,17 @@ void State::queueMonitorLoop()

     unsigned int lastBuildId = 0;

-    while (true) {
+    bool quit = false;
+    while (!quit) {
         localStore->clearPathInfoCache();

         bool done = getQueuedBuilds(*conn, destStore, lastBuildId);

+        if (buildOne && buildOneDone) quit = true;
+
         /* Sleep until we get notification from the database about an
            event.
*/ - if (done) { + if (done && !quit) { conn->await_notification(); nrQueueWakeups++; } else @@ -65,6 +68,8 @@ void State::queueMonitorLoop() processJobsetSharesChange(*conn); } } + + exit(0); } @@ -160,6 +165,7 @@ bool State::getQueuedBuilds(Connection & conn, /* Some step previously failed, so mark the build as failed right away. */ + if (!buildOneDone && build->id == buildOne) buildOneDone = true; printMsg(lvlError, "marking build %d as cached failure due to ‘%s’", build->id, localStore->printStorePath(ex.step->drvPath)); if (!build->finishedInDB) { @@ -231,6 +237,7 @@ bool State::getQueuedBuilds(Connection & conn, auto mc = startDbUpdate(); pqxx::work txn(conn); time_t now = time(0); + if (!buildOneDone && build->id == buildOne) buildOneDone = true; printMsg(lvlInfo, "marking build %1% as succeeded (cached)", build->id); markSucceededBuild(txn, build, res, true, now, now); notifyBuildFinished(txn, build->id, {}); @@ -289,6 +296,8 @@ bool State::getQueuedBuilds(Connection & conn, for (auto & r : newRunnable) makeRunnable(r); + if (buildOne && newRunnable.size() == 0) buildOneDone = true; + nrBuildsRead += nrAdded; /* Stop after a certain time to allow priority bumps to be diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh index 57fd5a77..5a5c5f94 100644 --- a/src/hydra-queue-runner/state.hh +++ b/src/hydra-queue-runner/state.hh @@ -367,6 +367,7 @@ private: /* Specific build to do for --build-one (testing only). */ BuildID buildOne; + bool buildOneDone = false; /* Statistics per machine type for the Hydra auto-scaler. */ struct MachineType @@ -485,8 +486,7 @@ private: BuildID buildId, const RemoteResult & result, Machine::ptr machine, - bool & stepFinished, - bool & quit); + bool & stepFinished); Jobset::ptr createJobset(pqxx::work & txn, const std::string & projectName, const std::string & jobsetName); From d62a2c16574113828b36e363b9c332be9ca31b23 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 17 Mar 2021 11:07:19 -0400 Subject: [PATCH 046/965] NixExprs: extract the `escape` function and test it --- src/lib/Hydra/Helper/Escape.pm | 14 ++++++++++++++ src/lib/Hydra/View/NixExprs.pm | 25 +++++++++---------------- t/Helper/escape.t | 25 +++++++++++++++++++++++++ 3 files changed, 48 insertions(+), 16 deletions(-) create mode 100644 src/lib/Hydra/Helper/Escape.pm create mode 100644 t/Helper/escape.t diff --git a/src/lib/Hydra/Helper/Escape.pm b/src/lib/Hydra/Helper/Escape.pm new file mode 100644 index 00000000..3312c5ea --- /dev/null +++ b/src/lib/Hydra/Helper/Escape.pm @@ -0,0 +1,14 @@ +package Hydra::Helper::Escape; + +use strict; +use base qw(Exporter); + +our @EXPORT = qw(escapeString); + +sub escapeString { + my ($s) = @_; + $s =~ s|\\|\\\\|g; + $s =~ s|\"|\\\"|g; + $s =~ s|\$|\\\$|g; + return "\"" . $s . "\""; +} diff --git a/src/lib/Hydra/View/NixExprs.pm b/src/lib/Hydra/View/NixExprs.pm index 7bfa3109..fa2f7086 100644 --- a/src/lib/Hydra/View/NixExprs.pm +++ b/src/lib/Hydra/View/NixExprs.pm @@ -3,19 +3,12 @@ package Hydra::View::NixExprs; use strict; use base qw/Catalyst::View/; use Hydra::Helper::Nix; +use Hydra::Helper::Escape; use Archive::Tar; use IO::Compress::Bzip2 qw(bzip2); use Encode; -sub escape { - my ($s) = @_; - $s =~ s|\\|\\\\|g; - $s =~ s|\"|\\\"|g; - $s =~ s|\$|\\\$|g; - return "\"" . $s . 
"\""; -} - sub process { my ($self, $c) = @_; @@ -62,7 +55,7 @@ EOF my $first = 1; foreach my $system (keys %perSystem) { $res .= "else " if !$first; - $res .= "if system == ${\escape $system} then {\n\n"; + $res .= "if system == ${\escapeString $system} then {\n\n"; foreach my $job (keys %{$perSystem{$system}}) { my $pkg = $perSystem{$system}->{$job}; @@ -70,21 +63,21 @@ EOF $res .= " # Hydra build ${\$build->id}\n"; my $attr = $build->get_column('job'); $attr =~ s/\./-/g; - $res .= " ${\escape $attr} = (mkFakeDerivation {\n"; + $res .= " ${\escapeString $attr} = (mkFakeDerivation {\n"; $res .= " type = \"derivation\";\n"; - $res .= " name = ${\escape ($build->get_column('releasename') or $build->nixname)};\n"; - $res .= " system = ${\escape $build->system};\n"; + $res .= " name = ${\escapeString ($build->get_column('releasename') or $build->nixname)};\n"; + $res .= " system = ${\escapeString $build->system};\n"; $res .= " meta = {\n"; - $res .= " description = ${\escape $build->description};\n" + $res .= " description = ${\escapeString $build->description};\n" if $build->description; - $res .= " license = ${\escape $build->license};\n" + $res .= " license = ${\escapeString $build->license};\n" if $build->license; - $res .= " maintainers = ${\escape $build->maintainers};\n" + $res .= " maintainers = ${\escapeString $build->maintainers};\n" if $build->maintainers; $res .= " };\n"; $res .= " } {\n"; my @outputNames = sort (keys %{$pkg->{outputs}}); - $res .= " ${\escape $_} = ${\escape $pkg->{outputs}->{$_}};\n" foreach @outputNames; + $res .= " ${\escapeString $_} = ${\escapeString $pkg->{outputs}->{$_}};\n" foreach @outputNames; my $out = defined $pkg->{outputs}->{"out"} ? "out" : $outputNames[0]; $res .= " }).$out;\n\n"; } diff --git a/t/Helper/escape.t b/t/Helper/escape.t new file mode 100644 index 00000000..f614cec3 --- /dev/null +++ b/t/Helper/escape.t @@ -0,0 +1,25 @@ +use strict; +use Setup; +use Data::Dumper; +use Test2::V0; +use Hydra::Helper::Escape; + +subtest "checking individual attribute set elements" => sub { + my %values = ( + "" => '""', + "." => '"."', + "foobar" => '"foobar"', + "foo.bar" => '"foo.bar"', + "🌮" => '"🌮"', + 'foo"bar' => '"foo\"bar"', + 'foo\\bar' => '"foo\\\\bar"', + '$bar' => '"\\$bar"', + ); + + for my $input (keys %values) { + my $value = $values{$input}; + is(escapeString($input), $value, "Escaping the value: " . 
$input); + } +}; + +done_testing; From 88e0198a8ec92060328a42fee4aa54e044fe08e4 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 17 Mar 2021 11:53:00 -0400 Subject: [PATCH 047/965] Create a helper for dealing with nested attribute sets --- src/lib/Hydra/Helper/AttributeSet.pm | 56 ++++++++++++++++++++++++++++ src/lib/Hydra/Helper/Escape.pm | 9 ++++- t/Helper/attributeset.t | 53 ++++++++++++++++++++++++++ t/Helper/escape.t | 19 ++++++++++ 4 files changed, 136 insertions(+), 1 deletion(-) create mode 100644 src/lib/Hydra/Helper/AttributeSet.pm create mode 100644 t/Helper/attributeset.t diff --git a/src/lib/Hydra/Helper/AttributeSet.pm b/src/lib/Hydra/Helper/AttributeSet.pm new file mode 100644 index 00000000..b750d6e1 --- /dev/null +++ b/src/lib/Hydra/Helper/AttributeSet.pm @@ -0,0 +1,56 @@ +package Hydra::Helper::AttributeSet; + +use strict; +use warnings; + +sub new { + my ($self) = @_; + return bless { "paths" => [] }, $self; +} + +sub registerValue { + my ($self, $attributePath) = @_; + + my @pathParts = splitPath($attributePath); + + pop(@pathParts); + if (scalar(@pathParts) == 0) { + return; + } + + my $lineage = ""; + for my $pathPart (@pathParts) { + $lineage = $self->registerChild($lineage, $pathPart); + } +} + +sub registerChild { + my ($self, $parent, $attributePath) = @_; + if ($parent ne "") { + $parent .= "." + } + + my $name = $parent . $attributePath; + if (!grep { $_ eq $name} @{$self->{"paths"}}) { + push(@{$self->{"paths"}}, $name); + } + return $name; +} + +sub splitPath { + my ($s) = @_; + + if ($s eq "") { + return ('') + } + + return split(/\./, $s, -1); +} + +sub enumerate { + my ($self) = @_; + my @paths = sort { length($a) <=> length($b) } @{$self->{"paths"}}; + return wantarray ? @paths : \@paths; +} + +1; diff --git a/src/lib/Hydra/Helper/Escape.pm b/src/lib/Hydra/Helper/Escape.pm index 3312c5ea..f7682a4f 100644 --- a/src/lib/Hydra/Helper/Escape.pm +++ b/src/lib/Hydra/Helper/Escape.pm @@ -2,8 +2,9 @@ package Hydra::Helper::Escape; use strict; use base qw(Exporter); +use Hydra::Helper::AttributeSet; -our @EXPORT = qw(escapeString); +our @EXPORT = qw(escapeString escapeAttributePath); sub escapeString { my ($s) = @_; @@ -12,3 +13,9 @@ sub escapeString { $s =~ s|\$|\\\$|g; return "\"" . $s . "\""; } + +sub escapeAttributePath { + my ($s) = @_; + + return join(".", map( { escapeString($_) } Hydra::Helper::AttributeSet::splitPath($s))); +} diff --git a/t/Helper/attributeset.t b/t/Helper/attributeset.t new file mode 100644 index 00000000..112cd9be --- /dev/null +++ b/t/Helper/attributeset.t @@ -0,0 +1,53 @@ +use strict; +use warnings; +use Setup; +use Data::Dumper; +use Test2::V0; +use Hydra::Helper::AttributeSet; + + +subtest "splitting an attribute path in to its component parts" => sub { + my %values = ( + "" => [''], + "." => ['', ''], + "...." => ['', '', '', '', ''], + "foobar" => ['foobar'], + "foo.bar" => ['foo', 'bar'], + "🌮" => ['🌮'], + + # not supported: 'foo."bar.baz".tux' => [ 'foo', 'bar.baz', 'tux' ] + # the edge cases are fairly significant around escaping and unescaping. + ); + + for my $input (keys %values) { + my @value = @{$values{$input}}; + my @components = Hydra::Helper::AttributeSet::splitPath($input); + is(\@components, \@value, "Splitting the attribute path: " . 
$input); + } +}; + +my $attrs = Hydra::Helper::AttributeSet->new(); +$attrs->registerValue("foo"); +$attrs->registerValue("bar.baz.tux"); +$attrs->registerValue("bar.baz.bux.foo.bar.baz"); + +is( + $attrs->enumerate(), + [ + # "foo": skipped since we're registering values, and we + # only want to track nested attribute sets. + + # "bar.baz.tux": expand the path + "bar", + "bar.baz", + + #"bar.baz.bux.foo.bar.baz": expand the path, but only register new + # attribute set names. + "bar.baz.bux", + "bar.baz.bux.foo", + "bar.baz.bux.foo.bar", + ], + "Attribute set paths are registered." +); + +done_testing; diff --git a/t/Helper/escape.t b/t/Helper/escape.t index f614cec3..22dd4d47 100644 --- a/t/Helper/escape.t +++ b/t/Helper/escape.t @@ -22,4 +22,23 @@ subtest "checking individual attribute set elements" => sub { } }; +subtest "escaping path components of a nested attribute" => sub { + my %values = ( + "" => '""', + "." => '"".""', + "...." => '""."".""."".""', + "foobar" => '"foobar"', + "foo.bar" => '"foo"."bar"', + "🌮" => '"🌮"', + 'foo"bar' => '"foo\"bar"', + 'foo\\bar' => '"foo\\\\bar"', + '$bar' => '"\\$bar"', + ); + + for my $input (keys %values) { + my $value = $values{$input}; + is(escapeAttributePath($input), $value, "Escaping the attribute path: " . $input); + } +}; + done_testing; From 019aef3d419b03266f7a137300b54bcfa167350e Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 17 Mar 2021 11:20:19 -0400 Subject: [PATCH 048/965] Test the fake derivations channel, asserting nested packages are properly represented. This is a breaking change. Previously, packages named `packageset.foo` would be exposed in the fake derivation channel as `packageset-foo`. Presumably this was done to avoid needing to track attribute sets, and to avoid the complexity. I think this now correctly handles the complexity and properly mirrors the input expressions layout. 
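
To make the new layout concrete, here is a standalone sketch (not code
from this repository; it inlines simplified versions of `escapeString`
and the attribute-path tracking from the previous commits, and the job
names are illustrative) of how job names now map to nested attributes
in the generated channel expression:

```
#!/usr/bin/env perl
use strict;
use warnings;

# Quote one attribute-path component the way the channel generator does.
sub escapeString {
    my ($s) = @_;
    $s =~ s|\\|\\\\|g;
    $s =~ s|\"|\\\"|g;
    $s =~ s|\$|\\\$|g;
    return "\"" . $s . "\"";
}

# Quote every dot-separated component of a job name.
sub escapeAttributePath {
    my ($s) = @_;
    return join ".", map { escapeString($_) } split(/\./, $s, -1);
}

my @jobs = ("packageset.nested", "packageset-nested");
my %parents;

for my $job (@jobs) {
    my @parts = split(/\./, $job, -1);
    pop @parts;    # drop the leaf; only its ancestors are attribute *sets*
    my @prefix;
    for my $part (@parts) {
        push @prefix, $part;
        $parents{join(".", @prefix)} = 1;
    }
    print escapeAttributePath($job), " = <fake derivation>;\n";
}

# Each parent set needs recurseForDerivations so tools descend into it.
print escapeAttributePath($_), ".recurseForDerivations = true;\n"
    for sort keys %parents;

# Output:
#   "packageset"."nested" = <fake derivation>;
#   "packageset-nested" = <fake derivation>;
#   "packageset".recurseForDerivations = true;
```

The key point is that `packageset.nested` and `packageset-nested` stay
distinct instead of colliding, and every ancestor attribute set gains
`recurseForDerivations = true;` so channel consumers descend into it.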
--- src/lib/Hydra/View/NixExprs.pm | 16 ++++++--- t/Controller/Jobset/channel.t | 62 ++++++++++++++++++++++++++++++++++ t/jobs/nested-attributes.nix | 36 ++++++++++++++++++++ 3 files changed, 109 insertions(+), 5 deletions(-) create mode 100644 t/Controller/Jobset/channel.t create mode 100644 t/jobs/nested-attributes.nix diff --git a/src/lib/Hydra/View/NixExprs.pm b/src/lib/Hydra/View/NixExprs.pm index fa2f7086..194c51ec 100644 --- a/src/lib/Hydra/View/NixExprs.pm +++ b/src/lib/Hydra/View/NixExprs.pm @@ -4,10 +4,11 @@ use strict; use base qw/Catalyst::View/; use Hydra::Helper::Nix; use Hydra::Helper::Escape; +use Hydra::Helper::AttributeSet; use Archive::Tar; use IO::Compress::Bzip2 qw(bzip2); use Encode; - +use Data::Dumper; sub process { @@ -56,14 +57,15 @@ EOF foreach my $system (keys %perSystem) { $res .= "else " if !$first; $res .= "if system == ${\escapeString $system} then {\n\n"; - + my $attrsets = Hydra::Helper::AttributeSet->new(); foreach my $job (keys %{$perSystem{$system}}) { my $pkg = $perSystem{$system}->{$job}; my $build = $pkg->{build}; - $res .= " # Hydra build ${\$build->id}\n"; my $attr = $build->get_column('job'); - $attr =~ s/\./-/g; - $res .= " ${\escapeString $attr} = (mkFakeDerivation {\n"; + $attrsets->registerValue($attr); + + $res .= " # Hydra build ${\$build->id}\n"; + $res .= " ${\escapeAttributePath $attr} = (mkFakeDerivation {\n"; $res .= " type = \"derivation\";\n"; $res .= " name = ${\escapeString ($build->get_column('releasename') or $build->nixname)};\n"; $res .= " system = ${\escapeString $build->system};\n"; @@ -82,6 +84,10 @@ EOF $res .= " }).$out;\n\n"; } + for my $attrset ($attrsets->enumerate()) { + $res .= " ${\escapeAttributePath $attrset}.recurseForDerivations = true;\n\n"; + } + $res .= "}\n\n"; $first = 0; } diff --git a/t/Controller/Jobset/channel.t b/t/Controller/Jobset/channel.t new file mode 100644 index 00000000..2b034025 --- /dev/null +++ b/t/Controller/Jobset/channel.t @@ -0,0 +1,62 @@ +use feature 'unicode_strings'; +use strict; +use Setup; +use IO::Uncompress::Bunzip2 qw(bunzip2); +use Archive::Tar; +use JSON qw(decode_json); +use Data::Dumper; +my %ctx = test_init(); + +require Hydra::Schema; +require Hydra::Model::DB; +require Hydra::Helper::Nix; + +use Test2::V0; +require Catalyst::Test; +Catalyst::Test->import('Hydra'); + +my $db = Hydra::Model::DB->new; +hydra_setup($db); + +my $project = $db->resultset('Projects')->create({name => "tests", displayname => "", owner => "root"}); + +# Most basic test case, no parameters +my $jobset = createBaseJobset("nested-attributes", "nested-attributes.nix", $ctx{jobsdir}); + +ok(evalSucceeds($jobset)); +is(nrQueuedBuildsForJobset($jobset), 4); + +for my $build (queuedBuildsForJobset($jobset)) { + ok(runBuild($build), "Build '".$build->job."' should exit with code 0"); + my $newbuild = $db->resultset('Builds')->find($build->id); + is($newbuild->finished, 1, "Build '".$build->job."' should be finished."); + is($newbuild->buildstatus, 0, "Build '".$build->job."' should have buildstatus 0."); +} + +my $compressed = get('/jobset/tests/nested-attributes/channel/latest/nixexprs.tar.bz2'); +my $tarcontent; +bunzip2(\$compressed => \$tarcontent); +open(my $tarfh, "<", \$tarcontent); +my $tar = Archive::Tar->new($tarfh); + +my $defaultnix = $ctx{"tmpdir"} . 
"/channel-default.nix"; +$tar->extract_file("channel/default.nix", $defaultnix); + +print STDERR $tar->get_content("channel/default.nix"); + +(my $status, my $stdout, my $stderr) = Hydra::Helper::Nix::captureStdoutStderr(5, "nix-env", "--json", "--query", "--available", "--attr-path", "--file", $defaultnix); +is($stderr, "", "Stderr should be empty"); +is($status, 0, "Querying the packages should succeed"); + +my $packages = decode_json($stdout); +my $keys = [sort keys %$packages]; +is($keys, [ + "packageset-nested", + "packageset.deeper.deeper.nested", + "packageset.nested", + "packageset.nested2", +]); +is($packages->{"packageset-nested"}->{"name"}, "actually-top-level"); +is($packages->{"packageset.nested"}->{"name"}, "actually-nested"); + +done_testing; diff --git a/t/jobs/nested-attributes.nix b/t/jobs/nested-attributes.nix new file mode 100644 index 00000000..4cd90d9b --- /dev/null +++ b/t/jobs/nested-attributes.nix @@ -0,0 +1,36 @@ +with import ./config.nix; +rec { + # Given a jobset containing a package set named X with an interior member Y, + # expose the interior member Y with the name X-Y. This is to exercise a bug + # in the NixExprs view's generated Nix expression which flattens the + # package set namespace from `X.Y` to `X-Y`. If the bug is present, the + # resulting expression incorrectly renders two `X-Y` packages. + packageset = { + recurseForDerivations = true; + deeper = { + recurseForDerivations = true; + deeper = { + recurseForDerivations = true; + + nested = mkDerivation { + name = "much-too-deep"; + builder = ./empty-dir-builder.sh; + }; + }; + }; + + nested = mkDerivation { + name = "actually-nested"; + builder = ./empty-dir-builder.sh; + }; + + nested2 = mkDerivation { + name = "actually-nested2"; + builder = ./empty-dir-builder.sh; + }; + }; + packageset-nested = mkDerivation { + name = "actually-top-level"; + builder = ./empty-dir-builder.sh; + }; +} From 6b7ca554f95425b2e86117f4a990bb3755edba83 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Thu, 18 Mar 2021 16:27:21 -0400 Subject: [PATCH 049/965] Update src/lib/Hydra/Helper/Escape.pm: fewer ()s Co-authored-by: Stig --- src/lib/Hydra/Helper/Escape.pm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib/Hydra/Helper/Escape.pm b/src/lib/Hydra/Helper/Escape.pm index f7682a4f..3037951f 100644 --- a/src/lib/Hydra/Helper/Escape.pm +++ b/src/lib/Hydra/Helper/Escape.pm @@ -17,5 +17,5 @@ sub escapeString { sub escapeAttributePath { my ($s) = @_; - return join(".", map( { escapeString($_) } Hydra::Helper::AttributeSet::splitPath($s))); + return join ".", map { escapeString($_) } Hydra::Helper::AttributeSet::splitPath($s); } From 6f662a606abe02c1c4918742c21eeec772e8fcfc Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Sat, 20 Mar 2021 09:06:54 -0400 Subject: [PATCH 050/965] hydra-send-stats: add a failing test asserting it can run --- t/lib/Setup.pm | 5 ++++- t/scripts/hydra-send-stats.t | 20 ++++++++++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) create mode 100644 t/scripts/hydra-send-stats.t diff --git a/t/lib/Setup.pm b/t/lib/Setup.pm index 3cc9828e..cdea38ce 100644 --- a/t/lib/Setup.pm +++ b/t/lib/Setup.pm @@ -9,7 +9,10 @@ use File::Basename; use Cwd qw(abs_path getcwd); our @ISA = qw(Exporter); -our @EXPORT = qw(test_init hydra_setup nrBuildsForJobset queuedBuildsForJobset nrQueuedBuildsForJobset createBaseJobset createJobsetWithOneInput evalSucceeds runBuild sendNotifications updateRepository); +our @EXPORT = qw(test_init hydra_setup nrBuildsForJobset 
queuedBuildsForJobset
+    nrQueuedBuildsForJobset createBaseJobset createJobsetWithOneInput
+    evalSucceeds runBuild sendNotifications updateRepository
+    captureStdoutStderr);

 # Set up the environment for running tests.
 #
diff --git a/t/scripts/hydra-send-stats.t b/t/scripts/hydra-send-stats.t
new file mode 100644
index 00000000..174bde64
--- /dev/null
+++ b/t/scripts/hydra-send-stats.t
@@ -0,0 +1,20 @@
+use feature 'unicode_strings';
+use strict;
+use Setup;
+
+my %ctx = test_init();
+
+require Hydra::Schema;
+require Hydra::Model::DB;
+
+use Test2::V0;
+
+my $db = Hydra::Model::DB->new;
+hydra_setup($db);
+
+my ($res, $stdout, $stderr) = captureStdoutStderr(60, ("hydra-send-stats"));
+is($stdout, "", "hydra-send-stats stdout should be empty");
+is($stderr, "", "hydra-send-stats stderr should be empty");
+is($res, 0, "hydra-send-stats --once should exit zero");
+
+done_testing;

From 6bb180a0f2c136375d6d2fe5ae441a7c0f949b90 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B6rg=20Thalheim?=
Date: Sat, 20 Mar 2021 06:57:37 +0100
Subject: [PATCH 051/965] hydra-send-stats: fix imports

---
 src/script/hydra-send-stats | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/script/hydra-send-stats b/src/script/hydra-send-stats
index 8c7d2cd5..ee047276 100755
--- a/src/script/hydra-send-stats
+++ b/src/script/hydra-send-stats
@@ -2,6 +2,7 @@

 use strict;
 use utf8;
+use Hydra::Helper::Nix;
 use Net::Statsd;
 use File::Slurp;
 use JSON;
@@ -10,7 +11,7 @@ STDERR->autoflush(1);
 binmode STDERR, ":encoding(utf8)";

 my $config = getHydraConfig();
-my $statsdConfig = Hydra::Helper::Nix::getStatsdConfig($config);
+my $statsdConfig = getStatsdConfig($config);
 $Net::Statsd::HOST = $statsdConfig->{'host'};
 $Net::Statsd::PORT = $statsdConfig->{'port'};

From 425c7ff17f2f801894902184fb4b39f14c944d55 Mon Sep 17 00:00:00 2001
From: Graham Christensen
Date: Sat, 20 Mar 2021 09:12:02 -0400
Subject: [PATCH 052/965] hydra-send-stats: add a --once option for testing

---
 src/script/hydra-send-stats  | 24 ++++++++++++++++++++++++
 t/scripts/hydra-send-stats.t |  2 +-
 2 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/src/script/hydra-send-stats b/src/script/hydra-send-stats
index ee047276..d07d9406 100755
--- a/src/script/hydra-send-stats
+++ b/src/script/hydra-send-stats
@@ -6,6 +6,7 @@ use Hydra::Helper::Nix;
 use Net::Statsd;
 use File::Slurp;
 use JSON;
+use Getopt::Long qw(:config gnu_getopt);

 STDERR->autoflush(1);
 binmode STDERR, ":encoding(utf8)";
@@ -65,6 +66,26 @@ sub sendQueueRunnerStats {
     gauge("hydra.queue.machines.in_use", scalar(grep { $_->{currentJobs} > 0 } (values %{$json->{machines}})));
 }

+
+sub showHelp {
+    print <<EOF;
+Usage: $0 [--once]
+
+--once    send the stats only once, instead of looping
+EOF
+    exit 0;
+}
+
+my $once = 0;
+
+GetOptions(
+    "once" => \$once,
+    "help" => sub { showHelp() }
+) or exit 1;
+
 while (1) {
     eval {
         sendQueueRunnerStats();
@@ -78,5 +99,8 @@ while (1) {
         gauge("hydra.mem.dirty", $dirty);
     }

+    if ($once) {
+        last;
+    }
     sleep(30);
 }
diff --git a/t/scripts/hydra-send-stats.t b/t/scripts/hydra-send-stats.t
index 174bde64..dde88cfe 100644
--- a/t/scripts/hydra-send-stats.t
+++ b/t/scripts/hydra-send-stats.t
@@ -12,7 +12,7 @@ use Test2::V0;
 my $db = Hydra::Model::DB->new;
 hydra_setup($db);

-my ($res, $stdout, $stderr) = captureStdoutStderr(60, ("hydra-send-stats"));
+my ($res, $stdout, $stderr) = captureStdoutStderr(60, ("hydra-send-stats", "--once"));
 is($stdout, "", "hydra-send-stats stdout should be empty");
 is($stderr, "", "hydra-send-stats stderr should be empty");
 is($res, 0, "hydra-send-stats --once should exit zero");

From 627af61abe4ee1aa9fc41bf0afb8a810af918c43 Mon Sep 17 00:00:00 2001
From: Tyson Whitehead
Date: Tue, 30 Jul 2019 16:23:20 -0400
Subject: [PATCH 053/965] Update jquery to latest 3.4.1 (considered by some as
 more secure)

---
 src/root/Makefile.am                           | 2 +-
 src/root/layout.tt                             | 2 +-
 src/root/static/js/jquery/jquery-1.12.3.min.js | 5 -----
 src/root/static/js/jquery/jquery-3.4.1.min.js  | 2 ++
 4 files changed, 4 insertions(+), 7 deletions(-)
 delete mode 100644 src/root/static/js/jquery/jquery-1.12.3.min.js
 create mode 100644 src/root/static/js/jquery/jquery-3.4.1.min.js

diff --git a/src/root/Makefile.am b/src/root/Makefile.am
index 52a14ef3..506bf597 100644
--- a/src/root/Makefile.am
+++ b/src/root/Makefile.am
@@ -4,7 +4,7 @@ STATIC = \
 	$(wildcard static/css/*) \
 	static/js/bootbox.min.js \
 	static/js/common.js \
-	static/js/jquery/jquery-1.12.3.min.js \
+	static/js/jquery/jquery-3.4.1.min.js \
 	static/js/jquery/jquery-ui-1.10.4.min.js

 FLOT = flot-0.8.3.zip
diff --git a/src/root/layout.tt b/src/root/layout.tt
index 2da24cb0..0d62d6e5 100644
--- a/src/root/layout.tt
+++ b/src/root/layout.tt
@@ -11,7 +11,7 @@
-    <script type="text/javascript" src="[% c.uri_for("/static/js/jquery/jquery-1.12.3.min.js") %]"></script>
+    <script type="text/javascript" src="[% c.uri_for("/static/js/jquery/jquery-3.4.1.min.js") %]"></script>

diff --git a/src/root/static/js/jquery/jquery-1.12.3.min.js b/src/root/static/js/jquery/jquery-1.12.3.min.js
deleted file mode 100644
index dad4f0af..00000000
--- a/src/root/static/js/jquery/jquery-1.12.3.min.js
+++ /dev/null
@@ -1,5 +0,0 @@
-/*! jQuery v1.12.3 | (c) jQuery Foundation | jquery.org/license */
-!function(a,b){"object"==typeof module&&"object"==typeof module.exports?module.exports=a.document?b(a,!0):function(a){if(!a.document)throw new Error("jQuery requires a window with a document");return b(a)}:b(a)}("undefined"!=typeof window?window:this,function(a,b){var c=[],d=a.document,e=c.slice,f=c.concat,g=c.push,h=c.indexOf,i={},j=i.toString,k=i.hasOwnProperty,l={},m="1.12.3",n=function(a,b){return new n.fn.init(a,b)},o=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,p=/^-ms-/,q=/-([\da-z])/gi,r=function(a,b){return b.toUpperCase()};n.fn=n.prototype={jquery:m,constructor:n,selector:"",length:0,toArray:function(){return e.call(this)},get:function(a){return null!=a?0>a?this[a+this.length]:this[a]:e.call(this)},pushStack:function(a){var b=n.merge(this.constructor(),a);return b.prevObject=this,b.context=this.context,b},each:function(a){return n.each(this,a)},map:function(a){return this.pushStack(n.map(this,function(b,c){return a.call(b,c,b)}))},slice:function(){return this.pushStack(e.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(a){var b=this.length,c=+a+(0>a?b:0);return this.pushStack(c>=0&&b>c?[this[c]]:[])},end:function(){return this.prevObject||this.constructor()},push:g,sort:c.sort,splice:c.splice},n.extend=n.fn.extend=function(){var a,b,c,d,e,f,g=arguments[0]||{},h=1,i=arguments.length,j=!1;for("boolean"==typeof g&&(j=g,g=arguments[h]||{},h++),"object"==typeof g||n.isFunction(g)||(g={}),h===i&&(g=this,h--);i>h;h++)if(null!=(e=arguments[h]))for(d in e)a=g[d],c=e[d],g!==c&&(j&&c&&(n.isPlainObject(c)||(b=n.isArray(c)))?(b?(b=!1,f=a&&n.isArray(a)?a:[]):f=a&&n.isPlainObject(a)?a:{},g[d]=n.extend(j,f,c)):void 0!==c&&(g[d]=c));return g},n.extend({expando:"jQuery"+(m+Math.random()).replace(/\D/g,""),isReady:!0,error:function(a){throw new Error(a)},noop:function(){},isFunction:function(a){return"function"===n.type(a)},isArray:Array.isArray||function(a){return"array"===n.type(a)},isWindow:function(a){return null!=a&&a==a.window},isNumeric:function(a){var b=a&&a.toString();return!n.isArray(a)&&b-parseFloat(b)+1>=0},isEmptyObject:function(a){var b;for(b in a)return!1;return!0},isPlainObject:function(a){var
b;if(!a||"object"!==n.type(a)||a.nodeType||n.isWindow(a))return!1;try{if(a.constructor&&!k.call(a,"constructor")&&!k.call(a.constructor.prototype,"isPrototypeOf"))return!1}catch(c){return!1}if(!l.ownFirst)for(b in a)return k.call(a,b);for(b in a);return void 0===b||k.call(a,b)},type:function(a){return null==a?a+"":"object"==typeof a||"function"==typeof a?i[j.call(a)]||"object":typeof a},globalEval:function(b){b&&n.trim(b)&&(a.execScript||function(b){a.eval.call(a,b)})(b)},camelCase:function(a){return a.replace(p,"ms-").replace(q,r)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toLowerCase()===b.toLowerCase()},each:function(a,b){var c,d=0;if(s(a)){for(c=a.length;c>d;d++)if(b.call(a[d],d,a[d])===!1)break}else for(d in a)if(b.call(a[d],d,a[d])===!1)break;return a},trim:function(a){return null==a?"":(a+"").replace(o,"")},makeArray:function(a,b){var c=b||[];return null!=a&&(s(Object(a))?n.merge(c,"string"==typeof a?[a]:a):g.call(c,a)),c},inArray:function(a,b,c){var d;if(b){if(h)return h.call(b,a,c);for(d=b.length,c=c?0>c?Math.max(0,d+c):c:0;d>c;c++)if(c in b&&b[c]===a)return c}return-1},merge:function(a,b){var c=+b.length,d=0,e=a.length;while(c>d)a[e++]=b[d++];if(c!==c)while(void 0!==b[d])a[e++]=b[d++];return a.length=e,a},grep:function(a,b,c){for(var d,e=[],f=0,g=a.length,h=!c;g>f;f++)d=!b(a[f],f),d!==h&&e.push(a[f]);return e},map:function(a,b,c){var d,e,g=0,h=[];if(s(a))for(d=a.length;d>g;g++)e=b(a[g],g,c),null!=e&&h.push(e);else for(g in a)e=b(a[g],g,c),null!=e&&h.push(e);return f.apply([],h)},guid:1,proxy:function(a,b){var c,d,f;return"string"==typeof b&&(f=a[b],b=a,a=f),n.isFunction(a)?(c=e.call(arguments,2),d=function(){return a.apply(b||this,c.concat(e.call(arguments)))},d.guid=a.guid=a.guid||n.guid++,d):void 0},now:function(){return+new Date},support:l}),"function"==typeof Symbol&&(n.fn[Symbol.iterator]=c[Symbol.iterator]),n.each("Boolean Number String Function Array Date RegExp Object Error Symbol".split(" "),function(a,b){i["[object "+b+"]"]=b.toLowerCase()});function s(a){var b=!!a&&"length"in a&&a.length,c=n.type(a);return"function"===c||n.isWindow(a)?!1:"array"===c||0===b||"number"==typeof b&&b>0&&b-1 in a}var t=function(a){var b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u="sizzle"+1*new Date,v=a.document,w=0,x=0,y=ga(),z=ga(),A=ga(),B=function(a,b){return a===b&&(l=!0),0},C=1<<31,D={}.hasOwnProperty,E=[],F=E.pop,G=E.push,H=E.push,I=E.slice,J=function(a,b){for(var c=0,d=a.length;d>c;c++)if(a[c]===b)return c;return-1},K="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",L="[\\x20\\t\\r\\n\\f]",M="(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+",N="\\["+L+"*("+M+")(?:"+L+"*([*^$|!~]?=)"+L+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+M+"))|)"+L+"*\\]",O=":("+M+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+N+")*)|.*)\\)|)",P=new RegExp(L+"+","g"),Q=new RegExp("^"+L+"+|((?:^|[^\\\\])(?:\\\\.)*)"+L+"+$","g"),R=new RegExp("^"+L+"*,"+L+"*"),S=new RegExp("^"+L+"*([>+~]|"+L+")"+L+"*"),T=new RegExp("="+L+"*([^\\]'\"]*?)"+L+"*\\]","g"),U=new RegExp(O),V=new RegExp("^"+M+"$"),W={ID:new RegExp("^#("+M+")"),CLASS:new RegExp("^\\.("+M+")"),TAG:new RegExp("^("+M+"|[*])"),ATTR:new RegExp("^"+N),PSEUDO:new RegExp("^"+O),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+L+"*(even|odd|(([+-]|)(\\d*)n|)"+L+"*(?:([+-]|)"+L+"*(\\d+)|))"+L+"*\\)|)","i"),bool:new RegExp("^(?:"+K+")$","i"),needsContext:new 
RegExp("^"+L+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+L+"*((?:-\\d)?\\d*)"+L+"*\\)|)(?=[^-]|$)","i")},X=/^(?:input|select|textarea|button)$/i,Y=/^h\d$/i,Z=/^[^{]+\{\s*\[native \w/,$=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,_=/[+~]/,aa=/'|\\/g,ba=new RegExp("\\\\([\\da-f]{1,6}"+L+"?|("+L+")|.)","ig"),ca=function(a,b,c){var d="0x"+b-65536;return d!==d||c?b:0>d?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)},da=function(){m()};try{H.apply(E=I.call(v.childNodes),v.childNodes),E[v.childNodes.length].nodeType}catch(ea){H={apply:E.length?function(a,b){G.apply(a,I.call(b))}:function(a,b){var c=a.length,d=0;while(a[c++]=b[d++]);a.length=c-1}}}function fa(a,b,d,e){var f,h,j,k,l,o,r,s,w=b&&b.ownerDocument,x=b?b.nodeType:9;if(d=d||[],"string"!=typeof a||!a||1!==x&&9!==x&&11!==x)return d;if(!e&&((b?b.ownerDocument||b:v)!==n&&m(b),b=b||n,p)){if(11!==x&&(o=$.exec(a)))if(f=o[1]){if(9===x){if(!(j=b.getElementById(f)))return d;if(j.id===f)return d.push(j),d}else if(w&&(j=w.getElementById(f))&&t(b,j)&&j.id===f)return d.push(j),d}else{if(o[2])return H.apply(d,b.getElementsByTagName(a)),d;if((f=o[3])&&c.getElementsByClassName&&b.getElementsByClassName)return H.apply(d,b.getElementsByClassName(f)),d}if(c.qsa&&!A[a+" "]&&(!q||!q.test(a))){if(1!==x)w=b,s=a;else if("object"!==b.nodeName.toLowerCase()){(k=b.getAttribute("id"))?k=k.replace(aa,"\\$&"):b.setAttribute("id",k=u),r=g(a),h=r.length,l=V.test(k)?"#"+k:"[id='"+k+"']";while(h--)r[h]=l+" "+qa(r[h]);s=r.join(","),w=_.test(a)&&oa(b.parentNode)||b}if(s)try{return H.apply(d,w.querySelectorAll(s)),d}catch(y){}finally{k===u&&b.removeAttribute("id")}}}return i(a.replace(Q,"$1"),b,d,e)}function ga(){var a=[];function b(c,e){return a.push(c+" ")>d.cacheLength&&delete b[a.shift()],b[c+" "]=e}return b}function ha(a){return a[u]=!0,a}function ia(a){var b=n.createElement("div");try{return!!a(b)}catch(c){return!1}finally{b.parentNode&&b.parentNode.removeChild(b),b=null}}function ja(a,b){var c=a.split("|"),e=c.length;while(e--)d.attrHandle[c[e]]=b}function ka(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&(~b.sourceIndex||C)-(~a.sourceIndex||C);if(d)return d;if(c)while(c=c.nextSibling)if(c===b)return-1;return a?1:-1}function la(a){return function(b){var c=b.nodeName.toLowerCase();return"input"===c&&b.type===a}}function ma(a){return function(b){var c=b.nodeName.toLowerCase();return("input"===c||"button"===c)&&b.type===a}}function na(a){return ha(function(b){return b=+b,ha(function(c,d){var e,f=a([],c.length,b),g=f.length;while(g--)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function oa(a){return a&&"undefined"!=typeof a.getElementsByTagName&&a}c=fa.support={},f=fa.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return b?"HTML"!==b.nodeName:!1},m=fa.setDocument=function(a){var b,e,g=a?a.ownerDocument||a:v;return g!==n&&9===g.nodeType&&g.documentElement?(n=g,o=n.documentElement,p=!f(n),(e=n.defaultView)&&e.top!==e&&(e.addEventListener?e.addEventListener("unload",da,!1):e.attachEvent&&e.attachEvent("onunload",da)),c.attributes=ia(function(a){return a.className="i",!a.getAttribute("className")}),c.getElementsByTagName=ia(function(a){return a.appendChild(n.createComment("")),!a.getElementsByTagName("*").length}),c.getElementsByClassName=Z.test(n.getElementsByClassName),c.getById=ia(function(a){return o.appendChild(a).id=u,!n.getElementsByName||!n.getElementsByName(u).length}),c.getById?(d.find.ID=function(a,b){if("undefined"!=typeof b.getElementById&&p){var c=b.getElementById(a);return c?[c]:[]}},d.filter.ID=function(a){var 
b=a.replace(ba,ca);return function(a){return a.getAttribute("id")===b}}):(delete d.find.ID,d.filter.ID=function(a){var b=a.replace(ba,ca);return function(a){var c="undefined"!=typeof a.getAttributeNode&&a.getAttributeNode("id");return c&&c.value===b}}),d.find.TAG=c.getElementsByTagName?function(a,b){return"undefined"!=typeof b.getElementsByTagName?b.getElementsByTagName(a):c.qsa?b.querySelectorAll(a):void 0}:function(a,b){var c,d=[],e=0,f=b.getElementsByTagName(a);if("*"===a){while(c=f[e++])1===c.nodeType&&d.push(c);return d}return f},d.find.CLASS=c.getElementsByClassName&&function(a,b){return"undefined"!=typeof b.getElementsByClassName&&p?b.getElementsByClassName(a):void 0},r=[],q=[],(c.qsa=Z.test(n.querySelectorAll))&&(ia(function(a){o.appendChild(a).innerHTML="",a.querySelectorAll("[msallowcapture^='']").length&&q.push("[*^$]="+L+"*(?:''|\"\")"),a.querySelectorAll("[selected]").length||q.push("\\["+L+"*(?:value|"+K+")"),a.querySelectorAll("[id~="+u+"-]").length||q.push("~="),a.querySelectorAll(":checked").length||q.push(":checked"),a.querySelectorAll("a#"+u+"+*").length||q.push(".#.+[+~]")}),ia(function(a){var b=n.createElement("input");b.setAttribute("type","hidden"),a.appendChild(b).setAttribute("name","D"),a.querySelectorAll("[name=d]").length&&q.push("name"+L+"*[*^$|!~]?="),a.querySelectorAll(":enabled").length||q.push(":enabled",":disabled"),a.querySelectorAll("*,:x"),q.push(",.*:")})),(c.matchesSelector=Z.test(s=o.matches||o.webkitMatchesSelector||o.mozMatchesSelector||o.oMatchesSelector||o.msMatchesSelector))&&ia(function(a){c.disconnectedMatch=s.call(a,"div"),s.call(a,"[s!='']:x"),r.push("!=",O)}),q=q.length&&new RegExp(q.join("|")),r=r.length&&new RegExp(r.join("|")),b=Z.test(o.compareDocumentPosition),t=b||Z.test(o.contains)?function(a,b){var c=9===a.nodeType?a.documentElement:a,d=b&&b.parentNode;return a===d||!(!d||1!==d.nodeType||!(c.contains?c.contains(d):a.compareDocumentPosition&&16&a.compareDocumentPosition(d)))}:function(a,b){if(b)while(b=b.parentNode)if(b===a)return!0;return!1},B=b?function(a,b){if(a===b)return l=!0,0;var d=!a.compareDocumentPosition-!b.compareDocumentPosition;return d?d:(d=(a.ownerDocument||a)===(b.ownerDocument||b)?a.compareDocumentPosition(b):1,1&d||!c.sortDetached&&b.compareDocumentPosition(a)===d?a===n||a.ownerDocument===v&&t(v,a)?-1:b===n||b.ownerDocument===v&&t(v,b)?1:k?J(k,a)-J(k,b):0:4&d?-1:1)}:function(a,b){if(a===b)return l=!0,0;var c,d=0,e=a.parentNode,f=b.parentNode,g=[a],h=[b];if(!e||!f)return a===n?-1:b===n?1:e?-1:f?1:k?J(k,a)-J(k,b):0;if(e===f)return ka(a,b);c=a;while(c=c.parentNode)g.unshift(c);c=b;while(c=c.parentNode)h.unshift(c);while(g[d]===h[d])d++;return d?ka(g[d],h[d]):g[d]===v?-1:h[d]===v?1:0},n):n},fa.matches=function(a,b){return fa(a,null,null,b)},fa.matchesSelector=function(a,b){if((a.ownerDocument||a)!==n&&m(a),b=b.replace(T,"='$1']"),c.matchesSelector&&p&&!A[b+" "]&&(!r||!r.test(b))&&(!q||!q.test(b)))try{var d=s.call(a,b);if(d||c.disconnectedMatch||a.document&&11!==a.document.nodeType)return d}catch(e){}return fa(b,n,null,[a]).length>0},fa.contains=function(a,b){return(a.ownerDocument||a)!==n&&m(a),t(a,b)},fa.attr=function(a,b){(a.ownerDocument||a)!==n&&m(a);var e=d.attrHandle[b.toLowerCase()],f=e&&D.call(d.attrHandle,b.toLowerCase())?e(a,b,!p):void 0;return void 0!==f?f:c.attributes||!p?a.getAttribute(b):(f=a.getAttributeNode(b))&&f.specified?f.value:null},fa.error=function(a){throw new Error("Syntax error, unrecognized expression: "+a)},fa.uniqueSort=function(a){var 
b,d=[],e=0,f=0;if(l=!c.detectDuplicates,k=!c.sortStable&&a.slice(0),a.sort(B),l){while(b=a[f++])b===a[f]&&(e=d.push(f));while(e--)a.splice(d[e],1)}return k=null,a},e=fa.getText=function(a){var b,c="",d=0,f=a.nodeType;if(f){if(1===f||9===f||11===f){if("string"==typeof a.textContent)return a.textContent;for(a=a.firstChild;a;a=a.nextSibling)c+=e(a)}else if(3===f||4===f)return a.nodeValue}else while(b=a[d++])c+=e(b);return c},d=fa.selectors={cacheLength:50,createPseudo:ha,match:W,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(a){return a[1]=a[1].replace(ba,ca),a[3]=(a[3]||a[4]||a[5]||"").replace(ba,ca),"~="===a[2]&&(a[3]=" "+a[3]+" "),a.slice(0,4)},CHILD:function(a){return a[1]=a[1].toLowerCase(),"nth"===a[1].slice(0,3)?(a[3]||fa.error(a[0]),a[4]=+(a[4]?a[5]+(a[6]||1):2*("even"===a[3]||"odd"===a[3])),a[5]=+(a[7]+a[8]||"odd"===a[3])):a[3]&&fa.error(a[0]),a},PSEUDO:function(a){var b,c=!a[6]&&a[2];return W.CHILD.test(a[0])?null:(a[3]?a[2]=a[4]||a[5]||"":c&&U.test(c)&&(b=g(c,!0))&&(b=c.indexOf(")",c.length-b)-c.length)&&(a[0]=a[0].slice(0,b),a[2]=c.slice(0,b)),a.slice(0,3))}},filter:{TAG:function(a){var b=a.replace(ba,ca).toLowerCase();return"*"===a?function(){return!0}:function(a){return a.nodeName&&a.nodeName.toLowerCase()===b}},CLASS:function(a){var b=y[a+" "];return b||(b=new RegExp("(^|"+L+")"+a+"("+L+"|$)"))&&y(a,function(a){return b.test("string"==typeof a.className&&a.className||"undefined"!=typeof a.getAttribute&&a.getAttribute("class")||"")})},ATTR:function(a,b,c){return function(d){var e=fa.attr(d,a);return null==e?"!="===b:b?(e+="","="===b?e===c:"!="===b?e!==c:"^="===b?c&&0===e.indexOf(c):"*="===b?c&&e.indexOf(c)>-1:"$="===b?c&&e.slice(-c.length)===c:"~="===b?(" "+e.replace(P," ")+" ").indexOf(c)>-1:"|="===b?e===c||e.slice(0,c.length+1)===c+"-":!1):!0}},CHILD:function(a,b,c,d,e){var f="nth"!==a.slice(0,3),g="last"!==a.slice(-4),h="of-type"===b;return 1===d&&0===e?function(a){return!!a.parentNode}:function(b,c,i){var j,k,l,m,n,o,p=f!==g?"nextSibling":"previousSibling",q=b.parentNode,r=h&&b.nodeName.toLowerCase(),s=!i&&!h,t=!1;if(q){if(f){while(p){m=b;while(m=m[p])if(h?m.nodeName.toLowerCase()===r:1===m.nodeType)return!1;o=p="only"===a&&!o&&"nextSibling"}return!0}if(o=[g?q.firstChild:q.lastChild],g&&s){m=q,l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),j=k[a]||[],n=j[0]===w&&j[1],t=n&&j[2],m=n&&q.childNodes[n];while(m=++n&&m&&m[p]||(t=n=0)||o.pop())if(1===m.nodeType&&++t&&m===b){k[a]=[w,n,t];break}}else if(s&&(m=b,l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),j=k[a]||[],n=j[0]===w&&j[1],t=n),t===!1)while(m=++n&&m&&m[p]||(t=n=0)||o.pop())if((h?m.nodeName.toLowerCase()===r:1===m.nodeType)&&++t&&(s&&(l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),k[a]=[w,t]),m===b))break;return t-=e,t===d||t%d===0&&t/d>=0}}},PSEUDO:function(a,b){var c,e=d.pseudos[a]||d.setFilters[a.toLowerCase()]||fa.error("unsupported pseudo: "+a);return e[u]?e(b):e.length>1?(c=[a,a,"",b],d.setFilters.hasOwnProperty(a.toLowerCase())?ha(function(a,c){var d,f=e(a,b),g=f.length;while(g--)d=J(a,f[g]),a[d]=!(c[d]=f[g])}):function(a){return e(a,0,c)}):e}},pseudos:{not:ha(function(a){var b=[],c=[],d=h(a.replace(Q,"$1"));return d[u]?ha(function(a,b,c,e){var f,g=d(a,null,e,[]),h=a.length;while(h--)(f=g[h])&&(a[h]=!(b[h]=f))}):function(a,e,f){return b[0]=a,d(b,null,f,c),b[0]=null,!c.pop()}}),has:ha(function(a){return function(b){return 
fa(a,b).length>0}}),contains:ha(function(a){return a=a.replace(ba,ca),function(b){return(b.textContent||b.innerText||e(b)).indexOf(a)>-1}}),lang:ha(function(a){return V.test(a||"")||fa.error("unsupported lang: "+a),a=a.replace(ba,ca).toLowerCase(),function(b){var c;do if(c=p?b.lang:b.getAttribute("xml:lang")||b.getAttribute("lang"))return c=c.toLowerCase(),c===a||0===c.indexOf(a+"-");while((b=b.parentNode)&&1===b.nodeType);return!1}}),target:function(b){var c=a.location&&a.location.hash;return c&&c.slice(1)===b.id},root:function(a){return a===o},focus:function(a){return a===n.activeElement&&(!n.hasFocus||n.hasFocus())&&!!(a.type||a.href||~a.tabIndex)},enabled:function(a){return a.disabled===!1},disabled:function(a){return a.disabled===!0},checked:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&!!a.checked||"option"===b&&!!a.selected},selected:function(a){return a.parentNode&&a.parentNode.selectedIndex,a.selected===!0},empty:function(a){for(a=a.firstChild;a;a=a.nextSibling)if(a.nodeType<6)return!1;return!0},parent:function(a){return!d.pseudos.empty(a)},header:function(a){return Y.test(a.nodeName)},input:function(a){return X.test(a.nodeName)},button:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&"button"===a.type||"button"===b},text:function(a){var b;return"input"===a.nodeName.toLowerCase()&&"text"===a.type&&(null==(b=a.getAttribute("type"))||"text"===b.toLowerCase())},first:na(function(){return[0]}),last:na(function(a,b){return[b-1]}),eq:na(function(a,b,c){return[0>c?c+b:c]}),even:na(function(a,b){for(var c=0;b>c;c+=2)a.push(c);return a}),odd:na(function(a,b){for(var c=1;b>c;c+=2)a.push(c);return a}),lt:na(function(a,b,c){for(var d=0>c?c+b:c;--d>=0;)a.push(d);return a}),gt:na(function(a,b,c){for(var d=0>c?c+b:c;++db;b++)d+=a[b].value;return d}function ra(a,b,c){var d=b.dir,e=c&&"parentNode"===d,f=x++;return b.first?function(b,c,f){while(b=b[d])if(1===b.nodeType||e)return a(b,c,f)}:function(b,c,g){var h,i,j,k=[w,f];if(g){while(b=b[d])if((1===b.nodeType||e)&&a(b,c,g))return!0}else while(b=b[d])if(1===b.nodeType||e){if(j=b[u]||(b[u]={}),i=j[b.uniqueID]||(j[b.uniqueID]={}),(h=i[d])&&h[0]===w&&h[1]===f)return k[2]=h[2];if(i[d]=k,k[2]=a(b,c,g))return!0}}}function sa(a){return a.length>1?function(b,c,d){var e=a.length;while(e--)if(!a[e](b,c,d))return!1;return!0}:a[0]}function ta(a,b,c){for(var d=0,e=b.length;e>d;d++)fa(a,b[d],c);return c}function ua(a,b,c,d,e){for(var f,g=[],h=0,i=a.length,j=null!=b;i>h;h++)(f=a[h])&&(c&&!c(f,d,e)||(g.push(f),j&&b.push(h)));return g}function va(a,b,c,d,e,f){return d&&!d[u]&&(d=va(d)),e&&!e[u]&&(e=va(e,f)),ha(function(f,g,h,i){var j,k,l,m=[],n=[],o=g.length,p=f||ta(b||"*",h.nodeType?[h]:h,[]),q=!a||!f&&b?p:ua(p,m,a,h,i),r=c?e||(f?a:o||d)?[]:g:q;if(c&&c(q,r,h,i),d){j=ua(r,n),d(j,[],h,i),k=j.length;while(k--)(l=j[k])&&(r[n[k]]=!(q[n[k]]=l))}if(f){if(e||a){if(e){j=[],k=r.length;while(k--)(l=r[k])&&j.push(q[k]=l);e(null,r=[],j,i)}k=r.length;while(k--)(l=r[k])&&(j=e?J(f,l):m[k])>-1&&(f[j]=!(g[j]=l))}}else r=ua(r===g?r.splice(o,r.length):r),e?e(null,g,r,i):H.apply(g,r)})}function wa(a){for(var b,c,e,f=a.length,g=d.relative[a[0].type],h=g||d.relative[" "],i=g?1:0,k=ra(function(a){return a===b},h,!0),l=ra(function(a){return J(b,a)>-1},h,!0),m=[function(a,c,d){var e=!g&&(d||c!==j)||((b=c).nodeType?k(a,c,d):l(a,c,d));return b=null,e}];f>i;i++)if(c=d.relative[a[i].type])m=[ra(sa(m),c)];else{if(c=d.filter[a[i].type].apply(null,a[i].matches),c[u]){for(e=++i;f>e;e++)if(d.relative[a[e].type])break;return 
[minified vendored jQuery source omitted: unrecoverable third-party bundle]
diff --git a/src/root/jobset.tt b/src/root/jobset.tt
index 5d8345f9..5afcbfde 100644
--- a/src/root/jobset.tt
+++ b/src/root/jobset.tt
@@ -119,8 +119,7 @@

   [% IF jobset.errormsg || jobset.fetcherrormsg %]
-      <p>Errors occurred at [% INCLUDE renderDateTime timestamp=(jobset.errortime || jobset.lastcheckedtime) %].</p>
-      <div class="card bg-light"><div class="card-body"><pre>[% HTML.escape(jobset.fetcherrormsg || jobset.errormsg) %]</pre></div></div>
+      <iframe src="[% c.uri_for('/jobset' project.name jobset.name "errors") %]" loading="lazy" frameBorder="0" width="100%"></iframe>
   [% END %]

diff --git a/src/root/layout.tt b/src/root/layout.tt
index 399962b4..b520b455 100644
--- a/src/root/layout.tt
+++ b/src/root/layout.tt
@@ -10,31 +10,7 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    [% INCLUDE style.tt %]

     [% IF c.config.enable_google_login %]
diff --git a/src/root/static/js/common.js b/src/root/static/js/common.js
index c51f769a..9f31d1e6 100644
--- a/src/root/static/js/common.js
+++ b/src/root/static/js/common.js
@@ -129,6 +129,12 @@ $(document).ready(function() {
            el.addClass("is-local");
        }
    });
+
+    [...document.getElementsByTagName("iframe")].forEach((element) => {
+        element.contentWindow.addEventListener("DOMContentLoaded", (_) => {
+            element.style.height = element.contentWindow.document.body.scrollHeight + 'px';
+        })
+    })
 });

 var tabsLoaded = {};
diff --git a/src/root/style.tt b/src/root/style.tt
new file mode 100644
index 00000000..4094b7bc
--- /dev/null
+++ b/src/root/style.tt
@@ -0,0 +1,24 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/t/Hydra/Controller/Jobset/evals.t b/t/Hydra/Controller/Jobset/evals.t
index 221efb65..25357f8f 100644
--- a/t/Hydra/Controller/Jobset/evals.t
+++ b/t/Hydra/Controller/Jobset/evals.t
@@ -32,4 +32,9 @@ subtest "/jobset/PROJECT/JOBSET/evals" => sub {
     ok($jobsetevals->is_success, "The page showing the jobset evals returns 200.");
 };

+subtest "/jobset/PROJECT/JOBSET/errors" => sub {
+    my $jobsetevals = request(GET '/jobset/' . $project->name . '/' . $jobset->name . '/errors');
+    ok($jobsetevals->is_success, "The page showing the jobset eval errors returns 200.");
+};
+
 done_testing;
diff --git a/t/Hydra/Controller/JobsetEval/fetch.t b/t/Hydra/Controller/JobsetEval/fetch.t
index 14169c39..609e9224 100644
--- a/t/Hydra/Controller/JobsetEval/fetch.t
+++ b/t/Hydra/Controller/JobsetEval/fetch.t
@@ -35,6 +35,10 @@ subtest "Fetching the eval's overview" => sub {
     is($fetch->code, 200, "channel page is 200");
 };

+subtest "Fetching the eval's overview" => sub {
+    my $fetch = request(GET '/eval/' . $eval->id, '/errors');
+    is($fetch->code, 200, "errors page is 200");
+};
 done_testing;
From abe35881e4d9263babb452bb9169cf6491d418bd Mon Sep 17 00:00:00 2001
From: "git@71rd.net"
Date: Sat, 3 Aug 2024 10:36:10 +0000
Subject: [PATCH 927/965] Stream files from store instead of buffering them

When an artifact is requested from Hydra, the output is first copied
from the Nix store into memory in its entirety and only then sent as a
response, delaying the download and taking up significant amounts of
memory, as reported in https://github.com/NixOS/hydra/issues/1357.

Instead of calling a command and blocking while reading in the entire
output, this adds readIntoSocket(). The function takes a command,
starts a subprocess with that command, and returns a file descriptor
attached to its stdout. This file descriptor is then used by
Catalyst's response builder to stream the output directly.

(cherry picked from commit 459aa0a5983a0bd546399c08231468d6e9282f54)
---
 src/lib/Hydra/Controller/Build.pm |  2 +-
 src/lib/Hydra/Helper/Nix.pm       | 12 ++++++++++++
 2 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/src/lib/Hydra/Controller/Build.pm b/src/lib/Hydra/Controller/Build.pm
index de2c204d..5e7b6f24 100644
--- a/src/lib/Hydra/Controller/Build.pm
+++ b/src/lib/Hydra/Controller/Build.pm
@@ -238,7 +238,7 @@ sub serveFile {
         # XSS hole.
         $c->response->header('Content-Security-Policy' => 'sandbox allow-scripts');

-        $c->stash->{'plain'} = { data => grab(cmd => ["nix", "--experimental-features", "nix-command",
+        $c->stash->{'plain'} = { data => readIntoSocket(cmd => ["nix", "--experimental-features", "nix-command",
                                                        "store", "cat", "--store", getStoreUri(), "$path"]) };

         # Detect MIME type.
diff --git a/src/lib/Hydra/Helper/Nix.pm b/src/lib/Hydra/Helper/Nix.pm
index bff7a5ed..f87946d5 100644
--- a/src/lib/Hydra/Helper/Nix.pm
+++ b/src/lib/Hydra/Helper/Nix.pm
@@ -36,6 +36,7 @@ our @EXPORT = qw(
     jobsetOverview
     jobsetOverview_
     pathIsInsidePrefix
+    readIntoSocket
     readNixFile
     registerRoot
     restartBuilds
@@ -417,6 +418,17 @@ sub pathIsInsidePrefix {
     return $cur;
 }

+sub readIntoSocket{
+    my (%args) = @_;
+    my $sock;
+
+    eval {
+        my $x= join(" ", @{$args{cmd}});
+        open($sock, "-|", $x) or die q(failed to open socket from command:\n $x);
+    };
+
+    return $sock;
+}

From 6133693097c3d24eb67f901eff2a35ded4455ee8 Mon Sep 17 00:00:00 2001
From: Maximilian Bosch
Date: Sun, 18 Aug 2024 10:40:42 +0200
Subject: [PATCH 928/965] readIntoSocket: fix with store URIs containing an `&`

The third argument to `open()` in `-|` mode is passed to a shell if
it's a string. In my case the store URI contains
`?secret-key=${signingKey.directory}/secret&compression=zstd`.

For the `nix store cat` case this means that:

* until `&`, the process will be started in the background. This fails
  immediately because no path to cat is specified.
* `compression=zstd` is a variable assignment.
* the `$path` argument to `store cat` is attempted to be executed as
  another command.

Passing just the list solves the problem.

(cherry picked from commit 3ee51dbe589458cc54ff753317bbc6db530bddc0)
---
 src/lib/Hydra/Helper/Nix.pm | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/src/lib/Hydra/Helper/Nix.pm b/src/lib/Hydra/Helper/Nix.pm
index f87946d5..88fbdd6d 100644
--- a/src/lib/Hydra/Helper/Nix.pm
+++ b/src/lib/Hydra/Helper/Nix.pm
@@ -423,8 +423,7 @@ sub readIntoSocket{
     my $sock;

     eval {
-        my $x= join(" ", @{$args{cmd}});
-        open($sock, "-|", $x) or die q(failed to open socket from command:\n $x);
+        open($sock, "-|", @{$args{cmd}}) or die q(failed to open socket from command:\n $x);
     };

     return $sock;
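A side note on why the list form matters, since the same trap exists outside Perl: joining an argv into one string hands it to a shell, which gives `&`, `=` and whitespace syntactic meaning, while passing the list goes straight to exec. A rough C++ rendering of the two behaviours; the `nix` invocation and the store URI are placeholders for illustration, not real endpoints:

#include <cstdlib>
#include <sys/wait.h>
#include <unistd.h>

// String form: the shell parses the command line, so the '&' in the store
// URI backgrounds a half-complete command, 'compression=zstd' becomes a
// shell variable assignment, and '/path' runs as a separate command.
void runViaShell()
{
    std::system("nix store cat --store dummy://x?secret-key=/k&compression=zstd /path");
}

// List form: the argv reaches exec() unchanged, no shell is involved, and
// '&' is just a byte in an argument. This mirrors open($sock, "-|", @cmd).
void runViaArgv()
{
    pid_t pid = fork();
    if (pid == 0) {
        char * const argv[] = {
            const_cast<char *>("nix"), const_cast<char *>("store"),
            const_cast<char *>("cat"), const_cast<char *>("--store"),
            const_cast<char *>("dummy://x?secret-key=/k&compression=zstd"),
            const_cast<char *>("/path"), nullptr};
        execvp("nix", argv);
        _exit(127); // exec failed
    }
    int status = 0;
    waitpid(pid, &status, 0);
}

int main() { runViaArgv(); return 0; }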
From 0d0c4f278bbed3ad31b100cc662933041d34651d Mon Sep 17 00:00:00 2001
From: John Ericson
Date: Mon, 7 Apr 2025 12:31:02 -0400
Subject: [PATCH 929/965] Fix evaluation of NixOS tests, avoid `with`

---
 nixos-tests.nix | 16 ++++++----------
 1 file changed, 6 insertions(+), 10 deletions(-)

diff --git a/nixos-tests.nix b/nixos-tests.nix
index 9efe68c8..77dd8f8a 100644
--- a/nixos-tests.nix
+++ b/nixos-tests.nix
@@ -27,8 +27,7 @@ in

   install = forEachSystem (system:
-    with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
-    simpleTest {
+    (import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).simpleTest {
       name = "hydra-install";
       nodes.machine = hydraServer;
       testScript =
@@ -43,8 +42,7 @@ in
     });

   notifications = forEachSystem (system:
-    with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
-    simpleTest {
+    (import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).simpleTest {
       name = "hydra-notifications";
       nodes.machine = {
         imports = [ hydraServer ];
@@ -56,7 +54,7 @@ in
         '';
         services.influxdb.enable = true;
       };
-      testScript = ''
+      testScript = { nodes, ... }: ''
        machine.wait_for_job("hydra-init")

        # Create an admin account and some other state.
@@ -87,7 +85,7 @@ in

        # Setup the project and jobset
        machine.succeed(
-            "su - hydra -c 'perl -I ${config.services.hydra-dev.package.perlDeps}/lib/perl5/site_perl ${./t/setup-notifications-jobset.pl}' >&2"
+            "su - hydra -c 'perl -I ${nodes.machine.services.hydra-dev.package.perlDeps}/lib/perl5/site_perl ${./t/setup-notifications-jobset.pl}' >&2"
        )

        # Wait until hydra has build the job and
@@ -101,9 +99,7 @@ in
     });

   gitea = forEachSystem (system:
-    let pkgs = nixpkgs.legacyPackages.${system}; in
-    with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
-    makeTest {
+    (import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).makeTest {
       name = "hydra-gitea";
       nodes.machine = { pkgs, ... }: {
         imports = [ hydraServer ];
@@ -129,7 +125,7 @@ in
         networking.firewall.allowedTCPPorts = [ 3000 ];
       };
       skipLint = true;
-      testScript =
+      testScript = { pkgs, ... }:
         let
           scripts.mktoken = pkgs.writeText "token.sql" ''
             INSERT INTO access_token (id, uid, name, created_unix, updated_unix, token_hash, token_salt, token_last_eight, scope) VALUES (1, 1, 'hydra', 1617107360, 1617107360, 'a930f319ca362d7b49a4040ac0af74521c3a3c3303a86f327b01994430672d33b6ec53e4ea774253208686c712495e12a486', 'XRjWE9YW0g', '31d3a9c7', 'all');
From 5a9985f96cd00278a137bbbca8ee2789acfae093 Mon Sep 17 00:00:00 2001
From: Pierre Bourdon
Date: Thu, 11 Apr 2024 17:12:47 +0200
Subject: [PATCH 930/965] web: Skip System on /machines

It is redundant.
---
 src/root/machine-status.tt | 2 --
 1 file changed, 2 deletions(-)

diff --git a/src/root/machine-status.tt b/src/root/machine-status.tt
index 3af5073c..07a2359d 100644
--- a/src/root/machine-status.tt
+++ b/src/root/machine-status.tt
@@ -6,7 +6,6 @@
-
@@ -41,7 +40,6 @@
       [% idle = 0 %]
-
From 1e2d3211d9fec6dd9e00667bc5c12203ee0bdaa4 Mon Sep 17 00:00:00 2001
From: Pierre Bourdon
Date: Thu, 11 Apr 2024 15:03:23 +0200
Subject: [PATCH 931/965] queue-runner: limit parallelism of CPU intensive
 operations

My current theory is that running more parallel xz than available CPU
cores is reducing our overall throughput by requiring more scheduling
overhead and more cache thrashing.
---
 src/hydra-queue-runner/build-remote.cc       | 18 ++++++++++++++++++
 src/hydra-queue-runner/hydra-queue-runner.cc |  1 +
 src/hydra-queue-runner/state.hh              |  7 +++++++
 src/root/common.tt                           |  2 ++
 4 files changed, 28 insertions(+)

diff --git a/src/hydra-queue-runner/build-remote.cc b/src/hydra-queue-runner/build-remote.cc
index 79c32d46..0c8b3f10 100644
--- a/src/hydra-queue-runner/build-remote.cc
+++ b/src/hydra-queue-runner/build-remote.cc
@@ -386,6 +386,16 @@ void RemoteResult::updateWithBuildResult(const nix::BuildResult & buildResult)
 }

+/* Utility guard object to auto-release a semaphore on destruction. */
+template<typename T>
+class SemaphoreReleaser {
+public:
+    SemaphoreReleaser(T* s) : sem(s) {}
+    ~SemaphoreReleaser() { sem->release(); }
+
+private:
+    T* sem;
+};

 void State::buildRemote(ref<Store> destStore,
     ::Machine::ptr machine, Step::ptr step,
@@ -527,6 +537,14 @@ void State::buildRemote(ref<Store> destStore,
         result.logFile = "";
     }

+    /* Throttle CPU-bound work. Opportunistically skip updating the current
+     * step, since this requires a DB roundtrip. */
+    if (!localWorkThrottler.try_acquire()) {
+        updateStep(ssWaitingForLocalSlot);
+        localWorkThrottler.acquire();
+    }
+    SemaphoreReleaser releaser(&localWorkThrottler);
+
     StorePathSet outputs;
     for (auto & [_, realisation] : buildResult.builtOutputs)
         outputs.insert(realisation.outPath);
diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc
index 64a98797..cf7d4056 100644
--- a/src/hydra-queue-runner/hydra-queue-runner.cc
+++ b/src/hydra-queue-runner/hydra-queue-runner.cc
@@ -85,6 +85,7 @@ State::State(std::optional<std::string> metricsAddrOpt)
     : config(std::make_unique<HydraConfig>())
     , maxUnsupportedTime(config->getIntOption("max_unsupported_time", 0))
     , dbPool(config->getIntOption("max_db_connections", 128))
+    , localWorkThrottler(config->getIntOption("max_local_worker_threads", std::min(maxSupportedLocalWorkers, std::max(4u, std::thread::hardware_concurrency()) - 2)))
     , maxOutputSize(config->getIntOption("max_output_size", 2ULL << 30))
     , maxLogSize(config->getIntOption("max_log_size", 64ULL << 20))
     , uploadLogsToBinaryCache(config->getBoolOption("upload_logs_to_binary_cache", false))
diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh
index 8933720d..34b7a676 100644
--- a/src/hydra-queue-runner/state.hh
+++ b/src/hydra-queue-runner/state.hh
@@ -6,6 +6,8 @@
 #include <map>
 #include <memory>
 #include <queue>
+#include <regex>
+#include <semaphore>

 #include <prometheus/counter.h>
 #include <prometheus/gauge.h>
@@ -58,6 +60,7 @@ typedef enum {
     ssConnecting = 10,
     ssSendingInputs = 20,
     ssBuilding = 30,
+    ssWaitingForLocalSlot = 35,
     ssReceivingOutputs = 40,
     ssPostProcessing = 50,
 } StepState;
@@ -353,6 +356,10 @@ private:
     typedef std::map<nix::StoreReference::Variant, Machine::ptr> Machines;
     nix::Sync<Machines> machines; // FIXME: use atomic_shared_ptr

+    /* Throttler for CPU-bound local work. */
+    static constexpr unsigned int maxSupportedLocalWorkers = 1024;
+    std::counting_semaphore<maxSupportedLocalWorkers> localWorkThrottler;
+
     /* Various stats. */
     time_t startedAt;
     counter nrBuildsRead{0};
diff --git a/src/root/common.tt b/src/root/common.tt
index 842ad109..6348bee7 100644
--- a/src/root/common.tt
+++ b/src/root/common.tt
@@ -270,6 +270,8 @@ BLOCK renderBusyStatus;
     Sending inputs
   [% ELSIF step.busy == 30 %]
     Building
+  [% ELSIF step.busy == 35 %]
+    Waiting to receive outputs
   [% ELSIF step.busy == 40 %]
     Receiving outputs
   [% ELSIF step.busy == 50 %]
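For readers unfamiliar with C++20 semaphores, the throttling pattern this patch introduces can be reduced to a few lines. This is a standalone sketch, not Hydra code; the slot count of 4 and names like doCpuBoundWork are made up for illustration:

#include <iostream>
#include <semaphore>
#include <thread>
#include <vector>

// At most 4 threads may run the CPU-heavy section at once; 1024 is only
// the compile-time upper bound required by std::counting_semaphore.
std::counting_semaphore<1024> throttler(4);

struct SlotReleaser {
    std::counting_semaphore<1024> * sem;
    ~SlotReleaser() { sem->release(); } // runs even if the work throws
};

void doCpuBoundWork(int id)
{
    if (!throttler.try_acquire()) {
        // No free slot: make the wait observable, then block.
        std::cout << "worker " << id << " waiting for a local slot\n";
        throttler.acquire();
    }
    SlotReleaser releaser{&throttler};
    // ... CPU-bound work such as xz compression would go here ...
}

int main()
{
    std::vector<std::thread> workers;
    for (int i = 0; i < 16; i++) workers.emplace_back(doCpuBoundWork, i);
    for (auto & w : workers) w.join();
}

The only reason for the try_acquire()/acquire() pair instead of a single acquire() is to record the waiting state (in Hydra, to update the step status in the DB) before blocking.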
From efcf6815d95134050b0f280668af7655aae9ef72 Mon Sep 17 00:00:00 2001
From: Pierre Bourdon
Date: Sat, 20 Apr 2024 16:48:03 +0200
Subject: [PATCH 932/965] queue-runner: add prom metrics to allow detecting
 internal bottlenecks

By looking at the ratio of running vs. waiting for the dispatcher and
the queue monitor, we should get better visibility into what hydra is
currently bottlenecked on.

There are other side effects we can try to measure to get to the same
result, but having a simple way doesn't cost us much.
---
 src/hydra-queue-runner/dispatcher.cc         | 12 ++++++---
 src/hydra-queue-runner/hydra-queue-runner.cc | 28 ++++++++++++++++++++
 src/hydra-queue-runner/queue-monitor.cc      | 11 ++++++++
 src/hydra-queue-runner/state.hh              |  6 +++++
 4 files changed, 54 insertions(+), 3 deletions(-)

diff --git a/src/hydra-queue-runner/dispatcher.cc b/src/hydra-queue-runner/dispatcher.cc
index cbf982bf..11db0071 100644
--- a/src/hydra-queue-runner/dispatcher.cc
+++ b/src/hydra-queue-runner/dispatcher.cc
@@ -40,13 +40,15 @@ void State::dispatcher()
             printMsg(lvlDebug, "dispatcher woken up");
             nrDispatcherWakeups++;

-            auto now1 = std::chrono::steady_clock::now();
+            auto t_before_work = std::chrono::steady_clock::now();

             auto sleepUntil = doDispatch();

-            auto now2 = std::chrono::steady_clock::now();
+            auto t_after_work = std::chrono::steady_clock::now();

-            dispatchTimeMs += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
+            prom.dispatcher_time_spent_running.Increment(
+                std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count());
+            dispatchTimeMs += std::chrono::duration_cast<std::chrono::milliseconds>(t_after_work - t_before_work).count();

             /* Sleep until we're woken up (either because a runnable build
                is added, or because a build finishes). */
@@ -60,6 +62,10 @@ void State::dispatcher()
                 *dispatcherWakeup_ = false;
             }

+            auto t_after_sleep = std::chrono::steady_clock::now();
+            prom.dispatcher_time_spent_waiting.Increment(
+                std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count());
+
         } catch (std::exception & e) {
             printError("dispatcher: %s", e.what());
             sleep(1);
diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc
index cf7d4056..8123fd39 100644
--- a/src/hydra-queue-runner/hydra-queue-runner.cc
+++ b/src/hydra-queue-runner/hydra-queue-runner.cc
@@ -77,6 +77,34 @@ State::PromMetrics::PromMetrics()
         .Register(*registry)
         .Add({})
     )
+    , dispatcher_time_spent_running(
+        prometheus::BuildCounter()
+            .Name("hydraqueuerunner_dispatcher_time_spent_running")
+            .Help("Time (in micros) spent running the dispatcher")
+            .Register(*registry)
+            .Add({})
+    )
+    , dispatcher_time_spent_waiting(
+        prometheus::BuildCounter()
+            .Name("hydraqueuerunner_dispatcher_time_spent_waiting")
+            .Help("Time (in micros) spent waiting for the dispatcher to obtain work")
+            .Register(*registry)
+            .Add({})
+    )
+    , queue_monitor_time_spent_running(
+        prometheus::BuildCounter()
+            .Name("hydraqueuerunner_queue_monitor_time_spent_running")
+            .Help("Time (in micros) spent running the queue monitor")
+            .Register(*registry)
+            .Add({})
+    )
+    , queue_monitor_time_spent_waiting(
+        prometheus::BuildCounter()
+            .Name("hydraqueuerunner_queue_monitor_time_spent_waiting")
+            .Help("Time (in micros) spent waiting for the queue monitor to obtain work")
+            .Register(*registry)
+            .Add({})
+    )
 {
 }

diff --git a/src/hydra-queue-runner/queue-monitor.cc b/src/hydra-queue-runner/queue-monitor.cc
index 81bda873..3af0752a 100644
--- a/src/hydra-queue-runner/queue-monitor.cc
+++ b/src/hydra-queue-runner/queue-monitor.cc
@@ -42,12 +42,19 @@ void State::queueMonitorLoop(Connection & conn)

     bool quit = false;
     while (!quit) {
+        auto t_before_work = std::chrono::steady_clock::now();
+
         localStore->clearPathInfoCache();

         bool done = getQueuedBuilds(conn, destStore, lastBuildId);

         if (buildOne && buildOneDone) quit = true;

+        auto t_after_work = std::chrono::steady_clock::now();
+
+        prom.queue_monitor_time_spent_running.Increment(
+            std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count());
+
         /* Sleep until we get notification from the database about an
            event. */
         if (done && !quit) {
@@ -72,6 +79,10 @@ void State::queueMonitorLoop(Connection & conn)
             printMsg(lvlTalkative, "got notification: jobset shares changed");
             processJobsetSharesChange(conn);
         }
+
+        auto t_after_sleep = std::chrono::steady_clock::now();
+        prom.queue_monitor_time_spent_waiting.Increment(
+            std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count());
     }

     exit(0);
diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh
index 34b7a676..5e05157b 100644
--- a/src/hydra-queue-runner/state.hh
+++ b/src/hydra-queue-runner/state.hh
@@ -459,6 +459,12 @@ private:
         prometheus::Counter& queue_checks_finished;
         prometheus::Gauge& queue_max_id;

+        prometheus::Counter& dispatcher_time_spent_running;
+        prometheus::Counter& dispatcher_time_spent_waiting;
+
+        prometheus::Counter& queue_monitor_time_spent_running;
+        prometheus::Counter& queue_monitor_time_spent_waiting;
+
         PromMetrics();
     };
     PromMetrics prom;
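The instrumentation pattern here is loop-agnostic and worth spelling out: timestamp before the work, after the work, and after the sleep, then export both deltas. A self-contained approximation, with plain atomics standing in for the prometheus-cpp counters the patch uses:

#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

// Stand-ins for the two prometheus counters; the pattern is identical.
std::atomic<long> timeRunningUs{0}, timeWaitingUs{0};

long elapsedUs(std::chrono::steady_clock::time_point a,
               std::chrono::steady_clock::time_point b)
{
    return std::chrono::duration_cast<std::chrono::microseconds>(b - a).count();
}

void loopOnce()
{
    auto t0 = std::chrono::steady_clock::now();
    std::this_thread::sleep_for(std::chrono::milliseconds(30)); // "work"
    auto t1 = std::chrono::steady_clock::now();
    std::this_thread::sleep_for(std::chrono::milliseconds(70)); // "waiting"
    auto t2 = std::chrono::steady_clock::now();

    timeRunningUs += elapsedUs(t0, t1);
    timeWaitingUs += elapsedUs(t1, t2);
}

int main()
{
    for (int i = 0; i < 5; i++) loopOnce();
    std::printf("running=%ldus waiting=%ldus\n",
                timeRunningUs.load(), timeWaitingUs.load());
}

If time_spent_waiting stays near zero while time_spent_running grows, the loop itself is the bottleneck; the inverse means it is starved for work.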
From d8ffa6b56a49dc6705b1fc3a59f6acfe64dd106a Mon Sep 17 00:00:00 2001
From: Pierre Bourdon
Date: Sat, 20 Apr 2024 16:53:52 +0200
Subject: [PATCH 933/965] queue-runner: remove id > X from new builds query

Running the query with/without it shows that it makes no difference to
postgres, since there's an index on finished=0 already. This allows a
few simplifications, but also paves the way towards running multiple
parallel monitor threads in the future.
---
 src/hydra-queue-runner/hydra-queue-runner.cc |  7 -------
 src/hydra-queue-runner/queue-monitor.cc      | 21 ++++-----------------
 src/hydra-queue-runner/state.hh              |  4 +---
 3 files changed, 5 insertions(+), 27 deletions(-)

diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc
index 8123fd39..405c44d1 100644
--- a/src/hydra-queue-runner/hydra-queue-runner.cc
+++ b/src/hydra-queue-runner/hydra-queue-runner.cc
@@ -70,13 +70,6 @@ State::PromMetrics::PromMetrics()
         .Register(*registry)
         .Add({})
     )
-    , queue_max_id(
-        prometheus::BuildGauge()
-            .Name("hydraqueuerunner_queue_max_build_id_info")
-            .Help("Maximum build record ID in the queue")
-            .Register(*registry)
-            .Add({})
-    )
     , dispatcher_time_spent_running(
         prometheus::BuildCounter()
             .Name("hydraqueuerunner_dispatcher_time_spent_running")
diff --git a/src/hydra-queue-runner/queue-monitor.cc b/src/hydra-queue-runner/queue-monitor.cc
index 3af0752a..a9d386d0 100644
--- a/src/hydra-queue-runner/queue-monitor.cc
+++ b/src/hydra-queue-runner/queue-monitor.cc
@@ -38,15 +38,13 @@ void State::queueMonitorLoop(Connection & conn)

     auto destStore = getDestStore();

-    unsigned int lastBuildId = 0;
-
     bool quit = false;
     while (!quit) {
         auto t_before_work = std::chrono::steady_clock::now();

         localStore->clearPathInfoCache();

-        bool done = getQueuedBuilds(conn, destStore, lastBuildId);
+        bool done = getQueuedBuilds(conn, destStore);

         if (buildOne && buildOneDone) quit = true;
@@ -64,12 +62,10 @@ void State::queueMonitorLoop(Connection & conn)
             conn.get_notifs();

             if (auto lowestId = buildsAdded.get()) {
-                lastBuildId = std::min(lastBuildId, static_cast<unsigned int>(std::stoul(*lowestId) - 1));
                 printMsg(lvlTalkative, "got notification: new builds added to the queue");
             }
             if (buildsRestarted.get()) {
                 printMsg(lvlTalkative, "got notification: builds restarted");
-                lastBuildId = 0; // check all builds
             }
             if (buildsCancelled.get() || buildsDeleted.get() || buildsBumped.get()) {
                 printMsg(lvlTalkative, "got notification: builds cancelled or bumped");
@@ -96,11 +92,11 @@ struct PreviousFailure : public std::exception {

 bool State::getQueuedBuilds(Connection & conn,
-    ref<Store> destStore, unsigned int & lastBuildId)
+    ref<Store> destStore)
 {
     prom.queue_checks_started.Increment();

-    printInfo("checking the queue for builds > %d...", lastBuildId);
+    printInfo("checking the queue for builds...");

     /* Grab the queued builds from the database, but don't process
        them yet (since we don't want a long-running transaction). */
@@ -108,8 +104,6 @@ bool State::getQueuedBuilds(Connection & conn,
     std::vector<BuildID> newIDs;
     std::map<BuildID, Build::ptr> newBuildsByID;
     std::multimap<nix::StorePath, BuildID> newBuildsByPath;

-    unsigned int newLastBuildId = lastBuildId;
-
     {
         pqxx::work txn(conn);

@@ -118,17 +112,12 @@ bool State::getQueuedBuilds(Connection & conn,
             "jobsets.name as jobset, job, drvPath, maxsilent, timeout, timestamp, "
             "globalPriority, priority from Builds "
             "inner join jobsets on builds.jobset_id = jobsets.id "
-            "where builds.id > $1 and finished = 0 order by globalPriority desc, builds.id",
-            lastBuildId);
+            "where finished = 0 order by globalPriority desc, builds.id");

         for (auto const & row : res) {
             auto builds_(builds.lock());
             BuildID id = row["id"].as<BuildID>();
             if (buildOne && id != buildOne) continue;
-            if (id > newLastBuildId) {
-                newLastBuildId = id;
-                prom.queue_max_id.Set(id);
-            }
             if (builds_->count(id)) continue;

             auto build = std::make_shared<Build>(
@@ -337,8 +326,6 @@ bool State::getQueuedBuilds(Connection & conn,
     }

     prom.queue_checks_finished.Increment();
-
-    lastBuildId = newBuildsByID.empty() ? newLastBuildId : newBuildsByID.begin()->first - 1;
     return newBuildsByID.empty();
 }
diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh
index 5e05157b..4cb295e7 100644
--- a/src/hydra-queue-runner/state.hh
+++ b/src/hydra-queue-runner/state.hh
@@ -457,7 +457,6 @@ private:
         prometheus::Counter& queue_steps_created;
         prometheus::Counter& queue_checks_early_exits;
         prometheus::Counter& queue_checks_finished;
-        prometheus::Gauge& queue_max_id;

         prometheus::Counter& dispatcher_time_spent_running;
         prometheus::Counter& dispatcher_time_spent_waiting;
@@ -507,8 +506,7 @@ private:
     void queueMonitorLoop(Connection & conn);

     /* Check the queue for new builds. */
-    bool getQueuedBuilds(Connection & conn,
-        nix::ref<Store> destStore, unsigned int & lastBuildId);
+    bool getQueuedBuilds(Connection & conn, nix::ref<Store> destStore);

     /* Handle cancellation, deletion and priority bumps. */
     void processQueueChange(Connection & conn);
From 9265fc5002f6c1073d32c0ced999f334fefd4bc6 Mon Sep 17 00:00:00 2001
From: Pierre Bourdon
Date: Sat, 20 Apr 2024 16:58:10 +0200
Subject: [PATCH 934/965] queue-runner: reduce the time between queue monitor
 restarts

This will induce more DB queries (though these are fairly cheap), but
at the benefit of processing bumps within 1m instead of within 10m.
---
 src/hydra-queue-runner/queue-monitor.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/hydra-queue-runner/queue-monitor.cc b/src/hydra-queue-runner/queue-monitor.cc
index a9d386d0..2049a6c1 100644
--- a/src/hydra-queue-runner/queue-monitor.cc
+++ b/src/hydra-queue-runner/queue-monitor.cc
@@ -319,7 +319,7 @@ bool State::getQueuedBuilds(Connection & conn,
         /* Stop after a certain time to allow priority bumps to be
            processed. */
-        if (std::chrono::system_clock::now() > start + std::chrono::seconds(600)) {
+        if (std::chrono::system_clock::now() > start + std::chrono::seconds(60)) {
             prom.queue_checks_early_exits.Increment();
             break;
         }
From 52a0199a9bfbb499b564a77d33fe615f39899f6b Mon Sep 17 00:00:00 2001
From: Pierre Bourdon
Date: Sat, 20 Apr 2024 22:18:13 +0200
Subject: [PATCH 935/965] queue runner: introduce some parallelism for remote
 paths lookup

Each output for a given step being ingested is looked up in parallel,
which should basically multiply the speed of builds ingestion by the
average number of outputs per derivation.
---
 src/hydra-queue-runner/queue-monitor.cc | 40 +++++++++++++++++++++----
 src/hydra-queue-runner/state.hh         |  6 ++++
 2 files changed, 40 insertions(+), 6 deletions(-)

diff --git a/src/hydra-queue-runner/queue-monitor.cc b/src/hydra-queue-runner/queue-monitor.cc
index 2049a6c1..9eab6e90 100644
--- a/src/hydra-queue-runner/queue-monitor.cc
+++ b/src/hydra-queue-runner/queue-monitor.cc
@@ -2,6 +2,7 @@
 #include "hydra-build-result.hh"
 #include
 #include
+#include
 #include

@@ -404,6 +405,34 @@ void State::processQueueChange(Connection & conn)
 }

+std::map<DrvOutput, std::optional<StorePath>> State::getMissingRemotePaths(
+    ref<Store> destStore,
+    const std::map<DrvOutput, std::optional<StorePath>> & paths)
+{
+    Sync<std::map<DrvOutput, std::optional<StorePath>>> missing_;
+    ThreadPool tp;
+
+    for (auto & [output, maybeOutputPath] : paths) {
+        if (!maybeOutputPath) {
+            auto missing(missing_.lock());
+            missing->insert({output, maybeOutputPath});
+        } else {
+            tp.enqueue([&] {
+                if (!destStore->isValidPath(*maybeOutputPath)) {
+                    auto missing(missing_.lock());
+                    missing->insert({output, maybeOutputPath});
+                }
+            });
+        }
+    }
+
+    tp.process();
+
+    auto missing(missing_.lock());
+    return *missing;
+}
+
+
 Step::ptr State::createStep(ref<Store> destStore,
     Connection & conn, Build::ptr build, const StorePath & drvPath,
     Build::ptr referringBuild, Step::ptr referringStep,
     std::set<StorePath> & finishedDrvs,
@@ -487,16 +516,15 @@ Step::ptr State::createStep(ref<Store> destStore,

     /* Are all outputs valid? */
     auto outputHashes = staticOutputHashes(*localStore, *(step->drv));
-    bool valid = true;
-    std::map<DrvOutput, std::optional<StorePath>> missing;
+    std::map<DrvOutput, std::optional<StorePath>> paths;
     for (auto & [outputName, maybeOutputPath] : destStore->queryPartialDerivationOutputMap(drvPath, &*localStore)) {
         auto outputHash = outputHashes.at(outputName);
-        if (maybeOutputPath && destStore->isValidPath(*maybeOutputPath))
-            continue;
-        valid = false;
-        missing.insert({{outputHash, outputName}, maybeOutputPath});
+        paths.insert({{outputHash, outputName}, maybeOutputPath});
     }

+    auto missing = getMissingRemotePaths(destStore, paths);
+    bool valid = missing.empty();
+
     /* Try to copy the missing paths from the local store or from
        substitutes. */
     if (!missing.empty()) {
diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh
index 4cb295e7..18101a0a 100644
--- a/src/hydra-queue-runner/state.hh
+++ b/src/hydra-queue-runner/state.hh
@@ -514,6 +514,12 @@ private:
     BuildOutput getBuildOutputCached(Connection & conn, nix::ref<Store> destStore,
         const nix::StorePath & drvPath);

+    /* Returns paths missing from the remote store. Paths are processed in
+     * parallel to work around the possible latency of remote stores. */
+    std::map<nix::DrvOutput, std::optional<nix::StorePath>> getMissingRemotePaths(
+        nix::ref<Store> destStore,
+        const std::map<nix::DrvOutput, std::optional<nix::StorePath>> & paths);
+
     Step::ptr createStep(nix::ref<Store> store,
         Connection & conn, Build::ptr build, const nix::StorePath & drvPath,
         Build::ptr referringBuild, Step::ptr referringStep,
         std::set<nix::StorePath> & finishedDrvs,
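The gain here comes purely from overlapping per-query round-trip latency. The same fan-out/join shape can be sketched with std::async instead of Nix's ThreadPool; RemoteStore below is a hypothetical stand-in with a fake validity rule, not a Nix type:

#include <future>
#include <iostream>
#include <set>
#include <string>
#include <vector>

// Hypothetical remote store whose isValidPath() would normally cost a
// network round-trip; checking paths sequentially pays that latency once
// per output, checking them in parallel pays it roughly once per step.
struct RemoteStore {
    bool isValidPath(const std::string & path) { return path.size() % 2 == 0; }
};

std::set<std::string> missingPaths(RemoteStore & store,
                                   const std::vector<std::string> & paths)
{
    std::vector<std::future<std::pair<std::string, bool>>> futures;
    for (auto & p : paths)
        futures.push_back(std::async(std::launch::async, [&store, p] {
            return std::make_pair(p, store.isValidPath(p));
        }));

    std::set<std::string> missing;
    for (auto & f : futures) {
        auto [path, valid] = f.get();
        if (!valid) missing.insert(path);
    }
    return missing;
}

int main()
{
    RemoteStore store;
    for (auto & p : missingPaths(store, {"/nix/store/aaa", "/nix/store/bbbb"}))
        std::cout << p << " is missing\n";
}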
From 8e02589ac8d6c0c71d55d3a0fa75d321b0af2e50 Mon Sep 17 00:00:00 2001
From: Pierre Bourdon
Date: Sat, 20 Apr 2024 22:49:24 +0200
Subject: [PATCH 936/965] queue-runner: switch to pseudorandom ordering of
 builds processing

We don't rely on sequential / monotonic build IDs processing anymore, so
randomizing actually has the advantage of mixing builds for different
systems together, to avoid only one chunk of builds for a single system
getting processed while builders for other systems are starved.
---
 src/hydra-queue-runner/queue-monitor.cc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/hydra-queue-runner/queue-monitor.cc b/src/hydra-queue-runner/queue-monitor.cc
index 9eab6e90..bb15ac04 100644
--- a/src/hydra-queue-runner/queue-monitor.cc
+++ b/src/hydra-queue-runner/queue-monitor.cc
@@ -102,7 +102,7 @@ bool State::getQueuedBuilds(Connection & conn,
     /* Grab the queued builds from the database, but don't process
        them yet (since we don't want a long-running transaction). */
     std::vector<BuildID> newIDs;
-    std::map<BuildID, Build::ptr> newBuildsByID;
+    std::unordered_map<BuildID, Build::ptr> newBuildsByID;
     std::multimap<nix::StorePath, BuildID> newBuildsByPath;

@@ -113,7 +113,7 @@ bool State::getQueuedBuilds(Connection & conn,
             "jobsets.name as jobset, job, drvPath, maxsilent, timeout, timestamp, "
             "globalPriority, priority from Builds "
             "inner join jobsets on builds.jobset_id = jobsets.id "
-            "where finished = 0 order by globalPriority desc, builds.id");
+            "where finished = 0 order by globalPriority desc, random()");

         for (auto const & row : res) {
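Since builds are now tracked in unordered containers, the database is free to return them in any order, and the query delegates the shuffling to PostgreSQL itself. A minimal libpqxx sketch of the idea; the connection string, the column list and the limit are assumptions for illustration, not Hydra's exact query:

#include <iostream>
#include <pqxx/pqxx>

int main()
{
    pqxx::connection conn{"dbname=hydra"}; // placeholder connection string
    pqxx::work txn{conn};

    // Highest priority first; ties are returned in pseudorandom order so
    // one system's builds cannot monopolize the front of the queue.
    pqxx::result rows = txn.exec(
        "select id, system from Builds where finished = 0 "
        "order by globalPriority desc, random() limit 10");

    for (auto const & row : rows)
        std::cout << row["id"].as<long>() << " "
                  << row["system"].as<std::string>() << "\n";
}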
From 78687e23cf1c8d341253689d07d40393f6e5edc8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B6rg=20Thalheim?=
Date: Mon, 7 Apr 2025 18:43:12 +0200
Subject: [PATCH 937/965] test/gitea: fix eval

---
 nixos-tests.nix | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/nixos-tests.nix b/nixos-tests.nix
index 77dd8f8a..c70a3cd1 100644
--- a/nixos-tests.nix
+++ b/nixos-tests.nix
@@ -99,6 +99,9 @@ in
     });

   gitea = forEachSystem (system:
+    let
+      pkgs = nixpkgs.legacyPackages.${system};
+    in
     (import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).makeTest {
       name = "hydra-gitea";
       nodes.machine = { pkgs, ... }: {
         imports = [ hydraServer ];
@@ -125,7 +128,7 @@ in
         networking.firewall.allowedTCPPorts = [ 3000 ];
       };
       skipLint = true;
-      testScript = { pkgs, ... }:
+      testScript =
         let
           scripts.mktoken = pkgs.writeText "token.sql" ''
             INSERT INTO access_token (id, uid, name, created_unix, updated_unix, token_hash, token_salt, token_last_eight, scope) VALUES (1, 1, 'hydra', 1617107360, 1617107360, 'a930f319ca362d7b49a4040ac0af74521c3a3c3303a86f327b01994430672d33b6ec53e4ea774253208686c712495e12a486', 'XRjWE9YW0g', '31d3a9c7', 'all');

From c94ba404fd80538fae0c29788b3dab9deb9f19e7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B6rg=20Thalheim?=
Date: Mon, 7 Apr 2025 18:54:39 +0200
Subject: [PATCH 938/965] don't build hydra twice in a pull request + enable
 merge queue

---
 .github/workflows/test.yml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 42cb6843..9c05d752 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -1,7 +1,10 @@
 name: "Test"
 on:
   pull_request:
+  merge_group:
   push:
+    branches:
+      - master
 jobs:
   tests:
     runs-on: ubuntu-latest

From b3a433336ed4bd2532ff7d71cb2ced90b389700b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B6rg=20Thalheim?=
Date: Mon, 7 Apr 2025 18:43:56 +0200
Subject: [PATCH 939/965] bump nixpkgs

---
 flake.lock | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/flake.lock b/flake.lock
index 3a7a1672..ddff55ec 100644
--- a/flake.lock
+++ b/flake.lock
@@ -44,11 +44,11 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1739461644,
-        "narHash": "sha256-1o1qR0KYozYGRrnqytSpAhVBYLNBHX+Lv6I39zGRzKM=",
+        "lastModified": 1743987495,
+        "narHash": "sha256-46T2vMZ4/AfCK0Y2OjlFzJPxmdpP8GtsuEqSSJv3oe4=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "97a719c9f0a07923c957cf51b20b329f9fb9d43f",
+        "rev": "db8f4fe18ce772a9c8f3adf321416981c8fe9371",
         "type": "github"
       },
       "original": {

From 8a6482bb1c3e3c2361fd3cb0a86c3a86d0420e97 Mon Sep 17 00:00:00 2001
From: K900
Date: Tue, 1 Oct 2024 19:14:24 +0300
Subject: [PATCH 940/965] Add metric for builds waiting for download slot

(cherry picked from commit f23ec71227911891807706b6b978836e4d80edde)
---
 src/hydra-queue-runner/build-remote.cc       | 1 +
 src/hydra-queue-runner/hydra-queue-runner.cc | 1 +
 src/hydra-queue-runner/state.hh              | 1 +
 3 files changed, 3 insertions(+)

diff --git a/src/hydra-queue-runner/build-remote.cc b/src/hydra-queue-runner/build-remote.cc
index 0c8b3f10..e25da38d 100644
--- a/src/hydra-queue-runner/build-remote.cc
+++ b/src/hydra-queue-runner/build-remote.cc
@@ -540,6 +540,7 @@ void State::buildRemote(ref<Store> destStore,
     /* Throttle CPU-bound work. Opportunistically skip updating the current
      * step, since this requires a DB roundtrip. */
     if (!localWorkThrottler.try_acquire()) {
+        MaintainCount<counter> mc(nrStepsWaitingForDownloadSlot);
         updateStep(ssWaitingForLocalSlot);
         localWorkThrottler.acquire();
     }
diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc
index 405c44d1..adc903d8 100644
--- a/src/hydra-queue-runner/hydra-queue-runner.cc
+++ b/src/hydra-queue-runner/hydra-queue-runner.cc
@@ -573,6 +573,7 @@ void State::dumpStatus(Connection & conn)
             {"nrActiveSteps", activeSteps_.lock()->size()},
             {"nrStepsBuilding", nrStepsBuilding.load()},
             {"nrStepsCopyingTo", nrStepsCopyingTo.load()},
+            {"nrStepsWaitingForDownloadSlot", nrStepsWaitingForDownloadSlot.load()},
             {"nrStepsCopyingFrom", nrStepsCopyingFrom.load()},
             {"nrStepsWaiting", nrStepsWaiting.load()},
             {"nrUnsupportedSteps", nrUnsupportedSteps.load()},
diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh
index 18101a0a..12aead40 100644
--- a/src/hydra-queue-runner/state.hh
+++ b/src/hydra-queue-runner/state.hh
@@ -369,6 +369,7 @@ private:
     counter nrStepsDone{0};
     counter nrStepsBuilding{0};
     counter nrStepsCopyingTo{0};
+    counter nrStepsWaitingForDownloadSlot{0};
     counter nrStepsCopyingFrom{0};
     counter nrStepsWaiting{0};
     counter nrUnsupportedSteps{0};
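MaintainCount, used in the hunk above, is a small RAII helper from Nix; a minimal reimplementation shows why it is the right tool for a "currently waiting" statistic. This sketch is my own approximation, not Nix's exact code:

#include <atomic>
#include <iostream>

// RAII guard: bump a counter for the lifetime of a scope, so a gauge like
// nrStepsWaitingForDownloadSlot always reflects the number of threads
// currently inside that scope, even when the scope exits via an exception.
template<typename T>
struct MaintainCount
{
    T & counter;
    explicit MaintainCount(T & c) : counter(c) { ++counter; }
    ~MaintainCount() { --counter; }
};

std::atomic<unsigned long> nrWaiting{0};

void waitForSlot()
{
    MaintainCount<std::atomic<unsigned long>> mc(nrWaiting);
    // ... block on the semaphore here; nrWaiting stays accurate ...
}

int main()
{
    waitForSlot();
    std::cout << "currently waiting: " << nrWaiting << "\n"; // back to 0
}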
From 143a07bff0a4a46f132a404413192e10dcc7150f Mon Sep 17 00:00:00 2001
From: Pierre Bourdon
Date: Sun, 21 Apr 2024 01:46:41 +0200
Subject: [PATCH 941/965] queue-runner: release machine reservation while
 copying outputs

This allows for better builder usage when the queue runner is busy. To
avoid running into uncontrollable imbalances between builder/queue
runner, we only release the machine reservation after the local
throttler has found a slot to start copying the outputs for that build.

As opposed to asserting uniqueness to understand resource utilization,
we just switch to using `std::unique_ptr`.
---
 src/hydra-queue-runner/build-remote.cc |  9 +++++++++
 src/hydra-queue-runner/builder.cc      | 24 +++++++++++-------------
 src/hydra-queue-runner/dispatcher.cc   |  2 +-
 src/hydra-queue-runner/state.hh        |  6 +++---
 4 files changed, 24 insertions(+), 17 deletions(-)

diff --git a/src/hydra-queue-runner/build-remote.cc b/src/hydra-queue-runner/build-remote.cc
index e25da38d..7e307c75 100644
--- a/src/hydra-queue-runner/build-remote.cc
+++ b/src/hydra-queue-runner/build-remote.cc
@@ -398,6 +398,7 @@ private:
 };

 void State::buildRemote(ref<Store> destStore,
+    std::unique_ptr<MachineReservation> reservation,
     ::Machine::ptr machine, Step::ptr step,
     const ServeProto::BuildOptions & buildOptions,
     RemoteResult & result, std::shared_ptr<ActiveStep> activeStep,
@@ -546,6 +547,14 @@ void State::buildRemote(ref<Store> destStore,
     }
     SemaphoreReleaser releaser(&localWorkThrottler);

+    /* Once we've started copying outputs, release the machine reservation
+     * so further builds can happen. We do not release the machine earlier
+     * to avoid situations where the queue runner is bottlenecked on
+     * copying outputs and we end up building too many things that we
+     * haven't been able to allow copy slots for. */
+    reservation.reset();
+    wakeDispatcher();
+
     StorePathSet outputs;
     for (auto & [_, realisation] : buildResult.builtOutputs)
         outputs.insert(realisation.outPath);
diff --git a/src/hydra-queue-runner/builder.cc b/src/hydra-queue-runner/builder.cc
index 018215a5..ff0634b1 100644
--- a/src/hydra-queue-runner/builder.cc
+++ b/src/hydra-queue-runner/builder.cc
@@ -16,7 +16,7 @@ void setThreadName(const std::string & name)
 }

-void State::builder(MachineReservation::ptr reservation)
+void State::builder(std::unique_ptr<MachineReservation> reservation)
 {
     setThreadName("bld~" + std::string(reservation->step->drvPath.to_string()));

@@ -35,22 +35,20 @@ void State::builder(MachineReservation::ptr reservation)
         activeSteps_.lock()->erase(activeStep);
     });

+    std::string machine = reservation->machine->storeUri.render();
+
     try {
         auto destStore = getDestStore();
-        res = doBuildStep(destStore, reservation, activeStep);
+        // Might release the reservation.
+        res = doBuildStep(destStore, std::move(reservation), activeStep);
     } catch (std::exception & e) {
         printMsg(lvlError, "uncaught exception building ‘%s’ on ‘%s’: %s",
-            localStore->printStorePath(reservation->step->drvPath),
-            reservation->machine->storeUri.render(),
+            localStore->printStorePath(activeStep->step->drvPath),
+            machine,
             e.what());
     }
 }

-    /* Release the machine and wake up the dispatcher. */
-    assert(reservation.unique());
-    reservation = 0;
-    wakeDispatcher();
-
     /* If there was a temporary failure, retry the step after an
        exponentially increasing interval. */
     Step::ptr step = wstep.lock();
@@ -72,11 +70,11 @@ void State::builder(MachineReservation::ptr reservation)


 State::StepResult State::doBuildStep(nix::ref<Store> destStore,
-    MachineReservation::ptr reservation,
+    std::unique_ptr<MachineReservation> reservation,
     std::shared_ptr<ActiveStep> activeStep)
 {
-    auto & step(reservation->step);
-    auto & machine(reservation->machine);
+    auto step(reservation->step);
+    auto machine(reservation->machine);

     {
         auto step_(step->state.lock());
@@ -211,7 +209,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,

         try {

             /* FIXME: referring builds may have conflicting timeouts. */
-            buildRemote(destStore, machine, step, buildOptions, result, activeStep, updateStep, narMembers);
+            buildRemote(destStore, std::move(reservation), machine, step, buildOptions, result, activeStep, updateStep, narMembers);
         } catch (Error & e) {
             if (activeStep->state_.lock()->cancelled) {
                 printInfo("marking step %d of build %d as cancelled", stepNr, buildId);
diff --git a/src/hydra-queue-runner/dispatcher.cc b/src/hydra-queue-runner/dispatcher.cc
index 11db0071..d3e145de 100644
--- a/src/hydra-queue-runner/dispatcher.cc
+++ b/src/hydra-queue-runner/dispatcher.cc
@@ -288,7 +288,7 @@ system_time State::doDispatch()

         /* Make a slot reservation and start a thread to do the
            build. */
         auto builderThread = std::thread(&State::builder, this,
-            std::make_shared<MachineReservation>(*this, step, mi.machine));
+            std::make_unique<MachineReservation>(*this, step, mi.machine));
         builderThread.detach(); // FIXME?

         keepGoing = true;
diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh
index 12aead40..edfad4fb 100644
--- a/src/hydra-queue-runner/state.hh
+++ b/src/hydra-queue-runner/state.hh
@@ -400,7 +400,6 @@ private:

     struct MachineReservation
     {
-        typedef std::shared_ptr<MachineReservation> ptr;
         State & state;
         Step::ptr step;
         Machine::ptr machine;
@@ -550,16 +549,17 @@ private:

     void abortUnsupported();

-    void builder(MachineReservation::ptr reservation);
+    void builder(std::unique_ptr<MachineReservation> reservation);

     /* Perform the given build step. Return true if the step is to be
*/ enum StepResult { sDone, sRetry, sMaybeCancelled }; StepResult doBuildStep(nix::ref destStore, - MachineReservation::ptr reservation, + std::unique_ptr reservation, std::shared_ptr activeStep); void buildRemote(nix::ref destStore, + std::unique_ptr reservation, Machine::ptr machine, Step::ptr step, const nix::ServeProto::BuildOptions & buildOptions, RemoteResult & result, std::shared_ptr activeStep, From b4322edd05ba509dfe374625b88e5aeb71cd268f Mon Sep 17 00:00:00 2001 From: Pierre Bourdon Date: Fri, 12 Apr 2024 17:33:27 +0200 Subject: [PATCH 942/965] web: replace 'errormsg' with 'errormsg IS NULL' in most cases

This is implemented in an extremely hacky way due to poor DBIx feature support. Ideally, what we'd need is a way to tell DBIx to ignore the errormsg column unless explicitly requested, and to automatically add a computed 'errormsg IS NULL' column in others. Since it does not support that, this commit instead hacks some support via method overrides while taking care to not break anything obvious.

--- package.nix | 1 + src/lib/Hydra/Controller/Jobset.pm | 6 ++++ src/lib/Hydra/Controller/JobsetEval.pm | 2 ++ src/lib/Hydra/Helper/Nix.pm | 3 +- .../Hydra/Schema/Result/EvaluationErrors.pm | 2 ++ src/lib/Hydra/Schema/Result/Jobsets.pm | 2 ++ .../Schema/ResultSet/EvaluationErrors.pm | 30 +++++++++++++++++++ src/lib/Hydra/Schema/ResultSet/Jobsets.pm | 30 +++++++++++++++++++ src/root/common.tt | 4 +-- src/root/jobset-eval.tt | 4 +-- src/root/jobset.tt | 6 ++-- t/evaluator/evaluate-constituents-globbing.t | 8 ++--- t/lib/CliRunners.pm | 4 +-- t/queue-runner/direct-indirect-constituents.t | 2 +- 14 files changed, 88 insertions(+), 16 deletions(-) create mode 100644 src/lib/Hydra/Schema/ResultSet/EvaluationErrors.pm create mode 100644 src/lib/Hydra/Schema/ResultSet/Jobsets.pm diff --git a/package.nix b/package.nix index 8bf6a199..4a7840c1 100644 --- a/package.nix +++ b/package.nix @@ -89,6 +89,7 @@ let DateTime DBDPg DBDSQLite + DBIxClassHelpers DigestSHA1 EmailMIME EmailSender diff --git a/src/lib/Hydra/Controller/Jobset.pm b/src/lib/Hydra/Controller/Jobset.pm index 20a52f6f..bc7d7444 100644 --- a/src/lib/Hydra/Controller/Jobset.pm +++ b/src/lib/Hydra/Controller/Jobset.pm @@ -371,6 +371,12 @@ sub errors_GET { $c->stash->{template} = 'eval-error.tt'; + my $jobsetName = $c->stash->{params}->{name}; + $c->stash->{jobset} = $c->stash->{project}->jobsets->find( + { name => $jobsetName }, + { '+columns' => { 'errormsg' => 'errormsg' } } + ); + $self->status_ok($c, entity => $c->stash->{jobset}); } diff --git a/src/lib/Hydra/Controller/JobsetEval.pm b/src/lib/Hydra/Controller/JobsetEval.pm index aca03d72..643a516c 100644 --- a/src/lib/Hydra/Controller/JobsetEval.pm +++ b/src/lib/Hydra/Controller/JobsetEval.pm @@ -93,6 +93,8 @@ sub errors_GET { $c->stash->{template} = 'eval-error.tt'; + $c->stash->{eval} = $c->model('DB::JobsetEvals')->find($c->stash->{eval}->id, { prefetch => 'evaluationerror' }); + $self->status_ok($c, entity => $c->stash->{eval}); } diff --git a/src/lib/Hydra/Helper/Nix.pm b/src/lib/Hydra/Helper/Nix.pm index 88fbdd6d..134b8b7e 100644 --- a/src/lib/Hydra/Helper/Nix.pm +++ b/src/lib/Hydra/Helper/Nix.pm @@ -297,8 +297,7 @@ sub getEvals { my @evals = $evals_result_set->search( { hasnewbuilds => 1 }, - { order_by => "$me.id DESC", rows => $rows, offset => $offset , prefetch => { evaluationerror => [ ] } }); + { order_by => "$me.id DESC", rows => $rows, offset => $offset }); my @res = (); my $cache = {}; diff --git a/src/lib/Hydra/Schema/Result/EvaluationErrors.pm
b/src/lib/Hydra/Schema/Result/EvaluationErrors.pm index 7033fa5e..f6cc48db 100644 --- a/src/lib/Hydra/Schema/Result/EvaluationErrors.pm +++ b/src/lib/Hydra/Schema/Result/EvaluationErrors.pm @@ -105,4 +105,6 @@ __PACKAGE__->add_column( "+id" => { retrieve_on_insert => 1 } ); +__PACKAGE__->mk_group_accessors('column' => 'has_error'); + 1; diff --git a/src/lib/Hydra/Schema/Result/Jobsets.pm b/src/lib/Hydra/Schema/Result/Jobsets.pm index cd704ac8..aee87e00 100644 --- a/src/lib/Hydra/Schema/Result/Jobsets.pm +++ b/src/lib/Hydra/Schema/Result/Jobsets.pm @@ -386,6 +386,8 @@ __PACKAGE__->add_column( "+id" => { retrieve_on_insert => 1 } ); +__PACKAGE__->mk_group_accessors('column' => 'has_error'); + sub supportsDynamicRunCommand { my ($self) = @_; diff --git a/src/lib/Hydra/Schema/ResultSet/EvaluationErrors.pm b/src/lib/Hydra/Schema/ResultSet/EvaluationErrors.pm new file mode 100644 index 00000000..a4c6d955 --- /dev/null +++ b/src/lib/Hydra/Schema/ResultSet/EvaluationErrors.pm @@ -0,0 +1,30 @@ +package Hydra::Schema::ResultSet::EvaluationErrors; + +use strict; +use utf8; +use warnings; + +use parent 'DBIx::Class::ResultSet'; + +use Storable qw(dclone); + +__PACKAGE__->load_components('Helper::ResultSet::RemoveColumns'); + +# Exclude expensive error message values unless explicitly requested, and +# replace them with a summary field describing their presence/absence. +sub search_rs { + my ( $class, $query, $attrs ) = @_; + + if ($attrs) { + $attrs = dclone($attrs); + } + + unless (exists $attrs->{'select'} || exists $attrs->{'columns'}) { + $attrs->{'+columns'}->{'has_error'} = "errormsg != ''"; + } + unless (exists $attrs->{'+columns'}->{'errormsg'}) { + push @{ $attrs->{'remove_columns'} }, 'errormsg'; + } + + return $class->next::method($query, $attrs); +} diff --git a/src/lib/Hydra/Schema/ResultSet/Jobsets.pm b/src/lib/Hydra/Schema/ResultSet/Jobsets.pm new file mode 100644 index 00000000..1b2a12e3 --- /dev/null +++ b/src/lib/Hydra/Schema/ResultSet/Jobsets.pm @@ -0,0 +1,30 @@ +package Hydra::Schema::ResultSet::Jobsets; + +use strict; +use utf8; +use warnings; + +use parent 'DBIx::Class::ResultSet'; + +use Storable qw(dclone); + +__PACKAGE__->load_components('Helper::ResultSet::RemoveColumns'); + +# Exclude expensive error message values unless explicitly requested, and +# replace them with a summary field describing their presence/absence. 
+sub search_rs { + my ( $class, $query, $attrs ) = @_; + + if ($attrs) { + $attrs = dclone($attrs); + } + + unless (exists $attrs->{'select'} || exists $attrs->{'columns'}) { + $attrs->{'+columns'}->{'has_error'} = "errormsg != ''"; + } + unless (exists $attrs->{'+columns'}->{'errormsg'}) { + push @{ $attrs->{'remove_columns'} }, 'errormsg'; + } + + return $class->next::method($query, $attrs); +} diff --git a/src/root/common.tt b/src/root/common.tt index 6348bee7..86335a74 100644 --- a/src/root/common.tt +++ b/src/root/common.tt @@ -513,7 +513,7 @@ BLOCK renderEvals %] ELSE %] - [% END %] - [% IF eval.evaluationerror.errormsg %] + [% IF eval.evaluationerror.has_error %] Eval Errors [% END %] @@ -639,7 +639,7 @@ BLOCK renderJobsetOverview %] [% IF j.get_column('nrtotal') > 0 %] diff --git a/src/root/jobset-eval.tt b/src/root/jobset-eval.tt index 146878f2..f0b92f97 100644 --- a/src/root/jobset-eval.tt +++ b/src/root/jobset-eval.tt @@ -90,7 +90,7 @@ c.uri_for(c.controller('JobsetEval').action_for('view'), [% END %] - [% IF eval.evaluationerror.errormsg %] + [% IF eval.evaluationerror.has_error %] [% END %] @@ -165,7 +165,7 @@ c.uri_for(c.controller('JobsetEval').action_for('view'), [% END %] - [% IF eval.evaluationerror.errormsg %] + [% IF eval.evaluationerror.has_error %]
diff --git a/src/root/jobset.tt b/src/root/jobset.tt index 5afcbfde..3e594756 100644 --- a/src/root/jobset.tt +++ b/src/root/jobset.tt @@ -61,7 +61,7 @@ [% END %] - [% IF jobset.errormsg || jobset.fetcherrormsg %] + [% IF jobset.has_error || jobset.fetcherrormsg %] [% END %] @@ -79,7 +79,7 @@ -
JobSystem Build Step What
[% INCLUDE renderFullJobName project=step.project jobset=step.jobset job=step.job %][% step.system %] [% step.build %] [% IF step.busy >= 30 %][% step.stepnr %][% ELSE; step.stepnr; END %] [% step.drvpath.match('-(.*)').0 %][% HTML.escape(j.description) %] [% IF j.lastcheckedtime; INCLUDE renderDateTime timestamp = j.lastcheckedtime; - IF j.errormsg || j.fetcherrormsg; %] Error[% END; + IF j.has_error || j.fetcherrormsg; %] Error[% END; ELSE; "-"; END %]Last checked: [% IF jobset.lastcheckedtime %] - [% INCLUDE renderDateTime timestamp = jobset.lastcheckedtime %], [% IF jobset.errormsg || jobset.fetcherrormsg %]with errors![% ELSE %]no errors[% END %] + [% INCLUDE renderDateTime timestamp = jobset.lastcheckedtime %], [% IF jobset.has_error || jobset.fetcherrormsg %]with errors![% ELSE %]no errors[% END %] [% ELSE %] never [% END %] @@ -117,7 +117,7 @@ - [% IF jobset.errormsg || jobset.fetcherrormsg %] + [% IF jobset.has_error || jobset.fetcherrormsg %]
diff --git a/t/evaluator/evaluate-constituents-globbing.t b/t/evaluator/evaluate-constituents-globbing.t index 49315d58..0ed0ee7c 100644 --- a/t/evaluator/evaluate-constituents-globbing.t +++ b/t/evaluator/evaluate-constituents-globbing.t @@ -61,8 +61,8 @@ subtest "* selects all except current aggregate" => sub { $jobset->discard_changes; # refresh from DB is( - $jobset->errormsg, - "", + $jobset->has_error, + 0, "eval-errors non-empty" ); }; @@ -101,7 +101,7 @@ subtest "trivial cycle check" => sub { ok(utf8::decode($stderr), "Stderr output is UTF8-clean"); - $jobset->discard_changes; # refresh from DB + $jobset->discard_changes({ '+columns' => {'errormsg' => 'errormsg'} }); # refresh from DB like( $jobset->errormsg, qr/Dependency cycle: indirect_aggregate <-> ok_aggregate/, @@ -123,7 +123,7 @@ subtest "cycle check with globbing" => sub { ok(utf8::decode($stderr), "Stderr output is UTF8-clean"); - $jobset->discard_changes; # refresh from DB + $jobset->discard_changes({ '+columns' => {'errormsg' => 'errormsg'} }); # refresh from DB like( $jobset->errormsg, qr/aggregate job ‘indirect_aggregate’ failed with the error: Dependency cycle: indirect_aggregate <-> packages.constituentA/, diff --git a/t/lib/CliRunners.pm b/t/lib/CliRunners.pm index 885f2ae4..8c53b551 100644 --- a/t/lib/CliRunners.pm +++ b/t/lib/CliRunners.pm @@ -14,7 +14,7 @@ our @EXPORT = qw( sub evalSucceeds { my ($jobset) = @_; my ($res, $stdout, $stderr) = captureStdoutStderr(60, ("hydra-eval-jobset", $jobset->project->name, $jobset->name)); - $jobset->discard_changes; # refresh from DB + $jobset->discard_changes({ '+columns' => {'errormsg' => 'errormsg'} }); # refresh from DB if ($res) { chomp $stdout; chomp $stderr; utf8::decode($stdout) or die "Invalid unicode in stdout."; @@ -29,7 +29,7 @@ sub evalSucceeds { sub evalFails { my ($jobset) = @_; my ($res, $stdout, $stderr) = captureStdoutStderr(60, ("hydra-eval-jobset", $jobset->project->name, $jobset->name)); - $jobset->discard_changes; # refresh from DB + $jobset->discard_changes({ '+columns' => {'errormsg' => 'errormsg'} }); # refresh from DB if (!$res) { chomp $stdout; chomp $stderr; utf8::decode($stdout) or die "Invalid unicode in stdout."; diff --git a/t/queue-runner/direct-indirect-constituents.t b/t/queue-runner/direct-indirect-constituents.t index 35370450..a017c76f 100644 --- a/t/queue-runner/direct-indirect-constituents.t +++ b/t/queue-runner/direct-indirect-constituents.t @@ -13,7 +13,7 @@ my $constituentBuildA = $builds->{"constituentA"}; my $constituentBuildB = $builds->{"constituentB"}; my $eval = $constituentBuildA->jobsetevals->first(); -is($eval->evaluationerror->errormsg, ""); +is($eval->evaluationerror->has_error, 0); subtest "Verifying the direct aggregate" => sub { my $aggBuild = $builds->{"direct_aggregate"}; From a5b17d0686d9443dab0a552800d8762b8f7422af Mon Sep 17 00:00:00 2001 From: John Ericson Date: Tue, 8 Apr 2025 17:38:19 -0400 Subject: [PATCH 943/965] Queue-runner: Always produce a machines JSON object Even if there are no machines, there should at least be an empty object. 
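To see why the explicit object matters, here is a minimal self-contained sketch (not Hydra's actual code; the machine map and its integer payload are invented for illustration), using nlohmann::json as the diff below does:

```cpp
#include <iostream>
#include <map>
#include <string>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

int main()
{
    // Invented stand-in for the queue runner's machine table; empty here.
    std::map<std::string, int> machines;

    // Old pattern: "machines" is only created by the first assignment in
    // the loop, so with zero machines the key is missing entirely.
    json before = json::object();
    for (auto & [uri, currentJobs] : machines)
        before["machines"][uri] = currentJobs;
    std::cout << before.dump() << "\n";   // prints: {}

    // New pattern: build the object first and assign it unconditionally,
    // so consumers always see at least an empty "machines" object.
    json after = json::object();
    auto machines_json = json::object();
    for (auto & [uri, currentJobs] : machines)
        machines_json[uri] = currentJobs;
    after["machines"] = machines_json;
    std::cout << after.dump() << "\n";    // prints: {"machines":{}}
}
```

That unconditional assignment is exactly what the `statusJson["machines"] = machines_json;` line in the diff below provides, so status consumers no longer have to special-case a missing key.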
--- src/hydra-queue-runner/hydra-queue-runner.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index adc903d8..05d7e263 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -615,6 +615,7 @@ void State::dumpStatus(Connection & conn) } { + auto machines_json = json::object(); auto machines_(machines.lock()); for (auto & i : *machines_) { auto & m(i.second); @@ -641,8 +642,9 @@ void State::dumpStatus(Connection & conn) machine["avgStepTime"] = (float) s->totalStepTime / s->nrStepsDone; machine["avgStepBuildTime"] = (float) s->totalStepBuildTime / s->nrStepsDone; } - statusJson["machines"][m->storeUri.render()] = machine; + machines_json[m->storeUri.render()] = machine; } + statusJson["machines"] = machines_json; } { From 7e0157e387e6bacc83c8a566eba609516415761e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sandro=20J=C3=A4ckel?= Date: Wed, 9 Apr 2025 17:53:14 +0200 Subject: [PATCH 944/965] Fix compilation with a nix which was compiled without aws sdk

--- src/hydra-queue-runner/hydra-queue-runner.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index 05d7e263..ab146312 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -703,6 +703,7 @@ void State::dumpStatus(Connection & conn) : 0.0}, }; +#if NIX_WITH_S3_SUPPORT auto s3Store = dynamic_cast(&*store); if (s3Store) { auto & s3Stats = s3Store->getS3Stats(); @@ -728,6 +729,7 @@ void State::dumpStatus(Connection & conn) + s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09}, }; } +#endif } { From f1a976d3fdd40e6880cf7d2b6c1b97132f89934e Mon Sep 17 00:00:00 2001 From: Maximilian Bosch Date: Thu, 3 Apr 2025 10:33:57 +0200 Subject: [PATCH 945/965] Fix displaying eval errors in jobset eval view

Quickfix for something that annoyed me once too often. Specifically, I'm talking about `/eval/1#tabs-errors`. To not fetch long errors on each request, this is only done on-demand. I.e., when the tab is opened, an iframe is requested with the errors. This iframe uses a template for both the jobset view and the jobset-eval view. It is differentiated by checking if `jobset` or `eval` is defined. However, the jobset-eval view also has a `jobset` variable in its stash, which means that in both cases the `if` path was used. Since `jobset.fetcherrormsg` isn't defined in the eval case though, you always got an empty error.

The band-aid fix is relatively simple: swap if and else. The `eval` variable is not defined in the stash of the jobset view, so now this is a useful condition to decide which view we're in.

(cherry picked from commit https://git.lix.systems/lix-project/hydra/commit/70c3d75f739b184b36908a2c898332444482d1a1) --- src/root/eval-error.tt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/root/eval-error.tt b/src/root/eval-error.tt index c2ea28ec..7b6e702f 100644 --- a/src/root/eval-error.tt +++ b/src/root/eval-error.tt @@ -13,12 +13,12 @@
- [% IF jobset %] -

Errors occurred at [% INCLUDE renderDateTime timestamp=(jobset.errortime || jobset.lastcheckedtime) %].

-
[% HTML.escape(jobset.fetcherrormsg || jobset.errormsg) %]
- [% ELSIF eval %] + [% IF eval %]

Errors occurred at [% INCLUDE renderDateTime timestamp=(eval.evaluationerror.errortime || eval.timestamp) %].

[% HTML.escape(eval.evaluationerror.errormsg) %]
+ [% ELSIF jobset %] +

Errors occurred at [% INCLUDE renderDateTime timestamp=(jobset.errortime || jobset.lastcheckedtime) %].

+
[% HTML.escape(jobset.fetcherrormsg || jobset.errormsg) %]
[% END %]
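The shape of this fix generalizes beyond Template Toolkit: when one context defines both variables and the other defines only one, a shared renderer has to test the variable that is unique to a single context first. Below is a hedged C++ analogue (the Stash struct and error strings are invented for illustration; this sketches the branch ordering, not Hydra code):

```cpp
#include <iostream>
#include <optional>
#include <string>

struct Stash {
    std::optional<std::string> evalError;   // set only in the eval view
    std::optional<std::string> jobsetError; // set in both views
};

std::string renderErrors(const Stash & s)
{
    if (s.evalError)        // test the more specific variable first
        return *s.evalError;
    else if (s.jobsetError) // only reached in the jobset view
        return *s.jobsetError;
    return "";
}

int main()
{
    // Eval view: both variables present. The old "jobset first" order
    // returned the (empty) jobset message; the new order returns the
    // actual evaluation error.
    Stash evalView{ "evaluation failed: ...", "" };
    std::cout << renderErrors(evalView) << "\n";

    // Jobset view: only the jobset error exists, so the else branch fires.
    Stash jobsetView{ std::nullopt, "fetch error: ..." };
    std::cout << renderErrors(jobsetView) << "\n";
}
```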
From 8d750265135b7e203520036a742afdf301b4013f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Mon, 7 Apr 2025 18:54:39 +0200 Subject: [PATCH 946/965] re-enable restrict-eval for non-flakes --- flake.lock | 6 +++--- src/script/hydra-eval-jobset | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/flake.lock b/flake.lock index 3a7a1672..49168a96 100644 --- a/flake.lock +++ b/flake.lock @@ -29,11 +29,11 @@ "nix-eval-jobs": { "flake": false, "locked": { - "lastModified": 1744018595, - "narHash": "sha256-v5n6t49X7MOpqS9j0FtI6TWOXvxuZMmGsp2OfUK5QfA=", + "lastModified": 1744370057, + "narHash": "sha256-n220U5pjzCtTtOJtbga4Xr/PyllowKw9anSevgCqJEw=", "owner": "nix-community", "repo": "nix-eval-jobs", - "rev": "cba718bafe5dc1607c2b6761ecf53c641a6f3b21", + "rev": "1260c6599d22dfd8c25fea6893c3d031996b20e1", "type": "github" }, "original": { diff --git a/src/script/hydra-eval-jobset b/src/script/hydra-eval-jobset index cf3fa294..80f5d79c 100755 --- a/src/script/hydra-eval-jobset +++ b/src/script/hydra-eval-jobset @@ -372,6 +372,7 @@ sub evalJobs { or die "cannot find the input containing the job expression\n"; @cmd = ("nix-eval-jobs", + "--option", "restrict-eval", "true", "<" . $nixExprInputName . "/" . $nixExprPath . ">", inputsToArgs($inputInfo)); } From cf33a9158ab2806f34c4d2bde23ed394417eb555 Mon Sep 17 00:00:00 2001 From: Martin Weinelt Date: Sun, 13 Apr 2025 08:29:01 +0200 Subject: [PATCH 947/965] web: increase colspan for machine row in machine status --- src/root/machine-status.tt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/root/machine-status.tt b/src/root/machine-status.tt index 07a2359d..725598eb 100644 --- a/src/root/machine-status.tt +++ b/src/root/machine-status.tt @@ -17,7 +17,7 @@ [% name = m.key ? stripSSHUser(m.key) : "localhost" %]
+ [% INCLUDE renderMachineName machine=m.key %] [% IF m.value.systemTypes %] From 0ab357e43515b77a6c1deca4cef126cc204236ba Mon Sep 17 00:00:00 2001 From: Pierre Bourdon Date: Sat, 20 Jul 2024 13:09:39 +0200 Subject: [PATCH 948/965] jobset-eval: fix actions not showing up sometimes for new jobs New jobs have their "new" status take precedence over them being "failed" or "queued", which means actions that can act on "failed" or "queued" jobs weren't shown to the user when they could only act on "new" jobs. (cherry picked from commit https://git.lix.systems/lix-project/hydra/commit/9a4a5dd624a1cedc7cdc40687815739b228e5c77) --- src/lib/Hydra/Controller/JobsetEval.pm | 4 +++- src/lib/Hydra/Helper/BuildDiff.pm | 18 +++++++++++++++--- src/root/jobset-eval.tt | 8 ++++---- t/Hydra/Helper/BuildDiff.t | 17 +++++++---------- 4 files changed, 29 insertions(+), 18 deletions(-) diff --git a/src/lib/Hydra/Controller/JobsetEval.pm b/src/lib/Hydra/Controller/JobsetEval.pm index 643a516c..77c01a84 100644 --- a/src/lib/Hydra/Controller/JobsetEval.pm +++ b/src/lib/Hydra/Controller/JobsetEval.pm @@ -76,7 +76,9 @@ sub view_GET { $c->stash->{removed} = $diff->{removed}; $c->stash->{unfinished} = $diff->{unfinished}; $c->stash->{aborted} = $diff->{aborted}; - $c->stash->{failed} = $diff->{failed}; + $c->stash->{totalAborted} = $diff->{totalAborted}; + $c->stash->{totalFailed} = $diff->{totalFailed}; + $c->stash->{totalQueued} = $diff->{totalQueued}; $c->stash->{full} = ($c->req->params->{full} || "0") eq "1"; diff --git a/src/lib/Hydra/Helper/BuildDiff.pm b/src/lib/Hydra/Helper/BuildDiff.pm index 65dad17c..be8525d6 100644 --- a/src/lib/Hydra/Helper/BuildDiff.pm +++ b/src/lib/Hydra/Helper/BuildDiff.pm @@ -32,7 +32,12 @@ sub buildDiff { removed => [], unfinished => [], aborted => [], - failed => [], + + # These summary counters cut across the categories to determine whether + # actions such as "Restart all failed" or "Bump queue" are available. 
+ totalAborted => 0, + totalFailed => 0, + totalQueued => 0, }; my $n = 0; @@ -80,8 +85,15 @@ sub buildDiff { } else { push @{$ret->{new}}, $build if !$found; } - if (defined $build->buildstatus && $build->buildstatus != 0) { - push @{$ret->{failed}}, $build; + + if ($build->finished != 0 && $build->buildstatus != 0) { + if ($aborted) { + ++$ret->{totalAborted}; + } else { + ++$ret->{totalFailed}; + } + } elsif ($build->finished == 0) { + ++$ret->{totalQueued}; } } diff --git a/src/root/jobset-eval.tt b/src/root/jobset-eval.tt index f0b92f97..12086d85 100644 --- a/src/root/jobset-eval.tt +++ b/src/root/jobset-eval.tt @@ -48,16 +48,16 @@ c.uri_for(c.controller('JobsetEval').action_for('view'), Actions diff --git a/t/Hydra/Helper/BuildDiff.t b/t/Hydra/Helper/BuildDiff.t index 243bb596..eef25a0f 100644 --- a/t/Hydra/Helper/BuildDiff.t +++ b/t/Hydra/Helper/BuildDiff.t @@ -25,7 +25,10 @@ subtest "empty diff" => sub { removed => [], unfinished => [], aborted => [], - failed => [], + + totalAborted => 0, + totalFailed => 0, + totalQueued => 0, }, "empty list of jobs returns empty diff" ); @@ -48,12 +51,7 @@ subtest "2 different jobs" => sub { "succeed_with_failed is a new job" ); - is(scalar(@{$ret->{failed}}), 1, "list of failed jobs is 1 element long"); - is( - $ret->{failed}[0]->get_column('id'), - $builds->{"succeed_with_failed"}->get_column('id'), - "succeed_with_failed is a failed job" - ); + is($ret->{totalFailed}, 1, "total failed jobs is 1"); is( $ret->{removed}, @@ -70,9 +68,9 @@ subtest "2 different jobs" => sub { subtest "failed job with no previous history" => sub { my $ret = buildDiff([$builds->{"fails"}], []); - is(scalar(@{$ret->{failed}}), 1, "list of failed jobs is 1 element long"); + is($ret->{totalFailed}, 1, "total failed jobs is 1"); is( - $ret->{failed}[0]->get_column('id'), + $ret->{new}[0]->get_column('id'), $builds->{"fails"}->get_column('id'), "fails is a failed job" ); @@ -93,7 +91,6 @@ subtest "not-yet-built job with no previous history" => sub { is($ret->{removed}, [], "removed"); is($ret->{unfinished}, [], "unfinished"); is($ret->{aborted}, [], "aborted"); - is($ret->{failed}, [], "failed"); is(scalar(@{$ret->{new}}), 1, "list of new jobs is 1 element long"); is( From 720db63d52ebcbda617603e7aa5b5c750cc6afec Mon Sep 17 00:00:00 2001 From: Pierre Bourdon Date: Sun, 21 Apr 2024 17:36:16 +0200 Subject: [PATCH 949/965] queue runner: attempt at slightly smarter scheduling criteria Instead of just going for "whatever is the oldest build we know of", use the following first: - Is the step more constrained? If so, schedule it first to avoid filling up "more desirable" build slots with less constrained builds. - Does the step have more dependents? If so, schedule it first to try and maximize open parallelism and breadth of scheduling options. (cherry picked from commit https://git.lix.systems/lix-project/hydra/commit/b8d03adaf45105452bf1040deeaaccc8b8b22efb) --- src/hydra-queue-runner/dispatcher.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/hydra-queue-runner/dispatcher.cc b/src/hydra-queue-runner/dispatcher.cc index d3e145de..ada25dc6 100644 --- a/src/hydra-queue-runner/dispatcher.cc +++ b/src/hydra-queue-runner/dispatcher.cc @@ -134,6 +134,8 @@ system_time State::doDispatch() comparator is a partial ordering (see MachineInfo). 
*/ int highestGlobalPriority; int highestLocalPriority; + size_t numRequiredSystemFeatures; + size_t numRevDeps; BuildID lowestBuildID; StepInfo(Step::ptr step, Step::State & step_) : step(step) @@ -142,6 +144,8 @@ system_time State::doDispatch() lowestShareUsed = std::min(lowestShareUsed, jobset->shareUsed()); highestGlobalPriority = step_.highestGlobalPriority; highestLocalPriority = step_.highestLocalPriority; + numRequiredSystemFeatures = step->requiredSystemFeatures.size(); + numRevDeps = step_.rdeps.size(); lowestBuildID = step_.lowestBuildID; } }; @@ -194,6 +198,8 @@ system_time State::doDispatch() a.highestGlobalPriority != b.highestGlobalPriority ? a.highestGlobalPriority > b.highestGlobalPriority : a.lowestShareUsed != b.lowestShareUsed ? a.lowestShareUsed < b.lowestShareUsed : a.highestLocalPriority != b.highestLocalPriority ? a.highestLocalPriority > b.highestLocalPriority : + a.numRequiredSystemFeatures != b.numRequiredSystemFeatures ? a.numRequiredSystemFeatures > b.numRequiredSystemFeatures : + a.numRevDeps != b.numRevDeps ? a.numRevDeps > b.numRevDeps : a.lowestBuildID < b.lowestBuildID; }); From 23755bf001592d82c735b49816f66a3bdd96216d Mon Sep 17 00:00:00 2001 From: Martin Weinelt Date: Wed, 23 Apr 2025 18:27:14 +0200 Subject: [PATCH 950/965] flake.lock: Update MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Flake lock file updates: • Updated input 'nix': 'github:NixOS/nix/a4962f73b5fc874d4b16baef47921daf349addfc' (2025-04-07) → 'github:NixOS/nix/70921714cb3b5e6041b7413459541838651079f3' (2025-04-23) • Updated input 'nixpkgs': 'github:NixOS/nixpkgs/db8f4fe18ce772a9c8f3adf321416981c8fe9371' (2025-04-07) → 'github:NixOS/nixpkgs/eea3403f7ca9f9942098f4f2756adab4ec924b2b' (2025-04-23) --- flake.lock | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/flake.lock b/flake.lock index 1a1531ec..53a0526b 100644 --- a/flake.lock +++ b/flake.lock @@ -12,11 +12,11 @@ "nixpkgs-regression": [] }, "locked": { - "lastModified": 1744030329, - "narHash": "sha256-r+psCOW77vTSTNbxTVrYHeh6OgB0QukbnyUVDwg8s4I=", + "lastModified": 1745420957, + "narHash": "sha256-ZbB3IH9OlJvo14GlQZbYHzJojf/HCDT38GzYTod8DaU=", "owner": "NixOS", "repo": "nix", - "rev": "a4962f73b5fc874d4b16baef47921daf349addfc", + "rev": "70921714cb3b5e6041b7413459541838651079f3", "type": "github" }, "original": { @@ -44,11 +44,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1743987495, - "narHash": "sha256-46T2vMZ4/AfCK0Y2OjlFzJPxmdpP8GtsuEqSSJv3oe4=", + "lastModified": 1745408698, + "narHash": "sha256-JT1wMjLIypWJA0N2V27WpUw8feDmTok4Dwkb0oYXDS4=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "db8f4fe18ce772a9c8f3adf321416981c8fe9371", + "rev": "eea3403f7ca9f9942098f4f2756adab4ec924b2b", "type": "github" }, "original": { From 89fcb931ce7d1cc64b644c4589daecfbf87321f3 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Wed, 23 Apr 2025 17:51:47 -0400 Subject: [PATCH 951/965] Use Nix without the flake This is what we do for `nix-eval-jobs` already. It allows for more fine-grained control over dependencies. 
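Returning briefly to the scheduling change in PATCH 949 above: the chained ternaries are easy to misread in diff form, so here is a hedged sketch of the complete comparator (StepInfo is trimmed to the fields the comparator reads, and the surrounding types are simplified stand-ins, not the real queue-runner types):

```cpp
#include <algorithm>
#include <cstddef>
#include <vector>

using BuildID = unsigned int;

struct StepInfo
{
    int highestGlobalPriority = 0;
    double lowestShareUsed = 0.0;
    int highestLocalPriority = 0;
    std::size_t numRequiredSystemFeatures = 0; // how constrained the step is
    std::size_t numRevDeps = 0;                // how much work it unblocks
    BuildID lowestBuildID = 0;
};

void sortRunnable(std::vector<StepInfo> & runnable)
{
    std::sort(runnable.begin(), runnable.end(),
        [](const StepInfo & a, const StepInfo & b) {
            return
                a.highestGlobalPriority != b.highestGlobalPriority ? a.highestGlobalPriority > b.highestGlobalPriority :
                a.lowestShareUsed != b.lowestShareUsed ? a.lowestShareUsed < b.lowestShareUsed :
                a.highestLocalPriority != b.highestLocalPriority ? a.highestLocalPriority > b.highestLocalPriority :
                // The two new tie-breakers introduced by the patch:
                a.numRequiredSystemFeatures != b.numRequiredSystemFeatures ? a.numRequiredSystemFeatures > b.numRequiredSystemFeatures :
                a.numRevDeps != b.numRevDeps ? a.numRevDeps > b.numRevDeps :
                a.lowestBuildID < b.lowestBuildID; // oldest build wins last
        });
}

int main()
{
    std::vector<StepInfo> steps(2);
    steps[0].lowestBuildID = 1;             // older, but unconstrained
    steps[1].lowestBuildID = 2;
    steps[1].numRequiredSystemFeatures = 1; // more constrained, goes first
    sortRunnable(steps);
    return steps.front().numRequiredSystemFeatures == 1 ? 0 : 1;
}
```

Each criterion only breaks ties left by the previous one, so scheduling stays deterministic all the way down to the final build-ID comparison.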
--- flake.lock | 11 +--------- flake.nix | 63 ++++++++++++++++++++++++++++++++---------------------- 2 files changed, 39 insertions(+), 35 deletions(-) diff --git a/flake.lock b/flake.lock index 53a0526b..3e403f6f 100644 --- a/flake.lock +++ b/flake.lock @@ -1,16 +1,7 @@ { "nodes": { "nix": { - "inputs": { - "flake-compat": [], - "flake-parts": [], - "git-hooks-nix": [], - "nixpkgs": [ - "nixpkgs" - ], - "nixpkgs-23-11": [], - "nixpkgs-regression": [] - }, + "flake": false, "locked": { "lastModified": 1745420957, "narHash": "sha256-ZbB3IH9OlJvo14GlQZbYHzJojf/HCDT38GzYTod8DaU=", diff --git a/flake.nix b/flake.nix index dc1d1b8a..0e2f32dc 100644 --- a/flake.nix +++ b/flake.nix @@ -5,14 +5,8 @@ inputs.nix = { url = "github:NixOS/nix/2.28-maintenance"; - inputs.nixpkgs.follows = "nixpkgs"; - - # hide nix dev tooling from our lock file - inputs.flake-parts.follows = ""; - inputs.git-hooks-nix.follows = ""; - inputs.nixpkgs-regression.follows = ""; - inputs.nixpkgs-23-11.follows = ""; - inputs.flake-compat.follows = ""; + # We want to control the deps precisely + flake = false; }; inputs.nix-eval-jobs = { @@ -30,10 +24,27 @@ # A Nixpkgs overlay that provides a 'hydra' package. overlays.default = final: prev: { - nix-eval-jobs = final.callPackage nix-eval-jobs {}; + nixDependenciesForHydra = final.lib.makeScope final.newScope + (import (nix + "/packaging/dependencies.nix") { + pkgs = final; + inherit (final) stdenv; + inputs = {}; + }); + nixComponentsForHydra = final.lib.makeScope final.nixDependenciesForHydra.newScope + (import (nix + "packaging/components.nix") { + officialRelease = true; + inherit (final) lib; + pkgs = final; + src = nix; + maintainers = [ ]; + }); + nix-eval-jobs = final.callPackage nix-eval-jobs { + nixComponents = final.nixComponentsForHydra; + }; hydra = final.callPackage ./package.nix { - inherit (nixpkgs.lib) fileset; + inherit (final.lib) fileset; rawSrc = self; + nixComponents = final.nixComponentsForHydra; }; }; @@ -73,24 +84,26 @@ }); packages = forEachSystem (system: let - nixComponents = { - inherit (nix.packages.${system}) - nix-util - nix-store - nix-expr - nix-fetchers - nix-flake - nix-main - nix-cmd - nix-cli - nix-perl-bindings - ; - }; + inherit (nixpkgs) lib; + pkgs = nixpkgs.legacyPackages.${system}; + nixDependencies = lib.makeScope pkgs.newScope + (import (nix + "/packaging/dependencies.nix") { + inherit pkgs; + inherit (pkgs) stdenv; + inputs = {}; + }); + nixComponents = lib.makeScope nixDependencies.newScope + (import (nix + "/packaging/components.nix") { + officialRelease = true; + inherit lib pkgs; + src = nix; + maintainers = [ ]; + }); in { - nix-eval-jobs = nixpkgs.legacyPackages.${system}.callPackage nix-eval-jobs { + nix-eval-jobs = pkgs.callPackage nix-eval-jobs { inherit nixComponents; }; - hydra = nixpkgs.legacyPackages.${system}.callPackage ./package.nix { + hydra = pkgs.callPackage ./package.nix { inherit (nixpkgs.lib) fileset; inherit nixComponents; inherit (self.packages.${system}) nix-eval-jobs; From 8218a9ad1b7e61de6449e6efa1bc3e189cc7469a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Tue, 29 Apr 2025 20:06:35 +0200 Subject: [PATCH 952/965] hydra: expose nix-cli package This makes it easier in other packages to get the nix version used to build Hydra. 
--- package.nix | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/package.nix b/package.nix index 4a7840c1..5c1a7860 100644 --- a/package.nix +++ b/package.nix @@ -277,5 +277,8 @@ stdenv.mkDerivation (finalAttrs: { dontStrip = true; meta.description = "Build of Hydra on ${stdenv.system}"; - passthru = { inherit perlDeps; }; + passthru = { + inherit perlDeps; + nix = nixComponents.nix-cli; + }; }) From 2ae27dd20da6de787a0926e2de38dc537437a215 Mon Sep 17 00:00:00 2001 From: Sandro Date: Mon, 5 May 2025 00:10:59 +0200 Subject: [PATCH 953/965] Add missing slash error: access to absolute path '/nix/store/sai35xfsrba2a2vasmzxakmn54wdfa13-sourcepackaging' is forbidden in pure evaluation mode (use '--impure' to override) --- flake.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flake.nix b/flake.nix index 0e2f32dc..979bfcbd 100644 --- a/flake.nix +++ b/flake.nix @@ -31,7 +31,7 @@ inputs = {}; }); nixComponentsForHydra = final.lib.makeScope final.nixDependenciesForHydra.newScope - (import (nix + "packaging/components.nix") { + (import (nix + "/packaging/components.nix") { officialRelease = true; inherit (final) lib; pkgs = final; From da1aebe970a96a533e8cff51436bbd160c496bd5 Mon Sep 17 00:00:00 2001 From: Martin Weinelt Date: Thu, 15 May 2025 04:08:57 +0200 Subject: [PATCH 954/965] Migrate from "gc-" prefixed nix options These have been deprecated, e.g. gc-keep-outputs is now just keep-outputs. --- nixos-modules/hydra.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nixos-modules/hydra.nix b/nixos-modules/hydra.nix index 79d639e6..283a9b8d 100644 --- a/nixos-modules/hydra.nix +++ b/nixos-modules/hydra.nix @@ -228,8 +228,8 @@ in nix.settings = { trusted-users = [ "hydra-queue-runner" ]; - gc-keep-outputs = true; - gc-keep-derivations = true; + keep-outputs = true; + keep-derivations = true; }; services.hydra-dev.extraConfig = From 8bb7d27588706eb20f68c3103883b2f58b016754 Mon Sep 17 00:00:00 2001 From: Thomas Nixon Date: Fri, 23 May 2025 19:53:15 +0100 Subject: [PATCH 955/965] doc/manual: correct nginx reverse proxy example - hydra does not remove the base URI from the request before processing it, so this must be done in the reverse proxy. in nginx this is done by giving proxy_pass a URI rather than a protocol/host/port; see: https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass - proxy_redirect is not correct/required: hydra uses proxy headers to correctly form redirects in most cases, and where it doesn't it produces local redirects which aren't matched by this directive anyway --- doc/manual/src/configuration.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/doc/manual/src/configuration.md b/doc/manual/src/configuration.md index d370312a..856d314c 100644 --- a/doc/manual/src/configuration.md +++ b/doc/manual/src/configuration.md @@ -63,8 +63,7 @@ following: .. other configuration .. location /hydra/ { - proxy_pass http://127.0.0.1:3000; - proxy_redirect http://127.0.0.1:3000 https://example.com/hydra; + proxy_pass http://127.0.0.1:3000/; proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; @@ -74,6 +73,9 @@ following: } } +Note the trailing slash on the `proxy_pass` directive, which causes nginx to +strip off the `/hydra/` part of the URL before passing it to hydra. 
+ Populating a Cache ------------------ From 8a50488f6c6be4acdc9ed325f5fd1d88edf5d0d8 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Sun, 25 May 2025 20:51:05 -0400 Subject: [PATCH 956/965] flake.lock: Update Nixpkgs to 25.05 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Flake lock file updates: • Updated input 'nixpkgs': 'github:NixOS/nixpkgs/eea3403f7ca9f9942098f4f2756adab4ec924b2b?narHash=sha256-JT1wMjLIypWJA0N2V27WpUw8feDmTok4Dwkb0oYXDS4%3D' (2025-04-23) → 'github:NixOS/nixpkgs/db1aed32009f408e4048c1dd0beaf714dd34ed93?narHash=sha256-8A7HjmnvCpDjmETrZY1QwzKunR63LiP7lHu1eA5q6JI%3D' (2025-05-24) --- flake.lock | 8 ++++---- flake.nix | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/flake.lock b/flake.lock index 3e403f6f..3a8d6870 100644 --- a/flake.lock +++ b/flake.lock @@ -35,16 +35,16 @@ }, "nixpkgs": { "locked": { - "lastModified": 1745408698, - "narHash": "sha256-JT1wMjLIypWJA0N2V27WpUw8feDmTok4Dwkb0oYXDS4=", + "lastModified": 1748124805, + "narHash": "sha256-8A7HjmnvCpDjmETrZY1QwzKunR63LiP7lHu1eA5q6JI=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "eea3403f7ca9f9942098f4f2756adab4ec924b2b", + "rev": "db1aed32009f408e4048c1dd0beaf714dd34ed93", "type": "github" }, "original": { "owner": "NixOS", - "ref": "nixos-24.11-small", + "ref": "nixos-25.05-small", "repo": "nixpkgs", "type": "github" } diff --git a/flake.nix b/flake.nix index 979bfcbd..21bd793d 100644 --- a/flake.nix +++ b/flake.nix @@ -1,7 +1,7 @@ { description = "A Nix-based continuous build system"; - inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11-small"; + inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05-small"; inputs.nix = { url = "github:NixOS/nix/2.28-maintenance"; From dafa252d085aa44a3c46e55cfdd3f26908a224d8 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Thu, 15 May 2025 00:09:42 -0400 Subject: [PATCH 957/965] flake.lock: Update Nix and nix-eval-jobs to 2.29 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Flake lock file updates: • Updated input 'nix': 'github:NixOS/nix/70921714cb3b5e6041b7413459541838651079f3?narHash=sha256-ZbB3IH9OlJvo14GlQZbYHzJojf/HCDT38GzYTod8DaU%3D' (2025-04-23) → 'github:NixOS/nix/d761dad79c79af17aa476a29749bd9d69747548f?narHash=sha256-rCpANMHFIlafta6J/G0ILRd%2BWNSnzv/lzi40Y8f1AR8%3D' (2025-05-25) • Updated input 'nix-eval-jobs': 'github:nix-community/nix-eval-jobs/1260c6599d22dfd8c25fea6893c3d031996b20e1?narHash=sha256-n220U5pjzCtTtOJtbga4Xr/PyllowKw9anSevgCqJEw%3D' (2025-04-11) → 'github:nix-community/nix-eval-jobs/d9262e535e35454daebcebd434bdb9c1486bb998?narHash=sha256-AJ22q6yWc1hPkqssXMxQqD6QUeJ6hbx52xWHhKsmuP0%3D' (2025-05-25) --- flake.lock | 14 +++++++------- flake.nix | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/flake.lock b/flake.lock index 3a8d6870..2679eecb 100644 --- a/flake.lock +++ b/flake.lock @@ -3,16 +3,16 @@ "nix": { "flake": false, "locked": { - "lastModified": 1745420957, - "narHash": "sha256-ZbB3IH9OlJvo14GlQZbYHzJojf/HCDT38GzYTod8DaU=", + "lastModified": 1748154947, + "narHash": "sha256-rCpANMHFIlafta6J/G0ILRd+WNSnzv/lzi40Y8f1AR8=", "owner": "NixOS", "repo": "nix", - "rev": "70921714cb3b5e6041b7413459541838651079f3", + "rev": "d761dad79c79af17aa476a29749bd9d69747548f", "type": "github" }, "original": { "owner": "NixOS", - "ref": "2.28-maintenance", + "ref": "2.29-maintenance", "repo": "nix", "type": "github" } @@ -20,11 +20,11 @@ "nix-eval-jobs": { "flake": false, "locked": { - "lastModified": 1744370057, - "narHash": 
"sha256-n220U5pjzCtTtOJtbga4Xr/PyllowKw9anSevgCqJEw=", + "lastModified": 1748211873, + "narHash": "sha256-AJ22q6yWc1hPkqssXMxQqD6QUeJ6hbx52xWHhKsmuP0=", "owner": "nix-community", "repo": "nix-eval-jobs", - "rev": "1260c6599d22dfd8c25fea6893c3d031996b20e1", + "rev": "d9262e535e35454daebcebd434bdb9c1486bb998", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 21bd793d..e67a3a99 100644 --- a/flake.nix +++ b/flake.nix @@ -4,7 +4,7 @@ inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05-small"; inputs.nix = { - url = "github:NixOS/nix/2.28-maintenance"; + url = "github:NixOS/nix/2.29-maintenance"; # We want to control the deps precisely flake = false; }; From 278a3ebfd5297a976400f93afb1e54c6d1915cca Mon Sep 17 00:00:00 2001 From: John Ericson Date: Thu, 15 May 2025 00:24:25 -0400 Subject: [PATCH 958/965] Fix build with Nix 2.29 --- src/hydra-queue-runner/build-remote.cc | 2 +- src/hydra-queue-runner/hydra-queue-runner.cc | 4 ++-- src/hydra-queue-runner/queue-monitor.cc | 10 ++++++++-- src/hydra-queue-runner/state.hh | 2 +- 4 files changed, 12 insertions(+), 6 deletions(-) diff --git a/src/hydra-queue-runner/build-remote.cc b/src/hydra-queue-runner/build-remote.cc index 7e307c75..b372e7dd 100644 --- a/src/hydra-queue-runner/build-remote.cc +++ b/src/hydra-queue-runner/build-remote.cc @@ -50,7 +50,7 @@ static std::unique_ptr openConnection( auto remoteStore = machine->storeUri.params.find("remote-store"); if (remoteStore != machine->storeUri.params.end()) { command.push_back("--store"); - command.push_back(shellEscape(remoteStore->second)); + command.push_back(escapeShellArgAlways(remoteStore->second)); } } diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index ab146312..a4a7f0a7 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -14,7 +14,7 @@ #include #include "state.hh" #include "hydra-build-result.hh" -#include +#include #include #include @@ -832,7 +832,7 @@ void State::run(BuildID buildOne) << metricsAddr << "/metrics (port " << exposerPort << ")" << std::endl; - Store::Params localParams; + Store::Config::Params localParams; localParams["max-connections"] = "16"; localParams["max-connection-age"] = "600"; localStore = openStore(getEnv("NIX_REMOTE").value_or(""), localParams); diff --git a/src/hydra-queue-runner/queue-monitor.cc b/src/hydra-queue-runner/queue-monitor.cc index bb15ac04..0785be6f 100644 --- a/src/hydra-queue-runner/queue-monitor.cc +++ b/src/hydra-queue-runner/queue-monitor.cc @@ -492,8 +492,14 @@ Step::ptr State::createStep(ref destStore, runnable while step->created == false. */ step->drv = std::make_unique(localStore->readDerivation(drvPath)); { - auto parsedDrv = ParsedDerivation{drvPath, *step->drv}; - step->drvOptions = std::make_unique(DerivationOptions::fromParsedDerivation(parsedDrv)); + auto parsedOpt = StructuredAttrs::tryParse(step->drv->env); + try { + step->drvOptions = std::make_unique( + DerivationOptions::fromStructuredAttrs(step->drv->env, parsedOpt ? 
&*parsedOpt : nullptr)); + } catch (Error & e) { + e.addTrace({}, "while parsing derivation '%s'", localStore->printStorePath(drvPath)); + throw; + } } step->preferLocalBuild = step->drvOptions->willBuildLocally(*localStore, *step->drv); diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh index edfad4fb..f7ab7de3 100644 --- a/src/hydra-queue-runner/state.hh +++ b/src/hydra-queue-runner/state.hh @@ -172,7 +172,7 @@ struct Step nix::StorePath drvPath; std::unique_ptr drv; std::unique_ptr drvOptions; - std::set requiredSystemFeatures; + nix::StringSet requiredSystemFeatures; bool preferLocalBuild; bool isDeterministic; std::string systemType; // concatenation of drv.platform and requiredSystemFeatures From 635aff50dd14e30baadc278781bad0c8bf7199c3 Mon Sep 17 00:00:00 2001 From: Julien Marquet Date: Sat, 23 Apr 2022 23:17:49 +0200 Subject: [PATCH 959/965] docs: refine instructions for proxy setting --- doc/manual/src/configuration.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/doc/manual/src/configuration.md b/doc/manual/src/configuration.md index 856d314c..bd8141a3 100644 --- a/doc/manual/src/configuration.md +++ b/doc/manual/src/configuration.md @@ -51,10 +51,12 @@ base_uri example.com `base_uri` should be your hydra servers proxied URL. If you are using Hydra nixos module then setting `hydraURL` option should be enough. -If you want to serve Hydra with a prefix path, for example -[http://example.com/hydra]() then you need to configure your reverse -proxy to pass `X-Request-Base` to hydra, with prefix path as value. For -example if you are using nginx, then use configuration similar to +You also need to configure your reverse proxy to pass `X-Request-Base` +to hydra, with the same value as `base_uri`. +This also covers the case of serving Hydra with a prefix path, +as in [http://example.com/hydra](). 
+ +For example if you are using nginx, then use configuration similar to following: server { From c621f274822dbb8f5d67bb3a7e1e1fcae883a1b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Tue, 24 Jun 2025 18:45:14 +0200 Subject: [PATCH 960/965] test: bump used nix version --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 9c05d752..613e3ef9 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -12,6 +12,6 @@ jobs: - uses: actions/checkout@v3 with: fetch-depth: 0 - - uses: cachix/install-nix-action@v17 + - uses: cachix/install-nix-action@v31 #- run: nix flake check - run: nix-build -A checks.x86_64-linux.build -A checks.x86_64-linux.validate-openapi From 9efe38c60b61c225d3333e5bc658d1ea7de5fb7c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Tue, 24 Jun 2025 18:45:22 +0200 Subject: [PATCH 961/965] add update-flakes action --- .github/workflows/update-flakes.yml | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 .github/workflows/update-flakes.yml diff --git a/.github/workflows/update-flakes.yml b/.github/workflows/update-flakes.yml new file mode 100644 index 00000000..b5c0c2dd --- /dev/null +++ b/.github/workflows/update-flakes.yml @@ -0,0 +1,28 @@ +name: "Update Flakes" +on: + schedule: + # Run weekly on Monday at 00:00 UTC + - cron: '0 0 * * 1' + workflow_dispatch: +jobs: + update-flakes: + runs-on: ubuntu-latest + permissions: + contents: write + pull-requests: write + steps: + - uses: actions/checkout@v3 + - uses: cachix/install-nix-action@v31 + - name: Update flake inputs + run: nix flake update + - name: Create Pull Request + uses: peter-evans/create-pull-request@v5 + with: + commit-message: "flake.lock: Update" + title: "Update flake inputs" + body: | + Automated flake input updates. + + This PR was automatically created by the update-flakes workflow. 
+ branch: update-flakes + delete-branch: true \ No newline at end of file From 605a0e9ce9274073aa994a20916007a79439d6db Mon Sep 17 00:00:00 2001 From: Mic92 Date: Tue, 24 Jun 2025 17:03:10 +0000 Subject: [PATCH 962/965] flake.lock: Update --- flake.lock | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/flake.lock b/flake.lock index 2679eecb..0ca074f3 100644 --- a/flake.lock +++ b/flake.lock @@ -3,11 +3,11 @@ "nix": { "flake": false, "locked": { - "lastModified": 1748154947, - "narHash": "sha256-rCpANMHFIlafta6J/G0ILRd+WNSnzv/lzi40Y8f1AR8=", + "lastModified": 1750777360, + "narHash": "sha256-nDWFxwhT+fQNgi4rrr55EKjpxDyVKSl1KaNmSXtYj40=", "owner": "NixOS", "repo": "nix", - "rev": "d761dad79c79af17aa476a29749bd9d69747548f", + "rev": "7bb200199705eddd53cb34660a76567c6f1295d9", "type": "github" }, "original": { @@ -20,11 +20,11 @@ "nix-eval-jobs": { "flake": false, "locked": { - "lastModified": 1748211873, - "narHash": "sha256-AJ22q6yWc1hPkqssXMxQqD6QUeJ6hbx52xWHhKsmuP0=", + "lastModified": 1748680938, + "narHash": "sha256-TQk6pEMD0mFw7jZXpg7+2qNKGbAluMQgc55OMgEO8bM=", "owner": "nix-community", "repo": "nix-eval-jobs", - "rev": "d9262e535e35454daebcebd434bdb9c1486bb998", + "rev": "974a4af3d4a8fd242d8d0e2608da4be87a62b83f", "type": "github" }, "original": { @@ -35,11 +35,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1748124805, - "narHash": "sha256-8A7HjmnvCpDjmETrZY1QwzKunR63LiP7lHu1eA5q6JI=", + "lastModified": 1750736827, + "narHash": "sha256-UcNP7BR41xMTe0sfHBH8R79+HdCw0OwkC/ZKrQEuMeo=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "db1aed32009f408e4048c1dd0beaf714dd34ed93", + "rev": "b4a30b08433ad7b6e1dfba0833fb0fe69d43dfec", "type": "github" }, "original": { From c544042051269ab0516ce84ca3b259e0a21b59d7 Mon Sep 17 00:00:00 2001 From: Martin Weinelt Date: Fri, 4 Jul 2025 06:44:41 +0200 Subject: [PATCH 963/965] Replace nettools with hostname-debian As far as I understand we include nettools for its hostname executable used by the Sys-Hostname-Long perl package. But if we just need that then the hostname-debian package provides a simpler and better maintained version. 
--- nixos-modules/hydra.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nixos-modules/hydra.nix b/nixos-modules/hydra.nix index 283a9b8d..83ffeec4 100644 --- a/nixos-modules/hydra.nix +++ b/nixos-modules/hydra.nix @@ -340,7 +340,7 @@ in requires = [ "hydra-init.service" ]; wants = [ "network-online.target" ]; after = [ "hydra-init.service" "network.target" "network-online.target" ]; - path = [ cfg.package pkgs.nettools pkgs.openssh pkgs.bzip2 config.nix.package ]; + path = [ cfg.package pkgs.hostname-debian pkgs.openssh pkgs.bzip2 config.nix.package ]; restartTriggers = [ hydraConf ]; environment = env // { PGPASSFILE = "${baseDir}/pgpass-queue-runner"; # grrr @@ -364,7 +364,7 @@ in requires = [ "hydra-init.service" ]; restartTriggers = [ hydraConf ]; after = [ "hydra-init.service" "network.target" ]; - path = with pkgs; [ nettools cfg.package jq ]; + path = with pkgs; [ hostname-debian cfg.package jq ]; environment = env // { HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-evaluator"; }; From b3b48bc2370802c2dd82eae72fa6210b2a064026 Mon Sep 17 00:00:00 2001 From: Sandro Date: Fri, 4 Jul 2025 12:01:42 +0200 Subject: [PATCH 964/965] module: sync with nixpkgs --- nixos-modules/hydra.nix | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nixos-modules/hydra.nix b/nixos-modules/hydra.nix index 283a9b8d..380cf01b 100644 --- a/nixos-modules/hydra.nix +++ b/nixos-modules/hydra.nix @@ -463,12 +463,12 @@ in '' set -eou pipefail compression=$(sed -nr 's/compress_build_logs_compression = ()/\1/p' ${baseDir}/hydra.conf) - if [[ $compression == "" ]]; then - compression="bzip2" + if [[ $compression == "" || $compression == bzip2 ]]; then + compressionCmd=(bzip2) elif [[ $compression == zstd ]]; then - compression="zstd --rm" + compressionCmd=(zstd --rm) fi - find ${baseDir}/build-logs -ignore_readdir_race -type f -name "*.drv" -mtime +3 -size +0c | xargs -r "$compression" --force --quiet + find ${baseDir}/build-logs -ignore_readdir_race -type f -name "*.drv" -mtime +3 -size +0c -print0 | xargs -0 -r "''${compressionCmd[@]}" --force --quiet ''; startAt = "Sun 01:45"; }; From 62fcacb7d2bbafa1fe19087ec93964fe1352b683 Mon Sep 17 00:00:00 2001 From: Dionysis Grigoropoulos Date: Tue, 15 Jul 2025 19:45:13 +0300 Subject: [PATCH 965/965] fix: Update Nix download url --- doc/manual/src/installation.md | 2 +- src/root/build.tt | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/manual/src/installation.md b/doc/manual/src/installation.md index cbf3f907..39a86885 100644 --- a/doc/manual/src/installation.md +++ b/doc/manual/src/installation.md @@ -48,7 +48,7 @@ Getting Nix If your server runs NixOS you are all set to continue with installation of Hydra. Otherwise you first need to install Nix. The latest stable version can be found one [the Nix web -site](http://nixos.org/nix/download.html), along with a manual, which +site](https://nixos.org/download/), along with a manual, which includes installation instructions. Installation diff --git a/src/root/build.tt b/src/root/build.tt index 18ff6f01..93629427 100644 --- a/src/root/build.tt +++ b/src/root/build.tt @@ -563,7 +563,7 @@ END; [% IF eval.flake %] -

If you have Nix +

If you have Nix installed, you can reproduce this build on your own machine by running the following command:

@@ -573,7 +573,7 @@ END; [% ELSE %] -

If you have Nix +

If you have Nix installed, you can reproduce this build on your own machine by downloading url) %]>a script that checks out all inputs of the build and then invokes Nix to