diff --git a/flake.lock b/flake.lock index 897785ed..c47a3b88 100644 --- a/flake.lock +++ b/flake.lock @@ -1,30 +1,10 @@ { "nodes": { - "libgit2": { - "flake": false, - "locked": { - "lastModified": 1715853528, - "narHash": "sha256-J2rCxTecyLbbDdsyBWn9w7r3pbKRMkI9E7RvRgAqBdY=", - "owner": "libgit2", - "repo": "libgit2", - "rev": "36f7e21ad757a3dacc58cf7944329da6bc1d6e96", - "type": "github" - }, - "original": { - "owner": "libgit2", - "ref": "v1.8.1", - "repo": "libgit2", - "type": "github" - } - }, "nix": { "inputs": { "flake-compat": [], "flake-parts": [], "git-hooks-nix": [], - "libgit2": [ - "libgit2" - ], "nixpkgs": [ "nixpkgs" ], @@ -32,40 +12,56 @@ "nixpkgs-regression": [] }, "locked": { - "lastModified": 1726787955, - "narHash": "sha256-XFznzb8L4SdUm9u+w3DPpMWJhffuv+/6+aiVl00slns=", + "lastModified": 1739899400, + "narHash": "sha256-q/RgA4bB7zWai4oPySq9mch7qH14IEeom2P64SXdqHs=", "owner": "NixOS", "repo": "nix", - "rev": "a7fdef6858dd45b9d7bda7c92324c63faee7f509", + "rev": "e310c19a1aeb1ce1ed4d41d5ab2d02db596e0918", "type": "github" }, "original": { "owner": "NixOS", - "ref": "2.24-maintenance", + "ref": "2.26-maintenance", "repo": "nix", "type": "github" } }, + "nix-eval-jobs": { + "flake": false, + "locked": { + "lastModified": 1739500569, + "narHash": "sha256-3wIReAqdTALv39gkWXLMZQvHyBOc3yPkWT2ZsItxedY=", + "owner": "nix-community", + "repo": "nix-eval-jobs", + "rev": "4b392b284877d203ae262e16af269f702df036bc", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "nix-eval-jobs", + "type": "github" + } + }, "nixpkgs": { "locked": { - "lastModified": 1726688310, - "narHash": "sha256-Xc9lEtentPCEtxc/F1e6jIZsd4MPDYv4Kugl9WtXlz0=", + "lastModified": 1739461644, + "narHash": "sha256-1o1qR0KYozYGRrnqytSpAhVBYLNBHX+Lv6I39zGRzKM=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "dbebdd67a6006bb145d98c8debf9140ac7e651d0", + "rev": "97a719c9f0a07923c957cf51b20b329f9fb9d43f", "type": "github" }, "original": { "owner": "NixOS", - "ref": "nixos-24.05-small", + "ref": "nixos-24.11-small", "repo": "nixpkgs", "type": "github" } }, "root": { "inputs": { - "libgit2": "libgit2", "nix": "nix", + "nix-eval-jobs": "nix-eval-jobs", "nixpkgs": "nixpkgs" } } diff --git a/flake.nix b/flake.nix index fccd45b9..dc3aaf5c 100644 --- a/flake.nix +++ b/flake.nix @@ -1,21 +1,27 @@ { description = "A Nix-based continuous build system"; - inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.05-small"; + inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11-small"; - inputs.libgit2 = { url = "github:libgit2/libgit2/v1.8.1"; flake = false; }; - inputs.nix.url = "github:NixOS/nix/2.24-maintenance"; - inputs.nix.inputs.nixpkgs.follows = "nixpkgs"; - inputs.nix.inputs.libgit2.follows = "libgit2"; + inputs.nix = { + url = "github:NixOS/nix/2.26-maintenance"; + inputs.nixpkgs.follows = "nixpkgs"; - # hide nix dev tooling from our lock file - inputs.nix.inputs.flake-parts.follows = ""; - inputs.nix.inputs.git-hooks-nix.follows = ""; - inputs.nix.inputs.nixpkgs-regression.follows = ""; - inputs.nix.inputs.nixpkgs-23-11.follows = ""; - inputs.nix.inputs.flake-compat.follows = ""; + # hide nix dev tooling from our lock file + inputs.flake-parts.follows = ""; + inputs.git-hooks-nix.follows = ""; + inputs.nixpkgs-regression.follows = ""; + inputs.nixpkgs-23-11.follows = ""; + inputs.flake-compat.follows = ""; + }; - outputs = { self, nixpkgs, nix, ... 
}: + inputs.nix-eval-jobs = { + url = "github:nix-community/nix-eval-jobs"; + # We want to control the deps precisely + flake = false; + }; + + outputs = { self, nixpkgs, nix, nix-eval-jobs, ... }: let systems = [ "x86_64-linux" "aarch64-linux" ]; forEachSystem = nixpkgs.lib.genAttrs systems; @@ -24,6 +30,7 @@ # A Nixpkgs overlay that provides a 'hydra' package. overlays.default = final: prev: { + nix-eval-jobs = final.callPackage nix-eval-jobs {}; hydra = final.callPackage ./package.nix { inherit (nixpkgs.lib) fileset; rawSrc = self; @@ -67,10 +74,19 @@ }); packages = forEachSystem (system: { + nix-eval-jobs = nixpkgs.legacyPackages.${system}.callPackage nix-eval-jobs { + nix = nix.packages.${system}.nix; + }; hydra = nixpkgs.legacyPackages.${system}.callPackage ./package.nix { inherit (nixpkgs.lib) fileset; + inherit (self.packages.${system}) nix-eval-jobs; rawSrc = self; - nix = nix.packages.${system}.nix; + inherit (nix.packages.${system}) + nix-util + nix-store + nix-main + nix-cli + ; nix-perl-bindings = nix.hydraJobs.perlBindings.${system}; }; default = self.packages.${system}.hydra; diff --git a/meson.build b/meson.build index b9327d51..9c9c09a1 100644 --- a/meson.build +++ b/meson.build @@ -8,22 +8,22 @@ project('hydra', 'cpp', ], ) +nix_util_dep = dependency('nix-util', required: true) nix_store_dep = dependency('nix-store', required: true) nix_main_dep = dependency('nix-main', required: true) -nix_expr_dep = dependency('nix-expr', required: true) -nix_flake_dep = dependency('nix-flake', required: true) -nix_cmd_dep = dependency('nix-cmd', required: true) # Nix need extra flags not provided in its pkg-config files. nix_dep = declare_dependency( dependencies: [ + nix_util_dep, nix_store_dep, nix_main_dep, - nix_expr_dep, - nix_flake_dep, - nix_cmd_dep, ], - compile_args: ['-include', 'nix/config.h'], + compile_args: [ + '-include', 'nix/config-util.hh', + '-include', 'nix/config-store.hh', + '-include', 'nix/config-main.hh', + ], ) pqxx_dep = dependency('libpqxx', required: true) diff --git a/package.nix b/package.nix index f944fe2b..12fac1d8 100644 --- a/package.nix +++ b/package.nix @@ -8,7 +8,10 @@ , perlPackages -, nix +, nix-util +, nix-store +, nix-main +, nix-cli , nix-perl-bindings , git @@ -50,6 +53,7 @@ , xz , gnutar , gnused +, nix-eval-jobs , rpm , dpkg @@ -161,7 +165,7 @@ stdenv.mkDerivation (finalAttrs: { nukeReferences pkg-config mdbook - nix + nix-cli perlDeps perl unzip @@ -171,7 +175,9 @@ stdenv.mkDerivation (finalAttrs: { libpqxx openssl libxslt - nix + nix-util + nix-store + nix-main perlDeps perl boost @@ -190,6 +196,7 @@ stdenv.mkDerivation (finalAttrs: { openldap postgresql_13 pixz + nix-eval-jobs ]; checkInputs = [ @@ -197,13 +204,14 @@ stdenv.mkDerivation (finalAttrs: { glibcLocales libressl.nc python3 + nix-cli ]; hydraPath = lib.makeBinPath ( [ subversion openssh - nix + nix-cli coreutils findutils pixz @@ -218,6 +226,7 @@ stdenv.mkDerivation (finalAttrs: { darcs gnused breezy + nix-eval-jobs ] ++ lib.optionals stdenv.isLinux [ rpm dpkg cdrkit ] ); @@ -232,7 +241,7 @@ stdenv.mkDerivation (finalAttrs: { shellHook = '' pushd $(git rev-parse --show-toplevel) >/dev/null - PATH=$(pwd)/src/hydra-evaluator:$(pwd)/src/script:$(pwd)/src/hydra-eval-jobs:$(pwd)/src/hydra-queue-runner:$PATH + PATH=$(pwd)/src/hydra-evaluator:$(pwd)/src/script:$(pwd)/src/hydra-queue-runner:$PATH PERL5LIB=$(pwd)/src/lib:$PERL5LIB export HYDRA_HOME="$(pwd)/src/" mkdir -p .hydra-data @@ -263,12 +272,13 @@ stdenv.mkDerivation (finalAttrs: { --prefix PATH ':' $out/bin:$hydraPath \ --set 
HYDRA_RELEASE ${version} \ --set HYDRA_HOME $out/libexec/hydra \ - --set NIX_RELEASE ${nix.name or "unknown"} + --set NIX_RELEASE ${nix-cli.name or "unknown"} \ + --set NIX_EVAL_JOBS_RELEASE ${nix-eval-jobs.name or "unknown"} done ''; dontStrip = true; meta.description = "Build of Hydra on ${stdenv.system}"; - passthru = { inherit perlDeps nix; }; + passthru = { inherit perlDeps; }; }) diff --git a/src/hydra-eval-jobs/hydra-eval-jobs.cc b/src/hydra-eval-jobs/hydra-eval-jobs.cc deleted file mode 100644 index b83cae91..00000000 --- a/src/hydra-eval-jobs/hydra-eval-jobs.cc +++ /dev/null @@ -1,587 +0,0 @@ -#include -#include -#include -#include - -#include "shared.hh" -#include "store-api.hh" -#include "eval.hh" -#include "eval-gc.hh" -#include "eval-inline.hh" -#include "eval-settings.hh" -#include "signals.hh" -#include "terminal.hh" -#include "util.hh" -#include "get-drvs.hh" -#include "globals.hh" -#include "common-eval-args.hh" -#include "flake/flakeref.hh" -#include "flake/flake.hh" -#include "attr-path.hh" -#include "derivations.hh" -#include "local-fs-store.hh" - -#include "hydra-config.hh" - -#include -#include -#include - -#include - -void check_pid_status_nonblocking(pid_t check_pid) -{ - // Only check 'initialized' and known PID's - if (check_pid <= 0) { return; } - - int wstatus = 0; - pid_t pid = waitpid(check_pid, &wstatus, WNOHANG); - // -1 = failure, WNOHANG: 0 = no change - if (pid <= 0) { return; } - - std::cerr << "child process (" << pid << ") "; - - if (WIFEXITED(wstatus)) { - std::cerr << "exited with status=" << WEXITSTATUS(wstatus) << std::endl; - } else if (WIFSIGNALED(wstatus)) { - std::cerr << "killed by signal=" << WTERMSIG(wstatus) << std::endl; - } else if (WIFSTOPPED(wstatus)) { - std::cerr << "stopped by signal=" << WSTOPSIG(wstatus) << std::endl; - } else if (WIFCONTINUED(wstatus)) { - std::cerr << "continued" << std::endl; - } -} - -using namespace nix; - -static Path gcRootsDir; -static size_t maxMemorySize; - -struct MyArgs : MixEvalArgs, MixCommonArgs, RootArgs -{ - Path releaseExpr; - bool flake = false; - bool dryRun = false; - - MyArgs() : MixCommonArgs("hydra-eval-jobs") - { - addFlag({ - .longName = "gc-roots-dir", - .description = "garbage collector roots directory", - .labels = {"path"}, - .handler = {&gcRootsDir} - }); - - addFlag({ - .longName = "dry-run", - .description = "don't create store derivations", - .handler = {&dryRun, true} - }); - - addFlag({ - .longName = "flake", - .description = "build a flake", - .handler = {&flake, true} - }); - - expectArg("expr", &releaseExpr); - } -}; - -static MyArgs myArgs; - -static std::string queryMetaStrings(EvalState & state, PackageInfo & drv, const std::string & name, const std::string & subAttribute) -{ - Strings res; - std::function rec; - - rec = [&](Value & v) { - state.forceValue(v, noPos); - if (v.type() == nString) - res.emplace_back(v.string_view()); - else if (v.isList()) - for (unsigned int n = 0; n < v.listSize(); ++n) - rec(*v.listElems()[n]); - else if (v.type() == nAttrs) { - auto a = v.attrs()->find(state.symbols.create(subAttribute)); - if (a != v.attrs()->end()) - res.push_back(std::string(state.forceString(*a->value, a->pos, "while evaluating meta attributes"))); - } - }; - - Value * v = drv.queryMeta(name); - if (v) rec(*v); - - return concatStringsSep(", ", res); -} - -static void worker( - EvalState & state, - Bindings & autoArgs, - AutoCloseFD & to, - AutoCloseFD & from) -{ - Value vTop; - - if (myArgs.flake) { - using namespace flake; - - auto [flakeRef, fragment, outputSpec] = 
parseFlakeRefWithFragmentAndExtendedOutputsSpec(fetchSettings, myArgs.releaseExpr, absPath(".")); - - auto vFlake = state.allocValue(); - - auto lockedFlake = lockFlake( - flakeSettings, - state, - flakeRef, - LockFlags { - .updateLockFile = false, - .useRegistries = false, - .allowUnlocked = false, - }); - - callFlake(state, lockedFlake, *vFlake); - - auto vOutputs = vFlake->attrs()->get(state.symbols.create("outputs"))->value; - state.forceValue(*vOutputs, noPos); - - auto aHydraJobs = vOutputs->attrs()->get(state.symbols.create("hydraJobs")); - if (!aHydraJobs) - aHydraJobs = vOutputs->attrs()->get(state.symbols.create("checks")); - if (!aHydraJobs) - throw Error("flake '%s' does not provide any Hydra jobs or checks", flakeRef); - - vTop = *aHydraJobs->value; - - } else { - state.evalFile(lookupFileArg(state, myArgs.releaseExpr), vTop); - } - - auto vRoot = state.allocValue(); - state.autoCallFunction(autoArgs, vTop, *vRoot); - - while (true) { - /* Wait for the master to send us a job name. */ - writeLine(to.get(), "next"); - - auto s = readLine(from.get()); - if (s == "exit") break; - if (!hasPrefix(s, "do ")) abort(); - std::string attrPath(s, 3); - - debug("worker process %d at '%s'", getpid(), attrPath); - - /* Evaluate it and send info back to the master. */ - nlohmann::json reply; - - try { - auto vTmp = findAlongAttrPath(state, attrPath, autoArgs, *vRoot).first; - - auto v = state.allocValue(); - state.autoCallFunction(autoArgs, *vTmp, *v); - - if (auto drv = getDerivation(state, *v, false)) { - - // CA derivations do not have static output paths, so we - // have to defensively not query output paths in case we - // encounter one. - PackageInfo::Outputs outputs = drv->queryOutputs( - !experimentalFeatureSettings.isEnabled(Xp::CaDerivations)); - - if (drv->querySystem() == "unknown") - state.error("derivation must have a 'system' attribute").debugThrow(); - - auto drvPath = state.store->printStorePath(drv->requireDrvPath()); - - nlohmann::json job; - - job["nixName"] = drv->queryName(); - job["system"] =drv->querySystem(); - job["drvPath"] = drvPath; - job["description"] = drv->queryMetaString("description"); - job["license"] = queryMetaStrings(state, *drv, "license", "shortName"); - job["homepage"] = drv->queryMetaString("homepage"); - job["maintainers"] = queryMetaStrings(state, *drv, "maintainers", "email"); - job["schedulingPriority"] = drv->queryMetaInt("schedulingPriority", 100); - job["timeout"] = drv->queryMetaInt("timeout", 36000); - job["maxSilent"] = drv->queryMetaInt("maxSilent", 7200); - job["isChannel"] = drv->queryMetaBool("isHydraChannel", false); - - /* If this is an aggregate, then get its constituents. 
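   Constituents that appear as derivation paths (via the string context
   evaluated below) are recorded directly under `constituents`;
   constituents given as plain job-name strings are collected into
   `namedConstituents` and resolved by the parent process once every
   job has been evaluated.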
*/ - auto a = v->attrs()->get(state.symbols.create("_hydraAggregate")); - if (a && state.forceBool(*a->value, a->pos, "while evaluating the `_hydraAggregate` attribute")) { - auto a = v->attrs()->get(state.symbols.create("constituents")); - if (!a) - state.error("derivation must have a ‘constituents’ attribute").debugThrow(); - - NixStringContext context; - state.coerceToString(a->pos, *a->value, context, "while evaluating the `constituents` attribute", true, false); - for (auto & c : context) - std::visit(overloaded { - [&](const NixStringContextElem::Built & b) { - job["constituents"].push_back(b.drvPath->to_string(*state.store)); - }, - [&](const NixStringContextElem::Opaque & o) { - }, - [&](const NixStringContextElem::DrvDeep & d) { - }, - }, c.raw); - - state.forceList(*a->value, a->pos, "while evaluating the `constituents` attribute"); - for (unsigned int n = 0; n < a->value->listSize(); ++n) { - auto v = a->value->listElems()[n]; - state.forceValue(*v, noPos); - if (v->type() == nString) - job["namedConstituents"].push_back(v->string_view()); - } - } - - /* Register the derivation as a GC root. !!! This - registers roots for jobs that we may have already - done. */ - auto localStore = state.store.dynamic_pointer_cast(); - if (gcRootsDir != "" && localStore) { - Path root = gcRootsDir + "/" + std::string(baseNameOf(drvPath)); - if (!pathExists(root)) - localStore->addPermRoot(localStore->parseStorePath(drvPath), root); - } - - nlohmann::json out; - for (auto & [outputName, optOutputPath] : outputs) { - if (optOutputPath) { - out[outputName] = state.store->printStorePath(*optOutputPath); - } else { - // See the `queryOutputs` call above; we should - // not encounter missing output paths otherwise. - assert(experimentalFeatureSettings.isEnabled(Xp::CaDerivations)); - out[outputName] = nullptr; - } - } - job["outputs"] = std::move(out); - reply["job"] = std::move(job); - } - - else if (v->type() == nAttrs) { - auto attrs = nlohmann::json::array(); - StringSet ss; - for (auto & i : v->attrs()->lexicographicOrder(state.symbols)) { - std::string name(state.symbols[i->name]); - if (name.find(' ') != std::string::npos) { - printError("skipping job with illegal name '%s'", name); - continue; - } - attrs.push_back(name); - } - reply["attrs"] = std::move(attrs); - } - - else if (v->type() == nNull) - ; - - else state.error("attribute '%s' is %s, which is not supported", attrPath, showType(*v)).debugThrow(); - - } catch (EvalError & e) { - auto msg = e.msg(); - // Transmits the error we got from the previous evaluation - // in the JSON output. - reply["error"] = filterANSIEscapes(msg, true); - // Don't forget to print it into the STDERR log, this is - // what's shown in the Hydra UI. - printError(msg); - } - - writeLine(to.get(), reply.dump()); - - /* If our RSS exceeds the maximum, exit. The master will - start a new process. */ - struct rusage r; - getrusage(RUSAGE_SELF, &r); - if ((size_t) r.ru_maxrss > maxMemorySize * 1024) break; - } - - writeLine(to.get(), "restart"); -} - -int main(int argc, char * * argv) -{ - /* Prevent undeclared dependencies in the evaluation via - $NIX_PATH. 
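   Together with the `restrictEval` setting applied below, this forces
   every dependency of the evaluation to come in through the release
   expression or the flake's locked inputs.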
*/ - unsetenv("NIX_PATH"); - - return handleExceptions(argv[0], [&]() { - - auto config = std::make_unique(); - - auto nrWorkers = config->getIntOption("evaluator_workers", 1); - maxMemorySize = config->getIntOption("evaluator_max_memory_size", 4096); - - initNix(); - initGC(); - - myArgs.parseCmdline(argvToStrings(argc, argv)); - - auto pureEval = config->getBoolOption("evaluator_pure_eval", myArgs.flake); - - /* FIXME: The build hook in conjunction with import-from-derivation is causing "unexpected EOF" during eval */ - settings.builders = ""; - - /* Prevent access to paths outside of the Nix search path and - to the environment. */ - evalSettings.restrictEval = true; - - /* When building a flake, use pure evaluation (no access to - 'getEnv', 'currentSystem' etc. */ - evalSettings.pureEval = pureEval; - - if (myArgs.dryRun) settings.readOnlyMode = true; - - if (myArgs.releaseExpr == "") throw UsageError("no expression specified"); - - if (gcRootsDir == "") printMsg(lvlError, "warning: `--gc-roots-dir' not specified"); - - struct State - { - std::set todo{""}; - std::set active; - nlohmann::json jobs; - std::exception_ptr exc; - }; - - std::condition_variable wakeup; - - Sync state_; - - /* Start a handler thread per worker process. */ - auto handler = [&]() - { - pid_t pid = -1; - try { - AutoCloseFD from, to; - - while (true) { - - /* Start a new worker process if necessary. */ - if (pid == -1) { - Pipe toPipe, fromPipe; - toPipe.create(); - fromPipe.create(); - pid = startProcess( - [&, - to{std::make_shared(std::move(fromPipe.writeSide))}, - from{std::make_shared(std::move(toPipe.readSide))} - ]() - { - try { - auto evalStore = myArgs.evalStoreUrl - ? openStore(*myArgs.evalStoreUrl) - : openStore(); - EvalState state(myArgs.lookupPath, - evalStore, fetchSettings, evalSettings); - Bindings & autoArgs = *myArgs.getAutoArgs(state); - worker(state, autoArgs, *to, *from); - } catch (Error & e) { - nlohmann::json err; - auto msg = e.msg(); - err["error"] = filterANSIEscapes(msg, true); - printError(msg); - writeLine(to->get(), err.dump()); - // Don't forget to print it into the STDERR log, this is - // what's shown in the Hydra UI. - writeLine(to->get(), "restart"); - } - }, - ProcessOptions { .allowVfork = false }); - from = std::move(fromPipe.readSide); - to = std::move(toPipe.writeSide); - debug("created worker process %d", pid); - } - - /* Check whether the existing worker process is still there. */ - auto s = readLine(from.get()); - if (s == "restart") { - pid = -1; - continue; - } else if (s != "next") { - auto json = nlohmann::json::parse(s); - throw Error("worker error: %s", (std::string) json["error"]); - } - - /* Wait for a job name to become available. */ - std::string attrPath; - - while (true) { - checkInterrupt(); - auto state(state_.lock()); - if ((state->todo.empty() && state->active.empty()) || state->exc) { - writeLine(to.get(), "exit"); - return; - } - if (!state->todo.empty()) { - attrPath = *state->todo.begin(); - state->todo.erase(state->todo.begin()); - state->active.insert(attrPath); - break; - } else - state.wait(wakeup); - } - - /* Tell the worker to evaluate it. */ - writeLine(to.get(), "do " + attrPath); - - /* Wait for the response. */ - auto response = nlohmann::json::parse(readLine(from.get())); - - /* Handle the response. 
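   A reply carries `job` (a successfully evaluated derivation),
   `attrs` (child attribute names to recurse into), or `error` (an
   evaluation failure recorded against this attribute path).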
*/ - StringSet newAttrs; - - if (response.find("job") != response.end()) { - auto state(state_.lock()); - state->jobs[attrPath] = response["job"]; - } - - if (response.find("attrs") != response.end()) { - for (auto & i : response["attrs"]) { - std::string path = i; - if (path.find(".") != std::string::npos){ - path = "\"" + path + "\""; - } - auto s = (attrPath.empty() ? "" : attrPath + ".") + (std::string) path; - newAttrs.insert(s); - } - } - - if (response.find("error") != response.end()) { - auto state(state_.lock()); - state->jobs[attrPath]["error"] = response["error"]; - } - - /* Add newly discovered job names to the queue. */ - { - auto state(state_.lock()); - state->active.erase(attrPath); - for (auto & s : newAttrs) - state->todo.insert(s); - wakeup.notify_all(); - } - } - } catch (...) { - check_pid_status_nonblocking(pid); - auto state(state_.lock()); - state->exc = std::current_exception(); - wakeup.notify_all(); - } - }; - - std::vector threads; - for (size_t i = 0; i < nrWorkers; i++) - threads.emplace_back(std::thread(handler)); - - for (auto & thread : threads) - thread.join(); - - auto state(state_.lock()); - - if (state->exc) - std::rethrow_exception(state->exc); - - /* For aggregate jobs that have named consistuents - (i.e. constituents that are a job name rather than a - derivation), look up the referenced job and add it to the - dependencies of the aggregate derivation. */ - auto store = openStore(); - - for (auto i = state->jobs.begin(); i != state->jobs.end(); ++i) { - auto jobName = i.key(); - auto & job = i.value(); - - auto named = job.find("namedConstituents"); - if (named == job.end()) continue; - - std::unordered_map brokenJobs; - auto getNonBrokenJobOrRecordError = [&brokenJobs, &jobName, &state]( - const std::string & childJobName) -> std::optional { - auto childJob = state->jobs.find(childJobName); - if (childJob == state->jobs.end()) { - printError("aggregate job '%s' references non-existent job '%s'", jobName, childJobName); - brokenJobs[childJobName] = "does not exist"; - return std::nullopt; - } - if (childJob->find("error") != childJob->end()) { - std::string error = (*childJob)["error"]; - printError("aggregate job '%s' references broken job '%s': %s", jobName, childJobName, error); - brokenJobs[childJobName] = error; - return std::nullopt; - } - return *childJob; - }; - - if (myArgs.dryRun) { - for (std::string jobName2 : *named) { - auto job2 = getNonBrokenJobOrRecordError(jobName2); - if (!job2) { - continue; - } - std::string drvPath2 = (*job2)["drvPath"]; - job["constituents"].push_back(drvPath2); - } - } else { - auto drvPath = store->parseStorePath((std::string) job["drvPath"]); - auto drv = store->readDerivation(drvPath); - - for (std::string jobName2 : *named) { - auto job2 = getNonBrokenJobOrRecordError(jobName2); - if (!job2) { - continue; - } - auto drvPath2 = store->parseStorePath((std::string) (*job2)["drvPath"]); - auto drv2 = store->readDerivation(drvPath2); - job["constituents"].push_back(store->printStorePath(drvPath2)); - drv.inputDrvs.map[drvPath2].value = {drv2.outputs.begin()->first}; - } - - if (brokenJobs.empty()) { - std::string drvName(drvPath.name()); - assert(hasSuffix(drvName, drvExtension)); - drvName.resize(drvName.size() - drvExtension.size()); - - auto hashModulo = hashDerivationModulo(*store, drv, true); - if (hashModulo.kind != DrvHash::Kind::Regular) continue; - auto h = hashModulo.hashes.find("out"); - if (h == hashModulo.hashes.end()) continue; - auto outPath = store->makeOutputPath("out", h->second, drvName); - 
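// With the constituents now fixed as inputs, give the aggregate a
// fresh input-addressed output path and write the rewritten derivation.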
drv.env["out"] = store->printStorePath(outPath); - drv.outputs.insert_or_assign("out", DerivationOutput::InputAddressed { .path = outPath }); - auto newDrvPath = store->printStorePath(writeDerivation(*store, drv)); - - debug("rewrote aggregate derivation %s -> %s", store->printStorePath(drvPath), newDrvPath); - - job["drvPath"] = newDrvPath; - job["outputs"]["out"] = store->printStorePath(outPath); - } - } - - job.erase("namedConstituents"); - - /* Register the derivation as a GC root. !!! This - registers roots for jobs that we may have already - done. */ - auto localStore = store.dynamic_pointer_cast(); - if (gcRootsDir != "" && localStore) { - auto drvPath = job["drvPath"].get(); - Path root = gcRootsDir + "/" + std::string(baseNameOf(drvPath)); - if (!pathExists(root)) - localStore->addPermRoot(localStore->parseStorePath(drvPath), root); - } - - if (!brokenJobs.empty()) { - std::stringstream ss; - for (const auto& [jobName, error] : brokenJobs) { - ss << jobName << ": " << error << "\n"; - } - job["error"] = ss.str(); - } - } - - std::cout << state->jobs.dump(2) << "\n"; - }); -} diff --git a/src/hydra-eval-jobs/meson.build b/src/hydra-eval-jobs/meson.build deleted file mode 100644 index 916212e1..00000000 --- a/src/hydra-eval-jobs/meson.build +++ /dev/null @@ -1,8 +0,0 @@ -hydra_eval_jobs = executable('hydra-eval-jobs', - 'hydra-eval-jobs.cc', - dependencies: [ - libhydra_dep, - nix_dep, - ], - install: true, -) diff --git a/src/hydra-queue-runner/build-remote.cc b/src/hydra-queue-runner/build-remote.cc index 1cabd291..39970bd3 100644 --- a/src/hydra-queue-runner/build-remote.cc +++ b/src/hydra-queue-runner/build-remote.cc @@ -7,70 +7,35 @@ #include "build-result.hh" #include "path.hh" +#include "legacy-ssh-store.hh" #include "serve-protocol.hh" -#include "serve-protocol-impl.hh" #include "state.hh" #include "current-process.hh" #include "processes.hh" #include "util.hh" -#include "serve-protocol.hh" -#include "serve-protocol-impl.hh" #include "ssh.hh" #include "finally.hh" #include "url.hh" using namespace nix; +bool ::Machine::isLocalhost() const +{ + return storeUri.params.empty() && std::visit(overloaded { + [](const StoreReference::Auto &) { + return true; + }, + [](const StoreReference::Specified & s) { + return + (s.scheme == "local" || s.scheme == "unix") || + ((s.scheme == "ssh" || s.scheme == "ssh-ng") && + s.authority == "localhost"); + }, + }, storeUri.variant); +} + namespace nix::build_remote { -static Strings extraStoreArgs(std::string & machine) -{ - Strings result; - try { - auto parsed = parseURL(machine); - if (parsed.scheme != "ssh") { - throw SysError("Currently, only (legacy-)ssh stores are supported!"); - } - machine = parsed.authority.value_or(""); - auto remoteStore = parsed.query.find("remote-store"); - if (remoteStore != parsed.query.end()) { - result = {"--store", shellEscape(remoteStore->second)}; - } - } catch (BadURL &) { - // We just try to continue with `machine->sshName` here for backwards compat. - } - - return result; -} - -static std::unique_ptr openConnection( - ::Machine::ptr machine, SSHMaster & master) -{ - Strings command = {"nix-store", "--serve", "--write"}; - if (machine->isLocalhost()) { - command.push_back("--builders"); - command.push_back(""); - } else { - command.splice(command.end(), extraStoreArgs(machine->sshName)); - } - - auto ret = master.startCommand(std::move(command), { - "-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes" - }); - - // XXX: determine the actual max value we can use from /proc. 
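// (The replacement code requests the same 1 MiB buffer by setting
// LegacySSHStore::connPipeSize instead of calling fcntl directly.)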
- - // FIXME: Should this be upstreamed into `startCommand` in Nix? - - int pipesize = 1024 * 1024; - - fcntl(ret->in.get(), F_SETPIPE_SZ, &pipesize); - fcntl(ret->out.get(), F_SETPIPE_SZ, &pipesize); - - return ret; -} - - static void copyClosureTo( ::Machine::Connection & conn, Store & destStore, @@ -87,8 +52,8 @@ static void copyClosureTo( // FIXME: substitute output pollutes our build log /* Get back the set of paths that are already valid on the remote host. */ - auto present = conn.queryValidPaths( - destStore, true, closure, useSubstitutes); + auto present = conn.store->queryValidPaths( + closure, true, useSubstitutes); if (present.size() == closure.size()) return; @@ -103,12 +68,7 @@ static void copyClosureTo( std::unique_lock sendLock(conn.machine->state->sendLock, std::chrono::seconds(600)); - conn.to << ServeProto::Command::ImportPaths; - destStore.exportPaths(missing, conn.to); - conn.to.flush(); - - if (readInt(conn.from) != 1) - throw Error("remote machine failed to import closure"); + conn.store->addMultipleToStoreLegacy(destStore, missing); } @@ -198,7 +158,7 @@ static BasicDerivation sendInputs( MaintainCount mc2(nrStepsCopyingTo); printMsg(lvlDebug, "sending closure of ‘%s’ to ‘%s’", - localStore.printStorePath(step.drvPath), conn.machine->sshName); + localStore.printStorePath(step.drvPath), conn.machine->storeUri.render()); auto now1 = std::chrono::steady_clock::now(); @@ -228,7 +188,7 @@ static BuildResult performBuild( counter & nrStepsBuilding ) { - conn.putBuildDerivationRequest(localStore, drvPath, drv, options); + auto kont = conn.store->buildDerivationAsync(drvPath, drv, options); BuildResult result; @@ -237,7 +197,10 @@ static BuildResult performBuild( startTime = time(0); { MaintainCount mc(nrStepsBuilding); - result = ServeProto::Serialise::read(localStore, conn); + result = kont(); + // Without proper call-once functions, we need to manually + // delete after calling. + kont = {}; } stopTime = time(0); @@ -253,7 +216,7 @@ static BuildResult performBuild( // If the protocol was too old to give us `builtOutputs`, initialize // it manually by introspecting the derivation. - if (GET_PROTOCOL_MINOR(conn.remoteVersion) < 6) + if (GET_PROTOCOL_MINOR(conn.store->getProtocol()) < 6) { // If the remote is too old to handle CA derivations, we can’t get this // far anyways @@ -278,32 +241,6 @@ static BuildResult performBuild( return result; } -static std::map queryPathInfos( - ::Machine::Connection & conn, - Store & localStore, - StorePathSet & outputs, - size_t & totalNarSize -) -{ - - /* Get info about each output path. */ - std::map infos; - conn.to << ServeProto::Command::QueryPathInfos; - ServeProto::write(localStore, conn, outputs); - conn.to.flush(); - while (true) { - auto storePathS = readString(conn.from); - if (storePathS == "") break; - - auto storePath = localStore.parseStorePath(storePathS); - auto info = ServeProto::Serialise::read(localStore, conn); - totalNarSize += info.narSize; - infos.insert_or_assign(std::move(storePath), std::move(info)); - } - - return infos; -} - static void copyPathFromRemote( ::Machine::Connection & conn, NarMemberDatas & narMembers, @@ -312,26 +249,25 @@ static void copyPathFromRemote( const ValidPathInfo & info ) { - /* Receive the NAR from the remote and add it to the - destination store. Meanwhile, extract all the info from the - NAR that getBuildOutput() needs. 
*/ - auto source2 = sinkToSource([&](Sink & sink) - { - /* Note: we should only send the command to dump the store - path to the remote if the NAR is actually going to get read - by the destination store, which won't happen if this path - is already valid on the destination store. Since this - lambda function only gets executed if someone tries to read - from source2, we will send the command from here rather - than outside the lambda. */ - conn.to << ServeProto::Command::DumpStorePath << localStore.printStorePath(info.path); - conn.to.flush(); + /* Receive the NAR from the remote and add it to the + destination store. Meanwhile, extract all the info from the + NAR that getBuildOutput() needs. */ + auto source2 = sinkToSource([&](Sink & sink) + { + /* Note: we should only send the command to dump the store + path to the remote if the NAR is actually going to get read + by the destination store, which won't happen if this path + is already valid on the destination store. Since this + lambda function only gets executed if someone tries to read + from source2, we will send the command from here rather + than outside the lambda. */ + conn.store->narFromPath(info.path, [&](Source & source) { + TeeSource tee{source, sink}; + extractNarData(tee, conn.store->printStorePath(info.path), narMembers); + }); + }); - TeeSource tee(conn.from, sink); - extractNarData(tee, localStore.printStorePath(info.path), narMembers); - }); - - destStore.addToStore(info, *source2, NoRepair, NoCheckSigs); + destStore.addToStore(info, *source2, NoRepair, NoCheckSigs); } static void copyPathsFromRemote( @@ -430,22 +366,39 @@ void State::buildRemote(ref destStore, updateStep(ssConnecting); - SSHMaster master { - machine->sshName, - machine->sshKey, - machine->sshPublicHostKey, - false, // no SSH master yet - false, // no compression yet - logFD.get(), - }; - // FIXME: rewrite to use Store. - auto child = build_remote::openConnection(machine, master); + ::Machine::Connection conn { + .machine = machine, + .store = [&]{ + auto * pSpecified = std::get_if(&machine->storeUri.variant); + if (!pSpecified || pSpecified->scheme != "ssh") { + throw Error("Currently, only (legacy-)ssh stores are supported!"); + } + + auto remoteStore = machine->openStore().dynamic_pointer_cast(); + assert(remoteStore); + + remoteStore->connPipeSize = 1024 * 1024; + + if (machine->isLocalhost()) { + auto rp_new = remoteStore->remoteProgram.get(); + rp_new.push_back("--builders"); + rp_new.push_back(""); + const_cast &>(remoteStore->remoteProgram).assign(rp_new); + } + remoteStore->extraSshArgs = { + "-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes" + }; + const_cast &>(remoteStore->logFD).assign(logFD.get()); + + return nix::ref{remoteStore}; + }(), + }; { auto activeStepState(activeStep->state_.lock()); if (activeStepState->cancelled) throw Error("step cancelled"); - activeStepState->pid = child->sshPid; + activeStepState->pid = conn.store->getConnectionPid(); } Finally clearPid([&]() { @@ -460,41 +413,12 @@ void State::buildRemote(ref destStore, process. Meh. */ }); - ::Machine::Connection conn { - { - .to = child->in.get(), - .from = child->out.get(), - /* Handshake. 
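   With this change the version negotiation happens inside
   LegacySSHStore when the connection is opened, so none of this
   hand-rolled setup survives.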
*/ - .remoteVersion = 0xdadbeef, // FIXME avoid dummy initialize - }, - /*.machine =*/ machine, - }; - Finally updateStats([&]() { - bytesReceived += conn.from.read; - bytesSent += conn.to.written; + auto stats = conn.store->getConnectionStats(); + bytesReceived += stats.bytesReceived; + bytesSent += stats.bytesSent; }); - constexpr ServeProto::Version our_version = 0x206; - - try { - conn.remoteVersion = decltype(conn)::handshake( - conn.to, - conn.from, - our_version, - machine->sshName); - } catch (EndOfFile & e) { - child->sshPid.wait(); - std::string s = chomp(readFile(result.logFile)); - throw Error("cannot connect to ‘%1%’: %2%", machine->sshName, s); - } - - // Do not attempt to speak a newer version of the protocol. - // - // Per https://github.com/NixOS/nix/issues/9584 should be handled as - // part of `handshake` in upstream nix. - conn.remoteVersion = std::min(conn.remoteVersion, our_version); - { auto info(machine->state->connectInfo.lock()); info->consecutiveFailures = 0; @@ -523,7 +447,7 @@ void State::buildRemote(ref destStore, /* Do the build. */ printMsg(lvlDebug, "building ‘%s’ on ‘%s’", localStore->printStorePath(step->drvPath), - machine->sshName); + machine->storeUri.render()); updateStep(ssBuilding); @@ -546,7 +470,7 @@ void State::buildRemote(ref destStore, get a build log. */ if (result.isCached) { printMsg(lvlInfo, "outputs of ‘%s’ substituted or already valid on ‘%s’", - localStore->printStorePath(step->drvPath), machine->sshName); + localStore->printStorePath(step->drvPath), machine->storeUri.render()); unlink(result.logFile.c_str()); result.logFile = ""; } @@ -563,8 +487,10 @@ void State::buildRemote(ref destStore, auto now1 = std::chrono::steady_clock::now(); + auto infos = conn.store->queryPathInfosUncached(outputs); + size_t totalNarSize = 0; - auto infos = build_remote::queryPathInfos(conn, *localStore, outputs, totalNarSize); + for (auto & [_, info] : infos) totalNarSize += info.narSize; if (totalNarSize > maxOutputSize) { result.stepStatus = bsNarSizeLimitExceeded; @@ -573,7 +499,7 @@ void State::buildRemote(ref destStore, /* Copy each path. */ printMsg(lvlDebug, "copying outputs of ‘%s’ from ‘%s’ (%d bytes)", - localStore->printStorePath(step->drvPath), machine->sshName, totalNarSize); + localStore->printStorePath(step->drvPath), machine->storeUri.render(), totalNarSize); build_remote::copyPathsFromRemote(conn, narMembers, *localStore, *destStore, infos); auto now2 = std::chrono::steady_clock::now(); @@ -596,9 +522,11 @@ void State::buildRemote(ref destStore, } } - /* Shut down the connection. */ - child->in = -1; - child->sshPid.wait(); + /* Shut down the connection done by RAII. + + Only difference is kill() instead of wait() (i.e. 
send signal + then wait()) + */ } catch (Error & e) { /* Disable this machine until a certain period of time has @@ -612,7 +540,7 @@ void State::buildRemote(ref destStore, info->consecutiveFailures = std::min(info->consecutiveFailures + 1, (unsigned int) 4); info->lastFailure = now; int delta = retryInterval * std::pow(retryBackoff, info->consecutiveFailures - 1) + (rand() % 30); - printMsg(lvlInfo, "will disable machine ‘%1%’ for %2%s", machine->sshName, delta); + printMsg(lvlInfo, "will disable machine ‘%1%’ for %2%s", machine->storeUri.render(), delta); info->disabledUntil = now + std::chrono::seconds(delta); } throw; diff --git a/src/hydra-queue-runner/builder.cc b/src/hydra-queue-runner/builder.cc index 5269febd..4bc00f0c 100644 --- a/src/hydra-queue-runner/builder.cc +++ b/src/hydra-queue-runner/builder.cc @@ -41,7 +41,7 @@ void State::builder(MachineReservation::ptr reservation) } catch (std::exception & e) { printMsg(lvlError, "uncaught exception building ‘%s’ on ‘%s’: %s", localStore->printStorePath(reservation->step->drvPath), - reservation->machine->sshName, + reservation->machine->storeUri.render(), e.what()); } } @@ -150,7 +150,7 @@ State::StepResult State::doBuildStep(nix::ref destStore, buildOptions.buildTimeout = build->buildTimeout; printInfo("performing step ‘%s’ %d times on ‘%s’ (needed by build %d and %d others)", - localStore->printStorePath(step->drvPath), buildOptions.nrRepeats + 1, machine->sshName, buildId, (dependents.size() - 1)); + localStore->printStorePath(step->drvPath), buildOptions.nrRepeats + 1, machine->storeUri.render(), buildId, (dependents.size() - 1)); } if (!buildOneDone) @@ -178,7 +178,7 @@ State::StepResult State::doBuildStep(nix::ref destStore, unlink(result.logFile.c_str()); } } catch (...) { - ignoreException(); + ignoreExceptionInDestructor(); } } }); @@ -196,7 +196,7 @@ State::StepResult State::doBuildStep(nix::ref destStore, { auto mc = startDbUpdate(); pqxx::work txn(*conn); - stepNr = createBuildStep(txn, result.startTime, buildId, step, machine->sshName, bsBusy); + stepNr = createBuildStep(txn, result.startTime, buildId, step, machine->storeUri.render(), bsBusy); txn.commit(); } @@ -253,7 +253,7 @@ State::StepResult State::doBuildStep(nix::ref destStore, /* Finish the step in the database. */ if (stepNr) { pqxx::work txn(*conn); - finishBuildStep(txn, result, buildId, stepNr, machine->sshName); + finishBuildStep(txn, result, buildId, stepNr, machine->storeUri.render()); txn.commit(); } @@ -261,7 +261,7 @@ State::StepResult State::doBuildStep(nix::ref destStore, issue). Retry a number of times. */ if (result.canRetry) { printMsg(lvlError, "possibly transient failure building ‘%s’ on ‘%s’: %s", - localStore->printStorePath(step->drvPath), machine->sshName, result.errorMsg); + localStore->printStorePath(step->drvPath), machine->storeUri.render(), result.errorMsg); assert(stepNr); bool retry; { @@ -452,7 +452,7 @@ void State::failStep( build->finishedInDB) continue; createBuildStep(txn, - 0, build->id, step, machine ? machine->sshName : "", + 0, build->id, step, machine ? machine->storeUri.render() : "", result.stepStatus, result.errorMsg, buildId == build->id ? 0 : buildId); } diff --git a/src/hydra-queue-runner/dispatcher.cc b/src/hydra-queue-runner/dispatcher.cc index a4c84252..cbf982bf 100644 --- a/src/hydra-queue-runner/dispatcher.cc +++ b/src/hydra-queue-runner/dispatcher.cc @@ -256,7 +256,7 @@ system_time State::doDispatch() /* Can this machine do this step? 
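   That is, does the machine build this step's system type and
   provide every feature the derivation requires?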
*/ if (!mi.machine->supportsStep(step)) { debug("machine '%s' does not support step '%s' (system type '%s')", - mi.machine->sshName, localStore->printStorePath(step->drvPath), step->drv->platform); + mi.machine->storeUri.render(), localStore->printStorePath(step->drvPath), step->drv->platform); continue; } diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index 28ed6deb..99411f9f 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -135,65 +135,26 @@ void State::parseMachines(const std::string & contents) oldMachines = *machines_; } - for (auto line : tokenizeString(contents, "\n")) { - line = trim(std::string(line, 0, line.find('#'))); - auto tokens = tokenizeString>(line); - if (tokens.size() < 3) continue; - tokens.resize(8); - - if (tokens[5] == "-") tokens[5] = ""; - auto supportedFeatures = tokenizeString(tokens[5], ","); - - if (tokens[6] == "-") tokens[6] = ""; - auto mandatoryFeatures = tokenizeString(tokens[6], ","); - - for (auto & f : mandatoryFeatures) - supportedFeatures.insert(f); - - using MaxJobs = std::remove_const::type; - - auto machine = std::make_shared<::Machine>(nix::Machine { - // `storeUri`, not yet used - "", - // `systemTypes` - tokenizeString(tokens[1], ","), - // `sshKey` - tokens[2] == "-" ? "" : tokens[2], - // `maxJobs` - tokens[3] != "" - ? string2Int(tokens[3]).value() - : 1, - // `speedFactor` - std::stof(tokens[4].c_str()), - // `supportedFeatures` - std::move(supportedFeatures), - // `mandatoryFeatures` - std::move(mandatoryFeatures), - // `sshPublicHostKey` - tokens[7] != "" && tokens[7] != "-" - ? tokens[7] - : "", - }); - - machine->sshName = tokens[0]; + for (auto && machine_ : nix::Machine::parseConfig({}, contents)) { + auto machine = std::make_shared<::Machine>(std::move(machine_)); /* Re-use the State object of the previous machine with the same name. */ - auto i = oldMachines.find(machine->sshName); + auto i = oldMachines.find(machine->storeUri.variant); if (i == oldMachines.end()) - printMsg(lvlChatty, "adding new machine ‘%1%’", machine->sshName); + printMsg(lvlChatty, "adding new machine ‘%1%’", machine->storeUri.render()); else - printMsg(lvlChatty, "updating machine ‘%1%’", machine->sshName); + printMsg(lvlChatty, "updating machine ‘%1%’", machine->storeUri.render()); machine->state = i == oldMachines.end() ? std::make_shared<::Machine::State>() : i->second->state; - newMachines[machine->sshName] = machine; + newMachines[machine->storeUri.variant] = machine; } for (auto & m : oldMachines) if (newMachines.find(m.first) == newMachines.end()) { if (m.second->enabled) - printInfo("removing machine ‘%1%’", m.first); + printInfo("removing machine ‘%1%’", m.second->storeUri.render()); /* Add a disabled ::Machine object to make sure stats are maintained. 
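   The disabled placeholder keeps the machine's counters alive for
   status reporting without letting the dispatcher schedule new steps
   on it.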
*/ auto machine = std::make_shared<::Machine>(*(m.second)); @@ -657,7 +618,7 @@ void State::dumpStatus(Connection & conn) machine["avgStepTime"] = (float) s->totalStepTime / s->nrStepsDone; machine["avgStepBuildTime"] = (float) s->totalStepBuildTime / s->nrStepsDone; } - statusJson["machines"][m->sshName] = machine; + statusJson["machines"][m->storeUri.render()] = machine; } } diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh index 839239fe..e2d31434 100644 --- a/src/hydra-queue-runner/state.hh +++ b/src/hydra-queue-runner/state.hh @@ -6,7 +6,6 @@ #include #include #include -#include #include #include @@ -21,9 +20,7 @@ #include "store-api.hh" #include "sync.hh" #include "nar-extractor.hh" -#include "serve-protocol.hh" -#include "serve-protocol-impl.hh" -#include "serve-protocol-connection.hh" +#include "legacy-ssh-store.hh" #include "machines.hh" @@ -241,10 +238,6 @@ struct Machine : nix::Machine { typedef std::shared_ptr ptr; - /* TODO Get rid of: `nix::Machine::storeUri` is normalized in a way - we are not yet used to, but once we are, we don't need this. */ - std::string sshName; - struct State { typedef std::shared_ptr ptr; counter currentJobs{0}; @@ -294,16 +287,14 @@ struct Machine : nix::Machine return true; } - bool isLocalhost() - { - std::regex r("^(ssh://|ssh-ng://)?localhost$"); - return std::regex_search(sshName, r); - } + bool isLocalhost() const; // A connection to a machine - struct Connection : nix::ServeProto::BasicClientConnection { + struct Connection { // Backpointer to the machine ptr machine; + // Opened store + nix::ref store; }; }; @@ -358,7 +349,7 @@ private: /* The build machines. */ std::mutex machinesReadyLock; - typedef std::map Machines; + typedef std::map Machines; nix::Sync machines; // FIXME: use atomic_shared_ptr /* Various stats. */ diff --git a/src/lib/Hydra/Controller/Root.pm b/src/lib/Hydra/Controller/Root.pm index aa1ad5ab..a231d7c0 100644 --- a/src/lib/Hydra/Controller/Root.pm +++ b/src/lib/Hydra/Controller/Root.pm @@ -51,6 +51,7 @@ sub begin :Private { $c->stash->{curUri} = $c->request->uri; $c->stash->{version} = $ENV{"HYDRA_RELEASE"} || ""; $c->stash->{nixVersion} = $ENV{"NIX_RELEASE"} || ""; + $c->stash->{nixEvalJobsVersion} = $ENV{"NIX_EVAL_JOBS_RELEASE"} || ""; $c->stash->{curTime} = time; $c->stash->{logo} = defined $c->config->{hydra_logo} ? "/logo" : ""; $c->stash->{tracker} = defined $c->config->{tracker} ? $c->config->{tracker} : ""; diff --git a/src/meson.build b/src/meson.build index 8c7562ed..52b821bc 100644 --- a/src/meson.build +++ b/src/meson.build @@ -1,6 +1,5 @@ # Native code subdir('libhydra') -subdir('hydra-eval-jobs') subdir('hydra-evaluator') subdir('hydra-queue-runner') diff --git a/src/root/layout.tt b/src/root/layout.tt index d67ff1b8..399962b4 100644 --- a/src/root/layout.tt +++ b/src/root/layout.tt @@ -93,7 +93,7 @@
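
A sketch of the queue runner's new remote-build path, assuming the Nix 2.26 LegacySSHStore API used in build-remote.cc above (openStore(), queryValidPaths(), addMultipleToStoreLegacy(), buildDerivationAsync()). The function name runStepSketch and its parameter list are illustrative, not part of this diff:

    #include <cassert>

    #include "build-result.hh"
    #include "derivations.hh"
    #include "legacy-ssh-store.hh"
    #include "machines.hh"
    #include "serve-protocol.hh"

    using namespace nix;

    // Drive one build step over a legacy-ssh connection, mirroring the
    // shape of State::buildRemote() after this change.
    BuildResult runStepSketch(
        Machine & machine, Store & destStore,
        const StorePath & drvPath, const BasicDerivation & drv,
        const ServeProto::BuildOptions & options,
        const StorePathSet & closure)
    {
        // Open the machine's store; only (legacy-)ssh stores are supported.
        auto store = machine.openStore().dynamic_pointer_cast<LegacySSHStore>();
        assert(store);
        store->connPipeSize = 1024 * 1024;

        // Ask the remote which closure paths it already has and copy only
        // the missing ones (no substitution on the remote side here).
        auto present = store->queryValidPaths(closure, true, NoSubstitute);
        StorePathSet missing;
        for (auto & path : closure)
            if (!present.count(path)) missing.insert(path);
        store->addMultipleToStoreLegacy(destStore, missing);

        // Start the build; the returned continuation blocks until the
        // remote side reports a result.
        auto kont = store->buildDerivationAsync(drvPath, drv, options);
        return kont();
    }

Protocol setup, version negotiation, and pipe sizing, which the deleted openConnection() and handshake code handled by hand, are now owned by the store object; the builder only decides what to copy and what to build.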