Merge branch 'master' into patch-1
@@ -1,5 +1,7 @@
 # IMPORTANT: if you delete this file your app will not work as
 # expected. you have been warned
+use strict;
+use warnings;
 use inc::Module::Install;
 
 name 'Hydra';
@@ -1,3 +0,0 @@
-SUBDIRS = hydra-evaluator hydra-eval-jobs hydra-queue-runner sql script lib root ttf
-BOOTCLEAN_SUBDIRS = $(SUBDIRS)
-DIST_SUBDIRS = $(SUBDIRS)
@@ -1,5 +0,0 @@
-bin_PROGRAMS = hydra-eval-jobs
-
-hydra_eval_jobs_SOURCES = hydra-eval-jobs.cc
-hydra_eval_jobs_LDADD = $(NIX_LIBS)
-hydra_eval_jobs_CXXFLAGS = $(NIX_CFLAGS) -I ../libhydra
@@ -1,491 +0,0 @@
-#include <map>
-#include <iostream>
-#include <thread>
-
-#include "shared.hh"
-#include "store-api.hh"
-#include "eval.hh"
-#include "eval-inline.hh"
-#include "util.hh"
-#include "get-drvs.hh"
-#include "globals.hh"
-#include "common-eval-args.hh"
-#include "flake/flakeref.hh"
-#include "flake/flake.hh"
-#include "attr-path.hh"
-#include "derivations.hh"
-#include "local-fs-store.hh"
-
-#include "hydra-config.hh"
-
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <sys/resource.h>
-
-#include <nlohmann/json.hpp>
-
-using namespace nix;
-
-static Path gcRootsDir;
-static size_t maxMemorySize;
-
-struct MyArgs : MixEvalArgs, MixCommonArgs
-{
-    Path releaseExpr;
-    bool flake = false;
-    bool dryRun = false;
-
-    MyArgs() : MixCommonArgs("hydra-eval-jobs")
-    {
-        addFlag({
-            .longName = "help",
-            .description = "show usage information",
-            .handler = {[&]() {
-                printHelp(programName, std::cout);
-                throw Exit();
-            }}
-        });
-
-        addFlag({
-            .longName = "gc-roots-dir",
-            .description = "garbage collector roots directory",
-            .labels = {"path"},
-            .handler = {&gcRootsDir}
-        });
-
-        addFlag({
-            .longName = "dry-run",
-            .description = "don't create store derivations",
-            .handler = {&dryRun, true}
-        });
-
-        addFlag({
-            .longName = "flake",
-            .description = "build a flake",
-            .handler = {&flake, true}
-        });
-
-        expectArg("expr", &releaseExpr);
-    }
-};
-
-static MyArgs myArgs;
-
-static std::string queryMetaStrings(EvalState & state, DrvInfo & drv, const string & name, const string & subAttribute)
-{
-    Strings res;
-    std::function<void(Value & v)> rec;
-
-    rec = [&](Value & v) {
-        state.forceValue(v);
-        if (v.type() == nString)
-            res.push_back(v.string.s);
-        else if (v.isList())
-            for (unsigned int n = 0; n < v.listSize(); ++n)
-                rec(*v.listElems()[n]);
-        else if (v.type() == nAttrs) {
-            auto a = v.attrs->find(state.symbols.create(subAttribute));
-            if (a != v.attrs->end())
-                res.push_back(state.forceString(*a->value));
-        }
-    };
-
-    Value * v = drv.queryMeta(name);
-    if (v) rec(*v);
-
-    return concatStringsSep(", ", res);
-}
-
-static void worker(
-    EvalState & state,
-    Bindings & autoArgs,
-    AutoCloseFD & to,
-    AutoCloseFD & from)
-{
-    Value vTop;
-
-    if (myArgs.flake) {
-        using namespace flake;
-
-        auto flakeRef = parseFlakeRef(myArgs.releaseExpr);
-
-        auto vFlake = state.allocValue();
-
-        auto lockedFlake = lockFlake(state, flakeRef,
-            LockFlags {
-                .updateLockFile = false,
-                .useRegistries = false,
-                .allowMutable = false,
-            });
-
-        callFlake(state, lockedFlake, *vFlake);
-
-        auto vOutputs = vFlake->attrs->get(state.symbols.create("outputs"))->value;
-        state.forceValue(*vOutputs);
-
-        auto aHydraJobs = vOutputs->attrs->get(state.symbols.create("hydraJobs"));
-        if (!aHydraJobs)
-            aHydraJobs = vOutputs->attrs->get(state.symbols.create("checks"));
-        if (!aHydraJobs)
-            throw Error("flake '%s' does not provide any Hydra jobs or checks", flakeRef);
-
-        vTop = *aHydraJobs->value;
-
-    } else {
-        state.evalFile(lookupFileArg(state, myArgs.releaseExpr), vTop);
-    }
-
-    auto vRoot = state.allocValue();
-    state.autoCallFunction(autoArgs, vTop, *vRoot);
-
-    while (true) {
-        /* Wait for the master to send us a job name. */
-        writeLine(to.get(), "next");
-
-        auto s = readLine(from.get());
-        if (s == "exit") break;
-        if (!hasPrefix(s, "do ")) abort();
-        std::string attrPath(s, 3);
-
-        debug("worker process %d at '%s'", getpid(), attrPath);
-
-        /* Evaluate it and send info back to the master. */
-        nlohmann::json reply;
-
-        try {
-            auto vTmp = findAlongAttrPath(state, attrPath, autoArgs, *vRoot).first;
-
-            auto v = state.allocValue();
-            state.autoCallFunction(autoArgs, *vTmp, *v);
-
-            if (auto drv = getDerivation(state, *v, false)) {
-
-                DrvInfo::Outputs outputs = drv->queryOutputs();
-
-                if (drv->querySystem() == "unknown")
-                    throw EvalError("derivation must have a 'system' attribute");
-
-                auto drvPath = drv->queryDrvPath();
-
-                nlohmann::json job;
-
-                job["nixName"] = drv->queryName();
-                job["system"] = drv->querySystem();
-                job["drvPath"] = drvPath;
-                job["description"] = drv->queryMetaString("description");
-                job["license"] = queryMetaStrings(state, *drv, "license", "shortName");
-                job["homepage"] = drv->queryMetaString("homepage");
-                job["maintainers"] = queryMetaStrings(state, *drv, "maintainers", "email");
-                job["schedulingPriority"] = drv->queryMetaInt("schedulingPriority", 100);
-                job["timeout"] = drv->queryMetaInt("timeout", 36000);
-                job["maxSilent"] = drv->queryMetaInt("maxSilent", 7200);
-                job["isChannel"] = drv->queryMetaBool("isHydraChannel", false);
-
-                /* If this is an aggregate, then get its constituents. */
-                auto a = v->attrs->get(state.symbols.create("_hydraAggregate"));
-                if (a && state.forceBool(*a->value, *a->pos)) {
-                    auto a = v->attrs->get(state.symbols.create("constituents"));
-                    if (!a)
-                        throw EvalError("derivation must have a ‘constituents’ attribute");
-
-
-                    PathSet context;
-                    state.coerceToString(*a->pos, *a->value, context, true, false);
-                    for (auto & i : context)
-                        if (i.at(0) == '!') {
-                            size_t index = i.find("!", 1);
-                            job["constituents"].push_back(string(i, index + 1));
-                        }
-
-                    state.forceList(*a->value, *a->pos);
-                    for (unsigned int n = 0; n < a->value->listSize(); ++n) {
-                        auto v = a->value->listElems()[n];
-                        state.forceValue(*v);
-                        if (v->type() == nString)
-                            job["namedConstituents"].push_back(state.forceStringNoCtx(*v));
-                    }
-                }
-
-                /* Register the derivation as a GC root. !!! This
-                   registers roots for jobs that we may have already
-                   done. */
-                auto localStore = state.store.dynamic_pointer_cast<LocalFSStore>();
-                if (gcRootsDir != "" && localStore) {
-                    Path root = gcRootsDir + "/" + std::string(baseNameOf(drvPath));
-                    if (!pathExists(root))
-                        localStore->addPermRoot(localStore->parseStorePath(drvPath), root);
-                }
-
-                nlohmann::json out;
-                for (auto & j : outputs)
-                    out[j.first] = j.second;
-                job["outputs"] = std::move(out);
-
-                reply["job"] = std::move(job);
-            }
-
-            else if (v->type() == nAttrs) {
-                auto attrs = nlohmann::json::array();
-                StringSet ss;
-                for (auto & i : v->attrs->lexicographicOrder()) {
-                    std::string name(i->name);
-                    if (name.find('.') != std::string::npos || name.find(' ') != std::string::npos) {
-                        printError("skipping job with illegal name '%s'", name);
-                        continue;
-                    }
-                    attrs.push_back(name);
-                }
-                reply["attrs"] = std::move(attrs);
-            }
-
-            else if (v->type() == nNull)
-                ;
-
-            else throw TypeError("attribute '%s' is %s, which is not supported", attrPath, showType(*v));
-
-        } catch (EvalError & e) {
-            // Transmits the error we got from the previous evaluation
-            // in the JSON output.
-            reply["error"] = filterANSIEscapes(e.msg(), true);
-            // Don't forget to print it into the STDERR log, this is
-            // what's shown in the Hydra UI.
-            printError("error: %s", reply["error"]);
-        }
-
-        writeLine(to.get(), reply.dump());
-
-        /* If our RSS exceeds the maximum, exit. The master will
-           start a new process. */
-        struct rusage r;
-        getrusage(RUSAGE_SELF, &r);
-        if ((size_t) r.ru_maxrss > maxMemorySize * 1024) break;
-    }
-
-    writeLine(to.get(), "restart");
-}
-
-int main(int argc, char * * argv)
-{
-    /* Prevent undeclared dependencies in the evaluation via
-       $NIX_PATH. */
-    unsetenv("NIX_PATH");
-
-    return handleExceptions(argv[0], [&]() {
-
-        auto config = std::make_unique<HydraConfig>();
-
-        auto nrWorkers = config->getIntOption("evaluator_workers", 1);
-        maxMemorySize = config->getIntOption("evaluator_max_memory_size", 4096);
-
-        initNix();
-        initGC();
-
-        myArgs.parseCmdline(argvToStrings(argc, argv));
-
-        /* FIXME: The build hook in conjunction with import-from-derivation is causing "unexpected EOF" during eval */
-        settings.builders = "";
-
-        /* Prevent access to paths outside of the Nix search path and
-           to the environment. */
-        evalSettings.restrictEval = true;
-
-        /* When building a flake, use pure evaluation (no access to
-           'getEnv', 'currentSystem' etc.). */
-        evalSettings.pureEval = myArgs.flake;
-
-        if (myArgs.dryRun) settings.readOnlyMode = true;
-
-        if (myArgs.releaseExpr == "") throw UsageError("no expression specified");
-
-        if (gcRootsDir == "") printMsg(lvlError, "warning: `--gc-roots-dir' not specified");
-
-        struct State
-        {
-            std::set<std::string> todo{""};
-            std::set<std::string> active;
-            nlohmann::json jobs;
-            std::exception_ptr exc;
-        };
-
-        std::condition_variable wakeup;
-
-        Sync<State> state_;
-
-        /* Start a handler thread per worker process. */
-        auto handler = [&]()
-        {
-            try {
-                pid_t pid = -1;
-                AutoCloseFD from, to;
-
-                while (true) {
-
-                    /* Start a new worker process if necessary. */
-                    if (pid == -1) {
-                        Pipe toPipe, fromPipe;
-                        toPipe.create();
-                        fromPipe.create();
-                        pid = startProcess(
-                            [&,
-                             to{std::make_shared<AutoCloseFD>(std::move(fromPipe.writeSide))},
-                             from{std::make_shared<AutoCloseFD>(std::move(toPipe.readSide))}
-                            ]()
-                            {
-                                try {
-                                    EvalState state(myArgs.searchPath, openStore());
-                                    Bindings & autoArgs = *myArgs.getAutoArgs(state);
-                                    worker(state, autoArgs, *to, *from);
-                                } catch (std::exception & e) {
-                                    nlohmann::json err;
-                                    err["error"] = e.what();
-                                    writeLine(to->get(), err.dump());
-                                    // Don't forget to print it into the STDERR log, this is
-                                    // what's shown in the Hydra UI.
-                                    printError("error: %s", err["error"]);
-                                }
-                            },
-                            ProcessOptions { .allowVfork = false });
-                        from = std::move(fromPipe.readSide);
-                        to = std::move(toPipe.writeSide);
-                        debug("created worker process %d", pid);
-                    }
-
-                    /* Check whether the existing worker process is still there. */
-                    auto s = readLine(from.get());
-                    if (s == "restart") {
-                        pid = -1;
-                        continue;
-                    } else if (s != "next") {
-                        auto json = nlohmann::json::parse(s);
-                        throw Error("worker error: %s", (std::string) json["error"]);
-                    }
-
-                    /* Wait for a job name to become available. */
-                    std::string attrPath;
-
-                    while (true) {
-                        checkInterrupt();
-                        auto state(state_.lock());
-                        if ((state->todo.empty() && state->active.empty()) || state->exc) {
-                            writeLine(to.get(), "exit");
-                            return;
-                        }
-                        if (!state->todo.empty()) {
-                            attrPath = *state->todo.begin();
-                            state->todo.erase(state->todo.begin());
-                            state->active.insert(attrPath);
-                            break;
-                        } else
-                            state.wait(wakeup);
-                    }
-
-                    /* Tell the worker to evaluate it. */
-                    writeLine(to.get(), "do " + attrPath);
-
-                    /* Wait for the response. */
-                    auto response = nlohmann::json::parse(readLine(from.get()));
-
-                    /* Handle the response. */
-                    StringSet newAttrs;
-
-                    if (response.find("job") != response.end()) {
-                        auto state(state_.lock());
-                        state->jobs[attrPath] = response["job"];
-                    }
-
-                    if (response.find("attrs") != response.end()) {
-                        for (auto & i : response["attrs"]) {
-                            auto s = (attrPath.empty() ? "" : attrPath + ".") + (std::string) i;
-                            newAttrs.insert(s);
-                        }
-                    }
-
-                    if (response.find("error") != response.end()) {
-                        auto state(state_.lock());
-                        state->jobs[attrPath]["error"] = response["error"];
-                    }
-
-                    /* Add newly discovered job names to the queue. */
-                    {
-                        auto state(state_.lock());
-                        state->active.erase(attrPath);
-                        for (auto & s : newAttrs)
-                            state->todo.insert(s);
-                        wakeup.notify_all();
-                    }
-                }
-            } catch (...) {
-                auto state(state_.lock());
-                state->exc = std::current_exception();
-                wakeup.notify_all();
-            }
-        };
-
-        std::vector<std::thread> threads;
-        for (size_t i = 0; i < nrWorkers; i++)
-            threads.emplace_back(std::thread(handler));
-
-        for (auto & thread : threads)
-            thread.join();
-
-        auto state(state_.lock());
-
-        if (state->exc)
-            std::rethrow_exception(state->exc);
-
-        /* For aggregate jobs that have named constituents
-           (i.e. constituents that are a job name rather than a
-           derivation), look up the referenced job and add it to the
-           dependencies of the aggregate derivation. */
-        auto store = openStore();
-
-        for (auto i = state->jobs.begin(); i != state->jobs.end(); ++i) {
-            auto jobName = i.key();
-            auto & job = i.value();
-
-            auto named = job.find("namedConstituents");
-            if (named == job.end()) continue;
-
-            if (myArgs.dryRun) {
-                for (std::string jobName2 : *named) {
-                    auto job2 = state->jobs.find(jobName2);
-                    if (job2 == state->jobs.end())
-                        throw Error("aggregate job '%s' references non-existent job '%s'", jobName, jobName2);
-                    std::string drvPath2 = (*job2)["drvPath"];
-                    job["constituents"].push_back(drvPath2);
-                }
-            } else {
-                auto drvPath = store->parseStorePath((std::string) job["drvPath"]);
-                auto drv = store->readDerivation(drvPath);
-
-                for (std::string jobName2 : *named) {
-                    auto job2 = state->jobs.find(jobName2);
-                    if (job2 == state->jobs.end())
-                        throw Error("aggregate job '%s' references non-existent job '%s'", jobName, jobName2);
-                    auto drvPath2 = store->parseStorePath((std::string) (*job2)["drvPath"]);
-                    auto drv2 = store->readDerivation(drvPath2);
-                    job["constituents"].push_back(store->printStorePath(drvPath2));
-                    drv.inputDrvs[drvPath2] = {drv2.outputs.begin()->first};
-                }
-
-                std::string drvName(drvPath.name());
-                assert(hasSuffix(drvName, drvExtension));
-                drvName.resize(drvName.size() - drvExtension.size());
-                auto h = std::get<Hash>(hashDerivationModulo(*store, drv, true));
-                auto outPath = store->makeOutputPath("out", h, drvName);
-                drv.env["out"] = store->printStorePath(outPath);
-                drv.outputs.insert_or_assign("out", DerivationOutput { .output = DerivationOutputInputAddressed { .path = outPath } });
-                auto newDrvPath = store->printStorePath(writeDerivation(*store, drv));
-
-                debug("rewrote aggregate derivation %s -> %s", store->printStorePath(drvPath), newDrvPath);
-
-                job["drvPath"] = newDrvPath;
-                job["outputs"]["out"] = store->printStorePath(outPath);
-            }
-
-            job.erase("namedConstituents");
-        }
-
-        std::cout << state->jobs.dump(2) << "\n";
-    });
-}
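Note on the evaluator deleted above: each worker exits voluntarily once its peak RSS crosses the configured limit, and the master responds by forking a fresh worker. A minimal self-contained sketch of that policy (not Hydra code; `maxMemoryMiB` is an assumed parameter name):

```cpp
#include <sys/resource.h>
#include <cstddef>

// Returns true when the calling process should exit so its parent can
// respawn it with a fresh heap. On Linux, ru_maxrss is reported in KiB,
// which is why the limit in MiB is multiplied by 1024 in the original.
static bool shouldRestart(size_t maxMemoryMiB)
{
    struct rusage r;
    getrusage(RUSAGE_SELF, &r);
    return (size_t) r.ru_maxrss > maxMemoryMiB * 1024;
}
```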
@@ -1,5 +0,0 @@
-bin_PROGRAMS = hydra-evaluator
-
-hydra_evaluator_SOURCES = hydra-evaluator.cc
-hydra_evaluator_LDADD = $(NIX_LIBS) -lpqxx
-hydra_evaluator_CXXFLAGS = $(NIX_CFLAGS) -Wall -I ../libhydra -Wno-deprecated-declarations
@@ -1,7 +1,8 @@
 #include "db.hh"
 #include "hydra-config.hh"
-#include "pool.hh"
-#include "shared.hh"
+#include <nix/util/pool.hh>
+#include <nix/main/shared.hh>
+#include <nix/util/signals.hh>
 
 #include <algorithm>
 #include <thread>
@@ -37,7 +38,7 @@ class JobsetId {
     friend bool operator!= (const JobsetId & lhs, const JobsetName & rhs);
 
     std::string display() const {
-        return str(format("%1%:%2% (jobset#%3%)") % project % jobset % id);
+        return boost::str(boost::format("%1%:%2% (jobset#%3%)") % project % jobset % id);
     }
 };
 bool operator==(const JobsetId & lhs, const JobsetId & rhs)
@@ -233,12 +234,12 @@ struct Evaluator
             pqxx::work txn(*conn);
 
             if (jobset.evaluation_style == EvaluationStyle::ONE_AT_A_TIME) {
-                auto evaluation_res = txn.parameterized
+                auto evaluation_res = txn.exec_params
                     ("select id from JobsetEvals "
                      "where jobset_id = $1 "
-                     "order by id desc limit 1")
-                    (jobset.name.id)
-                    .exec();
+                     "order by id desc limit 1"
+                     ,jobset.name.id
+                     );
 
                 if (evaluation_res.empty()) {
                     // First evaluation, so allow scheduling.
@@ -249,15 +250,15 @@ struct Evaluator
 
                 auto evaluation_id = evaluation_res[0][0].as<int>();
 
-                auto unfinished_build_res = txn.parameterized
+                auto unfinished_build_res = txn.exec_params
                     ("select id from Builds "
                      "join JobsetEvalMembers "
                      "    on (JobsetEvalMembers.build = Builds.id) "
                      "where JobsetEvalMembers.eval = $1 "
                      "  and builds.finished = 0 "
-                     " limit 1")
-                    (evaluation_id)
-                    .exec();
+                     " limit 1"
+                     ,evaluation_id
+                     );
 
                 // If the previous evaluation has no unfinished builds
                 // schedule!
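Context for the two hunks above: newer libpqxx removed the `parameterized(...)(...).exec()` chain; `exec_params` takes the SQL and its `$n` parameters in a single call. A minimal standalone sketch (connection string and table borrowed from this diff, otherwise assumed):

```cpp
#include <pqxx/pqxx>
#include <iostream>

int main()
{
    pqxx::connection conn("dbname=hydra");  // assumed connection string
    pqxx::work txn(conn);

    // exec_params binds $1, $2, ... positionally in one call, replacing
    // the deprecated parameterized(...)(...).exec() chain.
    pqxx::result r = txn.exec_params(
        "select id from JobsetEvals where jobset_id = $1 "
        "order by id desc limit 1",
        42);

    if (!r.empty())
        std::cout << r[0][0].as<int>() << "\n";
}
```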
@@ -366,6 +367,9 @@ struct Evaluator
                 printInfo("received jobset event");
             }
 
+        } catch (pqxx::broken_connection & e) {
+            printError("Database connection broken: %s", e.what());
+            std::_Exit(1);
         } catch (std::exception & e) {
             printError("exception in database monitor thread: %s", e.what());
             sleep(30);
@@ -473,6 +477,9 @@ struct Evaluator
         while (true) {
             try {
                 loop();
+            } catch (pqxx::broken_connection & e) {
+                printError("Database connection broken: %s", e.what());
+                std::_Exit(1);
             } catch (std::exception & e) {
                 printError("exception in main loop: %s", e.what());
                 sleep(30);
src/hydra-evaluator/meson.build (new file)
@@ -0,0 +1,10 @@
+hydra_evaluator = executable('hydra-evaluator',
+  'hydra-evaluator.cc',
+  dependencies: [
+    libhydra_dep,
+    nix_util_dep,
+    nix_main_dep,
+    pqxx_dep,
+  ],
+  install: true,
+)
@@ -1,8 +0,0 @@
-bin_PROGRAMS = hydra-queue-runner
-
-hydra_queue_runner_SOURCES = hydra-queue-runner.cc queue-monitor.cc dispatcher.cc \
-  builder.cc build-result.cc build-remote.cc \
-  build-result.hh counter.hh state.hh db.hh \
-  nar-extractor.cc nar-extractor.hh
-hydra_queue_runner_LDADD = $(NIX_LIBS) -lpqxx
-hydra_queue_runner_CXXFLAGS = $(NIX_CFLAGS) -Wall -I ../libhydra -Wno-deprecated-declarations
@@ -5,106 +5,94 @@
 #include <sys/stat.h>
 #include <fcntl.h>
 
-#include "serve-protocol.hh"
+#include <nix/store/build-result.hh>
+#include <nix/store/path.hh>
+#include <nix/store/legacy-ssh-store.hh>
+#include <nix/store/serve-protocol.hh>
+#include <nix/store/serve-protocol-impl.hh>
 #include "state.hh"
-#include "util.hh"
-#include "worker-protocol.hh"
-#include "finally.hh"
+#include <nix/util/current-process.hh>
+#include <nix/util/processes.hh>
+#include <nix/util/util.hh>
+#include <nix/store/serve-protocol.hh>
+#include <nix/store/serve-protocol-impl.hh>
+#include <nix/store/ssh.hh>
+#include <nix/util/finally.hh>
+#include <nix/util/url.hh>
 
 using namespace nix;
 
 
-struct Child
+bool ::Machine::isLocalhost() const
 {
-    Pid pid;
-    AutoCloseFD to, from;
-};
-
-
-static void append(Strings & dst, const Strings & src)
-{
-    dst.insert(dst.end(), src.begin(), src.end());
+    return storeUri.params.empty() && std::visit(overloaded {
+        [](const StoreReference::Auto &) {
+            return true;
+        },
+        [](const StoreReference::Specified & s) {
+            return
+                (s.scheme == "local" || s.scheme == "unix") ||
+                ((s.scheme == "ssh" || s.scheme == "ssh-ng") &&
+                 s.authority == "localhost");
+        },
+    }, storeUri.variant);
 }
 
+namespace nix::build_remote {
 
-static void openConnection(Machine::ptr machine, Path tmpDir, int stderrFD, Child & child)
+static std::unique_ptr<SSHMaster::Connection> openConnection(
+    ::Machine::ptr machine, SSHMaster & master)
 {
-    string pgmName;
-    Pipe to, from;
-    to.create();
-    from.create();
-
-    child.pid = startProcess([&]() {
-
-        restoreSignals();
-
-        if (dup2(to.readSide.get(), STDIN_FILENO) == -1)
-            throw SysError("cannot dup input pipe to stdin");
-
-        if (dup2(from.writeSide.get(), STDOUT_FILENO) == -1)
-            throw SysError("cannot dup output pipe to stdout");
-
-        if (dup2(stderrFD, STDERR_FILENO) == -1)
-            throw SysError("cannot dup stderr");
-
-        Strings argv;
-        if (machine->isLocalhost()) {
-            pgmName = "nix-store";
-            argv = {"nix-store", "--builders", "", "--serve", "--write"};
-        }
-        else {
-            pgmName = "ssh";
-            argv = {"ssh", machine->sshName};
-            if (machine->sshKey != "") append(argv, {"-i", machine->sshKey});
-            if (machine->sshPublicHostKey != "") {
-                Path fileName = tmpDir + "/host-key";
-                auto p = machine->sshName.find("@");
-                string host = p != string::npos ? string(machine->sshName, p + 1) : machine->sshName;
-                writeFile(fileName, host + " " + machine->sshPublicHostKey + "\n");
-                append(argv, {"-oUserKnownHostsFile=" + fileName});
-            }
-            append(argv,
-                { "-x", "-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
-                , "--", "nix-store", "--serve", "--write" });
-        }
+    Strings command = {"nix-store", "--serve", "--write"};
+    if (machine->isLocalhost()) {
+        command.push_back("--builders");
+        command.push_back("");
+    } else {
+        auto remoteStore = machine->storeUri.params.find("remote-store");
+        if (remoteStore != machine->storeUri.params.end()) {
+            command.push_back("--store");
+            command.push_back(escapeShellArgAlways(remoteStore->second));
+        }
+    }
 
-        execvp(argv.front().c_str(), (char * *) stringsToCharPtrs(argv).data()); // FIXME: remove cast
-
-        throw SysError("cannot start %s", pgmName);
+    auto ret = master.startCommand(std::move(command), {
+        "-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
     });
 
-    to.readSide = -1;
-    from.writeSide = -1;
-
-    child.to = to.writeSide.release();
-    child.from = from.readSide.release();
+    // XXX: determine the actual max value we can use from /proc.
+
+    // FIXME: Should this be upstreamed into `startCommand` in Nix?
+    int pipesize = 1024 * 1024;
+
+    fcntl(ret->in.get(), F_SETPIPE_SZ, &pipesize);
+    fcntl(ret->out.get(), F_SETPIPE_SZ, &pipesize);
+
+    return ret;
 }
 
 
-static void copyClosureTo(std::timed_mutex & sendMutex, ref<Store> destStore,
-    FdSource & from, FdSink & to, const StorePathSet & paths,
-    bool useSubstitutes = false)
+static void copyClosureTo(
+    ::Machine::Connection & conn,
+    Store & destStore,
+    const StorePathSet & paths,
+    SubstituteFlag useSubstitutes = NoSubstitute)
 {
     StorePathSet closure;
-    for (auto & path : paths)
-        destStore->computeFSClosure(path, closure);
+    destStore.computeFSClosure(paths, closure);
 
     /* Send the "query valid paths" command with the "lock" option
        enabled. This prevents a race where the remote host
       garbage-collects paths that are already there. Optionally, ask
       the remote host to substitute missing paths. */
    // FIXME: substitute output pollutes our build log
-    to << cmdQueryValidPaths << 1 << useSubstitutes;
-    worker_proto::write(*destStore, to, closure);
-    to.flush();
-
-    /* Get back the set of paths that are already valid on the remote
-       host. */
-    auto present = worker_proto::read(*destStore, from, Phantom<StorePathSet> {});
+    auto present = conn.queryValidPaths(
+        destStore, true, closure, useSubstitutes);
 
     if (present.size() == closure.size()) return;
 
-    auto sorted = destStore->topoSortPaths(closure);
+    auto sorted = destStore.topoSortPaths(closure);
 
     StorePathSet missing;
     for (auto i = sorted.rbegin(); i != sorted.rend(); ++i)
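Aside on the new `isLocalhost` above: it uses the standard "overloaded" visitor idiom with `std::visit` (Nix ships its own `overloaded` helper). A self-contained C++17 version of the pattern, with an illustrative variant rather than Nix's `StoreReference`:

```cpp
#include <variant>
#include <string>
#include <iostream>

// The classic "overloaded" helper: inherit operator() from every lambda.
template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;

int main()
{
    std::variant<int, std::string> v = std::string("localhost");
    bool isLocal = std::visit(overloaded {
        [](int) { return false; },
        [](const std::string & s) { return s == "localhost"; },
    }, v);
    std::cout << isLocal << "\n"; // prints 1
}
```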
@@ -112,20 +100,20 @@ static void copyClosureTo(std::timed_mutex & sendMutex, ref<Store> destStore,
 
     printMsg(lvlDebug, "sending %d missing paths", missing.size());
 
-    std::unique_lock<std::timed_mutex> sendLock(sendMutex,
+    std::unique_lock<std::timed_mutex> sendLock(conn.machine->state->sendLock,
         std::chrono::seconds(600));
 
-    to << cmdImportPaths;
-    destStore->exportPaths(missing, to);
-    to.flush();
+    conn.to << ServeProto::Command::ImportPaths;
+    destStore.exportPaths(missing, conn.to);
+    conn.to.flush();
 
-    if (readInt(from) != 1)
+    if (readInt(conn.from) != 1)
         throw Error("remote machine failed to import closure");
 }
 
 
 // FIXME: use Store::topoSortPaths().
-StorePaths reverseTopoSortPaths(const std::map<StorePath, ValidPathInfo> & paths)
+static StorePaths reverseTopoSortPaths(const std::map<StorePath, UnkeyedValidPathInfo> & paths)
 {
     StorePaths sorted;
     StorePathSet visited;
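The 600-second `sendLock` above relies on `std::unique_lock`'s timed constructor: given a `std::timed_mutex` and a duration, it calls `try_lock_for` instead of blocking indefinitely. A minimal illustration (the timeout handling here is a sketch, not what Hydra does):

```cpp
#include <chrono>
#include <iostream>
#include <mutex>

std::timed_mutex sendLock;

void sendWithTimeout()
{
    // Attempts the lock for up to 10 minutes, then gives up.
    std::unique_lock<std::timed_mutex> lock(sendLock, std::chrono::seconds(600));
    if (!lock.owns_lock()) {
        std::cerr << "could not take send lock within 10 minutes\n";
        return;
    }
    // ... send paths while holding the lock; released on scope exit ...
}
```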
@@ -153,40 +141,304 @@ StorePaths reverseTopoSortPaths(const std::map<StorePath, ValidPathInfo> & paths
     return sorted;
 }
 
+static std::pair<Path, AutoCloseFD> openLogFile(const std::string & logDir, const StorePath & drvPath)
+{
+    std::string base(drvPath.to_string());
+    auto logFile = logDir + "/" + std::string(base, 0, 2) + "/" + std::string(base, 2);
+
+    createDirs(dirOf(logFile));
+
+    AutoCloseFD logFD = open(logFile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666);
+    if (!logFD) throw SysError("creating log file ‘%s’", logFile);
+
+    return {std::move(logFile), std::move(logFD)};
+}
+
+static BasicDerivation sendInputs(
+    State & state,
+    Step & step,
+    Store & localStore,
+    Store & destStore,
+    ::Machine::Connection & conn,
+    unsigned int & overhead,
+    counter & nrStepsWaiting,
+    counter & nrStepsCopyingTo
+)
+{
+    /* Replace the input derivations by their output paths to send a
+       minimal closure to the builder.
+
+       `tryResolve` currently does *not* rewrite input addresses, so it
+       is safe to do this in all cases. (It should probably have a mode
+       to do that, however, but we would not use it here.)
+    */
+    BasicDerivation basicDrv = ({
+        auto maybeBasicDrv = step.drv->tryResolve(destStore, &localStore);
+        if (!maybeBasicDrv)
+            throw Error(
+                "the derivation '%s' can’t be resolved. It’s probably "
+                "missing some outputs",
+                localStore.printStorePath(step.drvPath));
+        *maybeBasicDrv;
+    });
+
+    /* Ensure that the inputs exist in the destination store. This is
+       a no-op for regular stores, but for the binary cache store,
+       this will copy the inputs to the binary cache from the local
+       store. */
+    if (&localStore != &destStore) {
+        copyClosure(localStore, destStore,
+            step.drv->inputSrcs,
+            NoRepair, NoCheckSigs, NoSubstitute);
+    }
+
+    {
+        auto mc1 = std::make_shared<MaintainCount<counter>>(nrStepsWaiting);
+        mc1.reset();
+        MaintainCount<counter> mc2(nrStepsCopyingTo);
+
+        printMsg(lvlDebug, "sending closure of ‘%s’ to ‘%s’",
+            localStore.printStorePath(step.drvPath), conn.machine->storeUri.render());
+
+        auto now1 = std::chrono::steady_clock::now();
+
+        /* Copy the input closure. */
+        if (conn.machine->isLocalhost()) {
+            StorePathSet closure;
+            destStore.computeFSClosure(basicDrv.inputSrcs, closure);
+            copyPaths(destStore, localStore, closure, NoRepair, NoCheckSigs, NoSubstitute);
+        } else {
+            copyClosureTo(conn, destStore, basicDrv.inputSrcs, Substitute);
+        }
+
+        auto now2 = std::chrono::steady_clock::now();
+
+        overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
+    }
+
+    return basicDrv;
+}
+
+static BuildResult performBuild(
+    ::Machine::Connection & conn,
+    Store & localStore,
+    StorePath drvPath,
+    const BasicDerivation & drv,
+    const ServeProto::BuildOptions & options,
+    counter & nrStepsBuilding
+)
+{
+    conn.putBuildDerivationRequest(localStore, drvPath, drv, options);
+
+    BuildResult result;
+
+    time_t startTime, stopTime;
+
+    startTime = time(0);
+    {
+        MaintainCount<counter> mc(nrStepsBuilding);
+        result = ServeProto::Serialise<BuildResult>::read(localStore, conn);
+    }
+    stopTime = time(0);
+
+    if (!result.startTime) {
+        // If the builder gave `startTime = 0`, use our measurements
+        // instead of the builder's.
+        //
+        // Note: this represents the duration of a single round, rather
+        // than all rounds.
+        result.startTime = startTime;
+        result.stopTime = stopTime;
+    }
+
+    // If the protocol was too old to give us `builtOutputs`, initialize
+    // it manually by introspecting the derivation.
+    if (GET_PROTOCOL_MINOR(conn.remoteVersion) < 6)
+    {
+        // If the remote is too old to handle CA derivations, we can’t get this
+        // far anyways
+        assert(drv.type().hasKnownOutputPaths());
+        DerivationOutputsAndOptPaths drvOutputs = drv.outputsAndOptPaths(localStore);
+        // Since this a `BasicDerivation`, `staticOutputHashes` will not
+        // do any real work.
+        auto outputHashes = staticOutputHashes(localStore, drv);
+        for (auto & [outputName, output] : drvOutputs) {
+            auto outputPath = output.second;
+            // We’ve just asserted that the output paths of the derivation
+            // were known
+            assert(outputPath);
+            auto outputHash = outputHashes.at(outputName);
+            auto drvOutput = DrvOutput { outputHash, outputName };
+            result.builtOutputs.insert_or_assign(
+                std::move(outputName),
+                Realisation { drvOutput, *outputPath });
+        }
+    }
+
+    return result;
+}
+
+static void copyPathFromRemote(
+    ::Machine::Connection & conn,
+    NarMemberDatas & narMembers,
+    Store & localStore,
+    Store & destStore,
+    const ValidPathInfo & info
+)
+{
+    /* Receive the NAR from the remote and add it to the
+       destination store. Meanwhile, extract all the info from the
+       NAR that getBuildOutput() needs. */
+    auto source2 = sinkToSource([&](Sink & sink)
+    {
+        /* Note: we should only send the command to dump the store
+           path to the remote if the NAR is actually going to get read
+           by the destination store, which won't happen if this path
+           is already valid on the destination store. Since this
+           lambda function only gets executed if someone tries to read
+           from source2, we will send the command from here rather
+           than outside the lambda. */
+        conn.to << ServeProto::Command::DumpStorePath << localStore.printStorePath(info.path);
+        conn.to.flush();
+
+        TeeSource tee(conn.from, sink);
+        extractNarData(tee, localStore.printStorePath(info.path), narMembers);
+    });
+
+    destStore.addToStore(info, *source2, NoRepair, NoCheckSigs);
+}
+
+static void copyPathsFromRemote(
+    ::Machine::Connection & conn,
+    NarMemberDatas & narMembers,
+    Store & localStore,
+    Store & destStore,
+    const std::map<StorePath, UnkeyedValidPathInfo> & infos
+)
+{
+    auto pathsSorted = reverseTopoSortPaths(infos);
+
+    for (auto & path : pathsSorted) {
+        auto & info = infos.find(path)->second;
+        copyPathFromRemote(
+            conn, narMembers, localStore, destStore,
+            ValidPathInfo { path, info });
+    }
+
+}
+
+}
+
+/* using namespace nix::build_remote; */
+
+void RemoteResult::updateWithBuildResult(const nix::BuildResult & buildResult)
+{
+    startTime = buildResult.startTime;
+    stopTime = buildResult.stopTime;
+    timesBuilt = buildResult.timesBuilt;
+    errorMsg = buildResult.errorMsg;
+    isNonDeterministic = buildResult.isNonDeterministic;
+
+    switch ((BuildResult::Status) buildResult.status) {
+        case BuildResult::Built:
+            stepStatus = bsSuccess;
+            break;
+        case BuildResult::Substituted:
+        case BuildResult::AlreadyValid:
+            stepStatus = bsSuccess;
+            isCached = true;
+            break;
+        case BuildResult::PermanentFailure:
+            stepStatus = bsFailed;
+            canCache = true;
+            errorMsg = "";
+            break;
+        case BuildResult::InputRejected:
+        case BuildResult::OutputRejected:
+            stepStatus = bsFailed;
+            canCache = true;
+            break;
+        case BuildResult::TransientFailure:
+            stepStatus = bsFailed;
+            canRetry = true;
+            errorMsg = "";
+            break;
+        case BuildResult::TimedOut:
+            stepStatus = bsTimedOut;
+            errorMsg = "";
+            break;
+        case BuildResult::MiscFailure:
+            stepStatus = bsAborted;
+            canRetry = true;
+            break;
+        case BuildResult::LogLimitExceeded:
+            stepStatus = bsLogLimitExceeded;
+            break;
+        case BuildResult::NotDeterministic:
+            stepStatus = bsNotDeterministic;
+            canRetry = false;
+            canCache = true;
+            break;
+        default:
+            stepStatus = bsAborted;
+            break;
+    }
+
+}
+
+/* Utility guard object to auto-release a semaphore on destruction. */
+template <typename T>
+class SemaphoreReleaser {
+public:
+    SemaphoreReleaser(T* s) : sem(s) {}
+    ~SemaphoreReleaser() { sem->release(); }
+
+private:
+    T* sem;
+};
+
 void State::buildRemote(ref<Store> destStore,
-    Machine::ptr machine, Step::ptr step,
-    unsigned int maxSilentTime, unsigned int buildTimeout, unsigned int repeats,
+    std::unique_ptr<MachineReservation> reservation,
+    ::Machine::ptr machine, Step::ptr step,
+    const ServeProto::BuildOptions & buildOptions,
     RemoteResult & result, std::shared_ptr<ActiveStep> activeStep,
     std::function<void(StepState)> updateStep,
     NarMemberDatas & narMembers)
 {
     assert(BuildResult::TimedOut == 8);
 
-    string base(step->drvPath.to_string());
-    result.logFile = logDir + "/" + string(base, 0, 2) + "/" + string(base, 2);
-    AutoDelete autoDelete(result.logFile, false);
-
-    createDirs(dirOf(result.logFile));
-
-    AutoCloseFD logFD = open(result.logFile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666);
-    if (!logFD) throw SysError("creating log file ‘%s’", result.logFile);
-
-    nix::Path tmpDir = createTempDir();
-    AutoDelete tmpDirDel(tmpDir, true);
+    auto [logFile, logFD] = build_remote::openLogFile(logDir, step->drvPath);
+    AutoDelete logFileDel(logFile, false);
+    result.logFile = logFile;
 
     try {
 
         updateStep(ssConnecting);
 
+        auto storeRef = machine->completeStoreReference();
+
+        auto * pSpecified = std::get_if<StoreReference::Specified>(&storeRef.variant);
+        if (!pSpecified || pSpecified->scheme != "ssh") {
+            throw Error("Currently, only (legacy-)ssh stores are supported!");
+        }
+
+        LegacySSHStoreConfig storeConfig {
+            pSpecified->scheme,
+            pSpecified->authority,
+            storeRef.params
+        };
+
+        auto master = storeConfig.createSSHMaster(
+            false, // no SSH master yet
+            logFD.get());
+
-        // FIXME: rewrite to use Store.
-        Child child;
-        openConnection(machine, tmpDir, logFD.get(), child);
+        auto child = build_remote::openConnection(machine, master);
 
         {
             auto activeStepState(activeStep->state_.lock());
             if (activeStepState->cancelled) throw Error("step cancelled");
-            activeStepState->pid = child.pid;
+            activeStepState->pid = child->sshPid;
         }
 
         Finally clearPid([&]() {
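Note on `SemaphoreReleaser` above: it is a plain RAII guard over anything with a `release()` member. A self-contained usage sketch with C++20's `std::counting_semaphore` (the slot count of 4 is an assumption for illustration):

```cpp
#include <semaphore>
#include <iostream>

template <typename T>
class SemaphoreReleaser {
public:
    SemaphoreReleaser(T* s) : sem(s) {}
    ~SemaphoreReleaser() { sem->release(); }  // released on every exit path
private:
    T* sem;
};

std::counting_semaphore<4> throttler(4);  // assumed slot count

void doThrottledWork()
{
    if (!throttler.try_acquire()) {
        // No free slot: block until one frees up (the real code also
        // records that it is waiting before blocking).
        throttler.acquire();
    }
    SemaphoreReleaser releaser(&throttler);

    // ... CPU-bound work, safe against early returns and exceptions ...
    std::cout << "working\n";
}
```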
@@ -201,41 +453,33 @@ void State::buildRemote(ref<Store> destStore,
            process. Meh. */
         });
 
-        FdSource from(child.from.get());
-        FdSink to(child.to.get());
+        ::Machine::Connection conn {
+            {
+                .to = child->in.get(),
+                .from = child->out.get(),
+                /* Handshake. */
+                .remoteVersion = 0xdadbeef, // FIXME avoid dummy initialize
+            },
+            /*.machine =*/ machine,
+        };
 
         Finally updateStats([&]() {
-            bytesReceived += from.read;
-            bytesSent += to.written;
+            bytesReceived += conn.from.read;
+            bytesSent += conn.to.written;
         });
 
-        /* Handshake. */
-        bool sendDerivation = true;
-        unsigned int remoteVersion;
+        constexpr ServeProto::Version our_version = 0x206;
 
         try {
-            to << SERVE_MAGIC_1 << 0x204;
-            to.flush();
-
-            unsigned int magic = readInt(from);
-            if (magic != SERVE_MAGIC_2)
-                throw Error("protocol mismatch with ‘nix-store --serve’ on ‘%1%’", machine->sshName);
-            remoteVersion = readInt(from);
-            if (GET_PROTOCOL_MAJOR(remoteVersion) != 0x200)
-                throw Error("unsupported ‘nix-store --serve’ protocol version on ‘%1%’", machine->sshName);
-            // Always send the derivation to localhost, since it's a
-            // no-op anyway but we might not be privileged to use
-            // cmdBuildDerivation (e.g. if we're running in a NixOS
-            // container).
-            if (GET_PROTOCOL_MINOR(remoteVersion) >= 1 && !machine->isLocalhost())
-                sendDerivation = false;
-            if (GET_PROTOCOL_MINOR(remoteVersion) < 3 && repeats > 0)
-                throw Error("machine ‘%1%’ does not support repeating a build; please upgrade it to Nix 1.12", machine->sshName);
+            conn.remoteVersion = decltype(conn)::handshake(
+                conn.to,
+                conn.from,
+                our_version,
+                machine->storeUri.render());
         } catch (EndOfFile & e) {
-            child.pid.wait();
-            string s = chomp(readFile(result.logFile));
-            throw Error("cannot connect to ‘%1%’: %2%", machine->sshName, s);
+            child->sshPid.wait();
+            std::string s = chomp(readFile(result.logFile));
+            throw Error("cannot connect to ‘%1%’: %2%", machine->storeUri.render(), s);
         }
 
         {
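For context on the hunk above: the hand-rolled magic/version exchange is replaced by `ServeProto`'s `handshake` helper, but the shape of the protocol is the same. A rough self-contained sketch of that shape (the magic constants and channel type here are illustrative, not the real serve-protocol values):

```cpp
#include <algorithm>
#include <cstdint>
#include <deque>
#include <iostream>
#include <stdexcept>

// One direction of a pipe, simulated with an in-memory queue.
struct Chan {
    std::deque<uint64_t> q;
    void write(uint64_t v) { q.push_back(v); }
    uint64_t read() { uint64_t v = q.front(); q.pop_front(); return v; }
};

constexpr uint64_t MAGIC_1 = 0x6e697801, MAGIC_2 = 0x6e697802; // illustrative

// Client side: send our magic + version, expect the server's magic +
// version back, then speak the lower common version.
uint64_t clientHandshake(Chan & to, Chan & from, uint64_t ourVersion)
{
    to.write(MAGIC_1);
    to.write(ourVersion);
    if (from.read() != MAGIC_2) throw std::runtime_error("protocol mismatch");
    uint64_t remote = from.read();
    return std::min(ourVersion, remote);
}

int main()
{
    Chan c2s, s2c;
    s2c.write(MAGIC_2);  // fake server reply
    s2c.write(0x204);
    std::cout << std::hex << clientHandshake(c2s, s2c, 0x206) << "\n"; // 204
}
```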
@@ -249,55 +493,12 @@ void State::buildRemote(ref<Store> destStore,
            copy the immediate sources of the derivation and the required
            outputs of the input derivations. */
         updateStep(ssSendingInputs);
+        BasicDerivation resolvedDrv = build_remote::sendInputs(*this, *step, *localStore, *destStore, conn, result.overhead, nrStepsWaiting, nrStepsCopyingTo);
 
-        StorePathSet inputs;
-        BasicDerivation basicDrv(*step->drv);
-
-        if (sendDerivation)
-            inputs.insert(step->drvPath);
-        else
-            for (auto & p : step->drv->inputSrcs)
-                inputs.insert(p);
-
-        for (auto & input : step->drv->inputDrvs) {
-            auto drv2 = localStore->readDerivation(input.first);
-            for (auto & name : input.second) {
-                if (auto i = get(drv2.outputs, name)) {
-                    auto outPath = i->path(*localStore, drv2.name, name);
-                    inputs.insert(*outPath);
-                    basicDrv.inputSrcs.insert(*outPath);
-                }
-            }
-        }
-
-        /* Ensure that the inputs exist in the destination store. This is
-           a no-op for regular stores, but for the binary cache store,
-           this will copy the inputs to the binary cache from the local
-           store. */
-        if (localStore != std::shared_ptr<Store>(destStore))
-            copyClosure(ref<Store>(localStore), destStore, step->drv->inputSrcs, NoRepair, NoCheckSigs);
-
-        /* Copy the input closure. */
-        if (!machine->isLocalhost()) {
-            auto mc1 = std::make_shared<MaintainCount<counter>>(nrStepsWaiting);
-            mc1.reset();
-            MaintainCount<counter> mc2(nrStepsCopyingTo);
-            printMsg(lvlDebug, "sending closure of ‘%s’ to ‘%s’",
-                localStore->printStorePath(step->drvPath), machine->sshName);
-
-            auto now1 = std::chrono::steady_clock::now();
-
-            copyClosureTo(machine->state->sendLock, destStore, from, to, inputs, true);
-
-            auto now2 = std::chrono::steady_clock::now();
-
-            result.overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
-        }
-
-        autoDelete.cancel();
+        logFileDel.cancel();
 
         /* Truncate the log to get rid of messages about substitutions
            etc. on the remote system. */
         if (lseek(logFD.get(), SEEK_SET, 0) != 0)
             throw SysError("seeking to the start of log file ‘%s’", result.logFile);
 
@@ -309,111 +510,22 @@ void State::buildRemote(ref<Store> destStore,
         /* Do the build. */
         printMsg(lvlDebug, "building ‘%s’ on ‘%s’",
             localStore->printStorePath(step->drvPath),
-            machine->sshName);
+            machine->storeUri.render());
 
         updateStep(ssBuilding);
 
-        if (sendDerivation) {
-            to << cmdBuildPaths;
-            worker_proto::write(*localStore, to, StorePathSet{step->drvPath});
-        } else {
-            to << cmdBuildDerivation << localStore->printStorePath(step->drvPath);
-            writeDerivation(to, *localStore, basicDrv);
-        }
-        to << maxSilentTime << buildTimeout;
-        if (GET_PROTOCOL_MINOR(remoteVersion) >= 2)
-            to << maxLogSize;
-        if (GET_PROTOCOL_MINOR(remoteVersion) >= 3) {
-            to << repeats // == build-repeat
-               << step->isDeterministic; // == enforce-determinism
-        }
-        to.flush();
+        BuildResult buildResult = build_remote::performBuild(
+            conn,
+            *localStore,
+            step->drvPath,
+            resolvedDrv,
+            buildOptions,
+            nrStepsBuilding
+        );
 
-        result.startTime = time(0);
-        int res;
-        {
-            MaintainCount<counter> mc(nrStepsBuilding);
-            res = readInt(from);
-        }
-        result.stopTime = time(0);
+        result.updateWithBuildResult(buildResult);
 
-        if (sendDerivation) {
-            if (res) {
-                result.errorMsg = fmt("%s on ‘%s’", readString(from), machine->sshName);
-                if (res == 100) {
-                    result.stepStatus = bsFailed;
-                    result.canCache = true;
-                }
-                else if (res == 101) {
-                    result.stepStatus = bsTimedOut;
-                }
-                else {
-                    result.stepStatus = bsAborted;
-                    result.canRetry = true;
-                }
-                return;
-            }
-            result.stepStatus = bsSuccess;
-        } else {
-            result.errorMsg = readString(from);
-            if (GET_PROTOCOL_MINOR(remoteVersion) >= 3) {
-                result.timesBuilt = readInt(from);
-                result.isNonDeterministic = readInt(from);
-                auto start = readInt(from);
-                auto stop = readInt(from);
-                if (start && stop) {
-                    /* Note: this represents the duration of a single
-                       round, rather than all rounds. */
-                    result.startTime = start;
-                    result.stopTime = stop;
-                }
-            }
-            switch ((BuildResult::Status) res) {
-                case BuildResult::Built:
-                    result.stepStatus = bsSuccess;
-                    break;
-                case BuildResult::Substituted:
-                case BuildResult::AlreadyValid:
-                    result.stepStatus = bsSuccess;
-                    result.isCached = true;
-                    break;
-                case BuildResult::PermanentFailure:
-                    result.stepStatus = bsFailed;
-                    result.canCache = true;
-                    result.errorMsg = "";
-                    break;
-                case BuildResult::InputRejected:
-                case BuildResult::OutputRejected:
-                    result.stepStatus = bsFailed;
-                    result.canCache = true;
-                    break;
-                case BuildResult::TransientFailure:
-                    result.stepStatus = bsFailed;
-                    result.canRetry = true;
-                    result.errorMsg = "";
-                    break;
-                case BuildResult::TimedOut:
-                    result.stepStatus = bsTimedOut;
-                    result.errorMsg = "";
-                    break;
-                case BuildResult::MiscFailure:
-                    result.stepStatus = bsAborted;
-                    result.canRetry = true;
-                    break;
-                case BuildResult::LogLimitExceeded:
-                    result.stepStatus = bsLogLimitExceeded;
-                    break;
-                case BuildResult::NotDeterministic:
-                    result.stepStatus = bsNotDeterministic;
-                    result.canRetry = false;
-                    result.canCache = true;
-                    break;
-                default:
-                    result.stepStatus = bsAborted;
-                    break;
-            }
-            if (result.stepStatus != bsSuccess) return;
-        }
+        if (result.stepStatus != bsSuccess) return;
 
         result.errorMsg = "";
 
@@ -421,11 +533,32 @@ void State::buildRemote(ref<Store> destStore,
            get a build log. */
         if (result.isCached) {
             printMsg(lvlInfo, "outputs of ‘%s’ substituted or already valid on ‘%s’",
-                localStore->printStorePath(step->drvPath), machine->sshName);
+                localStore->printStorePath(step->drvPath), machine->storeUri.render());
             unlink(result.logFile.c_str());
             result.logFile = "";
         }
 
+        /* Throttle CPU-bound work. Opportunistically skip updating the current
+         * step, since this requires a DB roundtrip. */
+        if (!localWorkThrottler.try_acquire()) {
+            MaintainCount<counter> mc(nrStepsWaitingForDownloadSlot);
+            updateStep(ssWaitingForLocalSlot);
+            localWorkThrottler.acquire();
+        }
+        SemaphoreReleaser releaser(&localWorkThrottler);
+
+        /* Once we've started copying outputs, release the machine reservation
+         * so further builds can happen. We do not release the machine earlier
+         * to avoid situations where the queue runner is bottlenecked on
+         * copying outputs and we end up building too many things that we
+         * haven't been able to allow copy slots for. */
+        reservation.reset();
+        wakeDispatcher();
+
+        StorePathSet outputs;
+        for (auto & [_, realisation] : buildResult.builtOutputs)
+            outputs.insert(realisation.outPath);
+
         /* Copy the output paths. */
         if (!machine->isLocalhost() || localStore != std::shared_ptr<Store>(destStore)) {
             updateStep(ssReceivingOutputs);
@@ -434,39 +567,10 @@ void State::buildRemote(ref<Store> destStore,
 
             auto now1 = std::chrono::steady_clock::now();
 
-            StorePathSet outputs;
-            for (auto & i : step->drv->outputsAndOptPaths(*localStore)) {
-                if (i.second.second)
-                    outputs.insert(*i.second.second);
-            }
+            auto infos = conn.queryPathInfos(*localStore, outputs);
 
-            /* Get info about each output path. */
-            std::map<StorePath, ValidPathInfo> infos;
             size_t totalNarSize = 0;
-            to << cmdQueryPathInfos;
-            worker_proto::write(*localStore, to, outputs);
-            to.flush();
-            while (true) {
-                auto storePathS = readString(from);
-                if (storePathS == "") break;
-                auto deriver = readString(from); // deriver
-                auto references = worker_proto::read(*localStore, from, Phantom<StorePathSet> {});
-                readLongLong(from); // download size
-                auto narSize = readLongLong(from);
-                auto narHash = Hash::parseAny(readString(from), htSHA256);
-                auto ca = parseContentAddressOpt(readString(from));
-                readStrings<StringSet>(from); // sigs
-                ValidPathInfo info(localStore->parseStorePath(storePathS), narHash);
-                assert(outputs.count(info.path));
-                info.references = references;
-                info.narSize = narSize;
-                totalNarSize += info.narSize;
-                info.narHash = narHash;
-                info.ca = ca;
-                if (deriver != "")
-                    info.deriver = localStore->parseStorePath(deriver);
-                infos.insert_or_assign(info.path, info);
-            }
+            for (auto & [_, info] : infos) totalNarSize += info.narSize;
 
             if (totalNarSize > maxOutputSize) {
                 result.stepStatus = bsNarSizeLimitExceeded;
@@ -475,35 +579,32 @@ void State::buildRemote(ref<Store> destStore,
 
             /* Copy each path. */
             printMsg(lvlDebug, "copying outputs of ‘%s’ from ‘%s’ (%d bytes)",
-                localStore->printStorePath(step->drvPath), machine->sshName, totalNarSize);
+                localStore->printStorePath(step->drvPath), machine->storeUri.render(), totalNarSize);
 
-            auto pathsSorted = reverseTopoSortPaths(infos);
-
-            for (auto & path : pathsSorted) {
-                auto & info = infos.find(path)->second;
-                to << cmdDumpStorePath << localStore->printStorePath(path);
-                to.flush();
-
-                /* Receive the NAR from the remote and add it to the
-                   destination store. Meanwhile, extract all the info from the
-                   NAR that getBuildOutput() needs. */
-                auto source2 = sinkToSource([&](Sink & sink)
-                {
-                    TeeSource tee(from, sink);
-                    extractNarData(tee, localStore->printStorePath(path), narMembers);
-                });
-
-                destStore->addToStore(info, *source2, NoRepair, NoCheckSigs);
-            }
+            build_remote::copyPathsFromRemote(conn, narMembers, *localStore, *destStore, infos);
 
             auto now2 = std::chrono::steady_clock::now();
 
             result.overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
         }
 
+        /* Register the outputs of the newly built drv */
+        if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) {
+            auto outputHashes = staticOutputHashes(*localStore, *step->drv);
+            for (auto & [outputName, realisation] : buildResult.builtOutputs) {
+                // Register the resolved drv output
+                destStore->registerDrvOutput(realisation);
+
+                // Also register the unresolved one
+                auto unresolvedRealisation = realisation;
+                unresolvedRealisation.signatures.clear();
+                unresolvedRealisation.id.drvHash = outputHashes.at(outputName);
+                destStore->registerDrvOutput(unresolvedRealisation);
+            }
+        }
+
         /* Shut down the connection. */
-        child.to = -1;
-        child.pid.wait();
+        child->in = -1;
+        child->sshPid.wait();
 
     } catch (Error & e) {
         /* Disable this machine until a certain period of time has
@@ -517,7 +618,7 @@ void State::buildRemote(ref<Store> destStore,
             info->consecutiveFailures = std::min(info->consecutiveFailures + 1, (unsigned int) 4);
             info->lastFailure = now;
             int delta = retryInterval * std::pow(retryBackoff, info->consecutiveFailures - 1) + (rand() % 30);
-            printMsg(lvlInfo, "will disable machine ‘%1%’ for %2%s", machine->sshName, delta);
+            printMsg(lvlInfo, "will disable machine ‘%1%’ for %2%s", machine->storeUri.render(), delta);
             info->disabledUntil = now + std::chrono::seconds(delta);
         }
         throw;
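Note on the machine-disabling logic just above: it is plain exponential backoff with jitter. A standalone sketch of the same arithmetic (the `retryInterval`/`retryBackoff` values here are assumed for illustration):

```cpp
#include <chrono>
#include <cmath>
#include <cstdlib>

std::chrono::seconds backoffDelay(unsigned int consecutiveFailures)
{
    const int retryInterval = 60;     // assumed base interval (seconds)
    const double retryBackoff = 3.0;  // assumed multiplier per failure
    // delta = base * backoff^(failures-1), plus up to 29s of jitter to
    // avoid all failed machines retrying at the same moment.
    int delta = retryInterval * std::pow(retryBackoff, consecutiveFailures - 1)
        + (std::rand() % 30);
    return std::chrono::seconds(delta);
}
```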
@@ -1,7 +1,7 @@
-#include "build-result.hh"
-#include "store-api.hh"
-#include "util.hh"
-#include "fs-accessor.hh"
+#include "hydra-build-result.hh"
+#include <nix/store/store-api.hh>
+#include <nix/util/util.hh>
+#include <nix/util/source-accessor.hh>
 
 #include <regex>
 
@@ -11,18 +11,18 @@ using namespace nix;
 BuildOutput getBuildOutput(
     nix::ref<Store> store,
     NarMemberDatas & narMembers,
-    const Derivation & drv)
+    const OutputPathMap derivationOutputs)
 {
     BuildOutput res;
 
     /* Compute the closure size. */
     StorePathSet outputs;
     StorePathSet closure;
-    for (auto & i : drv.outputsAndOptPaths(*store))
-        if (i.second.second) {
-            store->computeFSClosure(*i.second.second, closure);
-            outputs.insert(*i.second.second);
-        }
+    for (auto& [outputName, outputPath] : derivationOutputs) {
+        store->computeFSClosure(outputPath, closure);
+        outputs.insert(outputPath);
+        res.outputs.insert({outputName, outputPath});
+    }
     for (auto & path : closure) {
         auto info = store->queryPathInfo(path);
         res.closureSize += info->narSize;
@@ -63,7 +63,7 @@ BuildOutput getBuildOutput(
 
         auto productsFile = narMembers.find(outputS + "/nix-support/hydra-build-products");
         if (productsFile == narMembers.end() ||
-            productsFile->second.type != FSAccessor::Type::tRegular)
+            productsFile->second.type != SourceAccessor::Type::tRegular)
             continue;
         assert(productsFile->second.contents);
 
@@ -78,7 +78,7 @@ BuildOutput getBuildOutput(
             product.type = match[1];
             product.subtype = match[2];
             std::string s(match[3]);
-            product.path = s[0] == '"' ? string(s, 1, s.size() - 2) : s;
+            product.path = s[0] == '"' ? std::string(s, 1, s.size() - 2) : s;
             product.defaultPath = match[5];
 
             /* Ensure that the path exists and points into the Nix
@@ -94,7 +94,7 @@ BuildOutput getBuildOutput(
 
             product.name = product.path == store->printStorePath(output) ? "" : baseNameOf(product.path);
 
-            if (file->second.type == FSAccessor::Type::tRegular) {
+            if (file->second.type == SourceAccessor::Type::tRegular) {
                 product.isRegular = true;
                 product.fileSize = file->second.fileSize.value();
                 product.sha256hash = file->second.sha256.value();
@@ -107,17 +107,16 @@ BuildOutput getBuildOutput(
     /* If no build products were explicitly declared, then add all
        outputs as a product of type "nix-build". */
     if (!explicitProducts) {
-        for (auto & [name, output] : drv.outputs) {
+        for (auto & [name, output] : derivationOutputs) {
             BuildProduct product;
-            auto outPath = output.path(*store, drv.name, name);
-            product.path = store->printStorePath(*outPath);
+            product.path = store->printStorePath(output);
             product.type = "nix-build";
             product.subtype = name == "out" ? "" : name;
-            product.name = outPath->name();
+            product.name = output.name();
 
             auto file = narMembers.find(product.path);
             assert(file != narMembers.end());
-            if (file->second.type == FSAccessor::Type::tDirectory)
+            if (file->second.type == SourceAccessor::Type::tDirectory)
                 res.products.push_back(product);
         }
     }
@@ -126,7 +125,7 @@ BuildOutput getBuildOutput(
|
||||
for (auto & output : outputs) {
|
||||
auto file = narMembers.find(store->printStorePath(output) + "/nix-support/hydra-release-name");
|
||||
if (file == narMembers.end() ||
|
||||
file->second.type != FSAccessor::Type::tRegular)
|
||||
file->second.type != SourceAccessor::Type::tRegular)
|
||||
continue;
|
||||
res.releaseName = trim(file->second.contents.value());
|
||||
// FIXME: validate release name
|
||||
@@ -136,7 +135,7 @@ BuildOutput getBuildOutput(
|
||||
for (auto & output : outputs) {
|
||||
auto file = narMembers.find(store->printStorePath(output) + "/nix-support/hydra-metrics");
|
||||
if (file == narMembers.end() ||
|
||||
file->second.type != FSAccessor::Type::tRegular)
|
||||
file->second.type != SourceAccessor::Type::tRegular)
|
||||
continue;
|
||||
for (auto & line : tokenizeString<Strings>(file->second.contents.value(), "\n")) {
|
||||
auto fields = tokenizeString<std::vector<std::string>>(line);
|
||||
|
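Editor's note: the hunks above change getBuildOutput to take a precomputed output map instead of the Derivation itself, so the caller now resolves outputs first. A minimal sketch of the new call pattern, mirroring the builder.cc hunk later in this commit (variable names assumed from that context):

// Sketch only: resolve the derivation's outputs, then post-process the build.
nix::OutputPathMap outputs =
    destStore->queryDerivationOutputMap(step->drvPath, &*localStore);
BuildOutput res = getBuildOutput(destStore, narMembers, outputs);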
@@ -1,9 +1,9 @@
#include <cmath>

#include "state.hh"
#include "build-result.hh"
#include "finally.hh"
#include "binary-cache-store.hh"
#include "hydra-build-result.hh"
#include <nix/util/finally.hh>
#include <nix/store/binary-cache-store.hh>

using namespace nix;

@@ -16,7 +16,7 @@ void setThreadName(const std::string & name)
}


void State::builder(MachineReservation::ptr reservation)
void State::builder(std::unique_ptr<MachineReservation> reservation)
{
    setThreadName("bld~" + std::string(reservation->step->drvPath.to_string()));

@@ -35,22 +35,20 @@ void State::builder(MachineReservation::ptr reservation)
        activeSteps_.lock()->erase(activeStep);
    });

    std::string machine = reservation->machine->storeUri.render();

    try {
        auto destStore = getDestStore();
        res = doBuildStep(destStore, reservation, activeStep);
        // Might release the reservation.
        res = doBuildStep(destStore, std::move(reservation), activeStep);
    } catch (std::exception & e) {
        printMsg(lvlError, "uncaught exception building ‘%s’ on ‘%s’: %s",
            localStore->printStorePath(reservation->step->drvPath),
            reservation->machine->sshName,
            localStore->printStorePath(activeStep->step->drvPath),
            machine,
            e.what());
    }
}

    /* Release the machine and wake up the dispatcher. */
    assert(reservation.unique());
    reservation = 0;
    wakeDispatcher();

    /* If there was a temporary failure, retry the step after an
       exponentially increasing interval. */
    Step::ptr step = wstep.lock();
@@ -72,11 +70,11 @@ void State::builder(MachineReservation::ptr reservation)


State::StepResult State::doBuildStep(nix::ref<Store> destStore,
    MachineReservation::ptr reservation,
    std::unique_ptr<MachineReservation> reservation,
    std::shared_ptr<ActiveStep> activeStep)
{
    auto & step(reservation->step);
    auto & machine(reservation->machine);
    auto step(reservation->step);
    auto machine(reservation->machine);

    {
        auto step_(step->state.lock());
@@ -98,8 +96,13 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
       it). */
    BuildID buildId;
    std::optional<StorePath> buildDrvPath;
    unsigned int maxSilentTime, buildTimeout;
    unsigned int repeats = step->isDeterministic ? 1 : 0;
    // Other fields set below
    nix::ServeProto::BuildOptions buildOptions {
        .maxLogSize = maxLogSize,
        .nrRepeats = step->isDeterministic ? 1u : 0u,
        .enforceDeterminism = step->isDeterministic,
        .keepFailed = false,
    };

    auto conn(dbPool.get());

@@ -134,21 +137,22 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
            {
                auto i = jobsetRepeats.find(std::make_pair(build2->projectName, build2->jobsetName));
                if (i != jobsetRepeats.end())
                    repeats = std::max(repeats, i->second);
                    buildOptions.nrRepeats = std::max(buildOptions.nrRepeats, i->second);
            }
        }
        if (!build) build = *dependents.begin();

        buildId = build->id;
        buildDrvPath = build->drvPath;
        maxSilentTime = build->maxSilentTime;
        buildTimeout = build->buildTimeout;
        buildOptions.maxSilentTime = build->maxSilentTime;
        buildOptions.buildTimeout = build->buildTimeout;

        printInfo("performing step ‘%s’ %d times on ‘%s’ (needed by build %d and %d others)",
            localStore->printStorePath(step->drvPath), repeats + 1, machine->sshName, buildId, (dependents.size() - 1));
            localStore->printStorePath(step->drvPath), buildOptions.nrRepeats + 1, machine->storeUri.render(), buildId, (dependents.size() - 1));
    }

    bool quit = buildId == buildOne && step->drvPath == *buildDrvPath;
    if (!buildOneDone)
        buildOneDone = buildId == buildOne && step->drvPath == *buildDrvPath;

    RemoteResult result;
    BuildOutput res;
@@ -172,7 +176,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
                unlink(result.logFile.c_str());
            }
        } catch (...) {
            ignoreException();
            ignoreExceptionInDestructor();
        }
    }
});
@@ -190,7 +194,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
    {
        auto mc = startDbUpdate();
        pqxx::work txn(*conn);
        stepNr = createBuildStep(txn, result.startTime, buildId, step, machine->sshName, bsBusy);
        stepNr = createBuildStep(txn, result.startTime, buildId, step, machine->storeUri.render(), bsBusy);
        txn.commit();
    }

@@ -205,7 +209,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,

    try {
        /* FIXME: referring builds may have conflicting timeouts. */
        buildRemote(destStore, machine, step, maxSilentTime, buildTimeout, repeats, result, activeStep, updateStep, narMembers);
        buildRemote(destStore, std::move(reservation), machine, step, buildOptions, result, activeStep, updateStep, narMembers);
    } catch (Error & e) {
        if (activeStep->state_.lock()->cancelled) {
            printInfo("marking step %d of build %d as cancelled", stepNr, buildId);
@@ -220,7 +224,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,

    if (result.stepStatus == bsSuccess) {
        updateStep(ssPostProcessing);
        res = getBuildOutput(destStore, narMembers, *step->drv);
        res = getBuildOutput(destStore, narMembers, destStore->queryDerivationOutputMap(step->drvPath, &*localStore));
    }
}

@@ -247,7 +251,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
    /* Finish the step in the database. */
    if (stepNr) {
        pqxx::work txn(*conn);
        finishBuildStep(txn, result, buildId, stepNr, machine->sshName);
        finishBuildStep(txn, result, buildId, stepNr, machine->storeUri.render());
        txn.commit();
    }

@@ -255,7 +259,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
       issue). Retry a number of times. */
    if (result.canRetry) {
        printMsg(lvlError, "possibly transient failure building ‘%s’ on ‘%s’: %s",
            localStore->printStorePath(step->drvPath), machine->sshName, result.errorMsg);
            localStore->printStorePath(step->drvPath), machine->storeUri.render(), result.errorMsg);
        assert(stepNr);
        bool retry;
        {
@@ -265,7 +269,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
        if (retry) {
            auto mc = startDbUpdate();
            stepFinished = true;
            if (quit) exit(1);
            if (buildOneDone) exit(1);
            return sRetry;
        }
    }
@@ -274,9 +278,12 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,

    assert(stepNr);

    for (auto & i : step->drv->outputsAndOptPaths(*localStore)) {
        if (i.second.second)
            addRoot(*i.second.second);
    for (auto & [outputName, optOutputPath] : destStore->queryPartialDerivationOutputMap(step->drvPath, &*localStore)) {
        if (!optOutputPath)
            throw Error(
                "Missing output %s for derivation %d which was supposed to have succeeded",
                outputName, localStore->printStorePath(step->drvPath));
        addRoot(*optOutputPath);
    }

    /* Register success in the database for all Build objects that
@@ -322,7 +329,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
        pqxx::work txn(*conn);

        for (auto & b : direct) {
            printMsg(lvlInfo, format("marking build %1% as succeeded") % b->id);
            printInfo("marking build %1% as succeeded", b->id);
            markSucceededBuild(txn, b, res, buildId != b->id || result.isCached,
                result.startTime, result.stopTime);
        }
@@ -376,7 +383,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
    }

    } else
        failStep(*conn, step, buildId, result, machine, stepFinished, quit);
        failStep(*conn, step, buildId, result, machine, stepFinished);

    // FIXME: keep stats about aborted steps?
    nrStepsDone++;
@@ -386,7 +393,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
    machine->state->totalStepTime += stepStopTime - stepStartTime;
    machine->state->totalStepBuildTime += result.stopTime - result.startTime;

    if (quit) exit(0); // testing hack; FIXME: this won't run plugins
    if (buildOneDone) exit(0); // testing hack; FIXME: this won't run plugins

    return sDone;
}
@@ -397,9 +404,8 @@ void State::failStep(
    Step::ptr step,
    BuildID buildId,
    const RemoteResult & result,
    Machine::ptr machine,
    bool & stepFinished,
    bool & quit)
    ::Machine::ptr machine,
    bool & stepFinished)
{
    /* Register failure in the database for all Build objects that
       directly or indirectly depend on this step. */
@@ -444,14 +450,14 @@ void State::failStep(
            build->finishedInDB)
            continue;
        createBuildStep(txn,
            0, build->id, step, machine ? machine->sshName : "",
            0, build->id, step, machine ? machine->storeUri.render() : "",
            result.stepStatus, result.errorMsg, buildId == build->id ? 0 : buildId);
    }

    /* Mark all builds that depend on this derivation as failed. */
    for (auto & build : indirect) {
        if (build->finishedInDB) continue;
        printMsg(lvlError, format("marking build %1% as failed") % build->id);
        printError("marking build %1% as failed", build->id);
        txn.exec_params0
            ("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $4, isCachedBuild = $5, notificationPendingSince = $4 where id = $1 and finished = 0",
            build->id,
@@ -481,7 +487,7 @@ void State::failStep(
            b->finishedInDB = true;
            builds_->erase(b->id);
            dependentIDs.push_back(b->id);
            if (buildOne == b->id) quit = true;
            if (!buildOneDone && buildOne == b->id) buildOneDone = true;
        }
    }

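Editor's note: the reservation now travels as a std::unique_ptr, so doBuildStep takes ownership and the caller must not touch it after the move. A minimal sketch of the ownership-transfer pattern, including the pre-move copy that the hunk above adds to keep the error message safe:

// Sketch only: copy what you need before std::move(), because the
// moved-from unique_ptr is null afterwards.
std::string machine = reservation->machine->storeUri.render();
res = doBuildStep(destStore, std::move(reservation), activeStep);
// From here on, use `machine` and `activeStep`, never `reservation`.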
@@ -2,6 +2,7 @@
#include <cmath>
#include <thread>
#include <unordered_map>
#include <unordered_set>

#include "state.hh"

@@ -31,34 +32,42 @@ void State::makeRunnable(Step::ptr step)

void State::dispatcher()
{
    while (true) {
    printMsg(lvlDebug, "Waiting for the machines parsing to have completed at least once");
    machinesReadyLock.lock();

    while (true) {
        try {
            printMsg(lvlDebug, "dispatcher woken up");
            nrDispatcherWakeups++;

            auto now1 = std::chrono::steady_clock::now();
            auto t_before_work = std::chrono::steady_clock::now();

            auto sleepUntil = doDispatch();

            auto now2 = std::chrono::steady_clock::now();
            auto t_after_work = std::chrono::steady_clock::now();

            dispatchTimeMs += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
            prom.dispatcher_time_spent_running.Increment(
                std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count());
            dispatchTimeMs += std::chrono::duration_cast<std::chrono::milliseconds>(t_after_work - t_before_work).count();

            /* Sleep until we're woken up (either because a runnable build
               is added, or because a build finishes). */
            {
                auto dispatcherWakeup_(dispatcherWakeup.lock());
                if (!*dispatcherWakeup_) {
                    printMsg(lvlDebug, format("dispatcher sleeping for %1%s") %
                    debug("dispatcher sleeping for %1%s",
                        std::chrono::duration_cast<std::chrono::seconds>(sleepUntil - std::chrono::system_clock::now()).count());
                    dispatcherWakeup_.wait_until(dispatcherWakeupCV, sleepUntil);
                }
                *dispatcherWakeup_ = false;
            }

            auto t_after_sleep = std::chrono::steady_clock::now();
            prom.dispatcher_time_spent_waiting.Increment(
                std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count());

        } catch (std::exception & e) {
            printMsg(lvlError, format("dispatcher: %1%") % e.what());
            printError("dispatcher: %s", e.what());
            sleep(1);
        }

@@ -78,17 +87,124 @@ system_time State::doDispatch()
            jobset.second->pruneSteps();
            auto s2 = jobset.second->shareUsed();
            if (s1 != s2)
                printMsg(lvlDebug, format("pruned scheduling window of ‘%1%:%2%’ from %3% to %4%")
                    % jobset.first.first % jobset.first.second % s1 % s2);
                debug("pruned scheduling window of ‘%1%:%2%’ from %3% to %4%",
                    jobset.first.first, jobset.first.second, s1, s2);
        }
    }

    system_time now = std::chrono::system_clock::now();

    /* Start steps until we're out of steps or slots. */
    auto sleepUntil = system_time::max();
    bool keepGoing;

    /* Sort the runnable steps by priority. Priority is established
       as follows (in order of precedence):

       - The global priority of the builds that depend on the
         step. This allows admins to bump a build to the front of
         the queue.

       - The lowest used scheduling share of the jobsets depending
         on the step.

       - The local priority of the build, as set via the build's
         meta.schedulingPriority field. Note that this is not
         quite correct: the local priority should only be used to
         establish priority between builds in the same jobset, but
         here it's used between steps in different jobsets if they
         happen to have the same lowest used scheduling share. But
         that's not very likely.

       - The lowest ID of the builds depending on the step;
         i.e. older builds take priority over new ones.

       FIXME: O(n lg n); obviously, it would be better to keep a
       runnable queue sorted by priority. */
    struct StepInfo
    {
        Step::ptr step;
        bool alreadyScheduled = false;

        /* The lowest share used of any jobset depending on this
           step. */
        double lowestShareUsed = 1e9;

        /* Info copied from step->state to ensure that the
           comparator is a partial ordering (see MachineInfo). */
        int highestGlobalPriority;
        int highestLocalPriority;
        size_t numRequiredSystemFeatures;
        size_t numRevDeps;
        BuildID lowestBuildID;

        StepInfo(Step::ptr step, Step::State & step_) : step(step)
        {
            for (auto & jobset : step_.jobsets)
                lowestShareUsed = std::min(lowestShareUsed, jobset->shareUsed());
            highestGlobalPriority = step_.highestGlobalPriority;
            highestLocalPriority = step_.highestLocalPriority;
            numRequiredSystemFeatures = step->requiredSystemFeatures.size();
            numRevDeps = step_.rdeps.size();
            lowestBuildID = step_.lowestBuildID;
        }
    };

    std::vector<StepInfo> runnableSorted;

    struct RunnablePerType
    {
        unsigned int count{0};
        std::chrono::seconds waitTime{0};
    };

    std::unordered_map<std::string, RunnablePerType> runnablePerType;

    {
        auto runnable_(runnable.lock());
        runnableSorted.reserve(runnable_->size());
        for (auto i = runnable_->begin(); i != runnable_->end(); ) {
            auto step = i->lock();

            /* Remove dead steps. */
            if (!step) {
                i = runnable_->erase(i);
                continue;
            }

            ++i;

            auto & r = runnablePerType[step->systemType];
            r.count++;

            /* Skip previously failed steps that aren't ready
               to be retried. */
            auto step_(step->state.lock());
            r.waitTime += std::chrono::duration_cast<std::chrono::seconds>(now - step_->runnableSince);
            if (step_->tries > 0 && step_->after > now) {
                if (step_->after < sleepUntil)
                    sleepUntil = step_->after;
                continue;
            }

            runnableSorted.emplace_back(step, *step_);
        }
    }

    sort(runnableSorted.begin(), runnableSorted.end(),
        [](const StepInfo & a, const StepInfo & b)
        {
            return
                a.highestGlobalPriority != b.highestGlobalPriority ? a.highestGlobalPriority > b.highestGlobalPriority :
                a.lowestShareUsed != b.lowestShareUsed ? a.lowestShareUsed < b.lowestShareUsed :
                a.highestLocalPriority != b.highestLocalPriority ? a.highestLocalPriority > b.highestLocalPriority :
                a.numRequiredSystemFeatures != b.numRequiredSystemFeatures ? a.numRequiredSystemFeatures > b.numRequiredSystemFeatures :
                a.numRevDeps != b.numRevDeps ? a.numRevDeps > b.numRevDeps :
                a.lowestBuildID < b.lowestBuildID;
        });

    do {
        system_time now = std::chrono::system_clock::now();
        now = std::chrono::system_clock::now();

        /* Copy the currentJobs field of each machine. This is
           necessary to ensure that the sort comparator below is
@@ -96,7 +212,7 @@ system_time State::doDispatch()
       filter out temporarily disabled machines. */
    struct MachineInfo
    {
        Machine::ptr machine;
        ::Machine::ptr machine;
        unsigned long currentJobs;
    };
    std::vector<MachineInfo> machinesSorted;
@@ -136,104 +252,6 @@ system_time State::doDispatch()
            a.currentJobs > b.currentJobs;
        });

    /* Sort the runnable steps by priority. Priority is established
       as follows (in order of precedence):

       - The global priority of the builds that depend on the
         step. This allows admins to bump a build to the front of
         the queue.

       - The lowest used scheduling share of the jobsets depending
         on the step.

       - The local priority of the build, as set via the build's
         meta.schedulingPriority field. Note that this is not
         quite correct: the local priority should only be used to
         establish priority between builds in the same jobset, but
         here it's used between steps in different jobsets if they
         happen to have the same lowest used scheduling share. But
         that's not very likely.

       - The lowest ID of the builds depending on the step;
         i.e. older builds take priority over new ones.

       FIXME: O(n lg n); obviously, it would be better to keep a
       runnable queue sorted by priority. */
    struct StepInfo
    {
        Step::ptr step;

        /* The lowest share used of any jobset depending on this
           step. */
        double lowestShareUsed = 1e9;

        /* Info copied from step->state to ensure that the
           comparator is a partial ordering (see MachineInfo). */
        int highestGlobalPriority;
        int highestLocalPriority;
        BuildID lowestBuildID;

        StepInfo(Step::ptr step, Step::State & step_) : step(step)
        {
            for (auto & jobset : step_.jobsets)
                lowestShareUsed = std::min(lowestShareUsed, jobset->shareUsed());
            highestGlobalPriority = step_.highestGlobalPriority;
            highestLocalPriority = step_.highestLocalPriority;
            lowestBuildID = step_.lowestBuildID;
        }
    };

    std::vector<StepInfo> runnableSorted;

    struct RunnablePerType
    {
        unsigned int count{0};
        std::chrono::seconds waitTime{0};
    };

    std::unordered_map<std::string, RunnablePerType> runnablePerType;

    {
        auto runnable_(runnable.lock());
        runnableSorted.reserve(runnable_->size());
        for (auto i = runnable_->begin(); i != runnable_->end(); ) {
            auto step = i->lock();

            /* Remove dead steps. */
            if (!step) {
                i = runnable_->erase(i);
                continue;
            }

            ++i;

            auto & r = runnablePerType[step->systemType];
            r.count++;

            /* Skip previously failed steps that aren't ready
               to be retried. */
            auto step_(step->state.lock());
            r.waitTime += std::chrono::duration_cast<std::chrono::seconds>(now - step_->runnableSince);
            if (step_->tries > 0 && step_->after > now) {
                if (step_->after < sleepUntil)
                    sleepUntil = step_->after;
                continue;
            }

            runnableSorted.emplace_back(step, *step_);
        }
    }

    sort(runnableSorted.begin(), runnableSorted.end(),
        [](const StepInfo & a, const StepInfo & b)
        {
            return
                a.highestGlobalPriority != b.highestGlobalPriority ? a.highestGlobalPriority > b.highestGlobalPriority :
                a.lowestShareUsed != b.lowestShareUsed ? a.lowestShareUsed < b.lowestShareUsed :
                a.highestLocalPriority != b.highestLocalPriority ? a.highestLocalPriority > b.highestLocalPriority :
                a.lowestBuildID < b.lowestBuildID;
        });

    /* Find a machine with a free slot and find a step to run
       on it. Once we find such a pair, we restart the outer
       loop because the machine sorting will have changed. */
@@ -243,12 +261,14 @@ system_time State::doDispatch()
        if (mi.machine->state->currentJobs >= mi.machine->maxJobs) continue;

        for (auto & stepInfo : runnableSorted) {
            if (stepInfo.alreadyScheduled) continue;

            auto & step(stepInfo.step);

            /* Can this machine do this step? */
            if (!mi.machine->supportsStep(step)) {
                debug("machine '%s' does not support step '%s' (system type '%s')",
                    mi.machine->sshName, localStore->printStorePath(step->drvPath), step->drv->platform);
                    mi.machine->storeUri.render(), localStore->printStorePath(step->drvPath), step->drv->platform);
                continue;
            }

@@ -269,10 +289,12 @@ system_time State::doDispatch()
                r.count--;
            }

            stepInfo.alreadyScheduled = true;

            /* Make a slot reservation and start a thread to
               do the build. */
            auto builderThread = std::thread(&State::builder, this,
                std::make_shared<MachineReservation>(*this, step, mi.machine));
                std::make_unique<MachineReservation>(*this, step, mi.machine));
            builderThread.detach(); // FIXME?

            keepGoing = true;
@@ -374,7 +396,6 @@ void State::abortUnsupported()
        if (!build) build = *dependents.begin();

        bool stepFinished = false;
        bool quit = false;

        failStep(
            *conn, step, build->id,
@@ -385,9 +406,9 @@ void State::abortUnsupported()
            .startTime = now2,
            .stopTime = now2,
            },
            nullptr, stepFinished, quit);
            nullptr, stepFinished);

        if (quit) exit(1);
        if (buildOneDone) exit(1);
    }
}

@@ -427,7 +448,7 @@ void Jobset::pruneSteps()
}


State::MachineReservation::MachineReservation(State & state, Step::ptr step, Machine::ptr machine)
State::MachineReservation::MachineReservation(State & state, Step::ptr step, ::Machine::ptr machine)
    : state(state), step(step), machine(machine)
{
    machine->state->currentJobs++;
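Editor's note: the chained-ternary comparator above only yields the strict weak ordering std::sort requires because every compared field is copied into StepInfo up front; comparing live step->state fields could change mid-sort. The shape of the idiom, reduced to two keys as an illustrative sketch:

// Sketch only: compare by the first differing key, fall through otherwise.
std::sort(runnableSorted.begin(), runnableSorted.end(),
    [](const StepInfo & a, const StepInfo & b) {
        return a.highestGlobalPriority != b.highestGlobalPriority
            ? a.highestGlobalPriority > b.highestGlobalPriority
            : a.lowestBuildID < b.lowestBuildID;
    });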
@@ -2,9 +2,9 @@

#include <memory>

#include "hash.hh"
#include "derivations.hh"
#include "store-api.hh"
#include <nix/util/hash.hh>
#include <nix/store/derivations.hh>
#include <nix/store/store-api.hh>
#include "nar-extractor.hh"

struct BuildProduct
@@ -36,10 +36,12 @@ struct BuildOutput

    std::list<BuildProduct> products;

    std::map<std::string, nix::StorePath> outputs;

    std::map<std::string, BuildMetric> metrics;
};

BuildOutput getBuildOutput(
    nix::ref<nix::Store> store,
    NarMemberDatas & narMembers,
    const nix::Derivation & drv);
    const nix::OutputPathMap derivationOutputs);
@@ -1,32 +1,29 @@
#include <iostream>
#include <thread>
#include <optional>
#include <type_traits>

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>

#include "state.hh"
#include "build-result.hh"
#include "store-api.hh"
#include "remote-store.hh"
#include <prometheus/exposer.h>

#include "globals.hh"
#include <nlohmann/json.hpp>

#include <nix/util/signals.hh>
#include "state.hh"
#include "hydra-build-result.hh"
#include <nix/store/store-open.hh>
#include <nix/store/remote-store.hh>

#include <nix/store/globals.hh>
#include "hydra-config.hh"
#include "json.hh"
#include "s3-binary-cache-store.hh"
#include "shared.hh"
#include <nix/store/s3-binary-cache-store.hh>
#include <nix/main/shared.hh>

using namespace nix;


namespace nix {

template<> void toJSON<std::atomic<long>>(std::ostream & str, const std::atomic<long> & n) { str << n; }
template<> void toJSON<std::atomic<uint64_t>>(std::ostream & str, const std::atomic<uint64_t> & n) { str << n; }
template<> void toJSON<double>(std::ostream & str, const double & n) { str << n; }

}
using nlohmann::json;


std::string getEnvOrDie(const std::string & key)
@@ -36,20 +33,94 @@ std::string getEnvOrDie(const std::string & key)
    return *value;
}

State::PromMetrics::PromMetrics()
    : registry(std::make_shared<prometheus::Registry>())
    , queue_checks_started(
        prometheus::BuildCounter()
            .Name("hydraqueuerunner_queue_checks_started_total")
            .Help("Number of times State::getQueuedBuilds() was started")
            .Register(*registry)
            .Add({})
    )
    , queue_build_loads(
        prometheus::BuildCounter()
            .Name("hydraqueuerunner_queue_build_loads_total")
            .Help("Number of builds loaded")
            .Register(*registry)
            .Add({})
    )
    , queue_steps_created(
        prometheus::BuildCounter()
            .Name("hydraqueuerunner_queue_steps_created_total")
            .Help("Number of steps created")
            .Register(*registry)
            .Add({})
    )
    , queue_checks_early_exits(
        prometheus::BuildCounter()
            .Name("hydraqueuerunner_queue_checks_early_exits_total")
            .Help("Number of times State::getQueuedBuilds() yielded to potential bumps")
            .Register(*registry)
            .Add({})
    )
    , queue_checks_finished(
        prometheus::BuildCounter()
            .Name("hydraqueuerunner_queue_checks_finished_total")
            .Help("Number of times State::getQueuedBuilds() was completed")
            .Register(*registry)
            .Add({})
    )
    , dispatcher_time_spent_running(
        prometheus::BuildCounter()
            .Name("hydraqueuerunner_dispatcher_time_spent_running")
            .Help("Time (in micros) spent running the dispatcher")
            .Register(*registry)
            .Add({})
    )
    , dispatcher_time_spent_waiting(
        prometheus::BuildCounter()
            .Name("hydraqueuerunner_dispatcher_time_spent_waiting")
            .Help("Time (in micros) spent waiting for the dispatcher to obtain work")
            .Register(*registry)
            .Add({})
    )
    , queue_monitor_time_spent_running(
        prometheus::BuildCounter()
            .Name("hydraqueuerunner_queue_monitor_time_spent_running")
            .Help("Time (in micros) spent running the queue monitor")
            .Register(*registry)
            .Add({})
    )
    , queue_monitor_time_spent_waiting(
        prometheus::BuildCounter()
            .Name("hydraqueuerunner_queue_monitor_time_spent_waiting")
            .Help("Time (in micros) spent waiting for the queue monitor to obtain work")
            .Register(*registry)
            .Add({})
    )
{

State::State()
}

State::State(std::optional<std::string> metricsAddrOpt)
    : config(std::make_unique<HydraConfig>())
    , maxUnsupportedTime(config->getIntOption("max_unsupported_time", 0))
    , dbPool(config->getIntOption("max_db_connections", 128))
    , localWorkThrottler(config->getIntOption("max_local_worker_threads", std::min(maxSupportedLocalWorkers, std::max(4u, std::thread::hardware_concurrency()) - 2)))
    , maxOutputSize(config->getIntOption("max_output_size", 2ULL << 30))
    , maxLogSize(config->getIntOption("max_log_size", 64ULL << 20))
    , uploadLogsToBinaryCache(config->getBoolOption("upload_logs_to_binary_cache", false))
    , rootsDir(config->getStrOption("gc_roots_dir", fmt("%s/gcroots/per-user/%s/hydra-roots", settings.nixStateDir, getEnvOrDie("LOGNAME"))))
    , metricsAddr(config->getStrOption("queue_runner_metrics_address", std::string{"127.0.0.1:9198"}))
{
    hydraData = getEnvOrDie("HYDRA_DATA");

    logDir = canonPath(hydraData + "/build-logs");

    if (metricsAddrOpt.has_value()) {
        metricsAddr = metricsAddrOpt.value();
    }

    /* handle deprecated store specification */
    if (config->getStrOption("store_mode") != "")
        throw Error("store_mode in hydra.conf is deprecated, please use store_uri");
|
||||
oldMachines = *machines_;
|
||||
}
|
||||
|
||||
for (auto line : tokenizeString<Strings>(contents, "\n")) {
|
||||
line = trim(string(line, 0, line.find('#')));
|
||||
auto tokens = tokenizeString<std::vector<std::string>>(line);
|
||||
if (tokens.size() < 3) continue;
|
||||
tokens.resize(8);
|
||||
|
||||
auto machine = std::make_shared<Machine>();
|
||||
machine->sshName = tokens[0];
|
||||
machine->systemTypes = tokenizeString<StringSet>(tokens[1], ",");
|
||||
machine->sshKey = tokens[2] == "-" ? string("") : tokens[2];
|
||||
if (tokens[3] != "")
|
||||
string2Int(tokens[3], machine->maxJobs);
|
||||
else
|
||||
machine->maxJobs = 1;
|
||||
machine->speedFactor = atof(tokens[4].c_str());
|
||||
if (tokens[5] == "-") tokens[5] = "";
|
||||
machine->supportedFeatures = tokenizeString<StringSet>(tokens[5], ",");
|
||||
if (tokens[6] == "-") tokens[6] = "";
|
||||
machine->mandatoryFeatures = tokenizeString<StringSet>(tokens[6], ",");
|
||||
for (auto & f : machine->mandatoryFeatures)
|
||||
machine->supportedFeatures.insert(f);
|
||||
if (tokens[7] != "" && tokens[7] != "-")
|
||||
machine->sshPublicHostKey = base64Decode(tokens[7]);
|
||||
for (auto && machine_ : nix::Machine::parseConfig({}, contents)) {
|
||||
auto machine = std::make_shared<::Machine>(std::move(machine_));
|
||||
|
||||
/* Re-use the State object of the previous machine with the
|
||||
same name. */
|
||||
auto i = oldMachines.find(machine->sshName);
|
||||
auto i = oldMachines.find(machine->storeUri.variant);
|
||||
if (i == oldMachines.end())
|
||||
printMsg(lvlChatty, format("adding new machine ‘%1%’") % machine->sshName);
|
||||
printMsg(lvlChatty, "adding new machine ‘%1%’", machine->storeUri.render());
|
||||
else
|
||||
printMsg(lvlChatty, format("updating machine ‘%1%’") % machine->sshName);
|
||||
printMsg(lvlChatty, "updating machine ‘%1%’", machine->storeUri.render());
|
||||
machine->state = i == oldMachines.end()
|
||||
? std::make_shared<Machine::State>()
|
||||
? std::make_shared<::Machine::State>()
|
||||
: i->second->state;
|
||||
newMachines[machine->sshName] = machine;
|
||||
newMachines[machine->storeUri.variant] = machine;
|
||||
}
|
||||
|
||||
for (auto & m : oldMachines)
|
||||
if (newMachines.find(m.first) == newMachines.end()) {
|
||||
if (m.second->enabled)
|
||||
printMsg(lvlInfo, format("removing machine ‘%1%’") % m.first);
|
||||
/* Add a disabled Machine object to make sure stats are
|
||||
printInfo("removing machine ‘%1%’", m.second->storeUri.render());
|
||||
/* Add a disabled ::Machine object to make sure stats are
|
||||
maintained. */
|
||||
auto machine = std::make_shared<Machine>(*(m.second));
|
||||
auto machine = std::make_shared<::Machine>(*(m.second));
|
||||
machine->enabled = false;
|
||||
newMachines[m.first] = machine;
|
||||
}
|
||||
@@ -149,14 +199,16 @@ void State::parseMachines(const std::string & contents)
|
||||
|
||||
void State::monitorMachinesFile()
|
||||
{
|
||||
string defaultMachinesFile = "/etc/nix/machines";
|
||||
std::string defaultMachinesFile = "/etc/nix/machines";
|
||||
auto machinesFiles = tokenizeString<std::vector<Path>>(
|
||||
getEnv("NIX_REMOTE_SYSTEMS").value_or(pathExists(defaultMachinesFile) ? defaultMachinesFile : ""), ":");
|
||||
|
||||
if (machinesFiles.empty()) {
|
||||
parseMachines("localhost " +
|
||||
(settings.thisSystem == "x86_64-linux" ? "x86_64-linux,i686-linux" : settings.thisSystem.get())
|
||||
+ " - " + std::to_string(settings.maxBuildJobs) + " 1");
|
||||
+ " - " + std::to_string(settings.maxBuildJobs) + " 1 "
|
||||
+ concatStringsSep(",", StoreConfig::getDefaultSystemFeatures()));
|
||||
machinesReadyLock.unlock();
|
||||
return;
|
||||
}
|
||||
|
||||
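Editor's note: parseMachines now delegates tokenizing to nix::Machine::parseConfig, which understands the standard /etc/nix/machines format (store URI, systems, SSH key, max jobs, speed factor, supported/mandatory features, host key). A hedged sketch of feeding it a single line; the machine line below is illustrative, not from the commit:

// Sketch only: parse one machines-file line into nix::Machine objects.
auto machines = nix::Machine::parseConfig(
    {}, "ssh://builder x86_64-linux - 8 1 big-parallel - -");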
@@ -189,7 +241,7 @@ void State::monitorMachinesFile()

        debug("reloading machines files");

        string contents;
        std::string contents;
        for (auto & machinesFile : machinesFiles) {
            try {
                contents += readFile(machinesFile);
@@ -202,9 +254,15 @@ void State::monitorMachinesFile()
        parseMachines(contents);
    };

    auto firstParse = true;

    while (true) {
        try {
            readMachinesFiles();
            if (firstParse) {
                machinesReadyLock.unlock();
                firstParse = false;
            }
            // FIXME: use inotify.
            sleep(30);
        } catch (std::exception & e) {
@@ -256,10 +314,13 @@ unsigned int State::createBuildStep(pqxx::work & txn, time_t startTime, BuildID

    if (r.affected_rows() == 0) goto restart;

    for (auto & [name, output] : step->drv->outputs)
    for (auto & [name, output] : getDestStore()->queryPartialDerivationOutputMap(step->drvPath, &*localStore))
        txn.exec_params0
            ("insert into BuildStepOutputs (build, stepnr, name, path) values ($1, $2, $3, $4)",
            buildId, stepNr, name, localStore->printStorePath(*output.path(*localStore, step->drv->name, name)));
            buildId, stepNr, name,
            output
                ? std::optional { localStore->printStorePath(*output)}
                : std::nullopt);

    if (status == bsBusy)
        txn.exec(fmt("notify step_started, '%d\t%d'", buildId, stepNr));
@@ -296,11 +357,23 @@ void State::finishBuildStep(pqxx::work & txn, const RemoteResult & result,
    assert(result.logFile.find('\t') == std::string::npos);
    txn.exec(fmt("notify step_finished, '%d\t%d\t%s'",
        buildId, stepNr, result.logFile));

    if (result.stepStatus == bsSuccess) {
        // Update the corresponding `BuildStepOutputs` row to add the output path
        auto res = txn.exec_params1("select drvPath from BuildSteps where build = $1 and stepnr = $2", buildId, stepNr);
        assert(res.size());
        StorePath drvPath = localStore->parseStorePath(res[0].as<std::string>());
        // If we've finished building, all the paths should be known
        for (auto & [name, output] : getDestStore()->queryDerivationOutputMap(drvPath, &*localStore))
            txn.exec_params0
                ("update BuildStepOutputs set path = $4 where build = $1 and stepnr = $2 and name = $3",
                buildId, stepNr, name, localStore->printStorePath(output));
    }
}


int State::createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
    Build::ptr build, const StorePath & drvPath, const string & outputName, const StorePath & storePath)
    Build::ptr build, const StorePath & drvPath, const nix::Derivation drv, const std::string & outputName, const StorePath & storePath)
{
restart:
    auto stepNr = allocBuildStep(txn, build->id);
@@ -320,7 +393,7 @@ int State::createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t sto

    txn.exec_params0
        ("insert into BuildStepOutputs (build, stepnr, name, path) values ($1, $2, $3, $4)",
        build->id, stepNr, outputName,
        build->id, stepNr, outputName,
        localStore->printStorePath(storePath));

    return stepNr;
@@ -401,6 +474,15 @@ void State::markSucceededBuild(pqxx::work & txn, Build::ptr build,
        res.releaseName != "" ? std::make_optional(res.releaseName) : std::nullopt,
        isCachedBuild ? 1 : 0);

    for (auto & [outputName, outputPath] : res.outputs) {
        txn.exec_params0
            ("update BuildOutputs set path = $3 where build = $1 and name = $2",
            build->id,
            outputName,
            localStore->printStorePath(outputPath)
        );
    }

    txn.exec_params0("delete from BuildProducts where build = $1", build->id);

    unsigned int productNr = 1;
@@ -412,7 +494,7 @@ void State::markSucceededBuild(pqxx::work & txn, Build::ptr build,
        product.type,
        product.subtype,
        product.fileSize ? std::make_optional(*product.fileSize) : std::nullopt,
        product.sha256hash ? std::make_optional(product.sha256hash->to_string(Base16, false)) : std::nullopt,
        product.sha256hash ? std::make_optional(product.sha256hash->to_string(HashFormat::Base16, false)) : std::nullopt,
        product.path,
        product.name,
        product.defaultPath);
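Editor's note: createBuildStep now uses queryPartialDerivationOutputMap because, with content-addressed derivations, some output paths are unknown until the build finishes; finishBuildStep then backfills them from queryDerivationOutputMap once success guarantees they exist. The optional-handling shape, as a sketch (recordPath is a hypothetical helper, not from the commit):

// Sketch only: before the build, an output path may be std::nullopt.
for (auto & [name, optPath] : store->queryPartialDerivationOutputMap(drvPath)) {
    if (optPath)
        recordPath(name, store->printStorePath(*optPath)); // known already
    else
        recordPath(name, std::nullopt);                    // filled in after success
}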
@@ -480,182 +562,174 @@ std::shared_ptr<PathLocks> State::acquireGlobalLock()

void State::dumpStatus(Connection & conn)
{
    std::ostringstream out;
    time_t now = time(0);
    json statusJson = {
        {"status", "up"},
        {"time", time(0)},
        {"uptime", now - startedAt},
        {"pid", getpid()},

        {"nrQueuedBuilds", builds.lock()->size()},
        {"nrActiveSteps", activeSteps_.lock()->size()},
        {"nrStepsBuilding", nrStepsBuilding.load()},
        {"nrStepsCopyingTo", nrStepsCopyingTo.load()},
        {"nrStepsWaitingForDownloadSlot", nrStepsWaitingForDownloadSlot.load()},
        {"nrStepsCopyingFrom", nrStepsCopyingFrom.load()},
        {"nrStepsWaiting", nrStepsWaiting.load()},
        {"nrUnsupportedSteps", nrUnsupportedSteps.load()},
        {"bytesSent", bytesSent.load()},
        {"bytesReceived", bytesReceived.load()},
        {"nrBuildsRead", nrBuildsRead.load()},
        {"buildReadTimeMs", buildReadTimeMs.load()},
        {"buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead},
        {"nrBuildsDone", nrBuildsDone.load()},
        {"nrStepsStarted", nrStepsStarted.load()},
        {"nrStepsDone", nrStepsDone.load()},
        {"nrRetries", nrRetries.load()},
        {"maxNrRetries", maxNrRetries.load()},
        {"nrQueueWakeups", nrQueueWakeups.load()},
        {"nrDispatcherWakeups", nrDispatcherWakeups.load()},
        {"dispatchTimeMs", dispatchTimeMs.load()},
        {"dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / nrDispatcherWakeups},
        {"nrDbConnections", dbPool.count()},
        {"nrActiveDbUpdates", nrActiveDbUpdates.load()},
    };
    {
        JSONObject root(out);
        time_t now = time(0);
        root.attr("status", "up");
        root.attr("time", time(0));
        root.attr("uptime", now - startedAt);
        root.attr("pid", getpid());
        {
            auto builds_(builds.lock());
            root.attr("nrQueuedBuilds", builds_->size());
        }
        {
            auto steps_(steps.lock());
            for (auto i = steps_->begin(); i != steps_->end(); )
                if (i->second.lock()) ++i; else i = steps_->erase(i);
            root.attr("nrUnfinishedSteps", steps_->size());
            statusJson["nrUnfinishedSteps"] = steps_->size();
        }
        {
            auto runnable_(runnable.lock());
            for (auto i = runnable_->begin(); i != runnable_->end(); )
                if (i->lock()) ++i; else i = runnable_->erase(i);
            root.attr("nrRunnableSteps", runnable_->size());
            statusJson["nrRunnableSteps"] = runnable_->size();
        }
        root.attr("nrActiveSteps", activeSteps_.lock()->size());
        root.attr("nrStepsBuilding", nrStepsBuilding);
        root.attr("nrStepsCopyingTo", nrStepsCopyingTo);
        root.attr("nrStepsCopyingFrom", nrStepsCopyingFrom);
        root.attr("nrStepsWaiting", nrStepsWaiting);
        root.attr("nrUnsupportedSteps", nrUnsupportedSteps);
        root.attr("bytesSent", bytesSent);
        root.attr("bytesReceived", bytesReceived);
        root.attr("nrBuildsRead", nrBuildsRead);
        root.attr("buildReadTimeMs", buildReadTimeMs);
        root.attr("buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead);
        root.attr("nrBuildsDone", nrBuildsDone);
        root.attr("nrStepsStarted", nrStepsStarted);
        root.attr("nrStepsDone", nrStepsDone);
        root.attr("nrRetries", nrRetries);
        root.attr("maxNrRetries", maxNrRetries);
        if (nrStepsDone) {
            root.attr("totalStepTime", totalStepTime);
            root.attr("totalStepBuildTime", totalStepBuildTime);
            root.attr("avgStepTime", (float) totalStepTime / nrStepsDone);
            root.attr("avgStepBuildTime", (float) totalStepBuildTime / nrStepsDone);
            statusJson["totalStepTime"] = totalStepTime.load();
            statusJson["totalStepBuildTime"] = totalStepBuildTime.load();
            statusJson["avgStepTime"] = (float) totalStepTime / nrStepsDone;
            statusJson["avgStepBuildTime"] = (float) totalStepBuildTime / nrStepsDone;
        }
        root.attr("nrQueueWakeups", nrQueueWakeups);
        root.attr("nrDispatcherWakeups", nrDispatcherWakeups);
        root.attr("dispatchTimeMs", dispatchTimeMs);
        root.attr("dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / nrDispatcherWakeups);
        root.attr("nrDbConnections", dbPool.count());
        root.attr("nrActiveDbUpdates", nrActiveDbUpdates);

        {
            auto nested = root.object("machines");
            auto machines_json = json::object();
            auto machines_(machines.lock());
            for (auto & i : *machines_) {
                auto & m(i.second);
                auto & s(m->state);
                auto nested2 = nested.object(m->sshName);
                nested2.attr("enabled", m->enabled);

                {
                    auto list = nested2.list("systemTypes");
                    for (auto & s : m->systemTypes)
                        list.elem(s);
                }

                {
                    auto list = nested2.list("supportedFeatures");
                    for (auto & s : m->supportedFeatures)
                        list.elem(s);
                }

                {
                    auto list = nested2.list("mandatoryFeatures");
                    for (auto & s : m->mandatoryFeatures)
                        list.elem(s);
                }

                nested2.attr("currentJobs", s->currentJobs);
                if (s->currentJobs == 0)
                    nested2.attr("idleSince", s->idleSince);
                nested2.attr("nrStepsDone", s->nrStepsDone);
                if (m->state->nrStepsDone) {
                    nested2.attr("totalStepTime", s->totalStepTime);
                    nested2.attr("totalStepBuildTime", s->totalStepBuildTime);
                    nested2.attr("avgStepTime", (float) s->totalStepTime / s->nrStepsDone);
                    nested2.attr("avgStepBuildTime", (float) s->totalStepBuildTime / s->nrStepsDone);
                }

                auto info(m->state->connectInfo.lock());
                nested2.attr("disabledUntil", std::chrono::system_clock::to_time_t(info->disabledUntil));
                nested2.attr("lastFailure", std::chrono::system_clock::to_time_t(info->lastFailure));
                nested2.attr("consecutiveFailures", info->consecutiveFailures);

                json machine = {
                    {"enabled", m->enabled},
                    {"systemTypes", m->systemTypes},
                    {"supportedFeatures", m->supportedFeatures},
                    {"mandatoryFeatures", m->mandatoryFeatures},
                    {"nrStepsDone", s->nrStepsDone.load()},
                    {"currentJobs", s->currentJobs.load()},
                    {"disabledUntil", std::chrono::system_clock::to_time_t(info->disabledUntil)},
                    {"lastFailure", std::chrono::system_clock::to_time_t(info->lastFailure)},
                    {"consecutiveFailures", info->consecutiveFailures},
                };

                if (s->currentJobs == 0)
                    machine["idleSince"] = s->idleSince.load();
                if (m->state->nrStepsDone) {
                    machine["totalStepTime"] = s->totalStepTime.load();
                    machine["totalStepBuildTime"] = s->totalStepBuildTime.load();
                    machine["avgStepTime"] = (float) s->totalStepTime / s->nrStepsDone;
                    machine["avgStepBuildTime"] = (float) s->totalStepBuildTime / s->nrStepsDone;
                }
                machines_json[m->storeUri.render()] = machine;
            }
            statusJson["machines"] = machines_json;
        }

        {
            auto nested = root.object("jobsets");
            auto jobsets_json = json::object();
            auto jobsets_(jobsets.lock());
            for (auto & jobset : *jobsets_) {
                auto nested2 = nested.object(jobset.first.first + ":" + jobset.first.second);
                nested2.attr("shareUsed", jobset.second->shareUsed());
                nested2.attr("seconds", jobset.second->getSeconds());
                jobsets_json[jobset.first.first + ":" + jobset.first.second] = {
                    {"shareUsed", jobset.second->shareUsed()},
                    {"seconds", jobset.second->getSeconds()},
                };
            }
            statusJson["jobsets"] = jobsets_json;
        }

        {
            auto nested = root.object("machineTypes");
            auto machineTypesJson = json::object();
            auto machineTypes_(machineTypes.lock());
            for (auto & i : *machineTypes_) {
                auto nested2 = nested.object(i.first);
                nested2.attr("runnable", i.second.runnable);
                nested2.attr("running", i.second.running);
                auto machineTypeJson = machineTypesJson[i.first] = {
                    {"runnable", i.second.runnable},
                    {"running", i.second.running},
                };
                if (i.second.runnable > 0)
                    nested2.attr("waitTime", i.second.waitTime.count() +
                        i.second.runnable * (time(0) - lastDispatcherCheck));
                    machineTypeJson["waitTime"] = i.second.waitTime.count() +
                        i.second.runnable * (time(0) - lastDispatcherCheck);
                if (i.second.running == 0)
                    nested2.attr("lastActive", std::chrono::system_clock::to_time_t(i.second.lastActive));
                    machineTypeJson["lastActive"] = std::chrono::system_clock::to_time_t(i.second.lastActive);
            }
            statusJson["machineTypes"] = machineTypesJson;
        }

        auto store = getDestStore();

        auto nested = root.object("store");

        auto & stats = store->getStats();
        nested.attr("narInfoRead", stats.narInfoRead);
        nested.attr("narInfoReadAverted", stats.narInfoReadAverted);
        nested.attr("narInfoMissing", stats.narInfoMissing);
        nested.attr("narInfoWrite", stats.narInfoWrite);
        nested.attr("narInfoCacheSize", stats.pathInfoCacheSize);
        nested.attr("narRead", stats.narRead);
        nested.attr("narReadBytes", stats.narReadBytes);
        nested.attr("narReadCompressedBytes", stats.narReadCompressedBytes);
        nested.attr("narWrite", stats.narWrite);
        nested.attr("narWriteAverted", stats.narWriteAverted);
        nested.attr("narWriteBytes", stats.narWriteBytes);
        nested.attr("narWriteCompressedBytes", stats.narWriteCompressedBytes);
        nested.attr("narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs);
        nested.attr("narCompressionSavings",
            stats.narWriteBytes
            ? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes
            : 0.0);
        nested.attr("narCompressionSpeed", // MiB/s
        statusJson["store"] = {
            {"narInfoRead", stats.narInfoRead.load()},
            {"narInfoReadAverted", stats.narInfoReadAverted.load()},
            {"narInfoMissing", stats.narInfoMissing.load()},
            {"narInfoWrite", stats.narInfoWrite.load()},
            {"narInfoCacheSize", stats.pathInfoCacheSize.load()},
            {"narRead", stats.narRead.load()},
            {"narReadBytes", stats.narReadBytes.load()},
            {"narReadCompressedBytes", stats.narReadCompressedBytes.load()},
            {"narWrite", stats.narWrite.load()},
            {"narWriteAverted", stats.narWriteAverted.load()},
            {"narWriteBytes", stats.narWriteBytes.load()},
            {"narWriteCompressedBytes", stats.narWriteCompressedBytes.load()},
            {"narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs.load()},
            {"narCompressionSavings",
                stats.narWriteBytes
                ? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes
                : 0.0},
            {"narCompressionSpeed", // MiB/s
                stats.narWriteCompressionTimeMs
                ? (double) stats.narWriteBytes / stats.narWriteCompressionTimeMs * 1000.0 / (1024.0 * 1024.0)
                : 0.0);
                : 0.0},
        };

#if NIX_WITH_S3_SUPPORT
        auto s3Store = dynamic_cast<S3BinaryCacheStore *>(&*store);
        if (s3Store) {
            auto nested2 = nested.object("s3");
            auto & s3Stats = s3Store->getS3Stats();
            nested2.attr("put", s3Stats.put);
            nested2.attr("putBytes", s3Stats.putBytes);
            nested2.attr("putTimeMs", s3Stats.putTimeMs);
            nested2.attr("putSpeed",
                s3Stats.putTimeMs
                ? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
                : 0.0);
            nested2.attr("get", s3Stats.get);
            nested2.attr("getBytes", s3Stats.getBytes);
            nested2.attr("getTimeMs", s3Stats.getTimeMs);
            nested2.attr("getSpeed",
                s3Stats.getTimeMs
                ? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
                : 0.0);
            nested2.attr("head", s3Stats.head);
            nested2.attr("costDollarApprox",
                (s3Stats.get + s3Stats.head) / 10000.0 * 0.004
                + s3Stats.put / 1000.0 * 0.005 +
                + s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09);
            auto jsonS3 = statusJson["s3"] = {
                {"put", s3Stats.put.load()},
                {"putBytes", s3Stats.putBytes.load()},
                {"putTimeMs", s3Stats.putTimeMs.load()},
                {"putSpeed",
                    s3Stats.putTimeMs
                    ? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
                    : 0.0},
                {"get", s3Stats.get.load()},
                {"getBytes", s3Stats.getBytes.load()},
                {"getTimeMs", s3Stats.getTimeMs.load()},
                {"getSpeed",
                    s3Stats.getTimeMs
                    ? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
                    : 0.0},
                {"head", s3Stats.head.load()},
                {"costDollarApprox",
                    (s3Stats.get + s3Stats.head) / 10000.0 * 0.004
                    + s3Stats.put / 1000.0 * 0.005 +
                    + s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09},
            };
        }
#endif
    }

    {
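Editor's note: the dumpStatus rewrite above replaces the streaming JSONObject writer with an nlohmann::json value built by aggregate initialization; note the explicit .load() calls, since nlohmann cannot serialize std::atomic directly. The core idiom as a tiny sketch:

#include <nlohmann/json.hpp>

// Sketch only: build the document in memory, then serialize once.
nlohmann::json status = {
    {"status", "up"},
    {"pid", getpid()},
};
status["machines"] = nlohmann::json::object();  // nested objects added later
std::string payload = status.dump();            // what gets stored in SystemStatus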
@@ -663,7 +737,7 @@ void State::dumpStatus(Connection & conn)
        pqxx::work txn(conn);
        // FIXME: use PostgreSQL 9.5 upsert.
        txn.exec("delete from SystemStatus where what = 'queue-runner'");
        txn.exec_params0("insert into SystemStatus values ('queue-runner', $1)", out.str());
        txn.exec_params0("insert into SystemStatus values ('queue-runner', $1)", statusJson.dump());
        txn.exec("notify status_dumped");
        txn.commit();
    }
@@ -675,14 +749,14 @@ void State::showStatus()
    auto conn(dbPool.get());
    receiver statusDumped(*conn, "status_dumped");

    string status;
    std::string status;
    bool barf = false;

    /* Get the last JSON status dump from the database. */
    {
        pqxx::work txn(*conn);
        auto res = txn.exec("select status from SystemStatus where what = 'queue-runner'");
        if (res.size()) status = res[0][0].as<string>();
        if (res.size()) status = res[0][0].as<std::string>();
    }

    if (status != "") {
@@ -702,7 +776,7 @@ void State::showStatus()
        {
            pqxx::work txn(*conn);
            auto res = txn.exec("select status from SystemStatus where what = 'queue-runner'");
            if (res.size()) status = res[0][0].as<string>();
            if (res.size()) status = res[0][0].as<std::string>();
        }

    }
@@ -746,7 +820,19 @@ void State::run(BuildID buildOne)
    if (!lock)
        throw Error("hydra-queue-runner is already running");

    Store::Params localParams;
    std::cout << "Starting the Prometheus exporter on " << metricsAddr << std::endl;

    /* Set up simple exporter, to show that we're still alive. */
    prometheus::Exposer promExposer{metricsAddr};
    auto exposerPort = promExposer.GetListeningPorts().front();

    promExposer.RegisterCollectable(prom.registry);

    std::cout << "Started the Prometheus exporter, listening on "
        << metricsAddr << "/metrics (port " << exposerPort << ")"
        << std::endl;

    Store::Config::Params localParams;
    localParams["max-connections"] = "16";
    localParams["max-connection-age"] = "600";
    localStore = openStore(getEnv("NIX_REMOTE").value_or(""), localParams);
@@ -769,6 +855,7 @@ void State::run(BuildID buildOne)
        dumpStatus(*conn);
    }

    machinesReadyLock.lock();
    std::thread(&State::monitorMachinesFile, this).detach();

    std::thread(&State::queueMonitor, this).detach();
@@ -827,10 +914,17 @@ void State::run(BuildID buildOne)
    while (true) {
        try {
            auto conn(dbPool.get());
            receiver dumpStatus_(*conn, "dump_status");
            while (true) {
                conn->await_notification();
                dumpStatus(*conn);
            try {
                receiver dumpStatus_(*conn, "dump_status");
                while (true) {
                    conn->await_notification();
                    dumpStatus(*conn);
                }
            } catch (pqxx::broken_connection & connEx) {
                printMsg(lvlError, "main thread: %s", connEx.what());
                printMsg(lvlError, "main thread: Reconnecting in 10s");
                conn.markBad();
                sleep(10);
            }
        } catch (std::exception & e) {
            printMsg(lvlError, "main thread: %s", e.what());
@@ -855,6 +949,7 @@ int main(int argc, char * * argv)
    bool unlock = false;
    bool status = false;
    BuildID buildOne = 0;
    std::optional<std::string> metricsAddrOpt = std::nullopt;

    parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) {
        if (*arg == "--unlock")
@@ -862,17 +957,20 @@ int main(int argc, char * * argv)
        else if (*arg == "--status")
            status = true;
        else if (*arg == "--build-one") {
            if (!string2Int<BuildID>(getArg(*arg, arg, end), buildOne))
            if (auto b = string2Int<BuildID>(getArg(*arg, arg, end)))
                buildOne = *b;
            else
                throw Error("‘--build-one’ requires a build ID");
        } else if (*arg == "--prometheus-address") {
            metricsAddrOpt = getArg(*arg, arg, end);
        } else
            return false;
        return true;
    });

    settings.verboseBuild = true;
    settings.lockCPU = false;

    State state;
    State state{metricsAddrOpt};
    if (status)
        state.showStatus();
    else if (unlock)
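Editor's note: the main-loop hunk above wraps the notification wait in an inner try/catch so a dropped PostgreSQL connection is retired and retried instead of killing the thread. The pattern, reduced to its shape (sketch; pool and receiver types as used above):

// Sketch only: mark the pooled connection bad on broken_connection,
// sleep, and let the outer loop grab a fresh connection from the pool.
try {
    conn->await_notification();
} catch (pqxx::broken_connection &) {
    conn.markBad();
    sleep(10);
}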
src/hydra-queue-runner/meson.build (new file, 24 lines)
@@ -0,0 +1,24 @@
srcs = files(
  'builder.cc',
  'build-remote.cc',
  'build-result.cc',
  'dispatcher.cc',
  'hydra-queue-runner.cc',
  'nar-extractor.cc',
  'queue-monitor.cc',
)

hydra_queue_runner = executable('hydra-queue-runner',
  'hydra-queue-runner.cc',
  srcs,
  dependencies: [
    libhydra_dep,
    nix_util_dep,
    nix_store_dep,
    nix_main_dep,
    pqxx_dep,
    prom_cpp_core_dep,
    prom_cpp_pull_dep,
  ],
  install: true,
)
@@ -1,12 +1,51 @@
#include "nar-extractor.hh"

#include "archive.hh"
#include <nix/util/archive.hh>

#include <unordered_set>

using namespace nix;

struct Extractor : ParseSink

struct NarMemberConstructor : CreateRegularFileSink
{
    NarMemberData & curMember;

    HashSink hashSink = HashSink { HashAlgorithm::SHA256 };

    std::optional<uint64_t> expectedSize;

    NarMemberConstructor(NarMemberData & curMember)
        : curMember(curMember)
    { }

    void isExecutable() override
    {
    }

    void preallocateContents(uint64_t size) override
    {
        expectedSize = size;
    }

    void operator () (std::string_view data) override
    {
        assert(expectedSize);
        *curMember.fileSize += data.size();
        hashSink(data);
        if (curMember.contents) {
            curMember.contents->append(data);
        }
        assert(curMember.fileSize <= expectedSize);
        if (curMember.fileSize == expectedSize) {
            auto [hash, len] = hashSink.finish();
            assert(curMember.fileSize == len);
            curMember.sha256 = hash;
        }
    }
};

struct Extractor : FileSystemObjectSink
{
    std::unordered_set<Path> filesToKeep {
        "/nix-support/hydra-build-products",
@@ -15,58 +54,40 @@ struct Extractor : ParseSink
    };

    NarMemberDatas & members;
    NarMemberData * curMember = nullptr;
    Path prefix;
    std::filesystem::path prefix;

    Path toKey(const CanonPath & path)
    {
        std::filesystem::path p = prefix;
        // Conditional to avoid trailing slash
        if (!path.isRoot()) p /= path.rel();
        return p;
    }

    Extractor(NarMemberDatas & members, const Path & prefix)
        : members(members), prefix(prefix)
    { }

    void createDirectory(const Path & path) override
    void createDirectory(const CanonPath & path) override
    {
        members.insert_or_assign(prefix + path, NarMemberData { .type = FSAccessor::Type::tDirectory });
        members.insert_or_assign(toKey(path), NarMemberData { .type = SourceAccessor::Type::tDirectory });
    }

    void createRegularFile(const Path & path) override
    void createRegularFile(const CanonPath & path, std::function<void(CreateRegularFileSink &)> func) override
    {
        curMember = &members.insert_or_assign(prefix + path, NarMemberData {
            .type = FSAccessor::Type::tRegular,
            .fileSize = 0,
            .contents = filesToKeep.count(path) ? std::optional("") : std::nullopt,
        }).first->second;
        NarMemberConstructor nmc {
            members.insert_or_assign(toKey(path), NarMemberData {
                .type = SourceAccessor::Type::tRegular,
                .fileSize = 0,
                .contents = filesToKeep.count(path.abs()) ? std::optional("") : std::nullopt,
            }).first->second,
        };
        func(nmc);
    }

    std::optional<uint64_t> expectedSize;
    std::unique_ptr<HashSink> hashSink;

    void preallocateContents(uint64_t size) override
    void createSymlink(const CanonPath & path, const std::string & target) override
    {
        expectedSize = size;
        hashSink = std::make_unique<HashSink>(htSHA256);
    }

    void receiveContents(std::string_view data) override
    {
        assert(expectedSize);
        assert(curMember);
        assert(hashSink);
        *curMember->fileSize += data.size();
        (*hashSink)(data);
        if (curMember->contents) {
            curMember->contents->append(data);
        }
        assert(curMember->fileSize <= expectedSize);
        if (curMember->fileSize == expectedSize) {
            auto [hash, len] = hashSink->finish();
            assert(curMember->fileSize == len);
            curMember->sha256 = hash;
            hashSink.reset();
        }
    }

    void createSymlink(const Path & path, const string & target) override
    {
        members.insert_or_assign(prefix + path, NarMemberData { .type = FSAccessor::Type::tSymlink });
        members.insert_or_assign(toKey(path), NarMemberData { .type = SourceAccessor::Type::tSymlink });
    }
};

@@ -1,13 +1,13 @@
#pragma once

#include "fs-accessor.hh"
#include "types.hh"
#include "serialise.hh"
#include "hash.hh"
#include <nix/util/source-accessor.hh>
#include <nix/util/types.hh>
#include <nix/util/serialise.hh>
#include <nix/util/hash.hh>

struct NarMemberData
{
    nix::FSAccessor::Type type;
    nix::SourceAccessor::Type type;
    std::optional<uint64_t> fileSize;
    std::optional<std::string> contents;
    std::optional<nix::Hash> sha256;
@@ -1,6 +1,8 @@
#include "state.hh"
#include "build-result.hh"
#include "globals.hh"
#include "hydra-build-result.hh"
#include <nix/store/globals.hh>
#include <nix/store/parsed-derivations.hh>
#include <nix/util/thread-pool.hh>

#include <cstring>

@@ -10,61 +12,77 @@ using namespace nix;
void State::queueMonitor()
{
    while (true) {
        auto conn(dbPool.get());
        try {
            queueMonitorLoop();
            queueMonitorLoop(*conn);
        } catch (pqxx::broken_connection & e) {
            printMsg(lvlError, "queue monitor: %s", e.what());
            printMsg(lvlError, "queue monitor: Reconnecting in 10s");
            conn.markBad();
            sleep(10);
        } catch (std::exception & e) {
            printMsg(lvlError, format("queue monitor: %1%") % e.what());
            printError("queue monitor: %s", e.what());
            sleep(10); // probably a DB problem, so don't retry right away
        }
    }
}


void State::queueMonitorLoop()
void State::queueMonitorLoop(Connection & conn)
{
    auto conn(dbPool.get());

    receiver buildsAdded(*conn, "builds_added");
    receiver buildsRestarted(*conn, "builds_restarted");
    receiver buildsCancelled(*conn, "builds_cancelled");
    receiver buildsDeleted(*conn, "builds_deleted");
    receiver buildsBumped(*conn, "builds_bumped");
    receiver jobsetSharesChanged(*conn, "jobset_shares_changed");
    receiver buildsAdded(conn, "builds_added");
    receiver buildsRestarted(conn, "builds_restarted");
    receiver buildsCancelled(conn, "builds_cancelled");
    receiver buildsDeleted(conn, "builds_deleted");
    receiver buildsBumped(conn, "builds_bumped");
    receiver jobsetSharesChanged(conn, "jobset_shares_changed");

    auto destStore = getDestStore();

    unsigned int lastBuildId = 0;
    bool quit = false;
    while (!quit) {
        auto t_before_work = std::chrono::steady_clock::now();

    while (true) {
        localStore->clearPathInfoCache();

        bool done = getQueuedBuilds(*conn, destStore, lastBuildId);
        bool done = getQueuedBuilds(conn, destStore);

        if (buildOne && buildOneDone) quit = true;

        auto t_after_work = std::chrono::steady_clock::now();

        prom.queue_monitor_time_spent_running.Increment(
            std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count());

        /* Sleep until we get notification from the database about an
           event. */
        if (done) {
            conn->await_notification();
        if (done && !quit) {
            conn.await_notification();
            nrQueueWakeups++;
        } else
            conn->get_notifs();
            conn.get_notifs();

        if (auto lowestId = buildsAdded.get()) {
            lastBuildId = std::min(lastBuildId, static_cast<unsigned>(std::stoul(*lowestId) - 1));
            printMsg(lvlTalkative, "got notification: new builds added to the queue");
        }
        if (buildsRestarted.get()) {
            printMsg(lvlTalkative, "got notification: builds restarted");
            lastBuildId = 0; // check all builds
        }
        if (buildsCancelled.get() || buildsDeleted.get() || buildsBumped.get()) {
            printMsg(lvlTalkative, "got notification: builds cancelled or bumped");
            processQueueChange(*conn);
            processQueueChange(conn);
        }
        if (jobsetSharesChanged.get()) {
            printMsg(lvlTalkative, "got notification: jobset shares changed");
            processJobsetSharesChange(*conn);
            processJobsetSharesChange(conn);
        }

        auto t_after_sleep = std::chrono::steady_clock::now();
        prom.queue_monitor_time_spent_waiting.Increment(
            std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count());
    }

    exit(0);
}

@@ -75,45 +93,47 @@ struct PreviousFailure : public std::exception {


bool State::getQueuedBuilds(Connection & conn,
    ref<Store> destStore, unsigned int & lastBuildId)
    ref<Store> destStore)
{
    printInfo("checking the queue for builds > %d...", lastBuildId);
    prom.queue_checks_started.Increment();

    printInfo("checking the queue for builds...");

    /* Grab the queued builds from the database, but don't process
       them yet (since we don't want a long-running transaction). */
    std::vector<BuildID> newIDs;
    std::map<BuildID, Build::ptr> newBuildsByID;
    std::unordered_map<BuildID, Build::ptr> newBuildsByID;
    std::multimap<StorePath, BuildID> newBuildsByPath;

    unsigned int newLastBuildId = lastBuildId;

    {
        pqxx::work txn(conn);

        auto res = txn.exec_params
            ("select id, project, jobset, job, drvPath, maxsilent, timeout, timestamp, globalPriority, priority from Builds "
             "where id > $1 and finished = 0 order by globalPriority desc, id",
             lastBuildId);
            ("select builds.id, builds.jobset_id, jobsets.project as project, "
             "jobsets.name as jobset, job, drvPath, maxsilent, timeout, timestamp, "
             "globalPriority, priority from Builds "
             "inner join jobsets on builds.jobset_id = jobsets.id "
             "where finished = 0 order by globalPriority desc, random()");

        for (auto const & row : res) {
            auto builds_(builds.lock());
            BuildID id = row["id"].as<BuildID>();
            if (buildOne && id != buildOne) continue;
            if (id > newLastBuildId) newLastBuildId = id;
            if (builds_->count(id)) continue;

            auto build = std::make_shared<Build>(
                localStore->parseStorePath(row["drvPath"].as<string>()));
                localStore->parseStorePath(row["drvPath"].as<std::string>()));
            build->id = id;
            build->projectName = row["project"].as<string>();
            build->jobsetName = row["jobset"].as<string>();
            build->jobName = row["job"].as<string>();
            build->jobsetId = row["jobset_id"].as<JobsetID>();
            build->projectName = row["project"].as<std::string>();
            build->jobsetName = row["jobset"].as<std::string>();
            build->jobName = row["job"].as<std::string>();
            build->maxSilentTime = row["maxsilent"].as<int>();
            build->buildTimeout = row["timeout"].as<int>();
            build->timestamp = row["timestamp"].as<time_t>();
            build->globalPriority = row["globalPriority"].as<int>();
            build->localPriority = row["priority"].as<int>();
            build->jobset = createJobset(txn, build->projectName, build->jobsetName);
            build->jobset = createJobset(txn, build->projectName, build->jobsetName, build->jobsetId);

            newIDs.push_back(id);
            newBuildsByID[id] = build;
@@ -127,13 +147,14 @@ bool State::getQueuedBuilds(Connection & conn,
    std::set<StorePath> finishedDrvs;

    createBuild = [&](Build::ptr build) {
        printMsg(lvlTalkative, format("loading build %1% (%2%)") % build->id % build->fullJobName());
        prom.queue_build_loads.Increment();
        printMsg(lvlTalkative, "loading build %1% (%2%)", build->id, build->fullJobName());
        nrAdded++;
        newBuildsByID.erase(build->id);

        if (!localStore->isValidPath(build->drvPath)) {
            /* Derivation has been GC'ed prematurely. */
            printMsg(lvlError, format("aborting GC'ed build %1%") % build->id);
            printError("aborting GC'ed build %1%", build->id);
            if (!build->finishedInDB) {
                auto mc = startDbUpdate();
                pqxx::work txn(conn);
@@ -160,6 +181,7 @@ bool State::getQueuedBuilds(Connection & conn,

            /* Some step previously failed, so mark the build as
               failed right away. */
            if (!buildOneDone && build->id == buildOne) buildOneDone = true;
            printMsg(lvlError, "marking build %d as cached failure due to ‘%s’",
                build->id, localStore->printStorePath(ex.step->drvPath));
            if (!build->finishedInDB) {
@@ -176,15 +198,19 @@ bool State::getQueuedBuilds(Connection & conn,
                if (!res[0].is_null()) propagatedFrom = res[0].as<BuildID>();

                if (!propagatedFrom) {
                    for (auto & i : ex.step->drv->outputsAndOptPaths(*localStore)) {
                        if (i.second.second) {
                            auto res = txn.exec_params
                                ("select max(s.build) from BuildSteps s join BuildStepOutputs o on s.build = o.build where path = $1 and startTime != 0 and stopTime != 0 and status = 1",
                                localStore->printStorePath(*i.second.second));
                            if (!res[0][0].is_null()) {
                                propagatedFrom = res[0][0].as<BuildID>();
                                break;
                            }
                    for (auto & [outputName, optOutputPath] : destStore->queryPartialDerivationOutputMap(ex.step->drvPath, &*localStore)) {
                        constexpr std::string_view common = "select max(s.build) from BuildSteps s join BuildStepOutputs o on s.build = o.build where startTime != 0 and stopTime != 0 and status = 1";
                        auto res = optOutputPath
                            ? txn.exec_params(
                                std::string { common } + " and path = $1",
                                localStore->printStorePath(*optOutputPath))
                            : txn.exec_params(
                                std::string { common } + " and drvPath = $1 and name = $2",
                                localStore->printStorePath(ex.step->drvPath),
                                outputName);
                        if (!res[0][0].is_null()) {
                            propagatedFrom = res[0][0].as<BuildID>();
                            break;
                        }
                    }
                }
@@ -220,17 +246,16 @@ bool State::getQueuedBuilds(Connection & conn,
        /* If we didn't get a step, it means the step's outputs are
           all valid. So we mark this as a finished, cached build. */
        if (!step) {
            auto drv = localStore->readDerivation(build->drvPath);
            BuildOutput res = getBuildOutputCached(conn, destStore, drv);
            BuildOutput res = getBuildOutputCached(conn, destStore, build->drvPath);

            for (auto & i : drv.outputsAndOptPaths(*localStore))
                if (i.second.second)
                    addRoot(*i.second.second);
            for (auto & i : destStore->queryDerivationOutputMap(build->drvPath, &*localStore))
                addRoot(i.second);

            {
                auto mc = startDbUpdate();
                pqxx::work txn(conn);
                time_t now = time(0);
                if (!buildOneDone && build->id == buildOne) buildOneDone = true;
                printMsg(lvlInfo, "marking build %1% as succeeded (cached)", build->id);
                markSucceededBuild(txn, build, res, true, now, now);
                notifyBuildFinished(txn, build->id, {});
@@ -275,7 +300,7 @@ bool State::getQueuedBuilds(Connection & conn,
        try {
            createBuild(build);
        } catch (Error & e) {
            e.addTrace({}, hintfmt("while loading build %d: ", build->id));
            e.addTrace({}, HintFmt("while loading build %d: ", build->id));
            throw;
        }

@@ -285,18 +310,23 @@ bool State::getQueuedBuilds(Connection & conn,

    /* Add the new runnable build steps to ‘runnable’ and wake up
       the builder threads. */
    printMsg(lvlChatty, format("got %1% new runnable steps from %2% new builds") % newRunnable.size() % nrAdded);
    printMsg(lvlChatty, "got %1% new runnable steps from %2% new builds", newRunnable.size(), nrAdded);
    for (auto & r : newRunnable)
        makeRunnable(r);

    if (buildOne && newRunnable.size() == 0) buildOneDone = true;

    nrBuildsRead += nrAdded;

    /* Stop after a certain time to allow priority bumps to be
       processed. */
    if (std::chrono::system_clock::now() > start + std::chrono::seconds(600)) break;
    if (std::chrono::system_clock::now() > start + std::chrono::seconds(60)) {
        prom.queue_checks_early_exits.Increment();
        break;
    }
    }

    lastBuildId = newBuildsByID.empty() ? newLastBuildId : newBuildsByID.begin()->first - 1;
    prom.queue_checks_finished.Increment();
    return newBuildsByID.empty();
}

@@ -334,13 +364,13 @@ void State::processQueueChange(Connection & conn)
        for (auto i = builds_->begin(); i != builds_->end(); ) {
            auto b = currentIds.find(i->first);
            if (b == currentIds.end()) {
                printMsg(lvlInfo, format("discarding cancelled build %1%") % i->first);
                printInfo("discarding cancelled build %1%", i->first);
                i = builds_->erase(i);
                // FIXME: ideally we would interrupt active build steps here.
                continue;
            }
            if (i->second->globalPriority < b->second) {
                printMsg(lvlInfo, format("priority of build %1% increased") % i->first);
                printInfo("priority of build %1% increased", i->first);
                i->second->globalPriority = b->second;
                i->second->propagatePriorities();
            }
@@ -375,6 +405,34 @@ void State::processQueueChange(Connection & conn)
}


std::map<DrvOutput, std::optional<StorePath>> State::getMissingRemotePaths(
    ref<Store> destStore,
    const std::map<DrvOutput, std::optional<StorePath>> & paths)
{
    Sync<std::map<DrvOutput, std::optional<StorePath>>> missing_;
    ThreadPool tp;

    for (auto & [output, maybeOutputPath] : paths) {
        if (!maybeOutputPath) {
            auto missing(missing_.lock());
            missing->insert({output, maybeOutputPath});
        } else {
            tp.enqueue([&] {
                if (!destStore->isValidPath(*maybeOutputPath)) {
                    auto missing(missing_.lock());
                    missing->insert({output, maybeOutputPath});
                }
            });
        }
    }

    tp.process();

    auto missing(missing_.lock());
    return *missing;
}

Step::ptr State::createStep(ref<Store> destStore,
    Connection & conn, Build::ptr build, const StorePath & drvPath,
    Build::ptr referringBuild, Step::ptr referringStep, std::set<StorePath> & finishedDrvs,
@@ -424,6 +482,8 @@ Step::ptr State::createStep(ref<Store> destStore,

    if (!isNew) return step;

    prom.queue_steps_created.Increment();

    printMsg(lvlDebug, "considering derivation ‘%1%’", localStore->printStorePath(drvPath));

    /* Initialize the step. Note that the step may be visible in
@@ -431,17 +491,23 @@ Step::ptr State::createStep(ref<Store> destStore,
       it's not runnable yet, and other threads won't make it
       runnable while step->created == false. */
    step->drv = std::make_unique<Derivation>(localStore->readDerivation(drvPath));
    step->parsedDrv = std::make_unique<ParsedDerivation>(drvPath, *step->drv);
    {
        auto parsedOpt = StructuredAttrs::tryParse(step->drv->env);
        try {
            step->drvOptions = std::make_unique<DerivationOptions>(
                DerivationOptions::fromStructuredAttrs(step->drv->env, parsedOpt ? &*parsedOpt : nullptr));
        } catch (Error & e) {
            e.addTrace({}, "while parsing derivation '%s'", localStore->printStorePath(drvPath));
            throw;
        }
    }

    step->preferLocalBuild = step->parsedDrv->willBuildLocally(*localStore);
    step->isDeterministic = get(step->drv->env, "isDetermistic").value_or("0") == "1";
    step->preferLocalBuild = step->drvOptions->willBuildLocally(*localStore, *step->drv);
    step->isDeterministic = getOr(step->drv->env, "isDetermistic", "0") == "1";

    step->systemType = step->drv->platform;
    {
        auto i = step->drv->env.find("requiredSystemFeatures");
        StringSet features;
        if (i != step->drv->env.end())
            features = step->requiredSystemFeatures = tokenizeString<std::set<std::string>>(i->second);
        StringSet features = step->requiredSystemFeatures = step->drvOptions->getRequiredSystemFeatures(*step->drv);
        if (step->preferLocalBuild)
            features.insert("local");
        if (!features.empty()) {
@@ -455,26 +521,40 @@ Step::ptr State::createStep(ref<Store> destStore,
        throw PreviousFailure{step};

    /* Are all outputs valid? */
    bool valid = true;
    DerivationOutputs missing;
    for (auto & i : step->drv->outputs)
        if (!destStore->isValidPath(*i.second.path(*localStore, step->drv->name, i.first))) {
            valid = false;
            missing.insert_or_assign(i.first, i.second);
        }
    auto outputHashes = staticOutputHashes(*localStore, *(step->drv));
    std::map<DrvOutput, std::optional<StorePath>> paths;
    for (auto & [outputName, maybeOutputPath] : destStore->queryPartialDerivationOutputMap(drvPath, &*localStore)) {
        auto outputHash = outputHashes.at(outputName);
        paths.insert({{outputHash, outputName}, maybeOutputPath});
    }

    auto missing = getMissingRemotePaths(destStore, paths);
    bool valid = missing.empty();

    /* Try to copy the missing paths from the local store or from
       substitutes. */
    if (!missing.empty()) {

        size_t avail = 0;
        for (auto & i : missing) {
            auto path = i.second.path(*localStore, step->drv->name, i.first);
            if (/* localStore != destStore && */ localStore->isValidPath(*path))
        for (auto & [i, pathOpt] : missing) {
            // If we don't know the output path from the destination
            // store, see if the local store can tell us.
            if (/* localStore != destStore && */ !pathOpt && experimentalFeatureSettings.isEnabled(Xp::CaDerivations))
                if (auto maybeRealisation = localStore->queryRealisation(i))
                    pathOpt = maybeRealisation->outPath;

            if (!pathOpt) {
                // No hope of getting the store object if we don't know
                // the path.
                continue;
            }
            auto & path = *pathOpt;

            if (/* localStore != destStore && */ localStore->isValidPath(path))
                avail++;
            else if (useSubstitutes) {
                SubstitutablePathInfos infos;
                localStore->querySubstitutablePathInfos({{*path, {}}}, infos);
                localStore->querySubstitutablePathInfos({{path, {}}}, infos);
                if (infos.size() == 1)
                    avail++;
            }
@@ -482,38 +562,43 @@ Step::ptr State::createStep(ref<Store> destStore,

        if (missing.size() == avail) {
            valid = true;
            for (auto & i : missing) {
                auto path = i.second.path(*localStore, step->drv->name, i.first);
            for (auto & [i, pathOpt] : missing) {
                // If we found everything, then we should know the path
                // to every missing store object now.
                assert(pathOpt);
                auto & path = *pathOpt;

                try {
                    time_t startTime = time(0);

                    if (localStore->isValidPath(*path))
                    if (localStore->isValidPath(path))
                        printInfo("copying output ‘%1%’ of ‘%2%’ from local store",
                            localStore->printStorePath(*path),
                            localStore->printStorePath(path),
                            localStore->printStorePath(drvPath));
                    else {
                        printInfo("substituting output ‘%1%’ of ‘%2%’",
                            localStore->printStorePath(*path),
                            localStore->printStorePath(path),
                            localStore->printStorePath(drvPath));
                        localStore->ensurePath(*path);
                        localStore->ensurePath(path);
                        // FIXME: should copy directly from substituter to destStore.
                    }

                    copyClosure(ref<Store>(localStore), destStore, {*path});
                    copyClosure(*localStore, *destStore,
                        StorePathSet { path },
                        NoRepair, CheckSigs, NoSubstitute);

                    time_t stopTime = time(0);

                    {
                        auto mc = startDbUpdate();
                        pqxx::work txn(conn);
                        createSubstitutionStep(txn, startTime, stopTime, build, drvPath, "out", *path);
                        createSubstitutionStep(txn, startTime, stopTime, build, drvPath, *(step->drv), "out", path);
                        txn.commit();
                    }

                } catch (Error & e) {
                    printError("while copying/substituting output ‘%s’ of ‘%s’: %s",
                        localStore->printStorePath(*path),
                        localStore->printStorePath(path),
                        localStore->printStorePath(drvPath),
                        e.what());
                    valid = false;
@@ -533,7 +618,7 @@ Step::ptr State::createStep(ref<Store> destStore,
    printMsg(lvlDebug, "creating build step ‘%1%’", localStore->printStorePath(drvPath));

    /* Create steps for the dependencies. */
    for (auto & i : step->drv->inputDrvs) {
    for (auto & i : step->drv->inputDrvs.map) {
        auto dep = createStep(destStore, conn, build, i.first, 0, step, finishedDrvs, newSteps, newRunnable);
        if (dep) {
            auto step_(step->state.lock());
@@ -558,7 +643,7 @@ Step::ptr State::createStep(ref<Store> destStore,

Jobset::ptr State::createJobset(pqxx::work & txn,
    const std::string & projectName, const std::string & jobsetName)
    const std::string & projectName, const std::string & jobsetName, const JobsetID jobsetID)
{
    auto p = std::make_pair(projectName, jobsetName);

@@ -569,9 +654,8 @@ Jobset::ptr State::createJobset(pqxx::work & txn,
    }

    auto res = txn.exec_params1
        ("select schedulingShares from Jobsets where project = $1 and name = $2",
         projectName,
         jobsetName);
        ("select schedulingShares from Jobsets where id = $1",
         jobsetID);
    if (res.empty()) throw Error("missing jobset - can't happen");

    auto shares = res["schedulingShares"].as<unsigned int>();
@@ -582,10 +666,9 @@ Jobset::ptr State::createJobset(pqxx::work & txn,
    /* Load the build steps from the last 24 hours. */
    auto res2 = txn.exec_params
        ("select s.startTime, s.stopTime from BuildSteps s join Builds b on build = id "
         "where s.startTime is not null and s.stopTime > $1 and project = $2 and jobset = $3",
         "where s.startTime is not null and s.stopTime > $1 and jobset_id = $2",
         time(0) - Jobset::schedulingWindow * 10,
         projectName,
         jobsetName);
         jobsetID);
    for (auto const & row : res2) {
        time_t startTime = row["startTime"].as<time_t>();
        time_t stopTime = row["stopTime"].as<time_t>();
@@ -607,28 +690,30 @@ void State::processJobsetSharesChange(Connection & conn)
    auto res = txn.exec("select project, name, schedulingShares from Jobsets");
    for (auto const & row : res) {
        auto jobsets_(jobsets.lock());
        auto i = jobsets_->find(std::make_pair(row["project"].as<string>(), row["name"].as<string>()));
        auto i = jobsets_->find(std::make_pair(row["project"].as<std::string>(), row["name"].as<std::string>()));
        if (i == jobsets_->end()) continue;
        i->second->setShares(row["schedulingShares"].as<unsigned int>());
    }
}


BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore, const nix::Derivation & drv)
BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore, const nix::StorePath & drvPath)
{
    auto derivationOutputs = destStore->queryDerivationOutputMap(drvPath, &*localStore);

    {
        pqxx::work txn(conn);

        for (auto & [name, output] : drv.outputsAndOptPaths(*localStore)) {
        for (auto & [name, output] : derivationOutputs) {
            auto r = txn.exec_params
                ("select id, buildStatus, releaseName, closureSize, size from Builds b "
                 "join BuildOutputs o on b.id = o.build "
                 "where finished = 1 and (buildStatus = 0 or buildStatus = 6) and path = $1",
                 localStore->printStorePath(*output.second));
                 localStore->printStorePath(output));
            if (r.empty()) continue;
            BuildID id = r[0][0].as<BuildID>();

            printMsg(lvlInfo, format("reusing build %d") % id);
            printInfo("reusing build %d", id);

            BuildOutput res;
            res.failed = r[0][1].as<int>() == bsFailedWithOutput;
@@ -651,7 +736,7 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store>
                product.fileSize = row[2].as<off_t>();
            }
            if (!row[3].is_null())
                product.sha256hash = Hash::parseAny(row[3].as<std::string>(), htSHA256);
                product.sha256hash = Hash::parseAny(row[3].as<std::string>(), HashAlgorithm::SHA256);
            if (!row[4].is_null())
                product.path = row[4].as<std::string>();
            product.name = row[5].as<std::string>();
@@ -678,5 +763,5 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store>
    }

    NarMemberDatas narMembers;
    return getBuildOutput(destStore, narMembers, drv);
    return getBuildOutput(destStore, narMembers, derivationOutputs);
}

@@ -6,19 +6,33 @@
#include <map>
#include <memory>
#include <queue>
#include <regex>
#include <semaphore>

#include <prometheus/counter.h>
#include <prometheus/gauge.h>
#include <prometheus/registry.h>

#include "db.hh"

#include "parsed-derivations.hh"
#include "pathlocks.hh"
#include "pool.hh"
#include "store-api.hh"
#include "sync.hh"
#include <nix/store/derivations.hh>
#include <nix/store/derivation-options.hh>
#include <nix/store/pathlocks.hh>
#include <nix/util/pool.hh>
#include <nix/store/build-result.hh>
#include <nix/store/store-api.hh>
#include <nix/util/sync.hh>
#include "nar-extractor.hh"
#include <nix/store/serve-protocol.hh>
#include <nix/store/serve-protocol-impl.hh>
#include <nix/store/serve-protocol-connection.hh>
#include <nix/store/machines.hh>


typedef unsigned int BuildID;

typedef unsigned int JobsetID;

typedef std::chrono::time_point<std::chrono::system_clock> system_time;

typedef std::atomic<unsigned long> counter;
@@ -46,6 +60,7 @@ typedef enum {
    ssConnecting = 10,
    ssSendingInputs = 20,
    ssBuilding = 30,
    ssWaitingForLocalSlot = 35,
    ssReceivingOutputs = 40,
    ssPostProcessing = 50,
} StepState;
@@ -70,6 +85,8 @@ struct RemoteResult
    {
        return stepStatus == bsCachedFailure ? bsFailed : stepStatus;
    }

    void updateWithBuildResult(const nix::BuildResult &);
};


@@ -123,6 +140,7 @@ struct Build
    BuildID id;
    nix::StorePath drvPath;
    std::map<std::string, nix::StorePath> outputs;
    JobsetID jobsetId;
    std::string projectName, jobsetName, jobName;
    time_t timestamp;
    unsigned int maxSilentTime, buildTimeout;
@@ -153,8 +171,8 @@ struct Step

    nix::StorePath drvPath;
    std::unique_ptr<nix::Derivation> drv;
    std::unique_ptr<nix::ParsedDerivation> parsedDrv;
    std::set<std::string> requiredSystemFeatures;
    std::unique_ptr<nix::DerivationOptions> drvOptions;
    nix::StringSet requiredSystemFeatures;
    bool preferLocalBuild;
    bool isDeterministic;
    std::string systemType; // concatenation of drv.platform and requiredSystemFeatures
@@ -222,18 +240,10 @@ void getDependents(Step::ptr step, std::set<Build::ptr> & builds, std::set<Step:
void visitDependencies(std::function<void(Step::ptr)> visitor, Step::ptr step);


struct Machine
struct Machine : nix::Machine
{
    typedef std::shared_ptr<Machine> ptr;

    bool enabled{true};

    std::string sshName, sshKey;
    std::set<std::string> systemTypes, supportedFeatures, mandatoryFeatures;
    unsigned int maxJobs = 1;
    float speedFactor = 1.0;
    std::string sshPublicHostKey;

    struct State {
        typedef std::shared_ptr<State> ptr;
        counter currentJobs{0};
@@ -283,10 +293,13 @@ struct Machine
        return true;
    }

    bool isLocalhost()
    {
        return sshName == "localhost";
    }
    bool isLocalhost() const;

    // A connection to a machine
    struct Connection : nix::ServeProto::BasicClientConnection {
        // Backpointer to the machine
        ptr machine;
    };
};


@@ -339,9 +352,14 @@ private:
    nix::Pool<Connection> dbPool;

    /* The build machines. */
    typedef std::map<std::string, Machine::ptr> Machines;
    std::mutex machinesReadyLock;
    typedef std::map<nix::StoreReference::Variant, Machine::ptr> Machines;
    nix::Sync<Machines> machines; // FIXME: use atomic_shared_ptr

    /* Throttler for CPU-bound local work. */
    static constexpr unsigned int maxSupportedLocalWorkers = 1024;
    std::counting_semaphore<maxSupportedLocalWorkers> localWorkThrottler;

    /* Various stats. */
    time_t startedAt;
    counter nrBuildsRead{0};
@@ -351,6 +369,7 @@ private:
    counter nrStepsDone{0};
    counter nrStepsBuilding{0};
    counter nrStepsCopyingTo{0};
    counter nrStepsWaitingForDownloadSlot{0};
    counter nrStepsCopyingFrom{0};
    counter nrStepsWaiting{0};
    counter nrUnsupportedSteps{0};
@@ -367,6 +386,7 @@ private:

    /* Specific build to do for --build-one (testing only). */
    BuildID buildOne;
    bool buildOneDone = false;

    /* Statistics per machine type for the Hydra auto-scaler. */
    struct MachineType
@@ -380,7 +400,6 @@ private:

    struct MachineReservation
    {
        typedef std::shared_ptr<MachineReservation> ptr;
        State & state;
        Step::ptr step;
        Machine::ptr machine;
@@ -418,7 +437,7 @@ private:

    /* How often the build steps of a jobset should be repeated in
       order to detect non-determinism. */
    std::map<std::pair<std::string, std::string>, unsigned int> jobsetRepeats;
    std::map<std::pair<std::string, std::string>, size_t> jobsetRepeats;

    bool uploadLogsToBinaryCache;

@@ -427,8 +446,30 @@ private:
       via gc_roots_dir. */
    nix::Path rootsDir;

    std::string metricsAddr;

    struct PromMetrics
    {
        std::shared_ptr<prometheus::Registry> registry;

        prometheus::Counter& queue_checks_started;
        prometheus::Counter& queue_build_loads;
        prometheus::Counter& queue_steps_created;
        prometheus::Counter& queue_checks_early_exits;
        prometheus::Counter& queue_checks_finished;

        prometheus::Counter& dispatcher_time_spent_running;
        prometheus::Counter& dispatcher_time_spent_waiting;

        prometheus::Counter& queue_monitor_time_spent_running;
        prometheus::Counter& queue_monitor_time_spent_waiting;

        PromMetrics();
    };
    PromMetrics prom;

public:
    State();
    State(std::optional<std::string> metricsAddrOpt);

private:

@@ -456,23 +497,28 @@ private:
        const std::string & machine);

    int createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
        Build::ptr build, const nix::StorePath & drvPath, const std::string & outputName, const nix::StorePath & storePath);
        Build::ptr build, const nix::StorePath & drvPath, const nix::Derivation drv, const std::string & outputName, const nix::StorePath & storePath);

    void updateBuild(pqxx::work & txn, Build::ptr build, BuildStatus status);

    void queueMonitor();

    void queueMonitorLoop();
    void queueMonitorLoop(Connection & conn);

    /* Check the queue for new builds. */
    bool getQueuedBuilds(Connection & conn,
        nix::ref<nix::Store> destStore, unsigned int & lastBuildId);
    bool getQueuedBuilds(Connection & conn, nix::ref<nix::Store> destStore);

    /* Handle cancellation, deletion and priority bumps. */
    void processQueueChange(Connection & conn);

    BuildOutput getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore,
        const nix::Derivation & drv);
        const nix::StorePath & drvPath);

    /* Returns paths missing from the remote store. Paths are processed in
     * parallel to work around the possible latency of remote stores. */
    std::map<nix::DrvOutput, std::optional<nix::StorePath>> getMissingRemotePaths(
        nix::ref<nix::Store> destStore,
        const std::map<nix::DrvOutput, std::optional<nix::StorePath>> & paths);

    Step::ptr createStep(nix::ref<nix::Store> store,
        Connection & conn, Build::ptr build, const nix::StorePath & drvPath,
@@ -485,11 +531,10 @@ private:
        BuildID buildId,
        const RemoteResult & result,
        Machine::ptr machine,
        bool & stepFinished,
        bool & quit);
        bool & stepFinished);

    Jobset::ptr createJobset(pqxx::work & txn,
        const std::string & projectName, const std::string & jobsetName);
        const std::string & projectName, const std::string & jobsetName, const JobsetID);

    void processJobsetSharesChange(Connection & conn);

@@ -504,19 +549,19 @@ private:

    void abortUnsupported();

    void builder(MachineReservation::ptr reservation);
    void builder(std::unique_ptr<MachineReservation> reservation);

    /* Perform the given build step. Return true if the step is to be
       retried. */
    enum StepResult { sDone, sRetry, sMaybeCancelled };
    StepResult doBuildStep(nix::ref<nix::Store> destStore,
        MachineReservation::ptr reservation,
        std::unique_ptr<MachineReservation> reservation,
        std::shared_ptr<ActiveStep> activeStep);

    void buildRemote(nix::ref<nix::Store> destStore,
        std::unique_ptr<MachineReservation> reservation,
        Machine::ptr machine, Step::ptr step,
        unsigned int maxSilentTime, unsigned int buildTimeout,
        unsigned int repeats,
        const nix::ServeProto::BuildOptions & buildOptions,
        RemoteResult & result, std::shared_ptr<ActiveStep> activeStep,
        std::function<void(StepState)> updateStep,
        NarMemberDatas & narMembers);
@@ -539,6 +584,8 @@ private:

    void addRoot(const nix::StorePath & storePath);

    void runMetricsExporter();

public:

    void showStatus();

@@ -6,9 +6,9 @@ use parent 'Catalyst';
use Moose;
use Hydra::Plugin;
use Hydra::Model::DB;
use Hydra::Config qw(getLDAPConfigAmbient);
use Catalyst::Runtime '5.70';
use Catalyst qw/ConfigLoader
                Unicode::Encoding
                Static::Simple
                StackTrace
                Authentication
@@ -16,10 +16,10 @@ use Catalyst qw/ConfigLoader
                Session
                Session::Store::FastMmap
                Session::State::Cookie
                Captcha/,
                Captcha
                PrometheusTiny/,
    '-Log=warn,fatal,error';
use CatalystX::RoleApplicator;
use YAML qw(LoadFile);
use Path::Class 'file';

our $VERSION = '0.01';
@@ -27,27 +27,31 @@ our $VERSION = '0.01';
__PACKAGE__->config(
    name => 'Hydra',
    default_view => "TT",
    authentication => {
    'Plugin::Authentication' => {
        default_realm => "dbic",
        realms => {
            dbic => {
                credential => {
                    class => "Password",
                    password_field => "password",
                    password_type => "hashed",
                    password_hash_type => "SHA-1",
                },
                store => {
                    class => "DBIx::Class",
                    user_class => "DB::Users",
                    role_relation => "userroles",
                    role_field => "role",
                },

            dbic => {
                credential => {
                    class => "Password",
                    password_field => "password",
                    password_type => "self_check",
                },
                store => {
                    class => "DBIx::Class",
                    user_class => "DB::Users",
                    role_relation => "userroles",
                    role_field => "role",
                },
            ldap => $ENV{'HYDRA_LDAP_CONFIG'} ? LoadFile(
                file($ENV{'HYDRA_LDAP_CONFIG'})
            ) : undef
            },
            ldap => getLDAPConfigAmbient()->{'config'}
        },
    'Plugin::ConfigLoader' => {
        driver => {
            'General' => \%Hydra::Config::configGeneralOpts
        }
    },
    'Plugin::PrometheusTiny' => {
        include_action_labels => 1,
    },
    'Plugin::Static::Simple' => {
        send_etag => 1,
@@ -3,8 +3,7 @@ package Hydra::Base::Controller::NixChannel;
use strict;
use warnings;
use base 'Hydra::Base::Controller::REST';
use List::MoreUtils qw(any);
use Nix::Store;
use List::SomeUtils qw(any);
use Hydra::Helper::Nix;
use Hydra::Helper::CatalystUtils;

@@ -30,7 +29,7 @@ sub getChannelData {
    my $outputs = {};
    foreach my $output (@outputs) {
        my $outPath = $output->get_column("outpath");
        next if $checkValidity && !isValidPath($outPath);
        next if $checkValidity && !$MACHINE_LOCAL_STORE->isValidPath($outPath);
        $outputs->{$output->get_column("outname")} = $outPath;
        push @storePaths, $outPath;
        # Put the system type in the manifest (for top-level
@@ -5,10 +5,15 @@ use strict;
use warnings;

use base 'DBIx::Class';
use JSON::MaybeXS;

sub TO_JSON {
    my $self = shift;

    if ($self->can("as_json")) {
        return $self->as_json();
    }

    my $hint = $self->json_hint;

    my %json = ();
@@ -17,6 +22,14 @@ sub TO_JSON {
        $json{$column} = $self->get_column($column);
    }

    foreach my $column (@{$hint->{string_columns}}) {
        $json{$column} = $self->get_column($column) // "";
    }

    foreach my $column (@{$hint->{boolean_columns}}) {
        $json{$column} = $self->get_column($column) ? JSON::MaybeXS::true : JSON::MaybeXS::false;
    }

    foreach my $relname (keys %{$hint->{relations}}) {
        my $key = $hint->{relations}->{$relname};
        $json{$relname} = [ map { $_->$key } $self->$relname ];
168
src/lib/Hydra/Config.pm
Normal file
@@ -0,0 +1,168 @@
package Hydra::Config;

use strict;
use warnings;
use Config::General;
use List::SomeUtils qw(none);
use YAML qw(LoadFile);

our @ISA = qw(Exporter);
our @EXPORT = qw(
    getHydraConfig
    getLDAPConfig
    getLDAPConfigAmbient
);

our %configGeneralOpts = (-UseApacheInclude => 1, -IncludeAgain => 1, -IncludeRelative => 1);

my $hydraConfigCache;

sub getHydraConfig {
    return $hydraConfigCache if defined $hydraConfigCache;

    my $conf;

    if ($ENV{"HYDRA_CONFIG"}) {
        $conf = $ENV{"HYDRA_CONFIG"};
    } else {
        require Hydra::Model::DB;
        $conf = Hydra::Model::DB::getHydraPath() . "/hydra.conf"
    };

    if (-f $conf) {
        $hydraConfigCache = loadConfig($conf);
    } else {
        $hydraConfigCache = {};
    }

    return $hydraConfigCache;
}

sub loadConfig {
    my ($sourceFile) = @_;

    my %opts = (%configGeneralOpts, -ConfigFile => $sourceFile);

    return { Config::General->new(%opts)->getall };
}

sub is_ldap_in_legacy_mode {
    my ($config, %env) = @_;

    my $legacy_defined = defined $env{"HYDRA_LDAP_CONFIG"};

    if (defined $config->{"ldap"}) {
        if ($legacy_defined) {
            die "The legacy environment variable HYDRA_LDAP_CONFIG is set, but config is also specified in hydra.conf. Please unset the environment variable.";
        }

        return 0;
    } elsif ($legacy_defined) {
        warn "Hydra is configured to use LDAP via the HYDRA_LDAP_CONFIG, a deprecated method. Please see the docs about configuring LDAP in the hydra.conf.";
        return 1;
    } else {
        return 0;
    }
}

sub getLDAPConfigAmbient {
    return getLDAPConfig(getHydraConfig(), %ENV);
}

sub getLDAPConfig {
    my ($config, %env) = @_;

    my $ldap_config;

    if (is_ldap_in_legacy_mode($config, %env)) {
        $ldap_config = get_legacy_ldap_config($env{"HYDRA_LDAP_CONFIG"});
    } else {
        $ldap_config = $config->{"ldap"};
    }

    $ldap_config->{"role_mapping"} = normalize_ldap_role_mappings($ldap_config->{"role_mapping"});

    return $ldap_config;
}

sub get_legacy_ldap_config {
    my ($ldap_yaml_file) = @_;

    return {
        config => LoadFile($ldap_yaml_file),
        role_mapping => {
            "hydra_admin" => [ "admin" ],
            "hydra_bump-to-front" => [ "bump-to-front" ],
            "hydra_cancel-build" => [ "cancel-build" ],
            "hydra_create-projects" => [ "create-projects" ],
            "hydra_eval-jobset" => [ "eval-jobset" ],
            "hydra_restart-jobs" => [ "restart-jobs" ],
        },
    };
}

sub normalize_ldap_role_mappings {
    my ($input_map) = @_;

    my $mapping = {};

    my @errors;

    for my $group (keys %{$input_map}) {
        my $input = $input_map->{$group};

        if (ref $input eq "ARRAY") {
            $mapping->{$group} = $input;
        } elsif (ref $input eq "") {
            $mapping->{$group} = [ $input ];
        } else {
            push @errors, "On group '$group': the value is of type ${\ref $input}. Only strings and lists are acceptable.";
            $mapping->{$group} = [ ];
        }

        eval {
            validate_roles($mapping->{$group});
        };
        if ($@) {
            push @errors, "On group '$group': $@";
        }
    }

    if (@errors) {
        die "Failed to normalize LDAP role mappings:\n" . (join "\n", @errors);
    }

    return $mapping;
}

sub validate_roles {
    my ($roles) = @_;

    my @invalid;
    my $valid = valid_roles();

    for my $role (@$roles) {
        if (none { $_ eq $role } @$valid) {
            push @invalid, "'$role'";
        }
    }

    if (@invalid) {
        die "Invalid roles: ${\join ', ', @invalid}. Valid roles are: ${\join ', ', @$valid}.";
    }

    return 1;
}

sub valid_roles {
    return [
        "admin",
        "bump-to-front",
        "cancel-build",
        "create-projects",
        "eval-jobset",
        "restart-jobs",
    ];
}

1;
@@ -7,12 +7,10 @@ use base 'Hydra::Base::Controller::REST';
use Hydra::Helper::Nix;
use Hydra::Helper::CatalystUtils;
use Hydra::Controller::Project;
use JSON;
use JSON::Any;
use JSON::MaybeXS;
use DateTime;
use Digest::SHA qw(sha256_hex);
use Text::Diff;
use File::Slurp;
use IPC::Run qw(run);


@@ -26,8 +24,8 @@ sub buildToHash {
    my ($build) = @_;
    my $result = {
        id => $build->id,
        project => $build->get_column("project"),
        jobset => $build->get_column("jobset"),
        project => $build->jobset->get_column("project"),
        jobset => $build->jobset->get_column("name"),
        job => $build->get_column("job"),
        system => $build->system,
        nixname => $build->nixname,
@@ -56,18 +54,24 @@ sub latestbuilds : Chained('api') PathPart('latestbuilds') Args(0) {
    my $system = $c->request->params->{system};

    my $filter = {finished => 1};
    $filter->{project} = $project if ! $project eq "";
    $filter->{jobset} = $jobset if ! $jobset eq "";
    $filter->{"jobset.project"} = $project if ! $project eq "";
    $filter->{"jobset.name"} = $jobset if ! $jobset eq "";
    $filter->{job} = $job if !$job eq "";
    $filter->{system} = $system if !$system eq "";

    my @latest = $c->model('DB::Builds')->search($filter, {rows => $nr, order_by => ["id DESC"] });
    my @latest = $c->model('DB::Builds')->search(
        $filter,
        {
            rows => $nr,
            order_by => ["id DESC"],
            join => [ "jobset" ]
        });

    my @list;
    push @list, buildToHash($_) foreach @latest;

    $c->stash->{'plain'} = {
        data => scalar (JSON::Any->objToJson(\@list))
        data => scalar (encode_json(\@list))
    };
    $c->forward('Hydra::View::Plain');
}
@@ -88,7 +92,7 @@ sub jobsetToHash {
        triggertime => $jobset->triggertime,
        fetcherrormsg => $jobset->fetcherrormsg,
        errortime => $jobset->errortime,
        haserrormsg => defined($jobset->errormsg) && $jobset->errormsg ne "" ? JSON::true : JSON::false
        haserrormsg => defined($jobset->errormsg) && $jobset->errormsg ne "" ? JSON::MaybeXS::true : JSON::MaybeXS::false
    };
}

@@ -108,7 +112,7 @@ sub jobsets : Chained('api') PathPart('jobsets') Args(0) {
    push @list, jobsetToHash($_) foreach @jobsets;

    $c->stash->{'plain'} = {
        data => scalar (JSON::Any->objToJson(\@list))
        data => scalar (encode_json(\@list))
    };
    $c->forward('Hydra::View::Plain');
}
@@ -126,7 +130,7 @@ sub queue : Chained('api') PathPart('queue') Args(0) {
    push @list, buildToHash($_) foreach @builds;

    $c->stash->{'plain'} = {
        data => scalar (JSON::Any->objToJson(\@list))
        data => scalar (encode_json(\@list))
    };
    $c->forward('Hydra::View::Plain');
}
@@ -156,21 +160,31 @@ sub nrbuilds : Chained('api') PathPart('nrbuilds') Args(0) {
    my $system = $c->request->params->{system};

    my $filter = {finished => 1};
    $filter->{project} = $project if ! $project eq "";
    $filter->{jobset} = $jobset if ! $jobset eq "";
    $filter->{"jobset.project"} = $project if ! $project eq "";
    $filter->{"jobset.name"} = $jobset if ! $jobset eq "";
    $filter->{job} = $job if !$job eq "";
    $filter->{system} = $system if !$system eq "";

    $base = 60*60 if($period eq "hour");
    $base = 24*60*60 if($period eq "day");

    my @stats = $c->model('DB::Builds')->search($filter, {select => [{ count => "*" }], as => ["nr"], group_by => ["timestamp - timestamp % $base"], order_by => "timestamp - timestamp % $base DESC", rows => $nr});
    my @stats = $c->model('DB::Builds')->search(
        $filter,
        {
            select => [{ count => "*" }],
            as => ["nr"],
            group_by => ["timestamp - timestamp % $base"],
            order_by => "timestamp - timestamp % $base DESC",
            rows => $nr,
            join => [ "jobset" ]
        }
    );
    my @arr;
    push @arr, int($_->get_column("nr")) foreach @stats;
    @arr = reverse(@arr);

    $c->stash->{'plain'} = {
        data => scalar (JSON::Any->objToJson(\@arr))
        data => scalar (encode_json(\@arr))
    };
    $c->forward('Hydra::View::Plain');
}
@@ -202,8 +216,8 @@ sub scmdiff : Path('/api/scmdiff') Args(0) {
    } elsif ($type eq "git") {
        my $clonePath = getSCMCacheDir . "/git/" . sha256_hex($uri);
        die if ! -d $clonePath;
        $diff .= `(cd $clonePath; git log $rev1..$rev2)`;
        $diff .= `(cd $clonePath; git diff $rev1..$rev2)`;
        $diff .= `(cd $clonePath; git --git-dir .git log $rev1..$rev2)`;
        $diff .= `(cd $clonePath; git --git-dir .git diff $rev1..$rev2)`;
    }

    $c->stash->{'plain'} = { data => (scalar $diff) || " " };
@@ -225,6 +239,8 @@ sub triggerJobset {
sub push : Chained('api') PathPart('push') Args(0) {
    my ($self, $c) = @_;

    requirePost($c);

    $c->{stash}->{json}->{jobsetsTriggered} = [];

    my $force = exists $c->request->query_params->{force};
@@ -232,17 +248,24 @@ sub push : Chained('api') PathPart('push') Args(0) {
    foreach my $s (@jobsets) {
        my ($p, $j) = parseJobsetName($s);
        my $jobset = $c->model('DB::Jobsets')->find($p, $j);
        requireEvalJobsetPrivileges($c, $jobset->project);
        next unless defined $jobset && ($force || ($jobset->project->enabled && $jobset->enabled));
        triggerJobset($self, $c, $jobset, $force);
    }

    my @repos = split /,/, ($c->request->query_params->{repos} // "");
    foreach my $r (@repos) {
        triggerJobset($self, $c, $_, $force) foreach $c->model('DB::Jobsets')->search(
        my @jobsets = $c->model('DB::Jobsets')->search(
            { 'project.enabled' => 1, 'me.enabled' => 1 },
            { join => 'project'
            , where => \ [ 'exists (select 1 from JobsetInputAlts where project = me.project and jobset = me.name and value = ?)', [ 'value', $r ] ]
            {
                join => 'project',
                where => \ [ 'exists (select 1 from JobsetInputAlts where project = me.project and jobset = me.name and value = ?)', [ 'value', $r ] ],
                order_by => 'me.id DESC'
            });
        foreach my $jobset (@jobsets) {
            requireEvalJobsetPrivileges($c, $jobset->project);
            triggerJobset($self, $c, $jobset, $force)
        }
    }

    $self->status_ok(
@@ -251,7 +274,6 @@ sub push : Chained('api') PathPart('push') Args(0) {
    );
}


sub push_github : Chained('api') PathPart('push-github') Args(0) {
    my ($self, $c) = @_;

@@ -270,6 +292,23 @@ sub push_github : Chained('api') PathPart('push-github') Args(0) {
    $c->response->body("");
}

sub push_gitea : Chained('api') PathPart('push-gitea') Args(0) {
    my ($self, $c) = @_;

    $c->{stash}->{json}->{jobsetsTriggered} = [];

    my $in = $c->request->{data};
    my $url = $in->{repository}->{clone_url} or die;
    $url =~ s/.git$//;
    print STDERR "got push from Gitea repository $url\n";

    triggerJobset($self, $c, $_, 0) foreach $c->model('DB::Jobsets')->search(
        { 'project.enabled' => 1, 'me.enabled' => 1 },
        { join => 'project'
        , where => \ [ 'me.flake like ? or exists (select 1 from JobsetInputAlts where project = me.project and jobset = me.name and value like ?)', [ 'flake', "%$url%"], [ 'value', "%$url%" ] ]
        });
    $c->response->body("");
}


1;
@@ -6,7 +6,6 @@ use base 'Catalyst::Controller';
use Hydra::Helper::Nix;
use Hydra::Helper::CatalystUtils;
use Data::Dump qw(dump);
use Digest::SHA1 qw(sha1_hex);
use Config::General;


@@ -33,7 +32,7 @@ sub machines : Chained('admin') PathPart('machines') Args(0) {

sub clear_queue_non_current : Chained('admin') PathPart('clear-queue-non-current') Args(0) {
    my ($self, $c) = @_;
    my $builds = $c->model('DB::Builds')->search(
    my $builds = $c->model('DB::Builds')->search_rs(
        { id => { -in => \ "select id from Builds where id in ((select id from Builds where finished = 0) except (select build from JobsetEvalMembers where eval in (select max(id) from JobsetEvals where hasNewBuilds = 1 group by jobset_id)))" }
        });
    my $n = cancelBuilds($c->model('DB')->schema, $builds);
@@ -7,16 +7,15 @@ use base 'Hydra::Base::Controller::NixChannel';
|
||||
use Hydra::Helper::Nix;
|
||||
use Hydra::Helper::CatalystUtils;
|
||||
use File::Basename;
|
||||
use File::LibMagic;
|
||||
use File::stat;
|
||||
use File::Slurp;
|
||||
use Data::Dump qw(dump);
|
||||
use Nix::Store;
|
||||
use Nix::Config;
|
||||
use List::MoreUtils qw(all);
|
||||
use List::SomeUtils qw(all);
|
||||
use Encode;
|
||||
use MIME::Types;
|
||||
use JSON::PP;
|
||||
use WWW::Form::UrlEncoded::PP qw();
|
||||
|
||||
use feature 'state';
|
||||
|
||||
sub buildChain :Chained('/') :PathPart('build') :CaptureArgs(1) {
|
||||
my ($self, $c, $id) = @_;
|
||||
@@ -38,6 +37,18 @@ sub buildChain :Chained('/') :PathPart('build') :CaptureArgs(1) {
|
||||
$c->stash->{project} = $c->stash->{build}->project;
|
||||
$c->stash->{jobset} = $c->stash->{build}->jobset;
|
||||
$c->stash->{job} = $c->stash->{build}->job;
|
||||
$c->stash->{runcommandlogs} = [$c->stash->{build}->runcommandlogs->search({}, {order_by => ["id DESC"]})];
|
||||
|
||||
$c->stash->{runcommandlogProblem} = undef;
|
||||
if ($c->stash->{job} =~ qr/^runCommandHook\..*/) {
|
||||
if (!$c->config->{dynamicruncommand}->{enable}) {
|
||||
$c->stash->{runcommandlogProblem} = "disabled-server";
|
||||
} elsif (!$c->stash->{project}->enable_dynamic_run_command) {
|
||||
$c->stash->{runcommandlogProblem} = "disabled-project";
|
||||
} elsif (!$c->stash->{jobset}->enable_dynamic_run_command) {
|
||||
$c->stash->{runcommandlogProblem} = "disabled-jobset";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -66,14 +77,16 @@ sub build_GET {
|
||||
|
||||
$c->stash->{template} = 'build.tt';
|
||||
$c->stash->{isLocalStore} = isLocalStore();
|
||||
# XXX: If the derivation is content-addressed then this will always return
|
||||
# false because `$_->path` will be empty
|
||||
$c->stash->{available} =
|
||||
$c->stash->{isLocalStore}
|
||||
? all { isValidPath($_->path) } $build->buildoutputs->all
|
||||
? all { $_->path && $MACHINE_LOCAL_STORE->isValidPath($_->path) } $build->buildoutputs->all
|
||||
: 1;
|
||||
$c->stash->{drvAvailable} = isValidPath $build->drvpath;
|
||||
$c->stash->{drvAvailable} = $MACHINE_LOCAL_STORE->isValidPath($build->drvpath);
|
||||
|
||||
if ($build->finished && $build->iscachedbuild) {
|
||||
my $path = ($build->buildoutputs)[0]->path or die;
|
||||
my $path = ($build->buildoutputs)[0]->path or undef;
|
||||
my $cachedBuildStep = findBuildStepByOutPath($self, $c, $path);
|
||||
if (defined $cachedBuildStep) {
|
||||
$c->stash->{cachedBuild} = $cachedBuildStep->build;
|
||||
@@ -81,26 +94,6 @@ sub build_GET {
|
||||
}
|
||||
}
|
||||
|
||||
if ($build->finished) {
|
||||
$c->stash->{prevBuilds} = [$c->model('DB::Builds')->search(
|
||||
{ project => $c->stash->{project}->name
|
||||
, jobset => $c->stash->{jobset}->name
|
||||
, job => $c->stash->{job}
|
||||
, 'me.system' => $build->system
|
||||
, finished => 1
|
||||
, buildstatus => 0
|
||||
, 'me.id' => { '<=' => $build->id }
|
||||
}
|
||||
, { join => "actualBuildStep"
|
||||
, "+select" => ["actualBuildStep.stoptime - actualBuildStep.starttime"]
|
||||
, "+as" => ["actualBuildTime"]
|
||||
, order_by => "me.id DESC"
|
||||
, rows => 50
|
||||
}
|
||||
)
|
||||
];
|
||||
}
|
||||
|
||||
# Get the first eval of which this build was a part.
|
||||
($c->stash->{nrEvals}) = $build->jobsetevals->search({ hasnewbuilds => 1 })->count;
|
||||
$c->stash->{eval} = getFirstEval($build);
|
||||
@@ -124,6 +117,19 @@ sub build_GET {
|
||||
$c->stash->{binaryCachePublicUri} = $c->config->{binary_cache_public_uri};
|
||||
}
|
||||
|
||||
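A pattern that recurs through this file: package-level functions imported from Nix::Store (isValidPath, queryReferences, topoSortPaths) are replaced by method calls on a store object. A hedged sketch of the shape of the change; where $MACHINE_LOCAL_STORE is constructed is not shown in this diff, so treat it as a placeholder for a local-store handle.

# Before: bare function operating on an implicit global store.
# my $ok = isValidPath($path);
# After: explicit method on a store handle, so the store in use is visible.
my $ok   = $MACHINE_LOCAL_STORE->isValidPath($path);
my @refs = $MACHINE_LOCAL_STORE->queryReferences($path);
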
sub constituents :Chained('buildChain') :PathPart('constituents') :Args(0) :ActionClass('REST') { }

sub constituents_GET {
my ($self, $c) = @_;

my $build = $c->stash->{build};

$self->status_ok(
$c,
entity => [$build->constituents_->search({}, {order_by => ["job"]})]
);
}


sub view_nixlog : Chained('buildChain') PathPart('nixlog') {
my ($self, $c, $stepnr, $mode) = @_;
@@ -133,23 +139,35 @@ sub view_nixlog : Chained('buildChain') PathPart('nixlog') {

$c->stash->{step} = $step;

showLog($c, $mode, $step->busy == 0, $step->drvpath);
my $drvPath = $step->drvpath;
my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath))]);
showLog($c, $mode, $log_uri);
}


sub view_log : Chained('buildChain') PathPart('log') {
my ($self, $c, $mode) = @_;
showLog($c, $mode, $c->stash->{build}->finished,
$c->stash->{build}->drvpath);

my $drvPath = $c->stash->{build}->drvpath;
my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath))]);
showLog($c, $mode, $log_uri);
}


sub view_runcommandlog : Chained('buildChain') PathPart('runcommandlog') {
my ($self, $c, $uuid, $mode) = @_;

my $log_uri = $c->uri_for($c->controller('Root')->action_for("runcommandlog"), $uuid);
showLog($c, $mode, $log_uri);
$c->stash->{template} = 'runcommand-log.tt';
$c->stash->{runcommandlog} = $c->stash->{build}->runcommandlogs->find({ uuid => $uuid });
}


sub showLog {
my ($c, $mode, $finished, $drvPath) = @_;
my ($c, $mode, $log_uri) = @_;
$mode //= "pretty";

my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [basename($drvPath)]);

if ($mode eq "pretty") {
$c->stash->{log_uri} = $log_uri;
$c->stash->{template} = 'log.tt';
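The refactor above lifts log-URI construction out of showLog and percent-encodes the derivation basename first. A small sketch of why the encoding matters; the store path is invented.

use File::Basename;
use WWW::Form::UrlEncoded::PP qw();

# '+' is common in derivation names (gtk+, libc++) and is significant in
# URLs, where an unencoded '+' may be decoded as a space.
my $drvPath = "/nix/store/abc123-gtk+3-3.24.0.drv";
my $encoded = WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath));
# "abc123-gtk%2B3-3.24.0.drv" -- safe to splice into the /log/... URI.
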
@@ -216,17 +234,24 @@ sub serveFile {
}

elsif ($ls->{type} eq "regular") {
# Have the hosted data considered its own origin to avoid being a giant
# XSS hole.
$c->response->header('Content-Security-Policy' => 'sandbox allow-scripts');

$c->stash->{'plain'} = { data => grab(cmd => ["nix", "--experimental-features", "nix-command",
"cat-store", "--store", getStoreUri(), "$path"]) };
$c->stash->{'plain'} = { data => readIntoSocket(cmd => ["nix", "--experimental-features", "nix-command",
"store", "cat", "--store", getStoreUri(), "$path"]) };

# Detect MIME type. Borrowed from Catalyst::Plugin::Static::Simple.
# Detect MIME type.
my $type = "text/plain";
if ($path =~ /.*\.(\S{1,})$/xms) {
my $ext = $1;
my $mimeTypes = MIME::Types->new(only_complete => 1);
my $t = $mimeTypes->mimeTypeOf($ext);
$type = ref $t ? $t->type : $t if $t;
} else {
state $magic = File::LibMagic->new(follow_symlinks => 1);
my $info = $magic->info_from_filename($path);
$type = $info->{mime_with_encoding};
}
$c->response->content_type($type);
$c->forward('Hydra::View::Plain');
@@ -272,29 +297,7 @@ sub download : Chained('buildChain') PathPart {
my $path = $product->path;
$path .= "/" . join("/", @path) if scalar @path > 0;

if (isLocalStore) {

notFound($c, "File '" . $product->path . "' does not exist.") unless -e $product->path;

# Make sure the file is in the Nix store.
$path = checkPath($self, $c, $path);

# If this is a directory but no "/" is attached, then redirect.
if (-d $path && substr($c->request->uri, -1) ne "/") {
return $c->res->redirect($c->request->uri . "/");
}

$path = "$path/index.html" if -d $path && -e "$path/index.html";

notFound($c, "File '$path' does not exist.") if !-e $path;

notFound($c, "Path '$path' is a directory.") if -d $path;

$c->serve_static_file($path);

} else {
serveFile($c, $path);
}
serveFile($c, $path);

$c->response->headers->last_modified($c->stash->{build}->stoptime);
}
@@ -307,7 +310,7 @@ sub output : Chained('buildChain') PathPart Args(1) {
error($c, "This build is not finished yet.") unless $build->finished;
my $output = $build->buildoutputs->find({name => $outputName});
notFound($c, "This build has no output named ‘$outputName’") unless defined $output;
gone($c, "Output is no longer available.") unless isValidPath $output->path;
gone($c, "Output is no longer available.") unless $MACHINE_LOCAL_STORE->isValidPath($output->path);

$c->response->header('Content-Disposition', "attachment; filename=\"build-${\$build->id}-${\$outputName}.nar.bz2\"");
$c->stash->{current_view} = 'NixNAR';
@@ -350,7 +353,7 @@ sub contents : Chained('buildChain') PathPart Args(1) {

# FIXME: don't use shell invocations below.

# FIXME: use nix cat-store
# FIXME: use nix store cat

my $res;

@@ -424,7 +427,7 @@ sub getDependencyGraph {
};
$$done{$path} = $node;
my @refs;
foreach my $ref (queryReferences($path)) {
foreach my $ref ($MACHINE_LOCAL_STORE->queryReferences($path)) {
next if $ref eq $path;
next unless $runtime || $ref =~ /\.drv$/;
getDependencyGraph($self, $c, $runtime, $done, $ref);
@@ -432,7 +435,7 @@ sub getDependencyGraph {
}
# Show in reverse topological order to flatten the graph.
# Should probably do a proper BFS.
my @sorted = reverse topoSortPaths(@refs);
my @sorted = reverse $MACHINE_LOCAL_STORE->topoSortPaths(@refs);
$node->{refs} = [map { $$done{$_} } @sorted];
}

@@ -445,7 +448,7 @@ sub build_deps : Chained('buildChain') PathPart('build-deps') {
my $build = $c->stash->{build};
my $drvPath = $build->drvpath;

error($c, "Derivation no longer available.") unless isValidPath $drvPath;
error($c, "Derivation no longer available.") unless $MACHINE_LOCAL_STORE->isValidPath($drvPath);

$c->stash->{buildTimeGraph} = getDependencyGraph($self, $c, 0, {}, $drvPath);

@@ -460,7 +463,7 @@ sub runtime_deps : Chained('buildChain') PathPart('runtime-deps') {

requireLocalStore($c);

error($c, "Build outputs no longer available.") unless all { isValidPath($_) } @outPaths;
error($c, "Build outputs no longer available.") unless all { $MACHINE_LOCAL_STORE->isValidPath($_) } @outPaths;

my $done = {};
$c->stash->{runtimeGraph} = [ map { getDependencyGraph($self, $c, 1, $done, $_) } @outPaths ];
@@ -480,7 +483,7 @@ sub nix : Chained('buildChain') PathPart('nix') CaptureArgs(0) {
if (isLocalStore) {
foreach my $out ($build->buildoutputs) {
notFound($c, "Path " . $out->path . " is no longer available.")
unless isValidPath($out->path);
unless $MACHINE_LOCAL_STORE->isValidPath($out->path);
}
}

@@ -495,7 +498,7 @@ sub restart : Chained('buildChain') PathPart Args(0) {
my ($self, $c) = @_;
my $build = $c->stash->{build};
requireRestartPrivileges($c, $build->project);
my $n = restartBuilds($c->model('DB')->schema, $c->model('DB::Builds')->search({ id => $build->id }));
my $n = restartBuilds($c->model('DB')->schema, $c->model('DB::Builds')->search_rs({ id => $build->id }));
error($c, "This build cannot be restarted.") if $n != 1;
$c->flash->{successMsg} = "Build has been restarted.";
$c->res->redirect($c->uri_for($self->action_for("build"), $c->req->captures));
@@ -506,7 +509,7 @@ sub cancel : Chained('buildChain') PathPart Args(0) {
my ($self, $c) = @_;
my $build = $c->stash->{build};
requireCancelBuildPrivileges($c, $build->project);
my $n = cancelBuilds($c->model('DB')->schema, $c->model('DB::Builds')->search({ id => $build->id }));
my $n = cancelBuilds($c->model('DB')->schema, $c->model('DB::Builds')->search_rs({ id => $build->id }));
error($c, "This build cannot be cancelled.") if $n != 1;
$c->flash->{successMsg} = "Build has been cancelled.";
$c->res->redirect($c->uri_for($self->action_for("build"), $c->req->captures));
@@ -578,7 +581,7 @@ sub evals : Chained('buildChain') PathPart('evals') Args(0) {
$c->stash->{page} = $page;
$c->stash->{resultsPerPage} = $resultsPerPage;
$c->stash->{total} = $evals->search({hasnewbuilds => 1})->count;
$c->stash->{evals} = getEvals($self, $c, $evals, ($page - 1) * $resultsPerPage, $resultsPerPage)
$c->stash->{evals} = getEvals($c, $evals, ($page - 1) * $resultsPerPage, $resultsPerPage)
}


@@ -22,8 +22,7 @@ sub channel : Chained('/') PathPart('channel/custom') CaptureArgs(3) {

my $lastSuccessful = $c->model('DB::Builds')->find(
{ 'eval.hasnewbuilds' => 1
, project => $projectName
, jobset => $jobsetName
, jobset_id => $c->stash->{jobset}->id,
, job => $channelName
, buildstatus => 0
},

@@ -6,6 +6,7 @@ use warnings;
use base 'Hydra::Base::Controller::ListBuilds';
use Hydra::Helper::Nix;
use Hydra::Helper::CatalystUtils;
use JSON::MaybeXS;
use Net::Prometheus;

sub job : Chained('/') PathPart('job') CaptureArgs(3) {
@@ -50,7 +51,7 @@ sub shield :Chained('job') PathPart('shield') Args(0) {

$c->response->content_type('application/json');
$c->stash->{'plain'} = {
data => scalar (JSON::Any->objToJson(
data => scalar (encode_json(
{
schemaVersion => 1,
label => "hydra build",
@@ -68,7 +69,7 @@ sub prometheus : Chained('job') PathPart('prometheus') Args(0) {

my $lastBuild = $c->stash->{jobset}->builds->find(
{ job => $c->stash->{job}, finished => 1 },
{ order_by => 'id DESC', rows => 1, columns => [@buildListColumns] }
{ order_by => 'id DESC', rows => 1, columns => ["stoptime", "buildstatus", "closuresize", "size"] }
);

$prometheus->new_counter(
@@ -91,6 +92,26 @@ sub prometheus : Chained('job') PathPart('prometheus') Args(0) {
$c->stash->{job},
)->inc($lastBuild->buildstatus > 0);

$prometheus->new_gauge(
name => "hydra_build_closure_size",
help => "Closure size of the last job's build in bytes",
labels => [ "project", "jobset", "job" ]
)->labels(
$c->stash->{project}->name,
$c->stash->{jobset}->name,
$c->stash->{job},
)->inc($lastBuild->closuresize);

$prometheus->new_gauge(
name => "hydra_build_output_size",
help => "Output size of the last job's build in bytes",
labels => [ "project", "jobset", "job" ]
)->labels(
$c->stash->{project}->name,
$c->stash->{jobset}->name,
$c->stash->{job},
)->inc($lastBuild->size);

$c->stash->{'plain'} = { data => $prometheus->render };
$c->forward('Hydra::View::Plain');
}
@@ -121,10 +142,10 @@ sub overview : Chained('job') PathPart('') Args(0) {

my $aggregates = {};
my %constituentJobs;
foreach my $b (@constituents) {
$aggregates->{$b->get_column('aggregate')}->{constituents}->{$b->job} =
{ id => $b->id, finished => $b->finished, buildstatus => $b->buildstatus };
$constituentJobs{$b->job} = 1;
foreach my $build (@constituents) {
$aggregates->{$build->get_column('aggregate')}->{constituents}->{$build->job} =
{ id => $build->id, finished => $build->finished, buildstatus => $build->buildstatus };
$constituentJobs{$build->job} = 1;
}

foreach my $agg (keys %$aggregates) {
@@ -144,7 +165,7 @@ sub overview : Chained('job') PathPart('') Args(0) {
}


sub metrics_tab : Chained('job') PathPart('metrics-tab') Args(0) {
sub metrics_tab : Chained('job') PathPart('metric-tab') Args(0) {
my ($self, $c) = @_;
$c->stash->{template} = 'job-metrics-tab.tt';
$c->stash->{metrics} = [ $c->stash->{jobset}->buildmetrics->search(

@@ -41,7 +41,7 @@ sub jobset_GET {

$c->stash->{template} = 'jobset.tt';

$c->stash->{evals} = getEvals($self, $c, scalar $c->stash->{jobset}->jobsetevals, 0, 10);
$c->stash->{evals} = getEvals($c, scalar $c->stash->{jobset}->jobsetevals, 0, 10);

$c->stash->{latestEval} = $c->stash->{jobset}->jobsetevals->search({ hasnewbuilds => 1 }, { rows => 1, order_by => ["id desc"] })->single;

@@ -213,6 +213,22 @@ sub checkInputValue {
}


sub knownInputTypes {
my ($c) = @_;

my @keys = keys %{$c->stash->{inputTypes}};
my $types = "";
my $counter = 0;

foreach my $key (@keys) {
$types = $types . "and ‘$key’" if ++$counter == scalar(@keys);
$types = $types . "‘$key’, " if $counter != scalar(@keys);
}

return $types;
}


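The knownInputTypes helper above feeds the friendlier badRequest message in the input-validation hunk below. A standalone re-run of its loop, with invented type names, to show the string it builds:

my @keys = ('git', 'path', 'string');   # stand-ins for the configured input types
my ($types, $counter) = ("", 0);
foreach my $key (@keys) {
    $types = $types . "and ‘$key’" if ++$counter == scalar(@keys);
    $types = $types . "‘$key’, " if $counter != scalar(@keys);
}
# $types is now: ‘git’, ‘path’, and ‘string’
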
sub updateJobset {
my ($c, $jobset) = @_;

@@ -223,7 +239,7 @@ sub updateJobset {
error($c, "Cannot rename jobset to ‘$jobsetName’ since that identifier is already taken.")
if $jobsetName ne $oldName && defined $c->stash->{project}->jobsets->find({ name => $jobsetName });

my $type = int($c->stash->{params}->{"type"}) // 0;
my $type = int($c->stash->{params}->{"type"} // 0);

my ($nixExprPath, $nixExprInput);
my $flake;
@@ -231,7 +247,7 @@ sub updateJobset {
if ($type == 0) {
($nixExprPath, $nixExprInput) = nixExprPathFromParams $c;
} elsif ($type == 1) {
$flake = trim($c->stash->{params}->{"flakeref"});
$flake = trim($c->stash->{params}->{"flake"});
error($c, "Invalid flake URI ‘$flake’.") if $flake !~ /^[a-zA-Z]/;
} else {
error($c, "Invalid jobset type.");
@@ -245,6 +261,14 @@ sub updateJobset {

my $checkinterval = int(trim($c->stash->{params}->{checkinterval}));

my $enable_dynamic_run_command = defined $c->stash->{params}->{enable_dynamic_run_command} ? 1 : 0;
if ($enable_dynamic_run_command
&& !($c->config->{dynamicruncommand}->{enable}
&& $jobset->project->enable_dynamic_run_command))
{
badRequest($c, "Dynamic RunCommand is not enabled by the server or the parent project.");
}

$jobset->update(
{ name => $jobsetName
, description => trim($c->stash->{params}->{"description"})
@@ -252,9 +276,10 @@ sub updateJobset {
, nixexprinput => $nixExprInput
, enabled => $enabled
, enableemail => defined $c->stash->{params}->{enableemail} ? 1 : 0
, enable_dynamic_run_command => $enable_dynamic_run_command
, emailoverride => trim($c->stash->{params}->{emailoverride}) || ""
, hidden => defined $c->stash->{params}->{visible} ? 0 : 1
, keepnr => int(trim($c->stash->{params}->{keepnr}))
, keepnr => int(trim($c->stash->{params}->{keepnr} // "0"))
, checkinterval => $checkinterval
, triggertime => ($enabled && $checkinterval > 0) ? $jobset->triggertime // time() : undef
, schedulingshares => $shares
@@ -275,9 +300,10 @@ sub updateJobset {
my $type = $inputData->{type};
my $value = $inputData->{value};
my $emailresponsible = defined $inputData->{emailresponsible} ? 1 : 0;
my $types = knownInputTypes($c);

error($c, "Invalid input name ‘$name’.") unless $name =~ /^[[:alpha:]][\w-]*$/;
error($c, "Invalid input type ‘$type’.") unless defined $c->stash->{inputTypes}->{$type};
badRequest($c, "Invalid input name ‘$name’.") unless $name =~ /^[[:alpha:]][\w-]*$/;
badRequest($c, "Invalid input type ‘$type’; valid types: $types.") unless defined $c->stash->{inputTypes}->{$type};

my $input = $jobset->jobsetinputs->create(
{ name => $name,
@@ -320,7 +346,7 @@ sub evals_GET {
$c->stash->{resultsPerPage} = $resultsPerPage;
$c->stash->{total} = $evals->search({hasnewbuilds => 1})->count;
my $offset = ($page - 1) * $resultsPerPage;
$c->stash->{evals} = getEvals($self, $c, $evals, $offset, $resultsPerPage);
$c->stash->{evals} = getEvals($c, $evals, $offset, $resultsPerPage);
my %entity = (
evals => [ map { $_->{eval} } @{$c->stash->{evals}} ],
first => "?page=1",
@@ -338,6 +364,21 @@ sub evals_GET {
);
}

sub errors :Chained('jobsetChain') :PathPart('errors') :Args(0) :ActionClass('REST') { }

sub errors_GET {
my ($self, $c) = @_;

$c->stash->{template} = 'eval-error.tt';

my $jobsetName = $c->stash->{params}->{name};
$c->stash->{jobset} = $c->stash->{project}->jobsets->find(
{ name => $jobsetName },
{ '+columns' => { 'errormsg' => 'errormsg' } }
);

$self->status_ok($c, entity => $c->stash->{jobset});
}

# Redirect to the latest finished evaluation of this jobset.
sub latest_eval : Chained('jobsetChain') PathPart('latest-eval') {

@@ -6,7 +6,8 @@ use warnings;
use base 'Hydra::Base::Controller::NixChannel';
use Hydra::Helper::Nix;
use Hydra::Helper::CatalystUtils;
use List::MoreUtils qw(uniq);
use Hydra::Helper::BuildDiff;
use List::SomeUtils qw(uniq);


sub evalChain : Chained('/') PathPart('eval') CaptureArgs(1) {
@@ -63,63 +64,21 @@ sub view_GET {

$c->stash->{otherEval} = $eval2 if defined $eval2;

sub cmpBuilds {
my ($a, $b) = @_;
return $a->get_column('job') cmp $b->get_column('job')
|| $a->get_column('system') cmp $b->get_column('system')
}

my @builds = $eval->builds->search($filter, { columns => [@buildListColumns] });
my @builds2 = defined $eval2 ? $eval2->builds->search($filter, { columns => [@buildListColumns] }) : ();

@builds = sort { cmpBuilds($a, $b) } @builds;
@builds2 = sort { cmpBuilds($a, $b) } @builds2;

$c->stash->{stillSucceed} = [];
$c->stash->{stillFail} = [];
$c->stash->{nowSucceed} = [];
$c->stash->{nowFail} = [];
$c->stash->{new} = [];
$c->stash->{removed} = [];
$c->stash->{unfinished} = [];
$c->stash->{aborted} = [];

my $n = 0;
foreach my $build (@builds) {
my $aborted = $build->finished != 0 && ($build->buildstatus == 3 || $build->buildstatus == 4);
my $d;
my $found = 0;
while ($n < scalar(@builds2)) {
my $build2 = $builds2[$n];
my $d = cmpBuilds($build, $build2);
last if $d == -1;
if ($d == 0) {
$n++;
$found = 1;
if ($aborted) {
# do nothing
} elsif ($build->finished == 0 || $build2->finished == 0) {
push @{$c->stash->{unfinished}}, $build;
} elsif ($build->buildstatus == 0 && $build2->buildstatus == 0) {
push @{$c->stash->{stillSucceed}}, $build;
} elsif ($build->buildstatus != 0 && $build2->buildstatus != 0) {
push @{$c->stash->{stillFail}}, $build;
} elsif ($build->buildstatus == 0 && $build2->buildstatus != 0) {
push @{$c->stash->{nowSucceed}}, $build;
} elsif ($build->buildstatus != 0 && $build2->buildstatus == 0) {
push @{$c->stash->{nowFail}}, $build;
} else { die; }
last;
}
push @{$c->stash->{removed}}, { job => $build2->get_column('job'), system => $build2->get_column('system') };
$n++;
}
if ($aborted) {
push @{$c->stash->{aborted}}, $build;
} else {
push @{$c->stash->{new}}, $build if !$found;
}
}
my $diff = buildDiff([@builds], [@builds2]);
$c->stash->{stillSucceed} = $diff->{stillSucceed};
$c->stash->{stillFail} = $diff->{stillFail};
$c->stash->{nowSucceed} = $diff->{nowSucceed};
$c->stash->{nowFail} = $diff->{nowFail};
$c->stash->{new} = $diff->{new};
$c->stash->{removed} = $diff->{removed};
$c->stash->{unfinished} = $diff->{unfinished};
$c->stash->{aborted} = $diff->{aborted};
$c->stash->{totalAborted} = $diff->{totalAborted};
$c->stash->{totalFailed} = $diff->{totalFailed};
$c->stash->{totalQueued} = $diff->{totalQueued};

$c->stash->{full} = ($c->req->params->{full} || "0") eq "1";

@@ -129,6 +88,17 @@ sub view_GET {
);
}

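The view_GET hunk above swaps the hand-rolled comparison loop for buildDiff from the new Hydra::Helper::BuildDiff module. Judging from the stash assignments, the helper returns a single hashref; a sketch of the expected shape (key names are taken from the code above, everything else is assumption):

my $diff = buildDiff([@builds], [@builds2]);
# {
#   stillSucceed => [...], stillFail => [...],
#   nowSucceed   => [...], nowFail   => [...],
#   new => [...], removed => [...], unfinished => [...], aborted => [...],
#   totalAborted => $n,  totalFailed => $n,  totalQueued => $n,
# }
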
sub errors :Chained('evalChain') :PathPart('errors') :Args(0) :ActionClass('REST') { }

sub errors_GET {
my ($self, $c) = @_;

$c->stash->{template} = 'eval-error.tt';

$c->stash->{eval} = $c->model('DB::JobsetEvals')->find($c->stash->{eval}->id, { prefetch => 'evaluationerror' });

$self->status_ok($c, entity => $c->stash->{eval});
}

sub create_jobset : Chained('evalChain') PathPart('create-jobset') Args(0) {
my ($self, $c) = @_;
@@ -144,7 +114,7 @@ sub create_jobset : Chained('evalChain') PathPart('create-jobset') Args(0) {
sub cancel : Chained('evalChain') PathPart('cancel') Args(0) {
my ($self, $c) = @_;
requireCancelBuildPrivileges($c, $c->stash->{project});
my $n = cancelBuilds($c->model('DB')->schema, $c->stash->{eval}->builds);
my $n = cancelBuilds($c->model('DB')->schema, $c->stash->{eval}->builds->search_rs({}));
$c->flash->{successMsg} = "$n builds have been cancelled.";
$c->res->redirect($c->uri_for($c->controller('JobsetEval')->action_for('view'), $c->req->captures));
}
@@ -153,7 +123,7 @@ sub cancel : Chained('evalChain') PathPart('cancel') Args(0) {
sub restart {
my ($self, $c, $condition) = @_;
requireRestartPrivileges($c, $c->stash->{project});
my $builds = $c->stash->{eval}->builds->search({ finished => 1, buildstatus => $condition });
my $builds = $c->stash->{eval}->builds->search_rs({ finished => 1, buildstatus => $condition });
my $n = restartBuilds($c->model('DB')->schema, $builds);
$c->flash->{successMsg} = "$n builds have been restarted.";
$c->res->redirect($c->uri_for($c->controller('JobsetEval')->action_for('view'), $c->req->captures));

@@ -78,8 +78,8 @@ sub project_DELETE {
requireProjectOwner($c, $c->stash->{project});

$c->model('DB')->schema->txn_do(sub {
$c->stash->{project}->jobsetevals->delete;
$c->stash->{project}->builds->delete;
$c->stash->{project}->jobsets->delete;
$c->stash->{project}->delete;
});

@@ -126,6 +126,7 @@ sub create_jobset : Chained('projectChain') PathPart('create-jobset') Args(0) {
$c->stash->{template} = 'edit-jobset.tt';
$c->stash->{create} = 1;
$c->stash->{totalShares} = getTotalShares($c->model('DB')->schema);
$c->stash->{emailNotification} = $c->config->{email_notification} // 0;
}


@@ -135,7 +136,7 @@ sub updateProject {
my $owner = $project->owner;
if ($c->check_user_roles('admin') and defined $c->stash->{params}->{owner}) {
$owner = trim $c->stash->{params}->{owner};
error($c, "The user name ‘$owner’ does not exist.")
badRequest($c, "The user name ‘$owner’ does not exist.")
unless defined $c->model('DB::Users')->find($owner);
}

@@ -148,6 +149,11 @@ sub updateProject {
my $displayName = trim $c->stash->{params}->{displayname};
error($c, "You must specify a display name.") if $displayName eq "";

my $enable_dynamic_run_command = defined $c->stash->{params}->{enable_dynamic_run_command} ? 1 : 0;
if ($enable_dynamic_run_command && !$c->config->{dynamicruncommand}->{enable}) {
badRequest($c, "Dynamic RunCommand is not enabled by the server.");
}

$project->update(
{ name => $projectName
, displayname => $displayName
@@ -156,11 +162,14 @@ sub updateProject {
, enabled => defined $c->stash->{params}->{enabled} ? 1 : 0
, hidden => defined $c->stash->{params}->{visible} ? 0 : 1
, owner => $owner
, declfile => trim($c->stash->{params}->{declfile})
, decltype => trim($c->stash->{params}->{decltype})
, declvalue => trim($c->stash->{params}->{declvalue})
, enable_dynamic_run_command => $enable_dynamic_run_command
, declfile => trim($c->stash->{params}->{declarative}->{file})
, decltype => trim($c->stash->{params}->{declarative}->{type})
, declvalue => trim($c->stash->{params}->{declarative}->{value})
});
if (length($project->declfile)) {
# This logic also exists in the DeclarativeJobsets tests.
# TODO: refactor and deduplicate.
$project->jobsets->update_or_create(
{ name=> ".jobsets"
, nixexprinput => ""
@@ -168,6 +177,12 @@ sub updateProject {
, emailoverride => ""
, triggertime => time
});
} else {
$project->jobsets->search({ name => ".jobsets" })->delete;
$project->update(
{ decltype => ""
, declvalue => ""
});
}
}

@@ -7,15 +7,20 @@ use base 'Hydra::Base::Controller::ListBuilds';
use Hydra::Helper::Nix;
use Hydra::Helper::CatalystUtils;
use Hydra::View::TT;
use Digest::SHA1 qw(sha1_hex);
use Nix::Store;
use Nix::Config;
use Encode;
use File::Basename;
use JSON;
use JSON::MaybeXS;
use List::Util qw[min max];
use List::MoreUtils qw{any};
use List::SomeUtils qw{any};
use Net::Prometheus;
use Types::Standard qw/StrMatch/;
use WWW::Form::UrlEncoded::PP qw();

use constant NARINFO_REGEX => qr{^([a-z0-9]{32})\.narinfo$};
# e.g.: https://hydra.example.com/realisations/sha256:a62128132508a3a32eef651d6467695944763602f226ac630543e947d9feb140!out.doi
use constant REALISATIONS_REGEX => qr{^(sha256:[a-z0-9]{64}![a-z]+)\.doi$};

# Put this controller at top-level.
__PACKAGE__->config->{namespace} = '';
@@ -30,6 +35,7 @@ sub noLoginNeeded {

return $whitelisted ||
$c->request->path eq "api/push-github" ||
$c->request->path eq "api/push-gitea" ||
$c->request->path eq "google-login" ||
$c->request->path eq "github-redirect" ||
$c->request->path eq "github-login" ||
@@ -45,11 +51,13 @@ sub begin :Private {
$c->stash->{curUri} = $c->request->uri;
$c->stash->{version} = $ENV{"HYDRA_RELEASE"} || "<devel>";
$c->stash->{nixVersion} = $ENV{"NIX_RELEASE"} || "<devel>";
$c->stash->{nixEvalJobsVersion} = $ENV{"NIX_EVAL_JOBS_RELEASE"} || "<devel>";
$c->stash->{curTime} = time;
$c->stash->{logo} = defined $c->config->{hydra_logo} ? "/logo" : "";
$c->stash->{tracker} = $ENV{"HYDRA_TRACKER"};
$c->stash->{tracker} = defined $c->config->{tracker} ? $c->config->{tracker} : "";
$c->stash->{flashMsg} = $c->flash->{flashMsg};
$c->stash->{successMsg} = $c->flash->{successMsg};
$c->stash->{localStore} = isLocalStore;

$c->stash->{isPrivateHydra} = $c->config->{private} // "0" ne "0";

@@ -75,9 +83,9 @@ sub begin :Private {
$_->supportedInputTypes($c->stash->{inputTypes}) foreach @{$c->hydra_plugins};

# XSRF protection: require POST requests to have the same origin.
if ($c->req->method eq "POST" && $c->req->path ne "api/push-github") {
my $referer = $c->req->header('Origin');
$referer //= $c->req->header('Referer');
if ($c->req->method eq "POST" && $c->req->path ne "api/push-github" && $c->req->path ne "api/push-gitea") {
my $referer = $c->req->header('Referer');
$referer //= $c->req->header('Origin');
my $base = $c->req->base;
die unless $base =~ /\/$/;
$referer .= "/";
@@ -104,7 +112,7 @@ sub deserialize :ActionClass('Deserialize') { }
sub index :Path :Args(0) {
my ($self, $c) = @_;
$c->stash->{template} = 'overview.tt';
$c->stash->{projects} = [$c->model('DB::Projects')->search({}, {order_by => 'name'})];
$c->stash->{projects} = [$c->model('DB::Projects')->search({}, {order_by => ['enabled DESC', 'name']})];
$c->stash->{newsItems} = [$c->model('DB::NewsItems')->search({}, { order_by => ['createtime DESC'], rows => 5 })];
$self->status_ok($c,
entity => $c->stash->{projects}
@@ -134,8 +142,9 @@ sub queue_summary :Local :Path('queue-summary') :Args(0) {
$c->stash->{template} = 'queue-summary.tt';

$c->stash->{queued} = dbh($c)->selectall_arrayref(
"select project, jobset, count(*) as queued, min(timestamp) as oldest, max(timestamp) as newest from Builds " .
"where finished = 0 group by project, jobset order by queued desc",
"select jobsets.project as project, jobsets.name as jobset, count(*) as queued, min(timestamp) as oldest, max(timestamp) as newest from Builds " .
"join Jobsets jobsets on jobsets.id = builds.jobset_id " .
"where finished = 0 group by jobsets.project, jobsets.name order by queued desc",
{ Slice => {} });

$c->stash->{systems} = dbh($c)->selectall_arrayref(
@@ -154,7 +163,7 @@ sub status_GET {
{ "buildsteps.busy" => { '!=', 0 } },
{ order_by => ["globalpriority DESC", "id"],
join => "buildsteps",
columns => [@buildListColumns]
columns => [@buildListColumns, 'buildsteps.drvpath', 'buildsteps.type']
})]
);
}
@@ -166,7 +175,7 @@ sub queue_runner_status_GET {
my ($self, $c) = @_;

#my $status = from_json($c->model('DB::SystemStatus')->find('queue-runner')->status);
my $status = from_json(`hydra-queue-runner --status`);
my $status = decode_json(`hydra-queue-runner --status`);
if ($?) { $status->{status} = "unknown"; }
my $json = JSON->new->pretty()->canonical();

@@ -197,8 +206,10 @@ sub machines :Local Args(0) {

$c->stash->{machines} = $machines;
$c->stash->{steps} = dbh($c)->selectall_arrayref(
"select build, stepnr, s.system as system, s.drvpath as drvpath, machine, s.starttime as starttime, project, jobset, job, s.busy as busy " .
"from BuildSteps s join Builds b on s.build = b.id " .
"select build, stepnr, s.system as system, s.drvpath as drvpath, machine, s.starttime as starttime, jobsets.project as project, jobsets.name as jobset, job, s.busy as busy " .
"from BuildSteps s " .
"join Builds b on s.build = b.id " .
"join Jobsets jobsets on jobsets.id = b.jobset_id " .
"where busy != 0 order by machine, stepnr",
{ Slice => {} });
$c->stash->{template} = 'machine-status.tt';
@@ -321,7 +332,7 @@ sub nar :Local :Args(1) {
else {
$path = $Nix::Config::storeDir . "/$path";

gone($c, "Path " . $path . " is no longer available.") unless isValidPath($path);
gone($c, "Path " . $path . " is no longer available.") unless $MACHINE_LOCAL_STORE->isValidPath($path);

$c->stash->{current_view} = 'NixNAR';
$c->stash->{storePath} = $path;
@@ -350,18 +361,45 @@ sub nix_cache_info :Path('nix-cache-info') :Args(0) {
}


sub narinfo :LocalRegex('^([a-z0-9]+).narinfo$') :Args(0) {
my ($self, $c) = @_;
sub realisations :Path('realisations') :Args(StrMatch[REALISATIONS_REGEX]) {
my ($self, $c, $realisation) = @_;

if (!isLocalStore) {
notFound($c, "There is no binary cache here.");
}

else {
my $hash = $c->req->captures->[0];
my ($rawDrvOutput) = $realisation =~ REALISATIONS_REGEX;
my $rawRealisation = $MACHINE_LOCAL_STORE->queryRawRealisation($rawDrvOutput);

die if length($hash) != 32;
my $path = queryPathFromHashPart($hash);
if (!$rawRealisation) {
$c->response->status(404);
$c->response->content_type('text/plain');
$c->stash->{plain}->{data} = "does not exist\n";
$c->forward('Hydra::View::Plain');
setCacheHeaders($c, 60 * 60);
return;
}

$c->response->content_type('text/plain');
$c->stash->{plain}->{data} = $rawRealisation;
$c->forward('Hydra::View::Plain');
}
}


sub narinfo :Path :Args(StrMatch[NARINFO_REGEX]) {
my ($self, $c, $narinfo) = @_;

if (!isLocalStore) {
notFound($c, "There is no binary cache here.");
}

else {
my ($hash) = $narinfo =~ NARINFO_REGEX;

die("Hash length was not 32") if length($hash) != 32;
my $path = $MACHINE_LOCAL_STORE->queryPathFromHashPart($hash);

if (!$path) {
$c->response->status(404);
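Both new actions above dispatch on a Types::Standard StrMatch constraint instead of the old :LocalRegex capture, then re-match inside the body to extract the capture group. A sketch with an invented hash; real store-path hashes use the Nix base-32 alphabet, but the regex only requires [a-z0-9]{32}:

use constant NARINFO_REGEX => qr{^([a-z0-9]{32})\.narinfo$};

my $arg = "0123456789abcdefghijklmnopqrstuv.narinfo";   # hypothetical request arg
if (my ($hash) = $arg =~ NARINFO_REGEX) {
    # $hash is the 32-character hash part, ready for queryPathFromHashPart.
}
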
@@ -399,7 +437,7 @@ sub evals :Local Args(0) {
$c->stash->{page} = $page;
$c->stash->{resultsPerPage} = $resultsPerPage;
$c->stash->{total} = $evals->search({hasnewbuilds => 1})->count;
$c->stash->{evals} = getEvals($self, $c, $evals, ($page - 1) * $resultsPerPage, $resultsPerPage);
$c->stash->{evals} = getEvals($c, $evals, ($page - 1) * $resultsPerPage, $resultsPerPage);

$self->status_ok($c, entity => $c->stash->{evals});
}
@@ -466,8 +504,10 @@ sub search :Local Args(0) {
, "jobset.hidden" => 0
, iscurrent => 1
},
{ order_by => ["project", "jobset", "job"], join => ["project", "jobset"]
, rows => $c->stash->{limit} + 1
{
order_by => ["jobset.project", "jobset.name", "job"],
join => { "jobset" => "project" },
rows => $c->stash->{limit} + 1
} )
];

@@ -517,10 +557,29 @@ sub log :Local :Args(1) {
my $logPrefix = $c->config->{log_prefix};

if (defined $logPrefix) {
$c->res->redirect($logPrefix . "log/" . basename($drvPath));
$c->res->redirect($logPrefix . "log/" . WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath)));
} else {
notFound($c, "The build log of $drvPath is not available.");
}
}

sub runcommandlog :Local :Args(1) {
my ($self, $c, $uuid) = @_;

my $tail = $c->request->params->{"tail"};

die if defined $tail && $tail !~ /^[0-9]+$/;

my $runlog = $c->model('DB')->resultset('RunCommandLogs')->find({ uuid => $uuid })
or notFound($c, "The RunCommand log is not available.");

my $logFile = constructRunCommandLogPath($runlog);
if (-f $logFile) {
serveLogFile($c, $logFile, $tail);
return;
} else {
notFound($c, "The RunCommand log is not available.");
}
}

1;

@@ -4,14 +4,15 @@ use utf8;
use strict;
use warnings;
use base 'Hydra::Base::Controller::REST';
use File::Slurp;
use File::Slurper qw(read_text);
use Crypt::RandPasswd;
use Digest::SHA1 qw(sha1_hex);
use Hydra::Config qw(getLDAPConfigAmbient);
use Hydra::Helper::Nix;
use Hydra::Helper::CatalystUtils;
use Hydra::Helper::Email;
use LWP::UserAgent;
use JSON;
use JSON::MaybeXS;
use HTML::Entities;
use Encode qw(decode);

@@ -27,8 +28,8 @@ sub login_POST {
my $username = $c->stash->{params}->{username} // "";
my $password = $c->stash->{params}->{password} // "";

error($c, "You must specify a user name.") if $username eq "";
error($c, "You must specify a password.") if $password eq "";
badRequest($c, "You must specify a user name.") if $username eq "";
badRequest($c, "You must specify a password.") if $password eq "";

if ($c->get_auth_realm('ldap') && $c->authenticate({username => $username, password => $password}, 'ldap')) {
doLDAPLogin($self, $c, $username);
@@ -37,7 +38,11 @@ sub login_POST {
accessDenied($c, "Bad username or password.")
}

currentUser_GET($self, $c);
$self->status_found(
$c,
location => $c->uri_for("current-user"),
entity => $c->model("DB::Users")->find($c->user->username)
);
}


@@ -52,10 +57,10 @@ sub logout_POST {

sub doLDAPLogin {
my ($self, $c, $username) = @_;

my $user = $c->find_user({ username => $username });
my $LDAPUser = $c->find_user({ username => $username }, 'ldap');
my @LDAPRoles = grep { (substr $_, 0, 5) eq "hydra" } $LDAPUser->roles;
my @LDAPRoles = $LDAPUser->roles;
my $role_mapping = getLDAPConfigAmbient()->{"role_mapping"};

if (!$user) {
$c->model('DB::Users')->create(
@@ -75,8 +80,13 @@ sub doLDAPLogin {
});
}
$user->userroles->delete;
if (@LDAPRoles) {
$user->userroles->create({ role => (substr $_, 6) }) for @LDAPRoles;
foreach my $ldap_role (@LDAPRoles) {
if (defined($role_mapping->{$ldap_role})) {
my $roles = $role_mapping->{$ldap_role};
for my $mapped_role (@$roles) {
$user->userroles->create({ role => $mapped_role });
}
}
}
$c->set_authenticated($user);
}
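doLDAPLogin now resolves Hydra roles through a role_mapping table from getLDAPConfigAmbient() instead of the old "hydra"-prefix convention. A sketch of the structure the loop above expects; the group and role names are invented:

# One LDAP group may grant several Hydra roles; groups with no
# mapping entry are silently ignored by the loop above.
my $role_mapping = {
    "hydra-admins" => [ "admin" ],
    "hydra-ops"    => [ "restart-jobs", "cancel-build" ],
};
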
@@ -139,7 +149,7 @@ sub google_login :Path('/google-login') Args(0) {

error($c, "Logging in via Google is not enabled.") unless $c->config->{enable_google_login};

my $ua = new LWP::UserAgent;
my $ua = LWP::UserAgent->new();
my $response = $ua->post(
'https://www.googleapis.com/oauth2/v3/tokeninfo',
{ id_token => ($c->stash->{params}->{id_token} // die "No token."),
@@ -161,13 +171,13 @@ sub github_login :Path('/github-login') Args(0) {
my $client_id = $c->config->{github_client_id} or die "github_client_id not configured.";
my $client_secret = $c->config->{github_client_secret} // do {
my $client_secret_file = $c->config->{github_client_secret_file} or die "github_client_secret nor github_client_secret_file is configured.";
my $client_secret = read_file($client_secret_file);
my $client_secret = read_text($client_secret_file);
$client_secret =~ s/\s+//;
$client_secret;
};
die "No github secret configured" unless $client_secret;

my $ua = new LWP::UserAgent;
my $ua = LWP::UserAgent->new();
my $response = $ua->post(
'https://github.com/login/oauth/access_token',
{
@@ -229,12 +239,6 @@ sub isValidPassword {
}


sub setPassword {
my ($user, $password) = @_;
$user->update({ password => sha1_hex($password) });
}


sub register :Local Args(0) {
my ($self, $c) = @_;

@@ -294,7 +298,7 @@ sub updatePreferences {
error($c, "The passwords you specified did not match.")
if $password ne trim $c->stash->{params}->{password2};

setPassword($user, $password);
$user->setPassword($password);
}

my $emailAddress = trim($c->stash->{params}->{emailaddress} // "");
@@ -394,7 +398,7 @@ sub reset_password :Chained('user') :PathPart('reset-password') :Args(0) {
unless $user->emailaddress;

my $password = Crypt::RandPasswd->word(8,10);
setPassword($user, $password);
$user->setPassword($password);
sendEmail(
$c->config,
$user->emailaddress,
@@ -459,7 +463,7 @@ sub my_jobs_tab :Chained('dashboard_base') :PathPart('my-jobs-tab') :Args(0) {
, "jobset.enabled" => 1
},
{ order_by => ["project", "jobset", "job"]
, join => ["project", "jobset"]
, join => {"jobset" => "project"}
})];
}

src/lib/Hydra/Event.pm (new file, 62 lines)
@@ -0,0 +1,62 @@
package Hydra::Event;

use strict;
use warnings;
use Hydra::Event::BuildFinished;
use Hydra::Event::BuildQueued;
use Hydra::Event::BuildStarted;
use Hydra::Event::CachedBuildFinished;
use Hydra::Event::CachedBuildQueued;
use Hydra::Event::EvalAdded;
use Hydra::Event::EvalCached;
use Hydra::Event::EvalFailed;
use Hydra::Event::EvalStarted;
use Hydra::Event::StepFinished;

my %channels_to_events = (
build_finished => \&Hydra::Event::BuildFinished::parse,
build_queued => \&Hydra::Event::BuildQueued::parse,
build_started => \&Hydra::Event::BuildStarted::parse,
cached_build_finished => \&Hydra::Event::CachedBuildFinished::parse,
cached_build_queued => \&Hydra::Event::CachedBuildQueued::parse,
eval_added => \&Hydra::Event::EvalAdded::parse,
eval_cached => \&Hydra::Event::EvalCached::parse,
eval_failed => \&Hydra::Event::EvalFailed::parse,
eval_started => \&Hydra::Event::EvalStarted::parse,
step_finished => \&Hydra::Event::StepFinished::parse,
);


sub parse_payload :prototype($$) {
my ($channel_name, $payload) = @_;
my @payload = split /\t/, $payload;

my $parser = $channels_to_events{$channel_name};
unless (defined $parser) {
die "Invalid channel name: '$channel_name'";
}

return $parser->(@payload);
}


sub new_event {
my ($self, $channel_name, $payload) = @_;

return bless {
"channel_name" => $channel_name,
"payload" => $payload,
"event" => parse_payload($channel_name, $payload),
}, $self;
}

sub interestedIn {
my ($self, $plugin) = @_;

return $self->{"event"}->interestedIn($plugin);
}

sub execute {
my ($self, $db, $plugin) = @_;
return $self->{"event"}->execute($db, $plugin);
}

1;
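The new Hydra::Event module turns a Postgres NOTIFY channel name plus a tab-separated payload into a typed event object. A sketch of the intended call pattern; the build id is invented and $plugin stands for any loaded Hydra plugin instance:

use Hydra::Event;

my $event = Hydra::Event->new_event("build_started", "4242");

if ($event->interestedIn($plugin)) {   # i.e. the plugin can('buildStarted')
    $event->execute($db, $plugin);     # loads build 4242, then dispatches
}
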
src/lib/Hydra/Event/BuildFinished.pm (new file, 70 lines)
@@ -0,0 +1,70 @@
package Hydra::Event::BuildFinished;

use strict;
use warnings;

sub parse :prototype(@) {
if (@_ == 0) {
die "build_finished: payload takes at least one argument, but ", scalar(@_), " were given";
}

my @failures = grep(!/^\d+$/, @_);
if (@failures > 0) {
die "build_finished: payload arguments should be integers, but we received the following non-integers:", @failures;
}

my ($build_id, @dependents) = map int, @_;
return Hydra::Event::BuildFinished->new($build_id, \@dependents);
}

sub new {
my ($self, $build_id, $dependent_ids) = @_;
return bless {
"build_id" => $build_id,
"dependent_ids" => $dependent_ids,
"build" => undef,
"dependents" => [],
}, $self;
}

sub interestedIn {
my ($self, $plugin) = @_;
return int(defined($plugin->can('buildFinished')));
}

sub load {
my ($self, $db) = @_;

if (!defined($self->{"build"})) {
$self->{"build"} = $db->resultset('Builds')->find($self->{"build_id"})
or die "build $self->{'build_id'} does not exist\n";

foreach my $id (@{$self->{"dependent_ids"}}) {
my $dep = $db->resultset('Builds')->find($id)
or die "dependent build $id does not exist\n";
push @{$self->{"dependents"}}, $dep;
}
}
}

sub execute {
my ($self, $db, $plugin) = @_;

$self->load($db);

$plugin->buildFinished($self->{"build"}, $self->{"dependents"});

# Mark the build and all dependents as having their notifications "finished".
#
# Otherwise, the dependent builds will remain with notificationpendingsince set
# until hydra-notify is started, as buildFinished is never emitted for them.
foreach my $build ($self->{"build"}, @{$self->{"dependents"}}) {
if ($build->finished && defined($build->notificationpendingsince)) {
$build->update({ notificationpendingsince => undef })
}
}

return 1;
}

1;
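For reference, the payload accepted by the parse above: the first field is the finished build, any further fields are dependent builds notified alongside it. The ids here are invented.

use Hydra::Event::BuildFinished;

# Equivalent to receiving "123\t456\t789" on the build_finished channel.
my $event = Hydra::Event::BuildFinished::parse("123", "456", "789");
# $event->{build_id}      == 123
# $event->{dependent_ids} == [456, 789]
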
52
src/lib/Hydra/Event/BuildQueued.pm
Normal file
52
src/lib/Hydra/Event/BuildQueued.pm
Normal file
@@ -0,0 +1,52 @@
|
||||
package Hydra::Event::BuildQueued;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
sub parse :prototype(@) {
|
||||
unless (@_ == 1) {
|
||||
die "build_queued: payload takes only one argument, but ", scalar(@_), " were given";
|
||||
}
|
||||
|
||||
my ($build_id) = @_;
|
||||
|
||||
unless ($build_id =~ /^\d+$/) {
|
||||
die "build_queued: payload argument should be an integer, but '", $build_id, "' was given"
|
||||
}
|
||||
|
||||
return Hydra::Event::BuildQueued->new(int($build_id));
|
||||
}
|
||||
|
||||
sub new {
|
||||
my ($self, $id) = @_;
|
||||
return bless {
|
||||
"build_id" => $id,
|
||||
"build" => undef
|
||||
}, $self;
|
||||
}
|
||||
|
||||
sub interestedIn {
|
||||
my ($self, $plugin) = @_;
|
||||
return int(defined($plugin->can('buildQueued')));
|
||||
}
|
||||
|
||||
sub load {
|
||||
my ($self, $db) = @_;
|
||||
|
||||
if (!defined($self->{"build"})) {
|
||||
$self->{"build"} = $db->resultset('Builds')->find($self->{"build_id"})
|
||||
or die "build $self->{'build_id'} does not exist\n";
|
||||
}
|
||||
}
|
||||
|
||||
sub execute {
|
||||
my ($self, $db, $plugin) = @_;
|
||||
|
||||
$self->load($db);
|
||||
|
||||
$plugin->buildQueued($self->{"build"});
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
1;
|
52
src/lib/Hydra/Event/BuildStarted.pm
Normal file
52
src/lib/Hydra/Event/BuildStarted.pm
Normal file
@@ -0,0 +1,52 @@
|
||||
package Hydra::Event::BuildStarted;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
sub parse :prototype(@) {
|
||||
unless (@_ == 1) {
|
||||
die "build_started: payload takes only one argument, but ", scalar(@_), " were given";
|
||||
}
|
||||
|
||||
my ($build_id) = @_;
|
||||
|
||||
unless ($build_id =~ /^\d+$/) {
|
||||
die "build_started: payload argument should be an integer, but '", $build_id, "' was given"
|
||||
}
|
||||
|
||||
return Hydra::Event::BuildStarted->new(int($build_id));
|
||||
}
|
||||
|
||||
sub new {
|
||||
my ($self, $id) = @_;
|
||||
return bless {
|
||||
"build_id" => $id,
|
||||
"build" => undef
|
||||
}, $self;
|
||||
}
|
||||
|
||||
sub interestedIn {
|
||||
my ($self, $plugin) = @_;
|
||||
return int(defined($plugin->can('buildStarted')));
|
||||
}
|
||||
|
||||
sub load {
|
||||
my ($self, $db) = @_;
|
||||
|
||||
if (!defined($self->{"build"})) {
|
||||
$self->{"build"} = $db->resultset('Builds')->find($self->{"build_id"})
|
||||
or die "build $self->{'build_id'} does not exist\n";
|
||||
}
|
||||
}
|
||||
|
||||
sub execute {
|
||||
my ($self, $db, $plugin) = @_;
|
||||
|
||||
$self->load($db);
|
||||
|
||||
$plugin->buildStarted($self->{"build"});
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
1;
|
59
src/lib/Hydra/Event/CachedBuildFinished.pm
Normal file
59
src/lib/Hydra/Event/CachedBuildFinished.pm
Normal file
@@ -0,0 +1,59 @@
|
||||
package Hydra::Event::CachedBuildFinished;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
sub parse :prototype(@) {
|
||||
if (@_ != 2) {
|
||||
die "cached_build_finished: payload takes two arguments, but ", scalar(@_), " were given";
|
||||
}
|
||||
|
||||
my @failures = grep(!/^\d+$/, @_);
|
||||
if (@failures > 0) {
|
||||
die "cached_build_finished: payload arguments should be integers, but we received the following non-integers:", @failures;
|
||||
}
|
||||
|
||||
my ($evaluation_id, $build_id) = map int, @_;
|
||||
return Hydra::Event::CachedBuildFinished->new($evaluation_id, $build_id);
|
||||
}
|
||||
|
||||
sub new {
|
||||
my ($self, $evaluation_id, $build_id) = @_;
|
||||
return bless {
|
||||
"evaluation_id" => $evaluation_id,
|
||||
"build_id" => $build_id,
|
||||
"evaluation" => undef,
|
||||
"build" => undef,
|
||||
}, $self;
|
||||
}
|
||||
|
||||
sub interestedIn {
|
||||
my ($self, $plugin) = @_;
|
||||
return int(defined($plugin->can('cachedBuildFinished')));
|
||||
}
|
||||
|
||||
sub load {
|
||||
my ($self, $db) = @_;
|
||||
|
||||
if (!defined($self->{"build"})) {
|
||||
$self->{"build"} = $db->resultset('Builds')->find($self->{"build_id"})
|
||||
or die "build $self->{'build_id'} does not exist\n";
|
||||
}
|
||||
|
||||
if (!defined($self->{"evaluation"})) {
|
||||
$self->{"evaluation"} = $db->resultset('JobsetEvals')->find($self->{"evaluation_id"})
|
||||
or die "evaluation $self->{'evaluation_id'} does not exist\n";
|
||||
}
|
||||
}
|
||||
|
||||
sub execute {
|
||||
my ($self, $db, $plugin) = @_;
|
||||
|
||||
$self->load($db);
|
||||
|
||||
$plugin->cachedBuildFinished($self->{"evaluation"}, $self->{"build"});
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
1;
|
59
src/lib/Hydra/Event/CachedBuildQueued.pm
Normal file
59
src/lib/Hydra/Event/CachedBuildQueued.pm
Normal file
@@ -0,0 +1,59 @@
|
||||
package Hydra::Event::CachedBuildQueued;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
sub parse :prototype(@) {
|
||||
if (@_ != 2) {
|
||||
die "cached_build_queued: payload takes two arguments, but ", scalar(@_), " were given";
|
||||
}
|
||||
|
||||
my @failures = grep(!/^\d+$/, @_);
|
||||
if (@failures > 0) {
|
||||
die "cached_build_queued: payload arguments should be integers, but we received the following non-integers:", @failures;
|
||||
}
|
||||
|
||||
my ($evaluation_id, $build_id) = map int, @_;
|
||||
return Hydra::Event::CachedBuildQueued->new($evaluation_id, $build_id);
|
||||
}
|
||||
|
||||
sub new {
|
||||
my ($self, $evaluation_id, $build_id) = @_;
|
||||
return bless {
|
||||
"evaluation_id" => $evaluation_id,
|
||||
"build_id" => $build_id,
|
||||
"evaluation" => undef,
|
||||
"build" => undef,
|
||||
}, $self;
|
||||
}
|
||||
|
||||
sub interestedIn {
|
||||
my ($self, $plugin) = @_;
|
||||
return int(defined($plugin->can('cachedBuildQueued')));
|
||||
}
|
||||
|
||||
sub load {
|
||||
my ($self, $db) = @_;
|
||||
|
||||
if (!defined($self->{"build"})) {
|
||||
$self->{"build"} = $db->resultset('Builds')->find($self->{"build_id"})
|
||||
or die "build $self->{'build_id'} does not exist\n";
|
||||
}
|
||||
|
||||
if (!defined($self->{"evaluation"})) {
|
||||
$self->{"evaluation"} = $db->resultset('JobsetEvals')->find($self->{"evaluation_id"})
|
||||
or die "evaluation $self->{'evaluation_id'} does not exist\n";
|
||||
}
|
||||
}
|
||||
|
||||
sub execute {
|
||||
my ($self, $db, $plugin) = @_;
|
||||
|
||||
$self->load($db);
|
||||
|
||||
$plugin->cachedBuildQueued($self->{"evaluation"}, $self->{"build"});
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
1;
|
63
src/lib/Hydra/Event/EvalAdded.pm
Normal file
63
src/lib/Hydra/Event/EvalAdded.pm
Normal file
@@ -0,0 +1,63 @@
|
||||
package Hydra::Event::EvalAdded;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
sub parse :prototype(@) {
|
||||
unless (@_ == 3) {
|
||||
die "eval_added: payload takes exactly three arguments, but ", scalar(@_), " were given";
|
||||
}
|
||||
|
||||
my ($trace_id, $jobset_id, $evaluation_id) = @_;
|
||||
|
||||
unless ($jobset_id =~ /^\d+$/) {
|
||||
die "eval_added: payload argument jobset_id should be an integer, but '", $jobset_id, "' was given"
|
||||
}
|
||||
unless ($evaluation_id =~ /^\d+$/) {
|
||||
die "eval_added: payload argument evaluation_id should be an integer, but '", $evaluation_id, "' was given"
|
||||
}
|
||||
|
||||
return Hydra::Event::EvalAdded->new($trace_id, int($jobset_id), int($evaluation_id));
|
||||
}
|
||||
|
||||
sub new {
|
||||
my ($self, $trace_id, $jobset_id, $evaluation_id) = @_;
|
||||
return bless {
|
||||
"trace_id" => $trace_id,
|
||||
"jobset_id" => $jobset_id,
|
||||
"evaluation_id" => $evaluation_id,
|
||||
"jobset" => undef,
|
||||
"evaluation" => undef
|
||||
}, $self;
|
||||
}
|
||||
|
||||
sub interestedIn {
|
||||
my ($self, $plugin) = @_;
|
||||
return int(defined($plugin->can('evalAdded')));
|
||||
}
|
||||
|
||||
sub load {
|
||||
my ($self, $db) = @_;
|
||||
|
||||
if (!defined($self->{"jobset"})) {
|
||||
$self->{"jobset"} = $db->resultset('Jobsets')->find({ id => $self->{"jobset_id"}})
|
||||
or die "Jobset $self->{'jobset_id'} does not exist\n";
|
||||
}
|
||||
|
||||
if (!defined($self->{"evaluation"})) {
|
||||
$self->{"evaluation"} = $db->resultset('JobsetEvals')->find({ id => $self->{"evaluation_id"}})
|
||||
or die "Jobset $self->{'jobset_id'} does not exist\n";
|
||||
}
|
||||
}
|
||||
|
||||
sub execute {
|
||||
my ($self, $db, $plugin) = @_;
|
||||
|
||||
$self->load($db);
|
||||
|
||||
$plugin->evalAdded($self->{"trace_id"}, $self->{"jobset"}, $self->{"evaluation"});
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
1;
|
63
src/lib/Hydra/Event/EvalCached.pm
Normal file
63
src/lib/Hydra/Event/EvalCached.pm
Normal file
@@ -0,0 +1,63 @@
package Hydra::Event::EvalCached;

use strict;
use warnings;

sub parse :prototype(@) {
    unless (@_ == 3) {
        die "eval_cached: payload takes exactly three arguments, but ", scalar(@_), " were given";
    }

    my ($trace_id, $jobset_id, $evaluation_id) = @_;

    unless ($jobset_id =~ /^\d+$/) {
        die "eval_cached: payload argument jobset_id should be an integer, but '", $jobset_id, "' was given"
    }
    unless ($evaluation_id =~ /^\d+$/) {
        die "eval_cached: payload argument evaluation_id should be an integer, but '", $evaluation_id, "' was given"
    }

    return Hydra::Event::EvalCached->new($trace_id, int($jobset_id), int($evaluation_id));
}

sub new {
    my ($self, $trace_id, $jobset_id, $evaluation_id) = @_;
    return bless {
        "trace_id" => $trace_id,
        "jobset_id" => $jobset_id,
        "evaluation_id" => $evaluation_id,
        "jobset" => undef,
        "evaluation" => undef
    }, $self;
}

sub interestedIn {
    my ($self, $plugin) = @_;
    return int(defined($plugin->can('evalCached')));
}

sub load {
    my ($self, $db) = @_;

    if (!defined($self->{"jobset"})) {
        $self->{"jobset"} = $db->resultset('Jobsets')->find({ id => $self->{"jobset_id"}})
            or die "Jobset $self->{'jobset_id'} does not exist\n";
    }

    if (!defined($self->{"evaluation"})) {
        $self->{"evaluation"} = $db->resultset('JobsetEvals')->find({ id => $self->{"evaluation_id"}})
            or die "Evaluation $self->{'evaluation_id'} does not exist\n";
    }
}

sub execute {
    my ($self, $db, $plugin) = @_;

    $self->load($db);

    $plugin->evalCached($self->{"trace_id"}, $self->{"jobset"}, $self->{"evaluation"});

    return 1;
}

1;
53  src/lib/Hydra/Event/EvalFailed.pm  Normal file
@@ -0,0 +1,53 @@
package Hydra::Event::EvalFailed;

use strict;
use warnings;

sub parse :prototype(@) {
    unless (@_ == 2) {
        die "eval_failed: payload takes two arguments, but ", scalar(@_), " were given";
    }

    my ($trace_id, $jobset_id) = @_;

    unless ($jobset_id =~ /^\d+$/) {
        die "eval_failed: payload argument should be an integer, but '", $jobset_id, "' was given"
    }

    return Hydra::Event::EvalFailed->new($trace_id, int($jobset_id));
}

sub new {
    my ($self, $trace_id, $jobset_id) = @_;
    return bless {
        "trace_id" => $trace_id,
        "jobset_id" => $jobset_id,
        "jobset" => undef
    }, $self;
}

sub interestedIn {
    my ($self, $plugin) = @_;
    return int(defined($plugin->can('evalFailed')));
}

sub load {
    my ($self, $db) = @_;

    if (!defined($self->{"jobset"})) {
        $self->{"jobset"} = $db->resultset('Jobsets')->find({ id => $self->{"jobset_id"}})
            or die "Jobset $self->{'jobset_id'} does not exist\n";
    }
}

sub execute {
    my ($self, $db, $plugin) = @_;

    $self->load($db);

    $plugin->evalFailed($self->{"trace_id"}, $self->{"jobset"});

    return 1;
}

1;
53  src/lib/Hydra/Event/EvalStarted.pm  Normal file
@@ -0,0 +1,53 @@
package Hydra::Event::EvalStarted;

use strict;
use warnings;

sub parse :prototype(@) {
    unless (@_ == 2) {
        die "eval_started: payload takes two arguments, but ", scalar(@_), " were given";
    }

    my ($trace_id, $jobset_id) = @_;

    unless ($jobset_id =~ /^\d+$/) {
        die "eval_started: payload argument should be an integer, but '", $jobset_id, "' was given"
    }

    return Hydra::Event::EvalStarted->new($trace_id, int($jobset_id));
}

sub new {
    my ($self, $trace_id, $jobset_id) = @_;
    return bless {
        "trace_id" => $trace_id,
        "jobset_id" => $jobset_id,
        "jobset" => undef
    }, $self;
}

sub interestedIn {
    my ($self, $plugin) = @_;
    return int(defined($plugin->can('evalStarted')));
}

sub load {
    my ($self, $db) = @_;

    if (!defined($self->{"jobset"})) {
        $self->{"jobset"} = $db->resultset('Jobsets')->find({ id => $self->{"jobset_id"}})
            or die "Jobset $self->{'jobset_id'} does not exist\n";
    }
}

sub execute {
    my ($self, $db, $plugin) = @_;

    $self->load($db);

    $plugin->evalStarted($self->{"trace_id"}, $self->{"jobset"});

    return 1;
}

1;
64  src/lib/Hydra/Event/StepFinished.pm  Normal file
@@ -0,0 +1,64 @@
package Hydra::Event::StepFinished;

use strict;
use warnings;


sub parse :prototype(@) {
    unless (@_ == 3) {
        die "step_finished: payload takes exactly three arguments, but ", scalar(@_), " were given";
    }

    my ($build_id, $step_number, $log_path) = @_;

    unless ($build_id =~ /^\d+$/) {
        die "step_finished: payload argument build_id should be an integer, but '", $build_id, "' was given"
    }
    unless ($step_number =~ /^\d+$/) {
        die "step_finished: payload argument step_number should be an integer, but '", $step_number, "' was given"
    }

    return Hydra::Event::StepFinished->new(int($build_id), int($step_number), $log_path);
}

sub new :prototype($$$) {
    my ($self, $build_id, $step_number, $log_path) = @_;

    $log_path = undef if $log_path eq "-";

    return bless {
        "build_id" => $build_id,
        "step_number" => $step_number,
        "log_path" => $log_path,
        "step" => undef,
    }, $self;
}

sub interestedIn {
    my ($self, $plugin) = @_;
    return int(defined($plugin->can('stepFinished')));
}

sub load {
    my ($self, $db) = @_;

    if (!defined($self->{"step"})) {
        my $build = $db->resultset('Builds')->find($self->{"build_id"})
            or die "build $self->{'build_id'} does not exist\n";

        $self->{"step"} = $build->buildsteps->find({stepnr => $self->{"step_number"}})
            or die "step $self->{'step_number'} does not exist\n";
    }
}

sub execute {
    my ($self, $db, $plugin) = @_;

    $self->load($db);

    $plugin->stepFinished($self->{"step"}, $self->{"log_path"});

    return 1;
}

1;
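Usage sketch (not part of the commit): a step_finished payload using "-" for a missing log; the build and step numbers are hypothetical.

    my $event = Hydra::Event::StepFinished::parse("4242", "1", "-");
    # new() normalises the "-" placeholder, so $event->{log_path} is undef.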
@@ -1,9 +1,10 @@
package Hydra::Helper::AddBuilds;

use strict;
use warnings;
use utf8;
use Encode;
use JSON;
use JSON::MaybeXS;
use Nix::Store;
use Nix::Config;
use Hydra::Model::DB;
@@ -14,19 +15,20 @@ use File::stat;
use File::Path;
use File::Temp;
use File::Spec;
use File::Slurp;
use Hydra::Helper::CatalystUtils;

our @ISA = qw(Exporter);
our @EXPORT = qw(
    validateDeclarativeJobset
    createJobsetInputsRowAndData
    updateDeclarativeJobset
    handleDeclarativeJobsetBuild
    handleDeclarativeJobsetJson
);


sub updateDeclarativeJobset {
    my ($db, $project, $jobsetName, $declSpec) = @_;
sub validateDeclarativeJobset {
    my ($config, $project, $jobsetName, $declSpec) = @_;

    my @allowed_keys = qw(
        enabled
@@ -39,6 +41,7 @@ sub updateDeclarativeJobset {
        checkinterval
        schedulingshares
        enableemail
        enable_dynamic_run_command
        emailoverride
        keepnr
    );
@@ -61,15 +64,39 @@ sub updateDeclarativeJobset {
        }
    }

    my $enable_dynamic_run_command = defined $update{enable_dynamic_run_command} ? 1 : 0;
    if ($enable_dynamic_run_command
        && !($config->{dynamicruncommand}->{enable}
            && $project->enable_dynamic_run_command))
    {
        die "Dynamic RunCommand is not enabled by the server or the parent project.";
    }

    return %update;
}

sub createJobsetInputsRowAndData {
    my ($name, $declSpec) = @_;
    my $data = $declSpec->{"inputs"}->{$name};
    my $row = {
        name => $name,
        type => $data->{type}
    };
    $row->{emailresponsible} = $data->{emailresponsible} // 0;

    return ($row, $data);
}

sub updateDeclarativeJobset {
    my ($config, $db, $project, $jobsetName, $declSpec) = @_;

    my %update = validateDeclarativeJobset($config, $project, $jobsetName, $declSpec);

    $db->txn_do(sub {
        my $jobset = $project->jobsets->update_or_create(\%update);
        $jobset->jobsetinputs->delete;
        while ((my $name, my $data) = each %{$declSpec->{"inputs"}}) {
            my $row = {
                name => $name,
                type => $data->{type}
            };
            $row->{emailresponsible} = $data->{emailresponsible} // 0;
        foreach my $name (keys %{$declSpec->{"inputs"}}) {
            my ($row, $data) = createJobsetInputsRowAndData($name, $declSpec);
            my $input = $jobset->jobsetinputs->create($row);
            $input->jobsetinputalts->create({altnr => 0, value => $data->{value}});
        }
@@ -80,13 +107,15 @@ sub updateDeclarativeJobset {

sub handleDeclarativeJobsetJson {
    my ($db, $project, $declSpec) = @_;
    my $config = getHydraConfig();
    $db->txn_do(sub {
            my @kept = keys %$declSpec;
            push @kept, ".jobsets";
            $project->jobsets->search({ name => { "not in" => \@kept } })->update({ enabled => 0, hidden => 1 });
            while ((my $jobsetName, my $spec) = each %$declSpec) {
            foreach my $jobsetName (keys %$declSpec) {
                my $spec = $declSpec->{$jobsetName};
                eval {
                    updateDeclarativeJobset($db, $project, $jobsetName, $spec);
                    updateDeclarativeJobset($config, $db, $project, $jobsetName, $spec);
                    1;
                } or do {
                    print STDERR "ERROR: failed to process declarative jobset ", $project->name, ":${jobsetName}, ", $@, "\n";
56  src/lib/Hydra/Helper/AttributeSet.pm  Normal file
@@ -0,0 +1,56 @@
package Hydra::Helper::AttributeSet;

use strict;
use warnings;

sub new {
    my ($self) = @_;
    return bless { "paths" => [] }, $self;
}

sub registerValue {
    my ($self, $attributePath) = @_;

    my @pathParts = splitPath($attributePath);

    pop(@pathParts);
    if (scalar(@pathParts) == 0) {
        return;
    }

    my $lineage = "";
    for my $pathPart (@pathParts) {
        $lineage = $self->registerChild($lineage, $pathPart);
    }
}

sub registerChild {
    my ($self, $parent, $attributePath) = @_;
    if ($parent ne "") {
        $parent .= "."
    }

    my $name = $parent . $attributePath;
    if (!grep { $_ eq $name} @{$self->{"paths"}}) {
        push(@{$self->{"paths"}}, $name);
    }
    return $name;
}

sub splitPath {
    my ($s) = @_;

    if ($s eq "") {
        return ('')
    }

    return split(/\./, $s, -1);
}

sub enumerate {
    my ($self) = @_;
    my @paths = sort { length($a) <=> length($b) } @{$self->{"paths"}};
    return @paths;
}

1;
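Usage sketch (not part of the commit): registering two leaf attribute paths and enumerating their shared ancestors; the paths are hypothetical.

    use Hydra::Helper::AttributeSet;

    my $attrs = Hydra::Helper::AttributeSet->new();
    $attrs->registerValue("tests.x86_64-linux.hello");
    $attrs->registerValue("tests.x86_64-linux.world");

    # Prints "tests" then "tests.x86_64-linux": only the parents are
    # recorded, ordered from shortest to longest.
    print "$_\n" for $attrs->enumerate();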
103  src/lib/Hydra/Helper/BuildDiff.pm  Normal file
@@ -0,0 +1,103 @@
package Hydra::Helper::BuildDiff;

use utf8;
use strict;
use warnings;

our @ISA = qw(Exporter);
our @EXPORT = qw(
    buildDiff
);

sub cmpBuilds {
    my ($left, $right) = @_;
    return $left->get_column('job') cmp $right->get_column('job')
        || $left->get_column('system') cmp $right->get_column('system')
}

sub buildDiff {
    # $builds is the list of current builds
    # $builds2 is the list of previous (to-be-compared-to) builds
    my ($builds, $builds2) = @_;

    $builds = [sort { cmpBuilds($a, $b) } @{$builds}];
    $builds2 = [sort { cmpBuilds($a, $b) } @{$builds2}];

    my $ret = {
        stillSucceed => [],
        stillFail => [],
        nowSucceed => [],
        nowFail => [],
        new => [],
        removed => [],
        unfinished => [],
        aborted => [],

        # These summary counters cut across the categories to determine whether
        # actions such as "Restart all failed" or "Bump queue" are available.
        totalAborted => 0,
        totalFailed => 0,
        totalQueued => 0,
    };

    my $n = 0;
    foreach my $build (@{$builds}) {
        my $aborted = $build->finished != 0 && (
            # aborted
            $build->buildstatus == 3
            # cancelled
            || $build->buildstatus == 4
            # timeout
            || $build->buildstatus == 7
            # log limit exceeded
            || $build->buildstatus == 10
        );
        my $d;
        my $found = 0;
        while ($n < scalar(@{$builds2})) {
            my $build2 = @{$builds2}[$n];
            my $d = cmpBuilds($build, $build2);
            last if $d == -1;
            if ($d == 0) {
                $n++;
                $found = 1;
                if ($aborted) {
                    # do nothing
                } elsif ($build->finished == 0 || $build2->finished == 0) {
                    push @{$ret->{unfinished}}, $build;
                } elsif ($build->buildstatus == 0 && $build2->buildstatus == 0) {
                    push @{$ret->{stillSucceed}}, $build;
                } elsif ($build->buildstatus != 0 && $build2->buildstatus != 0) {
                    push @{$ret->{stillFail}}, $build;
                } elsif ($build->buildstatus == 0 && $build2->buildstatus != 0) {
                    push @{$ret->{nowSucceed}}, $build;
                } elsif ($build->buildstatus != 0 && $build2->buildstatus == 0) {
                    push @{$ret->{nowFail}}, $build;
                } else { die; }
                last;
            }
            my $job_system = { job => $build2->get_column('job'), system => $build2->get_column('system') };
            push @{$ret->{removed}}, $job_system;
            $n++;
        }
        if ($aborted) {
            push @{$ret->{aborted}}, $build;
        } else {
            push @{$ret->{new}}, $build if !$found;
        }

        if ($build->finished != 0 && $build->buildstatus != 0) {
            if ($aborted) {
                ++$ret->{totalAborted};
            } else {
                ++$ret->{totalFailed};
            }
        } elsif ($build->finished == 0) {
            ++$ret->{totalQueued};
        }
    }

    return $ret;
}

1;
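Usage sketch (not part of the commit): diffing the builds of two evaluations; $eval and $prevEval stand in for any two JobsetEvals rows.

    use Hydra::Helper::BuildDiff;

    my $diff = buildDiff([$eval->builds], [$prevEval->builds]);
    printf "now failing: %d, newly fixed: %d, still queued: %d\n",
        scalar @{$diff->{nowFail}},
        scalar @{$diff->{nowSucceed}},
        $diff->{totalQueued};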
@@ -2,19 +2,20 @@ package Hydra::Helper::CatalystUtils;

use utf8;
use strict;
use warnings;
use Exporter;
use Readonly;
use ReadonlyX;
use Nix::Store;
use Hydra::Helper::Nix;

our @ISA = qw(Exporter);
our @EXPORT = qw(
    getBuild getPreviousBuild getNextBuild getPreviousSuccessfulBuild
    searchBuildsAndEvalsForJobset
    error notFound gone accessDenied
    error notFound gone accessDenied badRequest
    forceLogin requireUser requireProjectOwner requireRestartPrivileges requireAdmin requirePost isAdmin isProjectOwner
    requireBumpPrivileges
    requireCancelBuildPrivileges
    requireEvalJobsetPrivileges
    trim
    getLatestFinishedEval getFirstEval
    paramToList
@@ -33,8 +34,7 @@ our @EXPORT = qw(


# Columns from the Builds table needed to render build lists.
Readonly our @buildListColumns => ('id', 'finished', 'timestamp', 'stoptime', 'project', 'jobset', 'job', 'nixname', 'system', 'buildstatus', 'releasename');
Readonly::Array our @buildListColumns => ('id', 'finished', 'timestamp', 'stoptime', 'jobset_id', 'job', 'nixname', 'system', 'buildstatus', 'releasename');

sub getBuild {
    my ($c, $id) = @_;
@@ -64,8 +64,7 @@ sub getNextBuild {
    (my $nextBuild) = $c->model('DB::Builds')->search(
      { finished => 1
      , system => $build->system
      , project => $build->get_column('project')
      , jobset => $build->get_column('jobset')
      , jobset_id => $build->get_column('jobset_id')
      , job => $build->get_column('job')
      , 'me.id' => { '>' => $build->id }
      }, {rows => 1, order_by => "me.id ASC"});
@@ -81,8 +80,7 @@ sub getPreviousSuccessfulBuild {
    (my $prevBuild) = $c->model('DB::Builds')->search(
      { finished => 1
      , system => $build->system
      , project => $build->get_column('project')
      , jobset => $build->get_column('jobset')
      , jobset_id => $build->get_column('jobset_id')
      , job => $build->get_column('job')
      , buildstatus => 0
      , 'me.id' => { '<' => $build->id }
@@ -111,14 +109,14 @@ sub searchBuildsAndEvalsForJobset {
        { columns => ['id', 'job', 'finished', 'buildstatus'] }
    );

    foreach my $b (@allBuilds) {
        my $jobName = $b->get_column('job');
    foreach my $build (@allBuilds) {
        my $jobName = $build->get_column('job');

        $evals->{$eval->id}->{timestamp} = $eval->timestamp;
        $evals->{$eval->id}->{builds}->{$jobName} = {
            id => $b->id,
            finished => $b->finished,
            buildstatus => $b->buildstatus
            id => $build->id,
            finished => $build->finished,
            buildstatus => $build->buildstatus
        };
        $builds{$jobName} = 1;
        $nrBuilds++;
@@ -155,6 +153,10 @@ sub accessDenied {
    error($c, $msg, 403);
}

sub badRequest {
    my ($c, $msg) = @_;
    error($c, $msg, 400);
}

sub backToReferer {
    my ($c) = @_;
@@ -185,6 +187,27 @@ sub isProjectOwner {
        defined $c->model('DB::ProjectMembers')->find({ project => $project, userName => $c->user->username }));
}

sub hasEvalJobsetRole {
    my ($c) = @_;
    return $c->user_exists && $c->check_user_roles("eval-jobset");
}

sub mayEvalJobset {
    my ($c, $project) = @_;
    return
        $c->user_exists &&
        (isAdmin($c) ||
         hasEvalJobsetRole($c) ||
         isProjectOwner($c, $project));
}

sub requireEvalJobsetPrivileges {
    my ($c, $project) = @_;
    requireUser($c);
    accessDenied($c, "Only the project members, administrators, and accounts with eval-jobset privileges can perform this operation.")
        unless mayEvalJobset($c, $project);
}

sub hasCancelBuildRole {
    my ($c) = @_;
    return $c->user_exists && $c->check_user_roles('cancel-build');
@@ -271,12 +294,12 @@ sub requireAdmin {

sub requirePost {
    my ($c) = @_;
    error($c, "Request must be POSTed.") if $c->request->method ne "POST";
    error($c, "Request must be POSTed.", 405) if $c->request->method ne "POST";
}


sub trim {
    my $s = shift;
    my $s = shift // "";
    $s =~ s/^\s+|\s+$//g;
    return $s;
}
@@ -314,16 +337,16 @@ sub paramToList {


# Security checking of filenames.
Readonly our $pathCompRE => "(?:[A-Za-z0-9-\+\._\$][A-Za-z0-9-\+\._\$:]*)";
Readonly our $relPathRE => "(?:$pathCompRE(?:/$pathCompRE)*)";
Readonly our $relNameRE => "(?:[A-Za-z0-9-_][A-Za-z0-9-\._]*)";
Readonly our $attrNameRE => "(?:[A-Za-z_][A-Za-z0-9-_]*)";
Readonly our $projectNameRE => "(?:[A-Za-z_][A-Za-z0-9-_]*)";
Readonly our $jobsetNameRE => "(?:[A-Za-z_][A-Za-z0-9-_\.]*)";
Readonly our $jobNameRE => "(?:$attrNameRE(?:\\.$attrNameRE)*)";
Readonly our $systemRE => "(?:[a-z0-9_]+-[a-z0-9_]+)";
Readonly our $userNameRE => "(?:[a-z][a-z0-9_\.]*)";
Readonly our $inputNameRE => "(?:[A-Za-z_][A-Za-z0-9-_]*)";
Readonly::Scalar our $pathCompRE => "(?:[A-Za-z0-9-\+\._\$][A-Za-z0-9-\+\._\$:]*)";
Readonly::Scalar our $relPathRE => "(?:$pathCompRE(?:/$pathCompRE)*)";
Readonly::Scalar our $relNameRE => "(?:[A-Za-z0-9-_][A-Za-z0-9-\._]*)";
Readonly::Scalar our $attrNameRE => "(?:[A-Za-z_][A-Za-z0-9-_]*)";
Readonly::Scalar our $projectNameRE => "(?:[A-Za-z_][A-Za-z0-9-_]*)";
Readonly::Scalar our $jobsetNameRE => "(?:[A-Za-z_][A-Za-z0-9-_\.]*)";
Readonly::Scalar our $jobNameRE => "(?:$attrNameRE(?:\\.$attrNameRE)*)";
Readonly::Scalar our $systemRE => "(?:[a-z0-9_]+-[a-z0-9_]+)";
Readonly::Scalar our $userNameRE => "(?:[a-z][a-z0-9_\.]*)";
Readonly::Scalar our $inputNameRE => "(?:[A-Za-z_][A-Za-z0-9-_]*)";


sub parseJobsetName {
@@ -335,7 +358,8 @@ sub parseJobsetName {

sub showJobName {
    my ($build) = @_;
    return $build->get_column('project') . ":" . $build->get_column('jobset') . ":" . $build->get_column('job');
    my $jobset = $build->jobset;
    return $jobset->get_column('project') . ":" . $jobset->get_column('name') . ":" . $build->get_column('job');
}


@@ -411,6 +435,7 @@ sub approxTableSize {

sub requireLocalStore {
    my ($c) = @_;
    require Hydra::Helper::Nix;
    notFound($c, "Nix channels are not supported by this Hydra server.") if !Hydra::Helper::Nix::isLocalStore();
}

@@ -1,10 +1,10 @@
package Hydra::Helper::Email;

use strict;
use warnings;
use Email::MIME;
use Email::Sender::Simple qw(sendmail);
use Exporter 'import';
use File::Slurp;
use Hydra::Helper::Nix;
use Sys::Hostname::Long;

22  src/lib/Hydra/Helper/Escape.pm  Normal file
@@ -0,0 +1,22 @@
package Hydra::Helper::Escape;

use strict;
use warnings;
use base qw(Exporter);
use Hydra::Helper::AttributeSet;

our @EXPORT = qw(escapeString escapeAttributePath);

sub escapeString {
    my ($s) = @_;
    $s =~ s|\\|\\\\|g;
    $s =~ s|\"|\\\"|g;
    $s =~ s|\$|\\\$|g;
    return "\"" . $s . "\"";
}

sub escapeAttributePath {
    my ($s) = @_;

    return join ".", map { escapeString($_) } Hydra::Helper::AttributeSet::splitPath($s);
}
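Usage sketch (not part of the commit): each component of a dotted attribute path is quoted separately, matching Nix attribute syntax.

    use Hydra::Helper::Escape;

    print escapeString('say "hi" for $2'), "\n";    # "say \"hi\" for \$2"
    print escapeAttributePath("tests.hello"), "\n"; # "tests"."hello"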
55  src/lib/Hydra/Helper/Exec.pm  Normal file
@@ -0,0 +1,55 @@
use warnings;
use strict;
use IPC::Run;

package Hydra::Helper::Exec;
our @ISA = qw(Exporter);
our @EXPORT = qw(
    captureStdoutStderr
    captureStdoutStderrWithStdin
    expectOkay
);

sub expectOkay {
    my ($timeout, @cmd) = @_;

    my ($res, $stdout, $stderr) = captureStdoutStderrWithStdin($timeout, \@cmd, "");
    if ($res) {
        die <<MSG;
Failure executing @cmd.

STDOUT:
$stdout

STDERR:
$stderr
MSG
    }

    1;
}

sub captureStdoutStderr {
    my ($timeout, @cmd) = @_;

    return captureStdoutStderrWithStdin($timeout, \@cmd, "");
}

sub captureStdoutStderrWithStdin {
    my ($timeout, $cmd, $stdin) = @_;
    my $stdout;
    my $stderr;

    eval {
        local $SIG{ALRM} = sub { die "timeout\n" }; # NB: \n required
        alarm $timeout;
        IPC::Run::run($cmd, \$stdin, \$stdout, \$stderr);
        alarm 0;
        1;
    } or do {
        die unless $@ eq "timeout\n"; # propagate unexpected errors
        return (-1, $stdout, ($stderr // "") . "timeout\n");
    };

    return ($?, $stdout, $stderr);
}
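Usage sketch (not part of the commit): capturing output with a timeout; a timed-out command is reported as status -1 with "timeout" appended to stderr.

    use Hydra::Helper::Exec;

    my ($status, $stdout, $stderr) = captureStdoutStderr(5, "git", "--version");

    # expectOkay dies with the captured stdout/stderr if the command fails.
    expectOkay(5, "git", "--version");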
@@ -1,33 +1,50 @@
package Hydra::Helper::Nix;

use strict;
use warnings;
use Exporter;
use File::Path;
use File::Basename;
use Config::General;
use Hydra::Config;
use Hydra::Helper::CatalystUtils;
use Hydra::Model::DB;
use Nix::Store;
use Encode;
use Sys::Hostname::Long;
use IPC::Run;
use UUID4::Tiny qw(is_uuid4_string);

our @ISA = qw(Exporter);
our @EXPORT = qw(
    getHydraHome getHydraConfig getBaseUrl
    getSCMCacheDir
    registerRoot getGCRootsDir gcRootFor
    jobsetOverview jobsetOverview_
    getDrvLogPath findLog
    getMainOutput
    cancelBuilds
    constructRunCommandLogPath
    findLog
    gcRootFor
    getBaseUrl
    getDrvLogPath
    getEvals getMachines
    pathIsInsidePrefix
    captureStdoutStderr run grab
    getTotalShares
    getGCRootsDir
    getHydraConfig
    getHydraHome
    getMainOutput
    getSCMCacheDir
    getStatsdConfig
    getStoreUri
    readNixFile
    getTotalShares
    grab
    isLocalStore
    cancelBuilds restartBuilds);
    jobsetOverview
    jobsetOverview_
    pathIsInsidePrefix
    readIntoSocket
    readNixFile
    registerRoot
    restartBuilds
    run
    $MACHINE_LOCAL_STORE
);

our $MACHINE_LOCAL_STORE = Nix::Store->new();


sub getHydraHome {
@@ -35,23 +52,56 @@ sub getHydraHome {
    return $dir;
}

# Return hash of statsd configuration of the following shape:
# (
#   host => string,
#   port => digit
# )
sub getStatsdConfig {
    my ($config) = @_;
    my $cfg = $config->{statsd};
    my %statsd = defined $cfg ? ref $cfg eq "HASH" ? %$cfg : ($cfg) : ();

my $hydraConfig;

sub getHydraConfig {
    return $hydraConfig if defined $hydraConfig;
    my $conf = $ENV{"HYDRA_CONFIG"} || (Hydra::Model::DB::getHydraPath . "/hydra.conf");
    if (-f $conf) {
        my %h = new Config::General( -ConfigFile => $conf
                                   , -UseApacheInclude => 1
                                   , -IncludeAgain => 1
                                   )->getall;

        $hydraConfig = \%h;
    } else {
        $hydraConfig = {};
    return {
        "host" => $statsd{'host'} // 'localhost',
        "port" => $statsd{'port'} // 8125,
    }
    return $hydraConfig;
}

sub getHydraNotifyPrometheusConfig {
    my ($config) = @_;
    my $cfg = $config->{hydra_notify};

    if (!defined($cfg)) {
        return undef;
    }

    if (ref $cfg ne "HASH") {
        print STDERR "Error reading Hydra's configuration file: hydra_notify should be a block.\n";
        return undef;
    }

    my $promcfg = $cfg->{prometheus};
    if (!defined($promcfg)) {
        return undef;
    }

    if (ref $promcfg ne "HASH") {
        print STDERR "Error reading Hydra's configuration file: hydra_notify.prometheus should be a block.\n";
        return undef;
    }

    if (defined($promcfg->{"listen_address"}) && defined($promcfg->{"port"})) {
        return {
            "listen_address" => $promcfg->{'listen_address'},
            "port" => $promcfg->{'port'},
        };
    } else {
        print STDERR "Error reading Hydra's configuration file: hydra_notify.prometheus should include listen_address and port.\n";
        return undef;
    }

    return undef;
}


@@ -88,20 +138,20 @@ sub registerRoot {
    my ($path) = @_;
    my $link = gcRootFor $path;
    return if -e $link;
    open ROOT, ">$link" or die "cannot create GC root `$link' to `$path'";
    close ROOT;
    open(my $root, ">", $link) or die "cannot create GC root `$link' to `$path'";
    close $root;
}


sub jobsetOverview_ {
    my ($c, $jobsets) = @_;
    return $jobsets->search({},
        { order_by => "name"
        { order_by => ["hidden ASC", "enabled DESC", "name"]
        , "+select" =>
          [ "(select count(*) from Builds as a where a.finished = 0 and me.project = a.project and me.name = a.jobset and a.isCurrent = 1)"
          , "(select count(*) from Builds as a where a.finished = 1 and me.project = a.project and me.name = a.jobset and buildstatus <> 0 and a.isCurrent = 1)"
          , "(select count(*) from Builds as a where a.finished = 1 and me.project = a.project and me.name = a.jobset and buildstatus = 0 and a.isCurrent = 1)"
          , "(select count(*) from Builds as a where me.project = a.project and me.name = a.jobset and a.isCurrent = 1)"
          [ "(select count(*) from Builds as a where me.id = a.jobset_id and a.finished = 0 and a.isCurrent = 1)"
          , "(select count(*) from Builds as a where me.id = a.jobset_id and a.finished = 1 and buildstatus <> 0 and a.isCurrent = 1)"
          , "(select count(*) from Builds as a where me.id = a.jobset_id and a.finished = 1 and buildstatus = 0 and a.isCurrent = 1)"
          , "(select count(*) from Builds as a where me.id = a.jobset_id and a.isCurrent = 1)"
          ]
        , "+as" => ["nrscheduled", "nrfailed", "nrsucceeded", "nrtotal"]
        });
@@ -125,6 +175,9 @@ sub getDrvLogPath {
    for ($fn . $bucketed, $fn . $bucketed . ".bz2") {
        return $_ if -f $_;
    }
    for ($fn . $bucketed, $fn . $bucketed . ".zst") {
        return $_ if -f $_;
    }
    return undef;
}

@@ -141,6 +194,10 @@ sub findLog {

    return undef if scalar @outPaths == 0;

    # Filter out any NULLs. Content-addressed derivations
    # that haven't built yet or failed to build may have a NULL outPath.
    @outPaths = grep {defined} @outPaths;

    my @steps = $c->model('DB::BuildSteps')->search(
        { path => { -in => [@outPaths] } },
        { select => ["drvpath"]
@@ -206,13 +263,41 @@ sub getEvalInfo {
}


sub getEvals {
    my ($self, $c, $evals, $offset, $rows) = @_;
=head2 getEvals

    my @evals = $evals->search(
This method returns a list of evaluations with details about what changed,
intended to be used with `eval.tt`.

Arguments:

=over 4

=item C<$c>
L<Hydra> - the entire application.

=item C<$evals_result_set>

A L<DBIx::Class::ResultSet> for the result class of L<Hydra::Model::DB::JobsetEvals>

=item C<$offset>

Integer offset when selecting evaluations

=item C<$rows>

Integer rows to fetch

=back

=cut
sub getEvals {
    my ($c, $evals_result_set, $offset, $rows) = @_;

    my $me = $evals_result_set->current_source_alias;

    my @evals = $evals_result_set->search(
        { hasnewbuilds => 1 },
        { order_by => "me.id DESC", rows => $rows, offset => $offset
        , prefetch => { evaluationerror => [ ] } });
        { order_by => "$me.id DESC", rows => $rows, offset => $offset });
    my @res = ();
    my $cache = {};

@@ -224,7 +309,8 @@ sub getEvals {
            { order_by => "id DESC", rows => 1 });

        my $curInfo = getEvalInfo($cache, $curEval);
        my $prevInfo = getEvalInfo($cache, $prevEval) if defined $prevEval;
        my $prevInfo;
        $prevInfo = getEvalInfo($cache, $prevEval) if defined $prevEval;

        # Compute what inputs changed between each eval.
        my @changedInputs;
@@ -259,13 +345,21 @@ sub getMachines {

    for my $machinesFile (@machinesFiles) {
        next unless -e $machinesFile;
        open CONF, "<$machinesFile" or die;
        while (<CONF>) {
            chomp;
            s/\#.*$//g;
            next if /^\s*$/;
            my @tokens = split /\s/, $_;
        open(my $conf, "<", $machinesFile) or die;
        while (my $line = <$conf>) {
            chomp($line);
            $line =~ s/\#.*$//g;
            next if $line =~ /^\s*$/;
            my @tokens = split /\s+/, $line;

            if (!defined($tokens[5]) || $tokens[5] eq "-") {
                $tokens[5] = "";
            }
            my @supportedFeatures = split(/,/, $tokens[5] || "");

            if (!defined($tokens[6]) || $tokens[6] eq "-") {
                $tokens[6] = "";
            }
            my @mandatoryFeatures = split(/,/, $tokens[6] || "");
            $machines{$tokens[0]} =
                { systemTypes => [ split(/,/, $tokens[1]) ]
@@ -276,7 +370,7 @@ sub getMachines {
                , mandatoryFeatures => [ @mandatoryFeatures ]
                };
        }
        close CONF;
        close $conf;
    }

    return \%machines;
@@ -305,7 +399,7 @@ sub pathIsInsidePrefix {

        # ‘..’ should not take us outside of the prefix.
        if ($c eq "..") {
            return if length($cur) <= length($prefix);
            return undef if length($cur) <= length($prefix);
            $cur =~ s/\/[^\/]*$// or die; # remove last component
            next;
        }
@@ -323,28 +417,20 @@ sub pathIsInsidePrefix {
    return $cur;
}

sub captureStdoutStderr {
    my ($timeout, @cmd) = @_;
    my $stdin = "";
    my $stdout;
    my $stderr;
sub readIntoSocket{
    my (%args) = @_;
    my $sock;

    eval {
        local $SIG{ALRM} = sub { die "timeout\n" }; # NB: \n required
        alarm $timeout;
        IPC::Run::run(\@cmd, \$stdin, \$stdout, \$stderr);
        alarm 0;
        1;
    } or do {
        die unless $@ eq "timeout\n"; # propagate unexpected errors
        return (-1, $stdout, ($stderr // "") . "timeout\n");
        open($sock, "-|", @{$args{cmd}}) or die "failed to open socket from command: $!\n";
    };

    return ($?, $stdout, $stderr);
    return $sock;
}




sub run {
    my (%args) = @_;
    my $res = { stdout => "", stderr => "" };
@@ -405,7 +491,7 @@ sub getTotalShares {
}


sub cancelBuilds($$) {
sub cancelBuilds {
    my ($db, $builds) = @_;
    return $db->txn_do(sub {
        $builds = $builds->search({ finished => 0 });
@@ -422,13 +508,13 @@ sub cancelBuilds($$) {
}


sub restartBuilds($$) {
sub restartBuilds {
    my ($db, $builds) = @_;

    $builds = $builds->search({ finished => 1 });

    foreach my $build ($builds->search({}, { columns => ["drvpath"] })) {
        next if !isValidPath($build->drvpath);
        next if !$MACHINE_LOCAL_STORE->isValidPath($build->drvpath);
        registerRoot $build->drvpath;
    }

@@ -471,14 +557,28 @@ sub getStoreUri {
sub readNixFile {
    my ($path) = @_;
    return grab(cmd => ["nix", "--experimental-features", "nix-command",
                        "cat-store", "--store", getStoreUri(), "$path"]);
                        "store", "cat", "--store", getStoreUri(), "$path"]);
}


sub isLocalStore {
    my $uri = getStoreUri();
    return $uri =~ "^(local|daemon|auto)";
    return $uri =~ "^(local|daemon|auto|file)";
}


sub constructRunCommandLogPath {
    my ($runlog) = @_;
    my $uuid = $runlog->uuid;

    if (!is_uuid4_string($uuid)) {
        die "UUID was invalid."
    }

    my $hydra_path = Hydra::Model::DB::getHydraPath;
    my $bucket = substr($uuid, 0, 2);

    return "$hydra_path/runcommand-logs/$bucket/$uuid";
}

1;
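Illustration (not part of the commit): for a hypothetical RunCommandLogs row whose uuid begins with "4b", the first two characters of the UUID select the bucket directory.

    my $path = constructRunCommandLogPath($runlog);
    # => <hydra-path>/runcommand-logs/4b/4b5c3f9e-...  (dies on a non-v4 UUID)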
30  src/lib/Hydra/Math.pm  Normal file
@@ -0,0 +1,30 @@
package Hydra::Math;

use strict;
use warnings;
use List::Util qw(min);
use Exporter 'import';
our @EXPORT_OK = qw(exponential_backoff);

=head2 exponential_backoff

Calculates a number of seconds to wait before reattempting something.

Arguments:

=over 1

=item C<$attempts>

Integer number of attempts made.

=back

=cut
sub exponential_backoff {
    my ($attempt) = @_;
    my $clamp = min(10, $attempt);
    return 2 ** $clamp;
}

1;
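Usage sketch (not part of the commit): delays double per attempt and are clamped at 2**10 = 1024 seconds.

    use Hydra::Math qw(exponential_backoff);

    print exponential_backoff(1), "\n";   # 2
    print exponential_backoff(4), "\n";   # 16
    print exponential_backoff(99), "\n";  # 1024 (clamped)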
@@ -1,6 +1,7 @@
package Hydra::Model::DB;

use strict;
use warnings;
use base 'Catalyst::Model::DBIC::Schema';

sub getHydraPath {
@@ -1,6 +1,7 @@
package Hydra::Plugin;

use strict;
use warnings;
use Module::Pluggable
    search_path => "Hydra::Plugin",
    instantiate => 'new';
@@ -24,29 +25,68 @@ sub instantiate {
    return @$plugins;
}

# Called when build $build has been queued.
sub buildQueued {
    my ($self, $build) = @_;
}
# To implement behaviors in response to the following events, implement
# the function in your plugin and it will be executed by hydra-notify.
#
# See the tests in t/Event/*.t for arguments, and the documentation for
# notify events for semantics.
#

# Called when build $build has started.
sub buildStarted {
    my ($self, $build) = @_;
}
# # Called when an evaluation of $jobset has begun.
# sub evalStarted {
#     my ($self, $traceID, $jobset) = @_;
# }

# Called when build $build has finished. If the build failed, then
# $dependents is an array ref to a list of builds that have also
# failed as a result (i.e. because they depend on $build or a failed
# dependency of $build).
sub buildFinished {
    my ($self, $build, $dependents) = @_;
}
# # Called when an evaluation of $jobset determined the inputs had not changed.
# sub evalCached {
#     my ($self, $traceID, $jobset, $evaluation) = @_;
# }

# Called when step $step has finished. The build log is stored in the
# file $logPath (bzip2-compressed).
sub stepFinished {
    my ($self, $step, $logPath) = @_;
}
# # Called when an evaluation of $jobset failed.
# sub evalFailed {
#     my ($self, $traceID, $jobset) = @_;
# }

# # Called when $evaluation of $jobset has completed successfully.
# sub evalAdded {
#     my ($self, $traceID, $jobset, $evaluation) = @_;
# }

# # Called when build $build has been queued.
# sub buildQueued {
#     my ($self, $build) = @_;
# }

# # Called when build $build has been queued again by evaluation $evaluation
# # where $build has not yet finished.
# sub cachedBuildQueued {
#     my ($self, $evaluation, $build) = @_;
# }

# # Called when build $build is a finished build, and is
# # part of evaluation $evaluation
# sub cachedBuildFinished {
#     my ($self, $evaluation, $build) = @_;
# }

# # Called when build $build has started.
# sub buildStarted {
#     my ($self, $build) = @_;
# }

# # Called when build $build has finished. If the build failed, then
# # $dependents is an array ref to a list of builds that have also
# # failed as a result (i.e. because they depend on $build or a failed
# # dependency of $build).
# sub buildFinished {
#     my ($self, $build, $dependents) = @_;
# }

# # Called when step $step has finished. The build log is stored in the
# # file $logPath (bzip2-compressed).
# sub stepFinished {
#     my ($self, $step, $logPath) = @_;
# }

# Called to determine the set of supported input types. The plugin
# should add these to the $inputTypes hashref, e.g. $inputTypes{'svn'}
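Sketch of a plugin under the new convention (not part of the commit; the package name is hypothetical): only the hooks a plugin implements are declared, and each event's interestedIn() discovers them via can().

    package Hydra::Plugin::ExampleNotifier;

    use strict;
    use warnings;
    use parent 'Hydra::Plugin';

    # Implementing the hook is enough for hydra-notify to deliver the event.
    sub buildFinished {
        my ($self, $build, $dependents) = @_;
        print STDERR "build ", $build->id, " finished\n";
    }

    1;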
@@ -1,11 +1,12 @@
package Hydra::Plugin::BazaarInput;

use strict;
use warnings;
use parent 'Hydra::Plugin';
use Digest::SHA qw(sha256_hex);
use File::Path;
use Hydra::Helper::Exec;
use Hydra::Helper::Nix;
use Nix::Store;

sub supportedInputTypes {
    my ($self, $inputTypes) = @_;
@@ -36,9 +37,9 @@ sub fetchInput {
    (my $cachedInput) = $self->{db}->resultset('CachedBazaarInputs')->search(
        {uri => $uri, revision => $revision});

    addTempRoot($cachedInput->storepath) if defined $cachedInput;
    $MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput;

    if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
    if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
        $storePath = $cachedInput->storepath;
        $sha256 = $cachedInput->sha256hash;
    } else {
@@ -56,7 +57,7 @@ sub fetchInput {
        ($sha256, $storePath) = split ' ', $stdout;

        # FIXME: time window between nix-prefetch-bzr and addTempRoot.
        addTempRoot($storePath);
        $MACHINE_LOCAL_STORE->addTempRoot($storePath);

        $self->{db}->txn_do(sub {
            $self->{db}->resultset('CachedBazaarInputs')->create(
@@ -1,10 +1,11 @@
package Hydra::Plugin::BitBucketPulls;

use strict;
use warnings;
use parent 'Hydra::Plugin';
use HTTP::Request;
use LWP::UserAgent;
use JSON;
use JSON::MaybeXS;
use Hydra::Helper::CatalystUtils;
use File::Temp;
use POSIX qw(strftime);
@@ -1,15 +1,16 @@
package Hydra::Plugin::BitBucketStatus;

use strict;
use warnings;
use parent 'Hydra::Plugin';
use HTTP::Request;
use JSON;
use JSON::MaybeXS;
use LWP::UserAgent;
use Hydra::Helper::CatalystUtils;

sub isEnabled {
    my ($self) = @_;
    return $self->{config}->{enable_bitbucket_status} == 1;
    return ($self->{config}->{enable_bitbucket_status} // 0) == 1;
}

sub toBitBucketState {
@@ -22,21 +23,21 @@ sub toBitBucketState {
}

sub common {
    my ($self, $build, $dependents, $finished) = @_;
    my ($self, $topbuild, $dependents, $finished) = @_;
    my $bitbucket = $self->{config}->{bitbucket};
    my $baseurl = $self->{config}->{'base_uri'} || "http://localhost:3000";

    foreach my $b ($build, @{$dependents}) {
        my $jobName = showJobName $b;
        my $evals = $build->jobsetevals;
    foreach my $build ($topbuild, @{$dependents}) {
        my $jobName = showJobName $build;
        my $evals = $topbuild->jobsetevals;
        my $ua = LWP::UserAgent->new();
        my $body = encode_json(
            {
                state => $finished ? toBitBucketState($b->buildstatus) : "INPROGRESS",
                url => "$baseurl/build/" . $b->id,
                state => $finished ? toBitBucketState($build->buildstatus) : "INPROGRESS",
                url => "$baseurl/build/" . $build->id,
                name => $jobName,
                key => $b->id,
                description => "Hydra build #" . $b->id . " of $jobName",
                key => $build->id,
                description => "Hydra build #" . $build->id . " of $jobName",
            });
        while (my $eval = $evals->next) {
            foreach my $i ($eval->jobsetevalinputs){
@@ -1,11 +1,12 @@
package Hydra::Plugin::CircleCINotification;

use strict;
use warnings;
use parent 'Hydra::Plugin';
use HTTP::Request;
use LWP::UserAgent;
use Hydra::Helper::CatalystUtils;
use JSON;
use JSON::MaybeXS;

sub isEnabled {
    my ($self) = @_;
@@ -13,22 +14,22 @@ sub isEnabled {
}

sub buildFinished {
    my ($self, $build, $dependents) = @_;
    my ($self, $topbuild, $dependents) = @_;
    my $cfg = $self->{config}->{circleci};
    my @config = defined $cfg ? ref $cfg eq "ARRAY" ? @$cfg : ($cfg) : ();

    # Figure out to which branches to send notification.
    my %branches;
    foreach my $b ($build, @{$dependents}) {
        my $prevBuild = getPreviousBuild($b);
        my $jobName = showJobName $b;
    foreach my $build ($topbuild, @{$dependents}) {
        my $prevBuild = getPreviousBuild($build);
        my $jobName = showJobName $build;

        foreach my $branch (@config) {
            my $force = $branch->{force};
            next unless $jobName =~ /^$branch->{jobs}$/;

            # If the build has failed, don't trigger CircleCI.
            next if ! $force && $b->buildstatus != 0;
            next if ! $force && $build->buildstatus != 0;

            my $fullUrl = "https://circleci.com/api/v1.1/project/" . $branch->{vcstype} . "/" . $branch->{username} . "/" . $branch->{project} . "/tree/" . $branch->{branch} . "?circle-token=" . $branch->{token};
            $branches{$fullUrl} = 1;
@@ -1,6 +1,7 @@
package Hydra::Plugin::CompressLog;

use strict;
use warnings;
use utf8;
use parent 'Hydra::Plugin';
use Hydra::Helper::CatalystUtils;
@@ -8,11 +9,24 @@ use Hydra::Helper::CatalystUtils;
sub stepFinished {
    my ($self, $step, $logPath) = @_;

    my $doCompress = $self->{config}->{'compress_build_logs'} // "1";
    my $doCompress = $self->{config}->{'compress_build_logs'} // '1';
    my $silent = $self->{config}->{'compress_build_logs_silent'} // '0';
    my $compression = $self->{config}->{'compress_build_logs_compression'} // 'bzip2';

    if ($doCompress eq "1" && -e $logPath) {
        print STDERR "compressing ‘$logPath’...\n";
        system("bzip2", "--force", $logPath);
    if (not -e $logPath or $doCompress ne "1") {
        return;
    }

    if ($silent ne '1') {
        print STDERR "compressing '$logPath' with $compression...\n";
    }

    if ($compression eq 'bzip2') {
        system('bzip2', '--force', $logPath);
    } elsif ($compression eq 'zstd') {
        system('zstd', '--rm', '--quiet', '-T0', $logPath);
    } else {
        print STDERR "unknown compression type '$compression'\n";
    }
}

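Hypothetical hydra.conf fragment (not part of the commit) exercising the new options: compress logs with zstd and suppress the per-log message.

    compress_build_logs = 1
    compress_build_logs_silent = 1
    compress_build_logs_compression = zstd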
@@ -1,6 +1,7 @@
package Hydra::Plugin::CoverityScan;

use strict;
use warnings;
use parent 'Hydra::Plugin';
use File::Basename;
use LWP::UserAgent;
@@ -12,19 +13,19 @@ sub isEnabled {
}

sub buildFinished {
    my ($self, $b, $dependents) = @_;
    my ($self, $build, $dependents) = @_;

    my $cfg = $self->{config}->{coverityscan};
    my @config = defined $cfg ? ref $cfg eq "ARRAY" ? @$cfg : ($cfg) : ();

    # Scan the job and see if it matches any of the Coverity Scan projects
    my $proj;
    my $jobName = showJobName $b;
    my $jobName = showJobName $build;
    foreach my $p (@config) {
        next unless $jobName =~ /^$p->{jobs}$/;

        # If build is cancelled or aborted, do not upload build
        next if $b->buildstatus == 4 || $b->buildstatus == 3;
        next if $build->buildstatus == 4 || $build->buildstatus == 3;

        # Otherwise, select this Coverity project
        $proj = $p; last;
@@ -47,16 +48,16 @@ sub buildFinished {
        unless defined $token;

    # Get tarball locations
    my $storePath = ($b->buildoutputs)[0]->path;
    my $storePath = ($build->buildoutputs)[0]->path;
    my $tarballs = "$storePath/tarballs";
    my $covTarball;

    opendir TARBALLS, $tarballs or die;
    while (readdir TARBALLS) {
        next unless $_ =~ /.*-coverity-int\.(tgz|lzma|xz|bz2|zip)$/;
        $covTarball = "$tarballs/$_"; last;
    opendir my $tarballs_handle, $tarballs or die;
    while (my $file = readdir $tarballs_handle) {
        next unless $file =~ /.*-coverity-int\.(tgz|lzma|xz|bz2|zip)$/;
        $covTarball = "$tarballs/$file"; last;
    }
    closedir TARBALLS;
    closedir $tarballs_handle;

    unless (defined $covTarball) {
        print STDERR "CoverityScan.pm: Coverity tarball not found in $tarballs; skipping upload...\n";
@@ -81,13 +82,14 @@ sub buildFinished {
    my $versionRE = "(?:[A-Za-z0-9\.\-]+)";

    my $shortName = basename($covTarball);
    my $version = $2 if $shortName =~ /^($pkgNameRE)-($versionRE)-coverity-int.*$/;
    my $version;
    $version = $2 if $shortName =~ /^($pkgNameRE)-($versionRE)-coverity-int.*$/;

    die "CoverityScan.pm: Couldn't parse build version for upload! ($shortName)"
        unless defined $version;

    # Submit build
    my $jobid = $b->id;
    my $jobid = $build->id;
    my $desc = "Hydra Coverity Build ($jobName) - $jobid:$version";

    print STDERR "uploading $desc ($shortName) to Coverity Scan\n";
@@ -1,11 +1,12 @@
package Hydra::Plugin::DarcsInput;

use strict;
use warnings;
use parent 'Hydra::Plugin';
use Digest::SHA qw(sha256_hex);
use File::Path;
use Hydra::Helper::Exec;
use Hydra::Helper::Nix;
use Nix::Store;

sub supportedInputTypes {
    my ($self, $inputTypes) = @_;
@@ -56,7 +57,7 @@ sub fetchInput {
        {uri => $uri, revision => $revision},
        {rows => 1});

    if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
    if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
        $storePath = $cachedInput->storepath;
        $sha256 = $cachedInput->sha256hash;
        $revision = $cachedInput->revision;
@@ -73,8 +74,8 @@ sub fetchInput {
        die "darcs changes --count failed" if $? != 0;

        system "rm", "-rf", "$tmpDir/export/_darcs";
        $storePath = addToStore("$tmpDir/export", 1, "sha256");
        $sha256 = queryPathHash($storePath);
        $storePath = $MACHINE_LOCAL_STORE->addToStore("$tmpDir/export", 1, "sha256");
        $sha256 = $MACHINE_LOCAL_STORE->queryPathHash($storePath);
        $sha256 =~ s/sha256://;

        $self->{db}->txn_do(sub {
18  src/lib/Hydra/Plugin/DeclarativeJobsets.pm  Normal file
@@ -0,0 +1,18 @@
package Hydra::Plugin::DeclarativeJobsets;

use strict;
use warnings;
use parent 'Hydra::Plugin';
use Hydra::Helper::AddBuilds;

sub buildFinished {
    my ($self, $build, $dependents) = @_;

    my $project = $build->project;
    my $jobsetName = $build->jobset->get_column('name');
    if (length($project->declfile) && $jobsetName eq ".jobsets" && $build->iscurrent) {
        handleDeclarativeJobsetBuild($self->{"db"}, $project, $build);
    }
}

1;
@@ -2,6 +2,7 @@ package Hydra::Plugin::EmailNotification;

use utf8;
use strict;
use warnings;
use parent 'Hydra::Plugin';
use POSIX qw(strftime);
use Template;
@@ -11,7 +12,7 @@ use Hydra::Helper::Email;

sub isEnabled {
    my ($self) = @_;
    return $self->{config}->{email_notification} == 1;
    return ($self->{config}->{email_notification} // 0) == 1;
}

my $template = <<EOF;
@@ -46,46 +47,46 @@ EOF


sub buildFinished {
    my ($self, $build, $dependents) = @_;
    my ($self, $topbuild, $dependents) = @_;

    die unless $build->finished;
    die unless $topbuild->finished;

    # Figure out to whom to send notification for each build. For
    # each email address, we send one aggregate email listing only the
    # relevant builds for that address.
    my %addresses;
    foreach my $b ($build, @{$dependents}) {
        my $prevBuild = getPreviousBuild($b);
    foreach my $build ($topbuild, @{$dependents}) {
        my $prevBuild = getPreviousBuild($build);
        # Do we want to send mail for this build?
        unless ($ENV{'HYDRA_FORCE_SEND_MAIL'}) {
            next unless $b->jobset->enableemail;
            next unless $build->jobset->enableemail;

            # If build is cancelled or aborted, do not send email.
            next if $b->buildstatus == 4 || $b->buildstatus == 3;
            next if $build->buildstatus == 4 || $build->buildstatus == 3;

            # If there is a previous (that is not cancelled or aborted) build
            # with same buildstatus, do not send email.
            next if defined $prevBuild && ($b->buildstatus == $prevBuild->buildstatus);
            next if defined $prevBuild && ($build->buildstatus == $prevBuild->buildstatus);
        }

        my $to = $b->jobset->emailoverride ne "" ? $b->jobset->emailoverride : $b->maintainers;
        my $to = $build->jobset->emailoverride ne "" ? $build->jobset->emailoverride : $build->maintainers;

        foreach my $address (split ",", ($to // "")) {
            $address = trim $address;

            $addresses{$address} //= { builds => [] };
            push @{$addresses{$address}->{builds}}, $b;
            push @{$addresses{$address}->{builds}}, $build;
        }
    }

    my ($authors, $nrCommits, $emailable_authors) = getResponsibleAuthors($build, $self->{plugins});
    my ($authors, $nrCommits, $emailable_authors) = getResponsibleAuthors($topbuild, $self->{plugins});
    my $authorList;
    my $prevBuild = getPreviousBuild($build);
    my $prevBuild = getPreviousBuild($topbuild);
    if (scalar keys %{$authors} > 0 &&
        ((!defined $prevBuild) || ($build->buildstatus != $prevBuild->buildstatus))) {
        ((!defined $prevBuild) || ($topbuild->buildstatus != $prevBuild->buildstatus))) {
        my @x = map { "$_ <$authors->{$_}>" } (sort keys %{$authors});
        $authorList = join(" or ", scalar @x > 1 ? join(", ", @x[0..scalar @x - 2]): (), $x[-1]);
        $addresses{$_} = { builds => [ $build ] } foreach (@{$emailable_authors});
        $addresses{$_} = { builds => [ $topbuild ] } foreach (@{$emailable_authors});
    }

    # Send an email to each interested address.
@@ -96,11 +97,11 @@ sub buildFinished {
        my $tt = Template->new({});

        my $vars =
            { build => $build, prevBuild => getPreviousBuild($build)
            , dependents => [grep { $_->id != $build->id } @builds]
            { build => $topbuild, prevBuild => getPreviousBuild($topbuild)
            , dependents => [grep { $_->id != $topbuild->id } @builds]
            , baseurl => getBaseUrl($self->{config})
            , showJobName => \&showJobName, showStatus => \&showStatus
            , showSystem => index($build->get_column('job'), $build->system) == -1
            , showSystem => index($topbuild->get_column('job'), $topbuild->system) == -1
            , nrCommits => $nrCommits
            , authorList => $authorList
            };
@@ -113,16 +114,16 @@ sub buildFinished {
        $body =~ s/[\ ]+$//gm;

        my $subject =
            showStatus($build) . ": Hydra job " . showJobName($build)
            . ($vars->{showSystem} ? " on " . $build->system : "")
            showStatus($topbuild) . ": Hydra job " . showJobName($topbuild)
            . ($vars->{showSystem} ? " on " . $topbuild->system : "")
            . (scalar @{$vars->{dependents}} > 0 ? " (and " . scalar @{$vars->{dependents}} . " others)" : "");

        sendEmail(
            $self->{config}, $to, $subject, $body,
            [ 'X-Hydra-Project' => $build->get_column('project'),
            , 'X-Hydra-Jobset' => $build->get_column('jobset'),
            , 'X-Hydra-Job' => $build->get_column('job'),
            , 'X-Hydra-System' => $build->system
            [ 'X-Hydra-Project' => $topbuild->jobset->get_column('project'),
            , 'X-Hydra-Jobset' => $topbuild->jobset->get_column('name'),
            , 'X-Hydra-Job' => $topbuild->get_column('job'),
            , 'X-Hydra-System' => $topbuild->system
            ]);
    }
}
@@ -1,6 +1,7 @@
package Hydra::Plugin::GitInput;

use strict;
use warnings;
use parent 'Hydra::Plugin';
use Digest::SHA qw(sha256_hex);
use File::Path;
@@ -13,7 +14,6 @@ use Data::Dumper;
my $CONFIG_SECTION = "git-input";

sub supportedInputTypes {
my ($self, $inputTypes) = @_;
$inputTypes->{'git'} = 'Git checkout';
@@ -33,7 +33,7 @@ sub _parseValue {
my $start_options = 3;
# if deepClone has "=" then is considered an option
# and not the enabling of deepClone
if (index($deepClone, "=") != -1) {
if (defined($deepClone) && index($deepClone, "=") != -1) {
undef $deepClone;
$start_options = 2;
}
@@ -117,7 +117,8 @@ sub fetchInput {
$jobset->get_column('name'),
$name);
# give preference to the options from the input value
while (my ($opt_name, $opt_value) = each %{$options}) {
foreach my $opt_name (keys %{$options}) {
my $opt_value = $options->{$opt_name};
if ($opt_value =~ /^[+-]?\d+\z/) {
$opt_value = int($opt_value);
}
@@ -182,12 +183,12 @@ sub fetchInput {
# TODO: Fix case where the branch is reset to a previous commit.
my $cachedInput;
($cachedInput) = $self->{db}->resultset('CachedGitInputs')->search(
{uri => $uri, branch => $branch, revision => $revision},
{uri => $uri, branch => $branch, revision => $revision, isdeepclone => defined($deepClone) ? 1 : 0},
{rows => 1});

addTempRoot($cachedInput->storepath) if defined $cachedInput;
$MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput;

if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
$storePath = $cachedInput->storepath;
$sha256 = $cachedInput->sha256hash;
$revision = $cachedInput->revision;
@@ -216,13 +217,14 @@ sub fetchInput {
($sha256, $storePath) = split ' ', grab(cmd => ["nix-prefetch-git", $clonePath, $revision], chomp => 1);

# FIXME: time window between nix-prefetch-git and addTempRoot.
addTempRoot($storePath);
$MACHINE_LOCAL_STORE->addTempRoot($storePath);

$self->{db}->txn_do(sub {
$self->{db}->resultset('CachedGitInputs')->update_or_create(
{ uri => $uri
, branch => $branch
, revision => $revision
, isdeepclone => defined($deepClone) ? 1 : 0
, sha256hash => $sha256
, storepath => $storePath
});
@@ -259,7 +261,7 @@ sub getCommits {

my $clonePath = getSCMCacheDir . "/git/" . sha256_hex($uri);

my $out = grab(cmd => ["git", "log", "--pretty=format:%H%x09%an%x09%ae%x09%at", "$rev1..$rev2"], dir => $clonePath);
my $out = grab(cmd => ["git", "--git-dir=.git", "log", "--pretty=format:%H%x09%an%x09%ae%x09%at", "$rev1..$rev2"], dir => $clonePath);

my $res = [];
foreach my $line (split /\n/, $out) {
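A hedged sketch of the distinction the new deepClone guard draws (hypothetical input values; assumes the value string is whitespace-split, as _parseValue's variables suggest, which is an assumption, not part of the commit):

use strict;
use warnings;

# Hypothetical git input values; a bare third word enables deepClone,
# while a key=value third word is treated as the first option instead.
for my $value ("https://example.org/repo.git master 1",
               "https://example.org/repo.git master fetchSubmodules=1") {
    my ($uri, $branch, $deepClone) = split ' ', $value;
    if (defined($deepClone) && index($deepClone, "=") != -1) {
        undef $deepClone;    # same guard as the patched _parseValue
    }
    print "$uri: deepClone ", (defined $deepClone ? "on" : "off"), "\n";
}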
src/lib/Hydra/Plugin/GiteaStatus.pm (new file, 95 lines)
@@ -0,0 +1,95 @@
package Hydra::Plugin::GiteaStatus;

use strict;
use warnings;
use parent 'Hydra::Plugin';

use HTTP::Request;
use JSON::MaybeXS;
use LWP::UserAgent;
use Hydra::Helper::CatalystUtils;
use List::Util qw(max);

sub isEnabled {
my ($self) = @_;
return defined $self->{config}->{gitea_authorization};
}

sub toGiteaState {
# See https://try.gitea.io/api/swagger#/repository/repoCreateStatus
my ($status, $buildStatus) = @_;
if ($status == 0 || $status == 1) {
return "pending";
} elsif ($buildStatus == 0) {
return "success";
} elsif ($buildStatus == 3 || $buildStatus == 4 || $buildStatus == 8 || $buildStatus == 10 || $buildStatus == 11) {
return "error";
} else {
return "failure";
}
}

sub common {
my ($self, $topbuild, $dependents, $status) = @_;
my $baseurl = $self->{config}->{'base_uri'} || "http://localhost:3000";

# Find matching configs
foreach my $build ($topbuild, @{$dependents}) {
my $jobName = showJobName $build;
my $evals = $topbuild->jobsetevals;
my $ua = LWP::UserAgent->new();

# Don't send out "pending/running" status updates if the build is already finished
next if $status < 2 && $build->finished == 1;

my $state = toGiteaState($status, $build->buildstatus);
my $body = encode_json(
{
state => $state,
target_url => "$baseurl/build/" . $build->id,
description => "Hydra build #" . $build->id . " of $jobName",
context => "Hydra " . $build->get_column('job'),
});

while (my $eval = $evals->next) {
my $giteastatusInput = $eval->jobsetevalinputs->find({ name => "gitea_status_repo" });
next unless defined $giteastatusInput && defined $giteastatusInput->value;
my $i = $eval->jobsetevalinputs->find({ name => $giteastatusInput->value, altnr => 0 });
next unless defined $i;
my $gitea_url = $eval->jobsetevalinputs->find({ name => "gitea_http_url" });

my $repoOwner = $eval->jobsetevalinputs->find({ name => "gitea_repo_owner" })->value;
my $repoName = $eval->jobsetevalinputs->find({ name => "gitea_repo_name" })->value;
my $accessToken = $self->{config}->{gitea_authorization}->{$repoOwner};

my $rev = $i->revision;
my $domain = URI->new($i->uri)->host;
my $host;
unless (defined $gitea_url) {
$host = "https://$domain";
} else {
$host = $gitea_url->value;
}

my $url = "$host/api/v1/repos/$repoOwner/$repoName/statuses/$rev";

print STDERR "GiteaStatus POSTing $state to $url\n";
my $req = HTTP::Request->new('POST', $url);
$req->header('Content-Type' => 'application/json');
$req->header('Authorization' => "token $accessToken");
$req->content($body);
my $res = $ua->request($req);
print STDERR $res->status_line, ": ", $res->decoded_content, "\n" unless $res->is_success;
}
}
}

sub buildQueued {
common(@_, [], 0);
}

sub buildFinished {
common(@_, 2);
}

1;
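To make the toGiteaState mapping concrete, a small sketch with hypothetical ($status, $buildStatus) pairs (0/1 are the queued/started notifications, 2 is finished, as the buildQueued/buildFinished hooks above pass them):

# Assuming toGiteaState from Hydra::Plugin::GiteaStatus is in scope:
print toGiteaState(0, 0), "\n";   # "pending" - build only queued so far
print toGiteaState(2, 0), "\n";   # "success" - finished with buildstatus 0
print toGiteaState(2, 4), "\n";   # "error"   - finished, buildstatus 4 is in the error list
print toGiteaState(2, 1), "\n";   # "failure" - any other failing buildstatus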
@@ -1,10 +1,11 @@
package Hydra::Plugin::GithubPulls;

use strict;
use warnings;
use parent 'Hydra::Plugin';
use HTTP::Request;
use LWP::UserAgent;
use JSON;
use JSON::MaybeXS;
use Hydra::Helper::CatalystUtils;
use File::Temp;
use POSIX qw(strftime);
@@ -29,7 +30,7 @@ sub _iterate {
$pulls->{$pull->{number}} = $pull;
}
# TODO Make Link header parsing more robust!!!
my @links = split ',', $res->header("Link");
my @links = split ',', ($res->header("Link") // "");
my $next = "";
foreach my $link (@links) {
my ($url, $rel) = split ";", $link;
@@ -52,11 +53,12 @@ sub fetchInput {
_iterate("https://api.github.com/repos/$owner/$repo/pulls?per_page=100", $auth, \%pulls, $ua);
my $tempdir = File::Temp->newdir("github-pulls" . "XXXXX", TMPDIR => 1);
my $filename = "$tempdir/github-pulls.json";

open(my $fh, ">", $filename) or die "Cannot open $filename for writing: $!";
print $fh encode_json \%pulls;
print $fh JSON->new->utf8->canonical->encode(\%pulls);
close $fh;
system("jq -S . < $filename > $tempdir/github-pulls-sorted.json");
my $storePath = trim(`nix-store --add "$tempdir/github-pulls-sorted.json"`
my $storePath = trim(`nix-store --add "$filename"`
or die "cannot copy path $filename to the Nix store.\n");
chomp $storePath;
my $timestamp = time;
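The switch from encode_json plus an external `jq -S` pass to JSON->new->utf8->canonical->encode keeps the same property, sorted hash keys, so the file added to the store is byte-stable across runs. A minimal sketch with hypothetical data:

use strict;
use warnings;
use JSON;

# canonical() sorts keys, so repeated runs produce identical output.
my %pulls = ( 7 => { title => "fix" }, 2 => { title => "feat" } );
print JSON->new->utf8->canonical->encode(\%pulls), "\n";
# {"2":{"title":"feat"},"7":{"title":"fix"}}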
@@ -1,10 +1,11 @@
package Hydra::Plugin::GithubRefs;

use strict;
use warnings;
use parent 'Hydra::Plugin';
use HTTP::Request;
use LWP::UserAgent;
use JSON;
use JSON::MaybeXS;
use Hydra::Helper::CatalystUtils;
use File::Temp;
use POSIX qw(strftime);
@@ -96,8 +97,8 @@ sub _iterate {
}

sub fetchInput {
my ($self, $type, $name, $value, $project, $jobset) = @_;
return undef if $type ne "github_refs";
my ($self, $input_type, $name, $value, $project, $jobset) = @_;
return undef if $input_type ne "github_refs";

my ($owner, $repo, $type, $fut, $prefix) = split ' ', $value;
die "type field is neither 'heads' nor 'tags', but '$type'"
@@ -1,9 +1,10 @@
package Hydra::Plugin::GithubStatus;

use strict;
use warnings;
use parent 'Hydra::Plugin';
use HTTP::Request;
use JSON;
use JSON::MaybeXS;
use LWP::UserAgent;
use Hydra::Helper::CatalystUtils;
use List::Util qw(max);
@@ -25,49 +26,49 @@ sub toGithubState {
}

sub common {
my ($self, $build, $dependents, $finished) = @_;
my ($self, $topbuild, $dependents, $finished, $cachedEval) = @_;
my $cfg = $self->{config}->{githubstatus};
my @config = defined $cfg ? ref $cfg eq "ARRAY" ? @$cfg : ($cfg) : ();
my $baseurl = $self->{config}->{'base_uri'} || "http://localhost:3000";

# Find matching configs
foreach my $b ($build, @{$dependents}) {
my $jobName = showJobName $b;
my $evals = $build->jobsetevals;
foreach my $build ($topbuild, @{$dependents}) {
my $jobName = showJobName $build;
my $evals = $topbuild->jobsetevals;
my $ua = LWP::UserAgent->new();

foreach my $conf (@config) {
next unless $jobName =~ /^$conf->{jobs}$/;
# Don't send out "pending" status updates if the build is already finished
next if !$finished && $b->finished == 1;
next if !$finished && $build->finished == 1;

my $contextTrailer = $conf->{excludeBuildFromContext} ? "" : (":" . $b->id);
my $contextTrailer = $conf->{excludeBuildFromContext} ? "" : (":" . $build->id);
my $github_job_name = $jobName =~ s/-pr-\d+//r;
my $extendedContext = $conf->{context} // "continuous-integration/hydra:" . $jobName . $contextTrailer;
my $shortContext = $conf->{context} // "ci/hydra:" . $github_job_name . $contextTrailer;
my $context = $conf->{useShortContext} ? $shortContext : $extendedContext;
my $body = encode_json(
{
state => $finished ? toGithubState($b->buildstatus) : "pending",
target_url => "$baseurl/build/" . $b->id,
description => $conf->{description} // "Hydra build #" . $b->id . " of $jobName",
state => $finished ? toGithubState($build->buildstatus) : "pending",
target_url => "$baseurl/build/" . $build->id,
description => $conf->{description} // "Hydra build #" . $build->id . " of $jobName",
context => $context
});
my $inputs_cfg = $conf->{inputs};
my @inputs = defined $inputs_cfg ? ref $inputs_cfg eq "ARRAY" ? @$inputs_cfg : ($inputs_cfg) : ();
my %seen = map { $_ => {} } @inputs;
while (my $eval = $evals->next) {
foreach my $input (@inputs) {
my $i = $eval->jobsetevalinputs->find({ name => $input, altnr => 0 });
next unless defined $i;
my $uri = $i->uri;
my $rev = $i->revision;
my $key = $uri . "-" . $rev;
next if exists $seen{$input}->{$key};
if (defined($cachedEval) && $cachedEval->id != $eval->id) {
next;
}

my $sendStatus = sub {
my ($input, $owner, $repo, $rev) = @_;

my $key = $owner . "-" . $repo . "-" . $rev;
return if exists $seen{$input}->{$key};
$seen{$input}->{$key} = 1;
$uri =~ m![:/]([^/]+)/([^/]+?)(?:\.git)?$!;
my $owner = $1;
my $repo = $2;

my $url = "https://api.github.com/repos/$owner/$repo/statuses/$rev";
my $req = HTTP::Request->new('POST', $url);
$req->header('Content-Type' => 'application/json');
@@ -91,6 +92,28 @@ sub common {
} else {
print STDERR "GithubStatus ratelimit $limitRemaining/$limit, resets in $diff\n";
}
};

if (defined $eval->flake) {
my $fl = $eval->flake;
print STDERR "Flake is $fl\n";
if ($eval->flake =~ m!github:([^/]+)/([^/]+)/([[:xdigit:]]{40})$! or $eval->flake =~ m!git\+ssh://git\@github.com/([^/]+)/([^/]+)\?.*rev=([[:xdigit:]]{40})$!) {
$sendStatus->("src", $1, $2, $3);
} else {
print STDERR "Can't parse flake, skipping GitHub status update\n";
}
} else {
foreach my $input (@inputs) {
my $i = $eval->jobsetevalinputs->find({ name => $input, altnr => 0 });
if (! defined $i) {
print STDERR "Evaluation $eval doesn't have input $input\n";
}
next unless defined $i;
my $uri = $i->uri;
my $rev = $i->revision;
$uri =~ m![:/]([^/]+)/([^/]+?)(?:\.git)?$!;
$sendStatus->($input, $1, $2, $rev);
}
}
}
}
@@ -109,4 +132,14 @@ sub buildFinished {
common(@_, 1);
}

sub cachedBuildQueued {
my ($self, $evaluation, $build) = @_;
common($self, $build, [], 0, $evaluation);
}

sub cachedBuildFinished {
my ($self, $evaluation, $build) = @_;
common($self, $build, [], 1, $evaluation);
}

1;
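The flake branch above derives owner, repo, and revision straight from the eval's flake reference. A hedged sketch of the two accepted shapes, using hypothetical names and a dummy 40-digit revision:

use strict;
use warnings;

# Hypothetical flake values, to illustrate the two forms matched above.
my $rev = "0" x 40;  # stand-in for a 40-hex-digit commit hash
for my $flake ("github:alice/project/$rev",
               "git+ssh://git\@github.com/alice/project?ref=main&rev=$rev") {
    if ($flake =~ m!github:([^/]+)/([^/]+)/([[:xdigit:]]{40})$!
        or $flake =~ m!git\+ssh://git\@github.com/([^/]+)/([^/]+)\?.*rev=([[:xdigit:]]{40})$!) {
        print "owner=$1 repo=$2 rev=$3\n";
    }
}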
@@ -15,10 +15,11 @@
package Hydra::Plugin::GitlabPulls;

use strict;
use warnings;
use parent 'Hydra::Plugin';
use HTTP::Request;
use LWP::UserAgent;
use JSON;
use JSON::MaybeXS;
use Hydra::Helper::CatalystUtils;
use File::Temp;
use POSIX qw(strftime);
@@ -1,9 +1,10 @@
package Hydra::Plugin::GitlabStatus;

use strict;
use warnings;
use parent 'Hydra::Plugin';
use HTTP::Request;
use JSON;
use JSON::MaybeXS;
use LWP::UserAgent;
use Hydra::Helper::CatalystUtils;
use List::Util qw(max);
@@ -37,25 +38,25 @@ sub toGitlabState {
}

sub common {
my ($self, $build, $dependents, $status) = @_;
my ($self, $topbuild, $dependents, $status) = @_;
my $baseurl = $self->{config}->{'base_uri'} || "http://localhost:3000";

# Find matching configs
foreach my $b ($build, @{$dependents}) {
my $jobName = showJobName $b;
my $evals = $build->jobsetevals;
foreach my $build ($topbuild, @{$dependents}) {
my $jobName = showJobName $build;
my $evals = $topbuild->jobsetevals;
my $ua = LWP::UserAgent->new();

# Don't send out "pending/running" status updates if the build is already finished
next if $status < 2 && $b->finished == 1;
next if $status < 2 && $build->finished == 1;

my $state = toGitlabState($status, $b->buildstatus);
my $state = toGitlabState($status, $build->buildstatus);
my $body = encode_json(
{
state => $state,
target_url => "$baseurl/build/" . $b->id,
description => "Hydra build #" . $b->id . " of $jobName",
name => "Hydra " . $b->get_column('job'),
target_url => "$baseurl/build/" . $build->id,
description => "Hydra build #" . $build->id . " of $jobName",
name => "Hydra " . $build->get_column('job'),
});
while (my $eval = $evals->next) {
my $gitlabstatusInput = $eval->jobsetevalinputs->find({ name => "gitlab_status_repo" });
@@ -1,88 +0,0 @@
package Hydra::Plugin::HipChatNotification;

use strict;
use parent 'Hydra::Plugin';
use LWP::UserAgent;
use Hydra::Helper::CatalystUtils;

sub isEnabled {
my ($self) = @_;
return defined $self->{config}->{hipchat};
}

sub buildFinished {
my ($self, $build, $dependents) = @_;

my $cfg = $self->{config}->{hipchat};
my @config = defined $cfg ? ref $cfg eq "ARRAY" ? @$cfg : ($cfg) : ();

my $baseurl = $self->{config}->{'base_uri'} || "http://localhost:3000";

# Figure out to which rooms to send notification. For each email
# room, we send one aggregate message.
my %rooms;
foreach my $b ($build, @{$dependents}) {
my $prevBuild = getPreviousBuild($b);
my $jobName = showJobName $b;

foreach my $room (@config) {
my $force = $room->{force};
next unless $jobName =~ /^$room->{jobs}$/;

# If build is cancelled or aborted, do not send email.
next if ! $force && ($b->buildstatus == 4 || $b->buildstatus == 3);

# If there is a previous (that is not cancelled or aborted) build
# with same buildstatus, do not send email.
next if ! $force && defined $prevBuild && ($b->buildstatus == $prevBuild->buildstatus);

$rooms{$room->{room}} //= { room => $room, builds => [] };
push @{$rooms{$room->{room}}->{builds}}, $b;
}
}

return if scalar keys %rooms == 0;

my ($authors, $nrCommits) = getResponsibleAuthors($build, $self->{plugins});

# Send a message to each room.
foreach my $roomId (keys %rooms) {
my $room = $rooms{$roomId};
my @deps = grep { $_->id != $build->id } @{$room->{builds}};

my $img =
$build->buildstatus == 0 ? "$baseurl/static/images/checkmark_16.png" :
$build->buildstatus == 2 ? "$baseurl/static/images/dependency_16.png" :
$build->buildstatus == 4 ? "$baseurl/static/images/cancelled_16.png" :
"$baseurl/static/images/error_16.png";

my $msg = "";
$msg .= "<img src='$img'/> ";
$msg .= "Job <a href='$baseurl/job/${\$build->get_column('project')}/${\$build->get_column('jobset')}/${\$build->get_column('job')}'>${\showJobName($build)}</a>";
$msg .= " (and ${\scalar @deps} others)" if scalar @deps > 0;
$msg .= ": <a href='$baseurl/build/${\$build->id}'>" . showStatus($build) . "</a>";

if (scalar keys %{$authors} > 0) {
# FIXME: HTML escaping
my @x = map { "<a href='mailto:$authors->{$_}'>$_</a>" } (sort keys %{$authors});
$msg .= ", likely due to ";
$msg .= "$nrCommits commits by " if $nrCommits > 1;
$msg .= join(" or ", scalar @x > 1 ? join(", ", @x[0..scalar @x - 2]) : (), $x[-1]);
}

print STDERR "sending hipchat notification to room $roomId: $msg\n";

my $ua = LWP::UserAgent->new();
my $resp = $ua->post('https://api.hipchat.com/v1/rooms/message?format=json&auth_token=' . $room->{room}->{token}, {
room_id => $roomId,
from => 'Hydra',
message => $msg,
message_format => 'html',
notify => $room->{room}->{notify} || 0,
color => $build->buildstatus == 0 ? 'green' : 'red' });

print STDERR $resp->status_line, ": ", $resp->decoded_content,"\n" if !$resp->is_success;
}
}

1;
@@ -1,6 +1,7 @@
package Hydra::Plugin::InfluxDBNotification;

use strict;
use warnings;
use parent 'Hydra::Plugin';
use HTTP::Request;
use LWP::UserAgent;
@@ -110,9 +111,9 @@ sub buildFinished {
my $tagSet = {
status => toBuildStatusClass($b->buildstatus),
result => toBuildStatusDetailed($b->buildstatus),
project => $b->get_column('project'),
jobset => $b->get_column('jobset'),
repo => ($b->get_column('jobset') =~ /^(.*)\.pr-/) ? $1 : $b->get_column('jobset'),
project => $b->jobset->get_column('project'),
jobset => $b->jobset->get_column('name'),
repo => ($b->jobset->get_column('name') =~ /^(.*)\.pr-/) ? $1 : $b->jobset->get_column('name'),
job => $b->get_column('job'),
system => $b->system,
cached => $b->iscachedbuild ? "true" : "false",
@@ -1,11 +1,12 @@
package Hydra::Plugin::MercurialInput;

use strict;
use warnings;
use parent 'Hydra::Plugin';
use Digest::SHA qw(sha256_hex);
use File::Path;
use Hydra::Helper::Nix;
use Nix::Store;
use Hydra::Helper::Exec;
use Fcntl qw(:flock);

sub supportedInputTypes {
@@ -66,9 +67,9 @@ sub fetchInput {
(my $cachedInput) = $self->{db}->resultset('CachedHgInputs')->search(
{uri => $uri, branch => $branch, revision => $revision});

addTempRoot($cachedInput->storepath) if defined $cachedInput;
$MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput;

if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
$storePath = $cachedInput->storepath;
$sha256 = $cachedInput->sha256hash;
} else {
@@ -83,7 +84,7 @@ sub fetchInput {
($sha256, $storePath) = split ' ', $stdout;

# FIXME: time window between nix-prefetch-hg and addTempRoot.
addTempRoot($storePath);
$MACHINE_LOCAL_STORE->addTempRoot($storePath);

$self->{db}->txn_do(sub {
$self->{db}->resultset('CachedHgInputs')->update_or_create(
@@ -1,14 +1,14 @@
package Hydra::Plugin::PathInput;

use strict;
use warnings;
use parent 'Hydra::Plugin';
use POSIX qw(strftime);
use Hydra::Helper::Nix;
use Nix::Store;

sub supportedInputTypes {
my ($self, $inputTypes) = @_;
$inputTypes->{'path'} = 'Local path';
$inputTypes->{'path'} = 'Local path or URL';
}

sub fetchInput {
@@ -29,7 +29,7 @@ sub fetchInput {
{srcpath => $uri, lastseen => {">", $timestamp - $timeout}},
{rows => 1, order_by => "lastseen DESC"});

if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
$storePath = $cachedInput->storepath;
$sha256 = $cachedInput->sha256hash;
$timestamp = $cachedInput->timestamp;
@@ -45,7 +45,7 @@ sub fetchInput {
}
chomp $storePath;

$sha256 = (queryPathInfo($storePath, 0))[1] or die;
$sha256 = ($MACHINE_LOCAL_STORE->queryPathInfo($storePath, 0))[1] or die;

($cachedInput) = $self->{db}->resultset('CachedPathInputs')->search(
{srcpath => $uri, sha256hash => $sha256});
@@ -77,7 +77,7 @@ sub fetchInput {
{ uri => $uri
, storePath => $storePath
, sha256hash => $sha256
, revision => strftime "%Y%m%d%H%M%S", gmtime($timestamp)
, revision => (strftime "%Y%m%d%H%M%S", gmtime($timestamp)) . ':' . $sha256
};
}
@@ -1,13 +1,85 @@
package Hydra::Plugin::RunCommand;

use strict;
use warnings;
use parent 'Hydra::Plugin';
use experimental 'smartmatch';
use JSON;
use JSON::MaybeXS;
use File::Basename qw(dirname);
use File::Path qw(make_path);
use IPC::Run3;
use Try::Tiny;

sub isEnabled {
my ($self) = @_;
return defined $self->{config}->{runcommand};

return areStaticCommandsEnabled($self->{config}) || areDynamicCommandsEnabled($self->{config});
}

sub areStaticCommandsEnabled {
my ($config) = @_;

if (defined $config->{runcommand}) {
return 1;
}

return 0;
}

sub areDynamicCommandsEnabled {
my ($config) = @_;

if ((defined $config->{dynamicruncommand})
&& $config->{dynamicruncommand}->{enable}) {
return 1;
}

return 0;
}

sub isBuildEligibleForDynamicRunCommand {
my ($build) = @_;

if ($build->get_column("buildstatus") != 0) {
return 0;
}

if ($build->get_column("job") =~ "^runCommandHook\..+") {
my $out = $build->buildoutputs->find({name => "out"});
if (!defined $out) {
warn "DynamicRunCommand hook on " . $build->job . " (" . $build->id . ") rejected: no output named 'out'.";
return 0;
}

my $path = $out->path;
if (-l $path) {
$path = readlink($path);
}

if (! -e $path) {
warn "DynamicRunCommand hook on " . $build->job . " (" . $build->id . ") rejected: The 'out' output doesn't exist locally. This is a bug.";
return 0;
}

if (! -x $path) {
warn "DynamicRunCommand hook on " . $build->job . " (" . $build->id . ") rejected: The 'out' output is not executable.";
return 0;
}

if (! -f $path) {
warn "DynamicRunCommand hook on " . $build->job . " (" . $build->id . ") rejected: The 'out' output is not a regular file or symlink.";
return 0;
}

if (! $build->jobset->supportsDynamicRunCommand()) {
warn "DynamicRunCommand hook on " . $build->job . " (" . $build->id . ") rejected: The project or jobset don't have dynamic runcommand enabled.";
return 0;
}

return 1;
}

return 0;
}

sub configSectionMatches {
@@ -37,83 +109,165 @@ sub eventMatches {
return 0;
}

sub fanoutToCommands {
my ($config, $event, $build) = @_;

my @commands;

# Calculate all the statically defined commands to execute
my $cfg = $config->{runcommand};
my @config = defined $cfg ? ref $cfg eq "ARRAY" ? @$cfg : ($cfg) : ();

foreach my $conf (@config) {
my $matcher = $conf->{job} // "*:*:*";
next unless eventMatches($conf, $event);
next unless configSectionMatches(
$matcher,
$build->jobset->get_column('project'),
$build->jobset->get_column('name'),
$build->get_column('job')
);

if (!defined($conf->{command})) {
warn "<runcommand> section for '$matcher' lacks a 'command' option";
next;
}

push(@commands, {
matcher => $matcher,
command => $conf->{command},
})
}

# Calculate all dynamically defined commands to execute
if (areDynamicCommandsEnabled($config)) {
if (isBuildEligibleForDynamicRunCommand($build)) {
my $job = $build->get_column('job');
my $out = $build->buildoutputs->find({name => "out"});
push(@commands, {
matcher => "DynamicRunCommand($job)",
command => $out->path
})
}
}

return \@commands;
}

sub makeJsonPayload {
my ($event, $build) = @_;
my $json = {
event => $event,
build => $build->id,
finished => $build->get_column('finished') ? JSON::MaybeXS::true : JSON::MaybeXS::false,
timestamp => $build->get_column('timestamp'),
project => $build->project->get_column('name'),
jobset => $build->jobset->get_column('name'),
job => $build->get_column('job'),
drvPath => $build->get_column('drvpath'),
startTime => $build->get_column('starttime'),
stopTime => $build->get_column('stoptime'),
buildStatus => $build->get_column('buildstatus'),
nixName => $build->get_column('nixname'),
system => $build->get_column('system'),
homepage => $build->get_column('homepage'),
description => $build->get_column('description'),
license => $build->get_column('license'),
outputs => [],
products => [],
metrics => [],
};

for my $output ($build->buildoutputs) {
my $j = {
name => $output->name,
path => $output->path,
};
push @{$json->{outputs}}, $j;
}

for my $product ($build->buildproducts) {
my $j = {
productNr => $product->productnr,
type => $product->type,
subtype => $product->subtype,
fileSize => $product->filesize,
sha256hash => $product->sha256hash,
path => $product->path,
name => $product->name,
defaultPath => $product->defaultpath,
};
push @{$json->{products}}, $j;
}

for my $metric ($build->buildmetrics) {
my $j = {
name => $metric->name,
unit => $metric->unit,
value => 0 + $metric->value,
};
push @{$json->{metrics}}, $j;
}

return $json;
}

sub buildFinished {
my ($self, $build, $dependents) = @_;
my $event = "buildFinished";

my $cfg = $self->{config}->{runcommand};
my @config = defined $cfg ? ref $cfg eq "ARRAY" ? @$cfg : ($cfg) : ();
my $commandsToRun = fanoutToCommands(
$self->{config},
$event,
$build
);

my $tmp;
if (@$commandsToRun == 0) {
# No matching jobs, don't bother generating the JSON
return;
}

foreach my $conf (@config) {
next unless eventMatches($conf, $event);
next unless configSectionMatches(
$conf->{job} // "*:*:*",
$build->get_column('project'),
$build->get_column('jobset'),
$build->get_column('job'));
my $tmp = File::Temp->new(SUFFIX => '.json');
print $tmp encode_json(makeJsonPayload($event, $build)) or die;
$ENV{"HYDRA_JSON"} = $tmp->filename;

my $command = $conf->{command} // die "<runcommand> section lacks a 'command' option";
foreach my $commandToRun (@{$commandsToRun}) {
my $command = $commandToRun->{command};

unless (defined $tmp) {
$tmp = File::Temp->new(SUFFIX => '.json');
# todo: make all the to-run jobs "unstarted" in a batch, then start processing
my $runlog = $self->{db}->resultset("RunCommandLogs")->create({
job_matcher => $commandToRun->{matcher},
build_id => $build->get_column('id'),
command => $command
});

my $json = {
event => $event,
build => $build->id,
finished => $build->get_column('finished'),
timestamp => $build->get_column('timestamp'),
project => $build->get_column('project'),
jobset => $build->get_column('jobset'),
job => $build->get_column('job'),
drvPath => $build->get_column('drvpath'),
startTime => $build->get_column('starttime'),
stopTime => $build->get_column('stoptime'),
buildStatus => $build->get_column('buildstatus'),
outputs => [],
products => [],
metrics => [],
};
$runlog->started();

for my $output ($build->buildoutputs) {
my $j = {
name => $output->name,
path => $output->path,
};
push @{$json->{outputs}}, $j;
}
my $logPath = Hydra::Helper::Nix::constructRunCommandLogPath($runlog) or die "RunCommandLog not found.";
my $dir = dirname($logPath);
my $oldUmask = umask();
my $f;

for my $product ($build->buildproducts) {
my $j = {
productNr => $product->productnr,
type => $product->type,
subtype => $product->subtype,
fileSize => $product->filesize,
sha256hash => $product->sha256hash,
path => $product->path,
name => $product->name,
defaultPath => $product->defaultpath,
};
push @{$json->{products}}, $j;
}
try {
# file: 640, dir: 750
umask(0027);
make_path($dir);

for my $metric ($build->buildmetrics) {
my $j = {
name => $metric->name,
unit => $metric->unit,
value => 0 + $metric->value,
};
push @{$json->{metrics}}, $j;
}
open($f, '>', $logPath);
umask($oldUmask);

print $tmp encode_json($json) or die;
}
run3($command, \undef, $f, $f, { return_if_system_error => 1 }) == 1
or warn "notification command '$command' failed with exit status $? ($!)\n";

$ENV{"HYDRA_JSON"} = $tmp->filename;
close($f);

system("$command") == 0
or warn "notification command '$command' failed with exit status $?\n";
$runlog->completed_with_child_error($?, $!);
1;
} catch {
die "Died while trying to process RunCommand (${\$runlog->uuid}): $_";
} finally {
umask($oldUmask);
};
}
}
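Since the rewritten buildFinished still exports HYDRA_JSON pointing at the file written from makeJsonPayload, a configured command can consume the payload as follows. This is a hedged sketch of a hook script, not part of the commit; the field names follow makeJsonPayload above:

#!/usr/bin/env perl
use strict;
use warnings;
use JSON::MaybeXS;

# Read the payload file Hydra points us at via the environment.
my $path = $ENV{HYDRA_JSON} or die "HYDRA_JSON not set\n";
open(my $fh, '<', $path) or die "cannot open $path: $!\n";
my $payload = decode_json(do { local $/; <$fh> });
close($fh);

printf "%s: build %d of %s:%s:%s finished with status %d\n",
    $payload->{event}, $payload->{build}, $payload->{project},
    $payload->{jobset}, $payload->{job}, $payload->{buildStatus};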
@@ -1,6 +1,7 @@
package Hydra::Plugin::S3Backup;

use strict;
use warnings;
use parent 'Hydra::Plugin';
use File::Temp;
use File::Basename;
@@ -13,6 +14,7 @@ use Nix::Config;
use Nix::Store;
use Hydra::Model::DB;
use Hydra::Helper::CatalystUtils;
use Hydra::Helper::Nix;

sub isEnabled {
my ($self) = @_;
@@ -20,11 +22,18 @@ sub isEnabled {
}

my $client;
my %compressors = (
xz => "| $Nix::Config::xz",
bzip2 => "| $Nix::Config::bzip2",
none => ""
);
my %compressors = ();

$compressors{"none"} = "";

if (defined($Nix::Config::bzip2)) {
$compressors{"bzip2"} = "| $Nix::Config::bzip2",
}

if (defined($Nix::Config::xz)) {
$compressors{"xz"} = "| $Nix::Config::xz",
}

my $lockfile = Hydra::Model::DB::getHydraPath . "/.hydra-s3backup.lock";

sub buildFinished {
@@ -84,13 +93,14 @@ sub buildFinished {
my $hash = substr basename($path), 0, 32;
my ($deriver, $narHash, $time, $narSize, $refs) = queryPathInfo($path, 0);
my $system;
if (defined $deriver and isValidPath($deriver)) {
if (defined $deriver and $MACHINE_LOCAL_STORE->isValidPath($deriver)) {
$system = derivationFromPath($deriver)->{platform};
}
foreach my $reference (@{$refs}) {
push @needed_paths, $reference;
}
while (my ($compression_type, $configs) = each %compression_types) {
foreach my $compression_type (keys %compression_types) {
my $configs = $compression_types{$compression_type};
my @incomplete_buckets = ();
# Don't do any work if all the buckets have this path
foreach my $bucket_config (@{$configs}) {
@@ -136,7 +146,8 @@ sub buildFinished {
}

# Upload narinfos
while (my ($compression_type, $infos) = each %narinfos) {
foreach my $compression_type (keys %narinfos) {
my $infos = $narinfos{$compression_type};
foreach my $bucket_config (@{$compression_types{$compression_type}}) {
foreach my $info (@{$infos}) {
my $bucket = $client->bucket( name => $bucket_config->{name} );
@@ -1,11 +1,12 @@
package Hydra::Plugin::SlackNotification;

use strict;
use warnings;
use parent 'Hydra::Plugin';
use HTTP::Request;
use LWP::UserAgent;
use Hydra::Helper::CatalystUtils;
use JSON;
use JSON::MaybeXS;

=head1 NAME

@@ -77,7 +78,7 @@ sub renderDuration {
}

sub buildFinished {
my ($self, $build, $dependents) = @_;
my ($self, $topbuild, $dependents) = @_;
my $cfg = $self->{config}->{slack};
my @config = defined $cfg ? ref $cfg eq "ARRAY" ? @$cfg : ($cfg) : ();

@@ -86,12 +87,12 @@ sub buildFinished {
# Figure out to which channels to send notification. For each channel
# we send one aggregate message.
my %channels;
foreach my $b ($build, @{$dependents}) {
my $jobName = showJobName $b;
my $buildStatus = $b->buildstatus;
foreach my $build ($topbuild, @{$dependents}) {
my $jobName = showJobName $build;
my $buildStatus = $build->buildstatus;
my $cancelledOrAborted = $buildStatus == 4 || $buildStatus == 3;

my $prevBuild = getPreviousBuild($b);
my $prevBuild = getPreviousBuild($build);
my $sameAsPrevious = defined $prevBuild && ($buildStatus == $prevBuild->buildstatus);
my $prevBuildStatus = (defined $prevBuild) ? $prevBuild->buildstatus : -1;
my $prevBuildId = (defined $prevBuild) ? $prevBuild->id : -1;
@@ -114,34 +115,34 @@ sub buildFinished {

print STDERR "SlackNotification_Debug adding $jobName to the report list\n";
$channels{$channel->{url}} //= { channel => $channel, builds => [] };
push @{$channels{$channel->{url}}->{builds}}, $b;
push @{$channels{$channel->{url}}->{builds}}, $build;
}
}

return if scalar keys %channels == 0;

my ($authors, $nrCommits) = getResponsibleAuthors($build, $self->{plugins});
my ($authors, $nrCommits) = getResponsibleAuthors($topbuild, $self->{plugins});

# Send a message to each room.
foreach my $url (keys %channels) {
my $channel = $channels{$url};
my @deps = grep { $_->id != $build->id } @{$channel->{builds}};
my @deps = grep { $_->id != $topbuild->id } @{$channel->{builds}};

my $img =
$build->buildstatus == 0 ? "$baseurl/static/images/checkmark_256.png" :
$build->buildstatus == 2 ? "$baseurl/static/images/dependency_256.png" :
$build->buildstatus == 4 ? "$baseurl/static/images/cancelled_256.png" :
$topbuild->buildstatus == 0 ? "$baseurl/static/images/checkmark_256.png" :
$topbuild->buildstatus == 2 ? "$baseurl/static/images/dependency_256.png" :
$topbuild->buildstatus == 4 ? "$baseurl/static/images/cancelled_256.png" :
"$baseurl/static/images/error_256.png";

my $color =
$build->buildstatus == 0 ? "good" :
$build->buildstatus == 4 ? "warning" :
$topbuild->buildstatus == 0 ? "good" :
$topbuild->buildstatus == 4 ? "warning" :
"danger";

my $text = "";
$text .= "Job <$baseurl/job/${\$build->get_column('project')}/${\$build->get_column('jobset')}/${\$build->get_column('job')}|${\showJobName($build)}>";
$text .= "Job <$baseurl/job/${\$topbuild->jobset->get_column('project')}/${\$topbuild->jobset->get_column('name')}/${\$topbuild->get_column('job')}|${\showJobName($topbuild)}>";
$text .= " (and ${\scalar @deps} others)" if scalar @deps > 0;
$text .= ": <$baseurl/build/${\$build->id}|" . showStatus($build) . ">". " in " . renderDuration($build);
$text .= ": <$baseurl/build/${\$topbuild->id}|" . showStatus($topbuild) . ">". " in " . renderDuration($topbuild);

if (scalar keys %{$authors} > 0) {
# FIXME: escaping
@@ -155,12 +156,12 @@ sub buildFinished {

my $msg =
{ attachments =>
[{ fallback => "Job " . showJobName($build) . " build number " . $build->id . ": " . showStatus($build),
[{ fallback => "Job " . showJobName($topbuild) . " build number " . $topbuild->id . ": " . showStatus($topbuild),
text => $text,
thumb_url => $img,
color => $color,
title => "Job " . showJobName($build) . " build number " . $build->id,
title_link => "$baseurl/build/${\$build->id}"
title => "Job " . showJobName($topbuild) . " build number " . $topbuild->id,
title_link => "$baseurl/build/${\$topbuild->id}"
}]
};
@@ -1,6 +1,7 @@
package Hydra::Plugin::SoTest;

use strict;
use warnings;
use parent 'Hydra::Plugin';
use Hydra::Helper::CatalystUtils;
use HTTP::Request;
@@ -101,8 +102,8 @@ sub buildFinished {
open( $authfile, "<", $sotest->{authfile} )
or die "Cannot open Sotest authfile \${\$sotest->{authfile}}";

while (<$authfile>) {
if ( $_ =~ /(.+):(.+)/m ) {
while (my $line = <$authfile>) {
if ( $line =~ /(.+):(.+)/m ) {
$sotest_username = $1;
$sotest_password = $2;
}
@@ -1,11 +1,12 @@
package Hydra::Plugin::SubversionInput;

use strict;
use warnings;
use parent 'Hydra::Plugin';
use Digest::SHA qw(sha256_hex);
use Hydra::Helper::Exec;
use Hydra::Helper::Nix;
use IPC::Run;
use Nix::Store;

sub supportedInputTypes {
my ($self, $inputTypes) = @_;
@@ -43,9 +44,9 @@ sub fetchInput {
(my $cachedInput) = $self->{db}->resultset('CachedSubversionInputs')->search(
{uri => $uri, revision => $revision});

addTempRoot($cachedInput->storepath) if defined $cachedInput;
$MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput;

if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
$storePath = $cachedInput->storepath;
$sha256 = $cachedInput->sha256hash;
} else {
@@ -60,16 +61,16 @@ sub fetchInput {
die "error checking out Subversion repo at `$uri':\n$stderr" if $res;

if ($type eq "svn-checkout") {
$storePath = addToStore($wcPath, 1, "sha256");
$storePath = $MACHINE_LOCAL_STORE->addToStore($wcPath, 1, "sha256");
} else {
# Hm, if the Nix Perl bindings supported filters in
# addToStore(), then we wouldn't need to make a copy here.
my $tmpDir = File::Temp->newdir("hydra-svn-export.XXXXXX", CLEANUP => 1, TMPDIR => 1) or die;
(system "svn", "export", $wcPath, "$tmpDir/source", "--quiet") == 0 or die "svn export failed";
$storePath = addToStore("$tmpDir/source", 1, "sha256");
$storePath = $MACHINE_LOCAL_STORE->addToStore("$tmpDir/source", 1, "sha256");
}

$sha256 = queryPathHash($storePath); $sha256 =~ s/sha256://;
$sha256 = $MACHINE_LOCAL_STORE->queryPathHash($storePath); $sha256 =~ s/sha256://;

$self->{db}->txn_do(sub {
$self->{db}->resultset('CachedSubversionInputs')->update_or_create(
src/lib/Hydra/PostgresListener.pm (new file, 105 lines)
@@ -0,0 +1,105 @@
package Hydra::PostgresListener;

use strict;
use warnings;
use IO::Select;

=head1 Hydra::PostgresListener

An abstraction around using Postgres' LISTEN / NOTIFY in an event loop.

=cut

=head2 new

Arguments:

=over 1

=item C<$dbh>

L<DBI::db> The database connection.

=back

=cut

sub new {
my ($self, $dbh) = @_;
my $sel = IO::Select->new($dbh->func("getfd"));

return bless {
"dbh" => $dbh,
"sel" => $sel,
}, $self;
}

=head2 subscribe

Subscribe to the named channel for messages.

Arguments:

=over 1

=item C<$channel>

The channel name.

=back

=cut

sub subscribe {
my ($self, $channel) = @_;
$channel = $self->{'dbh'}->quote_identifier($channel);
$self->{'dbh'}->do("listen $channel");
}

=head2 block_for_messages

Wait for messages to arrive within the specified timeout.

Arguments:

=over 1

=item C<$timeout>

The maximum number of seconds to wait for messages.

Optional: if unspecified, block forever.

=back

Returns: a sub; call the sub repeatedly to get a message. The sub
will return undef when there are no pending messages.

Example:

my $events = $listener->block_for_messages();
while (my $message = $events->()) {
...
}

=cut

sub block_for_messages {
my ($self, $timeout) = @_;

$self->{'sel'}->can_read($timeout);

return sub {
my $notify = $self->{'dbh'}->func("pg_notifies");
if (defined($notify)) {
my ($channelName, $pid, $payload) = @$notify;
return {
channel => $channelName,
pid => $pid,
payload => $payload,
}
} else {
return undef
}
}
}

1;
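Putting the POD together, a minimal end-to-end sketch (the DBD::Pg connection string and channel name here are hypothetical, not taken from the commit):

use strict;
use warnings;
use DBI;
use Hydra::PostgresListener;

my $dbh = DBI->connect("dbi:Pg:dbname=hydra", "", "", { AutoCommit => 1 });
my $listener = Hydra::PostgresListener->new($dbh);
$listener->subscribe("build_finished");   # hypothetical channel

# Wait up to 30 seconds, then drain whatever arrived.
my $events = $listener->block_for_messages(30);
while (my $message = $events->()) {
    print "$message->{channel} (pid $message->{pid}): $message->{payload}\n";
}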
@@ -9,11 +9,11 @@ use warnings;

use base 'DBIx::Class::Schema';

__PACKAGE__->load_classes;
__PACKAGE__->load_namespaces;


# Created by DBIx::Class::Schema::Loader v0.07014 @ 2011-12-05 14:08:56
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:BpPjKT5Pb1RYMHo+oKdZ+w
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-08-26 12:02:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:SW2xMZ1YBA/LJSnP+ClOfA


# You can replace this text with custom content, and it will be preserved on regeneration
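The load_classes to load_namespaces switch is what drives the renames below: load_namespaces discovers result classes under a Result:: sub-namespace by convention, rather than loading every class directly under the schema package. A generic DBIx::Class sketch (hypothetical schema name):

package My::Schema;
use strict;
use warnings;
use base 'DBIx::Class::Schema';

# Finds My::Schema::Result::* (and My::Schema::ResultSet::*) automatically,
# instead of loading every My::Schema::* class as load_classes did.
__PACKAGE__->load_namespaces;

1;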
@@ -1,12 +1,12 @@
use utf8;
package Hydra::Schema::AggregateConstituents;
package Hydra::Schema::Result::AggregateConstituents;

# Created by DBIx::Class::Schema::Loader
# DO NOT MODIFY THE FIRST PART OF THIS FILE

=head1 NAME

Hydra::Schema::AggregateConstituents
Hydra::Schema::Result::AggregateConstituents

=cut

@@ -76,13 +76,13 @@ __PACKAGE__->set_primary_key("aggregate", "constituent");

Type: belongs_to

Related object: L<Hydra::Schema::Builds>
Related object: L<Hydra::Schema::Result::Builds>

=cut

__PACKAGE__->belongs_to(
"aggregate",
"Hydra::Schema::Builds",
"Hydra::Schema::Result::Builds",
{ id => "aggregate" },
{ is_deferrable => 0, on_delete => "CASCADE", on_update => "NO ACTION" },
);
@@ -91,20 +91,20 @@ __PACKAGE__->belongs_to(

Type: belongs_to

Related object: L<Hydra::Schema::Builds>
Related object: L<Hydra::Schema::Result::Builds>

=cut

__PACKAGE__->belongs_to(
"constituent",
"Hydra::Schema::Builds",
"Hydra::Schema::Result::Builds",
{ id => "constituent" },
{ is_deferrable => 0, on_delete => "CASCADE", on_update => "NO ACTION" },
);


# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:bQfQoSstlaFy7zw8i1R+ow
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-08-26 12:02:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:ksBE6gQqcu58rdZfbqEK/w


# You can replace this text with custom code or comments, and it will be preserved on regeneration
@@ -1,12 +1,12 @@
use utf8;
package Hydra::Schema::BuildInputs;
package Hydra::Schema::Result::BuildInputs;

# Created by DBIx::Class::Schema::Loader
# DO NOT MODIFY THE FIRST PART OF THIS FILE

=head1 NAME

Hydra::Schema::BuildInputs
Hydra::Schema::Result::BuildInputs

=cut

@@ -145,13 +145,13 @@ __PACKAGE__->set_primary_key("id");

Type: belongs_to

Related object: L<Hydra::Schema::Builds>
Related object: L<Hydra::Schema::Result::Builds>

=cut

__PACKAGE__->belongs_to(
"build",
"Hydra::Schema::Builds",
"Hydra::Schema::Result::Builds",
{ id => "build" },
{
is_deferrable => 0,
@@ -165,13 +165,13 @@ __PACKAGE__->belongs_to(

Type: belongs_to

Related object: L<Hydra::Schema::Builds>
Related object: L<Hydra::Schema::Result::Builds>

=cut

__PACKAGE__->belongs_to(
"dependency",
"Hydra::Schema::Builds",
"Hydra::Schema::Result::Builds",
{ id => "dependency" },
{
is_deferrable => 0,
@@ -182,8 +182,8 @@ __PACKAGE__->belongs_to(
);


# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:/Fwb8emBsvwrZlEab2X+gQ
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-08-26 12:02:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:IBNdR4VPMGusDQex5omT+g

my %hint = (
columns => [
@@ -1,12 +1,12 @@
use utf8;
package Hydra::Schema::BuildMetrics;
package Hydra::Schema::Result::BuildMetrics;

# Created by DBIx::Class::Schema::Loader
# DO NOT MODIFY THE FIRST PART OF THIS FILE

=head1 NAME

Hydra::Schema::BuildMetrics
Hydra::Schema::Result::BuildMetrics

=cut

@@ -119,13 +119,13 @@ __PACKAGE__->set_primary_key("build", "name");

Type: belongs_to

Related object: L<Hydra::Schema::Builds>
Related object: L<Hydra::Schema::Result::Builds>

=cut

__PACKAGE__->belongs_to(
"build",
"Hydra::Schema::Builds",
"Hydra::Schema::Result::Builds",
{ id => "build" },
{ is_deferrable => 0, on_delete => "CASCADE", on_update => "NO ACTION" },
);
@@ -134,13 +134,13 @@ __PACKAGE__->belongs_to(

Type: belongs_to

Related object: L<Hydra::Schema::Jobsets>
Related object: L<Hydra::Schema::Result::Jobsets>

=cut

__PACKAGE__->belongs_to(
"jobset",
"Hydra::Schema::Jobsets",
"Hydra::Schema::Result::Jobsets",
{ name => "jobset", project => "project" },
{ is_deferrable => 0, on_delete => "NO ACTION", on_update => "CASCADE" },
);
@@ -149,20 +149,20 @@ __PACKAGE__->belongs_to(

Type: belongs_to

Related object: L<Hydra::Schema::Projects>
Related object: L<Hydra::Schema::Result::Projects>

=cut

__PACKAGE__->belongs_to(
"project",
"Hydra::Schema::Projects",
"Hydra::Schema::Result::Projects",
{ name => "project" },
{ is_deferrable => 0, on_delete => "NO ACTION", on_update => "CASCADE" },
);


# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-05-27 17:40:41
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:AYUVs6RdefFKw+g9Yxcu/A
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-08-26 12:02:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:yp/kl6bkrm/CSEle7Y3How

sub json_hint {
return { columns => ['value', 'unit'] };
@@ -1,12 +1,12 @@
use utf8;
package Hydra::Schema::BuildOutputs;
package Hydra::Schema::Result::BuildOutputs;

# Created by DBIx::Class::Schema::Loader
# DO NOT MODIFY THE FIRST PART OF THIS FILE

=head1 NAME

Hydra::Schema::BuildOutputs
Hydra::Schema::Result::BuildOutputs

=cut

@@ -49,7 +49,7 @@ __PACKAGE__->table("buildoutputs");
=head2 path

data_type: 'text'
is_nullable: 0
is_nullable: 1

=cut

@@ -59,7 +59,7 @@ __PACKAGE__->add_columns(
"name",
{ data_type => "text", is_nullable => 0 },
"path",
{ data_type => "text", is_nullable => 0 },
{ data_type => "text", is_nullable => 1 },
);

=head1 PRIMARY KEY

@@ -82,20 +82,20 @@ __PACKAGE__->set_primary_key("build", "name");

Type: belongs_to

Related object: L<Hydra::Schema::Builds>
Related object: L<Hydra::Schema::Result::Builds>

=cut

__PACKAGE__->belongs_to(
"build",
"Hydra::Schema::Builds",
"Hydra::Schema::Result::Builds",
{ id => "build" },
{ is_deferrable => 0, on_delete => "CASCADE", on_update => "NO ACTION" },
);


# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:71R9clwAP6vzDh10EukTaw
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2022-06-30 12:02:32
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:Jsabm3YTcI7YvCuNdKP5Ng

my %hint = (
columns => [
@@ -1,12 +1,12 @@
use utf8;
package Hydra::Schema::BuildProducts;
package Hydra::Schema::Result::BuildProducts;

# Created by DBIx::Class::Schema::Loader
# DO NOT MODIFY THE FIRST PART OF THIS FILE

=head1 NAME

Hydra::Schema::BuildProducts
Hydra::Schema::Result::BuildProducts

=cut

@@ -124,20 +124,20 @@ __PACKAGE__->set_primary_key("build", "productnr");

Type: belongs_to

Related object: L<Hydra::Schema::Builds>
Related object: L<Hydra::Schema::Result::Builds>

=cut

__PACKAGE__->belongs_to(
"build",
"Hydra::Schema::Builds",
"Hydra::Schema::Result::Builds",
{ id => "build" },
{ is_deferrable => 0, on_delete => "CASCADE", on_update => "NO ACTION" },
);


# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-07-27 18:21:03
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:O4R8b/GukNaUmmAErb3Jlw
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-08-26 12:02:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:6vyZPg5I2zbgpw1a6JsVjw

my %hint = (
columns => [
@@ -1,12 +1,12 @@
use utf8;
package Hydra::Schema::BuildStepOutputs;
package Hydra::Schema::Result::BuildStepOutputs;

# Created by DBIx::Class::Schema::Loader
# DO NOT MODIFY THE FIRST PART OF THIS FILE

=head1 NAME

Hydra::Schema::BuildStepOutputs
Hydra::Schema::Result::BuildStepOutputs

=cut

@@ -55,7 +55,7 @@ __PACKAGE__->table("buildstepoutputs");
=head2 path

data_type: 'text'
is_nullable: 0
is_nullable: 1

=cut

@@ -67,7 +67,7 @@ __PACKAGE__->add_columns(
"name",
{ data_type => "text", is_nullable => 0 },
"path",
{ data_type => "text", is_nullable => 0 },
{ data_type => "text", is_nullable => 1 },
);

=head1 PRIMARY KEY

@@ -92,13 +92,13 @@ __PACKAGE__->set_primary_key("build", "stepnr", "name");

Type: belongs_to

Related object: L<Hydra::Schema::Builds>
Related object: L<Hydra::Schema::Result::Builds>

=cut

__PACKAGE__->belongs_to(
"build",
"Hydra::Schema::Builds",
"Hydra::Schema::Result::Builds",
{ id => "build" },
{ is_deferrable => 0, on_delete => "CASCADE", on_update => "NO ACTION" },
);
@@ -107,20 +107,20 @@ __PACKAGE__->belongs_to(

Type: belongs_to

Related object: L<Hydra::Schema::BuildSteps>
Related object: L<Hydra::Schema::Result::BuildSteps>

=cut

__PACKAGE__->belongs_to(
"buildstep",
"Hydra::Schema::BuildSteps",
"Hydra::Schema::Result::BuildSteps",
{ build => "build", stepnr => "stepnr" },
{ is_deferrable => 0, on_delete => "CASCADE", on_update => "NO ACTION" },
);


# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:Y6DpbTM6z4cOGoYIhD3i1A
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2022-06-30 12:02:32
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:Bad70CRTt7zb2GGuRoQ++Q


# You can replace this text with custom code or comments, and it will be preserved on regeneration
@@ -1,12 +1,12 @@
use utf8;
-package Hydra::Schema::BuildSteps;
+package Hydra::Schema::Result::BuildSteps;

# Created by DBIx::Class::Schema::Loader
# DO NOT MODIFY THE FIRST PART OF THIS FILE

=head1 NAME

-Hydra::Schema::BuildSteps
+Hydra::Schema::Result::BuildSteps

=cut

@@ -168,13 +168,13 @@ __PACKAGE__->set_primary_key("build", "stepnr");

Type: belongs_to

-Related object: L<Hydra::Schema::Builds>
+Related object: L<Hydra::Schema::Result::Builds>

=cut

__PACKAGE__->belongs_to(
  "build",
-  "Hydra::Schema::Builds",
+  "Hydra::Schema::Result::Builds",
  { id => "build" },
  { is_deferrable => 0, on_delete => "CASCADE", on_update => "NO ACTION" },
);
@@ -183,13 +183,13 @@ __PACKAGE__->belongs_to(

Type: has_many

-Related object: L<Hydra::Schema::BuildStepOutputs>
+Related object: L<Hydra::Schema::Result::BuildStepOutputs>

=cut

__PACKAGE__->has_many(
  "buildstepoutputs",
-  "Hydra::Schema::BuildStepOutputs",
+  "Hydra::Schema::Result::BuildStepOutputs",
  { "foreign.build" => "self.build", "foreign.stepnr" => "self.stepnr" },
  undef,
);
@@ -198,13 +198,13 @@ __PACKAGE__->has_many(

Type: belongs_to

-Related object: L<Hydra::Schema::Builds>
+Related object: L<Hydra::Schema::Result::Builds>

=cut

__PACKAGE__->belongs_to(
  "propagatedfrom",
-  "Hydra::Schema::Builds",
+  "Hydra::Schema::Result::Builds",
  { id => "propagatedfrom" },
  {
    is_deferrable => 0,
@@ -215,8 +215,8 @@ __PACKAGE__->belongs_to(
);


-# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
-# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:AMjHq4g/fSUv/lZuZOljYg
+# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-08-26 12:02:36
+# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:GzztRd7OwomaT3Xi7NB2RQ

my %hint = (
    columns => [
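A hypothetical traversal of the relationships declared above (the connection details and $build_id are assumed, not taken from this diff): belongs_to and has_many install accessors named after the relationship, so rows can be walked in both directions.

    use strict;
    use warnings;
    use Hydra::Schema;

    my $schema   = Hydra::Schema->connect('dbi:Pg:dbname=hydra');  # assumed DSN
    my $build_id = 1;                                              # assumed id

    my $step = $schema->resultset('BuildSteps')
                      ->find({ build => $build_id, stepnr => 1 });
    my $build   = $step->build;             # belongs_to "build" => Result::Builds
    my @outputs = $step->buildstepoutputs;  # has_many => Result::BuildStepOutputs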
@@ -1,12 +1,12 @@
use utf8;
-package Hydra::Schema::Builds;
+package Hydra::Schema::Result::Builds;

# Created by DBIx::Class::Schema::Loader
# DO NOT MODIFY THE FIRST PART OF THIS FILE

=head1 NAME

-Hydra::Schema::Builds
+Hydra::Schema::Result::Builds

=cut

@@ -52,18 +52,6 @@ __PACKAGE__->table("builds");
  data_type: 'integer'
  is_nullable: 0

-=head2 project
-
-  data_type: 'text'
-  is_foreign_key: 1
-  is_nullable: 0
-
-=head2 jobset
-
-  data_type: 'text'
-  is_foreign_key: 1
-  is_nullable: 0
-
=head2 jobset_id

  data_type: 'integer'
@@ -206,10 +194,6 @@ __PACKAGE__->add_columns(
  { data_type => "integer", is_nullable => 0 },
  "timestamp",
  { data_type => "integer", is_nullable => 0 },
-  "project",
-  { data_type => "text", is_foreign_key => 1, is_nullable => 0 },
-  "jobset",
-  { data_type => "text", is_foreign_key => 1, is_nullable => 0 },
  "jobset_id",
  { data_type => "integer", is_foreign_key => 1, is_nullable => 0 },
  "job",
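The two hunks above drop the denormalized project and jobset text columns from builds; only jobset_id survives. A short sketch of how callers reach those values now ($build is an assumed row object; compare the sub project added near the end of this file's diff):

    my $jobset  = $build->jobset;                  # belongs_to via jobset_id
    my $project = $jobset->get_column('project');  # previously $build->project
    my $name    = $jobset->name;                   # previously $build->jobset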
@@ -278,13 +262,13 @@ __PACKAGE__->set_primary_key("id");

Type: has_many

-Related object: L<Hydra::Schema::AggregateConstituents>
+Related object: L<Hydra::Schema::Result::AggregateConstituents>

=cut

__PACKAGE__->has_many(
  "aggregateconstituents_aggregates",
-  "Hydra::Schema::AggregateConstituents",
+  "Hydra::Schema::Result::AggregateConstituents",
  { "foreign.aggregate" => "self.id" },
  undef,
);
@@ -293,13 +277,13 @@ __PACKAGE__->has_many(

Type: has_many

-Related object: L<Hydra::Schema::AggregateConstituents>
+Related object: L<Hydra::Schema::Result::AggregateConstituents>

=cut

__PACKAGE__->has_many(
  "aggregateconstituents_constituents",
-  "Hydra::Schema::AggregateConstituents",
+  "Hydra::Schema::Result::AggregateConstituents",
  { "foreign.constituent" => "self.id" },
  undef,
);
@@ -308,13 +292,13 @@ __PACKAGE__->has_many(

Type: has_many

-Related object: L<Hydra::Schema::BuildInputs>
+Related object: L<Hydra::Schema::Result::BuildInputs>

=cut

__PACKAGE__->has_many(
  "buildinputs_builds",
-  "Hydra::Schema::BuildInputs",
+  "Hydra::Schema::Result::BuildInputs",
  { "foreign.build" => "self.id" },
  undef,
);
@@ -323,13 +307,13 @@ __PACKAGE__->has_many(

Type: has_many

-Related object: L<Hydra::Schema::BuildInputs>
+Related object: L<Hydra::Schema::Result::BuildInputs>

=cut

__PACKAGE__->has_many(
  "buildinputs_dependencies",
-  "Hydra::Schema::BuildInputs",
+  "Hydra::Schema::Result::BuildInputs",
  { "foreign.dependency" => "self.id" },
  undef,
);
@@ -338,13 +322,13 @@ __PACKAGE__->has_many(

Type: has_many

-Related object: L<Hydra::Schema::BuildMetrics>
+Related object: L<Hydra::Schema::Result::BuildMetrics>

=cut

__PACKAGE__->has_many(
  "buildmetrics",
-  "Hydra::Schema::BuildMetrics",
+  "Hydra::Schema::Result::BuildMetrics",
  { "foreign.build" => "self.id" },
  undef,
);
@@ -353,13 +337,13 @@ __PACKAGE__->has_many(

Type: has_many

-Related object: L<Hydra::Schema::BuildOutputs>
+Related object: L<Hydra::Schema::Result::BuildOutputs>

=cut

__PACKAGE__->has_many(
  "buildoutputs",
-  "Hydra::Schema::BuildOutputs",
+  "Hydra::Schema::Result::BuildOutputs",
  { "foreign.build" => "self.id" },
  undef,
);
@@ -368,13 +352,13 @@ __PACKAGE__->has_many(

Type: has_many

-Related object: L<Hydra::Schema::BuildProducts>
+Related object: L<Hydra::Schema::Result::BuildProducts>

=cut

__PACKAGE__->has_many(
  "buildproducts",
-  "Hydra::Schema::BuildProducts",
+  "Hydra::Schema::Result::BuildProducts",
  { "foreign.build" => "self.id" },
  undef,
);
@@ -383,13 +367,13 @@ __PACKAGE__->has_many(

Type: has_many

-Related object: L<Hydra::Schema::BuildStepOutputs>
+Related object: L<Hydra::Schema::Result::BuildStepOutputs>

=cut

__PACKAGE__->has_many(
  "buildstepoutputs",
-  "Hydra::Schema::BuildStepOutputs",
+  "Hydra::Schema::Result::BuildStepOutputs",
  { "foreign.build" => "self.id" },
  undef,
);
@@ -398,13 +382,13 @@ __PACKAGE__->has_many(

Type: has_many

-Related object: L<Hydra::Schema::BuildSteps>
+Related object: L<Hydra::Schema::Result::BuildSteps>

=cut

__PACKAGE__->has_many(
  "buildsteps",
-  "Hydra::Schema::BuildSteps",
+  "Hydra::Schema::Result::BuildSteps",
  { "foreign.build" => "self.id" },
  undef,
);
@@ -413,13 +397,13 @@ __PACKAGE__->has_many(

Type: has_many

-Related object: L<Hydra::Schema::BuildSteps>
+Related object: L<Hydra::Schema::Result::BuildSteps>

=cut

__PACKAGE__->has_many(
  "buildsteps_propagatedfroms",
-  "Hydra::Schema::BuildSteps",
+  "Hydra::Schema::Result::BuildSteps",
  { "foreign.propagatedfrom" => "self.id" },
  undef,
);
@@ -428,43 +412,28 @@ __PACKAGE__->has_many(

Type: belongs_to

-Related object: L<Hydra::Schema::Jobsets>
+Related object: L<Hydra::Schema::Result::Jobsets>

=cut

__PACKAGE__->belongs_to(
  "jobset",
-  "Hydra::Schema::Jobsets",
+  "Hydra::Schema::Result::Jobsets",
  { id => "jobset_id" },
  { is_deferrable => 0, on_delete => "CASCADE", on_update => "NO ACTION" },
);

-=head2 jobset_project_jobset
-
-Type: belongs_to
-
-Related object: L<Hydra::Schema::Jobsets>
-
-=cut
-
-__PACKAGE__->belongs_to(
-  "jobset_project_jobset",
-  "Hydra::Schema::Jobsets",
-  { name => "jobset", project => "project" },
-  { is_deferrable => 0, on_delete => "NO ACTION", on_update => "CASCADE" },
-);
-
=head2 jobsetevalinputs

Type: has_many

-Related object: L<Hydra::Schema::JobsetEvalInputs>
+Related object: L<Hydra::Schema::Result::JobsetEvalInputs>

=cut

__PACKAGE__->has_many(
  "jobsetevalinputs",
-  "Hydra::Schema::JobsetEvalInputs",
+  "Hydra::Schema::Result::JobsetEvalInputs",
  { "foreign.dependency" => "self.id" },
  undef,
);
@@ -473,30 +442,30 @@ __PACKAGE__->has_many(

Type: has_many

-Related object: L<Hydra::Schema::JobsetEvalMembers>
+Related object: L<Hydra::Schema::Result::JobsetEvalMembers>

=cut

__PACKAGE__->has_many(
  "jobsetevalmembers",
-  "Hydra::Schema::JobsetEvalMembers",
+  "Hydra::Schema::Result::JobsetEvalMembers",
  { "foreign.build" => "self.id" },
  undef,
);

-=head2 project
+=head2 runcommandlogs

-Type: belongs_to
+Type: has_many

-Related object: L<Hydra::Schema::Projects>
+Related object: L<Hydra::Schema::Result::RunCommandLogs>

=cut

-__PACKAGE__->belongs_to(
-  "project",
-  "Hydra::Schema::Projects",
-  { name => "project" },
-  { is_deferrable => 0, on_delete => "NO ACTION", on_update => "CASCADE" },
+__PACKAGE__->has_many(
+  "runcommandlogs",
+  "Hydra::Schema::Result::RunCommandLogs",
+  { "foreign.build_id" => "self.id" },
+  undef,
);

=head2 aggregates
@@ -528,12 +497,12 @@ __PACKAGE__->many_to_many(
);


-# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-01-22 07:11:57
-# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:Df5N0EByYJqoSUqA0dld/A
+# Created by DBIx::Class::Schema::Loader v0.07049 @ 2022-01-10 09:43:38
+# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:DQF8KRinnf0imJOP+lvH9Q

__PACKAGE__->has_many(
  "dependents",
-  "Hydra::Schema::BuildInputs",
+  "Hydra::Schema::Result::BuildInputs",
  { "foreign.dependency" => "self.id" },
);

@@ -541,13 +510,13 @@ __PACKAGE__->many_to_many(dependentBuilds => 'dependents', 'build');

__PACKAGE__->has_many(
  "inputs",
-  "Hydra::Schema::BuildInputs",
+  "Hydra::Schema::Result::BuildInputs",
  { "foreign.build" => "self.id" },
);

__PACKAGE__->has_one(
  "actualBuildStep",
-  "Hydra::Schema::BuildSteps",
+  "Hydra::Schema::Result::BuildSteps",
  { 'foreign.drvpath' => 'self.drvpath'
  , 'foreign.build' => 'self.id'
  },
@@ -569,7 +538,7 @@ sub makeSource {
sub makeQueries {
    my ($name, $constraint) = @_;

-    my $activeJobs = "(select distinct project, jobset, job, system from Builds where isCurrent = 1 $constraint)";
+    my $activeJobs = "(select distinct jobset_id, job, system from Builds where isCurrent = 1 $constraint)";

    makeSource(
        "LatestSucceeded$name",
@@ -579,7 +548,7 @@ sub makeQueries {
    (select
      (select max(b.id) from builds b
       where
-        project = activeJobs.project and jobset = activeJobs.jobset
+        jobset_id = activeJobs.jobset_id
        and job = activeJobs.job and system = activeJobs.system
        and finished = 1 and buildstatus = 0
      ) as id
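The makeQueries hunks above change the join key of the generated LatestSucceeded* views: the activeJobs subselect no longer matches on the project and jobset text columns but on jobset_id alone. An illustration of the string interpolation for the ForJobset variant, using only the pattern shown above (nothing new is assumed beyond it):

    my $constraint = "and jobset_id = ?";
    my $activeJobs = "(select distinct jobset_id, job, system from Builds"
                   . " where isCurrent = 1 $constraint)";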
@@ -591,41 +560,46 @@ QUERY
}

makeQueries('', "");
-makeQueries('ForProject', "and project = ?");
+makeQueries('ForProject', "and jobset_id in (select id from jobsets j where j.project = ?)");
makeQueries('ForJobset', "and jobset_id = ?");
makeQueries('ForJob', "and jobset_id = ? and job = ?");
makeQueries('ForJobName', "and jobset_id = (select id from jobsets j where j.name = ?) and job = ?");

-my %hint = (
-    columns => [
-        'id',
-        'finished',
-        'timestamp',
-        'starttime',
-        'stoptime',
-        'project',
-        'jobset',
-        'job',
-        'nixname',
-        'system',
-        'priority',
-        'buildstatus',
-        'releasename',
-        'drvpath',
-    ],
-    relations => {
-        jobsetevals => 'id'
-    },
-    eager_relations => {
-        buildoutputs => 'name',
-        buildproducts => 'productnr',
-        buildmetrics => 'name',
-    }
-);
-
-sub json_hint {
-    return \%hint;
+sub as_json {
+    my ($self) = @_;
+
+    # After #1093 merges this can become $self->jobset;
+    # However, with ->jobset being a column on master
+    # it seems DBIX gets a bit confused.
+    my ($jobset) = $self->search_related('jobset')->first;
+
+    my $json = {
+        id => $self->get_column('id'),
+        finished => $self->get_column('finished'),
+        timestamp => $self->get_column('timestamp'),
+        starttime => $self->get_column('starttime'),
+        stoptime => $self->get_column('stoptime'),
+        project => $jobset->get_column('project'),
+        jobset => $jobset->name,
+        job => $self->get_column('job'),
+        nixname => $self->get_column('nixname'),
+        system => $self->get_column('system'),
+        priority => $self->get_column('priority'),
+        buildstatus => $self->get_column('buildstatus'),
+        releasename => $self->get_column('releasename'),
+        drvpath => $self->get_column('drvpath'),
+        jobsetevals => [ map { $_->id } $self->jobsetevals ],
+        buildoutputs => { map { $_->name => $_ } $self->buildoutputs },
+        buildproducts => { map { $_->productnr => $_ } $self->buildproducts },
+        buildmetrics => { map { $_->name => $_ } $self->buildmetrics },
+    };
+
+    return $json;
}

sub project {
    my ($self) = @_;
    return $self->jobset->project;
}

1;
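A hypothetical caller of the reworked as_json above ($schema as in the earlier sketch, $build_id assumed): it now returns a plain hashref, with project and jobset resolved through the jobset relationship instead of being read from the dropped columns.

    my $build = $schema->resultset('Builds')->find($build_id);
    my $data  = $build->as_json;
    printf "build %s (%s:%s) status %s\n",
        $data->{id}, $data->{project}, $data->{jobset}, $data->{buildstatus};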
@@ -1,12 +1,12 @@
use utf8;
-package Hydra::Schema::CachedBazaarInputs;
+package Hydra::Schema::Result::CachedBazaarInputs;

# Created by DBIx::Class::Schema::Loader
# DO NOT MODIFY THE FIRST PART OF THIS FILE

=head1 NAME

-Hydra::Schema::CachedBazaarInputs
+Hydra::Schema::Result::CachedBazaarInputs

=cut

@@ -83,8 +83,8 @@ __PACKAGE__->add_columns(
__PACKAGE__->set_primary_key("uri", "revision");


-# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
-# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:X8L4C57lMOctdqOKSmfA/g
+# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-08-26 12:02:36
+# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:/9cCRtmGzlDGxjqBEPI2Mw


# You can replace this text with custom content, and it will be preserved on regeneration
@@ -1,12 +1,12 @@
use utf8;
-package Hydra::Schema::CachedCVSInputs;
+package Hydra::Schema::Result::CachedCVSInputs;

# Created by DBIx::Class::Schema::Loader
# DO NOT MODIFY THE FIRST PART OF THIS FILE

=head1 NAME

-Hydra::Schema::CachedCVSInputs
+Hydra::Schema::Result::CachedCVSInputs

=cut

@@ -99,8 +99,8 @@ __PACKAGE__->add_columns(
__PACKAGE__->set_primary_key("uri", "module", "sha256hash");


-# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
-# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:6eQ+i/th+oVZNRiDPd2luA
+# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-08-26 12:02:36
+# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:yQt8poWCs/wI6WbE4/YdxA

# You can replace this text with custom content, and it will be preserved on regeneration
1;
@@ -1,12 +1,12 @@
use utf8;
-package Hydra::Schema::CachedDarcsInputs;
+package Hydra::Schema::Result::CachedDarcsInputs;

# Created by DBIx::Class::Schema::Loader
# DO NOT MODIFY THE FIRST PART OF THIS FILE

=head1 NAME

-Hydra::Schema::CachedDarcsInputs
+Hydra::Schema::Result::CachedDarcsInputs

=cut

@@ -90,8 +90,8 @@ __PACKAGE__->add_columns(
__PACKAGE__->set_primary_key("uri", "revision");


-# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
-# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:Buwq42sBXQVfYUy01WMyYw
+# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-08-26 12:02:36
+# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:20pTv4R98jXytvlWbriWRg


# You can replace this text with custom code or comments, and it will be preserved on regeneration
@@ -1,12 +1,12 @@
use utf8;
-package Hydra::Schema::CachedGitInputs;
+package Hydra::Schema::Result::CachedGitInputs;

# Created by DBIx::Class::Schema::Loader
# DO NOT MODIFY THE FIRST PART OF THIS FILE

=head1 NAME

-Hydra::Schema::CachedGitInputs
+Hydra::Schema::Result::CachedGitInputs

=cut

@@ -50,6 +50,11 @@ __PACKAGE__->table("cachedgitinputs");
  data_type: 'text'
  is_nullable: 0

+=head2 isdeepclone
+
+  data_type: 'boolean'
+  is_nullable: 0
+
=head2 sha256hash

  data_type: 'text'
@@ -69,6 +74,8 @@ __PACKAGE__->add_columns(
  { data_type => "text", is_nullable => 0 },
  "revision",
  { data_type => "text", is_nullable => 0 },
+  "isdeepclone",
+  { data_type => "boolean", is_nullable => 0 },
  "sha256hash",
  { data_type => "text", is_nullable => 0 },
  "storepath",
@@ -85,14 +92,16 @@ __PACKAGE__->add_columns(

=item * L</revision>

+=item * L</isdeepclone>
+
=back

=cut

-__PACKAGE__->set_primary_key("uri", "branch", "revision");
+__PACKAGE__->set_primary_key("uri", "branch", "revision", "isdeepclone");


-# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
-# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:0sdK9uQZpx869oqS5thRLw
+# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-08-26 12:02:36
+# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:IxG58lqBfLgZ8RTZm1GQKA

1;
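Because isdeepclone is now part of the primary key above, a cache lookup has to supply all four key columns. A minimal sketch ($schema as in the earlier sketch; the scalars are assumed, the column names come from the hunk):

    my $cached = $schema->resultset('CachedGitInputs')->find({
        uri         => $uri,
        branch      => $branch,
        revision    => $revision,
        isdeepclone => $deepClone ? 1 : 0,
    });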
@@ -1,12 +1,12 @@
use utf8;
-package Hydra::Schema::CachedHgInputs;
+package Hydra::Schema::Result::CachedHgInputs;

# Created by DBIx::Class::Schema::Loader
# DO NOT MODIFY THE FIRST PART OF THIS FILE

=head1 NAME

-Hydra::Schema::CachedHgInputs
+Hydra::Schema::Result::CachedHgInputs

=cut

@@ -92,8 +92,8 @@ __PACKAGE__->add_columns(
__PACKAGE__->set_primary_key("uri", "branch", "revision");


-# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
-# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:dYfjQ0SJG/mBrsZemAW3zw
+# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-08-26 12:02:36
+# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:meKBitAelYYc4TPT8NINYQ


# You can replace this text with custom content, and it will be preserved on regeneration
@@ -1,12 +1,12 @@
use utf8;
-package Hydra::Schema::CachedPathInputs;
+package Hydra::Schema::Result::CachedPathInputs;

# Created by DBIx::Class::Schema::Loader
# DO NOT MODIFY THE FIRST PART OF THIS FILE

=head1 NAME

-Hydra::Schema::CachedPathInputs
+Hydra::Schema::Result::CachedPathInputs

=cut

@@ -90,7 +90,7 @@ __PACKAGE__->add_columns(
__PACKAGE__->set_primary_key("srcpath", "sha256hash");


-# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
-# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:oV7tbWLNEMC8byKf9UnAlw
+# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-08-26 12:02:36
+# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:PZAkRje22dqftpqfU2jyGg

1;
@@ -1,12 +1,12 @@
use utf8;
-package Hydra::Schema::CachedSubversionInputs;
+package Hydra::Schema::Result::CachedSubversionInputs;

# Created by DBIx::Class::Schema::Loader
# DO NOT MODIFY THE FIRST PART OF THIS FILE

=head1 NAME

-Hydra::Schema::CachedSubversionInputs
+Hydra::Schema::Result::CachedSubversionInputs

=cut

@@ -83,7 +83,7 @@ __PACKAGE__->add_columns(
__PACKAGE__->set_primary_key("uri", "revision");


-# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
-# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:VGt/0HG84eNZr9OIA8jzow
+# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-08-26 12:02:36
+# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:6kWslezt4Pb1H8gTW4EU6w

1;
@@ -1,12 +1,12 @@
use utf8;
-package Hydra::Schema::EvaluationErrors;
+package Hydra::Schema::Result::EvaluationErrors;

# Created by DBIx::Class::Schema::Loader
# DO NOT MODIFY THE FIRST PART OF THIS FILE

=head1 NAME

-Hydra::Schema::EvaluationErrors
+Hydra::Schema::Result::EvaluationErrors

=cut

@@ -86,23 +86,25 @@ __PACKAGE__->set_primary_key("id");

Type: has_many

-Related object: L<Hydra::Schema::JobsetEvals>
+Related object: L<Hydra::Schema::Result::JobsetEvals>

=cut

__PACKAGE__->has_many(
  "jobsetevals",
-  "Hydra::Schema::JobsetEvals",
+  "Hydra::Schema::Result::JobsetEvals",
  { "foreign.evaluationerror_id" => "self.id" },
  undef,
);


-# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-02-01 20:17:39
-# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:sZIg35KWCO8MOsQ5cfN1IA
+# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-08-26 12:02:36
+# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:QA8C+0TfW7unnM4xzOHXdA

__PACKAGE__->add_column(
    "+id" => { retrieve_on_insert => 1 }
);

__PACKAGE__->mk_group_accessors('column' => 'has_error');

1;
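The retrieve_on_insert flag above makes DBIx::Class read the database-assigned id back immediately after an INSERT, which matters when the value comes from a sequence or column default. A sketch under the assumption that the table has errormsg and errortime columns (they are not shown in this diff; $schema as in the earlier sketch):

    my $error = $schema->resultset('EvaluationErrors')->create({
        errormsg  => $message,   # assumed column
        errortime => time(),     # assumed column
    });
    my $id = $error->id;  # populated because of retrieve_on_insert => 1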
@@ -1,12 +1,12 @@
use utf8;
-package Hydra::Schema::FailedPaths;
+package Hydra::Schema::Result::FailedPaths;

# Created by DBIx::Class::Schema::Loader
# DO NOT MODIFY THE FIRST PART OF THIS FILE

=head1 NAME

-Hydra::Schema::FailedPaths
+Hydra::Schema::Result::FailedPaths

=cut

@@ -57,8 +57,8 @@ __PACKAGE__->add_columns("path", { data_type => "text", is_nullable => 0 });
__PACKAGE__->set_primary_key("path");


-# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
-# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:jr3XiGO4lWAzqfATbsMwFw
+# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-08-26 12:02:36
+# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:V/Ce4LuWe5qRHFAU32xXlw


# You can replace this text with custom code or comments, and it will be preserved on regeneration
@@ -1,12 +1,12 @@
use utf8;
-package Hydra::Schema::JobsetEvalInputs;
+package Hydra::Schema::Result::JobsetEvalInputs;

# Created by DBIx::Class::Schema::Loader
# DO NOT MODIFY THE FIRST PART OF THIS FILE

=head1 NAME

-Hydra::Schema::JobsetEvalInputs
+Hydra::Schema::Result::JobsetEvalInputs

=cut

@@ -134,13 +134,13 @@ __PACKAGE__->set_primary_key("eval", "name", "altnr");

Type: belongs_to

-Related object: L<Hydra::Schema::Builds>
+Related object: L<Hydra::Schema::Result::Builds>

=cut

__PACKAGE__->belongs_to(
  "dependency",
-  "Hydra::Schema::Builds",
+  "Hydra::Schema::Result::Builds",
  { id => "dependency" },
  {
    is_deferrable => 0,
@@ -154,20 +154,20 @@ __PACKAGE__->belongs_to(

Type: belongs_to

-Related object: L<Hydra::Schema::JobsetEvals>
+Related object: L<Hydra::Schema::Result::JobsetEvals>

=cut

__PACKAGE__->belongs_to(
  "eval",
-  "Hydra::Schema::JobsetEvals",
+  "Hydra::Schema::Result::JobsetEvals",
  { id => "eval" },
  { is_deferrable => 0, on_delete => "CASCADE", on_update => "NO ACTION" },
);


-# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
-# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:/cFQGBLhvpmBO1UJztgIAg
+# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-08-26 12:02:36
+# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:AgMH2XIxp7519fFaYgesVw

my %hint = (
    columns => [
Some files were not shown because too many files have changed in this diff.