// hydra/src/hydra-queue-runner/queue-monitor.cc

#include "state.hh"
#include "hydra-build-result.hh"
#include "globals.hh"
#include <cstring>
using namespace nix;
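/* Top-level entry point for the queue monitor thread.  Runs
   queueMonitorLoop() forever and restarts it after a short delay if it
   throws (typically because the database connection was lost). */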
void State::queueMonitor()
{
while (true) {
try {
queueMonitorLoop();
} catch (std::exception & e) {
printError("queue monitor: %s", e.what());
sleep(10); // probably a DB problem, so don't retry right away
}
}
}
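/* Main loop of the queue monitor: fetch newly queued builds from the
   database, then block until Postgres notifies us that the queue has
   changed (builds added, restarted, cancelled, deleted, bumped, or
   jobset shares changed) and react accordingly. */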
void State::queueMonitorLoop()
{
auto conn(dbPool.get());
receiver buildsAdded(*conn, "builds_added");
receiver buildsRestarted(*conn, "builds_restarted");
receiver buildsCancelled(*conn, "builds_cancelled");
receiver buildsDeleted(*conn, "builds_deleted");
receiver buildsBumped(*conn, "builds_bumped");
receiver jobsetSharesChanged(*conn, "jobset_shares_changed");
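    /* Each receiver LISTENs on a Postgres notification channel.  When
       the queue has been fully processed, the loop below blocks in
       await_notification() until one of these channels fires. */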
auto destStore = getDestStore();
unsigned int lastBuildId = 0;
bool quit = false;
while (!quit) {
localStore->clearPathInfoCache();
bool done = getQueuedBuilds(*conn, destStore, lastBuildId);
if (buildOne && buildOneDone) quit = true;
/* Sleep until we get notification from the database about an
event. */
if (done && !quit) {
conn->await_notification();
nrQueueWakeups++;
} else
conn->get_notifs();
if (auto lowestId = buildsAdded.get()) {
lastBuildId = std::min(lastBuildId, static_cast<unsigned>(std::stoul(*lowestId) - 1));
printMsg(lvlTalkative, "got notification: new builds added to the queue");
}
if (buildsRestarted.get()) {
printMsg(lvlTalkative, "got notification: builds restarted");
lastBuildId = 0; // check all builds
}
if (buildsCancelled.get() || buildsDeleted.get() || buildsBumped.get()) {
printMsg(lvlTalkative, "got notification: builds cancelled, deleted or bumped");
processQueueChange(*conn);
}
if (jobsetSharesChanged.get()) {
printMsg(lvlTalkative, "got notification: jobset shares changed");
processJobsetSharesChange(*conn);
}
}
exit(0);
}
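/* Thrown by createStep() when the derivation (or one of its
   dependencies) has a cached failure recorded in the database, so that
   the caller can mark the whole build as failed without building
   anything. */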
struct PreviousFailure : public std::exception {
Step::ptr step;
PreviousFailure(Step::ptr step) : step(step) { }
};
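/* Fetch all unfinished builds with an ID greater than lastBuildId from
   the database, instantiate Build and Step objects for them, and hand
   newly runnable steps to the builder threads.  Updates lastBuildId and
   returns true when the whole batch has been processed (false if we
   bailed out early to let priority changes be handled). */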
bool State::getQueuedBuilds(Connection & conn,
ref<Store> destStore, unsigned int & lastBuildId)
{
prom.queue_checks_started.Increment();
printInfo("checking the queue for builds > %d...", lastBuildId);
/* Grab the queued builds from the database, but don't process
them yet (since we don't want a long-running transaction). */
std::vector<BuildID> newIDs;
std::map<BuildID, Build::ptr> newBuildsByID;
std::multimap<StorePath, BuildID> newBuildsByPath;
unsigned int newLastBuildId = lastBuildId;
{
pqxx::work txn(conn);
auto res = txn.exec_params
("select builds.id, builds.jobset_id, jobsets.project as project, "
"jobsets.name as jobset, job, drvPath, maxsilent, timeout, timestamp, "
"globalPriority, priority from Builds "
"inner join jobsets on builds.jobset_id = jobsets.id "
"where builds.id > $1 and finished = 0 order by globalPriority desc, builds.id",
lastBuildId);
for (auto const & row : res) {
auto builds_(builds.lock());
BuildID id = row["id"].as<BuildID>();
if (buildOne && id != buildOne) continue;
if (id > newLastBuildId) {
newLastBuildId = id;
prom.queue_max_id.Set(id);
}
if (builds_->count(id)) continue;
auto build = std::make_shared<Build>(
localStore->parseStorePath(row["drvPath"].as<std::string>()));
build->id = id;
build->jobsetId = row["jobset_id"].as<JobsetID>();
build->projectName = row["project"].as<std::string>();
build->jobsetName = row["jobset"].as<std::string>();
build->jobName = row["job"].as<std::string>();
build->maxSilentTime = row["maxsilent"].as<int>();
build->buildTimeout = row["timeout"].as<int>();
build->timestamp = row["timestamp"].as<time_t>();
build->globalPriority = row["globalPriority"].as<int>();
build->localPriority = row["priority"].as<int>();
build->jobset = createJobset(txn, build->projectName, build->jobsetName, build->jobsetId);
newIDs.push_back(id);
newBuildsByID[id] = build;
newBuildsByPath.emplace(std::make_pair(build->drvPath, id));
}
}
std::set<Step::ptr> newRunnable;
unsigned int nrAdded;
std::function<void(Build::ptr)> createBuild;
std::set<StorePath> finishedDrvs;
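    /* createBuild loads a single queued build: it creates the step
       graph for its derivation, handles the special cases (derivation
       GC'ed, cached failure, all outputs already valid) and otherwise
       registers the build and its top-level step in the in-memory
       maps. */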
createBuild = [&](Build::ptr build) {
prom.queue_build_loads.Increment();
printMsg(lvlTalkative, "loading build %1% (%2%)", build->id, build->fullJobName());
nrAdded++;
newBuildsByID.erase(build->id);
if (!localStore->isValidPath(build->drvPath)) {
/* Derivation has been GC'ed prematurely. */
printError("aborting GC'ed build %1%", build->id);
if (!build->finishedInDB) {
auto mc = startDbUpdate();
pqxx::work txn(conn);
txn.exec_params0
("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $3 where id = $1 and finished = 0",
build->id,
(int) bsAborted,
time(0));
txn.commit();
build->finishedInDB = true;
nrBuildsDone++;
}
return;
}
std::set<Step::ptr> newSteps;
Step::ptr step;
/* Create steps for this derivation and its dependencies. */
try {
step = createStep(destStore, conn, build, build->drvPath,
build, 0, finishedDrvs, newSteps, newRunnable);
} catch (PreviousFailure & ex) {
/* Some step previously failed, so mark the build as
failed right away. */
if (!buildOneDone && build->id == buildOne) buildOneDone = true;
printMsg(lvlError, "marking build %d as cached failure due to %s",
build->id, localStore->printStorePath(ex.step->drvPath));
if (!build->finishedInDB) {
auto mc = startDbUpdate();
pqxx::work txn(conn);
/* Find the previous build step record, first by
derivation path, then by output path. */
BuildID propagatedFrom = 0;
auto res = txn.exec_params1
("select max(build) from BuildSteps where drvPath = $1 and startTime != 0 and stopTime != 0 and status = 1",
localStore->printStorePath(ex.step->drvPath));
if (!res[0].is_null()) propagatedFrom = res[0].as<BuildID>();
if (!propagatedFrom) {
for (auto & [outputName, optOutputPath] : destStore->queryPartialDerivationOutputMap(ex.step->drvPath, &*localStore)) {
// ca-derivations not actually supported yet
assert(optOutputPath);
auto res = txn.exec_params
("select max(s.build) from BuildSteps s join BuildStepOutputs o on s.build = o.build where path = $1 and startTime != 0 and stopTime != 0 and status = 1",
localStore->printStorePath(*optOutputPath));
if (!res[0][0].is_null()) {
propagatedFrom = res[0][0].as<BuildID>();
break;
}
}
}
createBuildStep(txn, 0, build->id, ex.step, "", bsCachedFailure, "", propagatedFrom);
txn.exec_params
("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $3, isCachedBuild = 1, notificationPendingSince = $3 "
"where id = $1 and finished = 0",
build->id,
(int) (ex.step->drvPath == build->drvPath ? bsFailed : bsDepFailed),
time(0));
notifyBuildFinished(txn, build->id, {});
txn.commit();
build->finishedInDB = true;
nrBuildsDone++;
}
return;
}
/* Some of the new steps may be the top level of builds that
we haven't processed yet. So do them now. This ensures that
if build A depends on build B with top-level step X, then X
will be "accounted" to B in doBuildStep(). */
for (auto & r : newSteps) {
auto i = newBuildsByPath.find(r->drvPath);
if (i == newBuildsByPath.end()) continue;
auto j = newBuildsByID.find(i->second);
if (j == newBuildsByID.end()) continue;
createBuild(j->second);
}
/* If we didn't get a step, it means the step's outputs are
all valid. So we mark this as a finished, cached build. */
if (!step) {
BuildOutput res = getBuildOutputCached(conn, destStore, build->drvPath);
for (auto & i : destStore->queryDerivationOutputMap(build->drvPath, &*localStore))
addRoot(i.second);
{
auto mc = startDbUpdate();
pqxx::work txn(conn);
time_t now = time(0);
if (!buildOneDone && build->id == buildOne) buildOneDone = true;
printMsg(lvlInfo, "marking build %1% as succeeded (cached)", build->id);
markSucceededBuild(txn, build, res, true, now, now);
notifyBuildFinished(txn, build->id, {});
txn.commit();
}
build->finishedInDB = true;
return;
}
/* Note: if we exit this scope prior to this, the build and
all newly created steps are destroyed. */
{
auto builds_(builds.lock());
if (!build->finishedInDB) // FIXME: can this happen?
(*builds_)[build->id] = build;
build->toplevel = step;
}
build->propagatePriorities();
printMsg(lvlChatty, "added build %1% (top-level step %2%, %3% new steps)",
build->id, localStore->printStorePath(step->drvPath), newSteps.size());
};
/* Now instantiate build steps for each new build. The builder
threads can start building the runnable build steps right away,
even while we're still processing other new builds. */
system_time start = std::chrono::system_clock::now();
for (auto id : newIDs) {
auto i = newBuildsByID.find(id);
if (i == newBuildsByID.end()) continue;
auto build = i->second;
auto now1 = std::chrono::steady_clock::now();
newRunnable.clear();
nrAdded = 0;
try {
createBuild(build);
} catch (Error & e) {
e.addTrace({}, hintfmt("while loading build %d: ", build->id));
throw;
}
auto now2 = std::chrono::steady_clock::now();
buildReadTimeMs += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
/* Add the new runnable build steps to runnable and wake up
the builder threads. */
printMsg(lvlChatty, "got %1% new runnable steps from %2% new builds", newRunnable.size(), nrAdded);
for (auto & r : newRunnable)
makeRunnable(r);
if (buildOne && newRunnable.size() == 0) buildOneDone = true;
nrBuildsRead += nrAdded;
/* Stop after a certain time to allow priority bumps to be
processed. */
if (std::chrono::system_clock::now() > start + std::chrono::seconds(600)) {
prom.queue_checks_early_exits.Increment();
break;
}
}
prom.queue_checks_finished.Increment();
lastBuildId = newBuildsByID.empty() ? newLastBuildId : newBuildsByID.begin()->first - 1;
return newBuildsByID.empty();
}
void Build::propagatePriorities()
{
/* Update the highest global priority and lowest build ID fields
of each dependency. This is used by the dispatcher to start
steps in order of descending global priority and ascending
build ID. */
visitDependencies([&](const Step::ptr & step) {
auto step_(step->state.lock());
step_->highestGlobalPriority = std::max(step_->highestGlobalPriority, globalPriority);
step_->highestLocalPriority = std::max(step_->highestLocalPriority, localPriority);
step_->lowestBuildID = std::min(step_->lowestBuildID, id);
step_->jobsets.insert(jobset);
}, toplevel);
}
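/* Bring the in-memory queue in sync with the database after a
   builds_cancelled/builds_deleted/builds_bumped notification: drop
   builds that are no longer queued, propagate increased priorities, and
   send SIGINT to builder processes whose steps no longer have any
   dependent builds. */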
void State::processQueueChange(Connection & conn)
{
/* Get the current set of queued builds. */
std::map<BuildID, int> currentIds;
{
pqxx::work txn(conn);
auto res = txn.exec("select id, globalPriority from Builds where finished = 0");
for (auto const & row : res)
currentIds[row["id"].as<BuildID>()] = row["globalPriority"].as<BuildID>();
}
{
auto builds_(builds.lock());
for (auto i = builds_->begin(); i != builds_->end(); ) {
auto b = currentIds.find(i->first);
if (b == currentIds.end()) {
printInfo("discarding cancelled build %1%", i->first);
i = builds_->erase(i);
// FIXME: ideally we would interrupt active build steps here.
continue;
}
if (i->second->globalPriority < b->second) {
printInfo("priority of build %1% increased", i->first);
i->second->globalPriority = b->second;
i->second->propagatePriorities();
}
++i;
}
}
{
auto activeSteps(activeSteps_.lock());
for (auto & activeStep : *activeSteps) {
std::set<Build::ptr> dependents;
std::set<Step::ptr> steps;
getDependents(activeStep->step, dependents, steps);
if (!dependents.empty()) continue;
{
auto activeStepState(activeStep->state_.lock());
if (activeStepState->cancelled) continue;
activeStepState->cancelled = true;
if (activeStepState->pid != -1) {
printInfo("killing builder process %d of build step %s",
activeStepState->pid,
localStore->printStorePath(activeStep->step->drvPath));
if (kill(activeStepState->pid, SIGINT) == -1)
printError("error killing build step %s: %s",
localStore->printStorePath(activeStep->step->drvPath),
strerror(errno));
}
}
}
}
}
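/* Return the Step for drvPath, creating it (and, recursively, steps for
   its input derivations) if it doesn't exist yet, and make it reachable
   from referringBuild/referringStep.  Returns a null pointer if all
   outputs of the derivation are already valid in the destination store
   (possibly after copying or substituting them), i.e. there is nothing
   to build.  Throws PreviousFailure if the derivation has a cached
   failure. */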
Step::ptr State::createStep(ref<Store> destStore,
Connection & conn, Build::ptr build, const StorePath & drvPath,
Build::ptr referringBuild, Step::ptr referringStep, std::set<StorePath> & finishedDrvs,
std::set<Step::ptr> & newSteps, std::set<Step::ptr> & newRunnable)
{
if (finishedDrvs.find(drvPath) != finishedDrvs.end()) return 0;
/* Check if the requested step already exists. If not, create a
new step. In any case, make the step reachable from
referringBuild or referringStep. This is done atomically (with
steps locked), to ensure that this step can never become
reachable from a new build after doBuildStep has removed it
from steps. */
Step::ptr step;
bool isNew = false;
{
auto steps_(steps.lock());
/* See if the step already exists in steps and is not
stale. */
auto prev = steps_->find(drvPath);
if (prev != steps_->end()) {
step = prev->second.lock();
/* Since step is a strong pointer, the referred Step
object won't be deleted after this. */
if (!step) steps_->erase(drvPath); // remove stale entry
}
/* If it doesn't exist, create it. */
if (!step) {
step = std::make_shared<Step>(drvPath);
isNew = true;
}
auto step_(step->state.lock());
assert(step_->created != isNew);
if (referringBuild)
step_->builds.push_back(referringBuild);
if (referringStep)
step_->rdeps.push_back(referringStep);
steps_->insert_or_assign(drvPath, step);
}
if (!isNew) return step;
prom.queue_steps_created.Increment();
printMsg(lvlDebug, "considering derivation %1%", localStore->printStorePath(drvPath));
/* Initialize the step. Note that the step may be visible in
steps before this point, but that doesn't matter because
it's not runnable yet, and other threads won't make it
runnable while step->created == false. */
step->drv = std::make_unique<Derivation>(localStore->readDerivation(drvPath));
step->parsedDrv = std::make_unique<ParsedDerivation>(drvPath, *step->drv);
step->preferLocalBuild = step->parsedDrv->willBuildLocally(*localStore);
step->isDeterministic = getOr(step->drv->env, "isDetermistic", "0") == "1";
step->systemType = step->drv->platform;
{
auto i = step->drv->env.find("requiredSystemFeatures");
StringSet features;
if (i != step->drv->env.end())
features = step->requiredSystemFeatures = tokenizeString<std::set<std::string>>(i->second);
if (step->preferLocalBuild)
features.insert("local");
if (!features.empty()) {
step->systemType += ":";
step->systemType += concatStringsSep(",", features);
}
}
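    /* For example (illustrative values only): a derivation for
       x86_64-linux that requires the "kvm" and "big-parallel" features
       ends up with systemType "x86_64-linux:big-parallel,kvm", since
       the feature set is sorted and comma-separated. */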
/* If this derivation failed previously, give up. */
if (checkCachedFailure(step, conn))
throw PreviousFailure{step};
/* Are all outputs valid? */
auto outputHashes = staticOutputHashes(*localStore, *(step->drv));
bool valid = true;
std::map<DrvOutput, std::optional<StorePath>> missing;
for (auto & [outputName, maybeOutputPath] : destStore->queryPartialDerivationOutputMap(drvPath, &*localStore)) {
auto outputHash = outputHashes.at(outputName);
if (maybeOutputPath && destStore->isValidPath(*maybeOutputPath))
continue;
valid = false;
missing.insert({{outputHash, outputName}, maybeOutputPath});
}
/* Try to copy the missing paths from the local store or from
substitutes. */
if (!missing.empty()) {
size_t avail = 0;
for (auto & [i, maybePath] : missing) {
// If we don't know the output path from the destination
// store, see if the local store can tell us.
if (/* localStore != destStore && */ !maybePath && experimentalFeatureSettings.isEnabled(Xp::CaDerivations))
if (auto maybeRealisation = localStore->queryRealisation(i))
maybePath = maybeRealisation->outPath;
if (!maybePath) {
// No hope of getting the store object if we don't know
// the path.
continue;
}
auto & path = *maybePath;
if (/* localStore != destStore && */ localStore->isValidPath(path))
avail++;
else if (useSubstitutes) {
SubstitutablePathInfos infos;
localStore->querySubstitutablePathInfos({{path, {}}}, infos);
if (infos.size() == 1)
avail++;
}
}
if (missing.size() == avail) {
valid = true;
for (auto & [i, maybePath] : missing) {
// If we found everything, then we should know the path
// to every missing store object now.
assert(maybePath);
auto & path = *maybePath;
try {
time_t startTime = time(0);
if (localStore->isValidPath(path))
printInfo("copying output %1% of %2% from local store",
localStore->printStorePath(path),
localStore->printStorePath(drvPath));
else {
printInfo("substituting output %1% of %2%",
localStore->printStorePath(path),
localStore->printStorePath(drvPath));
localStore->ensurePath(path);
// FIXME: should copy directly from substituter to destStore.
}
copyClosure(*localStore, *destStore,
StorePathSet { path },
NoRepair, CheckSigs, NoSubstitute);
time_t stopTime = time(0);
{
auto mc = startDbUpdate();
pqxx::work txn(conn);
createSubstitutionStep(txn, startTime, stopTime, build, drvPath, *(step->drv), "out", path);
txn.commit();
}
} catch (Error & e) {
printError("while copying/substituting output %s of %s: %s",
localStore->printStorePath(path),
localStore->printStorePath(drvPath),
e.what());
valid = false;
break;
}
}
}
}
// FIXME: check whether all outputs are in the binary cache.
if (valid) {
finishedDrvs.insert(drvPath);
return 0;
}
/* No, we need to build. */
printMsg(lvlDebug, "creating build step %1%", localStore->printStorePath(drvPath));
/* Create steps for the dependencies. */
for (auto & i : step->drv->inputDrvs.map) {
auto dep = createStep(destStore, conn, build, i.first, 0, step, finishedDrvs, newSteps, newRunnable);
if (dep) {
auto step_(step->state.lock());
step_->deps.insert(dep);
}
}
/* If the step has no (remaining) dependencies, make it
runnable. */
{
auto step_(step->state.lock());
assert(!step_->created);
step_->created = true;
if (step_->deps.empty())
newRunnable.insert(step);
}
newSteps.insert(step);
return step;
}
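/* Look up or create the in-memory Jobset object for (projectName,
   jobsetName), initialising its scheduling shares from the database and
   seeding its fair-share accounting with recent build steps. */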
Jobset::ptr State::createJobset(pqxx::work & txn,
const std::string & projectName, const std::string & jobsetName, const JobsetID jobsetID)
{
auto p = std::make_pair(projectName, jobsetName);
{
auto jobsets_(jobsets.lock());
auto i = jobsets_->find(p);
if (i != jobsets_->end()) return i->second;
}
auto res = txn.exec_params1
("select schedulingShares from Jobsets where id = $1",
jobsetID);
if (res.empty()) throw Error("missing jobset - can't happen");
auto shares = res["schedulingShares"].as<unsigned int>();
auto jobset = std::make_shared<Jobset>();
jobset->setShares(shares);
/* Load the build steps from the last 24 hours. */
auto res2 = txn.exec_params
("select s.startTime, s.stopTime from BuildSteps s join Builds b on build = id "
"where s.startTime is not null and s.stopTime > $1 and jobset_id = $2",
time(0) - Jobset::schedulingWindow * 10,
jobsetID);
for (auto const & row : res2) {
time_t startTime = row["startTime"].as<time_t>();
time_t stopTime = row["stopTime"].as<time_t>();
jobset->addStep(startTime, stopTime - startTime);
}
auto jobsets_(jobsets.lock());
// Can't happen because only this thread adds to "jobsets".
assert(jobsets_->find(p) == jobsets_->end());
(*jobsets_)[p] = jobset;
return jobset;
}
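/* Reload the scheduling shares of all known jobsets from the database
   after a jobset_shares_changed notification. */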
void State::processJobsetSharesChange(Connection & conn)
{
/* Get the current set of jobsets. */
pqxx::work txn(conn);
auto res = txn.exec("select project, name, schedulingShares from Jobsets");
for (auto const & row : res) {
auto jobsets_(jobsets.lock());
auto i = jobsets_->find(std::make_pair(row["project"].as<std::string>(), row["name"].as<std::string>()));
if (i == jobsets_->end()) continue;
i->second->setShares(row["schedulingShares"].as<unsigned int>());
}
}
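/* If a previously finished build already produced one of this
   derivation's output paths, reuse its recorded build output, products
   and metrics instead of re-scanning the store; otherwise compute the
   build output from the derivation's outputs in the destination
   store. */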
BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore, const nix::StorePath & drvPath)
{
auto derivationOutputs = destStore->queryDerivationOutputMap(drvPath, &*localStore);
{
pqxx::work txn(conn);
for (auto & [name, output] : derivationOutputs) {
auto r = txn.exec_params
("select id, buildStatus, releaseName, closureSize, size from Builds b "
"join BuildOutputs o on b.id = o.build "
"where finished = 1 and (buildStatus = 0 or buildStatus = 6) and path = $1",
localStore->printStorePath(output));
if (r.empty()) continue;
BuildID id = r[0][0].as<BuildID>();
printInfo("reusing build %d", id);
BuildOutput res;
res.failed = r[0][1].as<int>() == bsFailedWithOutput;
res.releaseName = r[0][2].is_null() ? "" : r[0][2].as<std::string>();
res.closureSize = r[0][3].is_null() ? 0 : r[0][3].as<uint64_t>();
res.size = r[0][4].is_null() ? 0 : r[0][4].as<uint64_t>();
auto products = txn.exec_params
("select type, subtype, fileSize, sha256hash, path, name, defaultPath from BuildProducts where build = $1 order by productnr",
id);
for (auto row : products) {
BuildProduct product;
product.type = row[0].as<std::string>();
product.subtype = row[1].as<std::string>();
if (row[2].is_null())
product.isRegular = false;
else {
product.isRegular = true;
product.fileSize = row[2].as<off_t>();
}
if (!row[3].is_null())
product.sha256hash = Hash::parseAny(row[3].as<std::string>(), htSHA256);
if (!row[4].is_null())
product.path = row[4].as<std::string>();
product.name = row[5].as<std::string>();
if (!row[6].is_null())
product.defaultPath = row[6].as<std::string>();
res.products.emplace_back(product);
}
auto metrics = txn.exec_params
("select name, unit, value from BuildMetrics where build = $1",
id);
for (auto row : metrics) {
BuildMetric metric;
metric.name = row[0].as<std::string>();
metric.unit = row[1].is_null() ? "" : row[1].as<std::string>();
metric.value = row[2].as<double>();
res.metrics.emplace(metric.name, metric);
}
return res;
}
}
NarMemberDatas narMembers;
return getBuildOutput(destStore, narMembers, derivationOutputs);
}