diff --git a/src/hydra-queue-runner/queue-monitor.cc b/src/hydra-queue-runner/queue-monitor.cc
index 280d4a12..f3f22e63 100644
--- a/src/hydra-queue-runner/queue-monitor.cc
+++ b/src/hydra-queue-runner/queue-monitor.cc
@@ -2,7 +2,6 @@
 #include "build-result.hh"
 #include "globals.hh"
 
-
 using namespace nix;
 
 
@@ -159,7 +158,7 @@ bool State::getQueuedBuilds(Connection & conn, ref<Store> localStore,
            all valid. So we mark this as a finished, cached build. */
         if (!step) {
             Derivation drv = readDerivation(build->drvPath);
-            BuildOutput res = getBuildOutput(destStore, destStore->getFSAccessor(), drv);
+            BuildOutput res = getBuildOutputCached(conn, destStore, drv);
 
             {
                 auto mc = startDbUpdate();
@@ -531,3 +530,72 @@ void State::processJobsetSharesChange(Connection & conn)
         i->second->setShares(row["schedulingShares"].as<unsigned int>());
     }
 }
+
+
+/* Return the build output for 'drv', re-using the products/metrics of a
+   previous finished build with the same output paths if one exists in the
+   database; otherwise fall back to scanning the store paths. */
+BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<Store> destStore, const nix::Derivation & drv)
+{
+    {
+        pqxx::work txn(conn);
+
+        for (auto & output : drv.outputs) {
+            auto r = txn.parameterized
+                ("select id, buildStatus, releaseName, closureSize, size from Builds b "
+                 "join BuildOutputs o on b.id = o.build "
+                 "where finished = 1 and (buildStatus = 0 or buildStatus = 6) and path = $1")
+                (output.second.path).exec();
+            if (r.empty()) continue;
+            BuildID id = r[0][0].as<BuildID>();
+
+            printMsg(lvlInfo, format("re-using products of build %d") % id);
+
+            BuildOutput res;
+            res.failed = r[0][1].as<int>() == bsFailedWithOutput;
+            res.releaseName = r[0][2].is_null() ? "" : r[0][2].as<std::string>();
+            res.closureSize = r[0][3].is_null() ? 0 : r[0][3].as<unsigned long long>();
+            res.size = r[0][4].is_null() ? 0 : r[0][4].as<unsigned long long>();
+
+            auto products = txn.parameterized
+                ("select type, subtype, fileSize, sha1hash, sha256hash, path, name, defaultPath from BuildProducts where build = $1 order by productnr")
+                (id).exec();
+
+            for (auto row : products) {
+                BuildProduct product;
+                product.type = row[0].as<std::string>();
+                product.subtype = row[1].as<std::string>();
+                if (row[2].is_null())
+                    product.isRegular = false;
+                else {
+                    product.isRegular = true;
+                    product.fileSize = row[2].as<off_t>();
+                }
+                if (!row[3].is_null())
+                    product.sha1hash = parseHash(htSHA1, row[3].as<std::string>());
+                if (!row[4].is_null())
+                    product.sha256hash = parseHash(htSHA256, row[4].as<std::string>());
+                if (!row[5].is_null())
+                    product.path = row[5].as<std::string>();
+                product.name = row[6].as<std::string>();
+                if (!row[7].is_null())
+                    product.defaultPath = row[7].as<std::string>();
+                res.products.emplace_back(product);
+            }
+
+            auto metrics = txn.parameterized
+                ("select name, unit, value from BuildMetrics where build = $1")
+                (id).exec();
+
+            for (auto row : metrics) {
+                BuildMetric metric;
+                metric.name = row[0].as<std::string>();
+                metric.unit = row[1].is_null() ? "" : row[1].as<std::string>();
+                metric.value = row[2].as<double>();
+                res.metrics.emplace(metric.name, metric);
+            }
+
+            return res;
+        }
+
+    }
+
+    return getBuildOutput(destStore, destStore->getFSAccessor(), drv);
+}
diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh
index 34f6eaa1..061d5657 100644
--- a/src/hydra-queue-runner/state.hh
+++ b/src/hydra-queue-runner/state.hh
@@ -410,6 +410,9 @@ private:
     /* Handle cancellation, deletion and priority bumps. */
     void processQueueChange(Connection & conn);
 
+    BuildOutput getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore,
+        const nix::Derivation & drv);
+
     Step::ptr createStep(nix::ref<nix::Store> store, Connection & conn,
         Build::ptr build, const nix::Path & drvPath,
         Build::ptr referringBuild, Step::ptr referringStep, std::set<nix::Path> & finishedDrvs,