2015-07-21 15:14:17 +02:00
# include "state.hh"
2022-03-10 02:01:48 +01:00
# include "hydra-build-result.hh"
2015-10-05 14:57:44 +02:00
# include "globals.hh"
2016-10-31 14:58:29 +01:00
# include <cstring>
2015-07-21 15:14:17 +02:00
using namespace nix ;
void State : : queueMonitor ( )
{
while ( true ) {
try {
queueMonitorLoop ( ) ;
} catch ( std : : exception & e ) {
2023-06-23 15:06:34 +02:00
printError ( " queue monitor: %s " , e . what ( ) ) ;
2015-07-21 15:14:17 +02:00
sleep ( 10 ) ; // probably a DB problem, so don't retry right away
}
}
}
/* Main loop of the queue monitor: listen on PostgreSQL NOTIFY
   channels and (re)scan the Builds table for newly queued builds
   whenever something relevant changes.  Only returns by calling
   exit() when --build-one mode has finished its build. */
void State::queueMonitorLoop()
{
    auto conn(dbPool.get());

    /* Subscribe to the NOTIFY channels that signal queue changes. */
    receiver buildsAdded(*conn, "builds_added");
    receiver buildsRestarted(*conn, "builds_restarted");
    receiver buildsCancelled(*conn, "builds_cancelled");
    receiver buildsDeleted(*conn, "builds_deleted");
    receiver buildsBumped(*conn, "builds_bumped");
    receiver jobsetSharesChanged(*conn, "jobset_shares_changed");

    auto destStore = getDestStore();

    unsigned int lastBuildId = 0;

    bool quit = false;
    while (!quit) {
        /* Drop cached path-info so validity is re-queried each round. */
        localStore->clearPathInfoCache();

        bool done = getQueuedBuilds(*conn, destStore, lastBuildId);

        if (buildOne && buildOneDone) quit = true;

        /* Sleep until we get notification from the database about an
           event.  If the last scan was cut short (done == false),
           don't block — just drain pending notifications and rescan. */
        if (done && !quit) {
            conn->await_notification();
            nrQueueWakeups++;
        } else
            conn->get_notifs();

        if (auto lowestId = buildsAdded.get()) {
            /* Rewind lastBuildId so the next scan picks up the newly
               added builds (the notification payload carries the
               lowest new build ID). */
            lastBuildId = std::min(lastBuildId, static_cast<unsigned>(std::stoul(*lowestId) - 1));
            printMsg(lvlTalkative, "got notification: new builds added to the queue");
        }
        if (buildsRestarted.get()) {
            printMsg(lvlTalkative, "got notification: builds restarted");
            lastBuildId = 0; // check all builds
        }
        if (buildsCancelled.get() || buildsDeleted.get() || buildsBumped.get()) {
            printMsg(lvlTalkative, "got notification: builds cancelled or bumped");
            processQueueChange(*conn);
        }
        if (jobsetSharesChanged.get()) {
            printMsg(lvlTalkative, "got notification: jobset shares changed");
            processJobsetSharesChange(*conn);
        }
    }

    exit(0);
}
/* Exception thrown by createStep() when a step (or one of its
   dependencies) is a known cached failure, so the referring build
   can be marked failed right away without building anything. */
struct PreviousFailure : public std::exception {
    Step::ptr step; // the step whose cached failure triggered this
    PreviousFailure(Step::ptr step) : step(step) { }
};
/* Fetch all unfinished builds with an ID greater than ‘lastBuildId’
   from the database and instantiate Build objects and build steps
   for them.  Updates ‘lastBuildId’ to reflect how far the queue has
   been processed.  Returns true if all new builds were processed
   (so the caller may block until the next notification); false if
   the scan was cut short by the time limit. */
bool State::getQueuedBuilds(Connection & conn,
    ref<Store> destStore, unsigned int & lastBuildId)
{
    prom.queue_checks_started.Increment();

    printInfo("checking the queue for builds > %d...", lastBuildId);

    /* Grab the queued builds from the database, but don't process
       them yet (since we don't want a long-running transaction). */
    std::vector<BuildID> newIDs;
    std::map<BuildID, Build::ptr> newBuildsByID;
    std::multimap<StorePath, BuildID> newBuildsByPath;

    unsigned int newLastBuildId = lastBuildId;

    {
        pqxx::work txn(conn);

        auto res = txn.exec_params
            ("select builds.id, builds.jobset_id, jobsets.project as project, "
             "jobsets.name as jobset, job, drvPath, maxsilent, timeout, timestamp, "
             "globalPriority, priority from Builds "
             "inner join jobsets on builds.jobset_id = jobsets.id "
             "where builds.id > $1 and finished = 0 order by globalPriority desc, builds.id",
             lastBuildId);

        for (auto const & row : res) {
            auto builds_(builds.lock());
            BuildID id = row["id"].as<BuildID>();
            if (buildOne && id != buildOne) continue; // --build-one: skip everything else
            if (id > newLastBuildId) {
                newLastBuildId = id;
                prom.queue_max_id.Set(id);
            }
            if (builds_->count(id)) continue; // already known in memory

            auto build = std::make_shared<Build>(
                localStore->parseStorePath(row["drvPath"].as<std::string>()));
            build->id = id;
            build->jobsetId = row["jobset_id"].as<JobsetID>();
            build->projectName = row["project"].as<std::string>();
            build->jobsetName = row["jobset"].as<std::string>();
            build->jobName = row["job"].as<std::string>();
            build->maxSilentTime = row["maxsilent"].as<int>();
            build->buildTimeout = row["timeout"].as<int>();
            build->timestamp = row["timestamp"].as<time_t>();
            build->globalPriority = row["globalPriority"].as<int>();
            build->localPriority = row["priority"].as<int>();
            build->jobset = createJobset(txn, build->projectName, build->jobsetName, build->jobsetId);

            newIDs.push_back(id);
            newBuildsByID[id] = build;
            newBuildsByPath.emplace(std::make_pair(build->drvPath, id));
        }
    }

    std::set<Step::ptr> newRunnable;
    unsigned int nrAdded; // reset per top-level build in the loop below
    std::function<void(Build::ptr)> createBuild;
    std::set<StorePath> finishedDrvs;

    /* Load one build: create its steps, or mark it finished directly
       if it's a cached failure / already has valid outputs.  May
       recurse (via the newSteps scan below) into other new builds. */
    createBuild = [&](Build::ptr build) {
        prom.queue_build_loads.Increment();
        printMsg(lvlTalkative, "loading build %1% (%2%)", build->id, build->fullJobName());
        nrAdded++;
        newBuildsByID.erase(build->id);

        if (!localStore->isValidPath(build->drvPath)) {
            /* Derivation has been GC'ed prematurely. */
            printError("aborting GC'ed build %1%", build->id);
            if (!build->finishedInDB) {
                auto mc = startDbUpdate();
                pqxx::work txn(conn);
                txn.exec_params0
                    ("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $3 where id = $1 and finished = 0",
                     build->id,
                     (int) bsAborted,
                     time(0));
                txn.commit();
                build->finishedInDB = true;
                nrBuildsDone++;
            }
            return;
        }

        std::set<Step::ptr> newSteps;
        Step::ptr step;

        /* Create steps for this derivation and its dependencies. */
        try {
            step = createStep(destStore, conn, build, build->drvPath,
                build, 0, finishedDrvs, newSteps, newRunnable);
        } catch (PreviousFailure & ex) {
            /* Some step previously failed, so mark the build as
               failed right away. */
            if (!buildOneDone && build->id == buildOne) buildOneDone = true;
            printMsg(lvlError, "marking build %d as cached failure due to ‘%s’",
                build->id, localStore->printStorePath(ex.step->drvPath));
            if (!build->finishedInDB) {
                auto mc = startDbUpdate();
                pqxx::work txn(conn);

                /* Find the previous build step record, first by
                   derivation path, then by output path. */
                BuildID propagatedFrom = 0;

                auto res = txn.exec_params1
                    ("select max(build) from BuildSteps where drvPath = $1 and startTime != 0 and stopTime != 0 and status = 1",
                     localStore->printStorePath(ex.step->drvPath));
                if (!res[0].is_null()) propagatedFrom = res[0].as<BuildID>();

                if (!propagatedFrom) {
                    for (auto & [outputName, optOutputPath] : destStore->queryPartialDerivationOutputMap(ex.step->drvPath, &*localStore)) {
                        constexpr std::string_view common = "select max(s.build) from BuildSteps s join BuildStepOutputs o on s.build = o.build where startTime != 0 and stopTime != 0 and status = 1";
                        /* Match by output path when we know it;
                           otherwise by drvPath + output name. */
                        auto res = optOutputPath
                            ? txn.exec_params(
                                std::string { common } + " and path = $1",
                                localStore->printStorePath(*optOutputPath))
                            : txn.exec_params(
                                std::string { common } + " and drvPath = $1 and name = $2",
                                localStore->printStorePath(ex.step->drvPath),
                                outputName);
                        if (!res[0][0].is_null()) {
                            propagatedFrom = res[0][0].as<BuildID>();
                            break;
                        }
                    }
                }

                createBuildStep(txn, 0, build->id, ex.step, "", bsCachedFailure, "", propagatedFrom);
                txn.exec_params
                    ("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $3, isCachedBuild = 1, notificationPendingSince = $3 "
                     "where id = $1 and finished = 0",
                     build->id,
                     (int) (ex.step->drvPath == build->drvPath ? bsFailed : bsDepFailed),
                     time(0));
                notifyBuildFinished(txn, build->id, {});
                txn.commit();
                build->finishedInDB = true;
                nrBuildsDone++;
            }

            return;
        }

        /* Some of the new steps may be the top level of builds that
           we haven't processed yet. So do them now. This ensures that
           if build A depends on build B with top-level step X, then X
           will be "accounted" to B in doBuildStep(). */
        for (auto & r : newSteps) {
            auto i = newBuildsByPath.find(r->drvPath);
            if (i == newBuildsByPath.end()) continue;
            auto j = newBuildsByID.find(i->second);
            if (j == newBuildsByID.end()) continue;
            createBuild(j->second);
        }

        /* If we didn't get a step, it means the step's outputs are
           all valid. So we mark this as a finished, cached build. */
        if (!step) {
            BuildOutput res = getBuildOutputCached(conn, destStore, build->drvPath);

            for (auto & i : destStore->queryDerivationOutputMap(build->drvPath, &*localStore))
                addRoot(i.second);

            {
                auto mc = startDbUpdate();
                pqxx::work txn(conn);
                time_t now = time(0);
                if (!buildOneDone && build->id == buildOne) buildOneDone = true;
                printMsg(lvlInfo, "marking build %1% as succeeded (cached)", build->id);
                markSucceededBuild(txn, build, res, true, now, now);
                notifyBuildFinished(txn, build->id, {});
                txn.commit();
            }

            build->finishedInDB = true;

            return;
        }

        /* Note: if we exit this scope prior to this, the build and
           all newly created steps are destroyed. */

        {
            auto builds_(builds.lock());
            if (!build->finishedInDB) // FIXME: can this happen?
                (*builds_)[build->id] = build;
            build->toplevel = step;
        }

        build->propagatePriorities();

        printMsg(lvlChatty, "added build %1% (top-level step %2%, %3% new steps)",
            build->id, localStore->printStorePath(step->drvPath), newSteps.size());
    };

    /* Now instantiate build steps for each new build. The builder
       threads can start building the runnable build steps right away,
       even while we're still processing other new builds. */
    system_time start = std::chrono::system_clock::now();

    for (auto id : newIDs) {
        auto i = newBuildsByID.find(id);
        if (i == newBuildsByID.end()) continue; // already loaded recursively above
        auto build = i->second;

        auto now1 = std::chrono::steady_clock::now();

        newRunnable.clear();
        nrAdded = 0;
        try {
            createBuild(build);
        } catch (Error & e) {
            e.addTrace({}, hintfmt("while loading build %d: ", build->id));
            throw;
        }

        auto now2 = std::chrono::steady_clock::now();

        buildReadTimeMs += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();

        /* Add the new runnable build steps to ‘runnable’ and wake up
           the builder threads. */
        printMsg(lvlChatty, "got %1% new runnable steps from %2% new builds", newRunnable.size(), nrAdded);
        for (auto & r : newRunnable)
            makeRunnable(r);

        if (buildOne && newRunnable.size() == 0) buildOneDone = true;

        nrBuildsRead += nrAdded;

        /* Stop after a certain time to allow priority bumps to be
           processed. */
        if (std::chrono::system_clock::now() > start + std::chrono::seconds(600)) {
            prom.queue_checks_early_exits.Increment();
            break;
        }
    }

    prom.queue_checks_finished.Increment();

    /* If we bailed out early, resume just below the lowest build we
       didn't get to; otherwise continue after the highest ID seen. */
    lastBuildId = newBuildsByID.empty() ? newLastBuildId : newBuildsByID.begin()->first - 1;
    return newBuildsByID.empty();
}
void Build : : propagatePriorities ( )
{
/* Update the highest global priority and lowest build ID fields
of each dependency . This is used by the dispatcher to start
steps in order of descending global priority and ascending
build ID . */
visitDependencies ( [ & ] ( const Step : : ptr & step ) {
auto step_ ( step - > state . lock ( ) ) ;
step_ - > highestGlobalPriority = std : : max ( step_ - > highestGlobalPriority , globalPriority ) ;
2015-08-12 12:05:43 +02:00
step_ - > highestLocalPriority = std : : max ( step_ - > highestLocalPriority , localPriority ) ;
2015-08-10 16:18:06 +02:00
step_ - > lowestBuildID = std : : min ( step_ - > lowestBuildID , id ) ;
2015-08-11 01:30:24 +02:00
step_ - > jobsets . insert ( jobset ) ;
2015-08-10 16:18:06 +02:00
} , toplevel ) ;
}
/* Handle a change to the queue (builds cancelled, deleted or bumped
   in priority): reconcile the in-memory build map with the database
   and interrupt active build steps that no remaining build depends
   on. */
void State::processQueueChange(Connection & conn)
{
    /* Get the current set of queued builds. */
    std::map<BuildID, int> currentIds;
    {
        pqxx::work txn(conn);
        auto res = txn.exec("select id, globalPriority from Builds where finished = 0");
        for (auto const & row : res)
            currentIds[row["id"].as<BuildID>()] = row["globalPriority"].as<BuildID>();
    }

    {
        auto builds_(builds.lock());

        for (auto i = builds_->begin(); i != builds_->end(); ) {
            auto b = currentIds.find(i->first);
            if (b == currentIds.end()) {
                /* Build is no longer unfinished in the DB: drop it
                   (erase returns the next valid iterator). */
                printInfo("discarding cancelled build %1%", i->first);
                i = builds_->erase(i);
                // FIXME: ideally we would interrupt active build steps here.
                continue;
            }
            if (i->second->globalPriority < b->second) {
                printInfo("priority of build %1% increased", i->first);
                i->second->globalPriority = b->second;
                i->second->propagatePriorities();
            }
            ++i;
        }
    }

    {
        auto activeSteps(activeSteps_.lock());
        for (auto & activeStep : *activeSteps) {
            std::set<Build::ptr> dependents;
            std::set<Step::ptr> steps;
            getDependents(activeStep->step, dependents, steps);
            /* Only cancel steps that no remaining build depends on. */
            if (!dependents.empty()) continue;

            {
                auto activeStepState(activeStep->state_.lock());
                if (activeStepState->cancelled) continue; // already being torn down
                activeStepState->cancelled = true;
                if (activeStepState->pid != -1) {
                    printInfo("killing builder process %d of build step ‘%s’",
                        activeStepState->pid,
                        localStore->printStorePath(activeStep->step->drvPath));
                    if (kill(activeStepState->pid, SIGINT) == -1)
                        printError("error killing build step ‘%s’: %s",
                            localStore->printStorePath(activeStep->step->drvPath),
                            strerror(errno));
                }
            }
        }
    }
}
/* Create (or look up) the build step for ‘drvPath’ and, recursively,
   steps for all its input derivations.  The step is linked to
   ‘referringBuild’ / ‘referringStep’; brand-new steps are collected
   in ‘newSteps’ and steps without outstanding dependencies in
   ‘newRunnable’.  Returns null if all outputs of the derivation are
   already valid (possibly after copying/substituting them into
   destStore).  Throws PreviousFailure if the step is a cached
   failure. */
Step::ptr State::createStep(ref<Store> destStore,
    Connection & conn, Build::ptr build, const StorePath & drvPath,
    Build::ptr referringBuild, Step::ptr referringStep, std::set<StorePath> & finishedDrvs,
    std::set<Step::ptr> & newSteps, std::set<Step::ptr> & newRunnable)
{
    if (finishedDrvs.find(drvPath) != finishedDrvs.end()) return 0;

    /* Check if the requested step already exists. If not, create a
       new step. In any case, make the step reachable from
       referringBuild or referringStep. This is done atomically (with
       ‘steps’ locked), to ensure that this step can never become
       reachable from a new build after doBuildStep has removed it
       from ‘steps’. */
    Step::ptr step;
    bool isNew = false;
    {
        auto steps_(steps.lock());

        /* See if the step already exists in ‘steps’ and is not
           stale. */
        auto prev = steps_->find(drvPath);
        if (prev != steps_->end()) {
            step = prev->second.lock();
            /* Since ‘step’ is a strong pointer, the referred Step
               object won't be deleted after this. */
            if (!step) steps_->erase(drvPath); // remove stale entry
        }

        /* If it doesn't exist, create it. */
        if (!step) {
            step = std::make_shared<Step>(drvPath);
            isNew = true;
        }

        auto step_(step->state.lock());

        assert(step_->created != isNew);

        if (referringBuild)
            step_->builds.push_back(referringBuild);

        if (referringStep)
            step_->rdeps.push_back(referringStep);

        steps_->insert_or_assign(drvPath, step);
    }

    if (!isNew) return step;

    prom.queue_steps_created.Increment();

    printMsg(lvlDebug, "considering derivation ‘%1%’", localStore->printStorePath(drvPath));

    /* Initialize the step. Note that the step may be visible in
       ‘steps’ before this point, but that doesn't matter because
       it's not runnable yet, and other threads won't make it
       runnable while step->created == false. */
    step->drv = std::make_unique<Derivation>(localStore->readDerivation(drvPath));
    step->parsedDrv = std::make_unique<ParsedDerivation>(drvPath, *step->drv);

    step->preferLocalBuild = step->parsedDrv->willBuildLocally(*localStore);
    // NOTE(review): "isDetermistic" (sic) — presumably matches the key set by
    // whatever produces the derivation env; confirm before "fixing" the spelling.
    step->isDeterministic = getOr(step->drv->env, "isDetermistic", "0") == "1";

    step->systemType = step->drv->platform;
    {
        /* The scheduler's system type also encodes required features
           (and "local" for prefer-local builds), e.g. "x86_64-linux:kvm". */
        StringSet features = step->requiredSystemFeatures = step->parsedDrv->getRequiredSystemFeatures();
        if (step->preferLocalBuild)
            features.insert("local");
        if (!features.empty()) {
            step->systemType += ":";
            step->systemType += concatStringsSep(",", features);
        }
    }

    /* If this derivation failed previously, give up. */
    if (checkCachedFailure(step, conn))
        throw PreviousFailure{step};

    /* Are all outputs valid? */
    auto outputHashes = staticOutputHashes(*localStore, *(step->drv));
    bool valid = true;
    std::map<DrvOutput, std::optional<StorePath>> missing;
    for (auto & [outputName, maybeOutputPath] : destStore->queryPartialDerivationOutputMap(drvPath, &*localStore)) {
        auto outputHash = outputHashes.at(outputName);
        if (maybeOutputPath && destStore->isValidPath(*maybeOutputPath))
            continue;
        valid = false;
        missing.insert({{outputHash, outputName}, maybeOutputPath});
    }

    /* Try to copy the missing paths from the local store or from
       substitutes. */
    if (!missing.empty()) {

        size_t avail = 0;
        for (auto & [i, pathOpt] : missing) {
            // If we don't know the output path from the destination
            // store, see if the local store can tell us.
            if (/* localStore != destStore && */ !pathOpt && experimentalFeatureSettings.isEnabled(Xp::CaDerivations))
                if (auto maybeRealisation = localStore->queryRealisation(i))
                    pathOpt = maybeRealisation->outPath;

            if (!pathOpt) {
                // No hope of getting the store object if we don't know
                // the path.
                continue;
            }
            auto & path = *pathOpt;

            if (/* localStore != destStore && */ localStore->isValidPath(path))
                avail++;
            else if (useSubstitutes) {
                SubstitutablePathInfos infos;
                localStore->querySubstitutablePathInfos({{path, {}}}, infos);
                if (infos.size() == 1)
                    avail++;
            }
        }

        /* Only bother copying if every missing output is obtainable. */
        if (missing.size() == avail) {
            valid = true;
            for (auto & [i, pathOpt] : missing) {
                // If we found everything, then we should know the path
                // to every missing store object now.
                assert(pathOpt);
                auto & path = *pathOpt;

                try {
                    time_t startTime = time(0);

                    if (localStore->isValidPath(path))
                        printInfo("copying output ‘%1%’ of ‘%2%’ from local store",
                            localStore->printStorePath(path),
                            localStore->printStorePath(drvPath));
                    else {
                        printInfo("substituting output ‘%1%’ of ‘%2%’",
                            localStore->printStorePath(path),
                            localStore->printStorePath(drvPath));
                        localStore->ensurePath(path);
                        // FIXME: should copy directly from substituter to destStore.
                    }

                    copyClosure(*localStore, *destStore,
                        StorePathSet {path},
                        NoRepair, CheckSigs, NoSubstitute);

                    time_t stopTime = time(0);

                    /* Record the substitution as a build step. */
                    {
                        auto mc = startDbUpdate();
                        pqxx::work txn(conn);
                        createSubstitutionStep(txn, startTime, stopTime, build, drvPath, *(step->drv), "out", path);
                        txn.commit();
                    }

                } catch (Error & e) {
                    printError("while copying/substituting output ‘%s’ of ‘%s’: %s",
                        localStore->printStorePath(path),
                        localStore->printStorePath(drvPath),
                        e.what());
                    valid = false;
                    break;
                }
            }
        }
    }

    // FIXME: check whether all outputs are in the binary cache.
    if (valid) {
        finishedDrvs.insert(drvPath);
        return 0;
    }

    /* No, we need to build. */
    printMsg(lvlDebug, "creating build step ‘%1%’", localStore->printStorePath(drvPath));

    /* Create steps for the dependencies. */
    for (auto & i : step->drv->inputDrvs.map) {
        auto dep = createStep(destStore, conn, build, i.first, 0, step, finishedDrvs, newSteps, newRunnable);
        if (dep) {
            auto step_(step->state.lock());
            step_->deps.insert(dep);
        }
    }

    /* If the step has no (remaining) dependencies, make it
       runnable. */
    {
        auto step_(step->state.lock());
        assert(!step_->created);
        step_->created = true;
        if (step_->deps.empty())
            newRunnable.insert(step);
    }

    newSteps.insert(step);

    return step;
}
/* Return the in-memory Jobset object for (projectName, jobsetName),
   creating it from the database — including its recent build-step
   history used for fair-share scheduling — if it doesn't exist yet.
   Per the assertion below, only the queue monitor thread adds
   entries to ‘jobsets’. */
Jobset::ptr State::createJobset(pqxx::work & txn,
    const std::string & projectName, const std::string & jobsetName, const JobsetID jobsetID)
{
    auto p = std::make_pair(projectName, jobsetName);

    {
        auto jobsets_(jobsets.lock());
        auto i = jobsets_->find(p);
        if (i != jobsets_->end()) return i->second;
    }

    auto res = txn.exec_params1
        ("select schedulingShares from Jobsets where id = $1",
         jobsetID);
    if (res.empty()) throw Error("missing jobset - can't happen");

    auto shares = res["schedulingShares"].as<unsigned int>();

    auto jobset = std::make_shared<Jobset>();
    jobset->setShares(shares);

    /* Load the build steps from the last 24 hours. */
    auto res2 = txn.exec_params
        ("select s.startTime, s.stopTime from BuildSteps s join Builds b on build = id "
         "where s.startTime is not null and s.stopTime > $1 and jobset_id = $2",
         time(0) - Jobset::schedulingWindow * 10,
         jobsetID);
    for (auto const & row : res2) {
        time_t startTime = row["startTime"].as<time_t>();
        time_t stopTime = row["stopTime"].as<time_t>();
        jobset->addStep(startTime, stopTime - startTime);
    }

    auto jobsets_(jobsets.lock());
    // Can't happen because only this thread adds to "jobsets".
    assert(jobsets_->find(p) == jobsets_->end());
    (*jobsets_)[p] = jobset;
    return jobset;
}
void State : : processJobsetSharesChange ( Connection & conn )
{
/* Get the current set of jobsets. */
pqxx : : work txn ( conn ) ;
auto res = txn . exec ( " select project, name, schedulingShares from Jobsets " ) ;
for ( auto const & row : res ) {
auto jobsets_ ( jobsets . lock ( ) ) ;
2022-03-09 23:50:30 +01:00
auto i = jobsets_ - > find ( std : : make_pair ( row [ " project " ] . as < std : : string > ( ) , row [ " name " ] . as < std : : string > ( ) ) ) ;
2015-08-12 13:17:56 +02:00
if ( i = = jobsets_ - > end ( ) ) continue ;
i - > second - > setShares ( row [ " schedulingShares " ] . as < unsigned int > ( ) ) ;
}
}
2016-04-13 15:38:06 +02:00
2023-12-04 16:05:50 -05:00
/* Return the BuildOutput for ‘drvPath’.  If a previously finished
   build produced the same output path, reuse its recorded metadata
   (status, release name, sizes, products, metrics) from the
   database; otherwise extract the build output from the store. */
BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore, const nix::StorePath & drvPath)
{
    auto derivationOutputs = destStore->queryDerivationOutputMap(drvPath, &*localStore);

    {
        pqxx::work txn(conn);

        for (auto & [name, output] : derivationOutputs) {
            /* Look for a succeeded (0) or failed-with-output (6)
               finished build that produced this output path. */
            auto r = txn.exec_params
                ("select id, buildStatus, releaseName, closureSize, size from Builds b "
                 "join BuildOutputs o on b.id = o.build "
                 "where finished = 1 and (buildStatus = 0 or buildStatus = 6) and path = $1",
                 localStore->printStorePath(output));
            if (r.empty()) continue;
            BuildID id = r[0][0].as<BuildID>();

            printInfo("reusing build %d", id);

            BuildOutput res;
            res.failed = r[0][1].as<int>() == bsFailedWithOutput;
            res.releaseName = r[0][2].is_null() ? "" : r[0][2].as<std::string>();
            res.closureSize = r[0][3].is_null() ? 0 : r[0][3].as<uint64_t>();
            res.size = r[0][4].is_null() ? 0 : r[0][4].as<uint64_t>();

            /* Copy over the recorded build products. */
            auto products = txn.exec_params
                ("select type, subtype, fileSize, sha256hash, path, name, defaultPath from BuildProducts where build = $1 order by productnr",
                 id);

            for (auto row : products) {
                BuildProduct product;
                product.type = row[0].as<std::string>();
                product.subtype = row[1].as<std::string>();
                if (row[2].is_null())
                    product.isRegular = false;
                else {
                    product.isRegular = true;
                    product.fileSize = row[2].as<off_t>();
                }
                if (!row[3].is_null())
                    product.sha256hash = Hash::parseAny(row[3].as<std::string>(), HashAlgorithm::SHA256);
                if (!row[4].is_null())
                    product.path = row[4].as<std::string>();
                product.name = row[5].as<std::string>();
                if (!row[6].is_null())
                    product.defaultPath = row[6].as<std::string>();
                res.products.emplace_back(product);
            }

            /* And the recorded build metrics. */
            auto metrics = txn.exec_params
                ("select name, unit, value from BuildMetrics where build = $1",
                 id);
            for (auto row : metrics) {
                BuildMetric metric;
                metric.name = row[0].as<std::string>();
                metric.unit = row[1].is_null() ? "" : row[1].as<std::string>();
                metric.value = row[2].as<double>();
                res.metrics.emplace(metric.name, metric);
            }

            return res;
        }
    }

    NarMemberDatas narMembers;
    return getBuildOutput(destStore, narMembers, derivationOutputs);
}