2008-11-25 11:01:42 +00:00
|
|
|
package Hydra::Helper::Nix;
|
2008-11-18 14:48:40 +00:00
|
|
|
|
|
|
|
use strict;
|
2008-11-28 14:36:04 +00:00
|
|
|
use Exporter;
|
2009-02-06 21:01:20 +00:00
|
|
|
use File::Path;
|
2009-01-13 14:02:07 +00:00
|
|
|
use File::Basename;
|
2008-11-28 14:36:04 +00:00
|
|
|
|
|
|
|
our @ISA = qw(Exporter);
|
2009-02-06 15:02:49 +00:00
|
|
|
our @EXPORT = qw(
|
2009-02-26 16:57:05 +00:00
|
|
|
isValidPath queryPathInfo
|
2009-04-22 22:43:04 +00:00
|
|
|
getHydraPath getHydraDBPath openHydraDB txn_do
|
2009-03-15 11:56:11 +00:00
|
|
|
registerRoot getGCRootsDir gcRootFor
|
2009-10-15 11:13:36 +00:00
|
|
|
getPrimaryBuildsForReleaseSet
|
|
|
|
getPrimaryBuildTotal
|
|
|
|
getRelease getLatestSuccessfulRelease );
|
2008-11-18 14:48:40 +00:00
|
|
|
|
|
|
|
|
|
|
|
# Return true if $path is a valid path in the local Nix store.
# Checks the Nix database's per-path info file directly instead of
# shelling out to `nix-store --check-validity'; much faster, but it
# breaks the store abstraction.
sub isValidPath {
    my $path = shift;
    my $infoFile = "/nix/var/nix/db/info/" . basename($path);
    return -e $infoFile;
}
|
|
|
|
|
|
|
|
|
2009-02-26 16:57:05 +00:00
|
|
|
# Return the metadata recorded for store path $path as a triple
# ($hash, $deriver, \@references).  $deriver may be undef if the info
# file has no Deriver field; @references may be empty.
# Dies if the info file cannot be opened or contains no hash.
#
# !!! like above, this breaks abstraction.  What we really need is
# Perl bindings for libstore :-)
sub queryPathInfo {
    my $path = shift;

    # Lexical filehandle + 3-arg open (the original bareword FH with a
    # 2-arg open was a global and mode-injection-prone).
    my $infoFile = "/nix/var/nix/db/info/" . basename($path);
    open my $fh, "<", $infoFile
        or die "cannot open info file for $path";

    my $hash;
    my $deriver;
    my @refs = ();

    while (my $line = <$fh>) {
        if ($line =~ /^Hash: (\S+)$/) {
            $hash = $1;
        }
        elsif ($line =~ /^Deriver: (\S+)$/) {
            $deriver = $1;
        }
        elsif ($line =~ /^References: (.*)$/) {
            @refs = split / /, $1;
        }
    }

    close $fh;

    die "path $path does not have a hash" unless defined $hash;

    return ($hash, $deriver, \@refs);
}
|
|
|
|
|
|
|
|
|
2008-11-28 14:36:04 +00:00
|
|
|
# Return Hydra's data directory, taken from the HYDRA_DATA environment
# variable.  Dies if the variable is unset or names a non-directory.
sub getHydraPath {
    my $dir = $ENV{'HYDRA_DATA'};
    defined $dir or die "The HYDRA_DATA environment variable is not set!\n";
    -d $dir or die "The HYDRA_DATA directory does not exist!\n";
    return $dir;
}
|
|
|
|
|
|
|
|
|
|
|
|
# Return the DBI connection string for the Hydra database.  If the
# HYDRA_DBI environment variable is set, it is used verbatim; otherwise
# fall back to the SQLite database `hydra.sqlite' in the Hydra data
# directory, dying if that file does not exist.
sub getHydraDBPath {
    my $db = $ENV{"HYDRA_DBI"};
    return $db if defined $db;

    my $path = getHydraPath() . '/hydra.sqlite';
    # Fixed error message: was "The Hydra database ($path) not exist!".
    die "The Hydra database ($path) does not exist!\n" unless -f $path;
    return "dbi:SQLite:$path";
}
|
2008-11-18 14:48:40 +00:00
|
|
|
|
2008-11-28 14:36:04 +00:00
|
|
|
|
|
|
|
# Connect to the Hydra database and return the schema object.  When the
# HYDRA_NO_FSYNC environment variable is set, SQLite's synchronous
# writes are disabled for speed (at the cost of crash safety).
sub openHydraDB {
    my $db = Hydra::Schema->connect(getHydraDBPath(), "", "", {});
    $db->storage->dbh->do("PRAGMA synchronous = OFF;")
        if defined $ENV{'HYDRA_NO_FSYNC'};
    return $db;
}
|
|
|
|
|
|
|
|
|
2009-04-22 22:43:04 +00:00
|
|
|
# Awful hack to handle timeouts in SQLite: just retry the transaction.
# DBD::SQLite *has* a 30 second retry window, but apparently it
# doesn't work.
sub txn_do {
    my ($db, $coderef) = @_;
    for (;;) {
        eval { $db->txn_do($coderef); };
        return unless $@;                          # transaction committed
        die $@ if $@ !~ /database is locked/;      # real error: propagate
        # Lock contention: retry the whole transaction.
    }
}
|
|
|
|
|
|
|
|
|
2009-02-06 21:01:20 +00:00
|
|
|
# Return (creating it if necessary) the per-user directory under which
# Hydra registers its Nix garbage-collector roots.  Dies if LOGNAME is
# not set in the environment.
sub getGCRootsDir {
    die unless defined $ENV{LOGNAME};
    my $dir = "/nix/var/nix/gcroots/per-user/$ENV{LOGNAME}/hydra-roots";
    mkpath($dir) unless -e $dir;
    return $dir;
}
|
|
|
|
|
|
|
|
|
2009-03-15 11:56:11 +00:00
|
|
|
# Return the filename of the GC root that registerRoot would create for
# store path $path (the root is named after the path's basename).
sub gcRootFor {
    my ($path) = @_;
    return getGCRootsDir() . "/" . basename($path);
}
|
2009-02-06 21:01:20 +00:00
|
|
|
|
|
|
|
|
2009-03-15 11:56:11 +00:00
|
|
|
# Register store path $path as a Nix garbage-collector root by creating
# a symlink in the Hydra GC roots directory.  A no-op if the root
# already exists.
sub registerRoot {
    my ($path) = @_;

    my $link = gcRootFor($path);
    return if -l $link;

    symlink($path, $link)
        or die "cannot create GC root `$link' to `$path'";
}
|
|
|
|
|
|
|
|
|
2009-02-06 15:02:49 +00:00
|
|
|
# Translate a space-separated list of `name=value' build-input
# attributes into a SQL condition on the build whose id column is $id.
# Dies on any attribute that is not of the form name=value with
# word/hyphen characters only.
sub attrsToSQL {
    my ($attrs, $id) = @_;

    my $query = "1 = 1";

    for my $attr (split / /, $attrs) {
        $attr =~ /^([\w-]+)=([\w-]*)$/ or die "invalid attribute in release set: $attr";
        my ($name, $value) = ($1, $2);

        # !!! Yes, this is horribly injection-prone... (though
        # name/value are filtered above).  Should use SQL::Abstract,
        # but it can't deal with subqueries.  At least we should use
        # placeholders.
        $query .= " and exists (select 1 from buildinputs where build = $id and name = '$name' and value = '$value')";
    }

    return $query;
}
|
|
|
|
|
2009-10-15 11:13:36 +00:00
|
|
|
# Return a DBIx::Class resultset of all finished builds of the primary
# job of $project, ordered newest-first.  Each row additionally carries
# the `releasename' and `buildstatus' columns joined in from
# resultInfo.  The extra `where' clause (see attrsToSQL) restricts the
# builds to those whose inputs match the primary job's attributes.
sub allPrimaryBuilds {
    my ($project, $primaryJob) = @_;
    my $allPrimaryBuilds = $project->builds->search(
        { jobset => $primaryJob->get_column('jobset'), job => $primaryJob->get_column('job'), finished => 1 },
        { join => 'resultInfo', order_by => "timestamp DESC"
        , '+select' => ["resultInfo.releasename", "resultInfo.buildstatus"]
        , '+as' => ["releasename", "buildstatus"]
        # Backslash makes DBIx::Class treat the string as literal SQL.
        , where => \ attrsToSQL($primaryJob->attrs, "me.id")
        });
    return $allPrimaryBuilds;
}
|
2009-02-06 15:02:49 +00:00
|
|
|
|
2009-10-15 11:13:36 +00:00
|
|
|
# Return the total number of primary builds for $project / $primaryJob
# (e.g. for pagination).
sub getPrimaryBuildTotal {
    my ($project, $primaryJob) = @_;
    # The original `scalar(allPrimaryBuilds(...))' returned the blessed
    # resultset object itself, not a row count; count() queries the
    # actual number of matching rows.
    return allPrimaryBuilds($project, $primaryJob)->count;
}
|
|
|
|
|
|
|
|
# Return one page of primary builds for $project / $primaryJob as a
# list of build rows.  $page defaults to 1 and $resultsPerPage to 20;
# non-numeric or zero values fall back to the defaults as well.
sub getPrimaryBuildsForReleaseSet {
    my ($project, $primaryJob, $page, $resultsPerPage) = @_;

    # Sanitise the pagination parameters.
    $page = (defined $page ? int($page) : 1) || 1;
    $resultsPerPage = (defined $resultsPerPage ? int($resultsPerPage) : 20) || 20;

    # List context forces the search to return the rows themselves.
    my @builds = allPrimaryBuilds($project, $primaryJob)->search(
        {}, { rows => $resultsPerPage, page => $page });

    return @builds;
}
|
|
|
|
|
2009-10-15 11:13:36 +00:00
|
|
|
# Among the builds in resultset $builds, return the single build of
# $job (matching its project/jobset/job and input attributes) that best
# represents the job's outcome, or undef if there is none.
sub findLastJobForBuilds {
    my ($builds, $job) = @_;
    my $thisBuild;

    # Find a build of this job that had the primary build
    # as input.  If there are multiple, prefer successful
    # ones, and then oldest.  !!! order_by buildstatus is hacky
    ($thisBuild) = $builds->search(
        { project => $job->get_column('project'), jobset => $job->get_column('jobset')
        , job => $job->get_column('job'), finished => 1
        }
        # rows => 1 limits the query to the first match under the
        # order_by above; the where clause is a literal SQL fragment.
        , { join => 'resultInfo', rows => 1
        , order_by => ["buildstatus", "timestamp"]
        , where => \ attrsToSQL($job->attrs, "build.id")
        , '+select' => ["resultInfo.buildstatus"], '+as' => ["buildstatus"]
        });
    return $thisBuild ;
}
|
|
|
|
|
|
|
|
# Return the build of $job that used $primaryBuild as one of its
# inputs, or undef if no such build exists.  Only the direct dependents
# of the primary build are searched; recursing through the full
# dependency graph is not done yet.
sub findLastJobForPrimaryBuild {
    my ($primaryBuild, $job) = @_;

    # Scalar context: dependentBuilds must yield a resultset, not rows.
    my $depBuilds = $primaryBuild->dependentBuilds;

    return findLastJobForBuilds($depBuilds, $job);
}
|
2009-02-06 15:02:49 +00:00
|
|
|
|
|
|
|
# Assemble the "release" built around $primaryBuild from the jobs in
# arrayref $jobs.  Returns a hashref with keys:
#   id          - id of the primary build
#   releasename - release name recorded for the primary build
#   jobs        - list of { build, job } pairs (build may be undef)
#   status      - 0 = okay, 1 = failed, 2 = unfinished
#   timestamp   - highest timestamp among the constituent builds
sub getRelease {
    my ($primaryBuild, $jobs) = @_;

    my @jobs = ();

    my $status = 0; # = okay

    # The timestamp of the release is the highest timestamp of all
    # constituent builds.
    my $timestamp = 0;

    foreach my $job (@{$jobs}) {
        my $thisBuild;

        if ($job->isprimary) {
            $thisBuild = $primaryBuild;
        } else {
            # Find the build of this non-primary job that used the
            # primary build as input.
            $thisBuild = findLastJobForPrimaryBuild($primaryBuild, $job) ;
        }

        # Failure (1) takes precedence over unfinished (2): a missing
        # build only downgrades an otherwise-okay status.
        if (!defined $thisBuild) {
            $status = 2 if $status == 0; # = unfinished
        } elsif ($thisBuild->get_column('buildstatus') != 0) {
            $status = 1; # = failed
        }

        $timestamp = $thisBuild->timestamp
            if defined $thisBuild && $thisBuild->timestamp > $timestamp;

        push @jobs, { build => $thisBuild, job => $job };
    }

    return
        { id => $primaryBuild->id
        , releasename => $primaryBuild->get_column('releasename')
        , jobs => [@jobs]
        , status => $status
        , timestamp => $timestamp
        };
}
|
|
|
|
|
|
|
|
|
|
|
|
# Return the most recent primary build whose release evaluated entirely
# successfully (status 0), or undef if there is none.  Relies on
# getPrimaryBuildsForReleaseSet returning builds newest-first.
sub getLatestSuccessfulRelease {
    my ($project, $primaryJob, $jobs) = @_;

    for my $build (getPrimaryBuildsForReleaseSet($project, $primaryJob)) {
        my $release = getRelease($build, $jobs);
        return $build if $release->{status} == 0;
    }

    return undef;
}
|
|
|
|
|
|
|
|
|
2008-11-28 14:36:04 +00:00
|
|
|
1;
|