// Build the list of ReportPortal launch tags/attributes for the update job.
ArrayList launchTagsList = []
launchTagsList << "DATE:${new Date().format("yyyy-MM-dd")}"
launchTagsList << (env.REPORTPORTAL_TAGS ?: "") // parentheses needed: '<<' binds tighter than '?:'
// JJB-expanded: OSP version "16.2" vs. the containerization threshold 13. Note that
// Groovy's String.toBoolean() only accepts "true"/"y"/"1", so a literal "yes" would be false.
if (Float.valueOf("16.2") >= 13 && "yes" in ["yes", "y", "true", "1"]) {
launchTagsList << "containers"
}
HashMap envVars = env.getEnvironment() as HashMap
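// REPORTPORTAL_TAGS was already appended above; drop it from the env snapshot so
// the key:value loop below does not add it a second time.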
envVars.remove('REPORTPORTAL_TAGS')
envVars.each { evKey, evValue ->
launchTagsList << "$evKey:$evValue"
}
String launchTagsStr = launchTagsList.join(';')
launchTagsStr = launchTagsStr.replaceAll('"', "'")
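// Illustration only (hypothetical values): the resulting attribute string looks like
//   DATE:2024-01-15;smoke;BUILD_NUMBER:42;JOB_NAME:my-job;...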
if (updateReportPortal && uploadTestResults) {
triggerReportPortalUpdateJob = true
reportportalUpdateTriggerJobParams = [
[$class: 'BooleanParameterValue', name: 'ADD_JENKINS_STAGES_TO_TESTS_RESULTS', value: true],
[$class: 'BooleanParameterValue', name: 'FULL_LOG_ATTACHMENT', value: fullLogAttachment],
[$class: 'BooleanParameterValue', name: 'IGNORE_SKIPPED_TESTS', value: ignoreSkippedTests],
[$class: 'BooleanParameterValue', name: 'LOG_LAST_TRACEBACK_ONLY', value: logLastTracebackOnly],
[$class: 'BooleanParameterValue', name: 'TFACON', value: enableTFA],
[$class: 'BooleanParameterValue', name: 'UPDATE_RHOS_PSI_RP_INSTANCE', value: true],
[$class: 'BooleanParameterValue', name: 'UPDATE_STAGING_PROJECTS', value: updateStagingProjects],
[$class: 'StringParameterValue', name: 'CALLING_BUILD_URL', value: BUILD_URL],
[$class: 'StringParameterValue', name: 'COMPOSE_ID', value: composeID],
[$class: 'StringParameterValue', name: 'DEPLOYMENT_RESULTS_PATH', value: null],
[$class: 'StringParameterValue', name: 'LAUNCH_ATTRIBUTES', value: launchTagsStr],
[$class: 'StringParameterValue', name: 'LAUNCH_NAME', value: env.RP_LAUNCH_ALTNAME ?: JOB_NAME],
[$class: 'StringParameterValue', name: 'LAUNCH_START_TIME', value: String.valueOf(currentBuild.startTimeInMillis)],
[$class: 'StringParameterValue', name: 'TEST_RESULTS_PATH', value: uploadTestResults ? remoteTestsDir : null],
]
log "Triggering the 'reportportal-update' job with the following parameters:\n" +
"$reportportalUpdateTriggerJobParams", level: "DEBUG"
build(
job: 'reportportal-update',
parameters: reportportalUpdateTriggerJobParams,
wait: false,
)
reportPortalUpdateJobTriggeredSuccessfully = true
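// The job above is triggered with 'wait: false', so its build URL is not returned
// directly; instead, poll Elasticsearch for the sync record (presumably written by
// the triggered job) matched against this build's URL.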
HashMap esResponse = ESWaitOnMatchQuery(
"http://seal52.lab.eng.tlv2.redhat.com",
"reportportal_update_sync",
"calling_build_url.keyword",
BUILD_URL
)
if (esResponse != null &&
esResponse._shards.successful == 1 &&
esResponse.hits.total.value > 0) {
triggeredBuildUrl = esResponse.hits.hits[0]._source.triggered_build_url
log "ReportPortal Update Build: $triggeredBuildUrl"
ArrayList tokenizedTB = triggeredBuildUrl.tokenize("/")
String badgeText = "${tokenizedTB[-2]} #${tokenizedTB[-1]}"
addBadgeText(badgeText, "black", "lavender", "1px", "black")
}
else {
log "Couldn't get the URL to the ReportPortal Update Build.\n$esResponse", level: "DEBUG"
}
} // ReportPortal Trigger - End of trigger condition
} // ReportPortal Trigger - End of timeout
} // ReportPortal Trigger - End of Main try
catch (Exception rpUpdateTriggerEx) {
log "Failed to trigger 'update-reportportal' Jenkins job:\n$rpUpdateTriggerEx", level: "ERROR"
// TODO: Send email to aopincar
}
finally {
Long rpTriggerTimeEnd = new Date().getTime()
// TODO: Update elasticsearch
}
}
def stage_finally() {
/**
* Run an Ansible role for aggregating logs from different nodes.
*
* Options which are supported by openstack/ansible-role-collect-logs can be
* found here [1].
* Before using any of the options in [1], make sure it is defined in the
* Infrared plugin.spec [2]; otherwise the option will not work with Infrared
* and the run will fail with an unrecognized argument.
*
* @jjb_param {artcl_options} optional
* Contains the default ansible-role-collect-logs options used by jobs
* that include this stage.
*
* @jjb_param {artcl_openstack_nodes} optional
* OpenStack nodes ansible-role-collect-logs will be executed on.
* Default is all:!localhost:!hypervisor
*
* @jjb_param {artcl_collect_log_types} optional
* Comma-separated list of log types to collect, such as OpenStack logs,
* network logs, system logs, etc. Acceptable values are system,
* monitoring, network, openstack and container.
* Default is container,monitoring,network,openstack,system
*
* @jjb_param {artcl_options_append} optional
* Use this variable to pass options from [1] that are specific to your
* job. Note that any option you use here must be specified in [2] as
* well.
*
* @jjb_param {artcl_collect_list} optional
* Contains the default list of files to be collected, defined within
* this stage. Set this variable to collect custom files instead.
* If you only want to add some extra files, see the
* {artcl_collect_list_append} variable below.
*
* @jjb_param {artcl_collect_list_append} optional
* Set this value to collect extra files in addition to those defined
* in {artcl_collect_list}.
*
* @jjb_param {artcl_exclude_list} optional
* Contains the default list of files to be excluded, defined within
* this stage. Set this variable to exclude custom files instead.
* If you only want to add some extra files, see the
* {artcl_exclude_list_append} variable below.
*
* @jjb_param {artcl_exclude_list_append} optional
* Set this value to exclude extra files in addition to those defined
* in {artcl_exclude_list}.
*
* @jjb_param {ir_publish_to_server} optional
* The hostname of the server the logs will be published to.
* Default is rhos-ci-logs.lab.eng.tlv2.redhat.com
*
* @jjb_param {ir_publish_baseurl} optional
* The URL at which the logs are available after they are published.
* Default is rhos-ci-logs.lab.eng.tlv2.redhat.com/logs/
*
* [1] https://github.com/openstack/ansible-role-collect-logs/tree/master/defaults
* [2] https://github.com/openstack/ansible-role-collect-logs/blob/master/infrared_plugin/plugin.spec
**/
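/*
 * Illustration only (hypothetical project/template names): overriding some of
 * the parameters documented above in a JJB project definition might look like:
 *
 *   - project:
 *       name: my-dfg-project
 *       jobs:
 *         - 'my-job-template':
 *             artcl_collect_log_types: 'network,openstack'
 *             artcl_options_append: '--artcl_find_maxdepth 6'
 */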
stage2('Collect logs') {
addToArtifacts(artifacts: '**/*.log')
addToArtifacts(artifacts: 'infrared/*.cfg')
addToArtifacts(artifacts: 'infrared/*.yml')
addToArtifacts(artifacts: 'infrared/*.yaml', allowEmptyArchive: true)
addToArtifacts(artifacts: 'infrared/.workspaces/')
addToArtifacts(artifacts: 'infrared/workspace.tgz')
addToArtifacts(artifacts: 'infrared/.envrc', allowEmptyArchive: true)
addToArtifacts(artifacts: 'jump/**/*.json', allowEmptyArchive: true)
timeout(time: 60, unit: 'MINUTES') {
def logserver_dest = "rhos-ci-logs.lab.eng.tlv2.redhat.com"
def logserver_user = "rhos-ci"
def log_baseurl = "http://rhos-ci-logs.lab.eng.tlv2.redhat.com/logs"
if (!logserver_dest) { // guard kept from the template for when {ir_publish_to_server} expands empty
println("Skipping log collection - ir_publish_to_server is not set.")
return
}
// use a global env variable on the Jenkins master to prevent log collisions
// when multiple Jenkins masters (e.g. production and staging) use the same
// log server destination (also make sure that location exists on the log server)
def server_prefix = env.LOGS_JENKINS_PREFIX ?: ''
// ensure the prefix starts with a slash and does not end with one (the rsync path should omit it)
if (server_prefix && server_prefix[0] != '/') { server_prefix = '/'+server_prefix }
if (server_prefix && server_prefix[-1] == '/') { server_prefix = server_prefix[0..-2] }
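// e.g. LOGS_JENKINS_PREFIX='staging/' -> server_prefix='/staging'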
def PLUGIN_REPO = "https://github.com/openstack/ansible-role-collect-logs.git"
def ARTIFACTS_SERVER_REL_PATH = "${JOB_NAME}/${BUILD_ID}"
def ARTIFACTS_SERVER_DEST_PATH = "/rhos-infra-dev-netapp/jenkins-logs${server_prefix}/${ARTIFACTS_SERVER_REL_PATH}"
def ARTIFACTS_SERVER_DEST = "${logserver_user}@${logserver_dest}:${ARTIFACTS_SERVER_DEST_PATH}"
if (log_baseurl == "") {
// we cannot use Groovy variables inside JJB's default value, so fall back to the default in Groovy
log_baseurl = "http://${logserver_dest}"
}
// strip trailing slash if present in baseurl
if (log_baseurl[-1] == '/') { log_baseurl = log_baseurl[0..-2] }
def LOG_URL = log_baseurl + server_prefix + "/${ARTIFACTS_SERVER_REL_PATH}/"
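// e.g. http://rhos-ci-logs.lab.eng.tlv2.redhat.com/logs/<JOB_NAME>/<BUILD_ID>/ (with an empty prefix)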
currentBuild.description = (currentBuild.description ?: '') + "Browse logs: ${LOG_URL}"
println("Browse logs: ${LOG_URL}")
/** KEEP THE LISTS OF FILES AND OPTIONS IN SYNC WITH THE LISTS IN
* jobs/compact/script/lib/logs.lib.sh
*/
def artcl_CLI_options_common = """--artcl_txt_rename true \
--artcl_gzip true \
--artcl_find_maxdepth 10 \
--artcl_use_rsync true \
--artcl_rsync_collect_list false \
--artcl_build_url "" \
--artcl_publish false \
--ara_enabled false \
"""
def artcl_CLI_hypervisor_collect_options = artcl_CLI_options_common
artcl_CLI_hypervisor_collect_options += " --openstack_nodes hypervisor "
artcl_CLI_hypervisor_collect_options += " --collect_log_types hypervisor "
def artcl_commands_hypervisor_list = [
"hypervisor.journal.cmd='journalctl -x --since=-8h --lines=100000'",
"hypervisor.journal-kernel.cmd='journalctl -xk --since=-8h --lines=100000'",
"hypervisor.virsh-list.cmd='virsh list --all'",
"hypervisor.disk.cmd='blkid;lsblk;df -T;df -i;'",
"hypervisor.memory.cmd='free -m'",
"hypervisor.rpms.cmd='rpm -qa'",
]
def artcl_commands_hypervisor_list_str = artcl_commands_hypervisor_list.join(',')
def artcl_collect_hypervisor_list = [
"/etc/ssh/",
"/var/lib/libvirt/qemu/*.log",
"/var/log/extra/journal.txt",
"/var/log/extra/journal-kernel.txt",
"/var/log/extra/virsh-list.txt",
"/var/log/extra/disk.txt",
"/var/log/extra/memory.txt",
"/var/log/extra/rpms.txt",
]
def artcl_collect_hypervisor_list_str = artcl_collect_hypervisor_list.join(',')
def artcl_collect_default_list = [
"/etc/",
"/etc/neutron",
"/etc/tempest/*.xml",
"/etc/tempest/saved_state.json",
"/etc/tempest/tempest.conf",
"/etc/tempest/tempest.conf.sample",
"/home/*/*.conf",
"/home/*/*.json",
"/home/*/*.log",
"/home/*/*.sh",
"/home/*/*.yaml",
"/home/*/*.yml",
"/home/*/*/black_list_*",
"/home/*/*/white_list_*",
"/home/*/*rc",
"/home/*/.instack/install-undercloud.log",
"/home/*/.tripleo",
"/home/*/central/*.yaml",
"/home/*/central/*/*.yaml",
"/home/*/central/network/nic-configs/",
"/home/*/composable_roles/*.yaml",
"/home/*/composable_roles/*/*.yaml",
"/home/*/composable_roles/network/nic-configs/",
"/home/*/config-download/",
"/home/*/dcn*/*.yaml",
"/home/*/dcn*/*/*.yaml",
"/home/*/dcn*/network/nic-configs/",
"/home/*/inventory/group_vars/*.yml",
"/home/*/openshift_deploy_logs/*.log",
"/home/*/ostest/",
"/home/*/ovb",
"/home/*/overcloud_deploy.sh",
"/home/*/overcloudrc*",
"/home/*/playbooks_logs/*.log",
"/home/*/robot/",
"/home/*/shiftstackrc*",
"/home/*/tempest*/*.log",
"/home/*/tempest*/*.xml",
"/home/*/tempest*/etc/*.conf",
"/home/*/tempest*/saved_state.json",
"/home/*/tempest*/.stestr/",
"/home/*/tripleo-heat-installer-templates/",
"/home/*/undercloud-ansible-*",
"/home/*/undercloud-install-*.tar.bzip2",
"/home/*/virt",
"/home/*/tripleo-deploy/",
"/home/*/overcloud-deploy/",
"/home/*/templates/",
"/root/",
"/usr/share/ceph-osd-run.sh",
"/usr/share/openstack-tripleo-heat-templates",
"/var/lib/cloud/",
"/var/lib/config-data/",
"/var/lib/config-data/puppet-generated/",
"/var/lib/config-data/puppet-generated/neutron/etc/neutron/plugins/",
"/var/lib/container-puppet/",
"/var/lib/docker-puppet",
"/var/lib/heat-config",
"/var/lib/libvirt/qemu/*.log",
"/var/lib/mistral/",
"/var/lib/neutron/",
"/var/lib/openvswitch/ovn/*.db",
"/var/lib/tripleo-config",
"/var/log/",
"/var/log/containers/opendaylight",
"/var/log/extra/containers/",
"/var/log/extra/podman/containers",
"/var/run/heat-config",
"/var/tmp/packstack",
"rally-dir/*.html",
"rally-dir/*.log",
"rally-dir/*.txt",
"rally-dir/*.xml",
]
def artcl_collect_default_list_str = artcl_collect_default_list.join(',')
def artcl_exclude_default_list = [
"'.*'",
"/etc/pki/*",
"/etc/selinux/targeted/*",
"/root/*.initrd*",
"/root/*.tar*",
"/root/*.vmlinuz*",
"/root/*.qcow*",
"/udev/hwdb.bin",
"/var/lib/config-data/*/etc/puppet/modules",
"/var/lib/config-data/*/etc/selinux/targeted/*",
"/var/log/journal/*",
]
def artcl_exclude_default_list_str = artcl_exclude_default_list.join(',')
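// NOTE: the '"" != ""' conditions below are JJB template expansions; the left-hand
// side is the (here empty) job parameter value, so each default branch is taken.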
if ("" != "") {
artcl_CLI_options = ""
} else {
artcl_CLI_options = artcl_CLI_options_common
}
if ("" != "") { artcl_CLI_options += " " }
if ("" != "") {
artcl_CLI_options += " --openstack_nodes "
} else {
artcl_CLI_options += " --openstack_nodes all:!localhost:!hypervisor"
}
if ("" != "") {
artcl_CLI_options += " --collect_log_types "
} else {
artcl_CLI_options += " --collect_log_types container,monitoring,network,openstack,system"
}
if ("" != "") {
artcl_CLI_options += " --artcl_collect_list "
} else {
artcl_CLI_options += " --artcl_collect_list $artcl_collect_default_list_str"
}
if ("" != "") { artcl_CLI_options += " --artcl_collect_list_append " }
if ("" != "") {
artcl_CLI_options += " --artcl_exclude_list "
} else {
artcl_CLI_options += " --artcl_exclude_list $artcl_exclude_default_list_str"
}
if ("" != "") { artcl_CLI_options += " --artcl_exclude_list_append " }
def collect_dir = "$WORKSPACE/logs"
artcl_CLI_options = "--local_working_dir '${ir_venv}' --artcl_collect_dir '$collect_dir' " + artcl_CLI_options
artcl_CLI_hypervisor_collect_options = "--local_working_dir '${ir_venv}' --artcl_collect_dir '${collect_dir}' " + artcl_CLI_hypervisor_collect_options
artcl_CLI_hypervisor_collect_options += " --artcl_commands $artcl_commands_hypervisor_list_str"
artcl_CLI_hypervisor_collect_options += " --artcl_collect_list '$artcl_collect_hypervisor_list_str'"
def copy_artifact_files = "mkdir -p ${collect_dir};\nshopt -s globstar;\n"
// iterate the patterns registered via the addToArtifacts() calls above
for (artifact_pattern in ArtclCollectList.instance) {
def fail_if_missing = artifact_pattern.allowEmptyArchive ? ' || true;' : ';'
copy_artifact_files += "cp --parents -r ${artifact_pattern.artifacts} ${collect_dir} ${fail_if_missing}\n"
}
CIResourceCheck(CIResourcesThisBuild + logserver_dest + [resource_from_url(PLUGIN_REPO)])
try {
String workspaceExportCopyKeys = "true".toBoolean() ? "-K" : ""
sh2 """
. ${ir_venv}/bin/activate
unset ANSIBLE_FORCE_COLOR
pushd "$WORKSPACE/infrared"
# try exporting the workspace; if it fails we still need to continue with log collection
infrared workspace export ${workspaceExportCopyKeys} -f workspace || true
printenv | sort &> env.log
pushd "$WORKSPACE"
# gather all files we want from jenkins slave into collect_dir for artcl to pick them up
${copy_artifact_files}
pushd "$WORKSPACE/infrared"
infrared plugin add $PLUGIN_REPO --src-path infrared_plugin
infrared plugin list
export ANSIBLE_ROLES_PATH="$WORKSPACE/infrared/plugins"
export ANSIBLE_LOG_PATH="$WORKSPACE/infrared/collect.log"
# do not cause failure here see RHOSINFRA-3205
set +eo pipefail
infrared ansible-role-collect-logs --disable_artifacts_cleanup true ${artcl_CLI_options}
echo "Collecting logs from hypervisor"
infrared ansible-role-collect-logs --disable_artifacts_cleanup true ${artcl_CLI_hypervisor_collect_options}
artcl_exit=\$?
### Find known big issues (oom,segfault,selinux) in logs
### ... count them and print as build marks
pushd "${collect_dir}"
# do not cause failure if we have no-hits or missing files here
set +eo pipefail
findcat() {
while read F; do
if [[ "\$F" =~ .gz\$ ]]; then
zcat "\$F"
else
cat "\$F"
fi
done < <(find . "\$@") # all params are passed to find
}
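# usage (as below): findcat -maxdepth 4 -path '*/var/log/messages*'
# prints the concatenated contents of all matching files, zcat-ing any *.gz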
# here we use sed to strip pids/uids and similar numbers in an attempt to count
# just the (almost) unique avc denials (instead of 8000 repeats of the same one)
#
# (there are some numbered contexts (e.g. subj=...,c96,c442) which so far carry no
# info, still resulting in a single audit2allow entry, so we strip those too)
SELINUX=\$(findcat -maxdepth 5 -path '*/var/log/audit/audit*' | \
grep -i denied | \
sed -r 's/(audit\\(|(\\S+id|ino|ses)=|[,.]c)[0-9.:]+/\\1.../g' | \
sort -u | \
wc -l)
OOM=\$(findcat -maxdepth 4 -path '*/var/log/messages*' | \
grep -i oom-killer | \
wc -l)
SEGFAULT=\$(findcat -maxdepth 4 -path '*/var/log/messages*' | \
grep -i segfault | \
wc -l)
set +x
# split into separate strings so the presence of this code in the console log is not itself matched as a mark
echo "Build" "mark: selinux_problems_found=\$SELINUX"
echo "Build" "mark: oom_killer_problems_found=\$OOM"
echo "Build" "mark: segfault_problems_found=\$SEGFAULT"
echo "rsyncing logs to ${ARTIFACTS_SERVER_DEST}/"
du -sh ${collect_dir}/*
# Copy console logs from all stages to the collect_dir
mkdir -p ${collect_dir}/console_logs
cp -r $WORKSPACE/.sh/* ${collect_dir}/console_logs
# Publish logs
ssh ${logserver_user}@${logserver_dest} mkdir -p "${ARTIFACTS_SERVER_DEST_PATH}"
rsync -av --quiet ${collect_dir}/* "${ARTIFACTS_SERVER_DEST}/"
echo "rsyncing logs finished"
# Symlink the README on the collect server into this build's log directory
# (${ARTIFACTS_SERVER_DEST_PATH} is interpolated by Groovy before the command runs)
ssh ${logserver_user}@${logserver_dest} 'ln -s /rhos-infra/rhos-logs-readme.html ${ARTIFACTS_SERVER_DEST_PATH}/README.html'
exit \$artcl_exit
""", basename: 'collect-artcl.log', echoScript: false, maxLines: -1
} finally {
buildMarks() // NOTE: if log collection fails we lose these marks, so log collection belongs in the try block and this in its finally
}
} // endOf timeout()
} // endOfStage Collect logs
}
def stage_finally_post() {
/**
* Post build actions.
* Delete virtual environment when build is done.
*
**/
try {
// sanity check: only act on a plausibly long venv path before moving/deleting it
if (ir_venv?.size() > 6) {
if (env.DISABLE_CLEANUP == "true") {
// hides confusing error related to ssh sockets
sh "mv -f $ir_venv $WORKSPACE/ 2>&1 >> cleanup.log || true"
} else {
sh "rm -rf $ir_venv"
}
}
}
catch(Exception e) {
echo "WARN: [CI] Failed to clean up ${ir_venv}: " + e.getMessage()
sh "rm -rf $ir_venv"
// housekeeping failures should not affect builds status
// currentBuild.result = 'UNSTABLE' // could only downgrade
finally_errors.add(['name': 'venv_cleanup', 'message': e.getMessage(), 'fail_build': false])
}
/**
* Post build actions.
* Delete workspace when build is done.
*
*/
dir('infrared') {
try {
// Do cleanup only when running on production
if (env.JENKINS_URL.contains("rhos-ci-jenkins.lab.eng.tlv2.redhat.com")) {
step([$class: 'WsCleanup'])
}
}
catch(Exception e) {
echo 'Failed to clean workspace ' + e.getMessage()
finally_errors.add(['name': 'workspace_cleanup', 'message': e.getMessage(), 'fail_build': false])
}
}
/**
* Send mail report at the end of the job.
* Attachments can be added.
*
* @jjb_param {email_recipients} Email recipients address. Separate by comma.
* @jjb_param {email_reply_addr} Email replyto address.
* @jjb_param {email_attach} Email attachments. Separate multiple attachments by comma.
*
*/
if (env.SKIPMAIL == 'true') {
echo "Skipping email report due to SKIPMAIL parameter."
} else if (currentBuild.result == 'ABORTED') {
echo "Skipping email report as this build is ABORTED."
} else if ('' == '') { // JJB-expanded {email_recipients} is empty here, so the report is skipped
echo "Skipping email report as no recipients provided."
} else {
emailext (
from: 'rhos-ci-jenkins@redhat.com',
to: '',
replyTo: 'noreply@redhat.com',
subject: "Jenkins Job - " + currentBuild.currentResult + " - $env.JOB_NAME",
body: """The $env.JOB_NAME job finished with """ + currentBuild.currentResult + """ status.
The job could be found at: $env.BUILD_URL
For more info, look for the attachments.
""",
mimeType: 'text/html',
attachmentsPattern: '',
attachLog: true,
compressLog: true
)
}
}
pipeWrapper() {
if ( env.IR_PROVISION_HOST && env.NODE_NAME == 'qe-virthost-cpu' ) {
if ( env.IR_PROVISION_HOST.endsWith('brq.redhat.com') || env.IR_PROVISION_HOST.endsWith('brq2.redhat.com') ) {
env.NODE_NAME = 'qe-generic && brq2'
addMark("NODE_NAME_OVERRIDE: qe-generic && brq2", "pink")
}
else if ( env.IR_PROVISION_HOST.endsWith('rdu2.redhat.com') ) {
env.NODE_NAME = 'qe-generic && rdu2'
addMark("NODE_NAME_OVERRIDE: qe-generic && rdu2", "pink")
}
else if ( env.IR_PROVISION_HOST.endsWith('tlv.redhat.com') || env.IR_PROVISION_HOST.endsWith('tlv2.redhat.com') ) {
env.NODE_NAME = 'qe-generic && tlv2'
addMark("NODE_NAME_OVERRIDE: qe-generic && tlv2", "pink")
}
}
node2(env.NODE_NAME ?: 'dfg-upgrades || qe-virthost-cpu') {
pickMirror()
if (env.TAGS) {
addMark("TAGS: " + env.TAGS, "LightGreen")
}
addMark("SLAVE: " + env.NODE_NAME, "orange")
try {
// the timeout should be inside node to avoid timing out queued jobs
job_exec_timeout = env.JOB_TIMEOUT ?: '360'
timeout(Integer.parseInt(job_exec_timeout)) {
step([$class: 'WsCleanup'])
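// note: the assignments below omit 'def' on purpose, so they land in the script
// binding and stay visible to the stage_finally*() functions defined above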
prefix=sh(returnStdout: true, script: 'echo $(echo -n ' + env.JOB_NAME + env.BUILD_NUMBER + ' | md5sum | /bin/cut -f1 -d" ")-').trim()
ir_venv=sh(returnStdout: true, script: 'mktemp -p /tmp -d ir-venv-XXXXXXX').trim()
uc_type=sh(returnStdout: true, script: 'echo "uc-full-deploy"').trim()
host=sh(returnStdout: true, script: 'echo ${IR_PROVISION_HOST:-$(hostname)}').trim()
if ('True'.toBoolean()) {
CIResourcesThisBuild.add(host)
CIResourceCheck(CIResourcesThisBuild)
}
stage_before_try()
// set the IS_CVP env variable at the start of the job so we can check it later, during the infrared run;
// it is used to verify that all the pieces (e.g. container image overrides) have been properly passed to the infrared tripleo-overcloud command
env.IS_CVP = 'False'.toBoolean()
// some CI jobs don't set the IGNORE_CVP_FAILSAFES build param at all, so default it to false (the safe choice)
// so the rest of the job (Groovy / compact shell scripts) won't complain that this env variable is undefined
env.IGNORE_CVP_FAILSAFES = env.IGNORE_CVP_FAILSAFES ? env.IGNORE_CVP_FAILSAFES.toBoolean() : false
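// env values are stored as strings ("true"/"false"), so readers below re-coerce with toBoolean()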
log "env.IGNORE_CVP_FAILSAFES: " + env.IGNORE_CVP_FAILSAFES, level: "DEBUG"
log "{is_cvp|}.toBoolean(): " + 'False'.toBoolean(), level: "DEBUG"
log "env.IS_CVP.toBoolean(): " + env.IS_CVP.toBoolean(), level: "DEBUG"
log "overcloud_container_images_urls: " + overcloud_container_images_urls, level: "DEBUG"
// note: env vars are strings, so coerce with toBoolean(); a bare '! env.IS_CVP' is always false for a non-empty string
if (env.JOB_NAME =~ /^cvp-.*/ && ! env.IGNORE_CVP_FAILSAFES.toBoolean() && ! env.IS_CVP.toBoolean()) {
error("this job is a CVP one (its name starts with 'cvp-') hence it should have the jjb param 'is_cvp' set to true, " +
"please correct that in the job's configuration")
}
// the check for overcloud_container_images_urls is performed here at the start of the job to fail fast if this variable is empty
// it's also checked later, during tripleo-overcloud stage, to make sure it's passed there correctly too
if (env.IS_CVP.toBoolean()) {
if (overcloud_container_images_urls ==~ /.*\w+.*/) {
log "the value of overcloud_container_images_urls will be used for container image URLs override of tripleo overcloud", level: "WARN"
} else {
if (! env.IGNORE_CVP_FAILSAFES.toBoolean()) {
error("this job is a CVP one (jjb param 'is_cvp: true') hence overcloud_container_images_urls can't be empty; " +
"make sure you provided the properly formatted CI_MESSAGE and run parse_ci_message() groovy method as part of the job")
}
}
if (env.IGNORE_CVP_FAILSAFES.toBoolean()) {
log "IGNORE_CVP_FAILSAFES=true, the results of this build may be affected by it", level: "WARN"
}
}
sh 'set|grep -v -E "^(BASHOPTS|BASH_VERSINFO|EUID|PPID|SHELLOPTS|UID)=.*" > $WORKSPACE/.envrc'
archiveArtifacts artifacts: '.envrc'
try {
stage_inside_try_pre()
stage_inside_try()
stage_inside_try_post()
stage_inside_try_2_pre()
stage_inside_try_2()
stage_inside_try_2_post()
}
catch (Exception ex) {
currentBuild.result = 'FAILURE'
log "Oops! Something went wrong.\n\t${ ex }", level: 'ERROR'
stage_ex = ex
stage_catch()
throw stage_ex;
}
finally {
stage_finally_dont_wrap()
stage2('Finally Steps') {
stage_finally_upload_test_results()
stage_finally_post_processing_triggers()
stage_finally_pre()
stage_finally()
stage_finally_post()
def finally_fatal_errors = ''
for (finally_step in finally_errors) {
if (finally_step['fail_build'] ?: false) {
finally_fatal_errors += "${finally_step['name']} ${finally_step['message']} \n"
}
else {
println "Warning: We had failure(s) in finally stage: ${finally_step['name']} ${finally_step['message']} \n"
}
}
if (finally_fatal_errors != '' || stage_ex != null || (env.STAGE_FATAL_EX ?: '') != '') { // STAGE_FATAL_EX may be unset (null)
try {
pipeline_stage_ex = env.STAGE_FATAL_EX ?: stage_ex.getMessage()
}
catch (Exception ex) {
pipeline_stage_ex = ''
log "IR-Try-Finally: Unable to get exception message\n" +
"finally_fatal_errors: ${ pipeline_stage_ex }\n" +
"stage_ex: ${ stage_ex }\n" +
"env.STAGE_FATAL_EX: ${ env.STAGE_FATAL_EX }", level: "WARN"
}
currentBuild.result = 'FAILURE'
if (pipeline_stage_ex == '') {
error "We had fatal failure(s) in finally stage(s): \n ${finally_fatal_errors}"
} else if (finally_fatal_errors == '') {
error "Job failed with exception: \n ${pipeline_stage_ex}"
} else {
error "\n Job stage failed with exception: \n ${ pipeline_stage_ex } \n We had fatal failure(s) in finally stage(s): \n ${ finally_fatal_errors } \n "
}
}
} // stage2
} // finally
} // timeout
} // try
catch (org.jenkinsci.plugins.workflow.steps.FlowInterruptedException ex) {
if (ex.causes.size() > 0) {
def ex_cause = ex.causes[0]
if (ex_cause instanceof org.jenkinsci.plugins.workflow.steps.TimeoutStepExecution.ExceededTimeout) {
log "Job execution timeout (${ job_exec_timeout } minutes) expired.", level: 'ERROR'
}
else if (ex_cause instanceof jenkins.model.CauseOfInterruption.UserInterruption) {
log "Job aborted by '${ ex_cause.getUserId() }' (${ ex_cause.getUser().getDisplayName() }).", level: 'WARNING'
}
}
throw ex
} // end catch
} // node
} // pipeWrapper