@Library('common') _ @NonCPS def buildMarks(rules) { // rules should be passed in format [[name, bg_color],...]: // [["poodle", "lightyellow"],["puddle_core", "yellow"], ...] def matcher rules.each { matcher = manager.getLogMatcher(".*Build mark: " + it[0] + "=([a-zA-Z0-9_.-]+).*") if(matcher?.matches()) { addBadgeText(it[0] + ": " + matcher.group(1), "black", it[1], "1px", "black") } } def warning_matcher = manager.getLogMatcher(".*Build mark: WARNING(.*)\$") if(warning_matcher?.matches()) { addBadgeText("WARNING: " + warning_matcher.group(1), "red", "", "1px", "black") } } /** * Searches for build marks in build log in reverse. * * @param rules - should be passed in format [[name, bg_color],...]: * [["poodle", "lightyellow"],["puddle_core", "yellow"], ...] */ @NonCPS def buildMarksReverseSearch(rules) { def matcher rules.each { buildMarkName = it[0] buildMarkBgColor = it[1] pattern = ~/.*Build mark: ${buildMarkName}=([a-zA-Z0-9_.-]+).*/ log_lines = manager.build.logFile.text.tokenize("\n") for (line in log_lines.reverse()) { matcher = pattern.matcher(line) if(matcher?.matches()) { addBadgeText(buildMarkName + ": " + matcher.group(1), "black", buildMarkBgColor, "1px", "black") break } } } } def addMark(mark, tag_color) { addBadgeText(mark, "black", tag_color, "1px", "black") } def getBuildVar(product_build) { if (product_build == "cdn") { buildVar = "--cdn cdn_creds.yml" } else { buildVar = "--build ${product_build}" } return buildVar } // @param listObj is an object that contains e.g [unified, [7-9]|1[0-5]:all, [7-9]:data_processing-tempest] // @param releaseNumber is a release number. E.g 10 // @param debug indicates whether to turn on debugging info // @returns comma separated tests list from the listObj def getTestStringFromList(Object listObj, String releaseNumber, Boolean debug) { def testList = "" def list = listObj[0..-1].split("', '") // note space is important in "', '" list.each { item -> def regExpTestPair = item.split(":") def regExp = regExpTestPair.first() if (releaseNumber.matches(regExp)){ if (testList.equals("")) testList = regExpTestPair.last() else testList = testList + "," + regExpTestPair.last() } if (debug) println "RelNum: " + releaseNumber + " RegExp: " + regExp + " TestList:" + testList } return testList } // @param releaseNumber is a release number. E.g 10 // @param componentName is the name of the component. E.g 'cinder', 'nova', 'neutron' // @returns comma separated tests list from the map if the tests list is defined for the component and corresponding // release number; null string otherwize def getTestStringFromComp2TestMap(String component_to_tests_map, String releaseNumber, String componentName) { def testList = "" def debug = false // component_to_tests_map is defined only in those unified jobs that verify GERRIT_REFSPEC change if (!component_to_tests_map){ return testList } component_to_tests_map = component_to_tests_map.replace("([","") component_to_tests_map = component_to_tests_map.replace("])])","])") component_to_tests_map = component_to_tests_map.replace("OrderedDict", "") component_to_tests_map = component_to_tests_map.replace("']), ('", "; ") component_to_tests_map = component_to_tests_map.replace("', ['", ":: ") component_to_tests_map = component_to_tests_map.replace("'])", "") component_to_tests_map = component_to_tests_map.replace("('", "") def map = // Take the String value between // the [ and ] brackets. component_to_tests_map[0..-1] // Split on , to get a List. .split('; ') // note space is important in '; ' .collectEntries { entry -> def pair = entry.split(':: ') if (debug) println pair.toString() [(pair.first()): pair.last()] } if (debug) println map.toString() if (map.containsKey(componentName)){ def listAsString = map[componentName] /* * [sahara, [7-9]:data_processing-tempest, [10-14]:data_processing] * [swift, [7-14]:object_storage] * [neutron-lbaas, [8-9]:neutron, [10,11,13,14]:neutron,neutron_lbaas] * [unified, [7-9]|1[0-5]:all, [7-9]:data_processing-tempest, 1[0-5]:data_processing] */ testList = getTestStringFromList(listAsString, releaseNumber, debug) if (debug) println "RelNum: " + releaseNumber + " TestList:" + testList } return testList } // ****************************************** // Global variables // ****************************************** try { // Everything here shouldn't be in a try-catch block after some testing period log "Job was generated with 'tripleo_ffu_osp16_2_17_1_upgrade_defaults_full' defaults", level: "DEBUG" GlobalVars.vars['JJB'] = [ 'COMPONENT': "ffu", 'DFG': "upgrades", 'PRODUCT_BUILD': "passed_phase2", 'PRODUCT_VERSION': "16.2", 'SECOND_PRODUCT_VERSION': "17.1", 'RHEL_VERSION': "rhel-8.4", 'NEXT_RHEL_VERSION': "9.2", 'OSP_RHEL_MAP': [ '10': 'rhel-7.7', '13': 'rhel-7.9', '14': 'rhel-7.7', '15': 'rhel-8.2', '16': 'rhel-8.1', '16.1': 'rhel-8.2', '16.2': 'rhel-8.4', '17.0': 'rhel-9.0', '17.1': 'rhel-9.2' ] ] } catch (Exception ex) { log "An error occurred while adding JJB to globalVars\n${ex}", level: "ERROR" } jump_status = 'passed' Exception stage_ex = null env.STAGE_FATAL_EX = '' env.IR_ANSIBLE_LOG_OUTPUT_NO_ANSI = 'yes' env.IR_GEN_VARS_JSON = 'yes' pipeline_stage_ex = '' finally_errors = [] prefix = null ir_venv = null host = null // baremetal globals bm_templates_link = null bm_templates_subnet_src = null bm_infrared_subnet = null bm_external_storage_src = null bm_external_storage_name = null // Kepp all stages inside dedicated methods to avoid issue with "method too large" // https://issues.jenkins-ci.org/browse/JENKINS-37984 import groovy.json.JsonOutput // UMB global vars and functions - used only if the build was triggered by a UMB/CI MESSAGE (i.e.: part of CVP) // based on https://gitlab.sat.engineering.redhat.com/cvp/pipeline/blob/master/samples/group-testing/Jenkinsfile buildMetadata = [:] // image build metadata; parsed from the UMB message overcloud_container_images_urls = '' datagrepper_url = "https://datagrepper.engineering.redhat.com" datagrepper_topic_url = datagrepper_url + "/raw?topic=/topic" datagrepper_msg_url = datagrepper_url + "/id?id=" send_umb_iterator = 0 // mapping of build status in Jenkins -> UMB status // Jenkins statuses: https://github.com/jenkinsci/jenkins/blob/master/core/src/main/java/hudson/model/Result.java // UMB conforms to ResultsDB outcome standards hence for info on possible statuses in UMB check // 'outcome' field at https://resultsdb20.docs.apiary.io/#reference/0/results umb_build_result_map = [ 'SUCCESS': 'PASSED', 'FAILURE': 'FAILED', 'UNSTABLE': 'NEEDS_INSPECTION', 'NOT_BUILT': 'FAILED', 'ABORTED': 'FAILED' ] def calculate_umb_build_status() { // UNSTABLE in Jenkins = NEEDS_INSPECTION in UMB.. we want to report NEEDS_INSPECTION in situation we can't retrieve current build result _currentBuild_result = currentBuild.result ?: currentBuild.currentResult ?: 'UNSTABLE' return umb_build_result_map[_currentBuild_result] } def extract_images_urls(Map _buildMetadata) { currentBuild.description = (currentBuild.description ?: '') + "errata_id: ${_buildMetadata['errata_id']}, " print "_buildMetadata['images']: " + _buildMetadata['images'] _buildMetadata['images'].each { image -> overcloud_container_images_urls = overcloud_container_images_urls ? "${overcloud_container_images_urls},${image['full_name']}" : image['full_name'] } print "overcloud_container_images_urls: " + overcloud_container_images_urls currentBuild.description = (currentBuild.description ?: '') + "
Overcloud Container Images URLs:
" overcloud_container_images_urls.split(',').eachWithIndex { url,c -> currentBuild.description = (currentBuild.description ?: '') + "${c}. ${url}
" } return overcloud_container_images_urls } def parse_ci_message(String ciMessage, String release = '') { def _buildMetadata = [:] currentBuild.rawBuild.properties.causes.eachWithIndex { n,i -> println "n.properties: ${n.properties}" } currentBuild.description = (currentBuild.description ?: '') + "${currentBuild.rawBuild.properties.causes.shortDescription.join(', ')}, " echo "Raw CI message:\n${ciMessage}" try { // Parse the message into a Map def ciData = readJSON text: ciMessage _buildMetadata['ciData'] = ciData // Extract the main info if ( _buildMetadata['ciData']['build_url'] ) { _buildMetadata['compose_promote'] = ciData?.compose_promote as String _buildMetadata['compose_version'] = ciData?.compose_version as String _buildMetadata['skip_next_phase'] = ciData?.skip_next_phase as String _buildMetadata['status'] = ciData?.status as String _buildMetadata['version'] = ciData?.version as String } if ( _buildMetadata['ciData']['artifact'] && _buildMetadata['ciData']['artifact']['id'] ) { _buildMetadata['component'] = ciData?.artifact?.component _buildMetadata['type'] = ciData?.artifact?.type _buildMetadata['who'] = ciData?.artifact?.who as String _buildMetadata['errata_id'] = ciData?.artifact?.errata_id as Integer _buildMetadata['images'] = ciData?.artifact?.images // try to discover the version of the product if ( _buildMetadata['ciData']['artifact']['errata_id'] ) { def product_version = 'product_version_unknown' def product_name = 'product_name_unknown' def base_name = 'base_name_unknown' def base_version_major = 'base_version_major_unknown' if ( release == '' ) { // once we have a krb service principal (https://redhat.service-now.com/help?id=rh_ticket&table=sc_req_item&sys_id=ff397305db47405483afed72ca9619a2) // and yet-to-be-created a jira ticket to get errata account for the above krb service principal // ... we can then get details from errata from https://errata.engineering.redhat.com/errata/show_xml/48390 // or https://errata.engineering.redhat.com/advisory/48390/builds.json using that service account // more info on getting access to errata tool: https://errata.devel.redhat.com/user-guide/intro-introduction.html#intro-getting-access-to-errata-tool // TODO: request and start using a service account to access Errata Tool (if possible)... till then.. // ... in the meantime... // we use these base64'ed credentials of wznoinsk, if these stop working either ping him or create new ones with: // echo -ne "your_username:your_password" | base64 --wrap 0 def errata_auth = 'd3pub2luc2s6VmlzaXREdWJhaS4yMDIy' def errata_url = "https://errata.engineering.redhat.com/advisory/" + _buildMetadata['errata_id'] + "/builds.json" def curl_cmd = "sudo curl --header \"Content-Type: application/json\" --cacert /etc/ipa/ca.crt --negotiate -u : ${errata_url}" echo "fetching " + errata_url def advisory_builds_raw = sh(returnStdout: true, script: "${curl_cmd}") println "advisory_builds_raw: " + advisory_builds_raw def advisory_builds = readJSON text: advisory_builds_raw print "advisory_builds: " + advisory_builds release = advisory_builds.keySet()[0] } else { println "didn't query Errata Tool as the RELEASE (release) parameter was provided by the user" } // i.e.: RHOS-16.1-RHEL-8 if (release.matches("RHOS-.*")) { product_name = release.split('-')[0] product_version = release.split('-')[1] base_name = release.split('-')[2] base_version_major = release.split('-')[3] } // i.e.: RHEL-7-OS-13-ELS else if (release.matches(".*-OS-.*")) { product_name = release.split('-')[2] product_version = release.split('-')[3] + '-' + release.split('-')[4] base_name = release.split('-')[0] base_version_major = release.split('-')[1] } // i.e.: STF-1.3-RHEL-8 else if (release.matches("STF-.*")) { product_name = release.split('-')[0] product_version = release.split('-')[1] base_name = release.split('-')[2] base_version_major = release.split('-')[3] } // the below code may be enabled to add '.0' to the product_version number but for now we take // product_version from the CI_MESSAGE as-is // if ((product_version.matches("\\d+")) && (! product_version.matches(".*\\..*"))) { // product_version = product_version + '.0' // } println "product_name: " + product_name println "product_version: " + product_version println "base_name: " + base_name println "base_version_major: " + base_version_major _buildMetadata['product_name'] = product_name.trim() _buildMetadata['product_version'] = product_version.trim() _buildMetadata['base_name'] = base_name.trim() _buildMetadata['base_version_major'] = base_version_major.trim() currentBuild.description = (currentBuild.description ?: '') + "
product_name: ${ product_name }
product version: ${ product_version }
base name: ${ base_name }
base version major: ${ base_version_major }
" } else if ( _buildMetadata['images'] ) { // usually this is a really weak way to discover osp or product version; // ideally there should be a different field in CI/UMB message that should indicate the version // or you'd need to query an external source - similar to querying Errata Tool above _buildMetadata['product_version'] = _buildMetadata['images'][0].tag.split('-')[0] currentBuild.description = (currentBuild.description ?: '') + "product version: " + _buildMetadata['product_version'] + ", " } print "_buildMetadata['images']: " + _buildMetadata['images'] } } catch (Exception e) { echo "Exception caught during parsing CI_MESSAGE: ${e}" } return _buildMetadata } def send_ci_message(Map args) { // params that have default values dry_run = args.dry_run.toString() ? args.dry_run.toBoolean() : true.toBoolean() println "args.dry_run: " + args.dry_run println "args.dry_run.toString(): " + args.dry_run.toString() provider = args.provider ?: "Red Hat UMB" iterator = args.iterator ?: send_umb_iterator ci_name = args.ci_name ?: env.JOB_NAME msg_properties = "CI_NAME = ${ ci_name }" if (args.msg_properties) { msg_properties = msg_properties + args.msg_properties } ci_type = args.ci_type ?: 'Custom' // params without default values - they should always be provided when calling this method umb_topic = args.umb_topic msg_content = args.msg_content // @raises groovy.json.JsonException new groovy.json.JsonSlurper().parseText(msg_content) print("DEBUG: provider: " + provider) print("DEBUG: iterator: " + iterator) print("DEBUG: dry_run: " + dry_run) print("DEBUG: umb_topic: " + umb_topic) print("DEBUG: msg_properties: " + msg_properties) print("DEBUG: msg_content: " + msg_content) def timeStamp = new Date().format("yyyy-MM-dd HH:mm:ss", TimeZone.getTimeZone("UTC")) echo "${timeStamp} (UTC): I will send the following UMB message to ${datagrepper_topic_url}/${umb_topic}:\n${msg_content}" // println("DEBUG: dry_run: " + ${params.dry_run} + ", provider: " + ${params.provider} + ", umb_topic: " + ${params.umb_topic} + ", msg_properties: " + ${params.msg_properties} + ", msg_content: " + ${params.msg_content} + ", iterator: " + ${params.iterator}) def msgSentResult if (! dry_run) { timeout(time: 1, unit: 'MINUTES') { try { msgSentResult = sendCIMessage(messageContent: msg_content, messageType: ci_type, overrides: [topic: umb_topic], providerName: provider, messageProperties: msg_properties ) } catch (Exception e) { echo "Exception caught during sending UMB message: ${e}, check the URL below to see if the message was actually sent or not" } finally { def msgSentID = msgSentResult.getMessageId() def msgSentURL = "${datagrepper_msg_url}${msgSentID}&is_raw=true&size=extra-large" echo "message sent URL: ${msgSentURL}" currentBuild.description = (currentBuild.description ?: '') + iterator + ". UMB sent: ${msgSentID}
" } } } else { println "WARNING: running in 'dry_run' mode, no messages are/were actually sent to UMB" } } def send_results_to_umb(Map args){ print "Sending Build Result to UMB" // send UMB(s) with results // code based on https://gitlab.sat.engineering.redhat.com/cvp/pipeline/blob/master/samples/umb-interactions/Jenkinsfile#L44 // fields which are required based on https://docs.engineering.redhat.com/pages/viewpage.action?spaceKey=CVP&title=Container+Verification+Pipeline+E2E+Documentation#ContainerVerificationPipelineE2EDocumentation-SendamessagetotheUMBindicatingyourexternalteststatus // NOTE: the below is not compatible with schema 0.2.x (https://pagure.io/fedora-ci/messages/blob/master/f/examples/brew-build.test.complete.json) println "params: " + params params_json = JsonOutput.toJson(params) ?: 'na' println "params_json: " + params_json dry_run = args.dry_run.toString() ? args.dry_run.toBoolean() : true.toBoolean() puddle_version = args.puddle_version ?: 'puddle_unknown' product_version = args.product_version ?: 'product_version_unknown' description = args.description ?: 'no_description' ci_type = args.ci_type ?: 'Custom' def provider = "Red Hat UMB" // change the provider to "Red Hat UMB Stage" for development purposes // the 'namespace'.'type'.'category' below have to match the 'test_case_name' in 'rules' section in gating.yaml, // i.e.: http://pkgs.devel.redhat.com/cgit/containers/openstack-nova-libvirt/tree/gating.yaml?h=rhos-15.0-rhel-8#n7 // not the namespace below will be the current jenkins master url - this way we'll be able to dinstinguish // the results coming from production vs. staging jenkins (in case this job is run on staging) def namespace = env.JENKINS_URL.split('/')[2].split('\\.')[0] def type = "${env.JOB_NAME}" def category = 'integration' def umb_topic = env.JP_REPORT_UMB_RESULT_TOPIC ?: "None" // Status can be 'PASSED', 'FAILED', 'INFO' (soft pass) or 'NEEDS_INSPECTION' (soft fail). // See Factory 2.0 CI UMB messages for more info - https://docs.google.com/document/d/16L5odC-B4L6iwb9dp8Ry0Xk5Sc49h9KvTHrG86fdfQM/edit#heading=h.ixgzbhywliel // NOTE: we need to send one UMB message per NVR (per image) // based on https://datagrepper.engineering.redhat.com/id?id=ID:jenkins-1-gfpcd-45742-1569399029463-10735:1:1:1:1&is_raw=true&size=extra-large def UMBbuildResult = calculate_umb_build_status() currentBuild.description = (currentBuild.description ?: '') + "
Sent UMBs:
" if ( buildMetadata['images'] ) { // the ci job build that have this build param set are the CVP jobs // we send an UMB messages per each artifact (image) in that case buildMetadata['images'].eachWithIndex { image,image_no -> def image_name = image['name'] // i.e.: openstack-heat-api def image_full_name = image['full_name'] // i.e.: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/rhosp16/openstack-heat-api:16.0-31 def image_tag = image['tag'] // i.e.: 16.0-31 def TaskID = image['id'] def Nvr = image['nvr'] // i.e.: openstack-nova-compute-ironic-container-13.0-97.1567588122 def component = image['component'] // i.e.: RHOS def issuer = image['issuer'] // i.e.: freshmaker def registry_url = image['registry_url'] // i.e.: freshmaker def scratch = image['scratch'] def msg_properties = "" def timestamp = new Date().format("yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'", TimeZone.getTimeZone("UTC")) def pipeline_name = "${type}-${Nvr}" def pipeline_id = java.security.MessageDigest.getInstance("MD5").digest(pipeline_name.bytes).encodeHex().toString().substring(0,8) // as suggested in https://docs.engineering.redhat.com/pages/viewpage.action?spaceKey=CVP&title=Container+Verification+Pipeline+E2E+Documentation#ContainerVerificationPipelineE2EDocumentation-Addinggating.yamltodist-gitrepo def msg_content = """ { "contact": { "url": "${env.JENKINS_URL}", "team": "Red Hat OpenStack Release Delivery", "email": "rhos-qe-dept@redhat.com", "name": "Red Hat OpenStack downstream CI", "docs": "http://rhos-qe-mirror.lab.eng.brq2.redhat.com/infrared/" }, "build_params": ${params_json}, "run": { "url": "${BUILD_URL}", "log": "${BUILD_URL}console" }, "pipeline": { "id": "rhos-qe-jenkins-${pipeline_id}", "name": "rhos-qe-jenkins-${pipeline_name}" }, "artifact": { "name": "${image_name}", "full_names": ["${image_full_name}"], "tag": "${image_tag}", "nvr": "${Nvr}", "component": "${component}", "type": "redhat-container-image", "task_id": ${TaskID}, "id": "sha256:${TaskID}fffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "issuer": "${issuer}", "registry_url": "${registry_url}", "scratch": ${scratch} }, "test": { "namespace": "${namespace}", "type": "${type}", "category": "${category}", "result": "${UMBbuildResult.toLowerCase()}" }, "generated_at": "${timestamp}", "version": "1.1.17" }""" send_ci_message(dry_run: dry_run, provider: provider, umb_topic: umb_topic, msg_properties: msg_properties, msg_content: msg_content, iterator: image_no) } // end "for image..." } else if (env.JOB_NAME =~ "multijob-phase") { // this part of code will take care of the phase/1/2/3 multijobs // and set the proper content, properties, contact info etc. def msg_content = """ { "status": "${UMBbuildResult}", "build_url": "${BUILD_URL}", "puddle_version": "${puddle_version}", "version": "${product_version}", "ci": { "url": "${env.JENKINS_URL}", "team": "Red Hat OpenStack Release Delivery", "email": "rhos-qe-dept@redhat.com", "name": "Red Hat OpenStack downstream CI" }, "build_params": ${params_json} }""" def msg_properties = """ version=${product_version} description=${description} product=Red Hat OpenStack Platform email=rhos-qe-dept@redhat.com owner=RHOS QE dept=Quality Engineering """ send_ci_message(dry_run: dry_run, provider: provider, umb_topic: umb_topic, msg_properties: msg_properties, msg_content: msg_content, iterator: send_umb_iterator) } else { // if the job build is neither a CVP nor a phase1/2/3 multijob then // we'll treat it in a generic way, meaning: no special parsing/extra fields def msg_content = """ { "status": "${UMBbuildResult}", "build_url": "${BUILD_URL}", "puddle_version": "${puddle_version}", "version": "${product_version}", "ci": { "url": "${env.JENKINS_URL}" }, "build_params": ${params_json} }""" def msg_properties = """ version=${product_version} description=${description} product=Red Hat OpenStack Platform email=rhos-qe-dept@redhat.com owner=RHOS QE dept=Quality Engineering """ send_ci_message(dry_run: dry_run, provider: provider, umb_topic: umb_topic, msg_properties: msg_properties, msg_content: msg_content, iterator: send_umb_iterator) } } CIResourcesThisBuild = [] def resource_from_url(String url){ URI uri = url.toURI() res = null if (uri.host != null) { res = uri.host } else if (uri.path != null) { res = uri.path.split('/')[0] } else if (uri.scheme != null) { /* foobar:port is handled here */ res = uri.scheme } if (res != null) { return res } return null // likely empty or not regular parsable url } def resource_from_mirror(String mirror){ def _ret if (mirror != "") { _ret = "rhos-qe-mirror-" + mirror + ".usersys.redhat.com" } else { _ret = "" // no mirror used } return _ret } def resource_from_string(String resources) { return resources.split(',') } def CIResourceCheck(resources){ timestamps{ if ('True'.toBoolean()) { def num_retries = 60 def retry_num = 0 // WARNING // if the machine from alertmanager_host variable is unreachable, that is its fqdn not resolvable or its IP not pingable // then the alertmanager_connection_timeout would _not_ work - Jenkins pretty much hangs there waiting for some lower-level // exception/timeout to happen... // in a similar situation (not same one tho) when the host is reachable yet the port (prometheus_port variable) refuses // connections then the timeout _is_ working as expected // TODO: the above behaviour needs to be investigated more and dealt with inside the below code (if possible) def alertmanager_connection_timeout = 15 // seconds def user_input_timeout = 60 // seconds if (resources != null) { // filter out empty resources - see resource_from_* functions above when they return such values resources = resources.findResults { (it != null && it != "") ? it : null } retry(num_retries) { retry_num++ resources.each { resource_name -> def alertmanager_host = "http://rhos-ci-monitoring.lab.eng.tlv2.redhat.com" def alertmanager_port = "9093" def grafana_host = alertmanager_host // 'now_in_rfc3339' var is only used when querying Prometheus API... the Alert Manager api doesn't support querying by time // def tz = TimeZone.getTimeZone("UTC") // now = new Date() // now_in_rfc3339 = sprintf("%s-%s-%sT%s:%s:%s.00Z", now.format('yyyy', tz), now.format('MM', tz), now.format('dd', tz), now.format('HH',tz), now.format('mm',tz), now.format('ss',tz)) // using v2 of alertmanager open API - https://github.com/prometheus/alertmanager/blob/master/api/v2/openapi.yaml def alertmanager_query = "/api/v2/alerts?silenced=false&inhibited=false&active=true&filter={instance=\"${resource_name}\"}&filter={alertname=\"ResourceCriticalAlert\"}" def alertmanager_query_url = alertmanager_host + ":" + alertmanager_port + alertmanager_query def now_less_5min = new Date().getTime() - 300000 def now_plus_5min = new Date().getTime() + 300000 def resource_name_with_link = resource_name + " | " + grafana_host + "/d/resource-metrics/resource-metrics?orgId=1&from=" + now_less_5min + "&to=" + now_plus_5min + "&var-resource=" + resource_name def ciresourcecheck_response println 'CIResourceCheck query Alert Manager - ' + alertmanager_query_url try { timeout(time: alertmanager_connection_timeout, unit: 'SECONDS') { def connection = new URL(alertmanager_query_url).openConnection() as HttpURLConnection connection.setRequestProperty("Accept", 'application/json') connection.setRequestProperty("Content-Type", 'application/json') connection.connect() def statusCode = connection.responseCode if (statusCode != 200 && statusCode != 201) { String exc = connection.getErrorStream().text connection = null throw new Exception(exc) } String ciresourcecheck_response_raw = connection.content.text // print("ciresourcecheck_response_raw: " + ciresourcecheck_response_raw) connection = null ciresourcecheck_response = readJSON text: ciresourcecheck_response_raw // println("ciresourcecheck_response: " + ciresourcecheck_response) } } catch (Exception e) { println "CIResourceCheck caught an exception: " + e + ", skipping CI Resource check" return } if (ciresourcecheck_response.size() > 0) { println "\nCIResourceCheck Result - Failed: '" + resource_name_with_link + "' has Critical Alerts\nWARNING: CI job build will 'pause' here and recheck the CI Resource in " + user_input_timeout + " seconds" try { timeout(time: user_input_timeout, unit: 'SECONDS') { input message: '(Check ' + retry_num + ' of ' + num_retries + ') Do you want to ignore all the alerts and continue the build?', ok: 'Ignore this CIResourceCheck failure and continue the build' } } catch (Exception e) { error("CIResourceCheck failed for " + resource_name) throw(e) } } else { println "\nCIResourceCheck Result - OK: '" + resource_name_with_link + "' doesn't have any Critical Alerts (or they were Silenced in Alert Manager), continuing the build\n" return } } } } } else { println "CIResourceCheck not enabled (jjb: ci_resource_check_enabled), skipping CI Resource check" } } } /** * Sets up three Groovy variables used for dynamic baremetal cluster selection by Enterprise DFG * Must be used in all jobs as EST_BAREMETAL is used as a logic switch in common stages such as Undercloud and Overcloud * * EST_BAREMETAL_NODE_LABELS is a List of each baremetal worker node label used by Enterprise DFG * EST_BAREMETAL is a Boolean used for pipeline scripts to determine if a job is using a baremetal cluster, based on env.NODE_NAME * BM_RESOURCE_MAP is a Map used to dynamically override certain JJB params, based on host * Note that BM_RESOURCE_MAP is populated (if nessessary) in jobs/defaults/stages/ir_baremetalenv_link_templates.groovy.inc * This is due to it needing the WORKSPACE variable present after IR installation * * Please contact Enterprise DFG for additional info * Email: rhos-qe-enterprise@redhat.com * IRC: #rhos-Enterprise */ EST_BAREMETAL_NODE_LABELS = ["enterprise-baremetal-worker", "enterprise-sealusa-38", "enterprise-worker-38", "enterprise-titan-100", "enterprise-worker-100"] EST_BAREMETAL = EST_BAREMETAL_NODE_LABELS.contains(env.NODE_NAME) BM_RESOURCE_MAP = [:] // ****************************************** // Before try stages // ****************************************** def stage_before_try() { /** * Parses the CI_MESSAGE (UMB) parameter. */ timeout(time: 20, unit: 'MINUTES') { if ( "${env.CI_MESSAGE}" != "" ) { buildMetadata = parse_ci_message(params.CI_MESSAGE) if ( 'False'.toBoolean() ) { overcloud_container_images_urls = extract_images_urls(buildMetadata) } } else { println "'CI_MESSAGE' build param is empty, not parsing" } } /** * Checkout component from code.engineering.redhat.com if GERRIT_PROJECT * is defined (gerrittriger defines it) or skip stage otherwise. * * It allows to check out component with proposed gerrit change. * * @jjb_param ffu name of the OpenStack component to be checked out * * @param $GERRIT_BRANCH Git branch to be used * @param $GERRIT_REFSPEC Refspec to be used. It is also required to satisfy * checkout of gerrit proposed patch. * * Example of GERRIT_REFSPEC (with and without gerrit refs): * refs/changes/30/355330/1 * +refs/heads/$GERRIT_BRANCH:refs/remotes/origin/$GERRIT_BRANCH * * Currently we keep a blacklist of fake components that do not have repos. */ /* generic checkout closure */ def CheckoutComponent = { String BranchName, String ComponentName, String RefSpecName -> retry(3) { CIResourceCheck(CIResourcesThisBuild + ['code.engineering.redhat.com']) checkout([$class: 'GitSCM', branches: [[name: BranchName]], doGenerateSubmoduleConfigurations: false, extensions: [ [$class: 'RelativeTargetDirectory', relativeTargetDir: ComponentName], [$class: 'BuildChooserSetting', buildChooser: [$class: 'GerritTriggerBuildChooser']], [$class: 'CloneOption', honorRefspec: true] ], submoduleCfg: [], userRemoteConfigs: [ [name: 'origin', refspec: RefSpecName, url: "https://code.engineering.redhat.com/gerrit/${ComponentName}.git"] ] ]) } } if (['backup_restore', 'updates', 'upgrade', 'features', 'sanity', 'gabbi', 'ospdui', 'infrared', 'composable', 'overcloud', 'ffu', 'splitstack', 'ospd', 'refstack'].contains('ffu')) { env.GERRIT_PROJECT = '' } else if ('ffu' != 'unified') { env.GERRIT_PROJECT = 'ffu' } // 1. if the job was triggered by gerrit (backport) then is might be the case of: // a. an old non unifed job - in that case behave as it was before the introduction of unified jobs // b. a unifed job - in that case we will detect the component name from the env.Gerrit project and // afterwards behave just as 1.a jobs by running a reduced set of tempest tests. // 2. if the job is a unified job then one of 3 cases take place: // a. a job was triggered by gerrit (backport) - overlaps with 1.b. // b. a job was triggered by a periodic multijob - mimicing backport by delivering component // name as environment variable and creating a dummy patch at patch-component stage. In this case // we will execute the full patch that includes creating mock rpm that updates glance image followed // by overcloud deplyment and finally running the 'all' tempest tests suite. // c. a job was triggered by QE and is used to verify the puddle by overcloud deplyment and running // the 'all' tempest tests suite. if ( env.GERRIT_PROJECT || ('ffu' == 'unified') ) { def cancelJob = false def BranchName = env.GERRIT_BRANCH def ComponentName = (env.GERRIT_PROJECT == 'unified') ? "" : env.GERRIT_PROJECT def RefSpecName = env.GERRIT_REFSPEC stage2('Checkout Component') { if ( ('ffu' == 'unified') && env.GERRIT_PROJECT ){ // case 1.b.: if component_to_tests_map defines tempest for the // let's checkout the repo, otherwize, the job will be canceled and TAG will be added // to note that the component is not supported by the job if (! getTestStringFromComp2TestMap("OrderedDict([('obj', 'component_to_tests_map')])", '16.2', env.GERRIT_PROJECT)) cancelJob = 1 } // case 2.b.: we want to checkout the component when unified job is running in a periodic JOB_RUN_MODE // in that case, the component name will be taken from env.PR_COMPONENT_NAME if ( ('ffu' == 'unified') && (env.JOB_RUN_MODE == 'periodic') ){ BranchName = "rhos-{product_version}.{minor_version}-patches" ComponentName = env.PR_COMPONENT_NAME RefSpecName = "+refs/heads/rhos-{product_version}.{minor_version}-patches:refs/remotes/origin/rhos-{product_version}.{minor_version}-patches" } // we have the following cases when the component is checked-out // - gerrit change: cases 1.a. and 1.b // - periodic job: case 2.b if ( ((ComponentName != "") || (env.JOB_RUN_MODE == 'periodic')) && !cancelJob) { addMark("COMPONENT: " + ComponentName, "orange") if (env.JOB_RUN_MODE == 'periodic') addMark("RUN_MODE: " + env.JOB_RUN_MODE, "orange") CheckoutComponent(BranchName, ComponentName, RefSpecName) } } // this one must be outside the stage in order to terminate the job if required if (cancelJob){ addMark("no support for " + env.GERRIT_PROJECT, "red") currentBuild.result = 'ABORTED' return } } /** * Installs InfraRed together with all default plugins inside * python virtual environment. * * @jjb_param {ir_ansible_callback_whitelist} whitelist ansible callback plugins. * @jjb_param {polarion_test_run_id} optional * Defines ID of Polarion test run. * Paramter expect to be string - for example '20171017-1613' * * @param $ir_venv path where InfraRed will be installed using virtualenv * @param $WORKSPACE Jenkins workspace set up automatically by Jenkins * @param $IR_BRANCH branch name to pull IR from * @param $UPLOAD_WORKSPACE Infrared workspace which was uploaded from file parameter. * @param $WORKSPACE_URL Location of exported Infrared workspace from another job. * * @log $WORKSPACE/ir_pip.log containing output from installation command * */ timeout(time: 20, unit: 'MINUTES') { def CIResourcesThisStage = [] POLARION_TEST_RUN_ID = env.POLARION_TEST_RUN_ID ?: '20221208-0905' if ( POLARION_TEST_RUN_ID.empty ) { SEARCH_FOR_TEST_RUN_STATUS = getPolarionParams('polarion/polarion_data.csv', env.JOB_NAME) if (!SEARCH_FOR_TEST_RUN_STATUS) { if ((env.TAGS ?: '') && (env.TAGS.contains('phase3'))) { // don't fail the job if it is a single node tester if ( ! ['pep8', 'unit', 'functional', 'dsvm-functional'].contains('') ){ currentBuild.result = 'FAILURE' error "Error. The polarion test run id wasn't initialized. Do you need to add the 'job' to 'polarion_test_run_id' mapping to http://git.app.eng.bos.redhat.com/git/rhos-common.git/tree/resources/polarion/polarion_data.csv ?" } } } else { env.POLARION_TEST_RUN_ID = SEARCH_FOR_TEST_RUN_STATUS[1] } } stage2('Install InfraRed') { dir('infrared') { env.JUNIT_OUTPUT_DIR = "${WORKSPACE}/junit" CIResourcesThisStage.add('review.gerrithub.io') CIResourceCheck(CIResourcesThisBuild + CIResourcesThisStage) def IR_BRANCH = env.IR_BRANCH ?: getInfraredBranch('17.1' ?: '16.2') git branch: IR_BRANCH, url: 'https://review.gerrithub.io/redhat-openstack/infrared.git' if (env.IR_GERRIT_CHANGE) { env.IR_GERRIT_CHANGE = env.IR_GERRIT_CHANGE.trim() println "downloading InfraRed change(s) ${IR_GERRIT_CHANGE} (and its parents) from GerritHub" sh """ git remote add gerrit https://review.gerrithub.io/redhat-openstack/infrared if [[ \$IR_GERRIT_CHANGE = *[[:space:]]* ]]; then git config --global user.email "jenkins@noreply.local" git config --global user.name "Jenkins" for a_patch in \$IR_GERRIT_CHANGE; do git review -x \$a_patch done else git review -d \$IR_GERRIT_CHANGE fi """ } env.IR_HOME = env.PWD sh2 script: """ # copied from jobs/release_delivery/ospd.groovy if [[ "\${IR_PATCHES_TOPIC:-}" != '' ]]; then query_url="https://review.gerrithub.io/changes/?O=10b&q=topic%3A%22\${IR_PATCHES_TOPIC}%22%20status%3Aopen%20project%3A%22redhat-openstack/infrared%22" timeout 60s curl --silent --fail "\$query_url" | tail -n '+2' | jq '.' | sed -nr 's/^\\s*"ref": "(.*)".*/\\1/p' | uniq > IR_PATCHES_TOPIC.log cat IR_PATCHES_TOPIC.log | while read REF; do timeout 60s git fetch "https://review.gerrithub.io/redhat-openstack/infrared" "\$REF" git cherry-pick FETCH_HEAD done fi git log -n 5 --pretty virtualenv $ir_venv echo "export IR_HOME=`pwd`" >> $ir_venv/bin/activate . $ir_venv/bin/activate pip install -U pip pip install . > $WORKSPACE/ir_pip.log pip list --format=columns > $WORKSPACE/ir_pip_packages.txt cp infrared.cfg.example infrared.cfg infrared plugin add all cat << EOF > ansible.cfg [defaults] host_key_checking = False forks = 500 timeout = 30 force_color = 1 roles_path = infrared/common/roles library = infrared/common/library filter_plugins = infrared/common/filter_plugins callback_plugins = infrared/common/callback_plugins callback_whitelist = timer,profile_tasks,junit_report [ssh_connection] pipelining = True control_path = $ir_venv/%%h-%%r EOF sed -i 's/callback_whitelist.*/callback_whitelist = timer,profile_tasks,junit_report/' ansible.cfg """, basename: 'ir-install' def tripleo_upgrade_branch = "none" if (env.FFU_PRODUCT_BUILD) { switch ( ("16.2" as Float).intValue() ) { case 17: tripleo_upgrade_branch = "stable/wallaby" break case 16: tripleo_upgrade_branch = "stable/wallaby" break case 13: tripleo_upgrade_branch = "stable/train" break case 10: tripleo_upgrade_branch = "stable/queens" break default: tripleo_upgrade_branch = "master" } } else if (env.UPGRADE_TO) { switch ( 16.2 ) { case 13: tripleo_upgrade_branch = "stable/rocky" break case 12: tripleo_upgrade_branch = "stable/queens" break case 11: tripleo_upgrade_branch = "stable/pike" break default: tripleo_upgrade_branch = "master" } } else if (env.UPDATE_TO) { switch ( Math.max(Float.parseFloat("16.2"), Float.parseFloat("17.1"?:"0")) as Integer ) { case 17: tripleo_upgrade_branch = "stable/wallaby" break case 16: tripleo_upgrade_branch = "stable/train" break case 13: tripleo_upgrade_branch = "stable/queens" break case 12: tripleo_upgrade_branch = "stable/pike" break default: tripleo_upgrade_branch = "master" } } if (tripleo_upgrade_branch != "none") { println "Setting up InfraRed tripleo-upgrade plugin for ${ tripleo_upgrade_branch }" sh2 """ . $ir_venv/bin/activate infrared plugin remove tripleo-upgrade infrared plugin add --revision ${ tripleo_upgrade_branch } tripleo-upgrade """, basename: 'ir-tripleo-upgrade-plugin-setup' if (env.OOO_UPGRADE_PLUGIN_GERRIT_CHANGE) { println "Downloading InfraRed tripleo-upgrade change(s) ${ env.OOO_UPGRADE_PLUGIN_GERRIT_CHANGE }" sh2 """ git config --global user.email "jenkins@noreply.local" git config --global user.name "Jenkins" pushd plugins/tripleo-upgrade git remote add gerrit https://review.opendev.org/openstack/tripleo-upgrade.git if [[ \$OOO_UPGRADE_PLUGIN_GERRIT_CHANGE = *[[:space:]]* ]]; then for a_patch in \$OOO_UPGRADE_PLUGIN_GERRIT_CHANGE; do git review -r gerrit -x \$a_patch done else git review -r gerrit -d \$OOO_UPGRADE_PLUGIN_GERRIT_CHANGE fi popd """, basename: 'ir-tripleo-upgrade-plugin-setup' } println "Finalising InfraRed tripleo-upgrade plugin setup" sh2 """ # use mv instead of symbolic link to avoid too many levels of symbolic links issue mkdir -p \$(pwd)/plugins/tripleo-upgrade/infrared_plugin/roles/tripleo-upgrade find \$(pwd)/plugins/tripleo-upgrade -maxdepth 1 -mindepth 1 -not -name infrared_plugin \ -exec mv '{}' \$(pwd)/plugins/tripleo-upgrade/infrared_plugin/roles/tripleo-upgrade \\; """, basename: 'ir-tripleo-upgrade-plugin-setup' } //For FFWD jobs in OSP17.1 we need to hack env.FFU_PRODUCT_BUILD based on RHEL env.FFU_17_EL8_BUILD='' env.FFU_17_EL9_BUILD='' if (env.FFU_PRODUCT_BUILD && env.UPGRADE_OSP_VERSION) { // Verify that we need to apply the hack - only for 8.4 17.1 jobs upgrade_osp_version = (Float.parseFloat("17.1"?:"0") as Integer ) if ( upgrade_osp_version == 17 ) { // We just try to directly fetch el9 puddle to see if we got EL9 puddle providedpuddle = puddleDateFormatter("17.1", env.FFU_PRODUCT_BUILD, null, "9") if (!providedpuddle.contains("Not Found")) { el9puddle = providedpuddle // If the puddle exist we know we need to look for correct el8 build PCTOOLING-596 el8url = siteURL("http://download-node-02.eng.bos.redhat.com/rcm-guest/puddles/OpenStack/17.1-RHEL-9/${el9puddle}/RHEL8_COMPOSE_ID") fetchpuddle = sh(returnStdout: true, script: "curl -L -s -S ${el8url}").trim() if(fetchpuddle.contains("Not Found")) { el8puddle = "latest-RHOS-17.1-RHEL-8.4" } else { el8puddle = fetchpuddle } // For 8.4 upgrade job we override FFU_PRODUCT_BUILD to el8 puddle if ( "9.2" == "8.4" ) { // If we override FFU_PRODUCT_BUILD the SECOND_IMAGE_SET_NAME has to be updated here if ( env.SECOND_IMAGE_SET_NAME && env.SECOND_IMAGE_SET_NAME != "") { SECOND_IMAGE_SET_NAME = SECOND_IMAGE_SET_NAME.replace("${env.FFU_PRODUCT_BUILD}", el8puddle) } env.FFU_PRODUCT_BUILD = el8puddle println "FFWD Multi-RHEL FFU_PRODUCT_BUILD was updated to ${env.FFU_PRODUCT_BUILD}" } } else { el8puddle = env.FFU_PRODUCT_BUILD // EL9 puddle was not provided we just default to passed_phase2 el9puddle = "passed_phase2" } env.FFU_17_EL8_BUILD = el8puddle env.FFU_17_EL9_BUILD = el9puddle println "Multi-RHEL FFU_17_EL8_BUILD ${env.FFU_17_EL8_BUILD} FFU_17_EL9_BUILD ${env.FFU_17_EL9_BUILD}" } } if (env.UPGRADE_WORKAROUNDS) { println "Downloading workarounds from ${ env.UPGRADE_WORKAROUNDS }" sh2 """ if [ ! -f workarounds.yaml ]; then curl -sSko workarounds.yaml ${ env.UPGRADE_WORKAROUNDS } fi """, basename: 'ir-tripleo-upgrade-workarounds-setup' if (env.OSP_VERSION && env.PRODUCT_BUILD) { println "Replacing workaround file source rhos-release OSP build with ${ env.PRODUCT_BUILD }" sh2 """ sed -i -E 's|(.*rhos-release ${ env.OSP_VERSION }.*)-p [A-Za-z0-9_.-]+|\\1-p ${ env.PRODUCT_BUILD }|' workarounds.yaml """, basename: 'ir-tripleo-upgrade-workarounds-setup' println "Replacing workaround file source OSP build PRODUCT_BUILD with ${ env.PRODUCT_BUILD }" sh2 """ sed -i -E 's|PRODUCT_BUILD=[A-Za-z0-9_.-]+|PRODUCT_BUILD=${ env.PRODUCT_BUILD }|' workarounds.yaml """, basename: 'ir-tripleo-upgrade-workarounds-setup' } if (env.UPGRADE_OSP_VERSION && (env.FFU_PRODUCT_BUILD || env.UPGRADE_TO)) { def target_build = env.FFU_PRODUCT_BUILD ?: env.UPGRADE_TO println "Replacing workaround file target rhos-release OSP build with ${ target_build }" sh2 """ sed -i -E 's|(.*rhos-release ${ env.UPGRADE_OSP_VERSION }.*)-p [A-Za-z0-9_.-]+|\\1-p ${ target_build }|' workarounds.yaml """, basename: 'ir-tripleo-upgrade-workarounds-setup' println "Replacing workaround file target OSP build FFU_PRODUCT_BUILD with ${ target_build }" sh2 """ sed -i -E 's|FFU_PRODUCT_BUILD=[A-Za-z0-9_.-]+|FFU_PRODUCT_BUILD=${ target_build }|' workarounds.yaml """, basename: 'ir-tripleo-upgrade-workarounds-setup' } // Multi-RHEL FFWD 17.1 workarounds if (!(env.FFU_17_EL8_BUILD=='') && !(env.FFU_17_EL9_BUILD=='')) { println "Replacing workaround file target OSP build FFU_EL8_PRODUCT_BUILD with ${ env.FFU_17_EL8_BUILD }" sh2 """ sed -i -E 's|FFU_EL8_PRODUCT_BUILD=[A-Za-z0-9_.-]+|FFU_EL8_PRODUCT_BUILD=${ env.FFU_17_EL8_BUILD }|' workarounds.yaml """, basename: 'ir-tripleo-upgrade-workarounds-setup' println "Replacing workaround file target OSP build FFU_EL9_PRODUCT_BUILD with ${ env.FFU_17_EL9_BUILD }" sh2 """ sed -i -E 's|FFU_EL9_PRODUCT_BUILD=[A-Za-z0-9_.-]+|FFU_EL9_PRODUCT_BUILD=${ env.FFU_17_EL9_BUILD }|' workarounds.yaml """, basename: 'ir-tripleo-upgrade-workarounds-setup' } if (env.UPGRADE_OSP_VERSION && env.UPGRADE_RHEL_VERSION) { println "Replacing workaround file target rhos-release RHEL version with ${ env.UPGRADE_RHEL_VERSION }" sh2 """ sed -i -E 's|(.*rhos-release ${ env.UPGRADE_OSP_VERSION }.*)-r [0-9.]+|\\1-r ${ env.UPGRADE_RHEL_VERSION }|' workarounds.yaml """, basename: 'ir-tripleo-upgrade-workarounds-setup' println "Replacing workaround file target RHEL version UPGRADE_RHEL_VERSION with ${ env.UPGRADE_RHEL_VERSION }" sh2 """ sed -i -E 's|UPGRADE_OSP_VERSION=[0-9.]+|UPGRADE_OSP_VERSION=${ env.UPGRADE_RHEL_VERSION }|' workarounds.yaml """, basename: 'ir-tripleo-upgrade-workarounds-setup' } if (env.FDP_TEST_REPO == 'true') { println "Adding FDP repositories into workaround file" sh2 """ if grep -q fdp_test_stash workarounds.yaml; then sed -i -E '/pre_ffu_undercloud_upgrade_workarounds:/a - UCTestOvsOvnFromFDP:\\n <<: *UCTestOvsOvnFromFDP' workarounds.yaml sed -i -E '/pre_ffu_overcloud_upgrade_prepare_workarounds:/a - OCTestOvsOvnFromFDP:\\n <<: *OCTestOvsOvnFromFDP' workarounds.yaml sed -i -E '/UpgradeInitCommand:/a\\ sudo curl http://download-node-02.eng.bos.redhat.com/rhel-8/nightly/updates/FDP/latest-FDP-8-RHEL-8/compose/Server/x86_64/os/fdp-nightly-updates.repo -o /etc/yum.repos.d/fdp.repo' workarounds.yaml fi """, basename: 'ir-tripleo-upgrade-workarounds-setup' } if (env.LEAPP_TEST_REPO == 'true') { println "Adding LEAPP test repository into workaround file" sh2 """ sed -i -E '/sudo yum -y install leapp-upgrade/i \\ sudo rhos-release -O leapp-7' workarounds.yaml sed -i -E 's/https:.*leapp-data17.tar.gz/http:\\/\\/gitlab.cee.redhat.com\\/leapp\\/oamg-rhel7-vagrant\\/raw\\/master\\/roles\\/init\\/files\\/prepare_test_env.sh/g' workarounds.yaml sed -i -E 's/sudo tar -xzf leapp-data17.tar.gz.*/chmod +x .\\/prepare_test_env.sh \\&\\& sudo \\/bin\\/bash -c "source .\\/prepare_test_env.sh; get_data_files"/g' workarounds.yaml """, basename: 'ir-tripleo-upgrade-workarounds-setup' } } if (env.UPDATE_FDP_TEST_REPO == 'true') { def wk_version = Math.max(Float.parseFloat("16.2"), Float.parseFloat("17.1"?:"0")) println "Adding FDP related workarounds and repository for update to ${wk_version}." if (env.UPDATE_WORKAROUNDS_URL) { env.UPDATE_WORKAROUNDS_URL += ",https://gitlab.cee.redhat.com/rhos-upgrades/workarounds/-/raw/master/update/wk-FDP-repo-testing-${wk_version}.yaml" } else { env.UPDATE_WORKAROUNDS_URL = "https://gitlab.cee.redhat.com/rhos-upgrades/workarounds/-/raw/master/update/wk-FDP-repo-testing-${wk_version}.yaml" } } if (env.UPDATE_WORKAROUNDS_URL) { /* Before GA we have only one puddle. Grab a workaround to force change in container tag */ def UPDATE_BUILD = env.UPDATE_TO ?: "passed_phase2" /* second product version can be used for the new release model: 16.0 to 16.1 */ def TARGET_UPDATE_RELEASE = "17.1" ?: "16.2" def UPDATE_CONTAINER_BEFORE_GA_WORKAROUND_URL="" /* Get the exact compose id from the product and update build */ if ( env.PRODUCT_BUILD == 'passed_phase2' && env.UPDATE_FORCE_SAME_PUDDLE.toBoolean() ) { el9url = siteURL("http://download-node-02.eng.bos.redhat.com/rcm-guest/puddles/OpenStack/17.1-RHEL-9/${UPDATE_BUILD}/COMPOSE_ID") UPDATE_BUILD_COMPOSE = sh(returnStdout: true, script: "curl -L -s -S ${el9url}").trim() el9url = siteURL("http://download-node-02.eng.bos.redhat.com/rcm-guest/puddles/OpenStack/17.1-RHEL-9/${env.PRODUCT_BUILD}/COMPOSE_ID") PRODUCT_BUILD_COMPOSE = sh(returnStdout: true, script: "curl -L -s -S ${el9url}").trim() if (UPDATE_BUILD_COMPOSE == PRODUCT_BUILD_COMPOSE && TARGET_UPDATE_RELEASE == "16.2") { UPDATE_CONTAINER_BEFORE_GA_WORKAROUND_URL="https://gitlab.cee.redhat.com/rhos-upgrades/workarounds/-/raw/master/update/TestContainerUpdateBeforeGA.yaml" switch (env.UPDATE_WORKAROUNDS_URL) { case ~/^$/: env.UPDATE_WORKAROUNDS_URL = UPDATE_CONTAINER_BEFORE_GA_WORKAROUND_URL break; case ~/.*, *$/: env.UPDATE_WORKAROUNDS_URL = "${env.UPDATE_WORKAROUNDS_URL}${UPDATE_CONTAINER_BEFORE_GA_WORKAROUND_URL}" break case ~/^ *http.*/: env.UPDATE_WORKAROUNDS_URL = "${env.UPDATE_WORKAROUNDS_URL},${UPDATE_CONTAINER_BEFORE_GA_WORKAROUND_URL}" break } println "Same puddles for source and destination, mangling the containers' tag using ${env.UPDATE_WORKAROUNDS_URL}" } else { println "Different puddles for source(${PRODUCT_BUILD_COMPOSE}) and destination(${UPDATE_BUILD_COMPOSE}) detected no need to change container tag." } } sh2 script: """ if [ ! -f workarounds.yaml ]; then if echo ${ env.UPDATE_WORKAROUNDS_URL } | grep -q ',';then TMPDIR=\$(mktemp -d) wget https://github.com/mikefarah/yq/releases/download/v4.9.6/yq_linux_amd64 -O \$TMPDIR/yq && chmod +x \$TMPDIR/yq WORKAROUNDS=\$(echo ${ env.UPDATE_WORKAROUNDS_URL } | tr ',' ' ') mkdir -p workaround_files && pushd workaround_files for workaround in \$WORKAROUNDS do curl -kO \$workaround done popd \$TMPDIR/yq eval-all '. as \$item ireduce ({}; . *+ \$item)' workaround_files/* > workarounds.yaml else curl -ko workarounds.yaml '${ env.UPDATE_WORKAROUNDS_URL }' fi fi """, basename: 'ir-tripleo-overcloud-update-workarounds-setup' } deploy_workspace_url = "" // get workspace from upload file if (env.UPLOAD_WORKSPACE) { deploy_workspace_url = "../${UPLOAD_WORKSPACE}" unstashParam "UPLOAD_WORKSPACE" } // get workspace from url if (env.WORKSPACE_URL) { deploy_workspace_url = "${WORKSPACE_URL}" CIResourceCheck(CIResourcesThisBuild + [resource_from_url(deploy_workspace_url)]) } // import infrared workspace if (deploy_workspace_url) { echo "Importing InfraRed workspace" sh2 """ . $ir_venv/bin/activate infrared workspace import ${deploy_workspace_url} infrared workspace list """, basename: 'ir-workspace-import' } } archiveArtifacts artifacts: 'ir_pip.log, ir_pip_packages.txt' } } } // ****************************************** // Before try stages // ****************************************** def stage_inside_try_pre() { /** * This stage will check if everything we need for the build is * exist and overcome what we can * In this stage the build will check if UC image is exist. either locally * or from URL - in case of URL it will download the image to confirm we don't * hit any timeout or download error * * In the future this stage may grow and contain more stuff * */ timeout(time: 20, unit: 'MINUTES') { stage2('Prepare Env') { CIResourceCheck(CIResourcesThisBuild) dir('infrared') { PRODUCT_BUILD = env.PRODUCT_BUILD?.trim() ?: 'passed_phase2' IS_SSL = env.IS_SSL ?: "True" UNDERCLOUD_VERSION = env.OSP_VERSION ?: '16.2' env.OVERCLOUD_DEPLOYED_VERSION = '' def osp_rhel_list = [ '10': 'rhel-7.7', '13': 'rhel-7.9', '14': 'rhel-7.7', '15': 'rhel-8.2', '16': 'rhel-8.1', '16.1': 'rhel-8.2', '16.2': 'rhel-8.4', '17.0': 'rhel-9.0', '17.1': 'rhel-9.2' ] RHEL_VERSION = env.RHEL_VERSION ?: 'rhel-8.4' if ( RHEL_VERSION == '' ) { RHEL_VERSION = osp_rhel_list[UNDERCLOUD_VERSION.toString()] } UNDERCLOUD_SNAPSHOT = env.UNDERCLOUD_SNAPSHOT ?: "False" puddle_version = puddleDateFormatter("${UNDERCLOUD_VERSION}","${PRODUCT_BUILD}") ssl_str = '' if ( UNDERCLOUD_VERSION.toDouble() <= 13 ) { ssl_str = ((env.IS_SSL ?: "True") == "yes") ? '-ssl' : '' } is_snapshot_exist = false snapshot_url = siteURL("http://rhos-ci-logs.lab.eng.tlv2.redhat.com/osp-images") uc_type="uc-full-deploy" if (UNDERCLOUD_SNAPSHOT.toBoolean()) { uc_type="uc-snapshot-ready" undercloud_image_file_name = "undercloud-0-snapshot-osp-${UNDERCLOUD_VERSION}-${RHEL_VERSION}-puddle-${puddle_version}${ssl_str}.qcow2" undercloud_url = "${snapshot_url}/${UNDERCLOUD_VERSION}/${undercloud_image_file_name}" print "checking is snapshot url ${undercloud_url} usable" CIResourceCheck(CIResourcesThisBuild + [resource_from_url(undercloud_url)]) is_snapshot_exist = isValidURL("${undercloud_url}") if (is_snapshot_exist) { try { web_checksum=sh(returnStdout: true, script: "curl -f -S -s ${undercloud_url}.sha256 | cut -d' ' -f1").trim() print "Snapshot checksum in remote storage is ${web_checksum}" final_image_path = "/var/lib/libvirt/images/$undercloud_image_file_name" tmp_image_path = final_image_path+".tmp" def local_checksum = '' local_checksum = sh(returnStdout: true, script: "ssh root@$host touch --no-create $final_image_path >&2; sha256sum $final_image_path | cut -d' ' -f1") if ( local_checksum != '' ) { echo "File ${final_image_path} already exists with checksum ${local_checksum}" } counter = 0 echo "Obtaining and validating undercloud snapshot image" while ('' == local_checksum || web_checksum != local_checksum) { if (counter == 3) { echo "The UC snapshot download failed too many times, giving up and going for full deployment" sh2 """ ssh root@$host "rm -f $final_image_path" """, basename: "prepare_env-FAILED_TO_DOWNLOAD_IMAGE" uc_type='uc-full-deploy' break } sh2 """ echo "Downloading the Undercloud image" ssh root@$host "set -ex; curl --create-dirs -f $undercloud_url --output $tmp_image_path; mv $tmp_image_path $final_image_path" """, basename: "prepare_env-download_undercloud_image-attempt${counter}", maxLines: -1 local_checksum = sh(returnStdout: true, script: "ssh root@$host sha256sum $final_image_path | cut -d' ' -f1").trim() print "Downloaded with checksum ${local_checksum}" counter++ } } //try catch (Exception e) { echo "The UC snapshot download failed: ${e}" uc_type='uc-full-deploy' } //catch } else { echo "The UC Snapshot Image URL detected as not valid. Can't use snapshot at this time" uc_type='uc-full-deploy' } //if/else closer } //if UNDERCLOUD SNAPSHOT } //dir } //stage2 } //timeout /** * Two stages as one is invoked before second * * Stage 1: Removes OpenStack provisioned system using InfraRed virsh plugin. * Stage 2: Provision OpenStack using InfraRed virsh plugin * * @requires install_ir.groovy.inc * * @jjb_param {private_key} root SSH key to access the system. * * @jjb_param {ir_virsh_topology_nodes} topology to be used. Please refer to InfraRed * 'virsh' plugin for supported topologies. * * @jjb_param {ir_virsh_provision_override_options} optional override * options that can be specified by any job definition. * * @jjb_param {ir_tripleo_undercloud_snapshot} is a boolean parameter to decide if you want * to install undercloud from scratch (false) or to use already made snapshot (true) * * @jjb_param {ir_virsh_image} URL to the image used for node provisioning. * * @param $ir_venv virtual environment where InfraRed is installed, * by install_ir.groovy.inc stage. * * @param $IR_PROVISION_HOST is hostname or IP of a baremetal machine to run * virsh commands (Virtual Machines) on. If specified it will overwrite default setting * of current host. * * Example: * puma.lab.eng.tlv2.redhat.com * 10.35.64.69 * */ addMark("HOST: " + host, "aqua") stage2('Cleanup') { CIResourceCheck(CIResourcesThisBuild) dir('infrared') { sh2 """ . $ir_venv/bin/activate infrared virsh \ -o cleanup.yml \ --host-address $host \ --host-key $HOME/.ssh/rhos-jenkins/id_rsa \ --cleanup yes """, basename: 'ir-virsh-cleanup', maxLines: 0 } if ( "False".toBoolean() ) { archiveArtifacts allowEmptyArchive: true, artifacts: '**/ansible-facts/*' } } //TODO (migi): ./jobs/nightly/ospd/JenkinsFileOSPDUI.groovy has additional // override.*, some mehanism to pass it should be in place. stage2('Provision') { dir('infrared') { PRODUCT_BUILD = env.PRODUCT_BUILD?.trim() ?: "passed_phase2" UNDERCLOUD_VERSION = env.OSP_VERSION ?: '16.2' RHEL_VERSION = env.RHEL_VERSION ?: 'rhel-8.4' UNDERCLOUD_SNAPSHOT = env.UNDERCLOUD_SNAPSHOT ?: "False" TOPOLOGY = env.TOPOLOGY?.trim() ?: "controller:3,computehci:3" IMAGE_SET_NAME = env.IMAGE_SET_NAME ?: "" IMAGE_SET_DOWNLOAD_PATH = env.IMAGE_SET_DOWNLOAD_PATH ?: env.WORKSPACE puddle_version = puddleDateFormatter("${UNDERCLOUD_VERSION}","${PRODUCT_BUILD}") ssl_str = ((env.IS_SSL ?: "True") == "yes") ? '-ssl' : '' snapshot_url = siteURL("http://rhos-ci-logs.lab.eng.tlv2.redhat.com/osp-images") is_snapshot_exist = false if (UNDERCLOUD_SNAPSHOT.toBoolean()){ if (TOPOLOGY.contains('undercloud')) { TOPOLOGY -= 'undercloud:1,' } undercloud_url = "${snapshot_url}/${UNDERCLOUD_VERSION}/undercloud-0-snapshot-osp-${UNDERCLOUD_VERSION}-${RHEL_VERSION}-puddle-${puddle_version}${ssl_str}.qcow2" CIResourceCheck(CIResourcesThisBuild + [resource_from_url(undercloud_url)]) is_snapshot_exist = isValidURL("${undercloud_url}") } if ((!UNDERCLOUD_SNAPSHOT.toBoolean()) || ((UNDERCLOUD_SNAPSHOT.toBoolean()) && (uc_type == "uc-full-deploy"))) { TOPOLOGY = (!TOPOLOGY.contains('undercloud') && !TOPOLOGY.contains('tripleo')) ? "undercloud:1,$TOPOLOGY" : TOPOLOGY } IMAGE_SET_OPTIONS='' image_set_exists=false if ('false'.toBoolean() && IMAGE_SET_NAME == "") { throw new Exception("No IMAGE_SET_NAME set for image set only job") } if (IMAGE_SET_NAME != "") { if (IMAGE_SET_NAME.contains("${PRODUCT_BUILD}")) { os_version = RHEL_VERSION.replaceAll('rhel-','').replaceAll('\\..*','') real_puddle = puddleDateFormatter("${UNDERCLOUD_VERSION}", "${PRODUCT_BUILD}", null, os_version) IMAGE_SET_NAME = IMAGE_SET_NAME.replace("${PRODUCT_BUILD}", real_puddle) echo "### Updated IMAGE_SET_NAME to $IMAGE_SET_NAME" } image_set_container = env.IMAGE_SET_CONTAINER ?: "s3://image_sets" image_set_cleanup = env.IMAGE_SET_CLEANUP ?: True echo "### Installing aws cli for image set checks." sh2 """ /usr/bin/python3 -m venv \$WORKSPACE/.awsclientvenv source \$WORKSPACE/.awsclientvenv/bin/activate python3 -m pip install --quiet awscli """ // detect whether an image set exists (but may not be complete) image_set_exists = sh ( script: """\ \$WORKSPACE/.awsclientvenv/bin/aws --endpoint-url \$AWS_ENDPOINT_URL s3 ls $image_set_container/$IMAGE_SET_NAME/status &>/dev/null """, returnStatus: true ) == 0 // detect whether an image set exists and is complete image_set_complete = sh ( script: """\ \$WORKSPACE/.awsclientvenv/bin/aws --endpoint-url \$AWS_ENDPOINT_URL s3 cp $image_set_container/$IMAGE_SET_NAME/status - | grep -q upload-complete &>/dev/null """, returnStatus: true ) == 0 if (image_set_complete) { echo "### Complete image set found. Removing deployment stages from RUN_STAGES." run_stages_list = env.RUN_STAGES.split(",") run_stages_list-=['Undercloud', 'Images', 'Introspect', 'Tagging', 'Overcloud', 'Post tasks'] env.RUN_STAGES = run_stages_list.join(",") IMAGE_SET_OPTIONS = [ '--virsh-snapshot-download yes', '--virsh-snapshot-container ' + image_set_container, '--virsh-snapshot-path ' + IMAGE_SET_DOWNLOAD_PATH + '/' + IMAGE_SET_NAME, '--virsh-snapshot-import yes', '--virsh-snapshot-cleanup ' + image_set_cleanup ].join(' ') } else if (!image_set_exists && 'false'.toBoolean()) { throw new Exception("Image set does not exit and job is image set only") } else if (!image_set_exists) { echo "### No image set found for $IMAGE_SET_NAME. Adding 'Export & Upload Image Set' stage to RUN_STAGES." env.RUN_STAGES+=',Export & Upload Image Set' } else { echo "### Incomplete image set found. Continuing with deployment stages." } } IR_VIRSH_COLLECT_ANSIBLE_FACTS='' if ( 'False' != '' ) { IR_VIRSH_COLLECT_ANSIBLE_FACTS='--collect-ansible-facts False' } //Add bootmode support to virsh provisioning IR_VIRSH_PROVISION_BOOTMODE='' if ( '' != '' ) { IR_VIRSH_PROVISION_BOOTMODE="--bootmode " } if ( IR_VIRSH_PROVISION_BOOTMODE == '' && (UNDERCLOUD_VERSION as float) >= 17 ) { // from 17 expected by default is uefi, but ir-virsh does not know/expect info about version IR_VIRSH_PROVISION_BOOTMODE="--bootmode uefi" } IR_VIRSH_HOST_MTU_SIZE='' if ( '' != '' ) { IR_VIRSH_HOST_MTU_SIZE='--host-mtu-size ' } IR_VIRSH_IMAGE=env.IR_VIRSH_IMAGE?.trim() ?: '' def img_list = [ 'rhel-9.2':'http://download.devel.redhat.com/rhel-9/rel-eng/RHEL-9/RHEL-9.2.0-Beta-1.0/compose/BaseOS/x86_64/images/rhel-guest-image-9.2-20230306.4.x86_64.qcow2', 'rhel-9.1':'http://download.devel.redhat.com/rhel-9/rel-eng/RHEL-9/RHEL-9.1.0-20221027.3/compose/BaseOS/x86_64/images/rhel-guest-image-9.1-20221027.3.x86_64.qcow2', 'rhel-9.0':'http://download.eng.tlv.redhat.com/rhel-9/rel-eng/RHEL-9/latest-RHEL-9.0.0/compose/BaseOS/x86_64/images/rhel-guest-image-9.0-20220420.0.x86_64.qcow2', 'rhel-8.5':'http://download.eng.tlv.redhat.com/rhel-8/rel-eng/RHEL-8/latest-RHEL-8.5.0/compose/BaseOS/x86_64/images/rhel-guest-image-8.5-1174.x86_64.qcow2', 'rhel-8.4':'http://download.devel.redhat.com/rhel-8/rel-eng/RHEL-8/latest-RHEL-8.4.0/compose/BaseOS/x86_64/images/rhel-guest-image-8.4-992.x86_64.qcow2', 'rhel-8.3':'http://download.devel.redhat.com/rhel-8/rel-eng/RHEL-8/latest-RHEL-8.3.0/compose/BaseOS/x86_64/images/rhel-guest-image-8.3-401.x86_64.qcow2', 'rhel-8.2':'http://download.devel.redhat.com/rhel-8/rel-eng/RHEL-8/latest-RHEL-8.2.0/compose/BaseOS/x86_64/images/rhel-guest-image-8.2-290.x86_64.qcow2', 'rhel-8.1':'http://download.devel.redhat.com/rhel-8/rel-eng/RHEL-8/latest-RHEL-8.1.0/compose/BaseOS/x86_64/images/rhel-guest-image-8.1-263.x86_64.qcow2', 'rhel-8.0':'http://download.devel.redhat.com/rhel-8/rel-eng/RHEL-8/latest-RHEL-8.0.0/compose/BaseOS/x86_64/images/rhel-guest-image-8.0-1854.x86_64.qcow2', 'rhel-7.9':'http://download.devel.redhat.com/rhel-7/rel-eng/RHEL-7/latest-RHEL-7.9/compose/Server/x86_64/images/rhel-guest-image-7.9-30.x86_64.qcow2', 'rhel-7.8':'http://download.devel.redhat.com/rhel-7/rel-eng/RHEL-7/latest-RHEL-7.8/compose/Server/x86_64/images/rhel-guest-image-7.8-41.x86_64.qcow2', 'rhel-7.7':'http://download.devel.redhat.com/rhel-7/rel-eng/RHEL-7/latest-RHEL-7.7/compose/Server/x86_64/images/rhel-guest-image-7.7-261.x86_64.qcow2', 'rhel-7.6':'http://download.devel.redhat.com/rhel-7/rel-eng/RHEL-7/latest-RHEL-7.6/compose/Server/x86_64/images/rhel-guest-image-7.6-210.x86_64.qcow2', 'rhel-7.5':'http://download.devel.redhat.com/rhel-7/rel-eng/RHEL-7/RHEL-7.5-RC-1.3/compose/Server/x86_64/images/rhel-guest-image-7.5-146.x86_64.qcow2', 'rhel-7.4':'http://download.devel.redhat.com/rhel-7/rel-eng/RHEL-7/RHEL-7.4-RC-1.2/compose/Server/x86_64/images/rhel-guest-image-7.4-191.x86_64.qcow2', 'rhel-7.3':'http://download.devel.redhat.com/pub/rhel/released/RHEL-7/7.3/Server/x86_64/images/rhel-guest-image-7.3-33.x86_64.qcow2', 'windows2019':'http://10.0.152.55/dfg-compute-images/win2019.qcow2', 'windows2022':'http://10.0.152.55/dfg-compute-images/win2022.qcow2', 'rhel-8.3-ppc64le':'http://download.devel.redhat.com/rhel-8/rel-eng/RHEL-8/latest-RHEL-8.3.0/compose/BaseOS/ppc64le/images/rhel-guest-image-8.3-400.ppc64le.qcow2', ] def osp_rhel_list = [ '10': 'rhel-7.7', '13': 'rhel-7.9', '14': 'rhel-7.7', '15': 'rhel-8.2', '16': 'rhel-8.1', '16.1': 'rhel-8.2', '16.2': 'rhel-8.4', '17.0': 'rhel-9.0', '17.1': 'rhel-9.2' ] if ( IR_VIRSH_IMAGE == '' ) { if ( RHEL_VERSION == '' ) { IR_VIRSH_IMAGE = img_list[osp_rhel_list[UNDERCLOUD_VERSION]] } else { // when image is not specified directly, neither from env or as override for UC only // use just corresponding image from list for given rhel_version IR_VIRSH_IMAGE = img_list[RHEL_VERSION] } } IR_VIRSH_IMAGE = siteURL(IR_VIRSH_IMAGE) //Copy virsh image to enviroment variable for using it in other stages. env.IR_VIRSH_IMAGE = IR_VIRSH_IMAGE IR_PROVISION_OVERRIDE_OPTIONS = env.IR_PROVISION_OVERRIDE_OPTIONS ?: """-e override.undercloud.memory=32768 \ -e override.undercloud.disks.disk1.size="100G" \ -e override.computehci.disks.disk1.size=20G \ -e override.ceph.memory=6144 \ --topology-timezone UTC """.trim() //bgp options if ( '' != '' ) { IR_VIRSH_BGP_OPTIONS='--bgp-topology-mode ' IR_VIRSH_BGP_OPTIONS+='-e provision_networks=provision_networks_bgp.yml -e provision_vms=provision_vms_bgp.yml ' } else { IR_VIRSH_BGP_OPTIONS='' } CIResourceCheck(CIResourcesThisBuild + [resource_from_url(IR_VIRSH_IMAGE)]) sh2 """ . $ir_venv/bin/activate infrared virsh \ -o provision.yml \ --topology-nodes $TOPOLOGY \ --topology-network 3_nets_ipv4 \ --host-address $host \ --host-key $HOME/.ssh/rhos-jenkins/id_rsa \ --host-memory-overcommit True \ --image-url ${IR_VIRSH_IMAGE} \ --serial-files true \ ${IR_VIRSH_PROVISION_BOOTMODE} \ ${IR_VIRSH_COLLECT_ANSIBLE_FACTS} \ ${IR_VIRSH_HOST_MTU_SIZE} \ ${IMAGE_SET_OPTIONS} \ ${IR_VIRSH_BGP_OPTIONS} \ ${IR_PROVISION_OVERRIDE_OPTIONS} if [ '' == '' ]; then if [ -z "\$(infrared plugin list | grep install-redhat-ca)" ]; then infrared plugin add "https://gitlab.cee.redhat.com/rhos-ci/ir-redhat-CA.git" fi infrared install-redhat-ca --hosts-pattern="undercloud" fi """, basename: 'ir-virsh-provision' if ((IMAGE_SET_OPTIONS != '') && (image_set_exists)) { sh2 """ . $ir_venv/bin/activate infrared virsh -v \ --host-address $host \ --host-key $HOME/.ssh/rhos-jenkins/id_rsa \ --virsh-snapshot-quiesce True \ --ansible-args="tags=quiesce" """, basename: 'ir-virsh-snapshot-quiesce' } } if ( "False".toBoolean() ) { archiveArtifacts allowEmptyArchive: true, artifacts: '**/ansible-facts/*' } // Add a build mark for provisioning done from image sets buildMarksReverseSearch([["core_puddle", "khaki"]]) } stage('Clone projects') { dir('ansible-nfv') { git branch: 'master', url: 'https://github.com/redhat-openstack/ansible-nfv.git' } dir('neutron-qe') { git branch: 'master', url: 'https://code.engineering.redhat.com/gerrit/Neutron-QE' } } /** * Patch OpenStack with component from the source code using patch-components * IR plugin. * * @requires install_ir.groovy.inc * @requires codeeng_checkout_component.groovy.inc * * @jjb_param {component} name of the OpenStack component to be used * for patching * * @jjb_param {product_version} Specifies product version (director ver.) * - Numbers are for OSP releases * - Names are for RDO releases * Please refer to InfraRed 'tripleo-overcloud' plugin for more * information. * * @param $ir_venv virtual environment where InfraRed is installed, * by install_ir.groovy.inc stage. * */ if ((params['GERRIT_BRANCH'] ?: '').matches("rhos-.*-patches.*")) { def JOB_RUN_MODE = env.JOB_RUN_MODE stage2('Patching component') { if (env.PATCH_COMPONENTS_GERRIT_CHANGE) { env.PATCH_COMPONENTS_GERRIT_CHANGE = env.PATCH_COMPONENTS_GERRIT_CHANGE.trim() println "downloading patch-components change ${PATCH_COMPONENTS_GERRIT_CHANGE}" sh """ . $ir_venv/bin/activate pushd infrared infrared plugin remove patch-components sed -i '/patch-components/,+4 d' $ir_venv/lib/python*/site-packages/infrared/__init__.py rm -rf plugins/patch-components git clone https://github.com/rhos-infra/patch-components.git pushd patch-components git remote add gerrit https://review.gerrithub.io/rhos-infra/patch-components git review -d ${PATCH_COMPONENTS_GERRIT_CHANGE} git --no-pager log -n 5 --pretty popd infrared plugin add patch-components """ } // env.PR_COMPONENT_NAME is set from the periodic multijob and here we will add // a dummy patch to exercise all the bits in the gate that incude patching stage if (env.PR_COMPONENT_NAME){ def BRANCH_NAME = "rhos-{product_version}.{minor_version}-patches" dir(env.PR_COMPONENT_NAME) { sh2 """ git checkout $BRANCH_NAME date > foo git add foo git commit -a -m "foo patch" """, basename: 'ir-patch-create-dummy-commit' } } TRUNK_BRANCH='' if ((params['GERRIT_BRANCH'] ?: '').matches(".*-trunk-.*")) { TRUNK_BRANCH = "--branch-trunk true" } dir('infrared') { def COMPONENT_NAME = (JOB_RUN_MODE == "periodic")?env.PR_COMPONENT_NAME:env.GERRIT_PROJECT sh2 """ . $ir_venv/bin/activate if [[ $GERRIT_REFSPEC =~ refs/changes* ]] || [[ "$JOB_RUN_MODE" == "periodic" ]]; then if [ -z "\$(infrared plugin list | grep install-redhat-ca)" ]; then infrared plugin add "https://gitlab.cee.redhat.com/rhos-ci/ir-redhat-CA.git" fi infrared install-redhat-ca --hosts-pattern tester,undercloud-0 infrared patch-components \ --component-name $COMPONENT_NAME \ --component-version 16.2 ${TRUNK_BRANCH} else echo "Skipping patching of component as no GERRIT_REFSPEC was provided." fi """, basename: 'ir-patch' } } } /** * Update hypervisor configurations * 1. iptables rules * 2. routing between virt environments * * @requires install_ir.groovy.inc * * @param $ir_venv virtual environment where InfraRed is installed, * by install_ir.groovy.inc stage. * */ stage2('Router Network Config') { if ('controller:3,computehci:3'.contains('cfme_tester')) { dir('infrared') { UNDERCLOUD_VERSION = env.OSP_VERSION ?: '16.2' def osp_rhel_list = [ '10': 'rhel-7.7', '13': 'rhel-7.9', '14': 'rhel-7.7', '15': 'rhel-8.2', '16': 'rhel-8.1', '16.1': 'rhel-8.2', '16.2': 'rhel-8.4', '17.0': 'rhel-9.0', '17.1': 'rhel-9.2' ] RHEL_VERSION = env.RHEL_VERSION ?: 'rhel-8.4' if ( RHEL_VERSION == '' ) { RHEL_VERSION = osp_rhel_list[UNDERCLOUD_VERSION.toString()].toUpperCase() } GENERIC_RHEL_VERSION = (RHEL_VERSION =~ /\d+/)[0] EXTENDED_RHEL_VERSION = (RHEL_VERSION =~ /[0-9\.]+/)[0] // Variables according to RHEL/OSP version _RHEL_BOOL = GENERIC_RHEL_VERSION.toInteger() > 8 TABLES_BIN = (_RHEL_BOOL) ? 'iptables-nft' : 'iptables' TABLES_PACKAGE = (_RHEL_BOOL) ? 'iptables-nft-services' : 'iptables-services' // http://rhos-qe-mirror-tlv.usersys.redhat.com/rel-eng/rhel-7/RHEL-7/latest-RHEL-7/ // http://rhos-qe-mirror.lab.eng.tlv2.redhat.com/rel-eng/rhel-7/RHEL-7/latest-RHEL-7.9/ // http://rhos-qe-mirror.lab.eng.tlv2.redhat.com/rel-eng/rhel-8/RHEL-8/latest-RHEL-8/ // http://rhos-qe-mirror.lab.eng.tlv2.redhat.com/rel-eng/rhel-8/RHEL-8/latest-RHEL-8.4/ // http://rhos-qe-mirror.lab.eng.tlv2.redhat.com/rel-eng/rhel-9/RHEL-9/latest-RHEL-9/ // http://rhos-qe-mirror.lab.eng.tlv2.redhat.com/rel-eng/rhel-9/RHEL-9/latest-RHEL-9.1/ URL_PATH = "rel-eng/rhel-${GENERIC_RHEL_VERSION}/RHEL-${GENERIC_RHEL_VERSION}/latest-RHEL-${GENERIC_RHEL_VERSION}/compose" try { sh2 """ curl --fail "http://rhos-qe-mirror.lab.eng.tlv2.redhat.com/${URL_PATH}" """ } catch (Exception err) { echo err.getMessage() } RHEL_REPOS_CMDS = (GENERIC_RHEL_VERSION.toInteger() > 7) \ ? """ infrared ssh cfme_tester-0 "echo '[rhosp-rhel-${EXTENDED_RHEL_VERSION}-baseos]' > /tmp/rhel${GENERIC_RHEL_VERSION}.repo" infrared ssh cfme_tester-0 "echo 'name=rhel${GENERIC_RHEL_VERSION} baseos' >> /tmp/rhel${GENERIC_RHEL_VERSION}.repo" infrared ssh cfme_tester-0 "echo 'baseurl=http://rhos-qe-mirror.lab.eng.tlv2.redhat.com/${URL_PATH}/BaseOS/x86_64/os/' >> /tmp/rhel${GENERIC_RHEL_VERSION}.repo" infrared ssh cfme_tester-0 "echo 'enabled=1' >> /tmp/rhel${GENERIC_RHEL_VERSION}.repo" infrared ssh cfme_tester-0 "echo 'gpgcheck=0' >> /tmp/rhel${GENERIC_RHEL_VERSION}.repo" infrared ssh cfme_tester-0 "echo '' >> /tmp/rhel${GENERIC_RHEL_VERSION}.repo" infrared ssh cfme_tester-0 "echo '[rhosp-rhel${EXTENDED_RHEL_VERSION}-appstream]' >> /tmp/rhel${GENERIC_RHEL_VERSION}.repo" infrared ssh cfme_tester-0 "echo 'name=rhel${GENERIC_RHEL_VERSION} appstream' >> /tmp/rhel${GENERIC_RHEL_VERSION}.repo" infrared ssh cfme_tester-0 "echo 'baseurl=http://rhos-qe-mirror.lab.eng.tlv2.redhat.com/${URL_PATH}/AppStream/x86_64/os/' >> /tmp/rhel${GENERIC_RHEL_VERSION}.repo" infrared ssh cfme_tester-0 "echo 'enabled=1' >> /tmp/rhel${GENERIC_RHEL_VERSION}.repo" infrared ssh cfme_tester-0 "echo 'gpgcheck=0' >> /tmp/rhel${GENERIC_RHEL_VERSION}.repo" """ \ : """ infrared ssh cfme_tester-0 "echo '[rhel${GENERIC_RHEL_VERSION}-client]' > /tmp/rhel${GENERIC_RHEL_VERSION}.repo" infrared ssh cfme_tester-0 "echo 'name=rhel${GENERIC_RHEL_VERSION} client' >> /tmp/rhel${GENERIC_RHEL_VERSION}.repo" infrared ssh cfme_tester-0 "echo 'baseurl=http://rhos-qe-mirror.lab.eng.tlv2.redhat.com/${URL_PATH}/Client/x86_64/os/' >> /tmp/rhel${GENERIC_RHEL_VERSION}.repo" infrared ssh cfme_tester-0 "echo 'enabled=1' >> /tmp/rhel${GENERIC_RHEL_VERSION}.repo" infrared ssh cfme_tester-0 "echo 'gpgcheck=0' >> /tmp/rhel${GENERIC_RHEL_VERSION}.repo" """ sh2 basename: 'day2test-ffu-test', script: """ . $ir_venv/bin/activate infrared ssh cfme_tester-0 "sudo nmcli con add type vlan con-name eth0.218 dev eth0 id 218 ip4 10.218.0.10/24" infrared ssh cfme_tester-0 "sudo nmcli con up eth0.218" ${RHEL_REPOS_CMDS} infrared ssh cfme_tester-0 "sudo cp /tmp/rhel${GENERIC_RHEL_VERSION}.repo /etc/yum.repos.d/rhel${GENERIC_RHEL_VERSION}.repo" infrared ssh cfme_tester-0 "sudo yum install ${TABLES_PACKAGE} -y && sudo systemctl start iptables && sudo systemctl enable iptables" infrared ssh cfme_tester-0 "sudo ${TABLES_BIN} -F" infrared ssh cfme_tester-0 "sudo ${TABLES_BIN} -X" infrared ssh cfme_tester-0 "sudo ${TABLES_BIN} -t nat -F" infrared ssh cfme_tester-0 "sudo ${TABLES_BIN} -t nat -X" infrared ssh cfme_tester-0 "sudo ${TABLES_BIN} -t mangle -F" infrared ssh cfme_tester-0 "sudo ${TABLES_BIN} -t mangle -X" infrared ssh cfme_tester-0 "sudo ${TABLES_BIN} -P INPUT ACCEPT" infrared ssh cfme_tester-0 "sudo ${TABLES_BIN} -P FORWARD ACCEPT" infrared ssh cfme_tester-0 "sudo ${TABLES_BIN} -P OUTPUT ACCEPT" infrared ssh cfme_tester-0 "sudo ${TABLES_BIN} -t nat -A POSTROUTING -o eth0 -j MASQUERADE" infrared ssh cfme_tester-0 "sudo service iptables save" infrared ssh cfme_tester-0 "cp /etc/sysctl.conf /tmp/" infrared ssh cfme_tester-0 "grep 'ipv4.ip_forward' /tmp/sysctl.conf && sed 's/net.ipv4.ip_forward.*/net.ipv4.ip_forward = 1/g' -i /tmp/sysctl.conf || echo 'net.ipv4.ip_forward = 1' >> /tmp/sysctl.conf" infrared ssh cfme_tester-0 "sudo cp /tmp/sysctl.conf /etc/" infrared ssh cfme_tester-0 "sudo sysctl -p /etc/sysctl.conf" infrared ssh undercloud-0 "sudo nmcli con add type vlan con-name eth2.218 dev eth2 id 218 ip4 10.218.0.155/24" infrared ssh undercloud-0 "sudo nmcli con up eth2.218" """ } } else { println('ir_virsh_topology_nodes does not contain cfme_tester, skipping stage Run Network Config') } } } def stage_inside_try() { /** * Deploy OpenStack UnderCloud. * * @requires install_ir.groovy.inc * * @jjb_param {product_version} Specifies product version (director ver.) * - Numbers are for OSP releases * - Names are for RDO releases * Please refer to InfraRed 'tripleo-undercloud' plugin for more * information. * * @jjb_param {product_build} Specifies the product build. Default value is * 'latest'. Please refer to InfraRed 'tripleo-undercloud' plugin * for more information. * * @jjb_param {ir_tripleo_undercloud_snapshot} is a boolean parameter to decide if you want * to install undercloud from scratch (false) or to use already made snapshot (true) * * @jjb_param {ir_tripleo_undercloud_ssl} whether ths SSL should be used * for undercloud. A self-signed SSL cert will be generated. * Please refer to InfraRed 'tripleo-undercloud' plugin for more * information. * * @jjb_param {ir_tripleo_mirror} specified mirror (for rpm, pip etc) * * @jjb_param {ir_tripleo_undercloud_override_options} optional override * options that can be specified by any job definition. * * @jjb_param {ir_tripleo_undercloud_config_file} Specifies custom undercloud.conf * file, if required. * * @jjb_param {ir_tripleo_undercloud_packages} * List of packages to install separated by commas. * * @jjb_param {ir_tripleo_multirhel_enabled} * Specifies if multirhel is enables. * * @jjb_param {ir_tripleo_ntp_pool} A list of NTP servers seperated by a comma * * @param $ir_venv virtual environment where InfraRed is installed, * by install_ir.groovy.inc stage. * */ stage2('Undercloud') { def CIResourcesThisStage = [] dir('infrared') { PRODUCT_BUILD = env.PRODUCT_BUILD?.trim() ?: "passed_phase2" UNDERCLOUD_WA = env.UNDERCLOUD_WA ?: "" IS_SSL = env.IS_SSL ?: "True" BUILDVAR = getBuildVar(PRODUCT_BUILD) if (BUILDVAR.contains('cdn')) { sh "wget ${siteURL('http://rhos-qe-mirror.lab.eng.tlv2.redhat.com/cgit/rhos-infrared/plain/private/cdn/cdn_creds.yml')}" } UNDERCLOUD_CONFIG='' if ('' != '') { UNDERCLOUD_CONFIG = "--config-file " } UNDERCLOUD_INSECURE_REGISTRIES='' IR_REGISTRY_INSECURE = env.IR_REGISTRY_INSECURE ?: '' if ("${IR_REGISTRY_INSECURE}" != '') { UNDERCLOUD_INSECURE_REGISTRIES = " --config-options DEFAULT.container_insecure_registries=${IR_REGISTRY_INSECURE}" } UNDERCLOUD_WORKAROUNDS='' if (UNDERCLOUD_WA != '') { UNDERCLOUD_WORKAROUNDS = "--workarounds $UNDERCLOUD_WA" } UNDERCLOUD_PACKAGES='' if ('' != '') { UNDERCLOUD_PACKAGES = "--packages " } IR_TRIPLEO_REPOS_URLS = '' IR_TRIPLEO_REPOS_URLS_ = env.REPOS_URLS ?: """ """.trim() if ( IR_TRIPLEO_REPOS_URLS_ != '' ) { IR_TRIPLEO_REPOS_URLS = "--repos-urls ${IR_TRIPLEO_REPOS_URLS_}" } IR_TRIPLEO_REGISTRY_CEPH_NAMESPACE = '' IR_REGISTRY_CEPH_NAMESPACE = env.IR_REGISTRY_CEPH_NAMESPACE ?: '' if ( "${IR_REGISTRY_CEPH_NAMESPACE}" != '' ) { CIResourcesThisStage.add(resource_from_url(IR_REGISTRY_CEPH_NAMESPACE)) IR_TRIPLEO_REGISTRY_CEPH_NAMESPACE = "--registry-ceph-namespace ${IR_REGISTRY_CEPH_NAMESPACE}" } IR_TRIPLEO_REGISTRY_CEPH_IMAGE = '' IR_REGISTRY_CEPH_IMAGE = env.IR_REGISTRY_CEPH_IMAGE ?: '' if ( "${IR_REGISTRY_CEPH_IMAGE}" != '' ) { IR_TRIPLEO_REGISTRY_CEPH_IMAGE = "--registry-ceph-image ${IR_REGISTRY_CEPH_IMAGE}" } // For OSP>=14 deployments this step is moved from OC into UC stage IR_TRIPLEO_REGISTRY_CEPH_TAG = '' if (env.IR_REGISTRY_CEPH_TAG) { IR_REGISTRY_CEPH_TAG = env.IR_REGISTRY_CEPH_TAG } else if ('') { IR_REGISTRY_CEPH_TAG = '' } else { IR_REGISTRY_CEPH_TAG = '' } if ( "${IR_REGISTRY_CEPH_TAG}" != '' ) { IR_TRIPLEO_REGISTRY_CEPH_TAG = "--registry-ceph-tag ${IR_REGISTRY_CEPH_TAG}" } // Setting NAMESPACE in UC deployments stage. IR_TRIPLEO_REGISTRY_NAMESPACE = '' if ( '' != '' ) { IR_TRIPLEO_REGISTRY_NAMESPACE = "--registry-namespace " CIResourcesThisStage.add(resource_from_url('')) } IR_UNDERCLOUD_CEPH_REPOS = '' if ( 'yes' == 'no' ) { IR_UNDERCLOUD_CEPH_REPOS = "--ceph-repos yes" } IR_TRIPLEO_PUBLIC_VIPS_TLS_EVERYWHERE = '' if ( '' != '' ) { IR_TRIPLEO_PUBLIC_VIPS_TLS_EVERYWHERE = "-e @" } MULTIRHEL_ENABLE = '' if ( 'false'.toBoolean() ) { MULTIRHEL_ENABLE = '--multirhel-enabled false' } UNDERCLOUD_OVERRIDE_OPTIONS = env.UNDERCLOUD_OVERRIDE_OPTIONS ?: """--tls-ca https://password.corp.redhat.com/RH-IT-Root-CA.crt \ --hieradata-config \ "nova::cache::enabled=true,\ nova::cache::backend='oslo_cache.memcache_pool',\ nova::cache::memcache_servers='%{\072\072fqdn}:11211',\ heat::cache::enabled=true,\ heat::cache::backend='oslo_cache.memcache_pool',\ heat::cache::memcache_servers='%{\072\072fqdn}:11211',\ cinder::keystone::authtoken::memcached_servers='%{\072\072fqdn}:11211',\ glance::api::authtoken::memcached_servers='%{\072\072fqdn}:11211',\ heat::keystone::authtoken::memcached_servers='%{\072\072fqdn}:11211',\ ironic::api::authtoken::memcached_servers='%{\072\072fqdn}:11211',\ ironic::inspector::authtoken::memcached_servers='%{\072\072fqdn}:11211',\ mistral::keystone::authtoken::memcached_servers='%{\072\072fqdn}:11211',\ neutron::keystone::authtoken::memcached_servers='%{\072\072fqdn}:11211',\ nova::keystone::authtoken::memcached_servers='%{\072\072fqdn}:11211',\ swift::proxy::authtoken::memcached_servers='%{\072\072fqdn}:11211',\ zaqar::keystone::authtoken::memcached_servers='%{\072\072fqdn}:11211',\ nova::compute::ironic::max_concurrent_builds=10" """.trim() if (EST_BAREMETAL) { if (IS_TLS) { UNDERCLOUD_OVERRIDE_OPTIONS += BM_RESOURCE_MAP[host]["ipa-forwarder"] } } NTP_POOL='' if ( 'clock.corp.redhat.com' != '' ) { if (16.2 < 14) { NTP_POOL='--ntp-server clock.corp.redhat.com' } else { NTP_POOL='--ntp-pool clock.corp.redhat.com' } } // Override Undercloud version if job parameter is defined UNDERCLOUD_VERSION = env.OSP_VERSION ?: '16.2' def osp_rhel_list = [ '10': 'rhel-7.7', '13': 'rhel-7.9', '14': 'rhel-7.7', '15': 'rhel-8.2', '16': 'rhel-8.1', '16.1': 'rhel-8.2', '16.2': 'rhel-8.4', '17.0': 'rhel-9.0', '17.1': 'rhel-9.2' ] RHEL_VERSION = env.RHEL_VERSION ?: 'rhel-8.4' ssl_str = '' if ( RHEL_VERSION == '' ) { RHEL_VERSION = osp_rhel_list[UNDERCLOUD_VERSION.toString()] } UNDERCLOUD_SNAPSHOT = env.UNDERCLOUD_SNAPSHOT ?: "False" UNDERCLOUD_SNAPSHOT_ACCESS_NETWORK = env.UNDERCLOUD_SNAPSHOT_ACCESS_NETWORK ?: "external" puddle_version = puddleDateFormatter("${UNDERCLOUD_VERSION}","${PRODUCT_BUILD}") if ( UNDERCLOUD_VERSION.toDouble() <= 13 ) { ssl_str = ((env.IS_SSL ?: "True") == "yes") ? '-ssl' : '' } is_snapshot_exist = false snapshot_url = siteURL("http://rhos-ci-logs.lab.eng.tlv2.redhat.com/osp-images") SELECTED_MIRROR = EST_BAREMETAL? (BM_RESOURCE_MAP[host]["selected_mirror"]): env.QE_MIRROR.trim() ?: "" SHADE_HOST = EST_BAREMETAL? (BM_RESOURCE_MAP[host]["shade_host"]): 'undercloud-0' if ("False".toBoolean()) { SHADE_HOST='undercloudbgp-0' IR_TRIPLEO_UNDERCLOUD_BGP_OPTIONS='--bgp-enabled yes' } else { IR_TRIPLEO_UNDERCLOUD_BGP_OPTIONS='' } if ((UNDERCLOUD_SNAPSHOT.toBoolean()) && (uc_type == "uc-snapshot-ready")) { undercloud_path = "/var/lib/libvirt/images/undercloud-0-snapshot-osp-${UNDERCLOUD_VERSION}-${RHEL_VERSION}-puddle-${puddle_version}${ssl_str}.qcow2" print "checking is snapshot url ${undercloud_path} usable" sh2 """ . $ir_venv/bin/activate infrared tripleo-undercloud \ -o install.yml \ --snapshot-restore yes \ --snapshot-image ${undercloud_path}\ --snapshot-access-network ${UNDERCLOUD_SNAPSHOT_ACCESS_NETWORK} \ --version ${UNDERCLOUD_VERSION} \ """, basename: 'ir-tripleo-undercloud' } CIResourcesThisStage.add(resource_from_mirror(SELECTED_MIRROR)) CIResourceCheck(CIResourcesThisBuild + CIResourcesThisStage) if ((!UNDERCLOUD_SNAPSHOT.toBoolean()) || ((UNDERCLOUD_SNAPSHOT.toBoolean()) && (uc_type == "uc-full-deploy"))) { if (UNDERCLOUD_VERSION.toDouble() >= 15.0) { UNDERCLOUD_CONFIG += " --config-options DEFAULT.undercloud_timezone=UTC" } sh2 """ . $ir_venv/bin/activate infrared tripleo-undercloud \ -o install.yml \ -o undercloud-install.yml \ --mirror "${ SELECTED_MIRROR }" \ --version ${UNDERCLOUD_VERSION} \ --splitstack no \ --shade-host ${ SHADE_HOST } \ ${ BUILDVAR } \ --ssl ${ IS_SSL } \ ${ UNDERCLOUD_CONFIG } \ ${ UNDERCLOUD_WORKAROUNDS } \ ${UNDERCLOUD_INSECURE_REGISTRIES} \ ${IR_TRIPLEO_REGISTRY_CEPH_NAMESPACE} \ ${IR_TRIPLEO_REGISTRY_CEPH_IMAGE} \ ${IR_TRIPLEO_REGISTRY_CEPH_TAG} \ ${IR_TRIPLEO_REGISTRY_NAMESPACE} \ ${IR_TRIPLEO_REPOS_URLS} \ ${IR_TRIPLEO_PUBLIC_VIPS_TLS_EVERYWHERE} \ ${IR_UNDERCLOUD_CEPH_REPOS} \ ${UNDERCLOUD_PACKAGES} \ ${NTP_POOL} \ ${IR_TRIPLEO_UNDERCLOUD_BGP_OPTIONS} \ ${MULTIRHEL_ENABLE} \ ${ UNDERCLOUD_OVERRIDE_OPTIONS } """, basename: 'ir-tripleo-undercloud' } } buildMarks([["poodle", "lightyellow"], ["puddle", "yellow"], ["director_puddle", "aqua"], ["core_puddle", "khaki"]]) } /** * Prepares OpenStack Overcloud images on existing Undercloud environment. * * @requires install_ir.groovy.inc * * @jjb_param {ir_tripleo_undercloud_images_task} Specifies the source for the * OverCloud images. Please refer to InfraRed 'tripleo-undercloud' * plugin for more information. * * @jjb_param {ir_tripleo_undercloud_images_update} Update OverCloud. Please * refer to InfraRed 'tripleo-undercloud' plugin for more * information. * * @jjb_param {ir_tripleo_mirror} specified mirror (for rpm, pip etc) * * @jjb_param {ir_tripleo_undercloud_images_override_options} optional * override options that can be specified by any job definition. * * @jjb_param {ir_tripleo_undercloud_images_packages} * List of packages to install separated by commas. * * @jjb_param {ir_tripleo_undercloud_images_remove_packages} * List of packages to uninstall separated by commas. * * @jjb_param {ir_tripleo_undercloud_images_remove_no_deps_packages} * List of packages to uninstall without their RPM dependencies, separated by commas. * * @jjb_param {ir_tripleo_undercloud_update_overcloud_kernel} Boolean. * Updating default overcloud kernel with kernel files retrieved from customized overcloud image. * * @jjb_param {product_build} Specifies the product build. Default value is * 'latest'. Please refer to InfraRed 'tripleo-undercloud' plugin * for more information. * * @jjb_param {ir_tripleo_multirhel_enabled} * Specifies if multirhel is enables. * * @jjb_param {ir_tripleo_multirhel_containers_prepare_file} * Specifics a path/url to the container prepare file when multirhel is enabled. * * @param $ir_venv virtual environment where InfraRed is installed, * by install_ir.groovy.inc stage. * * @jjb_param {ir_tripleo_undercloud_images_stage_timeout|80} specifies timeout for stage * */ timeout(time: "80".toInteger(), unit: 'MINUTES') { stage2('Images') { def CIResourcesThisStage = [] dir('infrared') { PRODUCT_BUILD = env.PRODUCT_BUILD ?: "passed_phase2" BUILDVAR = getBuildVar(PRODUCT_BUILD) if (BUILDVAR.contains('--cdn')) { sh "wget ${siteURL('http://rhos-qe-mirror.lab.eng.tlv2.redhat.com/cgit/rhos-infrared/plain/private/cdn/cdn_creds.yml')}" } IR_TRIPLEO_OVERCLOUD_IMAGES_PACKAGES = '' IR_TRIPLEO_OVERCLOUD_IMAGES_PACKAGES_ = env.OVERCLOUD_IMAGES_PACKAGES ?: """ """.trim() if ( IR_TRIPLEO_OVERCLOUD_IMAGES_PACKAGES_ != '' ) { IR_TRIPLEO_OVERCLOUD_IMAGES_PACKAGES = "--images-packages ${IR_TRIPLEO_OVERCLOUD_IMAGES_PACKAGES_}" } IR_UC_IMAGES_UPDATE = "--images-update no" IR_TRIPLEO_REPOS_URLS = '' IR_TRIPLEO_REPOS_URLS_ = env.REPOS_URLS ?: """ """.trim() if ( IR_TRIPLEO_REPOS_URLS_ != '' ) { IR_TRIPLEO_REPOS_URLS = "--repos-urls ${IR_TRIPLEO_REPOS_URLS_}" IR_UC_IMAGES_UPDATE = "--images-update yes" } IR_UNDERCLOUD_CEPH_REPOS = '' if ( 'yes' == 'no' ) { IR_UNDERCLOUD_CEPH_REPOS = "--ceph-repos yes" } UNDERCLOUD_IMAGES_PACKAGES='' if ('' != '') { UNDERCLOUD_IMAGES_PACKAGES = "--images-packages " } UNDERCLOUD_IMAGES_REMOVE_PACKAGES='' if ('' != '') { UNDERCLOUD_IMAGES_REMOVE_PACKAGES = "--images-remove-packages " } UNDERCLOUD_IMAGES_REMOVE_NO_DEPS_PACKAGES='' if ('' != '') { UNDERCLOUD_IMAGES_REMOVE_NO_DEPS_PACKAGES = "--images-remove-no-deps-packages " } UNDERCLOUD_UPDATE_OVERCLOUD_KERNEL='' if ('false' != 'false') { UNDERCLOUD_UPDATE_OVERCLOUD_KERNEL = '--overcloud-update-kernel false' } MULTIRHEL_ENABLE = '' if ( 'false'.toBoolean() ) { MULTIRHEL_ENABLE = '--multirhel-enabled false' } SELECTED_MIRROR = env.QE_MIRROR.trim() ?: "" CIResourcesThisStage.add(resource_from_mirror(SELECTED_MIRROR)) CIResourceCheck(CIResourcesThisBuild + CIResourcesThisStage) if ((UNDERCLOUD_SNAPSHOT.toBoolean()) && (uc_type == "uc-snapshot-ready")) { print "Snapshot deployment, skipping Images stage since it was done as part of the uc-snapshot image backup job already" } else { sh2 """ . $ir_venv/bin/activate # adding workaround until the following is fixed: # - https://bugzilla.redhat.com/show_bug.cgi?id=1702715#c12 (workaround mentioned in that comment) # - https://bugzilla.redhat.com/show_bug.cgi?id=1714205 # - https://review.opendev.org/#/c/661582/ export WEBSOCKET_CLIENT_CA_BUNDLE=/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem infrared tripleo-undercloud -o undercloud-images.yml \ --images-task rpm \ ${IR_UC_IMAGES_UPDATE} \ ${ IR_TRIPLEO_OVERCLOUD_IMAGES_PACKAGES } \ ${IR_TRIPLEO_REPOS_URLS} \ ${IR_UNDERCLOUD_CEPH_REPOS} \ ${UNDERCLOUD_IMAGES_PACKAGES} \ ${UNDERCLOUD_IMAGES_REMOVE_NO_DEPS_PACKAGES} \ ${UNDERCLOUD_IMAGES_REMOVE_PACKAGES} \ ${UNDERCLOUD_UPDATE_OVERCLOUD_KERNEL} \ ${MULTIRHEL_ENABLE} \ ${ BUILDVAR } \ --mirror "${ SELECTED_MIRROR }" \ """, basename: "ir-tripleo-undercloud-images" } } } } /** * Run Introspect on the OpenStack OverCloud. * * @requires install_ir.groovy.inc * * @jjb_param {product_version} Specifies product version (director ver.) * - Numbers are for OSP releases * - Names are for RDO releases * Please refer to InfraRed 'tripleo-overcloud' plugin for more * information. * * @jjb_param {ir_tripleo_overcloud_deployment_files} Specifies templates * of the overcloud deployment. By default presets are 'virsh' * which are templates for virtual POC environment. * Please refer to InfraRed 'tripleo-overcloud' plugin for more * information. * * @jjb_param {ir_tripleo_overcloud_introspect_instackenv_file} * Specifies custom instackenv file if required. * * @jjb_param {ir_tripleo_hybrid_deployment} Specifies if hybrid mode should be used * during the deployment. * * @jjb_param {ir_tripleo_overcloud_introspect_override_options} optional * override options that can be specified by any job definition. * * @param $ir_venv virtual environment where InfraRed is installed, * by install_ir.groovy.inc stage. * * @jjb_param {ir_tripleo_overcloud_provison_virsh_network_name} Specifies virsh network name * */ stage2('Introspect') { dir('infrared') { INSTACKENV='' if ('' != '') { if ('no' == 'yes') { INSTACKENV = "--hybrid -e provison_virsh_network_name=br-ctlplane" } else { INSTACKENV = "--instackenv-file " } } // Override Overcloud version if job parameter is defined OVERCLOUD_VERSION= env.OSP_VERSION ?: "16.2" CIResourceCheck(CIResourcesThisBuild) sh2 basename: 'ir-tripleo-overcloud-install-introspect', script: """ . $ir_venv/bin/activate infrared tripleo-overcloud \ -o overcloud-install.yml \ --version ${OVERCLOUD_VERSION} \ --deployment-files composable_roles \ ${ INSTACKENV } \ --introspect yes \ --vbmc-force True \ --vbmc-host undercloud \ --tagging no \ --deploy no \ --root-disk-override node=computehci,hint=name,hintvalue=/dev/vda """ } } /** * Run Tagging for the OpenStack OverCloud. * * @requires install_ir.groovy.inc * * @jjb_param {ir_tripleo_overcloud_deployment_files} Specifies templates * of the overcloud deployment. By default presets are 'virsh' * which are templates for virtual POC environment. * Please refer to InfraRed 'tripleo-overcloud' plugin for more * information. * * @jjb_param {ir_tripleo_overcloud_introspect_instackenv_file} * Specifies custom instackenv file if required. * * @jjb_param {ir_tripleo_hybrid_deployment} Specifies if hybrid mode should be used * during the deployment. * * @jjb_param {ir_tripleo_overcloud_tagging_override_options} optional * override options that can be specified by any job definition. * * @param $ir_venv virtual environment where InfraRed is installed, * by install_ir.groovy.inc stage. * * @jjb_param {ir_tripleo_overcloud_provison_virsh_network_name} Specifies virsh network name * */ stage2('Tagging') { dir('infrared') { INSTACKENV='' if ('' != '') { if ('no' == 'yes') { INSTACKENV = "--hybrid -e provison_virsh_network_name=br-ctlplane" } else { INSTACKENV = "--instackenv-file " } } CIResourceCheck(CIResourcesThisBuild) sh2 basename: 'ir-tripleo-overcloud-install-tagging', script: """ . $ir_venv/bin/activate infrared tripleo-overcloud \ -o overcloud-install.yml \ --deployment-files composable_roles \ ${ INSTACKENV } \ --version 16.2 \ --introspect no \ --tagging yes \ --deploy no \ """ } } /** * Run deployment for the OpenStack Overcloud. * * @requires install_ir.groovy.inc * * @jjb_param {product_version} Specifies product version (director ver.) * - Numbers are for OSP releases * - Names are for RDO releases * Please refer to InfraRed 'tripleo-overcloud' plugin for more * information. * * @jjb_param {ir_tripleo_overcloud_deployment_compat_install} optional * Defines backward compatible installation. * Paramter expect to be yaml boolean - "yes" instead of "'yes'" * Default set to no in tripelo_tests_defaults.yaml * If set to true overcloud version will be ({product_version} - 1) * * @jjb_param {ir_tripleo_heat_template_base_dir} optional * In case of compat or custom installation, * Use this paramter to point to heat template directory * Default value is set to /usr/share/openstack-tripleo-heat-templates/ * * @jjb_param {ir_tripleo_overcloud_deployment_files} Specifies templates * of the overcloud deployment. By default presets are 'virsh' * which are templates for virtual POC environment. * Please refer to InfraRed 'tripleo-overcloud' plugin for more * information. * * @jjb_param {ir_tripleo_overcloud_templates} extra environment template * files to "overcloud deploy" command * Please refer to InfraRed 'tripleo-overcloud' plugin for more * information. * * @jjb_param {ir_tripleo_overcloud_glance_backend} the glance backend that will be * used. Please refer to InfraRed 'tripleo-overcloud' plugin for * more information. * * @jjb_param {ir_tripleo_overcloud_ceph_cluster_name} the name to be used for the ceph cluster. * * @jjb_param {ir_tripleo_overcloud_storage_backend} the storage that will be * used. Please refer to InfraRed 'tripleo-overcloud' plugin for * more information. * * @jjb_param {ir_tripleo_overcloud_ssl} whether ths SSL should be used * for overcloud. * * @jjb_param {ir_tripleo_overcloud_network_backend} overcloud network * backend * * @jjb_param {ir_tripleo_overcloud_network_protocol} overcloud network * protocol. * * @jjb_param {ir_tripleo_overcloud_network_bgpvpn} activate Neutron BGPVPN * support on the overcloud * * @jjb_param {ir_tripleo_overcloud_network_dvr} activate Neutron DVR * extension on the overcloud * * @jjb_param {ir_tripleo_overcloud_network_ovn} activate Neutron OVN * extension on the overcloud * * @jjb_param {ir_tripleo_overcloud_network_ovs} activate Neutron OVS * extension on the overcloud * * @jjb_param {ir_tripleo_overcloud_network_l2gw} activate Neutron L2 * gateway support on the overcloud * * @jjb_param {ir_tripleo_overcloud_storage_config} adds --storage-config * parameter which contains storage config heat template to use. It * can be an URL, a file path or an infrared template name (old * behavior) * * @jjb_param {ir_tripleo_overcloud_storage_external} whether to use an * external storage rather than setting it up with the director. * * @jjb_param {ir_tripleo_overcloud_composable_role_files} sub-folder under * the files/roles/ folder where InfraRed should look for the * composable roles files. * Please refer to InfraRed 'tripleo-overcloud' plugin for more * information. * * @jjb_param {ir_tripleo_overcloud_debug} whether overcloud service * should enable debug mode * * @jjb_param {ir_tripleo_overcloud_deploy_override_options} optional * override options that can be specified by any job definition. * * @param $ir_venv virtual environment where InfraRed is installed, * by install_ir.groovy.inc stage. * * @jjb_param {ir_containers} optional * Specifies whether the containers should be used for deployment or not. * * @jjb_param {ir_containers_registry_mirror} Alternative docker * registry to use for deployment. * * @jjb_param {ir_containers_registry_namespace} Alternative docker * registry namespace to use for deployment. * * # TODO (migi): Remove ir_containers_images_patch when following is merged * https://review.gerrithub.io/#/c/395722/ * * @jjb_param {ir_containers_images_patch} Optional parameter to specify * comma separated list of containers to be patched. * * @jjb_param {ir_tripleo_overcloud_introspect_instackenv_file} * In case of hybrid deployment, instackenv.json file with * "--hybrid" parameter should be provided within the overcloud * deployment step as well. * * @jjb_param {ir_tripleo_hybrid_deployment} Specifies if hybrid mode should be used * during the deployment. * * @jjb_param {ir_tripleo_overcloud_deploy_script} Specifies overcloud deploy script * if required. * * @jjb_param {ir_tripleo_overcloud_public_vlan_ip} Provide IP address from the * external network range for the Undercloud host. * * @jjb_param {ir_tripleo_overcloud_stack_name} Custom overcloud stack name * * @jjb_param {ir_tripleo_overcloud_network_name} Specifies the name of the public network * * @jjb_param {ir_tripleo_overcloud_provison_virsh_network_name} Specifies virsh network name * * @jjb_param {ir_tripleo_multirhel_enabled} * Specifies if multirhel is enables. * * @jjb_param {ir_tripleo_multirhel_containers_prepare_file} * Specifics a path/url to the container prepare file when multirhel is enabled. * * @jjb_param {ir_tripleo_overcloud_config_host_and_reboot} Specifies if computes need to be rebooted after configuration * @jjb_param {ir_tripleo_ntp_pool} A list of NTP servers seperated by a comma * * @param IR_UNDERCLOUD_PACKAGES Specifies the undercloud packages to use e.g. https://cbs.centos.org/repos/storage7-ceph-luminous-candidate/x86_64/os/Packages/ceph-ansible-3.1.0.0-0.rc19.1.el7.noarch.rpm * * @param IR_REGISTRY_CEPH_TAG Specifies the registry-ceph-tag use e.g. ceph-3.2-rhel-7-containers-candidate-78036-20181206225716 * * @param IR_TRIPLEO_CEPH_OSD_SCENARIO Specifies the ceph-osd-type use e.g.use '' or 'bluestore' or 'filestore' * * @param IR_TRIPLEO_CEPH_OSD_SCENARIO Specifies the ceph-osd-scenario use e.g.use '' or 'lvm' or 'non-collocated' or 'collocated' * */ stage2('Overcloud') { def CIResourcesThisStage = [] dir('infrared') { COMPOSABLE_ROLE_FILES = env.COMPOSABLE_ROLE_FILES ?: """Controller,ComputeHCI """.trim() COMPOSABLE_ROLES='' if ( COMPOSABLE_ROLE_FILES != 'None') { COMPOSABLE_ROLES = "--role-files ${COMPOSABLE_ROLE_FILES}" } if ('' != '') { storage_config = "--storage-config " } else if ('ceph' == 'ceph' && 'no' == 'yes') { if (16.2 < 12) { ceph_external_conf = 'external_ceph_rhos_16.2' } else if (16.2 < 16) { ceph_external_conf = 'external_ceph_rhos_12_and_above' if (host.contains('rdu2')) { ceph_external_conf += '_rdu2' } } else { ceph_external_conf = 'external_ceph_rhos_16_with_manila' if (host.contains('rdu2')) { ceph_external_conf += 'rdu2' } } if (host.contains('rdu2')) { // as per external Ceph environment file for RDU2 Ceph cluster CIResourcesThisBuild.add('cephusa1.mobius.lab.eng.rdu2.redhat.com') CIResourcesThisBuild.add('cephusa2.mobius.lab.eng.rdu2.redhat.com') CIResourcesThisBuild.add('cephusa3.mobius.lab.eng.rdu2.redhat.com') } else { // as per external Ceph environment file for TLV2 Ceph cluster CIResourcesThisBuild.add('ceph1.lab.eng.tlv2.redhat.com') CIResourcesThisBuild.add('ceph2.lab.eng.tlv2.redhat.com') CIResourcesThisBuild.add('ceph3.lab.eng.tlv2.redhat.com') } private_storage_file = "${siteURL('http://rhos-qe-mirror.lab.eng.tlv2.redhat.com/cgit/rhos-infrared/plain/private/storage/' + ceph_external_conf +'.yml')}" CIResourceCheck(['rhos-qe-mirror.lab.eng.tlv2.redhat.com']) // TODO(yfried): dump file to PWD once we support external file input sh "wget ${private_storage_file} -P plugins/tripleo-overcloud/vars/storage/config/" storage_config = '--storage-config ' + ceph_external_conf } else { storage_config = '' } CONTAINERS_SETTINGS='' if ('yes' != '') { CONTAINERS_SETTINGS += '--containers yes' } if ('' != '') { CIResourcesThisStage.add(resource_from_url('')) CONTAINERS_SETTINGS += " --registry-mirror ${siteURL('')}" } if ('' != '') { CIResourcesThisStage.add(resource_from_url('')) CONTAINERS_SETTINGS += ' --registry-namespace ' } IR_CONFIG_AND_REBOOT_HOST='' if ('' != '') { IR_CONFIG_AND_REBOOT_HOST = '--overcloud-host-config-and-reboot ' } HEAT_TEMPLATE_BASE_DIR='' if ('None' != 'None') { HEAT_TEMPLATE_BASE_DIR='--heat-templates-basedir None' } if ("False".toBoolean() && "16.2".isInteger()) { OVERCLOUD_VERSION = "16.2".toInteger() - 1 } else { OVERCLOUD_VERSION = "16.2" } // Override Overcloud version if job parameter is defined OVERCLOUD_VERSION= env.OSP_VERSION ?: OVERCLOUD_VERSION HYBRID_DEPLOY='' if ('' != '' && 'no' == 'yes') { HYBRID_DEPLOY = "--hybrid -e provison_virsh_network_name=br-ctlplane" } OVERCLOUD_SCRIPT='' if ('' != '') { OVERCLOUD_SCRIPT = "--overcloud-script " } PUBLIC_VLAN_IP='' if (EST_BAREMETAL) { PUBLIC_VLAN_IP = BM_RESOURCE_MAP[host]["public_vlan_ip_override"] } else if ('' != '') { PUBLIC_VLAN_IP = "--public-vlan-ip " } IR_TRIPLEO_OVERCLOUD_COLLECT_ANSIBLE_FACTS='' if ( 'False' != '' ) { IR_TRIPLEO_OVERCLOUD_COLLECT_ANSIBLE_FACTS='--collect-ansible-facts False' } IR_TRIPLEO_REPOS_URLS = '' IR_TRIPLEO_REPOS_URLS_ = env.REPOS_URLS ?: """ """.trim() if ( IR_TRIPLEO_REPOS_URLS_ != '' ) { IR_TRIPLEO_REPOS_URLS = "--container-extra-repos ${IR_TRIPLEO_REPOS_URLS_}" } IR_TRIPLEO_UPDATE_REPO_NAME = '' IR_TRIPLEO_UPDATE_REPO_NAME_ = env.UPDATE_REPO_NAME ?: """ """.trim() if ( IR_TRIPLEO_UPDATE_REPO_NAME_ != '' ) { IR_TRIPLEO_UPDATE_REPO_NAME = "--update_repo ${IR_TRIPLEO_UPDATE_REPO_NAME_}" } IR_TRIPLEO_OVERCLOUD_CONTAINER_IMAGES_PACKAGES = '' CONTAINER_IMAGES_PACKAGES = env.CONTAINER_IMAGES_PACKAGES ?: '' if ( CONTAINER_IMAGES_PACKAGES != '' ) { for (CONTAINER_IMAGES_PACKAGES_ in CONTAINER_IMAGES_PACKAGES.split(';')) { IR_TRIPLEO_OVERCLOUD_CONTAINER_IMAGES_PACKAGES += "--container-images-packages ${CONTAINER_IMAGES_PACKAGES_} " } } IR_TRIPLEO_OVERCLOUD_GLANCE_BACKEND='' if ( '' != '' ) { IR_TRIPLEO_OVERCLOUD_GLANCE_BACKEND = '--glance-backend ' } IR_TRIPLEO_OVERCLOUD_CEPH_CLUSTER_NAME='' IR_TRIPLEO_OVERCLOUD_CEPH_CLUSTER_NAME_ = env.CEPH_CLUSTER_NAME ?: """ceph """.trim() if ( IR_TRIPLEO_OVERCLOUD_CEPH_CLUSTER_NAME_ != '' ) { IR_TRIPLEO_OVERCLOUD_CEPH_CLUSTER_NAME = "--ceph-cluster-name ${IR_TRIPLEO_OVERCLOUD_CEPH_CLUSTER_NAME_}" } IR_TRIPLEO_OVERCLOUD_STACK_NAME='' if ( 'qe-Cloud-0' != '' ) { IR_TRIPLEO_OVERCLOUD_STACK_NAME = '--overcloud-stack qe-Cloud-0' } IR_TRIPLEO_OVERCLOUD_NETWORK_NAME='' if ( 'public' != '' ) { IR_TRIPLEO_OVERCLOUD_NETWORK_NAME = '--public-net-name public' } IR_TRIPLEO_OVERCLOUD_UNDERCLOUD_PACKAGES='' IR_UNDERCLOUD_PACKAGES = env.IR_UNDERCLOUD_PACKAGES ?: '' if ( "${IR_UNDERCLOUD_PACKAGES}" != '' ) { IR_TRIPLEO_OVERCLOUD_UNDERCLOUD_PACKAGES = "--undercloud-packages ${IR_UNDERCLOUD_PACKAGES}" } IR_TRIPLEO_REGISTRY_CEPH_TAG='' IR_REGISTRY_CEPH_TAG = env.IR_REGISTRY_CEPH_TAG ?: '' if ( "${IR_REGISTRY_CEPH_TAG}" != '' ) { IR_TRIPLEO_REGISTRY_CEPH_TAG = "--registry-ceph-tag ${IR_REGISTRY_CEPH_TAG}" } IR_TRIPLEO_CEPH_OSD_TYPE='' IR_CEPH_OSD_TYPE = env.IR_CEPH_OSD_TYPE ?: '' if ( "${IR_CEPH_OSD_TYPE}" != '' ) { IR_TRIPLEO_CEPH_OSD_TYPE = "--ceph-osd-type ${IR_CEPH_OSD_TYPE}" } IR_TRIPLEO_CEPH_OSD_SCENARIO='' IR_CEPH_OSD_SCENARIO = env.IR_CEPH_OSD_SCENARIO ?: '' if ( "${IR_CEPH_OSD_SCENARIO}" != '' ) { IR_TRIPLEO_CEPH_OSD_SCENARIO = "--ceph-osd-scenario ${IR_CEPH_OSD_SCENARIO}" } IR_TRIPLEO_OVERCLOUD_NETWORK_OVN='' if ('' != '') { IR_TRIPLEO_OVERCLOUD_NETWORK_OVN="--network-ovn " } IR_TRIPLEO_OVERCLOUD_NETWORK_OVS='' if ('True' != '') { IR_TRIPLEO_OVERCLOUD_NETWORK_OVS="--network-ovs True" } IR_TRIPLEO_OVERCLOUD_TEMPLATES = env.OVERCLOUD_TEMPLATES ?: """performance,cinder_backup_ap,l3_fip_qos""" if ("${IR_TRIPLEO_OVERCLOUD_TEMPLATES}".contains('---')) { writeFile file: 'extra_templates.yaml', text: IR_TRIPLEO_OVERCLOUD_TEMPLATES IR_TRIPLEO_OVERCLOUD_TEMPLATES = "extra_templates.yaml" } if (("${IR_TRIPLEO_OVERCLOUD_TEMPLATES}".contains('rhsm.yml'))||("${IR_TRIPLEO_OVERCLOUD_TEMPLATES}".contains('overcloud_cdn.yml'))) { sh "wget ${siteURL('http://rhos-qe-mirror.lab.eng.tlv2.redhat.com/cgit/rhos-infrared/plain/private/cdn/${ir_tripleo_overcloud_templates}')}" } IR_TRIPLEO_PUBLIC_VIPS_TLS_EVERYWHERE = '' if ( '' != '' ) { IR_TRIPLEO_PUBLIC_VIPS_TLS_EVERYWHERE = "-e @" } IR_TRIPLEO_NETWORK_OVERRIDE_DVR_NIC = '' if ( '' != '' ) { IR_TRIPLEO_NETWORK_OVERRIDE_DVR_NIC = "--network-override-dvr-nic " } IR_TRIPLEO_OVERCLOUD_SRV_VOLUME_SIZE = '' if ( '' != '' ) { IR_TRIPLEO_OVERCLOUD_SRV_VOLUME_SIZE = "--srv-volume-size " } NTP_POOL='' if ( 'clock.corp.redhat.com' != '' ) { if (16.2 < 14) { NTP_POOL='--ntp-server clock.corp.redhat.com' } else { NTP_POOL='--ntp-pool clock.corp.redhat.com' } } def OVERCLOUD_CONTAINER_IMAGES_URLS = '' if ( overcloud_container_images_urls != '' ) { OVERCLOUD_CONTAINER_IMAGES_URLS = "--container-images-urls " + overcloud_container_images_urls } if (! env.IGNORE_CVP_FAILSAFES.toBoolean() && env.IS_CVP.toBoolean() && OVERCLOUD_CONTAINER_IMAGES_URLS == '') { error(message: "exception in ir_tripleo_overcloud_deploy.groovy.inc: This jobs is a CVP one ('is_cvp: true' is set in jjb, IS_CVP in ir_tripleo_overcloud_deploy.groovy.inc) yet OVERCLOUD_CONTAINER_IMAGES_URLS is not defined - BUG in job configuration or runtime (groovy/shell scripts) suspected!... failing the build...") } MULTIRHEL_ENABLE = '' if ( 'false'.toBoolean() ) { MULTIRHEL_ENABLE = '--multirhel-enabled false' } MULTIRHEL_CONTAINER_IMAGE_PREPARE_FILE='' if ('' != '' && 'false'.toBoolean() ) { MULTIRHEL_CONTAINER_IMAGE_PREPARE_FILE = '--multirhel-overcloud-container-image-prepare-parameter-file ' } OVERCLOUD_DEPLOY_OVERRIDE_OPTIONS = env.OVERCLOUD_DEPLOY_OVERRIDE_OPTIONS ?: """--deployment-timeout 240 \ --config-heat OvercloudControllerOpenstackFlavor=controller \ --config-heat OvercloudControlFlavor=controller \ --config-heat ControllerOpenstackHostnameFormat='controller-%index%' \ --config-heat CephStorageHostnameFormat='ceph-%index%' \ --config-heat ComputeParameters.KernelArgs=tsx=off \ --config-heat ComputeHCIParameters.KernelArgs=tsx=off \ --config-heat ComputeParameters.KernelArgs=tsx=off \ --ceph-pgnum 128 \ --config-heat ComputeHCIParameters.NeutronL3AgentMode='dvr' """.trim() OVERCLOUD_DEPLOYMENT_FILES = env.OVERCLOUD_DEPLOYMENT_FILES ?: """composable_roles """.trim() if ("False".toBoolean()) { // TODO(eolivare): replace by --bgp-enabled when infrared patch 549816 is merged IR_TRIPLEO_OVERCLOUD_BGP_OPTIONS='--bgp yes' } else { IR_TRIPLEO_OVERCLOUD_BGP_OPTIONS='' } CIResourceCheck(CIResourcesThisBuild + CIResourcesThisStage) sh2 basename: 'ir-tripleo-overcloud-install', script: """ . $ir_venv/bin/activate infrared tripleo-overcloud \ -o overcloud-install.yml \ --version ${OVERCLOUD_VERSION} \ --deployment-files ${OVERCLOUD_DEPLOYMENT_FILES} \ --overcloud-templates ${IR_TRIPLEO_OVERCLOUD_TEMPLATES} \ --storage-backend ceph \ --overcloud-ssl yes \ --network-backend vxlan \ --network-protocol ipv4 \ --network-bgpvpn no \ --network-dvr True \ ${IR_TRIPLEO_OVERCLOUD_NETWORK_OVN} \ ${IR_TRIPLEO_OVERCLOUD_NETWORK_OVS} \ --network-l2gw no \ ${storage_config} --storage-external no \ --splitstack no \ --overcloud-debug yes \ --overcloud-fencing no \ --introspect no \ --tagging no \ --tht-roles yes \ --deploy yes \ ${IR_CONFIG_AND_REBOOT_HOST} \ ${IR_TRIPLEO_OVERCLOUD_STACK_NAME} \ ${CONTAINERS_SETTINGS} \ ${COMPOSABLE_ROLES} \ ${HEAT_TEMPLATE_BASE_DIR} \ ${HYBRID_DEPLOY} \ ${OVERCLOUD_SCRIPT} \ ${PUBLIC_VLAN_IP} \ ${IR_TRIPLEO_OVERCLOUD_GLANCE_BACKEND} \ ${IR_TRIPLEO_OVERCLOUD_CEPH_CLUSTER_NAME} \ ${IR_TRIPLEO_OVERCLOUD_COLLECT_ANSIBLE_FACTS} \ ${IR_TRIPLEO_OVERCLOUD_CONTAINER_IMAGES_PACKAGES} \ ${IR_TRIPLEO_REPOS_URLS} \ ${IR_TRIPLEO_UPDATE_REPO_NAME} \ ${IR_TRIPLEO_OVERCLOUD_NETWORK_NAME} \ ${IR_TRIPLEO_OVERCLOUD_UNDERCLOUD_PACKAGES} \ ${IR_TRIPLEO_REGISTRY_CEPH_TAG} \ ${IR_TRIPLEO_CEPH_OSD_TYPE} \ ${IR_TRIPLEO_CEPH_OSD_SCENARIO} \ ${IR_TRIPLEO_PUBLIC_VIPS_TLS_EVERYWHERE} \ ${IR_TRIPLEO_NETWORK_OVERRIDE_DVR_NIC} \ ${IR_TRIPLEO_OVERCLOUD_SRV_VOLUME_SIZE} \ ${NTP_POOL} \ ${OVERCLOUD_CONTAINER_IMAGES_URLS} \ ${IR_TRIPLEO_OVERCLOUD_BGP_OPTIONS} \ ${MULTIRHEL_ENABLE} \ ${MULTIRHEL_CONTAINER_IMAGE_PREPARE_FILE} \ ${OVERCLOUD_DEPLOY_OVERRIDE_OPTIONS} \ """ } env.OVERCLOUD_DEPLOYED_VERSION = OVERCLOUD_VERSION if ( "False".toBoolean() ) { archiveArtifacts allowEmptyArchive: true, artifacts: '**/ansible-facts/*' } } /** * Run cloud-config tasks. * * @requires ir_tripleo_overcloud_deploy.groovy.inc * * @param $ir_venv virtual environment where InfraRed is installed, * by install_ir.groovy.inc stage. * * @jjb_param {ir_cloud_config_tasks_list } List of post deployment * tasks to be performed (Tasks represent playbooks, which * are stored in the 'lookup_dir' folder in plugin directory * * @jjb_param {ir_tripleo_overcloud_deployment_files} Specifies templates * of the overcloud deployment. By default presets are 'virsh' * which are templates for virtual POC environment. * * @jjb_param {ir_tripleo_overcloud_public_subnet } Specifies the public subnet * configuration file. * * @jjb_param {ir_tripleo_overcloud_network_protocol } Overcloud network * protocol. * * @jjb_param {ir_cloud_config_scale_down_node_name } Name of the node * to remove * * @jjb_param {ir_cloud_config_scale_up_scale_nodes } List of compute nodes * to be added * * @jjb_param {ir_tripleo_overcloud_stack_name} Custom overcloud stack name * * @jjb_param {ir_cloud_config_tasks_options} optional custom cloud-config options * * @jjb_param {ir_tripleo_overcloud_network_name} Specifies the name of the public network * * @jjb_param {ir_tripleo_overcloud_provison_virsh_network_name} Specifies virsh network name * * @jjb_param {ir_post_commands} List of pairs host+command to be executed on infrared nodes * For example: * ir_post_commands: * - ['undercloud-0', 'ip add'] * - ['compute-0', 'cat /etc/os-release'] * */ stage2('Post tasks') { dir('infrared') { SCALE_DOWN_OPTIONS='' if ('create_external_network,forward_overcloud_dashboard,network_time,tempest_deployer_input'.contains('scale_down')) { SCALE_DOWN_OPTIONS += ' --node-name ' } SCALE_UP_OPTIONS='' if ('create_external_network,forward_overcloud_dashboard,network_time,tempest_deployer_input'.contains('scale_up')) { SCALE_UP_OPTIONS += ' --scale-nodes ' } IR_TRIPLEO_OVERCLOUD_STACK_NAME='' if ( 'qe-Cloud-0' != '' ) { IR_TRIPLEO_OVERCLOUD_STACK_NAME = '--overcloud-stack qe-Cloud-0' } HYBRID_DEPLOY='' if ('' != '' && 'no' == 'yes') { HYBRID_DEPLOY = "--hybrid True -e provison_virsh_network_name=br-ctlplane" } VLAN='' if ('' != '') { VLAN = "--external-vlan " } IR_TRIPLEO_OVERCLOUD_NETWORK_NAME='' if ( 'public' != '' ) { IR_TRIPLEO_OVERCLOUD_NETWORK_NAME = '--public-net-name public' } OVERCLOUD_VERSION = env.OSP_VERSION ?: '16.2' for (command in []) { sh2 """ . $ir_venv/bin/activate infrared ssh ${command.get(0)} "${command.get(1)}" """ } // task add_extra_overcloud_ssh_keys is added when the OSP version is // 17 or higher because an additional set of keys is needed when // connecting from the undercloud to the overcloud nodes using paramiko IR_CLOUD_CONFIG_TASKS = "--tasks create_external_network,forward_overcloud_dashboard,network_time,tempest_deployer_input" OSP_VERSION_FOR_CLOUD_CONFIG = env.OSP_VERSION ?: '16.2' if (OSP_VERSION_FOR_CLOUD_CONFIG.toDouble() >= 17 && !IR_CLOUD_CONFIG_TASKS.contains("add_extra_overcloud_ssh_keys")) { IR_CLOUD_CONFIG_TASKS += ",add_extra_overcloud_ssh_keys" } // External network is passed as shell ENV parameter in following code // since ir_tripleo_overcloud_deployment_files can contain // groovy variable computed in steps before (e.g. "$bm_templates_link"). // For detailse see https://projects.engineering.redhat.com/browse/RHOSINFRA-1841 . sh2 """ if [[ '${IR_CLOUD_CONFIG_TASKS}' == *"create_external_network"* ]]; then export EXTERNAL_NETWORK=" --deployment-files composable_roles \\ --public-subnet default_subnet \\ --network-protocol ipv4" fi . $ir_venv/bin/activate ir cloud-config \ -o cloud-config.yml \ ${IR_CLOUD_CONFIG_TASKS} \ ${IR_TRIPLEO_OVERCLOUD_STACK_NAME} \ ${SCALE_DOWN_OPTIONS} \ ${SCALE_UP_OPTIONS} \ ${HYBRID_DEPLOY} \ ${VLAN} \ ${IR_TRIPLEO_OVERCLOUD_NETWORK_NAME} \ --version ${OVERCLOUD_VERSION} \ \$EXTERNAL_NETWORK \ """, basename: "ir-cloud-config" if ('no' == 'yes') { sh2 basename: 'ir-tripleo-overcloud-redhat-ca', script: """ . $ir_venv/bin/activate infrared plugin add "https://gitlab.cee.redhat.com/rhos-ci/ir-redhat-CA.git" infrared install-redhat-ca """ } } } } def stage_inside_try_post() { /** * Export an image set from a virsh host using infrared. * * @requires install_ir.groovy.inc * * @requires ir_virsh_cleanup_provision.groovy.inc * * @requires OS_CLOUD and OS_STORAGE_URL environment * variables to be set and a clouds.yaml file * which includes the credentials for OS_CLOUD. * */ stage2('Export & Upload Image Set') { dir('infrared') { product_version = env.OSP_VERSION ?: "16.2" product_build = env.PRODUCT_BUILD ?: "passed_phase2" rhel_version = env.RHEL_VERSION ?: "rhel-8.4" IMAGE_SET_NAME = env.IMAGE_SET_NAME ?: "" IMAGE_SET_DOWNLOAD_PATH = env.IMAGE_SET_DOWNLOAD_PATH ?: env.WORKSPACE if (IMAGE_SET_NAME != "") { if (IMAGE_SET_NAME.contains("${product_build}")) { os_version = RHEL_VERSION.replaceAll('rhel-','').replaceAll('\\..*','') real_puddle = puddleDateFormatter(product_version, product_build, null, os_version) IMAGE_SET_NAME = IMAGE_SET_NAME.replace("${product_build}", real_puddle) echo "### Updated IMAGE_SET_NAME to $IMAGE_SET_NAME" } image_set_container = env.IMAGE_SET_CONTAINER ?: "s3://image_sets" image_set_cleanup = env.IMAGE_SET_CLEANUP ?: True sh2 """ . $ir_venv/bin/activate echo "### Exporting the current running environment as an image set." infrared virsh -v \ --host-address $host \ --host-key $HOME/.ssh/rhos-jenkins/id_rsa \ --virsh-snapshot-export yes \ --virsh-snapshot-path $IMAGE_SET_DOWNLOAD_PATH/$IMAGE_SET_NAME \ --virsh-snapshot-upload yes \ --virsh-snapshot-container $image_set_container \ --virsh-snapshot-cleanup $image_set_cleanup echo "### Quiesce the environment after the VM's were restarted." infrared virsh -v \ --host-address $host \ --host-key $HOME/.ssh/rhos-jenkins/id_rsa \ --virsh-snapshot-quiesce True \ --ansible-args="tags=quiesce" """ } } } } def stage_inside_try_2_pre() { } def stage_inside_try_2() { /** * OpenStack Undercloud Fast Forward Upgrade Content Switch. * * @requires install_ir.groovy.inc * * @param $ir_venv virtual environment where InfraRed is installed, * by install_ir.groovy.inc stage. * * @jjb_param {ir_tripleo_mirror} Specified mirror * */ stage2('Undercloud FFU Switch Content') { dir('infrared') { SELECTED_MIRROR = env.QE_MIRROR.trim() ?: "" def IR_FFU_EXTRA_OPTIONS = "" def IR_TRIPLEO_OS_UPGRADE_EXTRA_VARS = "" sh2 """ . $ir_venv/bin/activate infrared tripleo-undercloud \ -o undercloud-upgrade.yml \ --upgrade yes \ --mirror "${ SELECTED_MIRROR }" \ --build ${ env.FFU_PRODUCT_BUILD } \ --version 17.1 \ --ansible-args="tags=discover_python,upgrade_repos,undercloud_version_discovery,undercloud_containers,export" """, basename: 'ir-tripleo-undercloud-upgrade-repo' } buildMarksReverseSearch([["core_puddle", "pink"]]) } /** * Run OSP>=17 tripleo "pre-upgrade" validations * * @requires install_ir.groovy.inc * */ stage2('Pre-Upgrade Validations 17'){ dir('infrared') { IR_TRIPLEO_PRE_UPGRADE_VALIDATIONS_EXTRA_VARS='' IR_TRIPLEO_PRE_UPGRADE_VALIDATIONS_EXTRA_VARS = env.IR_TRIPLEO_PRE_UPGRADE_VALIDATIONS_EXTRA_VARS ?: """ """.trim() if ( IR_TRIPLEO_PRE_UPGRADE_VALIDATIONS_EXTRA_VARS != '' ) { IR_TRIPLEO_PRE_UPGRADE_VALIDATIONS_EXTRA_VARS = "--extra-vars ${IR_TRIPLEO_PRE_UPGRADE_VALIDATIONS_EXTRA_VARS}" } IR_TRIPLEO_OVERCLOUD_STACK_NAME = 'qe-Cloud-0' ?: "overcloud" try { sh2 """ . $ir_venv/bin/activate infrared ssh `ir workspace node-list -g undercloud -f json | jq -r .nodes[].name` \\ "source stackrc && \\ sudo dnf -y update openstack-tripleo-validations python3-validations-libs validations-common && \\ sudo chown stack:stack /var/lib/mistral/.ssh/tripleo-admin-rsa && \\ sudo cat /var/lib/mistral/${IR_TRIPLEO_OVERCLOUD_STACK_NAME}/tripleo-ansible-inventory.yaml > inventory.yaml && \\ validation run -i inventory.yaml --group pre-upgrade ${IR_TRIPLEO_PRE_UPGRADE_VALIDATIONS_EXTRA_VARS}" """ } catch (err) { unstable(message: "Error detected with ${env.STAGE_NAME}, printed below") echo err.getMessage() echo "Continuing with the upgrade..." } } } /** * OpenStack Undercloud Fast Forward Upgrade. * * @requires install_ir.groovy.inc * * @param $ir_venv virtual environment where InfraRed is installed, * by install_ir.groovy.inc stage. * * @jjb_param {ir_tripleo_overcloud_stack_name} Custom overcloud stack name * * @jjb_param {ir_tripleo_upgrade_ffu_version} ffu upgrade version */ stage2('Undercloud FFU Upgrade 17') { dir('infrared') { IR_TRIPLEO_OVERCLOUD_STACK_NAME='' if ( 'qe-Cloud-0' != '' ) { IR_TRIPLEO_OVERCLOUD_STACK_NAME = '--overcloud-stack qe-Cloud-0' } IR_FFU_EXTRA_OPTIONS = "" if (env.UPGRADE_WORKAROUNDS) { IR_FFU_EXTRA_OPTIONS+=' --upgrade-ffu-workarounds true -e @workarounds.yaml' } sh2 """ unset IR_REGISTRY_CEPH_NAMESPACE IR_REGISTRY_CEPH_IMAGE IR_REGISTRY_CEPH_TAG . $ir_venv/bin/activate infrared tripleo-upgrade \ --undercloud-ffu-upgrade yes \ --undercloud-ffu-releases '16.2,-,17.1' \ ${IR_TRIPLEO_OVERCLOUD_STACK_NAME} \ ${ IR_FFU_EXTRA_OPTIONS } \ """, basename: 'ir-tripleo-undercloud-ffu-upgrade', maxLines: -1 } buildMarksReverseSearch([["core_puddle", "pink"]]) } /** * OpenStack OverCloud Fast Forward Upgrade. * * Fast Forward Upgrade prepare step. * * @requires install_ir.groovy.inc * * @param $ir_venv virtual environment where InfraRed is installed, * by install_ir.groovy.inc stage. * * @jjb_param {ir_tripleo_overcloud_stack_name} Custom overcloud stack name * * @jjb_param {ir_tripleo_upgrade_ffu_version} ffu upgrade version * * @jjb_param {ir_tripleo_overcloud_update_files} Specifies overcloud deploy files for * updates if name different than virt * * @jjb_param {ir_tripleo_overcloud_deployment_files} Specifies templates * of the overcloud deployment. By default presets are 'virsh' * which are templates for virtual POC environment. * Please refer to InfraRed 'tripleo-overcloud' plugin for more * information. * */ stage2('Overcloud FFU prepare') { dir('infrared') { IR_TRIPLEO_OVERCLOUD_STACK_PARAM='' IR_TRIPLEO_OVERCLOUD_STACK_NAME = "overcloud" if ( 'qe-Cloud-0' != '' ) { IR_TRIPLEO_OVERCLOUD_STACK_NAME = 'qe-Cloud-0' IR_TRIPLEO_OVERCLOUD_STACK_PARAM = '--overcloud-stack qe-Cloud-0' } IR_FFU_EXTRA_OPTIONS='' if (env.UPGRADE_WORKAROUNDS) { IR_FFU_EXTRA_OPTIONS+=' --upgrade-ffu-workarounds yes -e @workarounds.yaml' } if ( 16.2 > 13 ) { IR_FFU_EXTRA_OPTIONS+=' --upgrade-workloadcleanup yes' } if ( ( 16.2 >= 13 ) && ( 16.2 <= 16 ) ) { IR_FFU_EXTRA_OPTIONS+=' -e upgrade_prepare_extra_params="/home/stack/overcloud-params.yaml"' } if ( 16.2 > 16 ) { // for now we provide overcloud adoption envs in WA into ~/tmp IR_FFU_EXTRA_OPTIONS+=" -e upgrade_prepare_extra_params=\"/home/stack/overcloud-params.yaml,/home/stack/overcloud-deploy/${IR_TRIPLEO_OVERCLOUD_STACK_NAME}/${IR_TRIPLEO_OVERCLOUD_STACK_NAME}-network-environment.yaml,/home/stack/tmp/baremetal_deployment.yaml,/home/stack/tmp/generated-networks-deployed.yaml,/home/stack/tmp/generated-vip-deployed.yaml\"" // Removal of heat-admin was introduced into 16.2.4 so we still use it just to be sure IR_FFU_EXTRA_OPTIONS+=' --overcloud-ssh-user heat-admin' } if ( '' != '' ) { IR_FFU_EXTRA_OPTIONS+=' --overcloud-ffu-replace-env-files ""' } IR_TRIPLEO_OVERCLOUD_DEPLOYMENT_FILES = env.OVERCLOUD_DEPLOYMENT_FILES ?: """composable_roles """.trim() if ('' != '') { IR_TRIPLEO_OVERCLOUD_DEPLOYMENT_FILES = "" } IR_TRIPLEO_UPGRADE_VALIDATION='' if ( '' != '' ) { IR_TRIPLEO_UPGRADE_VALIDATION = '--run-validations ' } IR_TRIPLEO_UPGRADE_SKIPLIST_VALIDATIONS='' if ( '' != '' ) { IR_TRIPLEO_UPGRADE_SKIPLIST_VALIDATIONS = '--skiplist-validations ' } IR_TRIPLEO_UPGRADE_PREPARE_VALIDATIONS_EXTRA_ARGS='' if ( '' != '' ) { IR_TRIPLEO_UPGRADE_PREPARE_VALIDATIONS_EXTRA_ARGS = '--validations-extra-args ""' } IR_FURIOUS_UPGRADE_OPTIONS='' if (env.FURIOUS_UPGRADE == 'true') { IR_FURIOUS_UPGRADE_OPTIONS=' --config-heat AllInOneUpgrade=true ' } sh2 """ . $ir_venv/bin/activate infrared tripleo-upgrade \ --deployment-files ${IR_TRIPLEO_OVERCLOUD_DEPLOYMENT_FILES} \ --overcloud-ffu-upgrade yes \ --overcloud-ffu-releases '16.2,-,17.1' \ --upgrade-floatingip-check yes \ --upgrade-workload yes \ ${IR_TRIPLEO_OVERCLOUD_STACK_PARAM} \ ${IR_FFU_EXTRA_OPTIONS} \ ${ IR_TRIPLEO_UPGRADE_VALIDATION } \ ${ IR_TRIPLEO_UPGRADE_SKIPLIST_VALIDATIONS } \ ${ IR_TRIPLEO_UPGRADE_PREPARE_VALIDATIONS_EXTRA_ARGS } \ --ansible-args="skip-tags=create_ffu_scripts,ffu_overcloud_run,ffu_overcloud_upgrade_role,ffu_overcloud_ceph,ffu_overcloud_converge,ffu_overcloud_post" \ ${IR_FURIOUS_UPGRADE_OPTIONS} \ \ """, basename: 'ir-tripleo-ffu-prepare', maxLines: -1 } } /** * OpenStack OverCloud Fast Forward Upgrade. * * Run the FFU steps for upgrading Ceph * * @requires install_ir.groovy.inc * * @param $ir_venv virtual environment where InfraRed is installed, * by install_ir.groovy.inc stage. * * @jjb_param {ir_tripleo_overcloud_stack_name} Custom overcloud stack name * * @jjb_param {ir_tripleo_overcloud_update_files} Specifies overcloud deploy files for * updates if name different than virt * * @jjb_param {ir_tripleo_upgrade_ffu_version} ffu upgrade version * * @jjb_param {ir_tripleo_overcloud_deployment_files} Specifies templates * of the overcloud deployment. By default presets are 'virsh' * which are templates for virtual POC environment. * Please refer to InfraRed 'tripleo-overcloud' plugin for more * information. * */ stage2('Overcloud FFU Ceph adopt') { dir('infrared') { IR_TRIPLEO_OVERCLOUD_STACK_PARAM='' IR_TRIPLEO_OVERCLOUD_STACK_NAME = "overcloud" if ( 'qe-Cloud-0' != '' ) { IR_TRIPLEO_OVERCLOUD_STACK_NAME = 'qe-Cloud-0' IR_TRIPLEO_OVERCLOUD_STACK_PARAM = '--overcloud-stack qe-Cloud-0' } IR_FFU_EXTRA_OPTIONS+=' --upgrade-workloadcleanup yes' // for now we provide overcloud adoption envs in WA into ~/tmp IR_FFU_EXTRA_OPTIONS+=" -e upgrade_prepare_extra_params=\"/home/stack/overcloud-params.yaml,/home/stack/overcloud-deploy/${IR_TRIPLEO_OVERCLOUD_STACK_NAME}/${IR_TRIPLEO_OVERCLOUD_STACK_NAME}-network-environment.yaml,/home/stack/tmp/baremetal_deployment.yaml,/home/stack/tmp/generated-networks-deployed.yaml,/home/stack/tmp/generated-vip-deployed.yaml,/usr/share/openstack-tripleo-heat-templates/environments/cephadm/cephadm-rbd-only.yaml,/usr/share/openstack-tripleo-heat-templates/environments/nova-hw-machine-type-upgrade.yaml\" -e \"ceph_upgrade_skip_tags=ceph_health,opendev-validation,ceph_ansible_remote_tmp\"" IR_TRIPLEO_OVERCLOUD_DEPLOYMENT_FILES = env.OVERCLOUD_DEPLOYMENT_FILES ?: """composable_roles """.trim() if ('' != '') { IR_TRIPLEO_OVERCLOUD_DEPLOYMENT_FILES = "" } sh2 """ . $ir_venv/bin/activate infrared tripleo-upgrade \ --deployment-files ${IR_TRIPLEO_OVERCLOUD_DEPLOYMENT_FILES} \ --overcloud-ffu-upgrade yes \ --overcloud-ffu-releases '16.2,-,17.1' \ --upgrade-floatingip-check yes \ --upgrade-workload yes \ ${IR_TRIPLEO_OVERCLOUD_STACK_PARAM} \ ${IR_FFU_EXTRA_OPTIONS} \ --ansible-args="skip-tags=create_ffu_scripts,ffu_overcloud_prepare,ffu_overcloud_run,ffu_overcloud_upgrade_role,ffu_overcloud_converge,ffu_overcloud_post" \ infrared ssh undercloud-0 'sh overcloud_upgrade_prepare.sh' """, basename: 'ir-tripleo-ffu-ceph', maxLines: -1 } } /** * OpenStack OverCloud Fast Forward Upgrade for OSP17. * * Run the FFU steps for upgrading the controllers and controlplane nodes. * * @requires install_ir.groovy.inc * * @param $ir_venv virtual environment where InfraRed is installed, * by install_ir.groovy.inc stage. * * @jjb_param {ir_tripleo_overcloud_stack_name} Custom overcloud stack name * * @jjb_param {ir_tripleo_upgrade_ffu_version} ffu upgrade version */ stage2('Overcloud FFU') { timeout(time: "600".toInteger(), unit: 'MINUTES') { dir('infrared') { def FLOATING_IP_CHECK = "yes" IR_TRIPLEO_OVERCLOUD_STACK_PARAM='' IR_TRIPLEO_OVERCLOUD_STACK_NAME = "overcloud" if ( 'qe-Cloud-0' != '' ) { IR_TRIPLEO_OVERCLOUD_STACK_NAME = 'qe-Cloud-0' IR_TRIPLEO_OVERCLOUD_STACK_PARAM = '--overcloud-stack qe-Cloud-0' } IR_FFU_EXTRA_OPTIONS='' if (env.UPGRADE_WORKAROUNDS) { IR_FFU_EXTRA_OPTIONS+=' --upgrade-ffu-workarounds yes -e @workarounds.yaml' } IR_FFU_EXTRA_OPTIONS+=' --upgrade-workloadcleanup yes' // for now we provide overcloud adoption envs in WA into ~/tmp IR_FFU_EXTRA_OPTIONS+=" -e upgrade_prepare_extra_params=\"/home/stack/overcloud-params.yaml,/home/stack/overcloud-deploy/${IR_TRIPLEO_OVERCLOUD_STACK_NAME}/${IR_TRIPLEO_OVERCLOUD_STACK_NAME}-network-environment.yaml,/home/stack/tmp/baremetal_deployment.yaml,/home/stack/tmp/generated-networks-deployed.yaml,/home/stack/tmp/generated-vip-deployed.yaml,/usr/share/openstack-tripleo-heat-templates/environments/cephadm/cephadm-rbd-only.yaml,/usr/share/openstack-tripleo-heat-templates/environments/nova-hw-machine-type-upgrade.yaml\" -e tripleo_upgrade_debug=True " IR_FFU_SKIP_TAGS = '--ansible-args="skip-tags=create_ffu_scripts,ffu_overcloud_prepare,ffu_overcloud_upgrade_compute,ffu_overcloud_ceph,ffu_overcloud_converge,ffu_overcloud_post,ffu_overcloud_system_upgrade"' IR_TRIPLEO_OVERCLOUD_DEPLOYMENT_FILES = env.OVERCLOUD_DEPLOYMENT_FILES ?: """composable_roles """.trim() if ('' != '') { IR_TRIPLEO_OVERCLOUD_DEPLOYMENT_FILES = "" } IR_FFU_EXTRA_OPTIONS+=' --fast-and-furious true' sh2 """ . $ir_venv/bin/activate infrared tripleo-upgrade \ --deployment-files ${IR_TRIPLEO_OVERCLOUD_DEPLOYMENT_FILES} \ --overcloud-ffu-upgrade yes \ --overcloud-ffu-releases '16.2,-,17.1' \ --upgrade-floatingip-check ${FLOATING_IP_CHECK} \ --upgrade-workload yes \ ${IR_TRIPLEO_OVERCLOUD_STACK_PARAM} \ ${IR_FFU_EXTRA_OPTIONS} \ ${IR_FFU_SKIP_TAGS} \ """, basename: 'ir-tripleo-ffu-controller', maxLines: -1 } } } /** * OpenStack Undercloud Fast Forward System Upgrade. * * @requires install_ir.groovy.inc * * @param $ir_venv virtual environment where InfraRed is installed, * by install_ir.groovy.inc stage. * */ stage2('Undercloud FFU System Upgrade 17') { dir('infrared') { /* Operating system upgrade for the Undercloud */ IR_FFU_EXTRA_OPTIONS = "" if (env.UPGRADE_WORKAROUNDS) { IR_FFU_EXTRA_OPTIONS+=' --upgrade-ffu-workarounds true -e @workarounds.yaml' } sh2 """ unset IR_REGISTRY_CEPH_NAMESPACE IR_REGISTRY_CEPH_IMAGE IR_REGISTRY_CEPH_TAG . $ir_venv/bin/activate infrared tripleo-upgrade \ --undercloud-ffu-os-upgrade yes \ --upgrade-ffu-workarounds true -e @workarounds.yaml \ --skiplist-validations repos \ -e leapp_unsubscribed=True -e leapp_skip_release_check=True """, basename: 'ir-tripleo-undercloud-ffu-system-upgrade', maxLines: -1 } } /** * OpenStack OverCloud Ctlplane Fast Forward System Upgrade for OSP17. * * Run the FFU steps for upgrading the controllers and controlplane nodes. * * @requires install_ir.groovy.inc * * @param $ir_venv virtual environment where InfraRed is installed, * by install_ir.groovy.inc stage. * * @jjb_param {ir_tripleo_overcloud_stack_name} Custom overcloud stack name * * @jjb_param {ir_tripleo_upgrade_ffu_version} ffu upgrade version */ stage2('Overcloud Ctlplane System Upgrade FFU OSP17') { timeout(time: "600".toInteger(), unit: 'MINUTES') { dir('infrared') { def FLOATING_IP_CHECK = "yes" IR_TRIPLEO_OVERCLOUD_STACK_PARAM='' IR_TRIPLEO_OVERCLOUD_STACK_NAME = "overcloud" if ( 'qe-Cloud-0' != '' ) { IR_TRIPLEO_OVERCLOUD_STACK_NAME = 'qe-Cloud-0' IR_TRIPLEO_OVERCLOUD_STACK_PARAM = '--overcloud-stack qe-Cloud-0' } IR_FFU_EXTRA_OPTIONS='' if (env.UPGRADE_WORKAROUNDS) { IR_FFU_EXTRA_OPTIONS+=' --upgrade-ffu-workarounds yes -e @workarounds.yaml' } IR_FFU_EXTRA_OPTIONS+=' --upgrade-workloadcleanup yes' // for now we provide overcloud adoption envs in WA into ~/tmp IR_FFU_EXTRA_OPTIONS+=" -e upgrade_prepare_extra_params=\"/home/stack/overcloud-params.yaml,/home/stack/overcloud-deploy/${IR_TRIPLEO_OVERCLOUD_STACK_NAME}/${IR_TRIPLEO_OVERCLOUD_STACK_NAME}-network-environment.yaml,/home/stack/tmp/baremetal_deployment.yaml,/home/stack/tmp/generated-networks-deployed.yaml,/home/stack/tmp/generated-vip-deployed.yaml,/usr/share/openstack-tripleo-heat-templates/environments/cephadm/cephadm-rbd-only.yaml,/usr/share/openstack-tripleo-heat-templates/environments/nova-hw-machine-type-upgrade.yaml,/home/stack/system_upgrade.yaml\" -e tripleo_upgrade_debug=True " IR_FFU_SKIP_TAGS = '--ansible-args="skip-tags=ffu_overcloud_run_compute,ffu_overcloud_upgrade_run,ffu_overcloud_ceph,ffu_overcloud_converge,ffu_overcloud_post"' IR_TRIPLEO_OVERCLOUD_DEPLOYMENT_FILES = env.OVERCLOUD_DEPLOYMENT_FILES ?: """composable_roles """.trim() if ('' != '') { IR_TRIPLEO_OVERCLOUD_DEPLOYMENT_FILES = "" } // FIXME: split into upgrade prepare -> system upgrade two tasks // FIXME: use separate WA file to have clean application // FIXME: tripleo-upgrade needs ctlplane tag sh2 """ . $ir_venv/bin/activate infrared tripleo-upgrade \ --deployment-files ${IR_TRIPLEO_OVERCLOUD_DEPLOYMENT_FILES} \ --overcloud-ffu-upgrade yes \ --overcloud-ffu-releases '16.2,-,17.1' \ --upgrade-floatingip-check ${FLOATING_IP_CHECK} \ --upgrade-workload yes \ ${IR_TRIPLEO_OVERCLOUD_STACK_PARAM} \ ${IR_FFU_EXTRA_OPTIONS} \ ${IR_FFU_SKIP_TAGS} \ """, basename: 'ir-tripleo-ffu-controller', maxLines: -1 } } } /** * Run OSP>=16.x tripleo "post-upgrade" validations * * @requires install_ir.groovy.inc * */ stage2('Post-Upgrade Validations'){ dir('infrared') { IR_TRIPLEO_POST_UPGRADE_VALIDATIONS_EXTRA_VARS='' IR_TRIPLEO_POST_UPGRADE_VALIDATIONS_EXTRA_VARS = env.IR_TRIPLEO_POST_UPGRADE_VALIDATIONS_EXTRA_VARS ?: """ """.trim() if ( IR_TRIPLEO_POST_UPGRADE_VALIDATIONS_EXTRA_VARS != '' ) { IR_TRIPLEO_POST_UPGRADE_VALIDATIONS_EXTRA_VARS = "--extra-vars ${IR_TRIPLEO_POST_UPGRADE_VALIDATIONS_EXTRA_VARS}" } IR_TRIPLEO_OVERCLOUD_STACK_NAME='' if ( 'qe-Cloud-0' != '' ) { IR_TRIPLEO_OVERCLOUD_STACK_NAME = '--stack qe-Cloud-0' } try { if ( ("16.2" as Float).intValue() == 16 ) { sh2 """ . $ir_venv/bin/activate infrared ssh `ir workspace node-list -g undercloud -f json | jq -r .nodes[].name` \\ sudo cat /var/lib/mistral/${IR_TRIPLEO_OVERCLOUD_STACK_NAME}/tripleo-ansible-inventory.yaml > inventory.yaml && \\ validation run -i inventory.yaml --group post-upgrade ${IR_TRIPLEO_PRE_UPGRADE_VALIDATIONS_EXTRA_VARS} """ } else if ( ("16.2" as Float).intValue() == 17 ) { sh2 """ . $ir_venv/bin/activate infrared ssh `ir workspace node-list -g undercloud -f json | jq -r .nodes[].name` \\ validation run -i overcloud-deploy/${IR_TRIPLEO_OVERCLOUD_STACK_NAME}/tripleo-ansible-inventory.yaml --group post-upgrade ${IR_TRIPLEO_PRE_UPGRADE_VALIDATIONS_EXTRA_VARS} """ } else { sh2 """ . $ir_venv/bin/activate infrared ssh `ir workspace node-list -g undercloud -f json | jq -r .nodes[].name` \\ "source stackrc && \\ openstack tripleo validator run \ --group post-upgrade ${IR_TRIPLEO_OVERCLOUD_STACK_NAME} \ ${IR_TRIPLEO_POST_UPGRADE_VALIDATIONS_EXTRA_VARS}" """ } } catch (err) { unstable(message: "Error detected with ${env.STAGE_NAME}, printed below") echo err.getMessage() echo "Continuing with the execution..." } } } } def stage_inside_try_2_post() { /** * Run functional tests for stf client-side * * @requires install_ir.groovy.inc * * @param $ir_venv virtual environment where InfraRed is installed, * by install_ir.groovy.inc stage. * */ stage2('Run STF functional tests') { if ('performance,cinder_backup_ap,l3_fip_qos'.contains('stf')) { dir('infrared') { dir('playbooks'){ git branch: "master", url: 'https://github.com/infrawatch/functional-tests.git' sh(script:"curl --insecure https://gitlab.cee.redhat.com/cee_ops/quicklab/raw/master/docs/quicklab.key --output quicklab.key;chmod 600 quicklab.key",returnStdout: false) } sh2 basename: 'stf-tests', script: """ . $ir_venv/bin/activate ANSIBLE_CALLBACK_WHITELIST=custom_logger ansible-playbook -i `ir workspace inventory` -i playbooks/default.inv playbooks/stf_functional_tests.yml --tags performance,cinder_backup_ap,l3_fip_qos,OSP16.2 ansible localhost -vvv \ -m copy -a "src=\$WORKSPACE/infrared/test_run_result.out \ dest=\$WORKSPACE/" """ } archiveArtifacts artifacts: 'test_run_result.out' currentBuild.description = (currentBuild.description ?: '') + "
\nSTF Test Results\n" } else { println('ir_tripleo_overcloud_templates does not contain stf, skipping stage Run STF functional tests') } } /** * Prepare to run novajoin tempest tests on the undercloud * * @requires install_ir.groovy.inc * * @param $ir_venv virtual environment where InfraRed is installed, * by install_ir.groovy.inc stage. * */ stage2('Prepare to run novajoin tempest tests') { dir('infrared') { if ( '' == 'novajoin' || 'sanity,smoke,cinder_backup' == 'novajoin' ) { sh2 basename: 'python-novajoin-tempest-test', script: """ . $ir_venv/bin/activate curl -k https://gitlab.cee.redhat.com/OSP-DFG-security/automation/raw/master/playbooks/python-novajoin_tempest_prep.yml -O ansible-playbook -i `ir workspace inventory` python-novajoin_tempest_prep.yml """ if ( 16.2 < 17 ) { sh2 basename: 'python-novajoin-tempest-test-rcfile1', script: """ . $ir_venv/bin/activate myworkspace=`dirname \$(ir workspace inventory)` cp \$myworkspace/stackrc /tmp/novajoin_stackrc """ } } else { println("ir_tempest_tests not set to novajoin skipping stage") } } } /** * Runs Tempest tests using InfraRed. * * @requires install_ir.groovy.inc * * @jjb_param sanity,smoke,cinder_backup list of tests to be executed. This is only * applicable if infrared tester is selected. * Please refer to InfraRed 'tempest' plugin for more information. * * @jjb_param tripleo Installer that was used to * provision OpenStack. Please refer to InfraRed 'tempest' plugin * for more information. * * @jjb_param {ir_tempest_second_guest_image} optional * Guest image to use for this stage. * * @jjb_param {ir_tempest_second_run_override_options} optional * override options that can be specified by any job definition, * such as multiple '--config-options' * * @jjb_param {ir_tempest_second_tests} optional * override options that can be specified by any job definition, * such as multiple '--config-options' * * @jjb_param {ir_tempest_second_config} optional * create tempest configuration file * * @jjb_param if applicable (git source only) * specifies git revision to be used for Tempest Test Suite * * @param $ir_venv virtual environment where InfraRed is installed, * by install_ir.groovy.inc stage. * */ stage2('Second Tempest Run') { def FLV_IJECT = "" TEMPEST_IMAGE_OPT = "" TEMPEST_GUEST_IMAGE = "cirros" ?: "cirros" OPENSTACK_VERSION = "17.1" ?: "16.2" if ( TEMPEST_GUEST_IMAGE.contains("rhel") || TEMPEST_GUEST_IMAGE.contains("windows") ) { def ir_volume_size = "" ?: "10" def overcloud_rc = 'overcloudrc' if ( '' != '' ) { overcloud_rc = '' } else if ( 'qe-Cloud-0' != '' ) { overcloud_rc = 'qe-Cloud-0rc' } def tempest_img_list = [ 'rhel-9.2':'http://download.devel.redhat.com/rhel-9/rel-eng/RHEL-9/RHEL-9.2.0-Beta-1.0/compose/BaseOS/x86_64/images/rhel-guest-image-9.2-20230306.4.x86_64.qcow2', 'rhel-9.1':'http://download.devel.redhat.com/rhel-9/rel-eng/RHEL-9/RHEL-9.1.0-20221027.3/compose/BaseOS/x86_64/images/rhel-guest-image-9.1-20221027.3.x86_64.qcow2', 'rhel-9.0':'http://download.eng.tlv.redhat.com/rhel-9/rel-eng/RHEL-9/latest-RHEL-9.0.0/compose/BaseOS/x86_64/images/rhel-guest-image-9.0-20220420.0.x86_64.qcow2', 'rhel-8.5':'http://download.eng.tlv.redhat.com/rhel-8/rel-eng/RHEL-8/latest-RHEL-8.5.0/compose/BaseOS/x86_64/images/rhel-guest-image-8.5-1174.x86_64.qcow2', 'rhel-8.4':'http://download.devel.redhat.com/rhel-8/rel-eng/RHEL-8/latest-RHEL-8.4.0/compose/BaseOS/x86_64/images/rhel-guest-image-8.4-992.x86_64.qcow2', 'rhel-8.3':'http://download.devel.redhat.com/rhel-8/rel-eng/RHEL-8/latest-RHEL-8.3.0/compose/BaseOS/x86_64/images/rhel-guest-image-8.3-401.x86_64.qcow2', 'rhel-8.2':'http://download.devel.redhat.com/rhel-8/rel-eng/RHEL-8/latest-RHEL-8.2.0/compose/BaseOS/x86_64/images/rhel-guest-image-8.2-290.x86_64.qcow2', 'rhel-8.1':'http://download.devel.redhat.com/rhel-8/rel-eng/RHEL-8/latest-RHEL-8.1.0/compose/BaseOS/x86_64/images/rhel-guest-image-8.1-263.x86_64.qcow2', 'rhel-8.0':'http://download.devel.redhat.com/rhel-8/rel-eng/RHEL-8/latest-RHEL-8.0.0/compose/BaseOS/x86_64/images/rhel-guest-image-8.0-1854.x86_64.qcow2', 'rhel-7.9':'http://download.devel.redhat.com/rhel-7/rel-eng/RHEL-7/latest-RHEL-7.9/compose/Server/x86_64/images/rhel-guest-image-7.9-30.x86_64.qcow2', 'rhel-7.8':'http://download.devel.redhat.com/rhel-7/rel-eng/RHEL-7/latest-RHEL-7.8/compose/Server/x86_64/images/rhel-guest-image-7.8-41.x86_64.qcow2', 'rhel-7.7':'http://download.devel.redhat.com/rhel-7/rel-eng/RHEL-7/latest-RHEL-7.7/compose/Server/x86_64/images/rhel-guest-image-7.7-261.x86_64.qcow2', 'rhel-7.6':'http://download.devel.redhat.com/rhel-7/rel-eng/RHEL-7/latest-RHEL-7.6/compose/Server/x86_64/images/rhel-guest-image-7.6-210.x86_64.qcow2', 'rhel-7.5':'http://download.devel.redhat.com/rhel-7/rel-eng/RHEL-7/RHEL-7.5-RC-1.3/compose/Server/x86_64/images/rhel-guest-image-7.5-146.x86_64.qcow2', 'rhel-7.4':'http://download.devel.redhat.com/rhel-7/rel-eng/RHEL-7/RHEL-7.4-RC-1.2/compose/Server/x86_64/images/rhel-guest-image-7.4-191.x86_64.qcow2', 'rhel-7.3':'http://download.devel.redhat.com/pub/rhel/released/RHEL-7/7.3/Server/x86_64/images/rhel-guest-image-7.3-33.x86_64.qcow2', 'windows2019':'http://10.0.152.55/dfg-compute-images/win2019.qcow2', 'windows2022':'http://10.0.152.55/dfg-compute-images/win2022.qcow2', 'rhel-8.3-ppc64le':'http://download.devel.redhat.com/rhel-8/rel-eng/RHEL-8/latest-RHEL-8.3.0/compose/BaseOS/ppc64le/images/rhel-guest-image-8.3-400.ppc64le.qcow2', ] def tempest_guest_image_url = tempest_img_list[TEMPEST_GUEST_IMAGE] ?: "http://download.devel.redhat.com/rhel-8/rel-eng/RHEL-8/latest-RHEL-8.3.0/compose/BaseOS/x86_64/images/rhel-guest-image-8.3-401.x86_64.qcow2" dir('infrared') { // Create two flavors: // 1. Main testing flavor // 2. Secondary flavor ('alt') for resize tests (otherwise we get 'When resizing, instances must change flavor!') // * First, deleting previous flavors with same ID, otherwise, the flavors creation commands will be failed when running this stage multiple times // TODO: Move the flavors creation part to InfraRed tempest plugin sh """ . $ir_venv/bin/activate ansible -o -i `infrared workspace inventory` undercloud -m shell -a "source ~/${overcloud_rc} && (openstack flavor delete 200 || true) && openstack flavor create --id 200 --ram 2048 --disk ${ir_volume_size} --vcpus 2 guest_image" ansible -o -i `infrared workspace inventory` undercloud -m shell -a "source ~/${overcloud_rc} && (openstack flavor delete 201 || true) && openstack flavor create --id 201 --ram 2048 --disk ${ir_volume_size} --vcpus 2 guest_image_alt" """ // NOTE: 'volume_size' needs to be bigger than default 1GB when using RHEL/Windows guest // otherwise 'volume' tempest tests fail with: // "If Image virtual size is 10GB and doesn't fit in a volume of size 1GB" FLV_IJECT="--config-options compute.flavor_ref=200 --config-options compute.flavor_ref_alt=201 --config-options compute.image_ssh_user=cloud-user --config-options validation.image_ssh_user=cloud-user --config-options scenario.dhcp_client=dhclient --config-options volume.volume_size=${ir_volume_size}" TEMPEST_IMAGE_OPT="--image ${tempest_guest_image_url}" } if ( TEMPEST_GUEST_IMAGE == 'rhel' ) { IR_VIRSH_IMAGE=sh(returnStdout: true, script: 'echo ${IR_VIRSH_IMAGE:-}').trim() TEMPEST_IMAGE_OPT="--image ${IR_VIRSH_IMAGE}" } } else if ( TEMPEST_GUEST_IMAGE == 'cirros' ) { def cirros_img_url = siteURL('http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img') def cirros_alt_img_url = siteURL('http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-uec.tar.gz') // There is known issue with cirros-0.4 and OSP17 env - see https://issues.redhat.com/browse/RHOSINFRA-4643 if ( OPENSTACK_VERSION.toDouble() >= 17 ) { cirros_alt_img_url = siteURL('http://rhos-qe-mirror.lab.eng.tlv2.redhat.com/images/cirros-0.5.2-x86_64-uec.tar.gz') cirros_img_url = siteURL('http://rhos-qe-mirror.lab.eng.tlv2.redhat.com/images/cirros-0.5.2-x86_64-disk.img') } if ( OPENSTACK_VERSION.toDouble() <= 10 ){ TEMPEST_IMAGE_OPT = "--image " + cirros_img_url + " --config-options image.http_image=\'" + cirros_alt_img_url + "\'" } else { TEMPEST_IMAGE_OPT = "--image " + cirros_img_url } } TEMPEST_SECOND_RUN_OVERRIDE_OPTIONS = env.TEMPEST_SECOND_RUN_OVERRIDE_OPTIONS ?: siteURL("""--threads 8 --cleanup yes \ --config-options service_available.mistral='false' """).trim() if ( 'junitxml,html' != '' ) { RESULTS_FORMATS='--results-formats junitxml,html' } // add the extra keys generated during "Post tasks" stage when OSP release // is 17 or later - these keys are needed to connect from the undercloud to // the overcloud nodes using paramiko if (OPENSTACK_VERSION.toDouble() >= 17 && !TEMPEST_SECOND_RUN_OVERRIDE_OPTIONS.contains("/home/stack/.ssh/id_extra_keys")) { TEMPEST_SECOND_RUN_OVERRIDE_OPTIONS += " --config-options ssh_credentials.overcloud_key_file='/home/stack/.ssh/id_extra_keys' --config-options whitebox.ctlplane_ssh_private_key_path='/home/stack/.ssh/id_extra_keys'" } // This is needed until the downstream tempest plugins switch to tripleo-admin on 17 (by branching) if (OPENSTACK_VERSION.toDouble() >= 17) { // Note: order is important, this allows to override overcloud admin user name using TEMPEST_SECOND_RUN_OVERRIDE_OPTIONS when it's needed TEMPEST_SECOND_RUN_OVERRIDE_OPTIONS = "--config-options ssh_credentials.overcloud_user=tripleo-admin " + TEMPEST_SECOND_RUN_OVERRIDE_OPTIONS } TEMPEST_TESTS = 'sanity,smoke,cinder_backup' if ( '' != '' ) { TEMPEST_TESTS = '' } TEMPEST_CONFIGURATION = '' if ( 'yes' != '' ) { TEMPEST_CONFIGURATION = '--tempest-config yes' } TEMPEST_REVISION = '' if ( '' != '' ) { TEMPEST_REVISION = '--revision ' } TEMPEST_DIR = '--dir tempest-dir' if ( '17.1' != '' ) { TEMPEST_DIR = '--dir tempest_17.1' } if ("false".toBoolean()) { log "Squashing double curly braces in Tempest override options", level: "DEBUG" TEMPEST_SECOND_RUN_OVERRIDE_OPTIONS = TEMPEST_SECOND_RUN_OVERRIDE_OPTIONS.replaceAll("\\{\\{", "\\{").replaceAll("\\}\\}", "\\}") } dir('infrared') { sh2 """ . $ir_venv/bin/activate infrared tempest -v \ -o test2.yml \ --setup rpm \ --openstack-installer tripleo \ --openstack-version ${OPENSTACK_VERSION} \ ${TEMPEST_IMAGE_OPT} \ --openstack-installer tripleo $FLV_IJECT \ --tests ${TEMPEST_TESTS} \ ${TEMPEST_DIR} \ ${RESULTS_FORMATS} \ ${TEMPEST_CONFIGURATION} \ ${TEMPEST_REVISION} \ ${TEMPEST_SECOND_RUN_OVERRIDE_OPTIONS} """, basename: 'ir-tempest-2', echoScript: true env.IR_TEMPEST_RUN_FINISHED = 'True' } } } // ****************************************** // Catch block stages // ****************************************** def stage_catch() { /** * This code is executed in the catch statement when a job is finished * and marks the respective multiple test cases as failed. * The testcases are expected to be in a comma separated string. */ POLARION_TEST_RUN_ID = env.POLARION_TEST_RUN_ID ?: '20221208-0905' POLARION_TEST_CASE_ID = getPolarionTestCase("RHELOSP-148121", "") if ( ! POLARION_TEST_RUN_ID.empty ) { if ( POLARION_TEST_CASE_ID.empty ) { println "Error. The test case wasn't initialized." currentBuild.result = 'FAILURE' return } jump_status = "failed" } } // ****************************************** // Finally block stages // ****************************************** def stage_finally_dont_wrap() { } def stage_finally_upload_test_results() { /** * Upload test results from the unified tests directory to a Jenkins logs directory in the storage server */ String destResultsDirURL = null boolean testResultsUploaded = false long uploadTestResultStartTime = new Date().getTime() Exception uploadTestResultsEx = null try { timeout(time: 20, unit: 'MINUTES') { String irInventoryDir = "" try { irInventoryDir = sh(returnStdout: true, script: ". $ir_env/bin/activate && dirname `infrared workspace inventory`").trim() } catch (Exception invDirEx) { irInventoryDir = "$WORKSPACE/infrared/.workspaces/active" } String srcResultsDir = "$irInventoryDir/test_results" try { String manualResFilePath = (env.JUMP_TESTRUN_RESULTS_DIR ?: WORKSPACE) + "/test_run_result.out" // Instead of working with 'File Operations' plugin or writing to a new file, we just use 'sh' to copy file if (fileExists(manualResFilePath)) { if (fileExists(irInventoryDir) && !fileExists(srcResultsDir)) { log "Test results dir in InfraRed's inventory dir (active workspace) doesn't exist. " + "Creating one for the manual test results file." sh "mkdir -p $srcResultsDir" } log "Copying manual test results file ('$manualResFilePath') for Polarion to the " + "unified test results dir ('srcResultsDir')", level: "DEBUG" sh "cp \"$manualResFilePath\" \"$srcResultsDir\"" } else { log "Manual test results file for Polarion wasn't found", level: "DEBUG" } } catch (Exception polManResEx) { throw polManResEx("An error occurred while trying to copy the manual tests results " + "file for Polarion to the unified test results dir\n$polManResEx") } ArrayList tobikoDirs = sh(script: "ls -d $irInventoryDir/tobiko_*/ || true", returnStdout: true).trim().split("\\r?\\n") tobikoDirs.removeAll(Arrays.asList("", null)) ArrayList allResultDirs = tobikoDirs.collect() if (fileExists(srcResultsDir)) { allResultDirs.add("$srcResultsDir/*") } // Note: There is no need to check if result directories or the files inside are empty or not, we want to upload the // results directory as is, and let the later stages/jobs to handle such cases if (!allResultDirs.size()) { log "Result directories weren't found", level: "DEBUG" } else { String tmpTestResultsDir = "/tmp/$JOB_NAME/$BUILD_ID/test_results" sh(script: "mkdir -p $tmpTestResultsDir") for (String resultsDir: allResultDirs) { sh(script: "cp -rf $resultsDir $tmpTestResultsDir") } // Upload unified test results dir destResultsDirURL = uploadToLogServer(tmpTestResultsDir, "") testResultsUploaded = true sh(script: "rm -rf $tmpTestResultsDir || true") // Because we don't create a new dir, we append the test_results dir to the URL destResultsDirURL = appendPathToURL(destResultsDirURL, "test_results") currentBuild.description = (currentBuild.description ?: '') + "Test Results" log "Test Results: $destResultsDirURL" } // else !srcResultsDir exists } // timeout } // try catch (Exception resUploadEx) { uploadTestResultsEx = resUploadEx log "An error occurred during the attempt to upload test results to the logs server.\n$resUploadEx ", level: "ERROR" // Default value should be 'true' instead of 'false' after testing period if ("False".toBoolean()) { throw resUploadEx } } finally { try { long uploadTestResultEndTime = new Date().getTime() def elasticDict = [ build_id: BUILD_ID, build_url: BUILD_URL, exception_class: uploadTestResultsEx ? uploadTestResultsEx.class.toString() : null, exception_message: uploadTestResultsEx ? uploadTestResultsEx.message.toString() : null, exception_stacktrace: uploadTestResultsEx ? uploadTestResultsEx.stackTrace.toString() : null, jenkins_url: JENKINS_URL, job_name: JOB_NAME, results_dir_url: destResultsDirURL, results_uploaded: testResultsUploaded, time_duration: uploadTestResultEndTime - uploadTestResultStartTime, time_end: uploadTestResultEndTime, time_start: uploadTestResultStartTime ] def response = ESCreateDoc("http://seal52.lab.eng.tlv2.redhat.com", "upload_test_results", elasticDict) log "Upload Test Results - Elasticsearch was successfully updated:$response", level: "DEBUG" } catch (Exception uploadTestResultsESEx) { log "Failed to update Elasticsearch with 'Upload Test Results' data\n$uploadTestResultsESEx", level: "WARN" } } /* The purpose of this file is to prevent a wrong indication of test results in Jenkins in case the 'ir-tempest-run' groovy file is used more than once. By having the following code in a separate file, it's possible to make only one call to the 'junit' Jenkins plugin instead of multiple calls which end with wrong (duplicated) number of test results in the 'Test Result' view of builds in Jenkins. */ RUN_JUNIT_TEMPEST = env.IR_TEMPEST_RUN_FINISHED ?: 'False' IR_TEMPEST_RESULTS_FORMATS = 'junitxml,html' ?: '' try { if (RUN_JUNIT_TEMPEST.toBoolean()) { log "Updating the build with Tempest results", level: "DEBUG" sh """ . $ir_venv/bin/activate pushd infrared # ensuring tempest results are in the top directory of Build Artifacts # will make consuming these results easy for external tools and link mkdir -p \$WORKSPACE/tempest-results/ find .workspaces/\$(infrared workspace list --active) -type f -name "*tempest-results*html" -exec cp {} \$WORKSPACE/tempest-results/ \\; popd """ junit '**/tests_results.xml, logs/test_*.xml, **/.workspaces/active/**/tempest*.xml, **/nosetests.xml' if ( IR_TEMPEST_RESULTS_FORMATS.matches('.*html.*') ) { archiveArtifacts artifacts: 'tempest-results/*' currentBuild.description = (currentBuild.description ?: '') + "
\nTempest Results\n" } } else { log "Skipped: Updating the build with Tempest results", level: "DEBUG" } } catch (Exception tempestJunitEx) { log "Failed to record Tempest results with Junit plugin\n$tempestJunitEx\n${tempestJunitEx.stackTrace}", level: "ERROR" throw tempestJunitEx } } def stage_finally_post_processing_triggers() { // The following is a temporary try-catch block. // It shouldn't be here after the testing period. try { Long updatePolarionTriggerTimeStart = new Date().time Exception updatePolarionTriggerEx = null String composeID = null String dfg = "" ?: "upgrades" String manualTestCasesIDs = getPolarionTestCase("RHELOSP-148121", "") String manualTestCases = manualTestCasesIDs.tokenize(",").collect {it.trim() + "=$jump_status"}.join(",") String resultsDir = getBuildLogsDir("test_results") String rhospRelease = null String testRunIDs = env.POLARION_TEST_RUN_ID ?: "20221208-0905" String triggeredBuildUrl = null boolean allowPrefix = "False".toBoolean() boolean markAllBlocked = (env.POLARION_MARK_ALL_BLOCKED ?: "False").toBoolean() && (currentBuild.result == 'FAILURE' || currentBuild.currentResult == 'FAILURE') boolean removeOldTestCases = "False".toBoolean() boolean removeTCsFirstIter = "false".toBoolean() boolean updateOnlyExistingTCs = "False".toBoolean() boolean updatePolarionJobTriggered = false boolean updatePolarionJobTriggeredSuccessfully = false def updatePolarionTriggerJobParams = null String _jumpPassedStageStr = env.JUMP_PASSED_STAGE ?: '' boolean _jumpPassedStage = _jumpPassedStageStr.empty || getStageStatus(_jumpPassedStageStr) boolean _publishToPolarion = (env.PUBLISH_TO_POLARION ?: "false").toBoolean() boolean resultsDirFound try { log "Checking if results dir ('$resultsDir') found on the remote", level: "DEBUG" sh script: "ssh rhos-ci@rhos-ci-logs.lab.eng.tlv2.redhat.com '[ -d $resultsDir ]'" resultsDirFound = true } catch (Exception verifyResDirEx) { resultsDirFound = false } finally { log "Results dir found? $resultsDirFound", level: "DEBUG" } // The only case we want to update Polarion when results dir wasn't found // is when there is a need to mark all as 'Blocked' or there is a need to // to update manual Test Cases. If both of them are 'false', there is no // value of triggering the job because it'll fail. boolean resultsDirRunCond = resultsDirFound || markAllBlocked || manualTestCases boolean updatePolarion = _publishToPolarion && _jumpPassedStage && resultsDirRunCond if (updatePolarion) { try { // Getting the Compose ID & the RHOSP Release version from the first host in the 'undercloud' group String firstUCHost String irInventoryFile String irVenv = ir_venv try { irInventoryFile = sh(returnStdout: true, script: ". $irVenv/bin/activate && infrared workspace inventory").trim() } catch (Exception ex) { log "Couldn't get InfraRed's inventory file.", level: "WARNING" log "$ex", level: "DEBUG" } if (irInventoryFile) { try { firstUCHost = getFirstHostOfAnsibleGroup(irVenv, irInventoryFile, 'undercloud') } catch (Exception ex) { log "Couldn't find an undercloud host in the Ansible inventory file.", level: "WARNING" log "$ex", level: "DEBUG" } } if (firstUCHost) { composeID = getComposeFromHost(irVenv, irInventoryFile, firstUCHost) rhospRelease = getRHOSPReleaseFromHost(irVenv, irInventoryFile, firstUCHost) } updatePolarionTriggerJobParams = [ [$class: 'BooleanParameterValue', name: 'ALLOW_TEST_CASES_PREFIX', value: allowPrefix], [$class: 'BooleanParameterValue', name: 'MARK_ALL_BLOCKED', value: markAllBlocked], [$class: 'BooleanParameterValue', name: 'REMOVE_OLD_TEST_CASES', value: removeOldTestCases], [$class: 'BooleanParameterValue', name: 'REMOVE_TEST_CASES_FIRST_ITERATION_ONLY', value: removeTCsFirstIter], [$class: 'BooleanParameterValue', name: 'UPDATE_ONLY_EXISTING_TEST_CASES', value: updateOnlyExistingTCs], [$class: 'StringParameterValue', name: 'COMPOSE_ID', value: composeID], [$class: 'StringParameterValue', name: 'DFG', value: dfg.toLowerCase()], [$class: 'StringParameterValue', name: 'MANUAL_TEST_CASES', value: manualTestCases], [$class: 'StringParameterValue', name: 'CALLING_BUILD_URL', value: BUILD_URL], [$class: 'StringParameterValue', name: 'CALLING_JOB_TYPE', value: 'legacy'], [$class: 'StringParameterValue', name: 'RESULTS_DIR', value: resultsDirFound ? resultsDir: null], [$class: 'StringParameterValue', name: 'RHOSP_RELEASE', value: rhospRelease], [$class: 'StringParameterValue', name: 'TEST_RUN_IDS', value: testRunIDs], ] updatePolarionJobTriggered = true build( job: 'jump-update-polarion', parameters: updatePolarionTriggerJobParams, wait: false, ) updatePolarionJobTriggeredSuccessfully = true HashMap esResponse ArrayList exceptionsList = new ArrayList() int sleepTime = 5 int queryAttempts = 12 while (queryAttempts > 0) { queryAttempts-- try { sleep(sleepTime) esResponse = ESMatchQuery( "http://seal52.lab.eng.tlv2.redhat.com", "update_polarion_sync", "calling_build_url", BUILD_URL ) if (esResponse._shards.successful == 1 && esResponse.hits.total.value > 0) { triggeredBuildUrl = esResponse.hits.hits[0]._source.triggered_build_url log "Polarion Update Build: $triggeredBuildUrl" ArrayList tokenizedTB = triggeredBuildUrl.tokenize("/") String badgeText = "${tokenizedTB[-2]} #${tokenizedTB[-1]}" addBadgeText(badgeText, "black", "lavender", "1px", "black") break } } catch (Exception elasticEx) { exceptionsList << elasticEx } } if (!triggeredBuildUrl) { log "Couldn't get the URL to the Polarion Update Build.\n$exceptionsList", level: "DEBUG" } } catch (Exception polarionTriggerGenEx) { updatePolarionTriggerEx = polarionTriggerGenEx throw polarionTriggerGenEx } finally { try { Long updatePolarionTriggerTimeEnd = new Date().time HashMap jobParams = [ build_url: BUILD_URL, // Although in jobParams, this is a must exception_class: updatePolarionTriggerEx ? updatePolarionTriggerEx.class.toString() : null, exception_message: updatePolarionTriggerEx ? updatePolarionTriggerEx.message.toString() : null, exception_stacktrace: updatePolarionTriggerEx ? updatePolarionTriggerEx.stackTrace.toString() : null, results_dir_found: resultsDirFound, results_dir_run_condition: resultsDirRunCond, time_duration: updatePolarionTriggerTimeEnd - updatePolarionTriggerTimeStart, time_end: updatePolarionTriggerTimeEnd, time_start: updatePolarionTriggerTimeStart, triggered_build_url: triggeredBuildUrl, update_polarion_job_triggered: updatePolarionJobTriggered, update_polarion_job_triggered_successfully: updatePolarionJobTriggeredSuccessfully, ] updatePolarionTriggerJobParams.each { jobParams[it['name']] = it['value'] } def response = ESCreateDoc("http://seal52.lab.eng.tlv2.redhat.com", "update_polarion_trigger", jobParams) log "Update Polarion Trigger - Elasticsearch was successfully updated:$response", level: "DEBUG" } catch (Exception finallyESEx) { throw finallyESEx } } } } // End of temporary try block catch (Exception updatePolarionTmpEx) { log "The following failure happened during 'Update Polarion Trigger' " + "and *DOESN'T* affect your build\n$updatePolarionTmpEx\n${updatePolarionTmpEx.stackTrace}", level: "DEBUG" emailext ( from: 'rhos-ci-jenkins@redhat.com', to: 'aopincar@redhat.com', subject: "FAILURE: Update Polarion Trigger", body: "Build URL: $BUILD_URL\n" + "Exception: $updatePolarionTmpEx\n" + "Stack Trace: \n${updatePolarionTmpEx.stackTrace}", attachLog: false ) } // End of temporary catch block } def stage_finally_pre() { /** * Update the test run in polarion with the status of the job * Need to see if RH-IT-Root-CA.crt can be taken from a central location * * @requires install_ir.groovy.inc * @jjb_param {polarion_test_run_id} optional * Defines ID of Polarion test run. * Paramter expect to be string - for example '20171017-1613' * @jjb_param {polarion_test_case_id} optional * Defines ID of Polarion test case. If specified, and publishing * to Polarion is enabled, they will be updated. * Paramter expect to be string - for example 'RHELOSP-23328' * @jjb_param {polarion_tempest} optional * Defines if we need to upload tempest results to Polarion. * Parameter expect to be yaml boolean - "yes" instead of "'yes'" * @jjb_param {jump_update_existing_test_cases_only} optional * If "true", only update the test cases found in the test run, * without adding the other test cases found in the XML. * Default value: "false". If multiple test runs are specified, * it is set to "true" regardless of the value set. */ RUN_JUMP = env.JUMP_PASSED_STAGE ?: '' POLARION_TEST_RUN_ID = env.POLARION_TEST_RUN_ID ?: '20221208-0905' // polarion_test_case_id can be a list of single quoted strings or a // groovy map using single quote, so we *have to* use double quote // here. POLARION_TEST_CASE_ID = getPolarionTestCase("RHELOSP-148121", "") PUBLISH_TO_POLARION = env.PUBLISH_TO_POLARION ?: "false" POLARION_TEMPEST = 'False' ?: "false" POLARION_TOBIKO = 'False' ?: "false" FIRST_ITERATION_REMOVE = 'false' ?: "false" // Mark all TestRun's TestCases as 'Blocked' in case the build failed MARK_ALL_BLOCKED = env.POLARION_MARK_ALL_BLOCKED ?: 'False' // RUN_JUMP, if specified, should point to a passed stage. // Also, PUBLISH_TO_POLARION must be true. if ( (RUN_JUMP.empty || getStageStatus(RUN_JUMP)) && (PUBLISH_TO_POLARION.toBoolean()) ) { def uc_host = '' def puddle_id = 'UNKNOWN_PUDDLE_ID' def rhosp_release = 'UNKNOWN_RHOSP_RELEASE' try { puddle_id = env.JP_PUDDLE_VERSION ?: env.PUDDLE_CORE ?: puddle_id } catch (Exception ex) { log "An error occurred while trying to find the Compose ID from env vars", level: "DEBUG" } def stageFinished = false def workspaceResultsFileFound = false def beginningBuildResult = currentBuild.currentResult def updateExistingTestCasesOnly = "False" if ( 'False' != '' ) { updateExistingTestCasesOnly = "False" } // In case multiple Test Runs are given, updating only the existing ones in each Test Run. if (POLARION_TEST_RUN_ID.split(",").size() > 1) { updateExistingTestCasesOnly = "True" } // Getting the Puddle ID & the rhosp-release version from the first host in the 'undercloud' group // * In the future we may skip the following block if it's enough to have the Compose from env vars try { def tmp_dir = sh(returnStdout: true, script: "mktemp -d ${WORKSPACE}/jump_helpers_XXXXX").trim() sh """ . $ir_venv/bin/activate ANSIBLE_FORCE_COLOR=no ANSIBLE_NOCOLOR=yes ansible -i \$(infrared workspace inventory) undercloud --list-host > ${tmp_dir}/uc_hosts_list.txt cat ${tmp_dir}/uc_hosts_list.txt | sed '1,/^ hosts ([0-9]*)/d' | awk 'NR==1' | sed 's/^ *//g' > ${tmp_dir}/first_undercloud_host """ uc_host = sh(returnStdout: true, script: "cat ${tmp_dir}/first_undercloud_host").trim() log "Trying to fetch the Puddle ID & the RHOSP version from '${uc_host}' - the first undercloud host in the 'undercloud' group" if (!uc_host) { log "Couldn't find an undercloud host in the inventory file, Puddle ID & RHOSP Version are unknown", level: "WARNING" } else { try { sh """ . $ir_venv/bin/activate ansible ${uc_host} -i \$(infrared workspace inventory) -m ansible.builtin.fetch -a "src=~/core_puddle_version dest=${tmp_dir}/ flat=yes" """ def fetched_puddle_id = sh(returnStdout: true, script: "cat ${tmp_dir}/core_puddle_version").trim() if (fetched_puddle_id != '') { puddle_id = fetched_puddle_id } } catch (Exception ex) { log "Couldn't fetch the Puddle ID from the '${uc_host}' host in the Ansible inventory file.", level: "WARNING" log "${ex}", level: "DEBUG" } try { sh """ . $ir_venv/bin/activate ansible ${uc_host} -i \$(infrared workspace inventory) -m ansible.builtin.fetch -a "src=/etc/rhosp-release dest=${tmp_dir}/ flat=yes" """ def fetched_rhosp_release = sh(returnStdout: true, script: "cat ${tmp_dir}/rhosp-release | awk '{ if (\$12 == \"release\" ) { print \$13\" \"\$14\" \"\$15 } else { print \$6\" \"\$7\" \"\$8 } }'").trim() if (fetched_rhosp_release != '') { rhosp_release = fetched_rhosp_release } } catch (Exception ex) { log "Couldn't fetch the RHOSP release from the '${uc_host}' host in the Ansible inventory file.", level: "WARNING" log "${ex}", level: "DEBUG" } } } catch (Exception ex) { log "Couldn't find a undercloud host in the Ansible inventory file.", level: "WARNING" log "${ex}", level: "DEBUG" } log "Puddle ID: ${puddle_id}" log "RHOSP release: ${rhosp_release}" if (! POLARION_TEST_RUN_ID.empty ) { if (!POLARION_TEST_CASE_ID.empty) { // Make sure failures in stage2 are also counted if (env.STAGE_FATAL_EX) { jump_status = 'failed' } notifyJump(POLARION_TEST_CASE_ID, jump_status ) } dir('jump') { try { git branch: 'master', url: 'https://code.engineering.redhat.com/gerrit/jump' // A must for creating new (missing) test cases. (case sensitive - should be exactly as in Polarion) def dfg = '' def jjb_polarion_dfg = [ 'cloudops': 'CloudOps', 'compute': 'Compute', 'dci': 'DCI', 'df': 'Deployment', 'edge': 'Edge', 'enterprise': 'System', 'hardware_provisioning': 'HardProv', 'network': 'Network', 'nfv': 'NFV', 'osasinfra': 'OSasInfra', 'pidone': 'PIDONE', 'security': 'Security', 'storage': 'Storage', 'upgrades': 'Upgrade' ] if (!updateExistingTestCasesOnly.toBoolean()) { if ( '' != '' ) { dfg = '--dfg ' + '' } else if ('upgrades' != '' && jjb_polarion_dfg.containsKey('upgrades')) { dfg = '--dfg ' + jjb_polarion_dfg['upgrades'] } } // Whether or not to allow the creation of same test cases with different prefix when the same tempest stage is running more than once. def allow_prefix = 'False' ?: "False" if (MARK_ALL_BLOCKED == 'True' && currentBuild.result == 'FAILURE') { withEnv(['REQUESTS_CA_BUNDLE=/etc/pki/ca-trust/source/anchors/RH-IT-Root-CA.crt']) { sh2 basename: 'jump_update_polarion_test_case', script: """ . $ir_venv/bin/activate ./prepare_pylero.sh for test_run_id in \$(echo ${POLARION_TEST_RUN_ID} | sed "s/,/ /g") do echo "Marking all test cases of test run '\$test_run_id' as 'Blocked'..." python jump.py \ --project-id=RHELOpenStackPlatform \ --testrun-id=\$test_run_id \ --user=rhosp_machine \ --jenkins_build_url=\$BUILD_URL \ --puddle-id="${puddle_id}" \ --custom-fields build="${rhosp_release}" \ --mark-all-blocked done """ } } /* End of 'Mark all Blocked' */ /* NOT Mark all Blocked */ else { RESULTS_DIR = env.JUMP_TESTRUN_RESULTS_DIR ?: "${env.WORKSPACE}" RESULTS_FILE = RESULTS_DIR + "/test_run_result.out" if ( fileExists(file: RESULTS_FILE )) { workspaceResultsFileFound = true withEnv(['REQUESTS_CA_BUNDLE=/etc/pki/ca-trust/source/anchors/RH-IT-Root-CA.crt']) { sh2 basename: 'jump_update_polarion_test_case', script: """ . $ir_venv/bin/activate ./prepare_pylero.sh echo "validate test_case" TEST_CASES=\$(cat "${RESULTS_FILE}" | paste -sd "," -) # Iterates over all Test Runs and sends result to Polarion (sed to loop over comma-separated value) for test_run_id in \$(echo ${POLARION_TEST_RUN_ID} | sed "s/,/ /g") do echo "Updating \$test_run_id" python jump.py \ --project-id=RHELOpenStackPlatform \ --testrun-id=\$test_run_id \ --testcases=\$TEST_CASES \ --user=rhosp_machine \ --jenkins_build_url=\$BUILD_URL \ --puddle-id="${puddle_id}" \ --custom-fields build="${rhosp_release}" \ --update-existing-test-cases=${updateExistingTestCasesOnly} \ done """ } } /* test_run_result.out exists */ def updated_test_runs_dir = "updated_test_runs" def remove_tcs_first_iter_only = "false".toLowerCase() == "true" ?: false for (testing_framework in ["tempest", "tobiko"]) { if (testing_framework == "tempest") { if (POLARION_TEMPEST.empty || !POLARION_TEMPEST.toBoolean()) { continue } test_results_dir = 'test_results' } else if (testing_framework == "tobiko") { if (POLARION_TOBIKO.empty || !POLARION_TOBIKO.toBoolean()) { continue } test_results_dir = 'tobiko_*' } test_results_dir_name_opt = test_results_dir.split(",").collect {"-name '$it'"}.join(" -o ") log "Jump Stage: Processing ${ testing_framework } results", level: "DEBUG" withEnv(['REQUESTS_CA_BUNDLE=/etc/pki/ca-trust/source/anchors/RH-IT-Root-CA.crt']) { sh2 basename: "jump_update_polarion_test_run_${ testing_framework }", script: """ if [[ ! -d ${ updated_test_runs_dir } ]]; then echo "Making dir ${ updated_test_runs_dir }" mkdir ${ updated_test_runs_dir } fi . $ir_venv/bin/activate if [[ ! -d pylero ]]; then echo "Preparing pylero" ./prepare_pylero.sh else echo "Skip pylero preparation" fi # NOTE: it's useful to see the commands that are being run to troubleshoot potential issues set -x RESULTS_DIRS=`find ../infrared/.workspaces/ -type d ${test_results_dir_name_opt}` if [[ -z \$RESULTS_DIRS ]] then echo "Results directory '${test_results_dir_name_opt}' wasn't found in '../infrared/.workspaces/'" RESULTS_FILES='' else echo "Searching for XML file(s) in \$RESULTS_DIRS" RESULTS_FILES=`find \$RESULTS_DIRS -type f -name '*.xml' | sort ` fi if [[ -z \$RESULTS_FILES ]] then echo "${testing_framework} result(s) not found, skip uploading" else # Iterates over all Test Runs and sends result to Polarion (sed to loop over comma-separated value) for test_run_id in \$(echo ${POLARION_TEST_RUN_ID} | sed "s/,/ /g") do for _result_file in \$RESULTS_FILES do # The following code is a workaround to upload all the test report files # to Polarion. It should be removed once there will be a permanent # patch to combine all the report files together and upload it in one attempt if [[ "${ remove_tcs_first_iter_only }" == "true" ]]; then if [[ ! -f "${ updated_test_runs_dir }/\$test_run_id" ]]; then REMOVE_OLD_TESTS="True" touch ${ updated_test_runs_dir }/\$test_run_id echo "First iteration of Test Run \$test_run_id, removing old (missing) Test Cases" else REMOVE_OLD_TESTS="False" echo "Not first iteration of Test Run \$test_run_id, not removing old (missing) Test Cases" fi else REMOVE_OLD_TESTS=False echo "Removing old (missing) Test Cases? \$REMOVE_OLD_TESTS" fi prefix="" if [ "${ allow_prefix }" == "True" ] then if [ "${testing_framework}" == "tempest" ] then # Check if there are multiple result files for the same component component_name="\$(echo \$_result_file | awk -F tempest-results- '{ print \$2 }' | awk -F . '{ print \$1 }')" component_count=\$(find \$RESULTS_DIRS -name "*\$component_name*.xml" | wc -l) if [ "\$component_count" -gt 1 ] then prefix="--testcase-prefix=\$(echo \$_result_file | awk -F tempest-results- '{ print \$2 }' | awk -F .xml '{ print \$1 }')" fi elif [ "${testing_framework}" == "tobiko" ] then result_files_count=\$(find \$RESULTS_DIRS -name "tobiko*.xml" | wc -l) if [ "\$result_files_count" -gt 1 ] then prefix="--testcase-prefix=\$(echo \$_result_file | sed 's/.*tobiko.*_[0-9]*_\\(.*\\)\\.xml/\\1/')" fi fi fi echo "Processing \$_result_file with \$test_run_id" python jump.py \ --testrun-id=\$test_run_id \ --xml-file=\$_result_file \ --update_testcases=True \ --jenkins_build_url=\$BUILD_URL \ --puddle-id="${puddle_id}" \ --custom-fields build="${rhosp_release}" \ --remove-old-tests="\$REMOVE_OLD_TESTS" \ --update-existing-test-cases=${updateExistingTestCasesOnly} \ ${ dfg } \$prefix \ done done fi """ } } /* POLARION_TEMPEST matches 'yes' or 'true' */ } /* End of 'NOT Mark all Blocked' */ stageFinished = true } catch (org.jenkinsci.plugins.workflow.steps.FlowInterruptedException ex) { log "Build/Stage timeout expired during Jump stage", level: "ERROR" throw ex } catch (Exception e) { println(e.getMessage()) finally_errors.add(['name': 'JUMP_tool', 'message': e.getMessage(), 'fail_build': true]) } finally { def dataFiles = findFiles(glob: 'workspaces/**/jump_run_data.json') if (dataFiles.size()) { dataFiles.each { try { ESSingleInsert('http://seal45.lab.eng.tlv2.redhat.com:9200/jump/_doc', it.getPath()) } catch (Exception es_ex) { try { log "Failed to upload Jump's run data to Elasticsearch from '${it.getPath()}'.\n" + "Exception details:\n" + "toString(): ${es_ex.toString()}\n" + "getMessage(): ${es_ex.getMessage()}\n" + "getStackTrace(): ${es_ex.getStackTrace()}\n", level: "ERROR" } catch (Exception inner_es_ex) { log "An error occured while trying to log an ESSingleInsert Exception.\n" + "${inner_es_ex}", level: "ERROR" } emailext ( from: 'rhos-ci-jenkins@redhat.com', to: 'aopincar@redhat.com', subject: "Jump Stage: An error occurred while trying upload Jump data to Elasticsearch", body: "Build URL: ${env.BUILD_URL}\nData File: ${it.getPath()}", attachLog: false) } } } else { log "Jump Stage: Run data JSON files weren't found. Nothing to upload to Elasticsearch.", level: "WARN" } def buildStatusChangedToFailureDuringStage = false if ((currentBuild.result == 'FAILURE' && currentBuild.result != beginningBuildResult) || (!stageFinished && beginningBuildResult != 'FAILURE')) { buildStatusChangedToFailureDuringStage = true } /* Fail if no publishing type (test cases, tempest, ...) is requested, which means that POLARION_TEMPEST is false and the list of test cases is empty. */ def unsupportedPublishingType = false if (POLARION_TEST_CASE_ID.empty && !POLARION_TEMPEST.toBoolean() && !POLARION_TOBIKO.toBoolean()) { unsupportedPublishingType = true } if (stageFinished) { if (!dataFiles.size()) { log "Jump Stage: Jump wasn't executed, please check if test result files exist", level: "ERROR" currentBuild.result = 'FAILURE' } else if (unsupportedPublishingType && !workspaceResultsFileFound && currentBuild.result != 'FAILURE') { log "Jump Stage: Current publishing type isn't fully supported and no results file has been found, failing the build!\n" + "Currently supported publishing types: Test Cases, Tempest, Tobiko " + "('polarion_test_case_id', 'polarion_tempest', 'polarion_tobiko' JJB params), " + "or having a results file ('test_run_result.out') inside the workspace directory.", level: 'ERROR' currentBuild.result = 'FAILURE' } else { log "Jump Stage: Errors were not found during Jump executions", level: 'DEBUG' } } else { def stageMsg = "Jump Stage: Jump execution failed, " if (buildStatusChangedToFailureDuringStage) { stageMsg += "this is the reason why the build failed" } else { stageMsg += "but something else failed the build at an earlier stage" } log stageMsg, level: 'ERROR' } log "Jump Stage: Build result (currentBuild.currentResult) at the beginning and ending of the stage: Beginning='${ beginningBuildResult }', Ending='${ currentBuild.currentResult }' (currentBuild.result='${ currentBuild.result }')", level: 'DEBUG' } } try { archiveArtifacts allowEmptyArchive: true, artifacts: 'jump/**/*.xml, jump/workspaces/**/*.*, test_run_result.out' sh 'rm -f "${RESULTS_FILE}"' } catch (Exception ex) { log "Jump Stage: Something happened while trying to archive artifacts or remove the result files", level: 'ERROR' throw ex } } } /** * Parses the CI_MESSAGE (UMB) param. */ timeout(time: 5, unit: 'MINUTES') { if ( 'False' == 'true' ) { try { // get compose/puddle/poodle id info at the start of the build puddle_version = '' if ('16.2' != '') { puddle_version = puddleDateFormatter(env.OSP_VERSION ?: '16.2',"${PRODUCT_BUILD}") println "puddle_version: " + puddle_version } send_results_to_umb(dry_run: false, puddle_version: puddle_version, product_version: '16.2', ci_type: 'Custom') } // end "try" catch (Exception e) { println "Send result to UMB failed!, exception: " + e } } // end 'if umb_send_result' else { println "Not sending result to UMB because jjb param 'umb_send_result' not true" } } // end "timeout" /** * Triggers the 'update-reportportal' job * * The 'update-reportportal' job is a standalone Jenkins job that responsible * on updating ReportPortal with test/deployment results * * @jjb_param {publish_to_reportportal} Run ReportPortal publish results script, * default state. * * @jjb_param {ir_reportportal_tags} ReportPortal default launch tags. * * @jjb_param {ir_rp_log_last_traceback_only} Upload only the last python traceback to the logs * * @jjb_param {ir_rp_full_log_attachment} Save full test case log as attachment if traceback only option has be chosen * * @param $ir_venv virtual environment where InfraRed is installed, * by install_ir.groovy.inc stage. * * @param PUBLISH_TO_REPORTPORTAL Run ReportPortal publish results script. * * @param $REPORTPORTAL_TAGS ReportPortal editable launch tags. * Add separated by comma. * * @param $JOB_NAME Jenkins job name of the project of this build. * set up automatically by Jenkins. * * @param $BUILD_ID Jenkins job current build ID. * set up automatically by Jenkins. * */ try { timeout(time: 20, unit: 'MINUTES') { Long rpTriggerTimeStart = new Date().getTime() boolean enableTFA = "False".toBoolean() boolean fullLogAttachment = "true".toBoolean() boolean ignoreSkippedTests = "false".toBoolean() boolean logLastTracebackOnly = "true".toBoolean() boolean reportPortalUpdateJobTriggeredSuccessfully = false boolean triggerReportPortalUpdateJob = false boolean updateReportPortal = (env.PUBLISH_TO_REPORTPORTAL ?: "True").toBoolean() boolean updateStagingProjects = JENKINS_URL.contains("rhos-ci-jenkins.lab.eng.tlv2.redhat.com") ? false : true boolean uploadDeploymentResults = "False".toBoolean() boolean uploadTestResults = "True".toBoolean() String composeID = env.PUDDLE_CORE ?: env.REPORT_PUDDLE ?: env.JP_PUDDLE_VERSION ?: null if (!composeID) { try { log "Compose ID wasn't found in environment variables, trying " + "to fetch it from the first 'undercloud' host", level: "DEBUG" // The code below is taken from 'update_polarion_trigger' String firstUCHost = null String irInventoryFile = null String irVenv = ir_venv try { irInventoryFile = sh(returnStdout: true, script: ". $irVenv/bin/activate && infrared workspace inventory").trim() } catch (Exception ex) { log "Couldn't get InfraRed's inventory file.", level: "WARNING" log "$ex", level: "DEBUG" } if (irInventoryFile) { try { firstUCHost = getFirstHostOfAnsibleGroup(irVenv, irInventoryFile, 'undercloud') } catch (Exception ex) { log "Couldn't find an 'undercloud' host in the Ansible inventory file.", level: "WARNING" log "$ex", level: "DEBUG" } } if (firstUCHost) { composeID = getComposeFromHost(irVenv, irInventoryFile, firstUCHost) ?: null // rhospRelease = getRHOSPReleaseFromHost(irVenv, irInventoryFile, firstUCHost) } } catch (Exception fetchComposeFromHostEx) { log "An exception was raised while trying to fetch the compose ID:\n" + "$fetchComposeFromHostEx", level: "WARNING" } } String remoteTestsDir = getBuildLogsDir("test_results") log "Remote tests directory: $remoteTestsDir", level: "DEBUG" // TODO: Check if result dirs exist ArrayList launchTagsList = new ArrayList() launchTagsList << "DATE:${new Date().format("yyyy-MM-dd")}" launchTagsList << env.REPORTPORTAL_TAGS ?: "" if (Float.valueOf("16.2") >= 13 && "yes".toBoolean()) { launchTagsList << "containers" } HashMap envVars = env.getEnvironment() as HashMap envVars.remove('REPORTPORTAL_TAGS') envVars.each { evKey, evValue -> launchTagsList << "$evKey:$evValue" } String launchTagsStr = launchTagsList.join(';') launchTagsStr = launchTagsStr.replaceAll('"', "'") if (updateReportPortal && uploadTestResults) { triggerReportPortalUpdateJob = true reportportalUpdateTriggerJobParams = [ [$class: 'BooleanParameterValue', name: 'ADD_JENKINS_STAGES_TO_TESTS_RESULTS', value: true], [$class: 'BooleanParameterValue', name: 'FULL_LOG_ATTACHMENT', value: fullLogAttachment], [$class: 'BooleanParameterValue', name: 'IGNORE_SKIPPED_TESTS', value: ignoreSkippedTests], [$class: 'BooleanParameterValue', name: 'LOG_LAST_TRACEBACK_ONLY', value: logLastTracebackOnly], [$class: 'BooleanParameterValue', name: 'TFACON', value: enableTFA], [$class: 'BooleanParameterValue', name: 'UPDATE_RHOS_PSI_RP_INSTANCE', value: true], [$class: 'BooleanParameterValue', name: 'UPDATE_STAGING_PROJECTS', value: updateStagingProjects], [$class: 'StringParameterValue', name: 'CALLING_BUILD_URL', value: BUILD_URL], [$class: 'StringParameterValue', name: 'COMPOSE_ID', value: composeID], [$class: 'StringParameterValue', name: 'DEPLOYMENT_RESULTS_PATH', value: null], [$class: 'StringParameterValue', name: 'LAUNCH_ATTRIBUTES', value: launchTagsStr], [$class: 'StringParameterValue', name: 'LAUNCH_NAME', value: env.RP_LAUNCH_ALTNAME ?: JOB_NAME], [$class: 'StringParameterValue', name: 'LAUNCH_START_TIME', value: String.valueOf(currentBuild.startTimeInMillis)], [$class: 'StringParameterValue', name: 'TEST_RESULTS_PATH', value: uploadTestResults ? remoteTestsDir: null], ] log "Triggering the 'reportportal-update' job with the following parameters:\n" + "$reportportalUpdateTriggerJobParams", level: "DEBUG" build( job: 'reportportal-update', parameters: reportportalUpdateTriggerJobParams, wait: false, ) reportPortalUpdateJobTriggeredSuccessfully = true HashMap esResponse = ESWaitOnMatchQuery( "http://seal52.lab.eng.tlv2.redhat.com", "reportportal_update_sync", "calling_build_url.keyword", BUILD_URL ) if (esResponse != null && esResponse._shards.successful == 1 && esResponse.hits.total.value > 0) { triggeredBuildUrl = esResponse.hits.hits[0]._source.triggered_build_url log "ReportPortal Update Build: $triggeredBuildUrl" ArrayList tokenizedTB = triggeredBuildUrl.tokenize("/") String badgeText = "${tokenizedTB[-2]} #${tokenizedTB[-1]}" addBadgeText(badgeText, "black", "lavender", "1px", "black") } else { log "Couldn't get the URL to the ReportPortal Update Build.\n$esResponse", level: "DEBUG" } } // ReportPortal Trigger - End of trigger condition } // ReportPortal Trigger - End of timeout } // ReportPortal Trigger - End of Main try catch (Exception rpUpdateTriggerEx) { log "Failed to trigger 'update-reportportal' Jenkins job:\n$rpUpdateTriggerEx", level: "ERROR" // TODO: Send email to aopincar } finally { Long rpTriggerTimeEnd = new Date().getTime() // TODO: Update elasticsearch } } def stage_finally() { /** * Run an Ansible role for aggregating logs from different nodes. * * Options which are supported by openstack/ansible-role-collect-logs can be * found here [1]. * Before you use any of the options in [1] make sure, it's defined in * infrared plugin.spec [2], otherwise that option will not work with Infrared * and it will fail on unrecognized argument. * * @jjb_param {artcl_options} optional * Contains default ansible-role-collect-logs used in jobs using * this stage. * * @jjb_param {artcl_openstack_nodes} optional * OpenStack nodes ansible-role-collect-logs will be executed on. * Default is all:!localhost:!hypervisor * * @jjb_param {artcl_collect_log_types} optional * Contains types of logs to collect divided by comma, such as * openstack logs, network logs, system logs, etc. Acceptable values * are system, monitoring, network, openstack and container. * Default is container,monitoring,network,openstack,system * * @jjb_param {artcl_options_append} optional * In case you want to use some options from [1] specific for your * job, this variable will do the trick. Note, that the option * you want use here must be specified in [2] as well. * * @jjb_param {artcl_collect_list} optional * By default contains default list of files to be collected defined * within this stage. Set this variable in case you want to collect * custom files. * If you want to only add some extra files, see * {artcl_collect_list_append} variable below. * * @jjb_param {artcl_collect_list_append} optional * If you want to collect extra files than only those defined in * {artcl_collect_list}, set this value. * * @jjb_param {artcl_exclude_list} optional * By default contains default list of files to be excluded defined * within this stage. Set this variable in case you want to exclude * custom files. * If you want to only add some extra files, see * {artcl_exclude_list_append} variable below. * * @jjb_param {artcl_exclude_list_append} optional * If you want to exclude extra files than only those defined in * {artcl_exclude_list}, set this value. * * @jjb_param {ir_publish_to_server} optional * A hostname of the server the logs will be published to. * Default is rhos-ci-logs.lab.eng.tlv2.redhat.com * * @jjb_param {ir_publish_baseurl} optional * An URL the logs are available at after they are published. * Default is rhos-ci-logs.lab.eng.tlv2.redhat.com/logs/ * * [1] https://github.com/openstack/ansible-role-collect-logs/tree/master/defaults * [2] https://github.com/openstack/ansible-role-collect-logs/blob/master/infrared_plugin/plugin.spec **/ stage2('Collect logs') { addToArtifacts(artifacts: '**/*.log') addToArtifacts(artifacts: 'infrared/*.cfg') addToArtifacts(artifacts: 'infrared/*.yml') addToArtifacts(artifacts: 'infrared/*.yaml', allowEmptyArchive: true) addToArtifacts(artifacts: 'infrared/.workspaces/') addToArtifacts(artifacts: 'infrared/workspace.tgz') addToArtifacts(artifacts: 'infrared/.envrc', allowEmptyArchive: true) addToArtifacts(artifacts: 'jump/**/*.json', allowEmptyArchive: true) timeout(time: 60, unit: 'MINUTES') { def logserver_dest = "rhos-ci-logs.lab.eng.tlv2.redhat.com" def logserver_user = "rhos-ci" def log_baseurl = "http://rhos-ci-logs.lab.eng.tlv2.redhat.com/logs" if (!logserver_dest) { println("Skipping log collection - ir_publish_to_server is not set.") return } // use global env variable in Jenkins master to prevent logs collisions // when multiple jenkins masters (like production and staging) are using same // log server destination (also make sure such location exists on log server def server_prefix = env.LOGS_JENKINS_PREFIX ?: '' // ensure prefix starts with slash and not ends (rsync path should be without it) if (server_prefix && server_prefix[0] != '/') { server_prefix = '/'+server_prefix } if (server_prefix && server_prefix[-1] == '/') { server_prefix = server_prefix[0..-2] } def PLUGIN_REPO = "https://github.com/openstack/ansible-role-collect-logs.git" def ARTIFACTS_SERVER_REL_PATH = "${JOB_NAME}/${BUILD_ID}" def ARTIFACTS_SERVER_DEST_PATH = "/rhos-infra-dev-netapp/jenkins-logs${server_prefix}/${ARTIFACTS_SERVER_REL_PATH}" def ARTIFACTS_SERVER_DEST = "${logserver_user}@${logserver_dest}:${ARTIFACTS_SERVER_DEST_PATH}" if (log_baseurl == "") { // we cannot use groovy variables inside jjb's default value, so doing switch to default in groovy log_baseurl = "http://${logserver_dest}" } // strip trailing slash if present in baseurl if (log_baseurl[-1] == '/') { log_baseurl = log_baseurl[0..-2] } def LOG_URL = log_baseurl + server_prefix + "/${ARTIFACTS_SERVER_REL_PATH}/" currentBuild.description = (currentBuild.description ?: '') + "Browse logs" println("Browse logs: ${LOG_URL}") /** KEEP THE LISTS OF FILES AND OPTIONS IN SYNC WITH THE LISTS IN * jobs/compact/script/lib/logs.lib.sh */ def artcl_CLI_options_common = """--artcl_txt_rename true \ --artcl_gzip true \ --artcl_find_maxdepth 10 \ --artcl_use_rsync true \ --artcl_rsync_collect_list false \ --artcl_build_url "" \ --artcl_publish false \ --ara_enabled false \ """ def artcl_CLI_hypervisor_collect_options = artcl_CLI_options_common artcl_CLI_hypervisor_collect_options += " --openstack_nodes hypervisor " artcl_CLI_hypervisor_collect_options += " --collect_log_types hypervisor " def artcl_commands_hypervisor_list = [ "hypervisor.journal.cmd='journalctl -x --since=-8h --lines=100000'", "hypervisor.journal-kernel.cmd='journalctl -xk --since=-8h --lines=100000'", "hypervisor.virsh-list.cmd='virsh list --all'", "hypervisor.disk.cmd='blkid;lsblk;df -T;df -i;'", "hypervisor.memory.cmd='free -m'", "hypervisor.rpms.cmd='rpm -qa'", ] def artcl_commands_hypervisor_list_str = artcl_commands_hypervisor_list.join(',') def artcl_collect_hypervisor_list = [ "/etc/ssh/", "/var/lib/libvirt/qemu/*.log", "/var/log/extra/journal.txt", "/var/log/extra/journal-kernel.txt", "/var/log/extra/virsh-list.txt", "/var/log/extra/disk.txt", "/var/log/extra/memory.txt", "/var/log/extra/rpms.txt", ] def artcl_collect_hypervisor_list_str = artcl_collect_hypervisor_list.join(',') def artcl_collect_default_list = [ "/etc/", "/etc/neutron", "/etc/tempest/*.xml", "/etc/tempest/saved_state.json", "/etc/tempest/tempest.conf", "/etc/tempest/tempest.conf.sample", "/home/*/*.conf", "/home/*/*.json", "/home/*/*.log", "/home/*/*.sh", "/home/*/*.yaml", "/home/*/*.yml", "/home/*/*/black_list_*", "/home/*/*/white_list_*", "/home/*/*rc", "/home/*/.instack/install-undercloud.log", "/home/*/.tripleo", "/home/*/central/*.yaml", "/home/*/central/*/*.yaml", "/home/*/central/network/nic-configs/", "/home/*/composable_roles/*.yaml", "/home/*/composable_roles/*/*.yaml", "/home/*/composable_roles/network/nic-configs/", "/home/*/config-download/", "/home/*/dcn*/*.yaml", "/home/*/dcn*/*/*.yaml", "/home/*/dcn*/network/nic-configs/", "/home/*/inventory/group_vars/*.yml", "/home/*/openshift_deploy_logs/*.log", "/home/*/ostest/", "/home/*/ovb", "/home/*/overcloud_deploy.sh", "/home/*/overcloudrc*", "/home/*/playbooks_logs/*.log", "/home/*/robot/", "/home/*/shiftstackrc*", "/home/*/tempest*/*.log", "/home/*/tempest*/*.xml", "/home/*/tempest*/etc/*.conf", "/home/*/tempest*/saved_state.json", "/home/*/tempest*/.stestr/", "/home/*/tripleo-heat-installer-templates/", "/home/*/undercloud-ansible-*", "/home/*/undercloud-install-*.tar.bzip2", "/home/*/virt", "/home/*/tripleo-deploy/", "/home/*/overcloud-deploy/", "/home/*/templates/", "/root/", "/usr/share/ceph-osd-run.sh", "/usr/share/openstack-tripleo-heat-templates", "/var/lib/cloud/", "/var/lib/config-data/", "/var/lib/config-data/puppet-generated/", "/var/lib/config-data/puppet-generated/neutron/etc/neutron/plugins/", "/var/lib/container-puppet/", "/var/lib/docker-puppet", "/var/lib/heat-config", "/var/lib/libvirt/qemu/*.log", "/var/lib/mistral/", "/var/lib/neutron/", "/var/lib/openvswitch/ovn/*.db", "/var/lib/tripleo-config", "/var/log/", "/var/log/containers/opendaylight", "/var/log/extra/containers/", "/var/log/extra/podman/containers", "/var/run/heat-config", "/var/tmp/packstack", "rally-dir/*.html", "rally-dir/*.log", "rally-dir/*.txt", "rally-dir/*.xml", ] def artcl_collect_default_list_str = artcl_collect_default_list.join(',') def artcl_exclude_default_list = [ "'.*'", "/etc/pki/*", "/etc/selinux/targeted/*", "/root/*.initrd*", "/root/*.tar*", "/root/*.vmlinuz*", "/root/*.qcow*", "/udev/hwdb.bin", "/var/lib/config-data/*/etc/puppet/modules", "/var/lib/config-data/*/etc/selinux/targeted/*", "/var/log/journal/*", ] def artcl_exclude_default_list_str = artcl_exclude_default_list.join(',') if ("" != "") { artcl_CLI_options = "" } else { artcl_CLI_options = artcl_CLI_options_common } if ("" != "") { artcl_CLI_options += " " } if ("" != "") { artcl_CLI_options += " --openstack_nodes " } else { artcl_CLI_options += " --openstack_nodes all:!localhost:!hypervisor" } if ("" != "") { artcl_CLI_options += " --collect_log_types " } else { artcl_CLI_options += " --collect_log_types container,monitoring,network,openstack,system" } if ("" != "") { artcl_CLI_options += " --artcl_collect_list " } else { artcl_CLI_options += " --artcl_collect_list $artcl_collect_default_list_str" } if ("" != "") { artcl_CLI_options += " --artcl_collect_list_append " } if ("" != "") { artcl_CLI_options += " --artcl_exclude_list " } else { artcl_CLI_options += " --artcl_exclude_list $artcl_exclude_default_list_str" } if ("" != "") { artcl_CLI_options += " --artcl_exclude_list_append " } def collect_dir = "$WORKSPACE/logs" artcl_CLI_options = "--local_working_dir '${ir_venv}' --artcl_collect_dir '$collect_dir' " + artcl_CLI_options artcl_CLI_hypervisor_collect_options = "--local_working_dir '${ir_venv}' --artcl_collect_dir '${collect_dir}' " + artcl_CLI_hypervisor_collect_options artcl_CLI_hypervisor_collect_options += " --artcl_commands $artcl_commands_hypervisor_list_str" artcl_CLI_hypervisor_collect_options += " --artcl_collect_list '$artcl_collect_hypervisor_list_str'" def copy_artifact_files = "mkdir -p ${collect_dir};\nshopt -s globstar;\n"; for(artifact_pattern in ArtclCollectList.instance) { fail_if_missing = artifact_pattern.allowEmptyArchive ? ' || true;' : ';' copy_artifact_files += "cp --parents -r ${artifact_pattern.artifacts} ${collect_dir} ${fail_if_missing}\n" } CIResourceCheck(CIResourcesThisBuild + logserver_dest + [resource_from_url(PLUGIN_REPO)]) try { String workspaceExportCopyKeys = "true".toBoolean() ? "-K" : "" sh2 """ . ${ir_venv}/bin/activate unset ANSIBLE_FORCE_COLOR pushd "$WORKSPACE/infrared" # try exporting workspace, in case of issue need to still continue with log collection infrared workspace export ${workspaceExportCopyKeys} -f workspace || true printenv | sort &> env.log pushd "$WORKSPACE" # gather all files we want from jenkins slave into collect_dir for artcl to pick them up ${copy_artifact_files} pushd "$WORKSPACE/infrared" infrared plugin add $PLUGIN_REPO --src-path infrared_plugin infrared plugin list export ANSIBLE_ROLES_PATH="$WORKSPACE/infrared/plugins" export ANSIBLE_LOG_PATH="$WORKSPACE/infrared/collect.log" # do not cause failure here see RHOSINFRA-3205 set +eo pipefail infrared ansible-role-collect-logs --disable_artifacts_cleanup true ${artcl_CLI_options} echo "Collecting logs from hypervisor" infrared ansible-role-collect-logs --disable_artifacts_cleanup true ${artcl_CLI_hypervisor_collect_options} artcl_exit=\$? ### Find known big issues (oom,segfault,selinux) in logs ### ... count them and print as build marks pushd "${collect_dir}" # do not cause failure if we have no-hits or missing files here set +eo pipefail findcat() { while read F; do if [[ "\$F" =~ .gz\$ ]]; then zcat "\$F" else cat "\$F" fi done < <(find . "\$@") # all params are passed to find } # here we use sed to strip all pid/uid and such numbers in an attempt of counting # just (almost) unique avc denials (not just 8000 of repeated ones) # # (there some numbered contexts (e.g. subj=...,c96,c442) which so far seem to have no info, # still resulting in just single audit2allow entry, so stripping those too SELINUX=\$(findcat -maxdepth 5 -path '*/var/log/audit/audit*' | \ grep -i denied | \ sed -r 's/(audit\\(|(\\S+id|ino|ses)=|[,.]c)[0-9.:]+/\\1.../g' | \ sort -u | \ wc -l) OOM=\$(findcat -maxdepth 4 -path '*/var/log/messages*' | \ grep -i oom-killer | \ wc -l) SEGFAULT=\$(findcat -maxdepth 4 -path '*/var/log/messages*' | \ grep -i segfault | \ wc -l) set +x # split to separate strings so that presence of code in console is not matched as mark itself echo "Build" "mark: selinux_problems_found=\$SELINUX" echo "Build" "mark: oom_killer_problems_found=\$OOM" echo "Build" "mark: segfault_problems_found=\$SEGFAULT" echo "rsyncing logs to ${ARTIFACTS_SERVER_DEST}/" du -sh ${collect_dir}/* # Copy console logs from all stages to the collect_dir mkdir -p ${collect_dir}/console_logs cp -r $WORKSPACE/.sh/* ${collect_dir}/console_logs # Publish logs ssh ${logserver_user}@${logserver_dest} mkdir -p "${ARTIFACTS_SERVER_DEST_PATH}" rsync -av --quiet ${collect_dir}/* "${ARTIFACTS_SERVER_DEST}/" echo "rsyncing logs finished" # Symlink README located in the collect server to this build log directory ssh ${logserver_user}@${logserver_dest} 'ln -s /rhos-infra/rhos-logs-readme.html ${ARTIFACTS_SERVER_DEST_PATH}/README.html' exit \$artcl_exit """, basename: 'collect-artcl.log', echoScript: false, maxLines: -1 } finally { buildMarks() // NOTE: in case we have failure in log-collection we loose these, so log collection should be in try-block and this in it's finally } } // endOf timeout() } // enfOfStage Collect logs } def stage_finally_post() { /** * Post build actions. * Delete virtual environment when build is done. * **/ try { if (ir_venv?.size() > 6) { if (env.DISABLE_CLEANUP == "true") { // hides confusing error related to ssh sockets sh "mv -f $ir_venv $WORKSPACE/ 2>&1 >> cleanup.log || true" } else { sh "rm -rf $ir_venv" } } } catch(Exception e) { echo 'WARN: [CI] Failed to move $ir_venv to WORKSPACE: ' + e.getMessage() sh "rm -rf $ir_venv" // housekeeping failures should not affect builds status // currentBuild.result = 'UNSTABLE' // could only downgrade finally_errors.add(['name': 'venv_cleanup', 'message': e.getMessage(), 'fail_build': false]) } /** * Post build actions. * Delete workspace when build is done. * */ dir('infrared') { try { // Do cleanup only when running on production if (env.JENKINS_URL.contains("rhos-ci-jenkins.lab.eng.tlv2.redhat.com")) { step([$class: 'WsCleanup']) } } catch(Exception e) { echo 'Failed to clean workspace ' + e.getMessage() finally_errors.add(['name': 'workspace_cleanup', 'message': e.getMessage(), 'fail_build': false]) } } /** * Send mail report at the end of the job. * Attachments could be added. * * @jjb_param {email_recipients} Email recipients address. Separate by comma. * @jjb_param {email_reply_addr} Email replyto address. * @jjb_param {email_attach} Email attachments. Separate multiple attachments by comma. * */ if (env.SKIPMAIL == 'true') { echo "Skipping email report due to SKIPMAIL parameter." } else if (currentBuild.result == 'ABORTED') { echo "Skipping email report as this build is ABORTED." } else if ('' == '') { echo "Skipping email report as no recipients provided." } else { emailext ( from: 'rhos-ci-jenkins@redhat.com', to: '', replyTo: 'noreply@redhat.com', subject: "Jenkins Job - " + currentBuild.currentResult + " - $env.JOB_NAME", body: """The $env.JOB_NAME job finished with """ + currentBuild.currentResult + """ status.

The job could be found at: $env.BUILD_URL

For more info, look for the attachments.

""", mimeType: 'text/html', attachmentsPattern: '', attachLog: true, compressLog: true ) } } pipeWrapper() { if ( env.IR_PROVISION_HOST && env.NODE_NAME == 'qe-virthost-cpu' ) { if ( env.IR_PROVISION_HOST.endsWith('brq.redhat.com') || env.IR_PROVISION_HOST.endsWith('brq2.redhat.com') ) { env.NODE_NAME = 'qe-generic && brq2' addMark("NODE_NAME_OVERRIDE: qe-generic && brq2", "pink") } else if ( env.IR_PROVISION_HOST.endsWith('rdu2.redhat.com') ) { env.NODE_NAME = 'qe-generic && rdu2' addMark("NODE_NAME_OVERRIDE: qe-generic && rdu2", "pink") } else if ( env.IR_PROVISION_HOST.endsWith('tlv.redhat.com') || env.IR_PROVISION_HOST.endsWith('tlv2.redhat.com') ) { env.NODE_NAME = 'qe-generic && tlv2' addMark("NODE_NAME_OVERRIDE: qe-generic && tlv2", "pink") } } node2(env.NODE_NAME ?: 'dfg-upgrades || qe-virthost-cpu') { pickMirror() if (env.TAGS ?: '') { addMark("TAGS: " + env.TAGS, "LightGreen") } addMark("SLAVE: " + env.NODE_NAME, "orange") try { // timeout should inside node to avoid timing out queued jobs job_exec_timeout = env.JOB_TIMEOUT ?: '360' timeout(Integer.parseInt(job_exec_timeout)) { step([$class: 'WsCleanup']) prefix=sh(returnStdout: true, script: 'echo $(echo -n ' + env.JOB_NAME + env.BUILD_NUMBER + ' | md5sum | /bin/cut -f1 -d" ")-').trim() ir_venv=sh(returnStdout: true, script: 'mktemp -p /tmp -d ir-venv-XXXXXXX').trim() uc_type=sh(returnStdout: true, script: 'echo "uc-full-deploy"').trim() host=sh(returnStdout: true, script: 'echo ${IR_PROVISION_HOST:-$(hostname)}').trim() if ('True'.toBoolean()) { CIResourcesThisBuild.add(host) CIResourceCheck(CIResourcesThisBuild) } stage_before_try() // set IS_CVP env. variable at the start of the job so we can check it later, during infrared run // it's used for verification if all the pieces (i.e.: container image overrides) has been properly passed to infrared tripleo-overcloud command env.IS_CVP = 'False'.toBoolean() // in case some CI jobs don't set IGNORE_CVP_FAILSAFES build param at all, we need to set it to false (safe choice) by default // so the rest of the job (groovy/compact shell scripts) will not complain that this env variable is undefined env.IGNORE_CVP_FAILSAFES = env.IGNORE_CVP_FAILSAFES ? env.IGNORE_CVP_FAILSAFES.toBoolean() : false log "env.IGNORE_CVP_FAILSAFES: " + env.IGNORE_CVP_FAILSAFES, level: "DEBUG" log "{is_cvp|}.toBoolean(): " + 'False'.toBoolean(), level: "DEBUG" log "env.IS_CVP.toBoolean(): " + env.IS_CVP.toBoolean(), level: "DEBUG" log "overcloud_container_images_urls: " + overcloud_container_images_urls, level: "DEBUG" if (env.JOB_NAME =~ /^cvp-.*/ && ! env.IGNORE_CVP_FAILSAFES.toBoolean() && ! env.IS_CVP) { error("this job is a CVP one (its name starts with 'cvp-') hence it should have the jbb param 'is_cvp' set to true, " + "please correct that in the job's configuration") } // the check for overcloud_container_images_urls is performed here at the start of the job to fail fast if this variable is empty // it's also checked later, during tripleo-overcloud stage, to make sure it's passed there correctly too if (env.IS_CVP.toBoolean()) { if (overcloud_container_images_urls ==~ /.*\w+.*/) { log "the value of overcloud_container_images_urls will be used for container image URLs override of tripleo overcloud", level: "WARN" } else { if (! env.IGNORE_CVP_FAILSAFES.toBoolean()) { error("this job is a CVP one (jjb param 'is_cvp: true') hence overcloud_container_images_urls can't be empty; " + "make sure you provided the properly formatted CI_MESSAGE and run parse_ci_message() groovy method as part of the job") } } if (env.IGNORE_CVP_FAILSAFES.toBoolean()) { log "IGNORE_CVP_FAILSAFES=true, " + "the results of this build may be affected by it", level: "WARN" } } sh 'set|grep -v -E "^(BASHOPTS|BASH_VERSINFO|EUID|PPID|SHELLOPTS|UID)=.*" > $WORKSPACE/.envrc' archiveArtifacts artifacts: '.envrc' try { stage_inside_try_pre() stage_inside_try() stage_inside_try_post() stage_inside_try_2_pre() stage_inside_try_2() stage_inside_try_2_post() } catch (Exception ex) { currentBuild.result = 'FAILURE' log "Oops! Something went wrong.\n\t${ ex }", level: 'ERROR' stage_ex = ex stage_catch() throw stage_ex; } finally { stage_finally_dont_wrap() stage2('Finally Steps') { stage_finally_upload_test_results() stage_finally_post_processing_triggers() stage_finally_pre() stage_finally() stage_finally_post() def finally_fatal_errors = '' for (finally_step in finally_errors) { if (finally_step['fail_build'] ?: false) { finally_fatal_errors += "${finally_step['name']} ${finally_step['message']} \n" } else { println "Warning: We had failure(s) in finally stage: ${finally_step['name']} ${finally_step['message']} \n" } } if (finally_fatal_errors != '' || stage_ex != null || env.STAGE_FATAL_EX != '') { try { pipeline_stage_ex = env.STAGE_FATAL_EX ?: stage_ex.getMessage() } catch (Exception ex) { pipeline_stage_ex = '' log "IR-Try-Finally: Unable to get exception message\n" + "finally_fatal_errors: ${ pipeline_stage_ex }\n" + "stage_ex: ${ stage_ex }\n" + "env.STAGE_FATAL_EX: ${ env.STAGE_FATAL_EX }", level: "WARN" } currentBuild.result = 'FAILURE' if (pipeline_stage_ex != '') { error "We had fatal failure(s) in finally stage(s): \n ${finally_fatal_errors}" } else if (finally_fatal_errors == '') { error "Job failed with exception: \n ${pipeline_stage_ex}" } else { error "\n Job stage failed with exception: \n ${ pipeline_stage_ex } \n We had fatal failure(s) in finally stage(s): \n ${ finally_fatal_errors } \n " } } } // stage2 } // finally } // timeout } // try catch (org.jenkinsci.plugins.workflow.steps.FlowInterruptedException ex) { if (ex.causes.size() > 0) { def ex_cause = ex.causes[0] if (ex_cause instanceof org.jenkinsci.plugins.workflow.steps.TimeoutStepExecution.ExceededTimeout) { log "Job execution timeout (${ job_exec_timeout } minutes) expired.", level: 'ERROR' } else if (ex_cause instanceof jenkins.model.CauseOfInterruption.UserInterruption) { log "Job aborted by '${ ex_cause.getUserId() }' (${ ex_cause.getUser().getDisplayName() }).", level: 'WARNING' } } throw ex } // end catch } // node } // pipeWrapper