#!/bin/sh
#
# This script runs command to help diagnose a problem with Rudder or obtain a overall health status

## VARIABLES
############

# Timestamp suffix shared by every artifact produced by this run
RUN_DATE="$(date +%Y-%m-%d-%H%M%S)"
# Rudder installation (binaries/config) and data directories
RUDDER_OPT="/opt/rudder"
RUDDER_VAR="/var/rudder"
# Where collected debug information is assembled and archived
# NOTE(review): COLLECT_DIR and OUTPUT_FILE are not used in this part of
# the file — presumably consumed by the archiving code further down
OUTPUT_DIR="/var/rudder/debug/info"
COLLECT_DIR="collect-${RUN_DATE}"
OUTPUT_FILE="${OUTPUT_DIR}/debug-info-$(hostname)-${RUN_DATE}.tar.gz"

## LOADER
#########

# Rudder API related utility functions

# Call the local Rudder API and extract a single JSON field value.
#   ${1} - API token, ${2} - endpoint path, ${3} - field name to extract
apiutils_call_api() {
  _token="${1}"
  _endpoint="${2}"
  _field="${3}"
  # The sed expression pulls the first "field": "value" pair out of the
  # JSON answer (crude, but avoids a jq dependency)
  curl -s -k -H "X-API-Token: ${_token}" "https://localhost/rudder/api/latest/${_endpoint}" \
    | sed -n "s%.*\"${_field}\": \"\([^\"]*\).*%\1\n%p"
}
# Run cf-promises on the promises file given as ${2}, reporting results
# under the node name given as ${1}. When the "time" utility is available,
# the run is benchmarked and a warning is raised above 10 seconds.
# Sets CFENGINE_CFPROMISES='ko' on validation failure.
cfengineutils_test_cfpromises() {
  # First we need to make sure time is there.
  # (Fix: the former TIME_PRESENT variable was assigned but never used;
  # only the exit status of "which" matters.)
  if which time > /dev/null 2>&1
  then
    # Time is present, we can benchmark the speed of the cf-promises

    # Hack to outsmart bash :
    #   When it sees a command starting with time, it removes it, run it, then print the time.
    #   It allows it to print time out of a pipe, but that is exactly what we don't want here.
    #   Using a variable here just disable this detection.
    TIMECMD=time
    COMMAND_OUTPUT=`${TIMECMD} -p ${RUDDER_OPT}/bin/cf-promises "${2}" 2>&1`

    if [ "${?}" -ne 0 ]
    then
      test_failure "cf-promises on ${1} gave a non-zero return code" "${COMMAND_OUTPUT}"
      CFENGINE_CFPROMISES='ko'
    else

      # "time -p" appends real/user/sys as the last three lines of output;
      # keep the integer part of the "real" measurement
      CFPROMISES_RUN_TIME=`echo "${COMMAND_OUTPUT}" | tail -3 | head -1 | awk '{print $2}' | cut -d '.' -f 1`

      if [ "${CFPROMISES_RUN_TIME}" -gt 10 ]
      then
        test_warning "cf-promises on ${1} took more than 10s" "cfpromises took ${CFPROMISES_RUN_TIME}s to run, possible DNS problem ?"
      else
        test_success "cf-promises on ${1} ran successfully" "cfpromises took ${CFPROMISES_RUN_TIME}s to run"
      fi
    fi
  else
    # Fix: the skip message said the opposite of what it meant
    test_skipped "Time is not installed on this system" "can not measure cfpromises speed"
    COMMAND_OUTPUT=`${RUDDER_OPT}/bin/cf-promises "${2}" 2>&1`

    if [ "${?}" -ne 0 ]
    then
      test_failure "cf-promises on ${1} gave a non-zero return code" "${COMMAND_OUTPUT}"
      CFENGINE_CFPROMISES='ko'
    else
      test_success "cf-promises on ${1} ran successfully" "Unable to measure time (time is not installed)"
    fi
  fi

}

# Print "1" when the given string looks like a UUID, "0" otherwise.
# Accepts the UUID as ${1}; falls back to the global UUID variable for
# backward compatibility with existing callers.
cfengineutils_validate_uuid() {
  echo "${1:-${UUID}}" | grep -Ec '[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}' 2>/dev/null
}

# Write a framed section title into the log, through the log() helper
collect_logs_annex_header() {
  _rule="----------------------------------------------------------------------"
  log "ANNEX" "${_rule}"
  log "ANNEX" "${1}"
  log "ANNEX" "${_rule}"
}

# Collect useful debugging information
# Appends system, agent and package diagnostics to ${LOG_FILE}.
# NOTE(review): assumes LOG_FILE is set by the caller and that the
# collect_logs_annex_header/processutils_*/packageutils_* helpers are loaded.
collect_logs() {

  # System info

  ## df (for /, /var and /opt)
  collect_logs_annex_header "system: df"
  df -h|grep -E '(/|/var|/rudder|.*rudder.*|.*postgres.*|.*pgsql.*)$' >> "${LOG_FILE}" 2>&1

  ## Date and timezone
  # Prefer timedatectl (systemd) and fall back to date -R elsewhere
  collect_logs_annex_header "system: date and timezone"
  [ -x /bin/timedatectl ] && /bin/timedatectl >> "${LOG_FILE}" 2>&1 || date -R >> "${LOG_FILE}" 2>&1

  ## cfengine processes
  collect_logs_annex_header "system: running cfengine processes"
  processutils_getcommand cf-execd >> "${LOG_FILE}" 2>&1
  processutils_getcommand cf-serverd >> "${LOG_FILE}" 2>&1
  processutils_getcommand cf-agent >> "${LOG_FILE}" 2>&1

  ## Java processes (Jetty)
  processutils_getcommand java >> "${LOG_FILE}" 2>&1

  ## OS, Java and Python version
  collect_logs_annex_header "system: OS, Java and Python versions"

  [ -e /etc/os-release ] && cat /etc/os-release >> "${LOG_FILE}" 2>&1 || cat /etc/issue >> "${LOG_FILE}" 2>&1
  type java >/dev/null 2>&1 && echo "java: `java -version 2>&1`" >> "${LOG_FILE}" 2>&1
  type python >/dev/null 2>&1 && echo "python: `python --version 2>&1`" >> "${LOG_FILE}" 2>&1

  ## Package versions
  collect_logs_annex_header "system: Installed rudder packages"
  packageutils_query rudder >> "${LOG_FILE}" 2>&1
  packageutils_query ncf >> "${LOG_FILE}" 2>&1

  # Rudder agent info
  collect_logs_annex_header "agent info command"
  type rudder >/dev/null 2>&1 && rudder agent info >> "${LOG_FILE}" 2>&1

  collect_logs_annex_header "agent library linking info"
  ldd ${RUDDER_OPT}/bin/cf-agent >> "${LOG_FILE}" 2>&1

  collect_logs_annex_header "agent core info"
  echo "policy_server.dat: `cat ${RUDDER_VAR}/cfengine-community/policy_server.dat  2>/dev/null`" >> "${LOG_FILE}" 2>&1
  echo "rudder_promises_generated: `cat ${RUDDER_VAR}/cfengine-community/inputs/rudder_promises_generated 2>/dev/null`" >> "${LOG_FILE}" 2>&1
  echo "rudder-promises-generated: `cat ${RUDDER_VAR}/cfengine-community/inputs/rudder-promises-generated 2>/dev/null`" >> "${LOG_FILE}" 2>&1

  collect_logs_annex_header "active techniques"
  # Technique directories end in a version number such as "1.0"
  find ${RUDDER_VAR}/cfengine-community/inputs -type d | sed "s%^${RUDDER_VAR}/cfengine-community/inputs/%%" | grep -E "[0-9]\.[0-9]$" >> "${LOG_FILE}" 2>&1

  collect_logs_annex_header "agent output directory"
  # NOTE(review): hard-codes /var/rudder instead of ${RUDDER_VAR} — verify
  echo "There are `ls -l /var/rudder/cfengine-community/outputs/ | wc -l` files in the outputs directory" >> "${LOG_FILE}"
  echo "The five biggest ones are:" >> "${LOG_FILE}"
  du -ks /var/rudder/cfengine-community/outputs/* 2>/dev/null | sort -n | tail -5 >> "${LOG_FILE}" 2>&1

  if [ -d "/opt/rudder/share/plugins" ]
  then
    collect_logs_annex_header "installed rudder plugins"
    rudder package list --format=json >> "${LOG_FILE}" 2>&1
  fi

}

# Replace a secret inside a file by its SHA-512 hex digest, in place.
#   ${1} - perl regex with two capture groups: (prefix)(secret)
#   ${2} - file to rewrite
hash_secret() {
  _pattern="${1}"
  _target="${2}"

  # The /e modifier evaluates the replacement: keep group 1 as-is and
  # substitute group 2 by sha512_hex(group 2)
  perl -MDigest::SHA=sha512_hex -pe "s/${_pattern}/\$1.sha512_hex\$2/e" "${_target}" > "${_target}.hashed"
  mv "${_target}.hashed" "${_target}"
}

collect_files() {
  # Copy logs and configuration files into the directory given as ${1}.
  # On a root server (CURRENT_MACHINE_TYPE=server — set elsewhere, TODO
  # confirm), also collects webapp/apache/slapd logs, and replaces the
  # secrets found in the copied configuration files by their SHA-512 hash.

  # Create the target directory
  TARGET_DIRECTORY="${1}"
  mkdir -p "${TARGET_DIRECTORY}"/

  # Copy the test log
  cp "${LOG_FILE}" "${TARGET_DIRECTORY}"

  # Gather rudder specific information
  mkdir -p "${TARGET_DIRECTORY}/rudder/"

  ## full outputs file list
  ls -hal /var/rudder/cfengine-community/outputs/ >> "${TARGET_DIRECTORY}/rudder/outputs.log" 2>/dev/null

  # Backup cfengine logs
  mkdir -p "${TARGET_DIRECTORY}/cfengine/"
  cp "${RUDDER_VAR}/cfengine-community/outputs/previous" "${TARGET_DIRECTORY}/cfengine/" 2>/dev/null

  # Backup syslog configuration
  mkdir -p "${TARGET_DIRECTORY}/syslog/"
  cp -r /etc/*syslog* "${TARGET_DIRECTORY}/syslog/" 2>/dev/null

  # Server statistics
  if [ "${CURRENT_MACHINE_TYPE}" = "server" ]
  then

    ## statistics script output
    ${RUDDER_OPT}/bin/rudder-metrics-reporting -v > "${TARGET_DIRECTORY}/rudder/rudder-metrics-reporting.json" 2>/dev/null

    ## Backup Rudder configuration
    cp /etc/default/rudder-jetty "${TARGET_DIRECTORY}/rudder/" 2>/dev/null
    cp ${RUDDER_OPT}/etc/logback.xml "${TARGET_DIRECTORY}/rudder/" 2>/dev/null
    cp ${RUDDER_OPT}/etc/*.properties "${TARGET_DIRECTORY}/rudder/" 2>/dev/null
    cp ${RUDDER_OPT}/etc/rudder-networks* "${TARGET_DIRECTORY}/rudder/" 2>/dev/null


    # Hash the passwords
    # This is ad-hoc, as most files/keys differ
    hash_secret "^(ldap.authpw=)(.*)$" "${TARGET_DIRECTORY}/rudder/rudder-web.properties"
    hash_secret "^(rudder.jdbc.password=)(.*)$" "${TARGET_DIRECTORY}/rudder/rudder-web.properties"
    hash_secret "^(rudder.webdav.password=)(.*)$" "${TARGET_DIRECTORY}/rudder/rudder-web.properties"
    hash_secret "(.*rudder.auth.admin.password=)(.*)$" "${TARGET_DIRECTORY}/rudder/rudder-web.properties"
    hash_secret "^(rudder.auth.ldap.connection.bind.password=)(.*)$" "${TARGET_DIRECTORY}/rudder/rudder-web.properties"

    if [ -f "${TARGET_DIRECTORY}/syslog/rsyslog.d/rudder.conf" ]; then
      # The capture in hash_secret swallows the ";RudderReportsFormat" suffix
      hash_secret "^(.*:ompgsql:.*,rudder,rudder,)(.*);RudderReportsFormat$" "${TARGET_DIRECTORY}/syslog/rsyslog.d/rudder.conf"
      # add back the rest of line lost by previous line
      sed -i '/^.*ompgsql:.*,rudder,rudder/ s/$/;RudderReportsFormat/' "${TARGET_DIRECTORY}/syslog/rsyslog.d/rudder.conf"
    fi

    # Backup apache logs
    mkdir -p "${TARGET_DIRECTORY}/apache/"
    cp /var/log/rudder/apache2/*log /var/log/rudder/apache2/*log.1 "${TARGET_DIRECTORY}/apache/" 2>/dev/null

    # Backup webapp logs (most recent file only)
    mkdir -p "${TARGET_DIRECTORY}/webapp/"
    cp /var/log/rudder/webapp/$(ls -tr /var/log/rudder/webapp|tail -1) "${TARGET_DIRECTORY}/webapp/" 2>/dev/null

    # Backup slapd logs
    mkdir -p "${TARGET_DIRECTORY}/slapd/"
    cp /var/log/rudder/ldap/slapd.log /var/log/rudder/ldap/slapd.log.1 "${TARGET_DIRECTORY}/slapd/" 2>/dev/null

  fi

}

# File logging functions

# Append "<LEVEL>: <message> (details)" to ${LOG_FILE}.
# Logging is disabled entirely when NO_LOG is set.
log() {
  if [ -z "${NO_LOG}" ]
  then
    echo "${1}: ${2}`output_details ${3}`" >> "${LOG_FILE}"
  fi
}
# Common output formatting

# Return a value like " (value)" if a string is given, and nothing if the
# argument is empty. Details are only printed in verbose mode (VERBOSE=1).
output_details() {
  # Fix: pass the text as a printf argument, not as the format string, so
  # that a "%" in the details can no longer break or truncate the output
  [ -n "${*}" ] && [ "${VERBOSE}" = "1" ] && printf " (%s)" "${*}"
}
# Utilities to query package information on the machine

# Query package info (version and such, depending on the used PM)
packageutils_query() {
  if type rpm >/dev/null 2>&1
  then
    # RPM based system
    rpm -qa "${1}*" 2>/dev/null
  elif type dpkg >/dev/null 2>&1
  then
    # DPKG based system; drop the header lines of "dpkg -l"
    dpkg -l "${1}*" 2>/dev/null | tail -n +6
  fi
}

# Print the installed version of package ${1}, or nothing when absent
packageutils_query_version() {
  if type rpm >/dev/null 2>&1
  then
    # RPM: the grep keeps only real version strings (they start with a digit)
    rpm -q --queryformat '%{version}\n' "${1}" 2>/dev/null | grep -E "^[0-9]"
  elif type dpkg >/dev/null 2>&1
  then
    # DPKG: skip headers, keep installed ("ii") lines, print the version column
    dpkg -l "${1}"  2>/dev/null | tail -n +6 | grep -E "^ii" | awk '{print $3}' 2>/dev/null
  fi
}

# Print the dpkg-style installation status of package ${1}:
# "ii" (or the real dpkg status) when installed, "un" otherwise
packageutils_query_package_status() {
  if type rpm >/dev/null 2>&1
  then
    # RPM has no status column: derive one from the version query
    STATUS=`rpm -q --queryformat '%{version}\n' "${1}" 2>/dev/null | grep -E "^[0-9]"`
    if [ -n "${STATUS}" ]
    then
      echo "ii"
    else
      echo "un"
    fi
  elif type dpkg >/dev/null 2>&1
  then
    STATUS=`dpkg -l "${1}" 2>/dev/null | tail -n +6 | awk '{print $1}' 2>/dev/null`
    if [ -n "${STATUS}" ]
    then
      echo "${STATUS}"
    else
      echo "un"
    fi
  fi
}
processutils_matchandsanitize() {
  # Print the ps line(s) matching pattern ${1}, with runs of whitespace
  # squeezed to single spaces. Picks a ps invocation suited to the
  # virtualization in use (OpenVZ, UTS namespaces, or plain Unix).

  # Detect the correct ps tool to use
  # ns is non-empty only when ps supports the utsns column (namespace-aware)
  ns=`ps --no-header -o utsns --pid $$ 2>/dev/null || true`
  if [ -e "/proc/bc/0" ]; then # we have openvz
    if [ -e /bin/vzps ]; then # we have vzps
      PS_COMMAND="/bin/vzps -E 0 -ef"
    else # use rudder provided vzps
      PS_COMMAND="${RUDDER_OPT}/bin/vzps.py -E 0 -ef"
    fi
  elif [ -n "${ns}" ]; then # we have namespaces
    # the sed is here to prepend a fake user field that is removed by the -o option (it is never used)
    # NOTE(review): the comment mentions a sed/-o pair that no longer
    # appears in this command — confirm the grep-only filter is intended
    PS_COMMAND="eval ps --no-header -e -O utsns,tty,uid | grep -E '^[[:space:]]*[[:digit:]]*[[:space:]]+${ns}'"
  else # standard unix
    PS_COMMAND="ps -ef"
  fi

  # On all processes, grep the first entry matching "something" (excluding grep itself) and return the result
  # Sample output: "kegerun+  6045  5566  5 mai12 ?        01:21:15 /usr/bin/firefox"
  ${PS_COMMAND} | grep "${1}" | grep -v grep | sed -re 's,\s+, ,g'

}

# Get matching process(es) running command
processutils_getcommand() {
  # Flatten the matching ps line(s) onto one line (unquoted echo joins
  # them with spaces) and keep fields 8 onwards, i.e. the command line
  _matched=`processutils_matchandsanitize "${1}"`
  echo ${_matched} | cut -d ' ' -f 8-
}
# Terminal ANSI colours and font styles

## Colors configuration (enable colors only if stdout is a terminal)
## When stdout is not a tty or NO_COLORS is set, all of these variables
## stay unset and expand to nothing in the output helpers.
if [ -t 1 ] && [ -z "${NO_COLORS}" ]; then

  # Escape sequence and resets
  ESC_SEQ="\033["
  RESET="${ESC_SEQ}0m"

  # Foreground colours
  BLACK="${ESC_SEQ}30;1m"
  RED="${ESC_SEQ}31;1m"
  GREEN="${ESC_SEQ}32;1m"
  YELLOW="${ESC_SEQ}33;1m"
  BLUE="${ESC_SEQ}34;1m"
  MAGENTA="${ESC_SEQ}35;1m"
  CYAN="${ESC_SEQ}36;1m"
  WHITE="${ESC_SEQ}37;1m"
  BR_BLACK="${ESC_SEQ}90;1m"
  BR_RED="${ESC_SEQ}91;1m"
  BR_GREEN="${ESC_SEQ}92;1m"
  BR_YELLOW="${ESC_SEQ}93;1m"
  BR_BLUE="${ESC_SEQ}94;1m"
  BR_MAGENTA="${ESC_SEQ}95;1m"
  BR_CYAN="${ESC_SEQ}96;1m"
  BR_WHITE="${ESC_SEQ}97;1m"

  # Background colours (optional)
  # NOTE(review): unlike the foreground colours, the values below (and the
  # font styles, except *_RESET) are missing the "${ESC_SEQ}" prefix and
  # would print as literal text if used directly — confirm whether callers
  # prepend the escape sequence themselves before "fixing" this
  BG_BLACK="40;1m"
  BG_RED="41;1m"
  BG_GREEN="42;1m"
  BG_YELLOW="43;1m"
  BG_BLUE="44;1m"
  BG_MAGENTA="45;1m"
  BG_CYAN="46;1m"
  BG_WHITE="47;1m"

  # Font styles
  REGULAR="0m"
  BOLD="1m"
  BOLD_RESET="${ESC_SEQ}21m"
  UNDERLINE="4m"
  UNDERLINE_RESET="${ESC_SEQ}24m"
fi

# 60-character dotted padding used by term_padded below
PADDING=$(printf '%60s' | tr ' ' '.')

# Output functions

# Print a message, expanding backslash escapes such as the colour
# sequences defined above (hence %b rather than %s)
term_simple() {
  _message="${1}"
  printf '%b\n' "${_message}"
}

# Print a test-result line: "<label>....... <status> (details)" padded to
# 70 characters. ${1} - status text, ${2} - label, ${3} - optional details.
term_padded() {
  # Fix: use %b instead of passing user text as the printf format string —
  # this keeps the backslash-escape (colour code) expansion the original
  # relied on, while a "%" in the text can no longer corrupt the output
  printf '%b' "${2}${PADDING}" | head -c 70
  printf ' %b\n' "${1}`output_details ${3}`"
}
# Test result counters, incremented by the test_* helpers below
# (warnings are counted together with failures)
SUCCESS_COUNT=0
FAILURE_COUNT=0
SKIPPED_COUNT=0

# Utility function to call in case of a simple info
test_info() {
  _label="${1}"
  _details="${2}"
  term_simple "${_label} ${_details}"
  log "INFO" "${_label}" "${_details}"
}

# Utility function to call when a test succeeded
test_success() {
  _label="${1}"
  _details="${2}"
  term_padded "${GREEN}OK${RESET}" "${_label}" "${_details}"
  log "SUCCESS" "${_label}" "${_details}"
  SUCCESS_COUNT=$((SUCCESS_COUNT + 1))
}

# Utility function to call when a test raises a warning
# (warnings are counted together with failures)
test_warning() {
  _label="${1}"
  _details="${2}"
  term_padded "${YELLOW}WARNING${RESET}" "${_label}" "${_details}"
  log "WARNING" "${_label}" "${_details}"
  FAILURE_COUNT=$((FAILURE_COUNT + 1))
}

# Utility function to call when a test is skipped
test_skipped() {
  _label="${1}"
  _details="${2}"
  term_padded "${MAGENTA}SKIPPED${RESET}" "${_label}" "${_details}"
  log "SKIPPED" "${_label}" "${_details}"
  SKIPPED_COUNT=$((SKIPPED_COUNT + 1))
}

# Utility function to call when a test failed
test_failure() {
  _label="${1}"
  _details="${2}"
  term_padded "${RED}ERROR${RESET}" "${_label}" "${_details}"
  log "FAILURE" "${_label}" "${_details}"
  FAILURE_COUNT=$((FAILURE_COUNT + 1))
}
##
# CFEngine cf-promises test
#

# Declare your tests here (KEEP "${TESTS}")
TESTS="${TESTS} cfengine_cfpromises"

# Describe your tests here
cfengine_cfpromises_description="Verifies that cf-promises runs successfully on all generated promises"

cfengine_cfpromises() {

  # By default, this test is successful
  CFENGINE_CFPROMISES='ok'

  # If the rudder share directory exists and we are allowed to run slow tests,
  # run cf-promises on all generated promises
  if [ -d "${RUDDER_VAR}/share" ] && [ "${SLOW_TESTS}" = "1" ]
  then
    for i in ${RUDDER_VAR}/share/*
    do
      # The node uuid is the last path component (more robust than the
      # former "cut -d / -f 5", which only worked for the default RUDDER_VAR)
      NODE="${i##*/}"
      cfengineutils_test_cfpromises "${NODE}" "${RUDDER_VAR}/share/${NODE}/rules/cfengine-community/promises.cf"
    done
  fi

  # Finally, return 1 if the test has failed somewhere, 0 otherwise
  # (fix: the former "[ ... ] && return 1" made the function return 1 on
  # the success path too)
  if [ "${CFENGINE_CFPROMISES}" = 'ko' ]
  then
    return 1
  fi
  return 0

}
#
# CFEngine cf-promises test (agent)
#

# Declare your tests here (KEEP "${TESTS}")
TESTS="${TESTS} cfengine_cfpromises_agent"

# Describe your tests here
cfengine_cfpromises_agent_description="Verify that cf-promises validates the local agent available promises"

cfengine_cfpromises_agent() {

  # By default, this test is successful
  CFENGINE_CFPROMISES_AGENT='ok'

  # cfengineutils_test_cfpromises reports failures through the
  # CFENGINE_CFPROMISES variable, not this test's own flag: reset it and
  # propagate its result (fix: the _AGENT flag could never become 'ko')
  CFENGINE_CFPROMISES=''

  # Validate the local promises set
  cfengineutils_test_cfpromises "localhost" "${RUDDER_VAR}/cfengine-community/inputs/promises.cf"

  [ "${CFENGINE_CFPROMISES}" = 'ko' ] && CFENGINE_CFPROMISES_AGENT='ko'

  # Finally, return 1 if the test has failed somewhere, 0 otherwise
  if [ "${CFENGINE_CFPROMISES_AGENT}" = 'ko' ]
  then
    return 1
  fi
  return 0

}
#
# CFEngine keys test
#

# Declare your tests here (KEEP "${TESTS}")
TESTS="${TESTS} cfengine_keys_agent"

# Describe your tests here
cfengine_keys_agent_description="Verify that the current cfengine key is usable"

cfengine_keys_agent() {

  # By default, this test is successful
  CFENGINE_KEYS_AGENT='ok'

  # Test if CFEngine keys are here
  if [ -e "${RUDDER_VAR}/cfengine-community/ppkeys/localhost.pub" ] && [ -e "${RUDDER_VAR}/cfengine-community/ppkeys/localhost.priv" ]
  then

    # Test if we can execute the signature.sh script
    if [ -e "${RUDDER_OPT}/bin/rudder-sign" ]
    then

      # Try to sign a test file
      TEST_FILE=`mktemp`
      COMMAND_OUTPUT=`${RUDDER_OPT}/bin/rudder-sign "${TEST_FILE}" && grep -qE "^digest=[a-z0-9]+$" "${TEST_FILE}.sign"`

      if [ "${?}" -eq 0 ]
      then
        test_success "CFEngine keys present and usable" "Signed ${TEST_FILE} successfully"
      else
        test_failure "CFEngine keys present but signature test failed" "Failed to sign ${TEST_FILE}: `cat ${TEST_FILE}.sign`"
        CFENGINE_KEYS_AGENT='ko'
      fi

      rm -f "${TEST_FILE}" "${TEST_FILE}.sign"

    else
      test_failure "Inventory signature utility is missing"
      # Fix: this used to set RUDDER_KEYS_AGENT, which was never read
      CFENGINE_KEYS_AGENT='ko'
    fi

  else
    test_failure "CFEngine keys are missing"
    # Fix: same RUDDER_KEYS_AGENT copy-paste issue as above
    CFENGINE_KEYS_AGENT='ko'
  fi

  # Finally, return 1 if the test has failed somewhere
  # (fix: this used to check the unrelated SKEL_SCENARIO variable and
  # returned 1 even on success)
  if [ "${CFENGINE_KEYS_AGENT}" = 'ko' ]
  then
    return 1
  fi
  return 0

}
#
# CFEngine LMDB size test (agent)
#

# Declare your tests here (KEEP "${TESTS}")
TESTS="${TESTS} cfengine_lmdb_size_agent"

# Describe your tests here
cfengine_lmdb_size_agent_description="Verify that CFEngine LMDB databases have correct sizes"

cfengine_lmdb_size_agent() {

  # By default, this test is successful
  CFENGINE_LMDB_SIZE_AGENT='ok'

  for i in state/cf_state.lmdb state/cf_lock.lmdb performance.lmdb cf_lastseen.lmdb
  do

    if [ -e "${RUDDER_VAR}/cfengine-community/${i}" ]
    then

      LMDB_FILESIZE=`stat -c "%s" "${RUDDER_VAR}/cfengine-community/${i}" 2>/dev/null`

      # If the LMDB database filesize exceeds 100000000 bytes (~100MB)
      if [ "${LMDB_FILESIZE}" -gt 100000000 ]
      then
        test_failure "LMDB database ${i} is too big" "Database size is ${LMDB_FILESIZE} bytes"
        # Fix: failures were previously never recorded in the flag
        CFENGINE_LMDB_SIZE_AGENT='ko'
      else
        test_success "LMDB database ${i} size is correct"
      fi
    else
      test_skipped "LMDB database ${i} is absent"
    fi

  done

  # Finally, return 1 if the test has failed somewhere
  # (fix: the flag name had a typo — CFENGINE_LMDB_SIZE__AGENT — so the
  # check could never trigger)
  if [ "${CFENGINE_LMDB_SIZE_AGENT}" = 'ko' ]
  then
    return 1
  fi
  return 0

}
#
# CFEngine server connection test (agent)
#

# Declare your tests here (KEEP "${TESTS}")
TESTS="${TESTS} cfengine_server_connection_agent"

# Describe your tests here
cfengine_server_connection_agent_description="Verify that cfengine can connect to its policy server"

cfengine_server_connection_agent() {

  # By default, this test is successful
  CFENGINE_SERVER_CONNECTION_AGENT='ok'

  # Test if netcat is available (under either of its common names)
  if type nc >/dev/null 2>&1
  then
    NETCAT="nc"
  elif type netcat >/dev/null 2>&1
  then
    NETCAT="netcat"
  else
    test_skipped "Skipping server connectivity tests (no nc)"
    return 0
  fi

  SERVER_HOST=`cat ${RUDDER_VAR}/cfengine-community/policy_server.dat 2>/dev/null`
  PORTS="443 5309"

  # CFEngine enterprise requires port 5308 also
  [ -x /var/cfengine/bin/cf-agent ] && PORTS="${PORTS} 5308"

  # Try to open every port in the list
  for i in ${PORTS}
  do

    COMMAND_OUTPUT=`${NETCAT} -v -z "${SERVER_HOST}" "${i}" 2>&1`

    if [ "${?}" -eq 0 ]
    then
      test_success "Connection to server on port ${i} successful"
    else
      # Connectivity problems are reported as warnings, not failures
      test_warning "Connection to server on port ${i} failed" "Output: ${COMMAND_OUTPUT}"
    fi

  done

  # Finally, return 1 if the test has failed somewhere, 0 otherwise
  # (fix: the former "[ ... ] && return 1" returned 1 even on success)
  if [ "${CFENGINE_SERVER_CONNECTION_AGENT}" = 'ko' ]
  then
    return 1
  fi
  return 0

}
#
# LDAP connection test
#

# Declare your tests here (KEEP "${TESTS}")
TESTS="${TESTS} ldap_connection"

# Describe your tests here
ldap_connection_description="Test the validity of the LDAP connection"

ldap_connection() {

  # By default, this test is successful
  LDAP_CONNECTION='ok'

  # Get all LDAP access credentials and parameters
  LDAP_USER="$(grep -E '^ldap.authdn[ \t]*=' ${RUDDER_OPT}/etc/rudder-web.properties | cut -d "=" -f 2-)"
  LDAP_PASSWORD="$(grep -E '^ldap.authpw[ \t]*=' ${RUDDER_OPT}/etc/rudder-web.properties | cut -d "=" -f 2-)"
  LDAP_SERVER="$(grep -E '^ldap.host[ \t]*=' ${RUDDER_OPT}/etc/rudder-web.properties | cut -d '=' -f 2-)"
  LDAP_PORT="$(grep -E '^ldap.port[ \t]*=' ${RUDDER_OPT}/etc/rudder-web.properties | cut -d '=' -f 2-)"

  # These strings are deliberately expanded unquoted later on, to be split
  # into ldapsearch arguments.
  # NOTE(review): -w puts the password in the process list; consider -y <file>
  LDAP_BIND_ANONYMOUS="-H ldap://${LDAP_SERVER}:${LDAP_PORT}/ -x"
  LDAP_BIND_RUDDER="${LDAP_BIND_ANONYMOUS} -D ${LDAP_USER} -w ${LDAP_PASSWORD}"

  # Am I using a local or remote LDAP server ?
  if [ "${LDAP_SERVER}" = "localhost" ] || echo "${LDAP_SERVER}" | grep -q "`hostname`"
  then

    # The server is local, test if the slapd process is active
    SLAPD_COMMAND_LINE=`processutils_getcommand /opt/rudder/libexec/slapd | head -1`

    if [ -z "${SLAPD_COMMAND_LINE}" ]
    then
      test_failure "LDAP server is local and slapd is dead" "no process matched"
      LDAP_CONNECTION='ko'
    else
      test_success "LDAP server is local and slapd is alive" "detected command: ${SLAPD_COMMAND_LINE}"
    fi

  else

    # The server is remote, try to ping it
    COMMAND_OUTPUT=`ping -q -c 1 "${LDAP_SERVER}" 2>&1`
    if [ "${?}" -eq 0 ]
    then
      test_success "LDAP server is remote, and a ping to \"${LDAP_SERVER}\" succeeded"
    else
      test_warning "LDAP server is remote, and a ping to \"${LDAP_SERVER}\" failed" "${COMMAND_OUTPUT}"
    fi

  fi

  # Try to get the naming context
  COMMAND_OUTPUT=`${RUDDER_OPT}/bin/ldapsearch ${LDAP_BIND_ANONYMOUS} -b "" -s base namingContexts -LLL 2>&1`

  if [ "${?}" -eq 0 ]
  then
    NAMING_CONTEXT=`echo ${COMMAND_OUTPUT} | sed 's%.*namingContexts: \(.*\)%\1%'`
    test_success "LDAP connection (anonymous) succeeded" "namingContexts returned ${NAMING_CONTEXT}"
  else
    test_failure "LDAP connection (anonymous) failed" "command output: ${COMMAND_OUTPUT}"
    LDAP_CONNECTION='ko'
  fi

  # Get the naming context logged in as rudder user
  COMMAND_OUTPUT=`${RUDDER_OPT}/bin/ldapsearch ${LDAP_BIND_RUDDER} -b "" -s base namingContexts -LLL 2>&1`

  if [ "${?}" -eq 0 ]
  then
    test_success "LDAP connection (rudder user) succeeded" "namingContexts returned ${NAMING_CONTEXT}"
  else
    test_failure "LDAP connection (rudder user) failed" "command output: ${COMMAND_OUTPUT}"
    LDAP_CONNECTION='ko'
  fi

  # Finally, return 1 if the test has failed somewhere, 0 otherwise
  # (fix: the former "[ ... ] && return 1" returned 1 even on success)
  if [ "${LDAP_CONNECTION}" = 'ko' ]
  then
    return 1
  fi
  return 0

}
#
# LDAP structure test
#

# Declare your tests here (KEEP "${TESTS}")
TESTS="${TESTS} ldap_structure"

# Describe your tests here
ldap_structure_description="Test the validity of the LDAP structure"

ldap_structure() {

  # By default, this test is successful
  LDAP_STRUCTURE='ok'

  # Run the ldap_connection test first if necessary
  # (it also sets LDAP_BIND_RUDDER and NAMING_CONTEXT, used below)
  if [ -z "${LDAP_CONNECTION}" ]
  then
    test_info "Running LDAP connection test first"
    test_info ""
    ldap_connection
  fi

  # Skip if the connection test failed
  if [ "${LDAP_CONNECTION}" != "ok" ]
  then
    test_skipped "LDAP connection test failed, aborting structure test"
  else

    # Get the amount of dns at the base of the LDAP structure as a logged in user
    COMMAND_OUTPUT=`${RUDDER_OPT}/bin/ldapsearch ${LDAP_BIND_RUDDER} -b "${NAMING_CONTEXT}" -s one -LLL 2>&1`
    BASE_DN_AMOUNT=`echo ${COMMAND_OUTPUT} |sed 's%\(\w*: \)%\n\1%g' | grep 'dn: ' | wc -l`

    # There should be 4 dns
    if [ "${BASE_DN_AMOUNT}" -eq 4 ]
    then
      test_success "LDAP structure looks good" "There are ${BASE_DN_AMOUNT} dn(s) at the namingContext base"
    else
      test_failure "LDAP structure seems incorrect" "${COMMAND_OUTPUT}"
      LDAP_STRUCTURE='ko'
    fi

  fi

  # Finally, return 1 if the test has failed somewhere, 0 otherwise
  # (fix: the former "[ ... ] && return 1" returned 1 even on success)
  if [ "${LDAP_STRUCTURE}" = 'ko' ]
  then
    return 1
  fi
  return 0

}
#
# PostgreSQL connection test
#

# Declare your tests here (KEEP "${TESTS}")
TESTS="${TESTS} postgresql_connection"

# Describe your tests here
postgresql_connection_description="Test the validity of the PostgreSQL connection"

postgresql_connection() {

  # By default, this test is successful
  POSTGRESQL_CONNECTION='ok'

  # Get all PostgreSQL access credentials and parameters
  # (server and port are both extracted from the JDBC url)
  SQL_SERVER="$(grep -E '^rudder.jdbc.url[ \t]*=' ${RUDDER_OPT}/etc/rudder-web.properties | cut -d '=' -f 2- | sed 's%^.*://\(.*\):\(.*\)/.*$%\1%')"
  SQL_PORT="$(grep -E '^rudder.jdbc.url[ \t]*=' ${RUDDER_OPT}/etc/rudder-web.properties | cut -d '=' -f 2- | sed 's%^.*://\(.*\):\(.*\)/.*$%\2%')"
  SQL_USER="$(grep -E '^rudder.jdbc.username[ \t]*=' ${RUDDER_OPT}/etc/rudder-web.properties | cut -d "=" -f 2-)"
  SQL_PASSWORD="$(grep -E '^rudder.jdbc.password[ \t]*=' ${RUDDER_OPT}/etc/rudder-web.properties | cut -d "=" -f 2-)"

  # psql reads the password from the environment, keeping it off the command line
  export PGPASSWORD="${SQL_PASSWORD}"

  PSQL="psql -q -h ${SQL_SERVER} -p ${SQL_PORT} -U ${SQL_USER}"

  # Am I using a local or remote PostgreSQL server ?
  if [ "${SQL_SERVER}" = "localhost" ] || echo "${SQL_SERVER}" | grep -q "`hostname`"
  then

    # The server is local, test if the PostgreSQL process is active
    POSTGRESQL_COMMAND_LINE=`processutils_getcommand postgres | head -1`

    if [ -z "${POSTGRESQL_COMMAND_LINE}" ]
    then
      test_failure "PostgreSQL server is local and PostgreSQL is dead" "no process matched"
      # Fix: this used to set LDAP_CONNECTION by mistake
      POSTGRESQL_CONNECTION='ko'
    else
      test_success "PostgreSQL server is local and PostgreSQL is alive" "detected command: ${POSTGRESQL_COMMAND_LINE}"
    fi

  else

    # The server is remote, try to ping it
    COMMAND_OUTPUT=`ping -q -c 1 "${SQL_SERVER}" 2>&1`
    if [ "${?}" -eq 0 ]
    then
      test_success "PostgreSQL server is remote, and a ping to \"${SQL_SERVER}\" succeeded"
    else
      test_warning "PostgreSQL server is remote, and a ping to \"${SQL_SERVER}\" failed" "${COMMAND_OUTPUT}"
    fi

  fi

  # Try to run a noop command on the server
  COMMAND_OUTPUT=`${PSQL} -t -d rudder -c "SELECT NULL;" 2>&1`

  if [ "${?}" -eq 0 ]
  then
    test_success "PostgreSQL connection succeeded"
  else
    test_failure "PostgreSQL connection failed" "${COMMAND_OUTPUT}"
    POSTGRESQL_CONNECTION='ko'
  fi

  # Finally, return 1 if the test has failed somewhere
  # (fix: this used to check the unrelated SKEL_SCENARIO variable and
  # returned 1 even on success)
  if [ "${POSTGRESQL_CONNECTION}" = 'ko' ]
  then
    return 1
  fi
  return 0

}
#
# PostgreSQL structure test
#

# Declare your tests here (KEEP "${TESTS}")
TESTS="${TESTS} postgresql_structure"

# Describe your tests here
postgresql_structure_description="Test the validity of the PostgreSQL structure"

postgresql_structure() {

  # By default, this test is successful
  POSTGRESQL_STRUCTURE='ok'

  # Run the connection test first if it did not already succeed
  # (it also defines the PSQL command used below)
  if [ -z "${POSTGRESQL_CONNECTION}" ] || [ "${POSTGRESQL_CONNECTION}" != "ok" ]
  then
    test_info "Running postgresql connection test first"
    test_info ""
    # Fix: the failure branch used to run in a subshell "( ... )", so the
    # POSTGRESQL_STRUCTURE='ko' assignment was silently lost
    if ! postgresql_connection
    then
      test_failure "postgresql connection test failed, aborting structure test"
      POSTGRESQL_STRUCTURE='ko'
    fi
  fi

  if [ "${POSTGRESQL_STRUCTURE}" = 'ok' ]
  then
    COMMAND_OUTPUT=`${PSQL} -t -d rudder -c "SELECT count(*) FROM information_schema.tables WHERE table_schema = 'public';" 2>&1`
    # Unquoted expansion strips the whitespace psql -t puts around the count
    TABLE_COUNT=`echo ${COMMAND_OUTPUT}`

    # Test if there are at least 5 tables in the database; the stderr
    # redirect guards against TABLE_COUNT being error text, not a number
    if [ "${TABLE_COUNT}" -gt 5 ] 2>/dev/null
    then
      test_success "PostgreSQL structure looks good" "${TABLE_COUNT} tables in the 'rudder' database"
    else
      test_failure "PostgreSQL structure seems incorrect" "${COMMAND_OUTPUT}"
      # Fix: structure failures were previously never recorded in the flag
      POSTGRESQL_STRUCTURE='ko'
    fi
  fi

  # Finally, return 1 if the test has failed somewhere, 0 otherwise
  if [ "${POSTGRESQL_STRUCTURE}" = 'ko' ]
  then
    return 1
  fi
  return 0

}
#
# Rudder ACL test
#

# Declare your tests here (KEEP "${TESTS}")
TESTS="${TESTS} rudder_acl"

# Describe your tests here
rudder_acl_description="Verify that there is no filesystem ACL on the configuration-repository"

rudder_acl() {

  # By default, this test is successful
  RUDDER_ACL='ok'

  # Test if ACL utilities are installed
  if type getfacl >/dev/null 2>&1
  then

    # Count the "default" ACL entries on the git object store
    COMMAND_OUTPUT=`getfacl /var/rudder/configuration-repository/.git/objects 2>/dev/null | grep -c default`

    # Test if default ACL are defined on the configuration-repository
    if [ "${COMMAND_OUTPUT}" -ne 0 ]
    then
      test_warning "default ACLs detected on configuration-repository" "${COMMAND_OUTPUT} default ACLs active"
    else
      test_success "No default ACLs detected on configuration-repository"
    fi

  else
    test_skipped "Will not test ACL on configuration-repository (getfacl absent)"
  fi

  # Finally, return 1 if the test has failed somewhere, 0 otherwise
  # (fix: the former "[ ... ] && return 1" returned 1 even on success)
  if [ "${RUDDER_ACL}" = 'ko' ]
  then
    return 1
  fi
  return 0

}
#
# Rudder API test
#

# Declare your tests here (KEEP "${TESTS}")
TESTS="${TESTS} rudder_api"

# Describe your tests here
rudder_api_description="Verify that the Rudder API responds"

rudder_api() {

  # By default, this test is successful
  RUDDER_API='ok'

  # -x '' bypasses any configured proxy for this local call
  API_STATUS=`/usr/bin/curl -s -x '' http://localhost:8080/rudder/api/status`

  # Test if the API responds with "OK"
  if [ "${API_STATUS}" = "OK" ]
  then
    test_success "API responded 'OK'"
  else
    test_failure "API did not respond 'OK'" "${API_STATUS}"
    RUDDER_API='ko'
    return 1
  fi

  # Finally, return 0 on success
  # (fix: the former "[ ... ] && return 1" returned 1 even on success)
  return 0

}
#
# Rudder cf_serverd_logs test
#
# This test allows to display the logs related to cf-serverd
# (the stray mid-file "#!/bin/sh" line has been removed)
#

# Declare your tests here (KEEP "${TESTS}")
TESTS="${TESTS} rudder_cf_serverd_logs"

# Describe your tests here
rudder_cf_serverd_logs_description="Display the last cf-serverd related errors from the system log"

rudder_cf_serverd_logs() {

  # By default, this test is successful
  RUDDER_CF_SERVERD_LOGS='ok'

  # For Debian|Ubuntu
  PATH1="/var/log/syslog"
  # For Redhat|Centos|Sles
  PATH2="/var/log/messages"

  RUDDER_LOG_PATH=""
  if [ -f "${PATH1}" ]; then RUDDER_LOG_PATH="${PATH1}"
  elif [ -f "${PATH2}" ]; then RUDDER_LOG_PATH="${PATH2}"
  fi

  # Fix: without a known log file the former code ran grep with an empty,
  # unquoted path, making it read stdin and hang the script
  if [ -z "${RUDDER_LOG_PATH}" ]
  then
    test_skipped "No system log file found, can not check cf-serverd logs"
    return 0
  fi

  # Keep only the last 10 cf-serverd error lines (single pass instead of
  # running the same pipeline twice)
  COMMAND_OUTPUT=`grep 'cf-serverd' "${RUDDER_LOG_PATH}" | grep "error: " | tail -10`

  if [ -z "${COMMAND_OUTPUT}" ]
  then
    test_success "The Rudder cf-serverd log contains no error"
  else
    test_failure "There are errors in the Rudder cf-serverd logs" "Output: ${COMMAND_OUTPUT}"
    RUDDER_CF_SERVERD_LOGS='ko'
  fi

  # Finally, return 1 if the test has failed somewhere, 0 otherwise
  if [ "${RUDDER_CF_SERVERD_LOGS}" = 'ko' ]
  then
    return 1
  fi
  return 0

}


#
# Rudder defaultpassword test
#

# Declare your tests here (KEEP "${TESTS}")
TESTS="${TESTS} rudder_defaultpassword"

# Describe your tests here
rudder_defaultpassword_description="Verify if the current Rudder installation uses the default password"

rudder_defaultpassword() {

  # By default, this test is successful
  RUDDER_DEFAULTPASSWORD='ok'

  # Look for the well-known hash in rudder-users.xml (presumably the
  # SHA-512 of the default password shipped with Rudder — TODO confirm)
  COMMAND_OUTPUT=`grep 'c7ad44cbad762a5da0a452f9e854fdc1e0e7a52a38015f23f3eab1d80b931dd472634dfac71cd34ebc35d16ab7fb8a90c81f975113d6c7538dc69dd8de9077ec' "${RUDDER_OPT}/etc/rudder-users.xml"`

  # Test if a hash matching the default one is found
  if [ "${?}" -eq 0 ]
  then
    test_warning "This Rudder installation uses a default password" "Users matching: ${COMMAND_OUTPUT}"
  else
    test_success "This Rudder installation does not use a default password"
  fi

  # Finally, return 1 if the test has failed somewhere, 0 otherwise
  # (fix: the former "[ ... ] && return 1" returned 1 even on success)
  if [ "${RUDDER_DEFAULTPASSWORD}" = 'ko' ]
  then
    return 1
  fi
  return 0

}
#
# Rudder get server uuid test (agent)
#

# Declare your tests here (KEEP "${TESTS}")
TESTS="${TESTS} rudder_get_server_uuid_agent"

# Describe your tests here
rudder_get_server_uuid_agent_description="Verify that the agent can access the server uuid"

rudder_get_server_uuid_agent() {

  # By default, this test is successful
  RUDDER_GET_SERVER_UUID_AGENT='ok'

  SERVER_HOST=`cat ${RUDDER_VAR}/cfengine-community/policy_server.dat 2>/dev/null`

  # Fix: added -f so that HTTP errors (404, 500...) make curl exit
  # non-zero instead of being reported as a successful download
  COMMAND_OUTPUT=`curl -s -f -k -x '' "https://${SERVER_HOST}/uuid" 2>&1`

  # Test if the server uuid is reachable
  if [ "${?}" -eq 0 ]
  then
    test_success "Downloading server uuid succeeded" "Got uuid \"${COMMAND_OUTPUT}\""
  else
    test_failure "Downloading server uuid failed" "Got uuid \"${COMMAND_OUTPUT}\""
    RUDDER_GET_SERVER_UUID_AGENT='ko'
  fi

  # Finally, return 1 if the test has failed somewhere, 0 otherwise
  # (fix: the former "[ ... ] && return 1" returned 1 even on success)
  if [ "${RUDDER_GET_SERVER_UUID_AGENT}" = 'ko' ]
  then
    return 1
  fi
  return 0

}
#
# Rudder init test
#

# Declare your tests here (KEEP "${TESTS}")
TESTS="${TESTS} rudder_init"

# Describe your tests here
rudder_init_description="Verify that Rudder has been initialized properly"

rudder_init() {

  # By default, this test is successful
  RUDDER_INIT='ok'

  # Run the ldap_structure test before
  if [ -z "${LDAP_STRUCTURE}" ]
  then
    test_info "Running LDAP structure test first"
    test_info ""
    ldap_structure
  fi

  # If it failed, skip the test
  if [ "${LDAP_STRUCTURE}" != "ok" ]
  then
    test_failure "LDAP structure test failed/skipped, considering rudder uninitialized"
    RUDDER_INIT='ko'
  else
    test_success "LDAP structure test succeeded, considering rudder initialized"
  fi

  # Finally, return 1 if the test has failed somewhere, 0 otherwise
  # (fix: the former "[ ... ] && return 1" returned 1 even on success)
  if [ "${RUDDER_INIT}" = 'ko' ]
  then
    return 1
  fi
  return 0

}
#
# Rudder packages test (agent)
#

# Declare your tests here (KEEP "${TESTS}")
TESTS="${TESTS} rudder_packages_agent"

# Describe your tests here
rudder_packages_description="Verify that Rudder packages are installed properly"

rudder_packages_agent() {

  # By default, this test is successful
  RUDDER_PACKAGES_AGENT='ok'

  # Reference version: the first non-empty version found below.
  # FIX: reset it here so a previous invocation cannot leak a stale reference.
  TEST_VERSION=""

  # For each known Rudder package, check its dpkg status and that every
  # installed package shares the same version
  for i in rudder-agent rudder-agent-thin rudder-cfengine-community rudder-inventory-endpoint rudder-inventory-ldap rudder-jetty rudder-policy-templates rudder-reports rudder-server-root rudder-techniques rudder-webapp
  do

    PACKAGE_VERSION=`packageutils_query_version "${i}"`
    PACKAGE_STATUS=`packageutils_query_package_status "${i}"`

    [ -z "${TEST_VERSION}" ] && TEST_VERSION="${PACKAGE_VERSION}"

    # Only consider packages that are actually present
    if [ -n "${PACKAGE_VERSION}" ]
    then
      if [ "${PACKAGE_STATUS}" != "ii" ]
      then
        test_failure "Package ${i} is not installed properly" "package ${i} has dpkg status \"${PACKAGE_STATUS}\""
        RUDDER_PACKAGES_AGENT='ko'
      else
        if [ "${PACKAGE_VERSION}" != "${TEST_VERSION}" ]
        then
          test_failure "Package ${i} version is incoherent" "version ${PACKAGE_VERSION}, reference ${TEST_VERSION}"
          # FIX: a version mismatch is a failure, flag it like the other failure branches
          RUDDER_PACKAGES_AGENT='ko'
        else
          test_success "Package ${i} status and version is coherent" "version ${PACKAGE_VERSION}, reference ${TEST_VERSION}"
        fi
      fi
    fi

  done

  # On a root server, a set of companion packages is mandatory
  if [ "`packageutils_query_package_status rudder-server-root`" = "ii" ]
  then

    MISSING_PACKAGE=0

    for i in rudder-agent rudder-server-relay rudder-reports rudder-webapp
    do

      PACKAGE_STATUS=`packageutils_query_package_status "${i}"`

      if [ "${PACKAGE_STATUS}" != "ii" ]
      then
        test_failure "Required server package ${i} is not installed" "package ${i} has dpkg status \"${PACKAGE_STATUS}\""
        MISSING_PACKAGE=1
        RUDDER_PACKAGES_AGENT='ko'
      fi

    done

    [ "${MISSING_PACKAGE}" -eq 0 ] && test_success "All required server packages are installed"

  fi

  # We check if ".rpmnew" packages exist in the directories that keep the configuration files.
  PATH_DIRECTORY1="/opt/rudder/etc/"
  PATH_DIRECTORY2="/etc/httpd/"

  if [ -d "${PATH_DIRECTORY1}" -a -d "${PATH_DIRECTORY2}" ]
  then
    TEST1=$( find "${PATH_DIRECTORY1}" -type f \( -name "*.rpmnew" \) | wc -l )
    TEST2=$( find "${PATH_DIRECTORY2}" -type f \( -name "*.rpmnew" \) | wc -l )
    RESULT=$((TEST1 + TEST2))

    if [ "${RESULT}" -ne 0 ]
    then
      test_failure "There are unapplied configuration files (.rpmnew)" "${RESULT} packages found to apply"
    else
      # FIX: "upply" -> "apply" typo in the success message
      test_success "All configuration files are applied" "${RESULT} packages found to apply"
    fi
  else
    test_failure "Directory does not exist" "${PATH_DIRECTORY1} or ${PATH_DIRECTORY2} is missing"
  fi

  # Finally, return 1 if the test has failed somewhere
  # FIX: check RUDDER_PACKAGES_AGENT, the flag actually set above; the previous
  # RUDDER_PACKAGES variable was never assigned, so failures were never reported
  # through the exit code
  [ "${RUDDER_PACKAGES_AGENT}" = 'ko' ] && return 1

}
#
# Rudder processes test
#

# Declare your tests here (KEEP "${TESTS}")
TESTS="${TESTS} rudder_processes"

# Describe your tests here
rudder_processes_description="Verify that all necessary Rudder processes are running"

rudder_processes() {

  # Assume everything is running until a process is found missing
  RUDDER_PROCESSES='ok'

  # Pick the apache process name used by this distribution
  if type httpd >/dev/null 2>/dev/null
  then
    APACHE_NAME="httpd"
  else
    APACHE_NAME="apache2"
  fi

  # Check each core Rudder component process
  for process in slapd /opt/rudder/jetty postgres "${APACHE_NAME}"
  do

    PROCESS_COMMAND=$(processutils_getcommand "${process}")

    if [ -z "${PROCESS_COMMAND}" ]
    then
      test_failure "The ${process} process looks dead"
      RUDDER_PROCESSES='ko'
    else
      test_success "The ${process} process is running" "Detected process ${PROCESS_COMMAND}"
    fi

  done

  # Finally, return 1 if the test has failed somewhere
  [ "${RUDDER_PROCESSES}" = 'ko' ] && return 1

}
#
# runtime test
#

# Declare your tests here (KEEP "${TESTS}")
# FIX: this test was never registered in ${TESTS} (and a stray "#!/bin/sh" line
# was left over from file concatenation), so it could never be selected or run.
TESTS="${TESTS} runtime_test"

# Describe your tests here
runtime_test_description="Verify if some runs took more than amount of time between runs"
runtime_test() {

  # By default, this test is successful
  RUNTIME_AGENT='ok'
  # Agent output directory; each file name embeds the run's start timestamp
  LOG_PATH="/var/rudder/cfengine-community/outputs"
  # Maximum acceptable agent run time, in seconds
  runtime=300

  if [ -e "${LOG_PATH}" ]
  then
    # Inspect the 5 most recent output files
    for i in $(ls -rt "${LOG_PATH}" | tail -5)
    do
      # Start time of the run, taken from the file name
      # NOTE(review): assumes the epoch is the 6th '_'-separated field -- confirm
      start_exec=$(echo "$i" | cut -d'_' -f6)
      # End of the run, approximated by the file's last modification time
      date_modif=$(stat -c %Y "${LOG_PATH}/$i")
      # Duration of this agent run
      runtime_agent=$((date_modif - start_exec))

      # We check if some runs took more than amount of time between runs.
      if [ "${runtime_agent}" -lt "${runtime}" ]
      then
        test_success "runtime_agent " "${runtime_agent} has a normal run time "
      else
        test_failure "runtime_agent " "${runtime_agent} does not have a normal run time"
        # FIX: flag the failure, otherwise the function could never return 1
        RUNTIME_AGENT='ko'
      fi
    done
  else
    test_skipped "directory ${LOG_PATH} does not exist"
  fi

  # Finally, return 1 if the test has failed somewhere
  [ "${RUNTIME_AGENT}" = 'ko' ] && return 1

}
#
# Rudder unsent inventories test (agent)
#

# Declare your tests here (KEEP "${TESTS}")
TESTS="${TESTS} rudder_unsent_inventories_agent"

# Describe your tests here
rudder_unsent_inventories_agent_description="Verify that the agent has no unsent inventories"

rudder_unsent_inventories_agent() {

  # By default, this test is successful
  RUDDER_UNSENT_INVENTORIES_AGENT='ok'

  # On a server the failed inventories are moved aside; on an agent they stay
  # in the inventories directory until sent
  if [ "${CURRENT_MACHINE_TYPE}" = 'server' ]
  then
    UNSENT_INVENTORIES_DIR="${RUDDER_VAR}/inventories/failed"
  else
    UNSENT_INVENTORIES_DIR="${RUDDER_VAR}/inventories"
  fi

  # we check failed inventories that do not exceed 5 days (432000 s).
  recent_time="432000"
  # FIX: hoist the loop-invariant time computation out of the loop
  today_date=$(date +'%s')
  recent_modif=$((today_date - recent_time))
  inc=0
  for i in "${UNSENT_INVENTORIES_DIR}"/*
  do
    [ -e "$i" ] || continue
    # FIX: quote "$i" so file names containing whitespace do not break stat
    last_modif=$(stat -c %Y "$i")
    if [ "${last_modif}" -ge "${recent_modif}" ]
    then
      inc=$((inc+1))
    fi
  done

  # If there are unsent inventories, fail the test
  # NOTE(review): the threshold tolerates one recent file (presumably an
  # inventory currently in flight) -- confirm this is intended
  if [ "${inc}" -gt 1 ]; then

    test_failure "There are some unsent inventories"
    RUDDER_UNSENT_INVENTORIES_AGENT='ko'
  else
    test_success "No unsent inventories detected"
  fi

  # Finally, return 1 if the test has failed somewhere
  [ "${RUDDER_UNSENT_INVENTORIES_AGENT}" = 'ko' ] && return 1

}

#
# Rudder webapplog test
#

# Declare your tests here (KEEP "${TESTS}")
TESTS="${TESTS} rudder_webapplog"

# Describe your tests here
rudder_webapplog_description="Verify that the current jetty stdout log does not contain any ERROR"

rudder_webapplog() {

  # By default, this test is successful
  RUDDER_WEBAPPLOG='ok'

  CURRENT_DATE=`date +%Y_%m_%d`
  WEBAPP_LOG="/var/log/rudder/webapp/${CURRENT_DATE}.stderrout.log"

  # FIX: grep returns non-zero both for "no match" and "file not found", so a
  # missing log file used to be reported as a success; skip the test instead
  if [ ! -f "${WEBAPP_LOG}" ]
  then
    test_skipped "The Rudder webapp log ${WEBAPP_LOG} does not exist"
  else
    COMMAND_OUTPUT=`grep 'ERROR' "${WEBAPP_LOG}"`

    # If an ERROR matches in the log, fail the test
    if [ "${?}" -ne 0 ]
    then
      test_success "The Rudder webapp log contains no error"
    else
      test_failure "There are errors in the Rudder webapp logs" "Output: ${COMMAND_OUTPUT}"
      RUDDER_WEBAPPLOG='ko'
    fi
  fi

  # Finally, return 1 if the test has failed somewhere
  [ "${RUDDER_WEBAPPLOG}" = 'ko' ] && return 1

}
#
# Generic test template
#
# Here is a paragraph that explains this test purpose
#

# Declare your tests here (KEEP "${TESTS}")
#TESTS="${TESTS} skel_scenario"

# Describe your tests here
#skel_scenario_description="Verify that ..."

skel_scenario() {

  # Assume the test passes until a check fails
  SKEL_SCENARIO='ok'

  case "${CONDITION}" in
    true)
      # Test is successful
      test_success "test succeeded" "verbose details"
      ;;
    false)
      # Test has failed, mark it as such
      test_failure "test failed" "verbose details"
      SKEL_SCENARIO='ko'
      ;;
    *)
      # Something unexpected happened, skip the test
      test_skipped "test skipped" "verbose details"
      ;;
  esac

  # Finally, return 1 if the test has failed somewhere
  [ "${SKEL_SCENARIO}" = 'ko' ] && return 1

}
#
# System df test
#

# Declare your tests here (KEEP "${TESTS}")
TESTS="${TESTS} system_df"

# Describe your tests here
system_df_description="Verify important filesystems (/, /opt, /var) available free space"

system_df() {

  # By default, this test is successful
  SYSTEM_DF='ok'

  for i in / /opt /var '.*rudder.*' '.*postgres.*' '.*pgsql.*'
  do

    # Usage percentage of the matching mount point (empty when not mounted)
    FILESYSTEM_PERCENTAGE=`df -P -h | grep -E "${i}$" | head -1 | awk '{ print $5 }' | tr -d '%'`

    # Warn above 92%, fail at 100%
    if [ -z "${FILESYSTEM_PERCENTAGE}" ]
    then
      test_skipped "Filesystem ${i} does not exist"
    elif [ "${FILESYSTEM_PERCENTAGE}" -eq 100 ]
    then
      # FIX: the full-filesystem check must come before the ">92%" one,
      # otherwise it is unreachable (100 > 92 matched first); also fixed the
      # FILESYSTEM_PENRCENTAGE typo that printed an empty value
      test_failure "Filesystem ${i} does not have free space" "${FILESYSTEM_PERCENTAGE} percent used"
      SYSTEM_DF='ko'
    elif [ "${FILESYSTEM_PERCENTAGE}" -gt 92 ]
    then
      # A warning does not mark the test as failed
      test_warning "Filesystem ${i} does not have enough free space" "${FILESYSTEM_PERCENTAGE} percent used"
    else
      test_success "Filesystem ${i} has enough free space" "${FILESYSTEM_PERCENTAGE} percent used"
    fi
  done

  # Finally, return 1 if the test has failed somewhere
  [ "${SYSTEM_DF}" = 'ko' ] && return 1

}
#
# System mem test
#

# Declare your tests here (KEEP "${TESTS}")
TESTS="${TESTS} system_mem"

# Describe your tests here
system_mem_description="Verify available free memory space"

system_mem() {

  # By default, this test is successful
  # FIX: use a dedicated SYSTEM_MEM flag; the previous code reused SYSTEM_DF,
  # clobbering the df test's result, and never set it to 'ko' on failure
  SYSTEM_MEM='ok'

  # Available memory in MB (free + buffers + cache)
  # NOTE(review): column positions depend on the procps version -- confirm
  FREE_MEMORY=`free -m | grep 'Mem:' | awk '{print $4+$6+$7}'`

  # If the available memory goes under 500M, fail the test
  if [ "${FREE_MEMORY}" -gt 500 ]
  then
      test_success "There is enough free memory" "${FREE_MEMORY} MB free"
  else
      test_failure "Not enough free memory" "${FREE_MEMORY} MB free"
      SYSTEM_MEM='ko'
  fi

  # Finally, return 1 if the test has failed somewhere
  [ "${SYSTEM_MEM}" = 'ko' ] && return 1

}
#
# uuid generation test
#
# This test requires a valid UUID to be given
#

# Declare your tests here (KEEP "${TESTS}")
TESTS="${TESTS} uuid_generation"

# Describe your tests here
uuid_generation_description="Verify that the given uuid / node has generated promises"

uuid_generation() {

  # Until a check fails, consider the test successful
  UUID_GENERATION='ok'

  # Count the generated promise directories matching this node UUID
  MATCH_AMOUNT=`find /var/rudder/share -iname "${UUID}" 2>/dev/null | wc -l`

  if [ "${MATCH_AMOUNT}" -eq 1 ]
  then
    test_success "There are generated promises detected for this UUID" "There are ${MATCH_AMOUNT} matches for this UUID"
  elif [ "${MATCH_AMOUNT}" -eq 0 ]
  then
    test_failure "No generated promises detected for this UUID" "There are ${MATCH_AMOUNT} matches for this UUID"
    UUID_GENERATION='ko'
  else
    test_failure "There are more than 1 matching UUID in generated promises ?!" "There are ${MATCH_AMOUNT} matches for this UUID"
    UUID_GENERATION='ko'
  fi

  # Finally, return 1 if the test has failed somewhere
  [ "${UUID_GENERATION}" = 'ko' ] && return 1

}
#
# uuid inventory_date test
#
# This test requires a valid UUID to be given
#

# Declare your tests here (KEEP "${TESTS}")
TESTS="${TESTS} uuid_inventory_date"

# Describe your tests here
uuid_inventory_date_description="Verify that the given uuid / node last inventory date is okay"

uuid_inventory_date() {

  # Until a check fails, consider the test successful
  UUID_INVENTORY_DATE='ok'

  # Date (YYYY-MM-DD) of the node's last inventory, as reported by the API
  NODE_INVENTORY_DATE=`apiutils_call_api "${API_TOKEN}" "nodes/${UUID}?include=full" "lastInventoryDate" | cut -d " " -f 1`

  DATE_TODAY=`date --date=today '+%Y-%m-%d'`
  DATE_YESTERDAY=`date --date=yesterday '+%Y-%m-%d'`

  # An inventory dated today or yesterday is considered fresh enough
  if [ "${NODE_INVENTORY_DATE}" = "${DATE_TODAY}" ] || [ "${NODE_INVENTORY_DATE}" = "${DATE_YESTERDAY}" ]
  then
    test_success "Node last inventory date is okay" "Last inventory date is ${NODE_INVENTORY_DATE}"
  else
    test_failure "Node last inventory is too old" "Last inventory date is ${NODE_INVENTORY_DATE}"
    UUID_INVENTORY_DATE='ko'
  fi

  # Finally, return 1 if the test has failed somewhere
  [ "${UUID_INVENTORY_DATE}" = 'ko' ] && return 1

}
#
# uuid policy_server test
#
# This test requires a valid UUID to be given
#

# Declare your tests here (KEEP "${TESTS}")
TESTS="${TESTS} uuid_policy_server"

# Describe your tests here
uuid_policy_server_description="Verify that the given uuid / node policy_server is 'accepted'"

uuid_policy_server() {

  # Until a check fails, consider the test successful
  UUID_POLICY_SERVER='ok'

  # Policy server id of the node, as reported by the Rudder API
  NODE_POLICY_SERVER=`apiutils_call_api "${API_TOKEN}" "nodes/${UUID}?include=full" "policyServerId"`

  if [ -n "${NODE_POLICY_SERVER}" ]
  then
    test_success "Node policy server is ${NODE_POLICY_SERVER}"
  else
    test_failure "Unable to get the host policy server" "No policyServerId returned by Rudder"
    UUID_POLICY_SERVER='ko'
  fi

  # Finally, return 1 if the test has failed somewhere
  [ "${UUID_POLICY_SERVER}" = 'ko' ] && return 1

}
#
# uuid resolution test
#
# This test requires a valid UUID to be given
#

# Declare your tests here (KEEP "${TESTS}")
TESTS="${TESTS} uuid_resolution"

# Describe your tests here
uuid_resolution_description="Verify that the given uuid / node can be resolved"

uuid_resolution() {

  # By default, this test is successful
  UUID_RESOLUTION='ok'

  # Hostname as known by Rudder, then resolved locally to an IP address
  NODE_HOSTNAME=`apiutils_call_api "${API_TOKEN}" "nodes/${UUID}?include=minimal" "hostname"`
  NODE_IP=`getent hosts "${NODE_HOSTNAME}" | awk '{ print $1; }'`

  if [ -z "${NODE_HOSTNAME}" ]
  then
    test_failure "Node hostname resolution failed" "No hostname returned by Rudder"
    UUID_RESOLUTION='ko'
  elif [ -z "${NODE_IP}" ]
  then
    # FIX: this is a failure branch, but its title wrongly said "succeeded"
    test_failure "Node hostname resolution failed" "Node ${NODE_HOSTNAME} resolved to nothing"
    UUID_RESOLUTION='ko'
  else
    test_success "Node hostname resolution succeeded" "Node ${NODE_HOSTNAME} resolved to ${NODE_IP}"
  fi

  # Finally, return 1 if the test has failed somewhere
  [ "${UUID_RESOLUTION}" = 'ko' ] && return 1

}
#
# uuid status test
#
# This test requires a valid UUID to be given
#

# Declare your tests here (KEEP "${TESTS}")
TESTS="${TESTS} uuid_status"

# Describe your tests here
uuid_status_description="Verify that the given uuid / node status is 'accepted'"

uuid_status() {

  # Assume success, flip to 'ko' on any failed check
  UUID_STATUS='ok'

  # Status of the node as reported by the Rudder API
  NODE_STATUS=`apiutils_call_api "${API_TOKEN}" "nodes/${UUID}?include=minimal" "status"`

  case "${NODE_STATUS}" in
    "")
      test_failure "Unable to get the host status" "No status returned by Rudder"
      UUID_STATUS='ko'
      ;;
    accepted)
      test_success "Node status is 'accepted'"
      ;;
    *)
      test_failure "Node status is not 'accepted'" "Node status: ${NODE_STATUS}"
      UUID_STATUS='ko'
      ;;
  esac

  # Finally, return 1 if the test has failed somewhere
  [ "${UUID_STATUS}" = 'ko' ] && return 1

}

## FUNCTIONS
############

# Print each distinct component name, one per line: the part of every
# registered test name before its first underscore, adjacent duplicates folded.
component_list() {
  printf '%s\n' "${TESTS}" | tr ' ' '\n' | cut -d '_' -f 1 | uniq
}

# Print the scenario names of component $1, one per line: every registered
# test starting with "<component>_", with that prefix stripped.
scenario_list() {
  printf '%s\n' "${TESTS}" | tr ' ' '\n' | grep -E "^${1}_" | sed "s/^${1}_//" | uniq
}

# Print the global help text: options, then one line per available component.
usage() {
  echo ""
  echo "This is the rudder debug info script."
  echo ""
  echo "Usage: ${0} [options] [component|all|help] [scenario|all|help]"
  echo ""
  echo "Options:"
  echo ""
  echo "* -h / --help: this help"
  echo "* -v / --verbose: verbose output in test scenarios"
  echo "* -u / --uuid: debug a specific agent UUID"
  echo "* -t / --api-token: api token (used by UUID debug)"
  echo "* -a / --agent-only: only run agent tests, even if running on a server"
  echo "* -n / --no-collect: do not collect log files in a tarball at the end"
  echo "* -s / --slow-tests: also run tests that can take a long time"
  # FIX: the long option defined in getopt is --log-file, not --log_file, and
  # the example was missing its closing parenthesis
  echo "* -l / --log-file: log file location (ex: /var/rudder/debug/info/rudder-debug-info.log)"
  echo ""
  echo "Components:"
  echo "* help: this help"
  for component in `component_list`
  do
    echo "* ${component}: ${component} related scenarios"
  done
  echo ""
  echo "Note: If no component is given, run all scenarios in all components."
  echo ""
}

# Print the help text for a single component ($1): each of its scenarios with
# the matching "<component>_<scenario>_description" variable.
component_usage() {
  component="$1"
  echo ""
  echo "Run scenarios on the ${component} component."
  echo "If no scenario is given, or the scenario is 'all', run all scenarios."
  echo ""
  echo "Available scenarios:"
  for scenario in `scenario_list ${component}`
  do
    # eval resolves the description variable whose name is built from
    # component + scenario (declared next to each test definition)
    eval "echo \"* ${scenario}: \$${component}_${scenario}_description\""
  done
  echo ""
}

## MAIN
#######

# Handle arguments (thanks getopt)
# NOTE(review): --long requires the util-linux "enhanced" getopt -- confirm
# target platforms ship it (legacy getopt breaks on arguments with spaces)
OPTS=`getopt -o hvu:t:ansl: --long help,verbose,uuid:,api-token:,agent-only,no-collect,slow-tests,log-file: -n 'parse-options' -- "$@"`

if [ $? != 0 ] ; then
  usage
  exit 1
fi

eval set -- "$OPTS"

# Option defaults
VERBOSE='0'
UUID=''
API_TOKEN=''
AGENT_ONLY='0'
NO_COLLECT='0'
SLOW_TESTS='0'
LOG_DIR="${OUTPUT_DIR}/log"
# FIX: quote the path so word-splitting cannot break the mkdir argument
mkdir -p "${LOG_DIR}"
LOG_FILE="${LOG_DIR}/rudder-debug-info-${RUN_DATE}.log"

# Consume the options produced by getopt until the "--" separator
while true; do
  case "$1" in
    # FIX: asking for help is not an error; exit 0 like the "help" keyword
    # path does below (the shift after exit was dead code)
    -h | --help )       usage; exit 0 ;;
    -v | --verbose )    VERBOSE='1'; shift ;;
    -u | --uuid )       UUID="${2}"; shift; shift ;;
    -t | --api-token )  API_TOKEN="${2}"; shift; shift ;;
    -a | --agent-only ) AGENT_ONLY='1'; shift ;;
    -n | --no-collect ) NO_COLLECT='1'; shift ;;
    -s | --slow-tests ) SLOW_TESTS='1'; shift ;;
    -l | --log-file )   LOG_FILE="${2}"; shift; shift ;;
    -- ) shift; break ;;
    * ) break ;;
  esac
done

# The first positional parameter is the component, the second the scenario
component="${1}"
scenario="${2}"

# If we run on an agent, filter out server-only tests
# (default to 'server'; downgraded to 'agent' below when no server role is found)
CURRENT_MACHINE_TYPE='server'

# Validate the given UUID, if applicable
if [ -n "${UUID}" ]
then
  # UUID debugging queries the Rudder API, so a token is mandatory
  if [ -z "${API_TOKEN}" ]
  then
    echo "ERROR: UUID debug requires a valid API token to be given"
    exit 1
  # NOTE(review): assumes cfengineutils_validate_uuid (defined earlier in this
  # file) prints 1 for a valid UUID -- confirm its output contract
  elif [ `cfengineutils_validate_uuid "${UUID}"` -ne 1 ]
  then
    echo "ERROR: This UUID is invalid"
    echo "ERROR: Please enter a valid one, ex: 22afad2b-0385-4b5a-8ac2-d77850ee6bf3"
    exit 1
  fi
else
  # No UUID given, strip out uuid scenarios
  TESTS=`echo "${TESTS}" | tr " " "\n" | grep -vE "^uuid_"`
fi

# Without server roles (or when forced with --agent-only), keep only the
# "*_agent" tests and mark this machine as a plain agent
if [ ! -d "/opt/rudder/etc/server-roles.d" ] || [ "${AGENT_ONLY}" = '1' ]
then
  TESTS=`echo "${TESTS}" | tr " " "\n" | grep -E "_agent$"`
  CURRENT_MACHINE_TYPE='agent'
fi

if [ "${component}" = "help" ]
then
  usage
  exit 0
fi

# Test the validity of the "component" parameter
if [ -n "${component}" ] && [ "${component}" != "all" ]
then
  for installed_component in `component_list`
  do
    if [ "${component}" = "${installed_component}" ]
    then
      component_ok="y"
    fi
  done
else
  # Empty or "all": run every component
  run_all_components="y"
  component_ok="y"
fi

if [ "${component_ok}" != "y" ]
then
  echo "Unknown component '${component}'"
  echo ""
  usage
  exit 1
fi

if [ "${scenario}" = "help" ]
then
  component_usage "${component}"
  exit 0
fi

# Test the validity of the "scenario" parameter
# FIX: the usage text advertises "all" for scenarios, but only "any" was
# recognized here; accept both (keeping "any" for backward compatibility)
if [ -n "${scenario}" ] && [ "${scenario}" != "all" ] && [ "${scenario}" != "any" ]
then
  if ! type "${component}_${scenario}" > /dev/null
  then
    echo "Test ${scenario} on ${component} not found"
    echo ""
    component_usage "${component}"
    exit 3
  fi
else
  run_all_scenarios="y"
fi

# Run the selected tests
log "START" "----------------------------------------------------------------------"
log "START" "Started at $(date -R) on $(hostname -f)"
log "START" "Machine type: ${CURRENT_MACHINE_TYPE}"
log "START" "----------------------------------------------------------------------"

if [ -n "${UUID}" ]
then

  # UUID mode: run only the uuid scenarios, and never collect logs
  NO_COLLECT='1'

  term_simple "Running tests against UUID ${UUID}..."
  term_simple ""

  for scenario in $(scenario_list uuid)
  do
    "uuid_${scenario}"
  done

elif [ "${run_all_components}" = 'y' ]
then

  # No component (or "all") given: run every scenario of every component
  term_simple "Running all ${CURRENT_MACHINE_TYPE} scenarios..."

  for component in $(component_list)
  do
    term_simple ""
    term_simple "${component} component:"
    term_simple ""
    for scenario in $(scenario_list "${component}")
    do
      "${component}_${scenario}"
    done
  done

elif [ "${run_all_scenarios}" = 'y' ]
then

  # A component but no scenario: run every scenario of that component
  term_simple "Running all scenarios from component ${component}..."
  term_simple ""

  for scenario in $(scenario_list "${component}")
  do
    "${component}_${scenario}"
  done

else

  # Both component and scenario given: run that single scenario
  term_simple "Running the ${component} ${scenario} scenario..."
  term_simple ""

  "${component}_${scenario}"

fi

# Summarize successes, errors and skips
term_simple ""
term_simple "Success: ${GREEN}${SUCCESS_COUNT}${RESET}"
term_simple "Failure: ${RED}${FAILURE_COUNT}${RESET}"
term_simple "Skipped: ${MAGENTA}${SKIPPED_COUNT}${RESET}"
log "RESULT" "Success: ${SUCCESS_COUNT}"
log "RESULT" "Failure: ${FAILURE_COUNT}"
log "RESULT" "Skipped: ${SKIPPED_COUNT}"

# Collect useful system files for debugging purposes
if [ "${NO_COLLECT}" != "1" ]
then

  mkdir -p "${OUTPUT_DIR}/${COLLECT_DIR}"
  collect_logs
  collect_files "${OUTPUT_DIR}/${COLLECT_DIR}"
  # FIX: use ${OUTPUT_DIR} instead of duplicating the hard-coded path, so the
  # tarball is built where the rest of the script expects it
  tar -C "${OUTPUT_DIR}" -zcf "${OUTPUT_FILE}" "${COLLECT_DIR}"
  ln -sf "${OUTPUT_FILE}" "${OUTPUT_DIR}/debug-info-latest.tar.gz"
  rm -rf "${OUTPUT_DIR}/${COLLECT_DIR}"

  term_simple ""
  term_simple "Debug information is available in ${OUTPUT_FILE}"
fi

# Exit non-zero if any scenario failed
[ "${FAILURE_COUNT}" -ne 0 ] && exit 1 || exit 0