mirror of
https://github.com/rsyslog/rsyslog.git
synced 2026-05-10 23:10:51 +02:00
Motivation: code coverage reports were incomplete. This lays a better base for consistent reporting via GitHub Actions, with room for follow-ups. It also removes a test flake source in Kafka jobs. Impact: CI/tests only; no runtime behavior or ABI changes expected. Before: Coverage uploads were inconsistent; Kafka tests could hang while reading from /dev/urandom to generate topic names. After: Coverage is collected with lcov and uploaded via a dedicated GH Action; Kafka topics use fast $RANDOM-based hex, avoiding early-boot entropy stalls. Technical details: - Add two workflows: "codecov base" and "codecov kafka" on Ubuntu 24.04. Use lcov capture with unexecuted blocks and prune common noise; upload with token for same-repo PRs and tokenless for forks. - Update .codecov.yml: add path fixes for container (/rsyslog) and runner layouts; explicitly set comment: false and patch: false. - Bump actions/checkout to v4 in existing workflows; add an actionlint job to catch YAML problems early. - Switch codecov jobs in container matrix to 24.04 images. - Improve run-ci.sh lcov invocation to be more tolerant of line/macro mismatches. - Testbench: replace /dev/urandom topic generation with 8-char hex from $RANDOM; adjust diag.sh path/quoting for zookeeper helper.
58 lines
1.4 KiB
Bash
Executable File
#!/bin/bash
# added 2018-08-29 by alorbach
# This file is part of the rsyslog project, released under ASL 2.0
#
# Negative test for imkafka: an invalid confParam ("does.not.exist") is
# deliberately configured. rsyslog must log an error for it and must not
# segfault while messages are being injected.

. ${srcdir:=.}/diag.sh init
check_command_available kcat

# keep the broker alive across the test steps; diag.sh tears it down at exit
export KEEP_KAFKA_RUNNING="YES"

export TESTMESSAGES=1000
# Set EXTRA_EXITCHECK to dump kafka/zookeeper logfiles on failure only.
export EXTRA_EXITCHECK=dumpkafkalogs
export EXTRA_EXIT=kafka

# Build an 8-char hex topic name from $RANDOM instead of /dev/urandom:
# reading urandom can stall on entropy-starved, freshly booted CI machines.
# RANDOM yields 15 bits, so shifting one sample and XORing a second gives
# up to 31 bits, zero-padded to 8 hex digits.
export RANDTOPIC="$(printf '%08x' "$(( (RANDOM<<16) ^ RANDOM ))")"

download_kafka
# restart from a clean state in case a previous test left daemons running
stop_zookeeper
stop_kafka
start_zookeeper
start_kafka

export RSYSLOG_DEBUGLOG="log"
generate_conf
add_conf '
main_queue(queue.timeoutactioncompletion="60000" queue.timeoutshutdown="60000")

module(load="../plugins/imkafka/.libs/imkafka")
/* Polls messages from kafka server!*/
input(	type="imkafka"
	topic="'$RANDTOPIC'"
	broker="localhost:29092"
	consumergroup="default"
	confParam=[ "does.not.exist=none",
		"session.timeout.ms=10000",
		"socket.timeout.ms=5000",
		"socket.keepalive.enable=true",
		"reconnect.backoff.jitter.ms=1000",
		"enable.partition.eof=false" ]
	)

template(name="outfmt" type="string" string="%msg:F,58:2%\n")

action( type="omfile" file="'$RSYSLOG_OUT_LOG'")
'

startup

# We inject messages, even though we know this will not work. The reason
# is that we want to ensure we do not get a segfault in such an error case
injectmsg_kcat

shutdown_when_empty
wait_shutdown

# the bogus confParam must be reported; this is the actual test assertion
content_check "error setting custom configuration parameter 'does.not.exist=none'"
# quote the topic var for safety/consistency with the other quoted args
delete_kafka_topic "$RANDTOPIC" '.dep_wrk' '22181'

exit_test