Nilorea Library kafka event test.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <locale.h>
#include <libgen.h>
#include <errno.h>
#include <unistd.h> /* getopt(), usleep() */
#include "cJSON.h"
#include "rdkafka.h"
/* Nilorea library headers (include paths assumed, they are not shown in this listing) */
#include "nilorea/n_log.h"
#include "nilorea/n_list.h"
#include "nilorea/n_str.h"
#include "nilorea/n_kafka.h"
#define OK 0
#define ERROR -1
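/* number of test events the producer part of this example queues for sending */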
#define NB_TEST_EVENTS 10
/* print command line usage; the function name is an assumption, its signature is not shown in this listing */
static void usage(void) {
    fprintf(stderr,
            "Syntax is: ex_kafka -v -c config_file [-s event or -f eventfile] -o event_log_file -V LOGLEVEL\n"
            "  -v version: print version and exit\n"
            "  -h : print this help and exit\n"
            "  -c config_file: [required] kafka producer config file\n"
            "  -s : string of the event to send\n"
            "  -f : file containing the event to send\n"
            "  -H name=value: add a kafka header to the produced events\n"
            "  -C : start a consumer (by default received events are printed to the terminal)\n"
            "  -P : start a producer and produce events\n"
            "  -o : optional, set a log file instead of the default (stderr/stdout)\n"
            "  -p : optional, set a log prefix\n"
            "  -V verbosity: specify a log level for console output\n"
            "       Supported: LOG_EMERG, LOG_ALERT, LOG_CRIT, LOG_ERR, LOG_WARNING, LOG_NOTICE, LOG_INFO, LOG_DEBUG\n");
}
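/* Example invocations (configuration and event file names are placeholders):
 *   ./ex_kafka -c kafka.cfg -P -s '{"id":1}' -V LOG_DEBUG       produce one event given on the command line
 *   ./ex_kafka -c kafka.cfg -P -f event.json -H trace_id=42     produce the content of a file, adding a kafka header
 *   ./ex_kafka -c kafka.cfg -C -o received.log -V LOG_INFO      consume and log received events
 */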
/* run flag of the main polling loop; the declaration is assumed from its use in the loop condition below */
static volatile int run = 1;

/* signal handler: clear the run flag so that the polling loop exits cleanly */
static void stop(int sig) {
    (void)sig;
    run = 0;
}
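/* main() presumably installs this handler for SIGINT/SIGTERM with signal(); those calls are not shown in this excerpt */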
int main(int argc, char* argv[]) {
    rd_kafka_headers_t* headers = NULL;
    int getoptret = 0;

    while ((getoptret = getopt(argc, argv, "vhCPH:c:s:f:V:o:p:")) != -1) {
        switch (getoptret) {
            case 'v':
                fprintf(stderr, " Version compiled on %s at %s\n", __DATE__, __TIME__);
                exit(TRUE);
            case 'h':
                usage();
                exit(0);
                break;
            case 'C':
                /* select consumer mode; the mode flag test opening this if/else is not shown in this excerpt */
                } else {
                    fprintf(stderr, "-C and -P cannot be used at the same time!\n");
                    exit(TRUE);
                }
                break;
            case 'P':
                /* select producer mode; the mode flag test opening this if/else is not shown in this excerpt */
                } else {
                    fprintf(stderr, "-C and -P cannot be used at the same time!\n");
                    exit(TRUE);
                }
                break;
            case 'H': {
                char *name = NULL, *val = NULL;
                size_t name_sz = -1;
                name = optarg;
                val = strchr(name, '=');
                if (val) {
                    name_sz = (size_t)(val - name);
                    val++;
                }
                if (!headers)
                    headers = rd_kafka_headers_new(16);
                int err = rd_kafka_header_add(headers, name, name_sz, val, -1);
                if (err) {
                    fprintf(stderr,
                            "%% Failed to add header %s: %s\n",
                            name, rd_kafka_err2str(err));
                    exit(1);
                }
            } break;
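            /* -H options accumulate name=value pairs in 'headers'; further down, each produced
               event receives its own copy of that list via rd_kafka_headers_copy() */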
            case 'c': /* -c: kafka configuration file path (optarg handling not shown in this excerpt) */
                break;
            case 's': /* -s: event to send, passed as a string */
                break;
            case 'f': /* -f: file containing the event to send */
                break;
            case 'o': /* -o: log to a file instead of stderr/stdout */
                break;
            case 'p': /* -p: log prefix */
                break;
            case 'V':
                /* map the -V argument to a log level; the set_log_level() calls are assumptions
                   based on the definitions listed below, the original bodies are not shown */
                if (!strncmp("LOG_NULL", optarg, 8)) {
                    set_log_level(LOG_NULL);
                } else if (!strncmp("LOG_NOTICE", optarg, 10)) {
                    set_log_level(LOG_NOTICE);
                } else if (!strncmp("LOG_INFO", optarg, 8)) {
                    set_log_level(LOG_INFO);
                } else if (!strncmp("LOG_ERR", optarg, 7)) {
                    set_log_level(LOG_ERR);
                } else if (!strncmp("LOG_DEBUG", optarg, 9)) {
                    set_log_level(LOG_DEBUG);
                } else {
                    fprintf(stderr, "%s is not a valid log level.\n", optarg);
                    exit(-1);
                }
                break;
            case '?':
                if (optopt == 'c' || optopt == 's' || optopt == 'f') {
                    fprintf(stderr, "Option -%c needs a parameter\n", optopt);
                    exit(FALSE);
                }
                /* any other unrecognized option falls through to the default error exit,
                   presumably via the FALL_THROUGH macro listed in the definitions below */
            default:
                exit(-1);
                break;
        }
    }
    /* argument validation and kafka handle creation follow in the full example; this excerpt
       only retains their error paths, each of which ends in exit(1) */
    unsigned int exit_code = 0;
    size_t nb_queued = 0;
    size_t nb_waiting = 0;
    size_t nb_error = 0;
    int poll_status = 1;
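    /* the counters above are filled by n_kafka_get_status(): events still queued locally,
       waiting for broker acknowledgment, or in error */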
    /* producer mode: events are built from the -s string or the -f file with
       n_kafka_new_event_from_char() / n_kafka_new_event_from_file() (see the definitions below);
       that creation code is not shown in this excerpt */
            if (headers) {
                event->rd_kafka_headers = rd_kafka_headers_copy(headers);
                rd_kafka_headers_destroy(headers);
            }
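            /* each event gets a private copy of the accumulated -H headers; the original list is
               released once it has been copied */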
            /* queue the event for sending; 'kafka_handle' and the ERROR return check are
               assumptions, the handle creation and the exact test are not shown in this excerpt */
            if (n_kafka_produce(kafka_handle, event) == ERROR) {
                n_log(LOG_ERR, "n_kafka_produce returned an error for event %p", event);
            } else {
                n_log(LOG_INFO, "n_kafka_produce returned OK for event %p", event);
            }
        }
    /* polling loop; the mode test, the n_kafka_get_status() / n_kafka_get_event() /
       n_kafka_event_destroy() calls, the schema_id test and the 'kafka_handle' / 'producer_mode'
       names are assumptions based on the definitions listed below, the statements that were
       present in this excerpt are kept as-is */
    do {
        if (producer_mode) {
            n_kafka_get_status(kafka_handle, &nb_queued, &nb_waiting, &nb_error);
            n_log(LOG_DEBUG, "polling kafka handle, status: %d, %zu in queue, %zu waiting for ack, %zu on error", poll_status, nb_queued, nb_waiting, nb_error);
            usleep(30000);
            if (nb_queued == 0 && nb_waiting == 0 && nb_error == 0)
                break;
        } else {
            N_KAFKA_EVENT* event = n_kafka_get_event(kafka_handle);
            if (event) {
                /* events carrying a schema id have a 4 byte prefix holding that id, hence the
                   '+ 4' offset when printing the payload */
                if (event->schema_id != -1)
                    n_log(LOG_INFO, "received event schema id %d string:\n%s", event->schema_id, _str(event->event_string->data + 4));
                else
                    n_log(LOG_INFO, "received event string:\n%s", event->event_string->data);
                n_kafka_event_destroy(&event);
            } else {
                usleep(30000);
            }
        }
    } while (run && poll_status > 0);
    n_log(LOG_INFO, "kafka_handle: %zu queued, %zu waiting ack, %zu on error", nb_queued, nb_waiting, nb_error);
    if (nb_error > 0 || nb_waiting > 0) {
        n_log(LOG_ERR, "kafka_handle: %zu events are still waiting for ack, and %zu are on error !", nb_waiting, nb_error);
    }
    /* report events still sitting in the received list; the list_foreach() iteration and the way
       'event' is read from 'node' are assumptions based on the definitions listed below */
    list_foreach(node, kafka_handle->received_events) {
        N_KAFKA_EVENT* event = (N_KAFKA_EVENT*)node->ptr;
        if (event->schema_id != -1)
            n_log(LOG_INFO, "[unprocessed]received event schema id %d string:\n%s", event->schema_id, event->event_string->data + 4);
        else
            n_log(LOG_INFO, "[unprocessed]received event string:\n%s", event->event_string->data);
    }
    exit(exit_code);
}
Definitions referenced by this example:

static void stop(int sig)
#define FALL_THROUGH
set windows if true
#define Malloc(__ptr, __struct, __size)
Malloc Handler to get errors and set to 0.
#define __n_assert(__ptr, __ret)
macro to assert things
#define _str(__PTR)
String or "NULL" string for logging purposes.
#define _nstr(__PTR)
N_STR or "NULL" string for logging purposes.
#define list_foreach(__ITEM_, __LIST_)
ForEach macro helper.
FILE * get_log_file(void)
return the current log_file
#define n_log(__LEVEL__,...)
Logging function wrapper to get line and func.
#define LOG_DEBUG
debug-level messages
#define LOG_ERR
error conditions
int set_log_file(char *file)
Set the logging to a file instead of stderr.
void set_log_level(const int log_level)
Set the global log level value ( static int LOG_LEVEL )
#define LOG_NOTICE
normal but significant condition
#define LOG_NULL
no log output
#define LOG_INFO
informational
LIST * received_events
list of received N_KAFKA_EVENT
int schema_id
kafka schema id in network order
int n_kafka_dump_unprocessed(N_KAFKA *kafka, char *directory)
dump unprocessed/unsent events
N_KAFKA_EVENT * n_kafka_get_event(N_KAFKA *kafka)
get a received event from the N_KAFKA kafka handle
N_KAFKA * n_kafka_load_config(char *config_file, int mode)
load a kafka configuration from a file
int n_kafka_get_status(N_KAFKA *kafka, size_t *nb_queued, size_t *nb_waiting, size_t *nb_error)
return the queues' status
int n_kafka_event_destroy(N_KAFKA_EVENT **event)
destroy a kafka event and set its pointer to NULL
int n_kafka_produce(N_KAFKA *kafka, N_KAFKA_EVENT *event)
put an event in the events_to_send list
N_KAFKA_EVENT * n_kafka_new_event_from_char(char *string, size_t written, int schema_id)
make a new event from a char *string
int n_kafka_start_pooling_thread(N_KAFKA *kafka)
start the pooling thread of a kafka handle
N_KAFKA_EVENT * n_kafka_new_event_from_file(char *filename, int schema_id)
make a new event from a N_STR *string
void n_kafka_delete(N_KAFKA *kafka)
delete a N_KAFKA handle
structure of a KAFKA consumer or producer handle
structure of a KAFKA message
#define local_strdup(__src_)
Do tar(1) matching rules, which ignore a trailing slash?
A box including a string and its length.
Kafka generic produce and consume event header.
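A minimal, hedged sketch of the producer path built only from the declarations listed above. The include paths, the RD_KAFKA_PRODUCER mode constant passed to n_kafka_load_config(), the return value checks and the configuration file name are assumptions, not taken from the original example.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include "rdkafka.h"
#include "nilorea/n_log.h"
#include "nilorea/n_kafka.h"

int main(void) {
    set_log_level(LOG_INFO);

    /* create a producer handle from a configuration file (mode constant assumed) */
    N_KAFKA* kafka = n_kafka_load_config("kafka_producer.cfg", RD_KAFKA_PRODUCER);
    if (!kafka) {
        n_log(LOG_ERR, "could not load kafka configuration");
        return 1;
    }

    /* start the background thread that feeds and polls librdkafka */
    n_kafka_start_pooling_thread(kafka);

    /* build one event from a plain string; schema id -1 is assumed to mean "no schema" */
    const char* payload = "{\"msg\":\"hello from the sketch\"}";
    N_KAFKA_EVENT* event = n_kafka_new_event_from_char((char*)payload, strlen(payload), -1);
    if (!event) {
        n_log(LOG_ERR, "could not build the test event");
    } else if (n_kafka_produce(kafka, event) == -1) { /* return convention assumed */
        n_log(LOG_ERR, "could not queue the test event");
    }

    /* wait until nothing is queued or waiting for acknowledgment anymore
       (a real program would also bound this wait) */
    size_t nb_queued = 0, nb_waiting = 0, nb_error = 0;
    do {
        n_kafka_get_status(kafka, &nb_queued, &nb_waiting, &nb_error);
        usleep(30000);
    } while (nb_queued > 0 || nb_waiting > 0);

    if (nb_error > 0)
        n_log(LOG_ERR, "%zu event(s) ended in error", nb_error);

    n_kafka_delete(kafka); /* release the handle */
    return (nb_error > 0) ? 1 : 0;
}

A consumer would instead load its configuration with the consumer mode, then repeatedly call n_kafka_get_event() and release each handled event with n_kafka_event_destroy(), as the polling loop of the example above does.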