From 144176a25213d74ae65a5c10892ac3233149831b Mon Sep 17 00:00:00 2001
From: David Bar-On
Date: Thu, 30 Jun 2022 17:09:27 +0300
Subject: [PATCH 1/2] Fix issue #754 - incorrect UDP link capability in JSON output

---
 src/iperf_api.c        | 29 +++++++++++++++++++++--------
 src/iperf_server_api.c |  2 +-
 2 files changed, 22 insertions(+), 9 deletions(-)

diff --git a/src/iperf_api.c b/src/iperf_api.c
index a95e02418..1fdd57544 100644
--- a/src/iperf_api.c
+++ b/src/iperf_api.c
@@ -3603,12 +3603,12 @@ iperf_print_results(struct iperf_test *test)
     }
 
     /*
-     * We must to sum streams separately.
-     * For bidirectional mode we must to display
+     * We must sum streams separately.
+     * For bidirectional mode we must display
      * information about sender and receiver streams.
      * For client side we must handle sender streams
      * firstly and receiver streams for server side.
-     * The following design allows us to do this.
+     * The following design allows us to do that.
      */
     if (test->mode == BIDIRECTIONAL) {
@@ -3643,6 +3643,7 @@ iperf_print_results(struct iperf_test *test)
     double sender_time = 0.0, receiver_time = 0.0;
     struct iperf_time temp_time;
     double bandwidth;
+    iperf_size_t bytes_count;
     char mbuf[UNIT_LEN];
     int stream_must_be_sender = current_mode * current_mode;
 
@@ -3790,7 +3791,7 @@ iperf_print_results(struct iperf_test *test)
                      * information for both the sender and receiver
                      * side.
                      *
-                     * The JSON format as currently defined only includes one
+                     * The JSON format is currently defined to only include one
                      * value for the number of packets. We usually want that
                      * to be the sender's value (how many packets were sent
                      * by the sender). However this value might not be
@@ -3800,8 +3801,19 @@ iperf_print_results(struct iperf_test *test)
                      * is the case, then use the receiver's count of packets
                      * instead.
                      */
+                    // Choose the sent or received bytes count based on the test mode
                     int64_t packet_count = sender_packet_count ? sender_packet_count : receiver_packet_count;
-                    cJSON_AddItemToObject(json_summary_stream, "udp", iperf_json_printf("socket: %d start: %f end: %f seconds: %f bytes: %d bits_per_second: %f jitter_ms: %f lost_packets: %d packets: %d lost_percent: %f out_of_order: %d sender: %b", (int64_t) sp->socket, (double) start_time, (double) sender_time, (double) sender_time, (int64_t) bytes_sent, bandwidth * 8, (double) sp->jitter * 1000.0, (int64_t) (sp->cnt_error - sp->omitted_cnt_error), (int64_t) (packet_count - sp->omitted_packet_count), (double) lost_percent, (int64_t) (sp->outoforder_packets - sp->omitted_outoforder_packets), stream_must_be_sender));
+                    if (test->mode == SENDER)
+                        bytes_count = bytes_sent;
+                    else if (test->mode == RECEIVER)
+                        bytes_count = bytes_received;
+                    else { // BIDIRECTIONAL
+                        if (stream_must_be_sender)
+                            bytes_count = bytes_sent;
+                        else
+                            bytes_count = bytes_received;
+                    }
+                    cJSON_AddItemToObject(json_summary_stream, "udp", iperf_json_printf("socket: %d start: %f end: %f seconds: %f bytes: %d bits_per_second: %f jitter_ms: %f lost_packets: %d packets: %d lost_percent: %f out_of_order: %d sender: %b", (int64_t) sp->socket, (double) start_time, (double) sender_time, (double) sender_time, (int64_t) bytes_count, bandwidth * 8, (double) sp->jitter * 1000.0, (int64_t) (sp->cnt_error - sp->omitted_cnt_error), (int64_t) (packet_count - sp->omitted_packet_count), (double) lost_percent, (int64_t) (sp->outoforder_packets - sp->omitted_outoforder_packets), stream_must_be_sender));
                 }
                 else {
                     /*
@@ -3895,7 +3907,7 @@ iperf_print_results(struct iperf_test *test)
                 }
             }
         }
-    }
+    }  // End of if there is at least one stream
 
     if (test->num_streams > 1 || test->json_output) {
         /*
@@ -3989,7 +4001,8 @@ iperf_print_results(struct iperf_test *test)
              * structure is not recommended due to
              * ambiguities between the sender and receiver.
              */
-            cJSON_AddItemToObject(test->json_end, sum_name, iperf_json_printf("start: %f end: %f seconds: %f bytes: %d bits_per_second: %f jitter_ms: %f lost_packets: %d packets: %d lost_percent: %f sender: %b", (double) start_time, (double) receiver_time, (double) receiver_time, (int64_t) total_sent, bandwidth * 8, (double) avg_jitter * 1000.0, (int64_t) lost_packets, (int64_t) total_packets, (double) lost_percent, stream_must_be_sender));
+            bytes_count = stream_must_be_sender ? total_sent : total_received;
+            cJSON_AddItemToObject(test->json_end, sum_name, iperf_json_printf("start: %f end: %f seconds: %f bytes: %d bits_per_second: %f jitter_ms: %f lost_packets: %d packets: %d lost_percent: %f sender: %b", (double) start_time, (double) receiver_time, (double) receiver_time, (int64_t) bytes_count, bandwidth * 8, (double) avg_jitter * 1000.0, (int64_t) lost_packets, (int64_t) total_packets, (double) lost_percent, stream_must_be_sender));
             /*
              * Separate sum_sent and sum_received structures.
              * Using these structures to get the most complete
@@ -4087,7 +4100,7 @@ iperf_print_results(struct iperf_test *test)
                 }
             }
         }
-    }
+    }  // End of loop over the lower and upper modes
 
     /* Set real sender_has_retransmits for current side */
     if (test->mode == BIDIRECTIONAL)
diff --git a/src/iperf_server_api.c b/src/iperf_server_api.c
index 18f105ded..dffd0fdc9 100644
--- a/src/iperf_server_api.c
+++ b/src/iperf_server_api.c
@@ -209,13 +209,13 @@ iperf_handle_message_server(struct iperf_test *test)
                     FD_CLR(sp->socket, &test->write_set);
                 close(sp->socket);
             }
-            test->reporter_callback(test);
             if (iperf_set_send_state(test, EXCHANGE_RESULTS) != 0)
                 return -1;
             if (iperf_exchange_results(test) < 0)
                 return -1;
             if (iperf_set_send_state(test, DISPLAY_RESULTS) != 0)
                 return -1;
+            test->reporter_callback(test);
             if (test->on_test_finish)
                 test->on_test_finish(test);
             break;

From 00ea5a063bbda0845b20b1c2a809a8ac14f4c971 Mon Sep 17 00:00:00 2001
From: David Bar-On
Date: Wed, 26 Jul 2023 14:25:31 +0300
Subject: [PATCH 2/2] Fix issue #1552 - remote CPU utilization on the server

---
 src/iperf_api.c | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/src/iperf_api.c b/src/iperf_api.c
index 1fdd57544..6fce790ad 100644
--- a/src/iperf_api.c
+++ b/src/iperf_api.c
@@ -4059,13 +4059,12 @@ iperf_print_results(struct iperf_test *test)
     }
     else {
         if (test->verbose) {
-            if (stream_must_be_sender) {
-                if (test->bidirectional) {
-                    iperf_printf(test, report_cpu, report_local, stream_must_be_sender?report_sender:report_receiver, test->cpu_util[0], test->cpu_util[1], test->cpu_util[2], report_remote, stream_must_be_sender?report_receiver:report_sender, test->remote_cpu_util[0], test->remote_cpu_util[1], test->remote_cpu_util[2]);
-                    iperf_printf(test, report_cpu, report_local, !stream_must_be_sender?report_sender:report_receiver, test->cpu_util[0], test->cpu_util[1], test->cpu_util[2], report_remote, !stream_must_be_sender?report_receiver:report_sender, test->remote_cpu_util[0], test->remote_cpu_util[1], test->remote_cpu_util[2]);
-                } else
-                    iperf_printf(test, report_cpu, report_local, stream_must_be_sender?report_sender:report_receiver, test->cpu_util[0], test->cpu_util[1], test->cpu_util[2], report_remote, stream_must_be_sender?report_receiver:report_sender, test->remote_cpu_util[0], test->remote_cpu_util[1], test->remote_cpu_util[2]);
-            }
+            if (test->bidirectional) {
+                iperf_printf(test, report_cpu, report_local, stream_must_be_sender?report_sender:report_receiver, test->cpu_util[0], test->cpu_util[1], test->cpu_util[2], report_remote, stream_must_be_sender?report_receiver:report_sender, test->remote_cpu_util[0], test->remote_cpu_util[1], test->remote_cpu_util[2]);
+                iperf_printf(test, report_cpu, report_local, !stream_must_be_sender?report_sender:report_receiver, test->cpu_util[0], test->cpu_util[1], test->cpu_util[2], report_remote, !stream_must_be_sender?report_receiver:report_sender, test->remote_cpu_util[0], test->remote_cpu_util[1], test->remote_cpu_util[2]);
+            } else
+                iperf_printf(test, report_cpu, report_local, stream_must_be_sender?report_sender:report_receiver, test->cpu_util[0], test->cpu_util[1], test->cpu_util[2], report_remote, stream_must_be_sender?report_receiver:report_sender, test->remote_cpu_util[0], test->remote_cpu_util[1], test->remote_cpu_util[2]);
+
             if (test->protocol->id == Ptcp) {
                 char *snd_congestion = NULL, *rcv_congestion = NULL;
                 if (stream_must_be_sender) {
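
For reference, the substance of PATCH 1/2 above is how the "bytes" fields of the UDP JSON output
are chosen: a sending stream (and the sending-side sum) should report the sent byte count, a
receiving stream the received byte count, rather than always reporting bytes_sent. The following
is a minimal standalone sketch of that selection logic, not iperf3 source; the
SENDER/RECEIVER/BIDIRECTIONAL values and the helper name choose_bytes_count() are illustrative
assumptions made only for this example.

    /* Standalone sketch of the byte-count selection introduced by the patch.
     * The mode constants mirror the names used in iperf3 but are defined here
     * only for this example. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    enum test_mode { RECEIVER = 0, SENDER = 1, BIDIRECTIONAL = -1 };

    static uint64_t
    choose_bytes_count(enum test_mode mode, int stream_is_sender,
                       uint64_t bytes_sent, uint64_t bytes_received)
    {
        if (mode == SENDER)
            return bytes_sent;        /* forward test: report what was sent */
        if (mode == RECEIVER)
            return bytes_received;    /* reverse test: report what was received */
        /* BIDIRECTIONAL: pick per stream direction */
        return stream_is_sender ? bytes_sent : bytes_received;
    }

    int
    main(void)
    {
        /* In a bidirectional run the sending stream reports its sent bytes
         * and the receiving stream reports its received bytes. */
        printf("sender stream:   %" PRIu64 " bytes\n",
               choose_bytes_count(BIDIRECTIONAL, 1, 1000000, 990000));
        printf("receiver stream: %" PRIu64 " bytes\n",
               choose_bytes_count(BIDIRECTIONAL, 0, 1000000, 990000));
        return 0;
    }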