From b9c25b65036f2ca80dbbec45a7798ec6e03c17bf Mon Sep 17 00:00:00 2001 From: "Anand H. Krishnan" Date: Tue, 26 May 2015 15:08:24 +0530 Subject: [PATCH] Extend the flow hold count per-cpu statistics to 128 cpus vRouter keeps a per-cpu flow hold count statistic. This statistic is exported to user space processes to aid in debugging. While this statistic is maintained for all the cpus, vRouter exports statistics only for the first 64 cpus. The main reason why we limit the export to only 64 cpus is that the messaging infrastructure within vRouter has a notion of how much to allocate for each message based on the structure name. This calculation is not dynamic since for most structures the calculation remains the same. For flow, we allocate only for 64 cpus. While making this calculation dynamic is a larger effort, for now we will extend the memory allocated to accommodate 128 cpus (which is reasonable). Also, split the regular flow request and the flow table information requests so that we allocate only what we need. Part of this commit also fixes the problem where vRouter was setting the sandesh list size to the size of the memory rather than the number of elements in the list, which resulted in sandesh encode failures in the case of a large cpu count. 
Change-Id: I3be31c10c86f52457199e5015d85ac2c7d76f5cf Closes-BUG: #1458795 --- dp-core/vr_flow.c | 18 ++++++++++++------ dp-core/vr_sandesh.c | 8 ++++++-- include/vr_message.h | 1 + include/vr_sandesh.h | 2 ++ utils/flow.c | 19 +++++++++++++++---- 5 files changed, 36 insertions(+), 12 deletions(-) diff --git a/dp-core/vr_flow.c b/dp-core/vr_flow.c index 8654be2b2..c7d9d9786 100644 --- a/dp-core/vr_flow.c +++ b/dp-core/vr_flow.c @@ -1283,7 +1283,8 @@ vr_flow_req_destroy(vr_flow_req *req) vr_flow_req * vr_flow_req_get(vr_flow_req *ref_req) { - unsigned int hold_stat_size = vr_num_cpus * sizeof(uint32_t); + unsigned int hold_stat_size; + unsigned int num_cpus = vr_num_cpus; vr_flow_req *req = vr_zalloc(sizeof(*req)); if (!req) @@ -1304,6 +1305,10 @@ vr_flow_req_get(vr_flow_req *ref_req) } } + if (num_cpus > VR_FLOW_MAX_CPUS) + num_cpus = VR_FLOW_MAX_CPUS; + + hold_stat_size = num_cpus * sizeof(uint32_t); req->fr_hold_stat = vr_zalloc(hold_stat_size); if (!req->fr_hold_stat) { if (vr_flow_path && req->fr_file_path) { @@ -1314,7 +1319,7 @@ vr_flow_req_get(vr_flow_req *ref_req) vr_free(req); return NULL; } - req->fr_hold_stat_size = hold_stat_size; + req->fr_hold_stat_size = num_cpus; return req; } @@ -1326,7 +1331,7 @@ void vr_flow_req_process(void *s_req) { int ret = 0; - unsigned int i; + unsigned int i, object = VR_FLOW_OBJECT_ID; bool need_destroy = false; uint64_t hold_count = 0; @@ -1358,8 +1363,8 @@ vr_flow_req_process(void *s_req) resp->fr_hold_oflows = router->vr_flow_table_info->vfti_oflows; resp->fr_added = router->vr_flow_table_info->vfti_added; resp->fr_cpus = vr_num_cpus; - /* we only have space for VR_MAX_CPUS stats block max when encoding */ - for (i = 0; ((i < vr_num_cpus) && (i < VR_FLOW_MAX_CPUS)); i++) { + /* we only have space for VR_FLOW_MAX_CPUS stats blocks max when encoding */ + for (i = 0; ((i < vr_num_cpus) && (i < VR_FLOW_MAX_CPUS)); i++) { resp->fr_hold_stat[i] = router->vr_flow_table_info->vfti_hold_count[i]; hold_count += resp->fr_hold_stat[i]; @@ -1367,6
+1372,7 @@ vr_flow_req_process(void *s_req) resp->fr_created = hold_count; + object = VR_FLOW_INFO_OBJECT_ID; break; case FLOW_OP_FLOW_SET: @@ -1379,7 +1385,7 @@ vr_flow_req_process(void *s_req) } send_response: - vr_message_response(VR_FLOW_OBJECT_ID, resp, ret); + vr_message_response(object, resp, ret); if (need_destroy) { vr_flow_req_destroy(resp); } diff --git a/dp-core/vr_sandesh.c b/dp-core/vr_sandesh.c index c3d26ccf1..af72f5846 100644 --- a/dp-core/vr_sandesh.c +++ b/dp-core/vr_sandesh.c @@ -35,8 +35,7 @@ struct sandesh_object_md sandesh_md[] = { .obj_type_string = "vr_mirror_req", }, [VR_FLOW_OBJECT_ID] = { - .obj_len = ((4 * sizeof(vr_flow_req)) + - (VR_MAX_CPUS * sizeof(unsigned int))), + .obj_len = 4 * sizeof(vr_flow_req), .obj_type_string = "vr_flow_req", }, [VR_VRF_ASSIGN_OBJECT_ID] = { @@ -63,6 +62,11 @@ struct sandesh_object_md sandesh_md[] = { .obj_len = 4 * sizeof(vrouter_ops), .obj_type_string = "vrouter_ops", }, + [VR_FLOW_INFO_OBJECT_ID] = { + .obj_len = ((4 * sizeof(vr_flow_req)) + + (VR_FLOW_MAX_CPUS * sizeof(unsigned int))), + .obj_type_string = "vr_flow_req", + }, }; static unsigned int diff --git a/include/vr_message.h b/include/vr_message.h index 33c4f1210..476394e1e 100644 --- a/include/vr_message.h +++ b/include/vr_message.h @@ -28,6 +28,7 @@ #define VR_DROP_STATS_OBJECT_ID 10 #define VR_VXLAN_OBJECT_ID 11 #define VR_VROUTER_OPS_OBJECT_ID 12 +#define VR_FLOW_INFO_OBJECT_ID 13 #define VR_MESSAGE_PAGE_SIZE (4096 - 128) diff --git a/include/vr_sandesh.h b/include/vr_sandesh.h index 4fbf19bdc..f7f28e5b8 100644 --- a/include/vr_sandesh.h +++ b/include/vr_sandesh.h @@ -6,6 +6,8 @@ #ifndef __VR_SANDESH_H__ #define __VR_SANDESH_H__ +#define VR_FLOW_MAX_CPUS 128 + struct sandesh_object_md { unsigned int obj_len; char *obj_type_string; diff --git a/utils/flow.c b/utils/flow.c index 5a5f17126..dfad1889a 100644 --- a/utils/flow.c +++ b/utils/flow.c @@ -66,7 +66,8 @@ struct flow_table { unsigned int ft_flags; unsigned int ft_cpus; unsigned int 
ft_hold_oflows; - u_int32_t ft_hold_stat[64]; + unsigned int ft_hold_stat_count; + u_int32_t ft_hold_stat[128]; char flow_table_path[256]; } main_table; @@ -156,9 +157,9 @@ dump_table(struct flow_table *ft) printf("Entries: Created %lu Added %lu Processed %lu\n", ft->ft_created, ft->ft_added, ft->ft_processed); printf("(Created Flows/CPU: "); - for (i = 0; i < ft->ft_cpus; i++) { + for (i = 0; i < ft->ft_hold_stat_count; i++) { printf("%u", ft->ft_hold_stat[i]); - if (i != (ft->ft_cpus - 1)) + if (i != (ft->ft_hold_stat_count - 1)) printf(" "); } printf(")(oflows %u)\n\n", ft->ft_hold_oflows); @@ -543,9 +544,19 @@ flow_table_map(vr_flow_req *req) ft->ft_cpus = req->fr_cpus; if (req->fr_hold_stat && req->fr_hold_stat_size) { - for (i = 0; i < ft->ft_cpus; i++) { + ft->ft_hold_stat_count = req->fr_hold_stat_size; + for (i = 0; i < req->fr_hold_stat_size; i++) { + if (i == + (sizeof(ft->ft_hold_stat) / sizeof(ft->ft_hold_stat[0]))) { + ft->ft_hold_stat_count = i; + break; + } + ft->ft_hold_stat[i] = req->fr_hold_stat[i]; } + } else { + ft->ft_hold_stat_count = 0; + memset(ft->ft_hold_stat, 0, sizeof(ft->ft_hold_stat)); } return ft->ft_num_entries;