How to resolve packet drops in DPDK 17.11.1 when doing destination-based rate limiting
Edited the problem description to highlight more information about the core logic.
We are seeing a performance problem when doing destination-based rate limiting. We keep state for every {destination, source} pair (up to 100 destinations and 2^16 sources). We have an array of 100 nodes, and each node holds an rte_hash*. That hash table maintains the state of every source IP seen for that destination. We have a mapping for every destination seen (from 0 to 100), which is used to index into the array. If a particular source exceeds the threshold defined for that destination within one second, we block the source; otherwise we allow it. At runtime, when we see traffic for only 2 or 3 destinations, there is no problem, but when there are more than 5 destinations we see a lot of drops. Our function has to do a lookup, identify the flow matching dest_ip and src_ip, process the flow and decide whether it needs to be dropped. If the flow is not found, it is added to the hash.
struct flow_state {
struct rte_hash* hash;
};
struct flow_state flow_state_arr[100];
// These hash tables will be created with rte_hash_create in pipeline_init
// and freed during pipeline_free.
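A minimal sketch of how one such per-destination table might be created in pipeline_init, assuming the key is the 32-bit source IP (the table name, entry count and socket id below are illustrative, not the actual configuration):

#include <stdio.h>
#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_lcore.h>

/* Illustrative only: create one source table for a destination slot.
 * The name, entry count and socket id are assumptions. */
static struct rte_hash* create_src_table(uint32_t index)
{
    char name[RTE_HASH_NAMESIZE];
    snprintf(name, sizeof(name), "src_tbl_%u", index);

    struct rte_hash_parameters params = {
        .name = name,
        .entries = 65536,                /* one entry per possible source */
        .key_len = sizeof(uint32_t),     /* key is the source IP */
        .hash_func = rte_jhash,
        .hash_func_initval = 0,
        .socket_id = rte_socket_id(),
    };
    return rte_hash_create(&params);     /* returns NULL on failure */
}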
I have outlined what we do in pseudocode below.
run()
{
1) do rx
2) from the pkt, get the index into flow_state_arr and retrieve the rte_hash* handle
3) rte_hash_lookup_data(hash, src_ip, flow_data)
4) if an entry is found, take a decision on the flow (the decision is simply rate limiting the flow)
5) else rte_hash_add_data(hash, new_flow_data) to add the flow to the table and forward it
}
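In terms of the rte_hash API, steps 3-5 boil down to a lookup followed by an insert on a miss; a minimal sketch (the helper name and error handling are assumptions):

#include <rte_hash.h>

/* Illustrative helper for steps 3-5: look up src_ip and, on a miss, insert
 * new_data for it. Returns the per-flow data, or NULL if the insert failed
 * (for example because the table is full). */
static void* lookup_or_add_flow(struct rte_hash* hash, uint32_t* src_ip,
                                void* new_data)
{
    void* data = NULL;

    if (rte_hash_lookup_data(hash, src_ip, &data) >= 0)
        return data;                     /* existing flow found */

    if (rte_hash_add_key_data(hash, src_ip, new_data) == 0)
        return new_data;                 /* added as a new flow */

    return NULL;                         /* table full or other error */
}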
Please advise whether it is acceptable to have these multiple hash table objects in the data path, or, if the state of each destination needs to be handled separately, what the best approach would be.
Edit
Thanks for the answers. I am happy to share the code snippet and the results we collected. I do not have comparison results for other DPDK versions, but below are some results from testing with 17.11.1.
Test setup
I am using an IXIA traffic generator (generating 12Mpps over two 10G links) for 3 destinations 14.143.156.x (x = 101, 102 and 103 in this case). Each destination's traffic comes from 2^16 different sources. This is the traffic generator setup.
Code snippet
struct flow_state_t {
struct rte_hash* hash;
uint32_t size;
uint64_t threshold;
};
struct flow_data_t {
uint8_t curr_state; // 0 if blocked,1 if allowed
uint64_t pps_count;
uint64_t src_first_seen;
};
struct pipeline_ratelimit {
struct pipeline p;
struct pipeline_ratelimit_params params;
rte_table_hash_op_hash f_hash;
uint32_t swap_field0_offset[SWAP_DIM];
uint32_t swap_field1_offset[SWAP_DIM];
uint64_t swap_field_mask[SWAP_DIM];
uint32_t swap_n_fields;
pipeline_msg_req_handler custom_handlers[2]; // handlers for add and del
struct flow_state_t flow_state_arr[100];
struct flow_data_t flows[100][65536];
} __rte_cache_aligned;
/*
add_handler(pipeline, msg) -- msg includes index and threshold
In the add handler:
- a rule/threshold is added for a destination
- rte_hash_create is called and the rte_hash* is stored in flow_state_arr[index]
- a maximum of 100 destinations or rules is allowed
- previous pipelines add the ID (index) to the packet, which is used to look
  into flow_state_arr for the rule
*/
/*
del_handler(pipeline, msg) -- msg includes index
In the del handler:
- the rule/threshold @index is deleted
- the associated rte_hash* is also freed
- the slot is made free
*/
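A rough sketch of what the two handlers might look like, building on the create_src_table() helper sketched earlier (the argument layout and the -1 error convention are assumptions, not the original handlers):

/* Sketch of the add handler body: create the per-destination table and
 * record the threshold. 'index' and 'threshold' come from the message. */
static int ratelimit_add_rule(struct pipeline_ratelimit* ps,
                              uint32_t index, uint64_t threshold)
{
    if (index >= 100 || ps->flow_state_arr[index].hash != NULL)
        return -1;                          /* bad slot or already in use */

    struct rte_hash* h = create_src_table(index);
    if (h == NULL)
        return -1;

    ps->flow_state_arr[index].hash = h;
    ps->flow_state_arr[index].size = 0;
    ps->flow_state_arr[index].threshold = threshold;
    return 0;
}

/* Sketch of the del handler body: free the table and clear the slot. */
static void ratelimit_del_rule(struct pipeline_ratelimit* ps, uint32_t index)
{
    if (index >= 100 || ps->flow_state_arr[index].hash == NULL)
        return;

    rte_hash_free(ps->flow_state_arr[index].hash);
    ps->flow_state_arr[index].hash = NULL;
    ps->flow_state_arr[index].size = 0;
    ps->flow_state_arr[index].threshold = 0;
}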
#define ALLOWED 1
#define BLOCKED 0
#define TABLE_MAX_CAPACITY 65536
int do_rate_limit(struct pipeline_ratelimit* ps,uint32_t id,unsigned char* pkt)
{
uint64_t curr_time_stamp = rte_get_timer_cycles();
struct iphdr* iph = (struct iphdr*)pkt;
uint32_t src_ip = rte_be_to_cpu_32(iph->saddr);
struct flow_state_t* node = &ps->flow_state_arr[id];
struct flow_data_t* flow = NULL;
rte_hash_lookup_data(node->hash, &src_ip, (void**)&flow);
if (flow != NULL)
{
if (flow->curr_state == ALLOWED)
{
if (flow->pps_count++ > node->threshold)
{
uint64_t seconds_elapsed = (curr_time_stamp - flow->src_first_seen) / CYCLES_IN_1_SEC;
if (seconds_elapsed)
{
flow->src_first_seen += seconds_elapsed * CYCLES_IN_1_SEC;
flow->pps_count = 1;
return ALLOWED;
}
else
{
flow->pps_count = 0;
flow->curr_state = BLOCKED;
return BLOCKED;
}
}
return ALLOWED;
}
else
{
uint64_t seconds_elapsed = (curr_time_stamp - flow->src_first_seen) / CYCLES_IN_1_SEC;
if (seconds_elapsed > 120)
{
flow->curr_state = ALLOWED;
flow->pps_count = 0;
flow->src_first_seen += seconds_elapsed * CYCLES_IN_1_SEC;
return ALLOWED;
}
return BLOCKED;
}
}
int index = node->size;
// If the entry was not found and the table has reached capacity,
// reset the whole table and start refilling from index 0
if (node->size == TABLE_MAX_CAPACITY)
{
rte_hash_reset(node->hash);
index = node->size = 0;
}
// Add the new element @flows[id][index]
struct flow_data_t* flow_data = &ps->flows[id][index];
*flow_data = (struct flow_data_t){ ALLOWED, 1, curr_time_stamp };
node->size++;
// Add the new key to hash
rte_hash_add_key_data(node->hash,(void*)&src_ip,(void*)flow_data);
return ALLOWED;
}
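The function above relies on a CYCLES_IN_1_SEC constant whose definition is not shown; presumably it is derived from the frequency of the counter behind rte_get_timer_cycles(). A minimal assumed definition:

#include <rte_cycles.h>

/* Assumed definition (not part of the original snippet): cycles per second
 * of the timer used by rte_get_timer_cycles(). */
#define CYCLES_IN_1_SEC (rte_get_timer_hz())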
static int pipeline_ratelimit_run(void* pipeline)
{
struct pipeline_ratelimit* ps = (struct pipeline_ratelimit*)pipeline;
struct pipeline* p = &ps->p; /* assumed: 'p' was not declared in the original snippet */
struct rte_port_in* port_in = p->port_in_next;
struct rte_port_out* port_out = &p->ports_out[0];
struct rte_port_out* port_drop = &p->ports_out[2];
uint8_t valid_pkt_cnt = 0,invalid_pkt_cnt = 0;
struct rte_mbuf* valid_pkts[RTE_PORT_IN_BURST_SIZE_MAX];
struct rte_mbuf* invalid_pkts[RTE_PORT_IN_BURST_SIZE_MAX];
memset(valid_pkts, 0, sizeof(valid_pkts));
memset(invalid_pkts, 0, sizeof(invalid_pkts));
uint64_t n_pkts;
if (unlikely(port_in == NULL)) {
return 0;
}
/* Input port RX */
n_pkts = port_in->ops.f_rx(port_in->h_port,p->pkts,port_in->burst_size);
if (n_pkts == 0)
{
p->port_in_next = port_in->next;
return 0;
}
uint32_t rc = 0;
char* rx_pkt = NULL;
for (uint64_t j = 0; j < n_pkts; j++) {
struct rte_mbuf* m = p->pkts[j];
rx_pkt = rte_pktmbuf_mtod(m,char*);
uint32_t id = rte_be_to_cpu_32(*(uint32_t*)(rx_pkt - sizeof(uint32_t)));
unsigned short packet_len = rte_be_to_cpu_16(*((unsigned short*)(rx_pkt + 16)));
struct flow_state_t* node = &(ps->flow_state_arr[id]);
if (node->hash && node->threshold != 0)
{
// Decide whether to allow or drop the packet
// returns allow - 1, drop - 0
if (do_rate_limit(ps, id, (unsigned char*)(rx_pkt + 14)))
valid_pkts[valid_pkt_cnt++] = m;
else
invalid_pkts[invalid_pkt_cnt++] = m;
}
else
valid_pkts[valid_pkt_cnt++] = m;
}
if (invalid_pkt_cnt) {
p->pkts_mask = 0;
rte_memcpy(p->pkts,invalid_pkts,sizeof(invalid_pkts));
p->pkts_mask = RTE_LEN2MASK(invalid_pkt_cnt,uint64_t);
rte_pipeline_action_handler_port_bulk_mod(p,p->pkts_mask,port_drop);
}
p->pkts_mask = 0;
memset(p->pkts, 0, sizeof(p->pkts));
if (valid_pkt_cnt != 0)
{
rte_memcpy(p->pkts,valid_pkts,sizeof(valid_pkts));
p->pkts_mask = RTE_LEN2MASK(valid_pkt_cnt,uint64_t);
}
rte_pipeline_action_handler_port_bulk_mod(p, p->pkts_mask, port_out);
/* Pick candidate for next port IN to serve */
p->port_in_next = port_in->next;
return (int)n_pkts;
}
Results
- When traffic was generated from 60000 sources for only one destination with a threshold of 14Mpps, there were no drops. We were able to send 12Mpps from IXIA and receive 12Mpps.
- Drops were observed after adding 3 or more destinations (each configured to receive traffic from 60000 sources). The throughput was only 8-9 Mpps. When sending to 100 destinations (60000 sources each), only 6.4Mpps were processed, a 50% drop.
- When running it under the VTune profiler, it reported rte_hash_lookup_data as the hotspot and mostly memory bound (DRAM bound). I will attach the VTune report soon.
Solution
Based on the update from internal testing, the rte_hash library does not cause the performance degradation. Hence, as suggested in the comments, it is more likely the current pattern and algorithm design that leads to cache misses and fewer instructions per cycle. To identify whether it is a frontend stall, a backend pipeline stall, or a memory stall, use perf or vtune. Also, try to minimize branching and make more use of likely and prefetch.
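As an illustration of the last suggestion, the per-packet loop in pipeline_ratelimit_run could apply branch hints and prefetching roughly as follows. This is only a sketch reusing the variables from the run function above (p, ps, valid_pkts, invalid_pkts and the counters); the prefetch distance and placement would need tuning on the target hardware.

/* likely/unlikely come from <rte_branch_prediction.h>, rte_prefetch0 from
 * <rte_prefetch.h>. */
for (uint64_t j = 0; j < n_pkts; j++) {
    /* Prefetch the next packet's data while classifying the current one. */
    if (likely(j + 1 < n_pkts))
        rte_prefetch0(rte_pktmbuf_mtod(p->pkts[j + 1], void*));

    struct rte_mbuf* m = p->pkts[j];
    char* rx_pkt = rte_pktmbuf_mtod(m, char*);
    uint32_t id = rte_be_to_cpu_32(*(uint32_t*)(rx_pkt - sizeof(uint32_t)));
    struct flow_state_t* node = &ps->flow_state_arr[id];

    if (unlikely(node->hash == NULL || node->threshold == 0)) {
        valid_pkts[valid_pkt_cnt++] = m;        /* no rule configured: forward */
    } else if (likely(do_rate_limit(ps, id, (unsigned char*)(rx_pkt + 14)))) {
        valid_pkts[valid_pkt_cnt++] = m;        /* allowed by the rate limiter */
    } else {
        invalid_pkts[invalid_pkt_cnt++] = m;    /* blocked: route to drop port */
    }
}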