
DPDK 17.11.1 - drops seen when doing destination-based rate limiting

Stack Overflow user
Asked 2020-09-08 05:01:18
1 answer · 159 views

Edited the problem statement to highlight more of the core logic.

We are seeing performance issues when doing destination-based rate limiting. We maintain state for every {destination, source} pair (up to 100 destinations and 2^16 sources per destination). We have an array of 100 nodes, each holding an rte_hash*; each hash table maintains the state of every source IP seen for that destination. We keep a mapping (0 to 100) for every destination seen, which is used to index into the array. If a particular source exceeds the threshold defined for its destination within one second, we block that source; otherwise we allow it. At runtime, when we see traffic for only two or three destinations there is no problem, but beyond about 5 destinations we see a lot of drops. Our function has to look up the flow matching dest_ip and src_ip, process it, and decide whether it needs to be dropped. If the flow is not found, it is added to the hash.

struct flow_state {
    struct rte_hash* hash;    
};

struct flow_state flow_state_arr[100];

// These hash tables are created with rte_hash_create at pipeline_init and freed during pipeline_free.
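
For reference, here is a minimal sketch of how such per-destination tables could be created and freed (the table name, entry count, and the choice of rte_jhash are my assumptions, not the exact parameters from the original code):

#include <rte_hash.h>
#include <rte_jhash.h>
#include <stdio.h>

#define MAX_DESTINATIONS    100
#define TABLE_MAX_CAPACITY  65536

/* Called once per destination slot at pipeline_init time. */
static struct rte_hash* create_src_table(int index, int socket_id)
{
    char name[RTE_HASH_NAMESIZE];
    snprintf(name, sizeof(name), "flow_hash_%d", index);

    struct rte_hash_parameters params = {
        .name = name,
        .entries = TABLE_MAX_CAPACITY,   /* one entry per possible source */
        .key_len = sizeof(uint32_t),     /* key: source IPv4 address */
        .hash_func = rte_jhash,
        .hash_func_init_val = 0,
        .socket_id = socket_id,
    };
    return rte_hash_create(&params);     /* NULL on failure, rte_errno set */
}

/* Called for each slot during pipeline_free. */
static void free_src_table(struct rte_hash* h)
{
    if (h != NULL)
        rte_hash_free(h);
}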

Here is an outline of what we do, in pseudocode.

run()
{
    1) do rx
    2) from the pkt, get index into the flow_state_arr and retrieve the rte_hash* handle    
    3) rte_hash_lookup_data(hash, &src_ip, &flow_data)
    4) if an entry is found, take a decision on the flow (the decision is simply rate limiting the flow)
    5) else rte_hash_add_key_data(hash, &src_ip, new_flow_data) to add the flow to the table and forward
}
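
Concretely, steps 3-5 map onto the rte_hash API roughly as follows. This is only a sketch: flow_data_t matches the struct shown further below, and decide_on_flow() is a hypothetical placeholder for the rate-limit decision.

#include <rte_hash.h>
#include <rte_cycles.h>

/* Sketch of the per-packet lookup-or-add step for one destination's table. */
static inline int handle_src(struct rte_hash* hash, uint32_t src_ip,
                             struct flow_data_t* flow_pool, uint32_t* pool_idx)
{
    struct flow_data_t* flow = NULL;

    /* Step 3: a negative return value means the key is not in the table. */
    if (rte_hash_lookup_data(hash, &src_ip, (void**)&flow) >= 0)
        return decide_on_flow(flow);     /* step 4: allow or rate limit */

    /* Step 5: first packet from this source - record state and forward. */
    struct flow_data_t* new_flow = &flow_pool[(*pool_idx)++];
    new_flow->curr_state = 1;            /* ALLOWED */
    new_flow->pps_count = 1;
    new_flow->src_first_seen = rte_get_timer_cycles();
    rte_hash_add_key_data(hash, &src_ip, new_flow);
    return 1;
}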

Please advise whether it is fine to have these multiple hash table objects in the datapath, or, if the per-destination state needs to be handled differently, what the best approach would be.
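
One alternative to consider (my suggestion, not something from the original post): collapse the 100 per-destination tables into a single rte_hash keyed on the {destination index, source IP} pair, so the datapath touches one table object. Whether one large table beats 100 small ones depends on the access pattern and cache behavior, so both are worth measuring. A sketch:

#include <rte_hash.h>
#include <rte_jhash.h>

/* Hypothetical compound key: one table covers all destinations. */
struct flow_key {
    uint32_t dst_index;   /* 0..99, the per-destination slot */
    uint32_t src_ip;      /* source IPv4 address */
};

static struct rte_hash* create_combined_table(int socket_id)
{
    struct rte_hash_parameters params = {
        .name = "flow_hash_all",
        .entries = 100 * 65536,          /* all possible {dst, src} pairs */
        .key_len = sizeof(struct flow_key),
        .hash_func = rte_jhash,
        .socket_id = socket_id,
    };
    return rte_hash_create(&params);
}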

EDIT

Thank you for the answer. I am happy to share the code snippets and the results we collected. I don't have comparative results from other DPDK versions, but below are some results from our testing with 17.11.1.

Test setup

I am using an IXIA traffic generator (generating 12 Mpps over two 10G links) for 3 destinations, 14.143.156.x (x = 101, 102, 103 in this case). Traffic for each destination comes from 2^16 different sources. This is the traffic-generation setup.

Code snippet

    struct flow_state_t {
        struct rte_hash* hash;
        uint32_t size;
        uint64_t threshold;
    };
    struct flow_data_t {
        uint8_t curr_state; // 0 if blocked, 1 if allowed
        uint64_t pps_count;
        uint64_t src_first_seen;
    };
    struct pipeline_ratelimit {
        struct pipeline p;
        struct pipeline_ratelimit_params params;
        rte_table_hash_op_hash f_hash;
        uint32_t swap_field0_offset[SWAP_DIM];
        uint32_t swap_field1_offset[SWAP_DIM];
        uint64_t swap_field_mask[SWAP_DIM];
        uint32_t swap_n_fields;
        pipeline_msg_req_handler custom_handlers[2]; // handlers for add and del
        struct flow_state_t flow_state_arr[100];
        struct flow_data_t flows[100][65536];
    } __rte_cache_aligned;
    
    /*
      add_handler(pipeline,msg) -- msg includes index and threshold
      In the add handler
      a rule/ threshold is added for a destination
      rte_hash_create and store rte_hash* in flow_state_arr[index]
      max of 100 destinations or rules are allowed
      previous pipelines add the ID (index) to the packet to look in to the
      flow_state_arr for the rule
    */
    
    /*
      del_handler(pipeline,msg) -- msg includes index
      In the del handler
      a rule/ threshold @index is deleted
      the associated rte_hash* is also freed
      the slot is made free
    */
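    
    /*
     * A sketch of what the add handler described above might look like.
     * The msg layout (ratelimit_add_msg) is an assumption for illustration,
     * create_src_table() is the creation sketch shown earlier, and the
     * request/response conventions are simplified here.
     */
    struct ratelimit_add_msg {
        uint32_t index;       /* destination slot, 0..99 */
        uint64_t threshold;   /* packets per second allowed per source */
    };
    
    static void* add_handler(struct pipeline* p, void* msg)
    {
        struct pipeline_ratelimit* ps = (struct pipeline_ratelimit*)p;
        struct ratelimit_add_msg* req = (struct ratelimit_add_msg*)msg;
    
        struct flow_state_t* node = &ps->flow_state_arr[req->index];
        node->hash = create_src_table(req->index, rte_socket_id());
        node->size = 0;
        node->threshold = req->threshold;
        return msg;
    }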
    
    #define ALLOWED 1
    #define BLOCKED 0
    #define TABLE_MAX_CAPACITY 65536
    int do_rate_limit(struct pipeline_ratelimit* ps, uint32_t id, unsigned char* pkt)
    {
        uint64_t curr_time_stamp = rte_get_timer_cycles();
        struct iphdr* iph = (struct iphdr*)pkt;
        uint32_t src_ip = rte_be_to_cpu_32(iph->saddr);
    
        struct flow_state_t* node = &ps->flow_state_arr[id];
        struct flow_data_t* flow = NULL;
        rte_hash_lookup_data(node->hash, &src_ip, (void**)&flow);
        if (flow != NULL)
        {
            if (flow->curr_state == ALLOWED)
            {
                if (flow->pps_count++ > node->threshold)
                {
                    uint64_t seconds_elapsed = (curr_time_stamp - flow->src_first_seen) / CYCLES_IN_1SEC;
                    if (seconds_elapsed)
                    {
                        flow->src_first_seen += seconds_elapsed * CYCLES_IN_1SEC;
                        flow->pps_count = 1;
                        return ALLOWED;
                    }
                    else
                    {
                        flow->pps_count = 0;
                        flow->curr_state = BLOCKED;
                        return BLOCKED;
                    }
                }
                return ALLOWED;
            }
            else
            {
                uint64_t seconds_elapsed = (curr_time_stamp - flow->src_first_seen) / CYCLES_IN_1SEC;
                if (seconds_elapsed > 120)
                {
                    flow->curr_state = ALLOWED;
                    flow->pps_count = 0;
                    flow->src_first_seen += seconds_elapsed * CYCLES_IN_1SEC;
                    return ALLOWED;
                }
                return BLOCKED;
            }
        }
        int index = node->size;
        // If the entry was not found and we have reached capacity,
        // reset the whole table and reuse slot 0 for the new flow
        if (node->size == TABLE_MAX_CAPACITY)
        {
            rte_hash_reset(node->hash);
            index = node->size = 0;
        }
    
        // Add the new element at ps->flows[id][index]
        struct flow_data_t* flow_data = &ps->flows[id][index];
        *flow_data = (struct flow_data_t){ ALLOWED, 1, curr_time_stamp };
        node->size++;
    
        // Add the new key to hash
        rte_hash_add_key_data(node->hash, (void*)&src_ip, (void*)flow_data);    
        return ALLOWED;
    }
    static int pipeline_ratelimit_run(void* pipeline)
    {
        struct pipeline_ratelimit* ps = (struct pipeline_ratelimit*)pipeline;
        struct pipeline* p = &ps->p;   // embedded base pipeline
    
        struct rte_port_in* port_in = p->port_in_next;
        struct rte_port_out* port_out = &p->ports_out[0];
        struct rte_port_out* port_drop = &p->ports_out[2];
    
        uint8_t valid_pkt_cnt = 0, invalid_pkt_cnt = 0;
        struct rte_mbuf* valid_pkts[RTE_PORT_IN_BURST_SIZE_MAX];
        struct rte_mbuf* invalid_pkts[RTE_PORT_IN_BURST_SIZE_MAX];
    
        memset(valid_pkts, 0, sizeof(valid_pkts));
        memset(invalid_pkts, 0, sizeof(invalid_pkts));
    
        uint64_t n_pkts;
        uint32_t j;
    
        if (unlikely(port_in == NULL)) {
            return 0;
        }
    
        /* Input port RX */
        n_pkts = port_in->ops.f_rx(port_in->h_port, p->pkts,
            port_in->burst_size);
    
        if (n_pkts == 0)
        {
            p->port_in_next = port_in->next;
            return 0;
        }
    
        char* rx_pkt = NULL;
    
        for (j = 0; j < n_pkts; j++) {
    
            struct rte_mbuf* m = p->pkts[j];
            rx_pkt = rte_pktmbuf_mtod(m, char*);
            // the previous pipeline prepends the rule ID (index) to the packet
            uint32_t id = rte_be_to_cpu_32(*(uint32_t*)(rx_pkt - sizeof(uint32_t)));
            unsigned short packet_len = rte_be_to_cpu_16(*((unsigned short*)(rx_pkt + 16)));
            (void)packet_len;   // parsed but unused here
    
            struct flow_state_t* node = &(ps->flow_state_arr[id]);
    
            if (node->hash && node->threshold != 0)
            {
                // Decide whether to allow or drop the packet
                // returns ALLOWED (1) or BLOCKED (0); +14 skips the Ethernet header
                if (do_rate_limit(ps, id, (unsigned char*)(rx_pkt + 14)))
                    valid_pkts[valid_pkt_cnt++] = m;
                else
                    invalid_pkts[invalid_pkt_cnt++] = m;
            }
            else
                valid_pkts[valid_pkt_cnt++] = m;
        }
    
        // Drop and forward once per burst, after the classification loop
        if (invalid_pkt_cnt) {
            p->pkts_mask = 0;
            rte_memcpy(p->pkts, invalid_pkts, sizeof(invalid_pkts));
            p->pkts_mask = RTE_LEN2MASK(invalid_pkt_cnt, uint64_t);
            rte_pipeline_action_handler_port_bulk_mod(p, p->pkts_mask, port_drop);
        }
    
        p->pkts_mask = 0;
        memset(p->pkts, 0, sizeof(p->pkts));
    
        if (valid_pkt_cnt != 0)
        {
            rte_memcpy(p->pkts, valid_pkts, sizeof(valid_pkts));
            p->pkts_mask = RTE_LEN2MASK(valid_pkt_cnt, uint64_t);
        }
    
        rte_pipeline_action_handler_port_bulk_mod(p, p->pkts_mask, port_out);
    
        /* Pick candidate for next port IN to serve */
        p->port_in_next = port_in->next;
        return (int)n_pkts;
    }
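
Since rte_hash_lookup_data shows up as memory bound (see the results below), one mitigation worth trying - my suggestion, not part of the original code - is to batch the lookups with rte_hash_lookup_bulk_data, so that several probes are in flight at once instead of one serialized DRAM access per packet. This only pays off when many packets of a burst target the same destination's table:

#include <rte_hash.h>

#define BURST_MAX 64   /* must not exceed RTE_HASH_LOOKUP_BULK_MAX */

/* Sketch: resolve a whole burst of sources against one destination's table. */
static void lookup_burst(struct rte_hash* hash,
                         uint32_t src_ips[], uint32_t n,
                         struct flow_data_t* flows_out[])
{
    const void* keys[BURST_MAX];
    uint64_t hit_mask = 0;
    uint32_t i;

    for (i = 0; i < n; i++)
        keys[i] = &src_ips[i];

    /* One call issues all probes; hits are marked in hit_mask. */
    rte_hash_lookup_bulk_data(hash, keys, n, &hit_mask, (void**)flows_out);

    for (i = 0; i < n; i++)
        if ((hit_mask & (1ULL << i)) == 0)
            flows_out[i] = NULL;   /* miss: caller adds a new flow entry */
}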

Results

  1. When generating traffic from 60,000 sources for ONE destination only, with a threshold of 14 Mpps, there were no drops: we were able to send 12 Mpps from IXIA and receive 12 Mpps back. Drops were observed after adding 3 or more destinations (each configured to receive traffic from 60,000 sources); the throughput fell to only 8-9 Mpps. When sending to 100 destinations (60,000 sources each), only 6.4 Mpps were handled.
  2. On running it through VTune, rte_hash_lookup_data was reported as the hotspot, mostly memory bound (DRAM bound). I will attach the report soon.

1 Answer

Stack Overflow user

Accepted answer

Posted 2020-09-09 02:08:28

Update based on internal testing: the rte_hash library is not the cause of the performance degradation. Hence, as suggested in the comments, it is more likely that the current pattern and algorithm design are causing cache misses and a low instructions-per-cycle count.

To identify whether it is a front-end stall, a back-end pipeline stall, or a memory stall, use perf or VTune. Also try to minimize branching, and make more use of likely()/unlikely() hints and prefetching.
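
As a rough sizing check: flows[100][65536] alone is 100 x 65536 x 24 bytes, roughly 150 MB of flow state, plus the hash tables themselves. That is far larger than any last-level cache, so random per-packet accesses go to DRAM, which matches the VTune finding. A minimal sketch of the likely/prefetch pattern in the RX loop (pipeline details elided; the one-packet-ahead prefetch distance is an assumption to tune):

#include <rte_mbuf.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>

/* Sketch: hide memory latency by prefetching the next packet's header
 * while the current packet is being classified. */
static void process_burst(struct rte_mbuf* pkts[], uint32_t n_pkts)
{
    uint32_t j;

    for (j = 0; j < n_pkts; j++) {
        /* likely() keeps the common path straight; prefetch one ahead. */
        if (likely(j + 1 < n_pkts))
            rte_prefetch0(rte_pktmbuf_mtod(pkts[j + 1], void*));

        char* rx_pkt = rte_pktmbuf_mtod(pkts[j], char*);
        (void)rx_pkt;   /* ... parse id/src_ip and call do_rate_limit() ... */
    }
}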

Original content provided by Stack Overflow.
Original link: https://stackoverflow.com/questions/63787370