This example (l2fwd-crypto) combines DPDK Layer 2 forwarding with encryption. I recently needed to use DPDK's encryption/decryption functions, so I studied this example.
Because DPDK by default only provides the NULL CRYPTO POLL MODE DRIVER, we need to enable the switches for the other CRYPTO POLL MODE DRIVERs when compiling DPDK. Here we take the OPENSSL CRYPTO POLL MODE DRIVER as an example.
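In the legacy make-based build system used by this generation of DPDK (an assumption here; recent releases build with meson), the switch lives in config/common_base, and the OpenSSL PMD additionally needs the OpenSSL development package (libcrypto) installed:

# config/common_base
CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y    # on by default
CONFIG_RTE_LIBRTE_PMD_OPENSSL=y        # flip from =n to =y, then rebuild DPDK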
The two structures for a crypto operation:
The parameters for encrypting data are essentially held in these two structures.
/**
* Cryptographic Operation.
*
* This structure contains data relating to performing cryptographic
* operations. This operation structure is used to contain any operation which
* is supported by the cryptodev API, PMDs should check the type parameter to
* verify that the operation is a supported function of the device. Crypto
* operations are enqueued and dequeued in crypto PMDs using the
* rte_cryptodev_enqueue_burst() / rte_cryptodev_dequeue_burst() calls.
*/
struct rte_crypto_op {
enum rte_crypto_op_type type;
/**< operation type */
enum rte_crypto_op_status status;
/**<
* operation status - this is reset to
* RTE_CRYPTO_OP_STATUS_NOT_PROCESSED on allocation from mempool and
* will be set to RTE_CRYPTO_OP_STATUS_SUCCESS after crypto operation
* is successfully processed by a crypto PMD
*/
struct rte_mempool *mempool;
/**< crypto operation mempool which operation is allocated from */
phys_addr_t phys_addr;
/**< physical address of crypto operation */
void *opaque_data;
/**< Opaque pointer for user data */
RTE_STD_C11
union {
struct rte_crypto_sym_op *sym;
/**< Symmetric operation parameters */
}; /**< operation specific parameters */
} __rte_cache_aligned;
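As a minimal sketch of how an application obtains these operations (the pool name and sizes below are illustrative, following the pattern of this example), each op is allocated from a dedicated mempool and freed back to it after being dequeued:

/* created once at startup */
struct rte_mempool *crypto_op_pool = rte_crypto_op_pool_create(
		"crypto_op_pool", RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		8192, 128, 0, rte_socket_id());

/* per packet: op->status starts as RTE_CRYPTO_OP_STATUS_NOT_PROCESSED */
struct rte_crypto_op *op =
		rte_crypto_op_alloc(crypto_op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);

/* ... fill in op->sym, enqueue, dequeue ... */
rte_crypto_op_free(op); /* return the op to its mempool */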
/**
* Symmetric Cryptographic Operation.
*
* This structure contains data relating to performing symmetric cryptographic
* processing on a referenced mbuf data buffer.
*
* When a symmetric crypto operation is enqueued with the device for processing
* it must have a valid *rte_mbuf* structure attached, via m_src parameter,
* which contains the source data which the crypto operation is to be performed
* on.
* While the mbuf is in use by a crypto operation no part of the mbuf should be
* changed by the application as the device may read or write to any part of the
* mbuf. In the case of hardware crypto devices some or all of the mbuf
* may be DMAed in and out of the device, writing over the original data,
* though only the part specified by the rte_crypto_sym_op for transformation
* will be changed.
* Out-of-place (OOP) operation, where the source mbuf is different to the
* destination mbuf, is a special case. Data will be copied from m_src to m_dst.
* The part copied includes all the parts of the source mbuf that will be
* operated on, based on the cipher.data.offset+cipher.data.length and
* auth.data.offset+auth.data.length values in the rte_crypto_sym_op. The part
* indicated by the cipher parameters will be transformed, any extra data around
* this indicated by the auth parameters will be copied unchanged from source to
* destination mbuf.
* Also in OOP operation the cipher.data.offset and auth.data.offset apply to
* both source and destination mbufs. As these offsets are relative to the
* data_off parameter in each mbuf this can result in the data written to the
* destination buffer being at a different alignment, relative to buffer start,
* to the data in the source buffer.
*/
struct rte_crypto_sym_op {
struct rte_mbuf *m_src; /**< source mbuf */
struct rte_mbuf *m_dst; /**< destination mbuf */
enum rte_crypto_sym_op_sess_type sess_type;
RTE_STD_C11
union {
struct rte_cryptodev_sym_session *session;
/**< Handle for the initialised session context */
struct rte_crypto_sym_xform *xform;
/**< Session-less API crypto operation parameters */
};
struct {
struct {
uint32_t offset;
/**< Starting point for cipher processing, specified
* as number of bytes from start of data in the source
* buffer. The result of the cipher operation will be
* written back into the output buffer starting at
* this location.
*
* @note
* For SNOW 3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
* KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
* and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
* this field should be in bits.
*/
uint32_t length;
/**< The message length, in bytes, of the source buffer
* on which the cryptographic operation will be
* computed. This must be a multiple of the block size
* if a block cipher is being used. This is also the
* same as the result length.
*
* @note
* In the case of CCM @ref RTE_CRYPTO_AUTH_AES_CCM,
* this value should not include the length of the
* padding or the length of the MAC; the driver will
* compute the actual number of bytes over which the
* encryption will occur, which will include these
* values.
*
* @note
* For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
* field should be set to 0.
*
* @note
* For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UEA2,
* KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
* and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
* this field should be in bits.
*/
} data; /**< Data offsets and length for ciphering */
struct {
uint8_t *data;
/**< Initialisation Vector or Counter.
*
* - For block ciphers in CBC or F8 mode, or for KASUMI
* in F8 mode, or for SNOW 3G in UEA2 mode, this is the
* Initialisation Vector (IV) value.
*
* - For block ciphers in CTR mode, this is the counter.
*
* - For GCM mode, this is either the IV (if the length
* is 96 bits) or J0 (for other sizes), where J0 is as
* defined by NIST SP800-38D. Regardless of the IV
* length, a full 16 bytes needs to be allocated.
*
* - For CCM mode, the first byte is reserved, and the
* nonce should be written starting at &iv[1] (to allow
* space for the implementation to write in the flags
* in the first byte). Note that a full 16 bytes should
* be allocated, even though the length field will
* have a value less than this.
*
* - For AES-XTS, this is the 128bit tweak, i, from
* IEEE Std 1619-2007.
*
* For optimum performance, the data pointed to SHOULD
* be 8-byte aligned.
*/
phys_addr_t phys_addr;
uint16_t length;
/**< Length of valid IV data.
*
* - For block ciphers in CBC or F8 mode, or for KASUMI
* in F8 mode, or for SNOW 3G in UEA2 mode, this is the
* length of the IV (which must be the same as the
* block length of the cipher).
*
* - For block ciphers in CTR mode, this is the length
* of the counter (which must be the same as the block
* length of the cipher).
*
* - For GCM mode, this is either 12 (for 96-bit IVs)
* or 16, in which case data points to J0.
*
* - For CCM mode, this is the length of the nonce,
* which can be in the range 7 to 13 inclusive.
*/
} iv; /**< Initialisation vector parameters */
} cipher;
struct {
struct {
uint32_t offset;
/**< Starting point for hash processing, specified as
* number of bytes from start of packet in source
* buffer.
*
* @note
* For CCM and GCM modes of operation, this field is
* ignored. The field @ref aad should be set
* instead.
*
* @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
* mode of operation, this field is set to 0. aad data
* pointer of rte_crypto_sym_op_data structure is
* used instead
*
* @note
* For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
* KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
* and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
* this field should be in bits.
*/
uint32_t length;
/**< The message length, in bytes, of the source
* buffer that the hash will be computed on.
*
* @note
* For CCM and GCM modes of operation, this field is
* ignored. The field @ref aad should be set
* instead.
*
* @note
* For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
* of operation, this field is set to 0.
* Auth.aad.length is used instead.
*
* @note
* For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
* KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
* and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
* this field should be in bits.
*/
} data; /**< Data offsets and length for authentication */
struct {
uint8_t *data;
/**< If this member of this structure is set this is a
* pointer to the location where the digest result
* should be inserted (in the case of digest generation)
* or where the purported digest exists (in the case of
* digest verification).
*
* At session creation time, the client specified the
* digest result length with the digest_length member
* of the @ref rte_crypto_auth_xform structure. For
* physical crypto devices the caller must allocate at
* least digest_length of physically contiguous memory
* at this location.
*
* For digest generation, the digest result will
* overwrite any data at this location.
*
* @note
* For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
* "digest result" read "authentication tag T".
*
* If this member is not set the digest result is
* understood to be in the destination buffer for
* digest generation, and in the source buffer for
* digest verification. The location of the digest
* result in this case is immediately following the
* region over which the digest is computed.
*/
phys_addr_t phys_addr;
/**< Physical address of digest */
uint16_t length;
/**< Length of digest. This must be the same value as
* @ref rte_crypto_auth_xform.digest_length.
*/
} digest; /**< Digest parameters */
struct {
uint8_t *data;
/**< Pointer to Additional Authenticated Data (AAD)
* needed for authenticated cipher mechanisms (CCM and
* GCM), and to the IV for SNOW 3G authentication
* (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
* authentication mechanisms this pointer is ignored.
*
* The length of the data pointed to by this field is
* set up for the session in the @ref
* rte_crypto_auth_xform structure as part of the @ref
* rte_cryptodev_sym_session_create function call.
* This length must not exceed 240 bytes.
*
* Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM),
* the caller should setup this field as follows:
*
* - the nonce should be written starting at an offset
* of one byte into the array, leaving room for the
* implementation to write in the flags to the first
* byte.
*
* - the additional authentication data itself should
* be written starting at an offset of 18 bytes into
* the array, leaving room for the length encoding in
* the first two bytes of the second block.
*
* - the array should be big enough to hold the above
* fields, plus any padding to round this up to the
* nearest multiple of the block size (16 bytes).
* Padding will be added by the implementation.
*
* Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
* caller should setup this field as follows:
*
* - the AAD is written in starting at byte 0
* - the array must be big enough to hold the AAD, plus
* any space to round this up to the nearest multiple
* of the block size (16 bytes).
*
* @note
* For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
* operation, this field is used to pass plaintext.
*/
phys_addr_t phys_addr; /**< physical address */
uint16_t length; /**< Length of AAD */
} aad;
/**< Additional authentication parameters */
} auth;
} __rte_cache_aligned;
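Before these per-packet fields are filled in, the application normally builds a chain of rte_crypto_sym_xform structures and creates a session from it. A minimal sketch for AES-CBC encryption followed by SHA1-HMAC generation, assuming this DPDK generation (where rte_cryptodev_sym_session_create() still takes a device id and an xform chain); the key buffers are hypothetical:

uint8_t cipher_key[16] = { 0 }; /* hypothetical key material */
uint8_t auth_key[20] = { 0 };   /* hypothetical key material */

struct rte_crypto_sym_xform auth_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
	.next = NULL,
	.auth = {
		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
		.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
		.key = { .data = auth_key, .length = sizeof(auth_key) },
		.digest_length = 20,
	}
};

struct rte_crypto_sym_xform cipher_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
	.next = &auth_xform, /* cipher-then-hash chain */
	.cipher = {
		.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.key = { .data = cipher_key, .length = sizeof(cipher_key) },
	}
};

/* one session per crypto device, created from the xform chain */
struct rte_cryptodev_sym_session *session =
	rte_cryptodev_sym_session_create(cdev_id, &cipher_xform);

/* later, attached to every operation that should use it */
rte_crypto_op_attach_sym_session(op, session);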
Functions:
- main(): (much like the other examples) it first initializes the DPDK environment, parses the arguments, creates the mempools, and so on; it assigns a logical core to each port and each crypto device, then calls the port initialization function and the crypto device initialization function, and finally enters the main loop, launching a thread on each logical core, roughly as sketched below.
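Paraphrased (error handling and the mempool/lcore bookkeeping trimmed; the calls shown are from the example), the skeleton of main() is roughly:

int
main(int argc, char **argv)
{
	struct l2fwd_crypto_options options;
	uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };
	unsigned lcore_id;

	/* init EAL; it consumes the leading arguments */
	int ret = rte_eal_init(argc, argv);
	argc -= ret;
	argv += ret;

	/* parse the application's own options */
	l2fwd_crypto_parse_args(&options, argc, argv);

	/* ... create mbuf and crypto-op mempools, map RX ports to lcores ... */

	/* the two initialization functions walked through below */
	int enabled_portcount = initialize_ports(&options);
	initialize_cryptodevs(&options, enabled_portcount, enabled_cdevs);

	/* launch the main loop on every lcore and wait for them */
	rte_eal_mp_remote_launch(l2fwd_launch_one_lcore, (void *)&options,
			CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}
	return 0;
}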
/*
* @brief Initialize and start all Ethernet ports
* @param options the command-line options structure
* @return the number of ports started
**/
static int
initialize_ports(struct l2fwd_crypto_options *options)
{
uint8_t last_portid, portid;
unsigned enabled_portcount = 0;
/* Get the number of Ethernet ports */
unsigned nb_ports = rte_eth_dev_count();
if (nb_ports == 0) {
printf("No Ethernet ports - bye\n");
return -1;
}
/* Reset l2fwd_dst_ports */
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
l2fwd_dst_ports[portid] = 0;
for (last_portid = 0, portid = 0; portid < nb_ports; portid++) {
int retval;
/* Skip ports that are not enabled */
if ((options->portmask & (1 << portid)) == 0)
continue;
/* init port */
printf("Initializing port %u... ", (unsigned) portid);
fflush(stdout);
/* Configure the port according to port_conf, with one TX queue and one RX queue */
retval = rte_eth_dev_configure(portid, 1, 1, &port_conf);
if (retval < 0) {
printf("Cannot configure device: err=%d, port=%u\n",
retval, (unsigned) portid);
return -1;
}
/* Init one RX queue on each port */
fflush(stdout);
retval = rte_eth_rx_queue_setup(portid, 0, nb_rxd, // 0 is the RX queue index
rte_eth_dev_socket_id(portid),
NULL, l2fwd_pktmbuf_pool);
if (retval < 0) {
printf("rte_eth_rx_queue_setup:err=%d, port=%u\n",
retval, (unsigned) portid);
return -1;
}
/* Init one TX queue on each port */
fflush(stdout);
retval = rte_eth_tx_queue_setup(portid, 0, nb_txd,
rte_eth_dev_socket_id(portid),
NULL);
if (retval < 0) {
printf("rte_eth_tx_queue_setup:err=%d, port=%u\n",
retval, (unsigned) portid);
return -1;
}
/* Start device */
retval = rte_eth_dev_start(portid);
if (retval < 0) {
printf("rte_eth_dev_start:err=%d, port=%u\n",
retval, (unsigned) portid);
return -1;
}
// Enable promiscuous mode
rte_eth_promiscuous_enable(portid);
/* Get the MAC address of the port */
rte_eth_macaddr_get(portid, &l2fwd_ports_eth_addr[portid]);
printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
(unsigned) portid,
l2fwd_ports_eth_addr[portid].addr_bytes[0],
l2fwd_ports_eth_addr[portid].addr_bytes[1],
l2fwd_ports_eth_addr[portid].addr_bytes[2],
l2fwd_ports_eth_addr[portid].addr_bytes[3],
l2fwd_ports_eth_addr[portid].addr_bytes[4],
l2fwd_ports_eth_addr[portid].addr_bytes[5]);
/* initialize port stats */
memset(&port_statistics, 0, sizeof(port_statistics));
/* Setup port forwarding table: with an even number of enabled ports, adjacent pairs forward to each other */
if (enabled_portcount % 2) {
l2fwd_dst_ports[portid] = last_portid;
l2fwd_dst_ports[last_portid] = portid;
} else { /* remember this portid as the last unpaired port */
last_portid = portid;
}
l2fwd_enabled_port_mask |= (1 << portid);
enabled_portcount++; // update the count of enabled ports
}
if (enabled_portcount == 1) {
l2fwd_dst_ports[last_portid] = last_portid;
} else if (enabled_portcount % 2) {
printf("odd number of ports in portmask- bye\n");
return -1;
}
// Check the link status of all ports
check_all_ports_link_status(nb_ports, l2fwd_enabled_port_mask);
return enabled_portcount;
}
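To make the pairing concrete: with -p 0xf (ports 0-3 enabled), even-numbered iterations store the current port in last_portid and odd-numbered iterations cross-link the pair, yielding 0<->1 and 2<->3. With exactly one enabled port the final branch makes the port forward back to itself; any larger odd count aborts.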
/* @brief Initialize and start the crypto devices
* @param options command-line options (cipher algorithm, auth algorithm, cipher key, etc.)
* @param nb_ports number of Ethernet ports already enabled
* @param enabled_cdevs array recording which crypto devices were started
* @return enabled_cdev_count the number of crypto devices started
**/
static int
initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
uint8_t *enabled_cdevs)
{
unsigned i, cdev_id, cdev_count, enabled_cdev_count = 0;
const struct rte_cryptodev_capabilities *cap;
enum rte_crypto_auth_algorithm cap_auth_algo;
enum rte_crypto_auth_algorithm opt_auth_algo;
enum rte_crypto_cipher_algorithm cap_cipher_algo;
enum rte_crypto_cipher_algorithm opt_cipher_algo;
int retval;
cdev_count = rte_cryptodev_count();
printf("the cdev_cout is %d\n", cdev_count);
if (cdev_count == 0) {
printf("No crypto devices available\n");
return -1;
}
for (cdev_id = 0; cdev_id < cdev_count && enabled_cdev_count < nb_ports;
cdev_id++) {
struct rte_cryptodev_qp_conf qp_conf;
struct rte_cryptodev_info dev_info;
struct rte_cryptodev_config conf = {
.nb_queue_pairs = 1,
.socket_id = SOCKET_ID_ANY,
.session_mp = {
.nb_objs = 2048,
.cache_size = 64
}
};
/* Get the crypto device's info */
rte_cryptodev_info_get(cdev_id, &dev_info);
/* Set cipher parameters */
if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH ||
options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER ||
options->xform_chain == L2FWD_CRYPTO_CIPHER_ONLY) {
/* Check if device supports cipher algo */
i = 0;
opt_cipher_algo = options->cipher_xform.cipher.algo;
cap = &dev_info.capabilities[i];
while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
/* cipher algorithm */
cap_cipher_algo = cap->sym.cipher.algo;
if (cap->sym.xform_type ==
RTE_CRYPTO_SYM_XFORM_CIPHER) {
if (cap_cipher_algo == opt_cipher_algo) {
if (check_type(options, &dev_info) == 0)
break;
}
}
cap = &dev_info.capabilities[++i];
}
if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
printf("Algorithm %s not supported by cryptodev %u"
" or device not of preferred type (%s)\n",
supported_cipher_algo[opt_cipher_algo],
cdev_id,
options->string_type);
continue;
}
/* block_size */
options->block_size = cap->sym.cipher.block_size;
/*
* Check if length of provided IV is supported
* by the algorithm chosen.
*/
if (options->iv_param) {
if (check_supported_size(options->iv.length,
cap->sym.cipher.iv_size.min,
cap->sym.cipher.iv_size.max,
cap->sym.cipher.iv_size.increment)
!= 0) {
printf("Unsupported IV length\n");
return -1;
}
/*
* Check if length of IV to be randomly generated
* is supported by the algorithm chosen.
*/
} else if (options->iv_random_size != -1) {
if (check_supported_size(options->iv_random_size,
cap->sym.cipher.iv_size.min,
cap->sym.cipher.iv_size.max,
cap->sym.cipher.iv_size.increment)
!= 0) {
printf("Unsupported IV length\n");
return -1;
}
options->iv.length = options->iv_random_size;
/* No size provided, use minimum size. */
} else
options->iv.length = cap->sym.cipher.iv_size.min;
/*
* Check if length of provided cipher key is supported
* by the algorithm chosen.
*/
if (options->ckey_param) {
if (check_supported_size(
options->cipher_xform.cipher.key.length,
cap->sym.cipher.key_size.min,
cap->sym.cipher.key_size.max,
cap->sym.cipher.key_size.increment)
!= 0) {
printf("Unsupported cipher key length\n");
return -1;
}
/*
* Check if length of the cipher key to be randomly generated
* is supported by the algorithm chosen.
*/
} else if (options->ckey_random_size != -1) {
if (check_supported_size(options->ckey_random_size,
cap->sym.cipher.key_size.min,
cap->sym.cipher.key_size.max,
cap->sym.cipher.key_size.increment)
!= 0) {
printf("Unsupported cipher key length\n");
return -1;
}
options->cipher_xform.cipher.key.length =
options->ckey_random_size;
/* No size provided, use minimum size. */
} else
options->cipher_xform.cipher.key.length =
cap->sym.cipher.key_size.min;
if (!options->ckey_param)
generate_random_key(
options->cipher_xform.cipher.key.data,
options->cipher_xform.cipher.key.length);
}
/* Set auth parameters */
if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH ||
options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER ||
options->xform_chain == L2FWD_CRYPTO_HASH_ONLY) {
/* Check if device supports auth algo */
i = 0;
opt_auth_algo = options->auth_xform.auth.algo; /* auth algorithm */
cap = &dev_info.capabilities[i];
while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
cap_auth_algo = cap->sym.auth.algo;
if ((cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
(cap_auth_algo == opt_auth_algo) &&
(check_type(options, &dev_info) == 0)) {
break;
}
cap = &dev_info.capabilities[++i];
}
if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
printf("Algorithm %s not supported by cryptodev %u"
" or device not of preferred type (%s)\n",
supported_auth_algo[opt_auth_algo],
cdev_id,
options->string_type);
continue;
}
/*
* Check if length of provided AAD (Additional Authenticated Data) is supported
* by the algorithm chosen.
*/
if (options->aad_param) {
if (check_supported_size(options->aad.length,
cap->sym.auth.aad_size.min,
cap->sym.auth.aad_size.max,
cap->sym.auth.aad_size.increment)
!= 0) {
printf("Unsupported AAD length\n");
return -1;
}
/*
* Check if length of AAD to be randomly generated
* is supported by the algorithm chosen.
*/
} else if (options->aad_random_size != -1) {
if (check_supported_size(options->aad_random_size,
cap->sym.auth.aad_size.min,
cap->sym.auth.aad_size.max,
cap->sym.auth.aad_size.increment)
!= 0) {
printf("Unsupported AAD length\n");
return -1;
}
options->aad.length = options->aad_random_size;
/* No size provided, use minimum size. */
} else
options->aad.length = cap->sym.auth.aad_size.min;
options->auth_xform.auth.add_auth_data_length =
options->aad.length;
/*
* Check if length of provided auth key is supported
* by the algorithm chosen.
*/
if (options->akey_param) {
if (check_supported_size(
options->auth_xform.auth.key.length,
cap->sym.auth.key_size.min,
cap->sym.auth.key_size.max,
cap->sym.auth.key_size.increment)
!= 0) {
printf("Unsupported auth key length\n");
return -1;
}
/*
* Check if length of the auth key to be randomly generated
* is supported by the algorithm chosen.
*/
} else if (options->akey_random_size != -1) {
if (check_supported_size(options->akey_random_size,
cap->sym.auth.key_size.min,
cap->sym.auth.key_size.max,
cap->sym.auth.key_size.increment)
!= 0) {
printf("Unsupported auth key length\n");
return -1;
}
options->auth_xform.auth.key.length =
options->akey_random_size;
/* No size provided, use minimum size. */
} else
options->auth_xform.auth.key.length =
cap->sym.auth.key_size.min;
if (!options->akey_param)
generate_random_key(
options->auth_xform.auth.key.data,
options->auth_xform.auth.key.length);
/* Check if digest size is supported by the algorithm. */
if (options->digest_size != -1) {
if (check_supported_size(options->digest_size,
cap->sym.auth.digest_size.min,
cap->sym.auth.digest_size.max,
cap->sym.auth.digest_size.increment)
!= 0) {
printf("Unsupported digest length\n");
return -1;
}
options->auth_xform.auth.digest_length =
options->digest_size;
/* No size provided, use minimum size. */
} else
options->auth_xform.auth.digest_length =
cap->sym.auth.digest_size.min;
}
/* Configure the crypto device according to conf */
retval = rte_cryptodev_configure(cdev_id, &conf);
if (retval < 0) {
printf("Failed to configure cryptodev %u", cdev_id);
return -1;
}
qp_conf.nb_descriptors = 2048;
retval = rte_cryptodev_queue_pair_setup(cdev_id, 0, &qp_conf,
SOCKET_ID_ANY);
if (retval < 0) {
printf("Failed to setup queue pair %u on cryptodev %u",
0, cdev_id);
return -1;
}
/* 启动crypto device */
retval = rte_cryptodev_start(cdev_id);
if (retval < 0) {
printf("Failed to start device %u: error %d\n",
cdev_id, retval);
return -1;
}
printf("Starting crypto device !!!\n");
l2fwd_enabled_crypto_mask |= (((uint64_t)1) << cdev_id);
enabled_cdevs[cdev_id] = 1; /* mark this crypto device as started */
enabled_cdev_count++; /* one more crypto device started */
}
return enabled_cdev_count;
}
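check_supported_size() is used throughout but not shown above. In the capability tables, a (min, max, increment) triple describes either a single supported value (increment == 0) or a range stepped by increment, and the requested length must land exactly on a supported value. A sketch of that logic, following the example:

static int
check_supported_size(uint16_t length, uint16_t min, uint16_t max,
		uint16_t increment)
{
	uint16_t supp_size;

	/* single supported value */
	if (increment == 0)
		return (length == min) ? 0 : -1;

	/* range of values stepped by increment */
	for (supp_size = min; supp_size <= max; supp_size += increment)
		if (length == supp_size)
			return 0;

	return -1;
}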
The core of each lcore's main loop (l2fwd_main_loop) then reads packets from the RX queues, attaches a crypto operation to each packet, enqueues the operations on the crypto device, and finally dequeues and forwards the processed packets:
/*
* Read packets from the RX queues
*/
for (i = 0; i < qconf->nb_rx_ports; i++) {
portid = qconf->rx_port_list[i];
cparams = &port_cparams[i];
/* Receive a burst of packets into the pkts_burst array; one call may return
several rte_mbufs, and the return value nb_rx is the number received */
nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
pkts_burst, MAX_PKT_BURST);
port_statistics[portid].rx += nb_rx;
if (nb_rx) {
/*
* If we can't allocate a crypto_ops, then drop
* the rest of the burst and dequeue and
* process the packets to free offload structs
*
* allocate crypto operations from a mempool,
* for n packets, allocate n crypto operations
*/
if (rte_crypto_op_bulk_alloc(
l2fwd_crypto_op_pool,
RTE_CRYPTO_OP_TYPE_SYMMETRIC,
ops_burst, nb_rx) !=
nb_rx) {
for (j = 0; j < nb_rx; j++)
rte_pktmbuf_free(pkts_burst[j]);
nb_rx = 0;
}
/* Enqueue packets on the crypto device */
for (j = 0; j < nb_rx; j++) {
m = pkts_burst[j];
/* enqueue the packet on the crypto device queue */
l2fwd_simple_crypto_enqueue(m,
ops_burst[j], cparams);
}
}
/* Dequeue packets from Crypto device */
do {
nb_rx = rte_cryptodev_dequeue_burst(
cparams->dev_id, cparams->qp_id,
ops_burst, MAX_PKT_BURST);
crypto_statistics[cparams->dev_id].dequeued +=
nb_rx;
/* Forward crypto'd packets */
for (j = 0; j < nb_rx; j++) {
m = ops_burst[j]->sym->m_src;
rte_crypto_op_free(ops_burst[j]);
l2fwd_simple_forward(m, portid);
}
} while (nb_rx == MAX_PKT_BURST);
}
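l2fwd_simple_forward() is the plain l2fwd step: look up the destination port paired with portid by initialize_ports() and buffer the processed mbuf for transmission there. Roughly (a sketch; l2fwd_send_packet() buffers mbufs and flushes them with rte_eth_tx_burst() once a burst has accumulated):

static void
l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
{
	/* destination is the port paired with portid */
	unsigned dst_port = l2fwd_dst_ports[portid];

	l2fwd_send_packet(m, (uint8_t) dst_port);
}

The enqueue side, l2fwd_simple_crypto_enqueue(), is more involved: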
static int
l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
struct rte_crypto_op *op,
struct l2fwd_crypto_params *cparams)
{
struct ether_hdr *eth_hdr; /* L2 header */
struct ipv4_hdr *ip_hdr; /* IPv4 header */
uint32_t ipdata_offset, data_len;
uint32_t pad_len = 0; /* length of the padding */
char *padding; /* pointer to the padding bytes */
eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
if (eth_hdr->ether_type != rte_cpu_to_be_16(ETHER_TYPE_IPv4))
return -1;
ipdata_offset = sizeof(struct ether_hdr); /* length of the L2 header */
ip_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m, char *) +
ipdata_offset);
ipdata_offset += (ip_hdr->version_ihl & IPV4_HDR_IHL_MASK)
* IPV4_IHL_MULTIPLIER; /* plus the IP header length */
/* Zero pad data to be crypto'd so it is block aligned.
data_len = data length of mbuf m - L2 header length - IP header length */
data_len = rte_pktmbuf_data_len(m) - ipdata_offset;
if (cparams->do_hash && cparams->hash_verify)
data_len -= cparams->digest_length; /* when verifying a hash, exclude the digest length */
if (cparams->do_cipher) {
/*
* The following algorithms are block ciphers
* and might need padding
*/
switch (cparams->cipher_algo) {
case RTE_CRYPTO_CIPHER_AES_CBC:
case RTE_CRYPTO_CIPHER_AES_ECB:
case RTE_CRYPTO_CIPHER_3DES_CBC:
case RTE_CRYPTO_CIPHER_3DES_ECB:
if (data_len % cparams->block_size) /* data_len is not a multiple of the block size */
pad_len = cparams->block_size -
(data_len % cparams->block_size); /* compute the padding length */
break;
default:
pad_len = 0;
}
/* if padding is needed */
if (pad_len) {
padding = rte_pktmbuf_append(m, pad_len); /* append pad_len bytes to m and return their start address */
if (unlikely(!padding))
return -1;
data_len += pad_len;
memset(padding, 0, pad_len); /* zero the padding */
}
}
/* Set crypto operation data parameters, Attach a session to a crypto operation */
rte_crypto_op_attach_sym_session(op, cparams->session);
/* Fill in the authentication parameters of the rte_crypto_op */
if (cparams->do_hash) {
if (!cparams->hash_verify) {
/* Append space for the digest to the end of the packet */
op->sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(m,
cparams->digest_length);
} else {
op->sym->auth.digest.data = rte_pktmbuf_mtod(m,
uint8_t *) + ipdata_offset + data_len;
}
op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
rte_pktmbuf_pkt_len(m) - cparams->digest_length);
op->sym->auth.digest.length = cparams->digest_length;
/* For wireless algorithms, offset/length must be in bits */
if (cparams->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
cparams->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
cparams->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3) {
op->sym->auth.data.offset = ipdata_offset << 3;
op->sym->auth.data.length = data_len << 3;
} else {
op->sym->auth.data.offset = ipdata_offset;
op->sym->auth.data.length = data_len;
}
if (cparams->aad.length) {
op->sym->auth.aad.data = cparams->aad.data;
op->sym->auth.aad.phys_addr = cparams->aad.phys_addr;
op->sym->auth.aad.length = cparams->aad.length;
} else {
op->sym->auth.aad.data = NULL;
op->sym->auth.aad.phys_addr = 0;
op->sym->auth.aad.length = 0;
}
}
/* Fill in the cipher parameters of the rte_crypto_op */
if (cparams->do_cipher) {
op->sym->cipher.iv.data = cparams->iv.data;
op->sym->cipher.iv.phys_addr = cparams->iv.phys_addr;
op->sym->cipher.iv.length = cparams->iv.length;
/* For wireless algorithms, offset/length must be in bits */
if (cparams->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
cparams->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
cparams->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
op->sym->cipher.data.offset = ipdata_offset << 3;
op->sym->cipher.data.length = data_len << 3;
} else {
op->sym->cipher.data.offset = ipdata_offset;
op->sym->cipher.data.length = data_len;
}
}
/* m_src is the source mbuf, i.e. the data before encryption */
op->sym->m_src = m;
return l2fwd_crypto_enqueue(op, cparams);
}
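l2fwd_crypto_enqueue() does not touch the device on every call; like the TX path, it buffers operations per crypto device and pushes a full burst at once. A sketch following the example's structure:

static int
l2fwd_crypto_enqueue(struct rte_crypto_op *op,
		struct l2fwd_crypto_params *cparams)
{
	unsigned lcore_id = rte_lcore_id();
	struct lcore_queue_conf *qconf = &lcore_queue_conf[lcore_id];
	unsigned len = qconf->op_buf[cparams->dev_id].len;

	/* buffer the operation for this crypto device */
	qconf->op_buf[cparams->dev_id].buffer[len] = op;
	len++;

	/* a full burst accumulated: send it to the device */
	if (len == MAX_PKT_BURST) {
		l2fwd_crypto_send_burst(qconf, MAX_PKT_BURST, cparams);
		len = 0;
	}

	qconf->op_buf[cparams->dev_id].len = len;
	return 0;
}

l2fwd_crypto_send_burst() is where rte_cryptodev_enqueue_burst(), documented below, is finally called: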
/**
* Enqueue a burst of operations for processing on a crypto device.
*
* The rte_cryptodev_enqueue_burst() function is invoked to place
* crypto operations on the queue *qp_id* of the device designated by
* its *dev_id*.
*
* The *nb_ops* parameter is the number of operations to process which are
* supplied in the *ops* array of *rte_crypto_op* structures.
*
* The rte_cryptodev_enqueue_burst() function returns the number of
* operations it actually enqueued for processing. A return value equal to
* *nb_ops* means that all packets have been enqueued.
*
* @param dev_id The identifier of the device.
* @param qp_id The index of the queue pair which packets are
* to be enqueued for processing. The value
* must be in the range [0, nb_queue_pairs - 1]
* previously supplied to
* *rte_cryptodev_configure*.
* @param ops The address of an array of *nb_ops* pointers
* to *rte_crypto_op* structures which contain
* the crypto operations to be processed.
* @param nb_ops The number of operations to process.
*
* @return
* The number of operations actually enqueued on the crypto device. The return
* value can be less than the value of the *nb_ops* parameter when the
* crypto device's queue is full or if invalid parameters are specified in
* a *rte_crypto_op*.
*/
static inline uint16_t
rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
return (*dev->enqueue_burst)(
dev->data->queue_pairs[qp_id], ops, nb_ops);
}
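And a sketch of l2fwd_crypto_send_burst() itself, which shows how the example deals with a return value smaller than nb_ops: operations the device did not accept are freed together with their source mbufs and counted as errors:

static int
l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
		struct l2fwd_crypto_params *cparams)
{
	struct rte_crypto_op **op_buffer = (struct rte_crypto_op **)
			qconf->op_buf[cparams->dev_id].buffer;

	unsigned ret = rte_cryptodev_enqueue_burst(cparams->dev_id,
			cparams->qp_id, op_buffer, (uint16_t) n);

	crypto_statistics[cparams->dev_id].enqueued += ret;
	if (unlikely(ret < n)) {
		/* the device queue was full: drop what was not enqueued */
		crypto_statistics[cparams->dev_id].errors += (n - ret);
		do {
			rte_pktmbuf_free(op_buffer[ret]->sym->m_src);
			rte_crypto_op_free(op_buffer[ret]);
		} while (++ret < n);
	}

	return 0;
}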