[PATCH-RFC] platform: linux-dpdk: crypto acceleration support

Message ID 1478522381-18153-1-git-send-email-balakrishna.garapati@linaro.org
State New

Commit Message

Krishna Garapati Nov. 7, 2016, 12:39 p.m.
Implement the ODP crypto API on top of the DPDK cryptodev framework,
so that odp-dpdk can run with DPDK SW/HW crypto PMDs.
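
A hypothetical way to exercise the new path with a DPDK software
crypto PMD (vdev names vary across DPDK versions, odp-dpdk takes EAL
arguments from the ODP_PLATFORM_PARAMS environment variable, and
./odp_app below stands for any ODP application):

    ODP_PLATFORM_PARAMS="-n 4 --vdev crypto_null" ./odp_app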

Signed-off-by: Balakrishna Garapati <balakrishna.garapati@linaro.org>
---
 platform/linux-dpdk/Makefile.am               |   3 +-
 platform/linux-dpdk/include/odp_crypto_dpdk.h |  13 +
 platform/linux-dpdk/include/odp_packet_dpdk.h |   3 +
 platform/linux-dpdk/odp_crypto_dpdk.c         | 822 ++++++++++++++++++++++++++
 platform/linux-dpdk/odp_init.c                |  14 +
 platform/linux-generic/include/odp_internal.h |   2 +
 6 files changed, 856 insertions(+), 1 deletion(-)
 create mode 100644 platform/linux-dpdk/include/odp_crypto_dpdk.h
 create mode 100644 platform/linux-dpdk/odp_crypto_dpdk.c


diff --git a/platform/linux-dpdk/Makefile.am b/platform/linux-dpdk/Makefile.am
index 488f2c0..6ce0ce6 100644
--- a/platform/linux-dpdk/Makefile.am
+++ b/platform/linux-dpdk/Makefile.am
@@ -121,6 +121,7 @@  noinst_HEADERS = \
 		  ${top_srcdir}/platform/linux-generic/include/odp_classification_inlines.h \
 		  ${top_srcdir}/platform/linux-generic/include/odp_classification_internal.h \
 		  ${top_srcdir}/platform/linux-generic/include/odp_crypto_internal.h \
+		  ${srcdir}/include/odp_crypto_dpdk.h \
 		  ${top_srcdir}/platform/linux-generic/include/odp_forward_typedefs_internal.h \
 		  ${top_srcdir}/platform/linux-generic/include/odp_internal.h \
 		  ${srcdir}/include/odp_packet_dpdk.h \
@@ -160,7 +161,7 @@  __LIB__libodp_dpdk_la_SOURCES = \
 			   ../linux-generic/odp_cpu.c \
 			   ../linux-generic/odp_cpumask.c \
 			   ../linux-generic/odp_cpumask_task.c \
-			   ../linux-generic/odp_crypto.c \
+			   odp_crypto_dpdk.c \
 			   odp_errno.c \
 			   ../linux-generic/odp_event.c \
 			   ../linux-generic/odp_hash.c \
diff --git a/platform/linux-dpdk/include/odp_crypto_dpdk.h b/platform/linux-dpdk/include/odp_crypto_dpdk.h
new file mode 100644
index 0000000..559ac94
--- /dev/null
+++ b/platform/linux-dpdk/include/odp_crypto_dpdk.h
@@ -0,0 +1,13 @@ 
+/* Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ */
+
+#ifndef ODP_CRYPTO_DPDK_H
+#define ODP_CRYPTO_DPDK_H
+
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+
+#endif
diff --git a/platform/linux-dpdk/include/odp_packet_dpdk.h b/platform/linux-dpdk/include/odp_packet_dpdk.h
index 495d5e6..c83089b 100644
--- a/platform/linux-dpdk/include/odp_packet_dpdk.h
+++ b/platform/linux-dpdk/include/odp_packet_dpdk.h
@@ -43,6 +43,9 @@ 
 #include <rte_jhash.h>
 #include <rte_hash_crc.h>
 
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+
 #define RTE_TEST_RX_DESC_DEFAULT 128
 #define RTE_TEST_TX_DESC_DEFAULT 512
 
diff --git a/platform/linux-dpdk/odp_crypto_dpdk.c b/platform/linux-dpdk/odp_crypto_dpdk.c
new file mode 100644
index 0000000..edb72a8
--- /dev/null
+++ b/platform/linux-dpdk/odp_crypto_dpdk.c
@@ -0,0 +1,822 @@ 
+/* Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ */
+
+#include <odp/api/crypto.h>
+#include <odp_internal.h>
+#include <odp/api/atomic.h>
+#include <odp/api/spinlock.h>
+#include <odp/api/sync.h>
+#include <odp/api/debug.h>
+#include <odp/api/align.h>
+#include <odp/api/shared_memory.h>
+#include <odp_crypto_internal.h>
+#include <odp_debug_internal.h>
+#include <odp/api/hints.h>
+#include <odp/api/random.h>
+#include <odp_packet_internal.h>
+#include <odp_crypto_dpdk.h>
+
+#include <string.h>
+
+#include <openssl/rand.h>
+
+#define MAX_SESSIONS 32
+#define NB_MBUF  1024
+
+typedef struct odp_crypto_global_s odp_crypto_global_t;
+typedef struct odp_dpdk_session_entry_t odp_dpdk_session_entry;
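+/*
+ * Per-session state kept on the ODP side: the DPDK cryptodev session
+ * handle plus the xforms and IV parameters needed to build per-packet
+ * crypto ops.
+ */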
+struct odp_dpdk_session_entry_t {
+	struct odp_dpdk_session_entry_t *next;
+	uint64_t rte_session;
+	odp_bool_t do_cipher_first;
+	struct rte_crypto_sym_xform cipher_xform;
+	struct rte_crypto_sym_xform auth_xform;
+	struct {
+		uint8_t *data;
+		uint16_t length;
+	} iv;
+	odp_queue_t compl_queue;   /**< Async mode completion event queue */
+	odp_pool_t output_pool;    /**< Output buffer pool */
+};
+
+struct odp_crypto_global_s {
+	odp_spinlock_t                lock;
+	uint8_t enabled_crypto_devs;
+	uint8_t enabled_crypto_dev_ids[RTE_CRYPTO_MAX_DEVS];
+	odp_dpdk_session_entry *free;
+	odp_dpdk_session_entry sessions[0];
+};
+
+static odp_crypto_global_t *global;
+struct rte_mempool *crypto_op_pool;
+
+static
+odp_crypto_generic_op_result_t *get_op_result_from_event(odp_event_t ev)
+{
+	return &(odp_packet_hdr(odp_packet_from_event(ev))->op_result);
+}
+
+static inline int
+check_supported_size(uint16_t length, uint16_t min, uint16_t max,
+		uint16_t increment)
+{
+	uint16_t supp_size;
+
+	for (supp_size = min; supp_size <= max; supp_size += increment) {
+		if (length == supp_size)
+			return 0;
+	}
+
+	return -1;
+}
+
+static
+odp_dpdk_session_entry *alloc_session(void)
+{
+	odp_dpdk_session_entry *session = NULL;
+
+	odp_spinlock_lock(&global->lock);
+	session = global->free;
+	if (session)
+		global->free = session->next;
+	odp_spinlock_unlock(&global->lock);
+
+	return session;
+}
+
+static
+void free_session(odp_dpdk_session_entry *session)
+{
+	odp_spinlock_lock(&global->lock);
+	session->next = global->free;
+	global->free = session;
+	odp_spinlock_unlock(&global->lock);
+}
+
+int
+odp_crypto_init_global(void)
+{
+	size_t mem_size;
+	odp_shm_t shm;
+	int idx;
+
+	/* Calculate the memory size we need */
+	mem_size  = sizeof(*global);
+	mem_size += (MAX_SESSIONS * sizeof(odp_dpdk_session_entry));
+
+	/* Allocate our globally shared memory */
+	shm = odp_shm_reserve("crypto_pool", mem_size,
+			      ODP_CACHE_LINE_SIZE, 0);
+
+	global = odp_shm_addr(shm);
+	if (global == NULL) {
+		ODP_ERR("Failed to reserve shm for crypto pool\n");
+		return -1;
+	}
+
+	/* Clear it out */
+	memset(global, 0, mem_size);
+
+	/* Initialize free list and lock */
+	for (idx = 0; idx < MAX_SESSIONS; idx++) {
+		global->sessions[idx].next = global->free;
+		global->free = &global->sessions[idx];
+	}
+
+	global->enabled_crypto_devs = 0;
+	odp_spinlock_init(&global->lock);
+
+	return 0;
+}
+
+int
+odp_crypto_init_local(void)
+{
+	int16_t cdev_id, cdev_count;
+	int retval = -1;
+
+	cdev_count = rte_cryptodev_count();
+	if (cdev_count == 0) {
+		ODP_ERR("No crypto devices available\n");
+		return -1;
+	}
+
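+	/*
+	 * Configure every detected cryptodev with a single queue pair;
+	 * odp_crypto_operation() enqueues and dequeues on queue pair 0.
+	 */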
+	for (cdev_id = cdev_count - 1; cdev_id >= 0; cdev_id--) {
+		struct rte_cryptodev_qp_conf qp_conf;
+
+		struct rte_cryptodev_config conf = {
+			.nb_queue_pairs = 1,
+			.socket_id = SOCKET_ID_ANY,
+			.session_mp = {
+				.nb_objs = MAX_SESSIONS,
+				.cache_size = 8
+			}
+		};
+
+		retval = rte_cryptodev_configure(cdev_id, &conf);
+		if (retval < 0) {
+			ODP_ERR("Failed to configure cryptodev %u", cdev_id);
+			return -1;
+		}
+
+		qp_conf.nb_descriptors = MAX_SESSIONS;
+
+		retval = rte_cryptodev_queue_pair_setup(cdev_id, 0, &qp_conf,
+				SOCKET_ID_ANY);
+		if (retval < 0) {
+			ODP_ERR("Failed to setup queue pair %u on cryptodev %u",
+					0, cdev_id);
+			return -1;
+		}
+
+		odp_spinlock_lock(&global->lock);
+		global->enabled_crypto_devs++;
+		global->enabled_crypto_dev_ids[global->enabled_crypto_devs - 1] =
+			cdev_id;
+		odp_spinlock_unlock(&global->lock);
+	}
+
+	/* create crypto op pool */
+	crypto_op_pool = rte_crypto_op_pool_create("crypto_op_pool",
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC, NB_MBUF, 128, 0,
+			rte_socket_id());
+
+	if (crypto_op_pool == NULL) {
+		ODP_ERR("Cannot create crypto op pool\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+int odp_crypto_capability(odp_crypto_capability_t *capability)
+{
+	uint8_t i, cdev_id, cdev_count;
+	const struct rte_cryptodev_capabilities *cap;
+	enum rte_crypto_auth_algorithm cap_auth_algo;
+	enum rte_crypto_cipher_algorithm cap_cipher_algo;
+
+	if (NULL == capability)
+		return -1;
+
+	/* Initialize crypto capability structure */
+	memset(capability, 0, sizeof(odp_crypto_capability_t));
+
+	cdev_count = rte_cryptodev_count();
+	if (cdev_count == 0) {
+		ODP_ERR("No crypto devices available\n");
+		return -1;
+	}
+
+	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
+		struct rte_cryptodev_info dev_info;
+
+		rte_cryptodev_info_get(cdev_id, &dev_info);
+		i = 0;
+		cap = &dev_info.capabilities[i];
+		if ((dev_info.feature_flags & RTE_CRYPTODEV_FF_HW_ACCELERATED)) {
+			while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+				cap_cipher_algo = cap->sym.cipher.algo;
+				if (cap->sym.xform_type ==
+				        RTE_CRYPTO_SYM_XFORM_CIPHER) {
+					if (cap_cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
+						capability->hw_ciphers.bit.null = 1;
+					}
+					if (cap_cipher_algo == RTE_CRYPTO_CIPHER_3DES_CBC) {
+						capability->hw_ciphers.bit.trides_cbc = 1;
+						capability->hw_ciphers.bit.des = 1;
+					}
+					if (cap_cipher_algo == RTE_CRYPTO_CIPHER_AES_CBC) {
+						capability->hw_ciphers.bit.aes128_cbc = 1;
+					}
+					if (cap_cipher_algo == RTE_CRYPTO_CIPHER_AES_GCM) {
+						capability->hw_ciphers.bit.aes128_gcm = 1;
+					}
+				}
+
+				cap_auth_algo = cap->sym.auth.algo;
+				if (cap->sym.xform_type ==
+				        RTE_CRYPTO_SYM_XFORM_AUTH) {
+					if (cap_auth_algo == RTE_CRYPTO_AUTH_NULL) {
+						capability->hw_auths.bit.null = 1;
+					}
+					if (cap_auth_algo == RTE_CRYPTO_AUTH_AES_GCM) {
+						capability->hw_auths.bit.aes128_gcm = 1;
+					}
+					if (cap_auth_algo == RTE_CRYPTO_AUTH_MD5_HMAC) {
+						capability->hw_auths.bit.md5_96 = 1;
+					}
+					if (cap_auth_algo == RTE_CRYPTO_AUTH_SHA256_HMAC) {
+						capability->hw_auths.bit.sha256_128 = 1;
+					}
+				}
+				cap = &dev_info.capabilities[++i];
+			}
+		} else {
+			while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+				cap_cipher_algo = cap->sym.cipher.algo;
+				if (cap->sym.xform_type ==
+				        RTE_CRYPTO_SYM_XFORM_CIPHER) {
+					if (cap_cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
+						capability->ciphers.bit.null = 1;
+					}
+					if (cap_cipher_algo == RTE_CRYPTO_CIPHER_3DES_CBC) {
+						capability->ciphers.bit.trides_cbc = 1;
+						capability->ciphers.bit.des = 1;
+					}
+					if (cap_cipher_algo == RTE_CRYPTO_CIPHER_AES_CBC) {
+						capability->ciphers.bit.aes128_cbc = 1;
+					}
+					if (cap_cipher_algo == RTE_CRYPTO_CIPHER_AES_GCM) {
+						capability->ciphers.bit.aes128_gcm = 1;
+					}
+				}
+
+				cap_auth_algo = cap->sym.auth.algo;
+				if (cap->sym.xform_type ==
+				        RTE_CRYPTO_SYM_XFORM_AUTH) {
+					if (cap_auth_algo == RTE_CRYPTO_AUTH_NULL) {
+						capability->auths.bit.null = 1;
+					}
+					if (cap_auth_algo == RTE_CRYPTO_AUTH_AES_GCM) {
+						capability->auths.bit.aes128_gcm = 1;
+					}
+					if (cap_auth_algo == RTE_CRYPTO_AUTH_MD5_HMAC) {
+						capability->auths.bit.md5_96 = 1;
+					}
+					if (cap_auth_algo == RTE_CRYPTO_AUTH_SHA256_HMAC) {
+						capability->auths.bit.sha256_128 = 1;
+					}
+				}
+				cap = &dev_info.capabilities[++i];
+			}
+		}
+
+		/* Report the most restrictive per-device session limit */
+		if (capability->max_sessions == 0 ||
+		    dev_info.sym.max_nb_sessions < capability->max_sessions)
+			capability->max_sessions =
+				dev_info.sym.max_nb_sessions;
+	}
+
+	return 0;
+}
+
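+/*
+ * Pick an enabled cryptodev that supports both the cipher and auth
+ * transforms, validating (or defaulting) key, IV and digest sizes
+ * against the device's advertised capability ranges.
+ */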
+static int get_crypto_dev(struct rte_crypto_sym_xform *cipher_xform,
+			       struct rte_crypto_sym_xform *auth_xform,
+			       uint16_t iv_length, uint8_t *dev_id)
+{
+	uint8_t cdev_id, id;
+	const struct rte_cryptodev_capabilities *cap;
+	enum rte_crypto_cipher_algorithm cap_cipher_algo;
+	enum rte_crypto_auth_algorithm cap_auth_algo;
+	enum rte_crypto_cipher_algorithm app_cipher_algo;
+	enum rte_crypto_auth_algorithm app_auth_algo;
+
+	for (id = 0; id < global->enabled_crypto_devs; id++) {
+		struct rte_cryptodev_info dev_info;
+
+		cdev_id = global->enabled_crypto_dev_ids[id];
+		rte_cryptodev_info_get(cdev_id, &dev_info);
+		int i = 0;
+		app_cipher_algo = cipher_xform->cipher.algo;
+		cap = &dev_info.capabilities[i];
+		while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+			cap_cipher_algo = cap->sym.cipher.algo;
+			if (cap->sym.xform_type ==
+			    RTE_CRYPTO_SYM_XFORM_CIPHER &&
+			    cap_cipher_algo == app_cipher_algo)
+				break;
+			cap = &dev_info.capabilities[++i];
+		}
+
+		if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED)
+			continue;
+
+		/* Check if key size is supported by the algorithm. */
+		if (cipher_xform->cipher.key.length) {
+			if (check_supported_size(cipher_xform->cipher.key.length,
+					cap->sym.cipher.key_size.min,
+					cap->sym.cipher.key_size.max,
+					cap->sym.cipher.key_size.increment)
+						!= 0) {
+				ODP_ERR("Unsupported cipher key length\n");
+				return -1;
+			}
+		/* No size provided, use minimum size. */
+		} else
+			cipher_xform->cipher.key.length =
+					cap->sym.cipher.key_size.min;
+
+		/* Check if iv length is supported by the algorithm. */
+		if (iv_length) {
+			if (check_supported_size(iv_length,
+					cap->sym.cipher.iv_size.min,
+					cap->sym.cipher.iv_size.max,
+					cap->sym.cipher.iv_size.increment)
+						!= 0) {
+				ODP_ERR("Unsupported iv length\n");
+				return -1;
+			}
+		}
+
+		i = 0;
+		app_auth_algo = auth_xform->auth.algo;
+		cap = &dev_info.capabilities[i];
+		while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+			cap_auth_algo = cap->sym.auth.algo;
+			if (cap->sym.xform_type ==
+			    RTE_CRYPTO_SYM_XFORM_AUTH &&
+			    cap_auth_algo == app_auth_algo)
+				break;
+			cap = &dev_info.capabilities[++i];
+		}
+
+		if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED)
+			continue;
+
+		/* Check if key size is supported by the algorithm. */
+		if (auth_xform->auth.key.length) {
+			if (check_supported_size(auth_xform->auth.key.length,
+					cap->sym.auth.key_size.min,
+					cap->sym.auth.key_size.max,
+					cap->sym.auth.key_size.increment)
+						!= 0) {
+				ODP_ERR("Unsupported auth key length\n");
+				return -1;
+			}
+		/* No size provided, use minimum size. */
+		} else
+			auth_xform->auth.key.length =
+					cap->sym.auth.key_size.min;
+
+		/* Check if digest size is supported by the algorithm. */
+		if (auth_xform->auth.digest_length) {
+			if (check_supported_size(auth_xform->auth.digest_length,
+					cap->sym.auth.digest_size.min,
+					cap->sym.auth.digest_size.max,
+					cap->sym.auth.digest_size.increment)
+						!= 0) {
+				ODP_ERR("Unsupported digest length\n");
+				return -1;
+			}
+		/* No size provided, use minimum size. */
+		} else
+			auth_xform->auth.digest_length =
+					cap->sym.auth.digest_size.min;
+
+		*dev_id = cdev_id;
+		return 0;
+	}
+
+	return -1;
+}
+
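+/*
+ * Create a session: translate the ODP algorithm parameters into DPDK
+ * xforms, pick a capable device and register the session with it.
+ */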
+int
+odp_crypto_session_create(odp_crypto_session_params_t *params,
+			  odp_crypto_session_t *session_out,
+			  odp_crypto_ses_create_err_t *status)
+{
+	int rc = 0;
+	uint8_t cdev_id = 0;
+	struct rte_crypto_sym_xform cipher_xform;
+	struct rte_crypto_sym_xform auth_xform;
+	struct rte_crypto_sym_xform *first_xform;
+	struct rte_cryptodev_sym_session *session;
+	odp_dpdk_session_entry *entry;
+
+	/* Allocate memory for this session */
+	entry = alloc_session();
+	if (entry == NULL) {
+		*status = ODP_CRYPTO_SES_CREATE_ERR_ENOMEM;
+		return -1;
+	}
+
+	/* Default to successful result */
+	*status = ODP_CRYPTO_SES_CREATE_ERR_NONE;
+
+	/* Cipher Data */
+	cipher_xform.cipher.key.data = rte_malloc("crypto key",
+						params->cipher_key.length, 0);
+	if (cipher_xform.cipher.key.data == NULL) {
+		ODP_ERR("Failed to allocate memory for cipher key\n");
+		return -1;
+	}
+
+	cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+	cipher_xform.next = NULL;
+	cipher_xform.cipher.key.length = params->cipher_key.length;
+	memcpy(cipher_xform.cipher.key.data, params->cipher_key.data,
+	       params->cipher_key.length);
+
+	/* Authentication Data */
+	auth_xform.auth.key.data = rte_malloc("auth key",
+						params->auth_key.length, 0);
+	if (auth_xform.auth.key.data == NULL) {
+		ODP_ERR("Failed to allocate memory for auth key\n");
+		return -1;
+	}
+	auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+	auth_xform.next = NULL;
+	auth_xform.auth.key.length = params->auth_key.length;
+	memcpy(auth_xform.auth.key.data, params->auth_key.data,
+	       params->auth_key.length);
+
+	/* Derive direction from the requested operation */
+	if (ODP_CRYPTO_OP_ENCODE == params->op) {
+		entry->do_cipher_first = params->auth_cipher_text;
+		cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+		auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+	} else {
+		entry->do_cipher_first = !params->auth_cipher_text;
+		cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
+		auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
+	}
+
+	/* Chain the transforms in the derived order */
+	if (entry->do_cipher_first) {
+		first_xform = &cipher_xform;
+		first_xform->next = &auth_xform;
+	} else {
+		first_xform = &auth_xform;
+		first_xform->next = &cipher_xform;
+	}
+
+	switch (params->cipher_alg) {
+	case ODP_CIPHER_ALG_NULL:
+		cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL;
+		break;
+	case ODP_CIPHER_ALG_DES:
+	case ODP_CIPHER_ALG_3DES_CBC:
+		cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_3DES_CBC;
+		break;
+	case ODP_CIPHER_ALG_AES128_CBC:
+		cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
+		break;
+	case ODP_CIPHER_ALG_AES128_GCM:
+		cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_GCM;
+		break;
+	default:
+		rc = -1;
+	}
+
+	/* Check result */
+	if (rc) {
+		*status = ODP_CRYPTO_SES_CREATE_ERR_INV_CIPHER;
+		return -1;
+	}
+
+	/* Process based on auth */
+	switch (params->auth_alg) {
+	case ODP_AUTH_ALG_NULL:
+		auth_xform.auth.algo = RTE_CRYPTO_AUTH_NULL;
+		break;
+	case ODP_AUTH_ALG_MD5_96:
+		auth_xform.auth.algo = RTE_CRYPTO_AUTH_MD5_HMAC;
+		auth_xform.auth.digest_length = 12;
+		break;
+	case ODP_AUTH_ALG_SHA256_128:
+		auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
+		auth_xform.auth.digest_length = 16;
+		break;
+	case ODP_AUTH_ALG_AES128_GCM:
+		auth_xform.auth.algo = RTE_CRYPTO_AUTH_AES_GCM;
+		auth_xform.auth.digest_length = 16;
+		break;
+	default:
+		rc = -1;
+	}
+
+	/* Check result */
+	if (rc) {
+		*status = ODP_CRYPTO_SES_CREATE_ERR_INV_AUTH;
+		return -1;
+	}
+
+	rc = get_crypto_dev(&cipher_xform, &auth_xform, params->iv.length, &cdev_id);
+
+	if (rc) {
+		ODP_ERR("Couldn't find a crypto device");
+		return -1;
+	}
+
+	/* Setup session */
+	session = rte_cryptodev_sym_session_create(cdev_id, first_xform);
+
+	if (session == NULL)
+		return -1;
+
+	entry->rte_session  = (intptr_t)session;
+	entry->cipher_xform = cipher_xform;
+	entry->auth_xform = auth_xform;
+	entry->iv.length = params->iv.length;
+	entry->iv.data = params->iv.data;
+	entry->output_pool = params->output_pool;
+	entry->compl_queue = params->compl_queue;
+
+	/* We're happy */
+	*session_out = (intptr_t)entry;
+
+	return 0;
+}
+
+int odp_crypto_session_destroy(odp_crypto_session_t session)
+{
+	struct rte_cryptodev_sym_session *rte_session = NULL;
+	odp_dpdk_session_entry *entry;
+
+	entry = (odp_dpdk_session_entry *)session;
+
+	rte_session = (struct rte_cryptodev_sym_session *)
+			(intptr_t)entry->rte_session;
+
+	rte_session = rte_cryptodev_sym_session_free(rte_session->dev_id,
+						     rte_session);
+
+	if (rte_session != NULL)
+		return -1;
+
+	/* remove the odp_dpdk_session_entry */
+	memset(entry, 0, sizeof(*entry));
+	free_session(entry);
+
+	return 0;
+}
+
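+/*
+ * Build an rte_crypto op from the ODP operation parameters, run it
+ * synchronously on the session's device, and deliver the result either
+ * inline or via the session's completion queue.
+ */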
+int odp_crypto_operation(odp_crypto_op_params_t *params,
+		     odp_bool_t *posted,
+		     odp_crypto_op_result_t *result)
+{
+	odp_crypto_alg_err_t rc_cipher = ODP_CRYPTO_ALG_ERR_NONE;
+	odp_crypto_alg_err_t rc_auth = ODP_CRYPTO_ALG_ERR_NONE;
+	struct rte_crypto_sym_xform cipher_xform;
+	struct rte_crypto_sym_xform auth_xform;
+	struct rte_cryptodev_sym_session *rte_session = NULL;
+	odp_crypto_op_result_t local_result;
+	odp_dpdk_session_entry *entry;
+	uint8_t *data_addr, *aad_head;
+	struct rte_crypto_op *op;
+	uint16_t rc;
+	uint32_t plain_len, aad_len;
+
+	entry = (odp_dpdk_session_entry *)(intptr_t)params->session;
+	if (entry == NULL)
+		return -1;
+
+	rte_session = (struct rte_cryptodev_sym_session *)
+			(intptr_t)entry->rte_session;
+	if (rte_session == NULL)
+		return -1;
+
+	cipher_xform = entry->cipher_xform;
+	auth_xform = entry->auth_xform;
+
+	/* Resolve output buffer */
+	if (ODP_PACKET_INVALID == params->out_pkt &&
+	    ODP_POOL_INVALID != entry->output_pool)
+		params->out_pkt = odp_packet_alloc(entry->output_pool,
+				odp_packet_len(params->pkt));
+
+	if (params->pkt != params->out_pkt) {
+		if (odp_unlikely(ODP_PACKET_INVALID == params->out_pkt))
+			ODP_ABORT();
+		(void)odp_packet_copy_from_pkt(params->out_pkt,
+					       0,
+					       params->pkt,
+					       0,
+					       odp_packet_len(params->pkt));
+		_odp_packet_copy_md_to_packet(params->pkt, params->out_pkt);
+		odp_packet_free(params->pkt);
+		params->pkt = ODP_PACKET_INVALID;
+	}
+
+	data_addr = odp_packet_data(params->out_pkt);
+
+	op = rte_crypto_op_alloc(crypto_op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+	if (op == NULL)
+		return -1;
+
+	/* Set crypto operation data parameters */
+	rte_crypto_op_attach_sym_session(op, rte_session);
+	op->sym->auth.digest.data = data_addr + params->hash_result_offset;
+	op->sym->auth.digest.phys_addr =
+		rte_pktmbuf_mtophys_offset((struct rte_mbuf *)params->out_pkt,
+					   params->hash_result_offset);
+	op->sym->auth.digest.length = auth_xform.auth.digest_length;
+
+	/* For SNOW3G algorithms, offset/length must be in bits */
+	if (auth_xform.auth.algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
+		op->sym->auth.data.offset = params->auth_range.offset << 3;
+		op->sym->auth.data.length = params->auth_range.length << 3;
+	} else {
+		op->sym->auth.data.offset = params->auth_range.offset;
+		op->sym->auth.data.length = params->auth_range.length;
+	}
+
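+	/* Bytes that are authenticated but not ciphered are passed to the
+	 * device as AAD; they are taken from the start of the auth range. */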
+	aad_head = data_addr + params->auth_range.offset;
+	plain_len   = params->cipher_range.length;
+	aad_len = params->auth_range.length - plain_len;
+
+	if (aad_len > 0) {
+		op->sym->auth.aad.data = rte_malloc("aad", aad_len, 0);
+		if (op->sym->auth.aad.data == NULL) {
+			ODP_ERR("Failed to allocate memory for AAD");
+			rte_crypto_op_free(op);
+			return -1;
+		}
+		memcpy(op->sym->auth.aad.data, aad_head, aad_len);
+		op->sym->auth.aad.phys_addr =
+			rte_malloc_virt2phy(op->sym->auth.aad.data);
+		op->sym->auth.aad.length = aad_len;
+	}
+
+	if (entry->iv.length == 0) {
+		ODP_ERR("Wrong IV length");
+		rte_crypto_op_free(op);
+		return -1;
+	}
+
+	op->sym->cipher.iv.data = rte_malloc("iv", entry->iv.length, 0);
+	if (op->sym->cipher.iv.data == NULL) {
+		ODP_ERR("Failed to allocate memory for IV");
+		rte_crypto_op_free(op);
+		return -1;
+	}
+
+	if (params->override_iv_ptr) {
+		memcpy(op->sym->cipher.iv.data, params->override_iv_ptr,
+		       entry->iv.length);
+	} else if (entry->iv.data) {
+		memcpy(op->sym->cipher.iv.data, entry->iv.data,
+		       entry->iv.length);
+	} else {
+		rte_crypto_op_free(op);
+		return -1;
+	}
+
+	op->sym->cipher.iv.phys_addr =
+		rte_malloc_virt2phy(op->sym->cipher.iv.data);
+	op->sym->cipher.iv.length = entry->iv.length;
+
+	/* For SNOW3G algorithms, offset/length must be in bits */
+	if (cipher_xform.cipher.algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2) {
+		op->sym->cipher.data.offset = params->cipher_range.offset << 3;
+		op->sym->cipher.data.length = params->cipher_range.length << 3;
+
+	} else {
+		op->sym->cipher.data.offset = params->cipher_range.offset;
+		op->sym->cipher.data.length = params->cipher_range.length;
+	}
+
+	op->sym->m_src = (struct rte_mbuf *)params->out_pkt;
+	rc = rte_cryptodev_enqueue_burst(rte_session->dev_id, 0, &op, 1);
+
+	if (rc == 0) {
+		ODP_ERR("Failed to enqueue packet");
+		rte_crypto_op_free(op);
+		return -1;
+	}
+
+	/* Synchronous operation: poll until the device completes the op */
+	while (rte_cryptodev_dequeue_burst(rte_session->dev_id, 0,
+					   &op, 1) == 0)
+		;
+
+	if (op->status == RTE_CRYPTO_OP_STATUS_AUTH_FAILED)
+		rc_auth = ODP_CRYPTO_ALG_ERR_ICV_CHECK;
+
+	/* Per-op IV/AAD buffers are no longer needed */
+	rte_free(op->sym->cipher.iv.data);
+	if (aad_len > 0)
+		rte_free(op->sym->auth.aad.data);
+
+	params->out_pkt = (odp_packet_t)op->sym->m_src;
+
+	/* Fill in result */
+	local_result.ctx = params->ctx;
+	local_result.pkt = params->out_pkt;
+	local_result.cipher_status.alg_err = rc_cipher;
+	local_result.cipher_status.hw_err = ODP_CRYPTO_HW_ERR_NONE;
+	local_result.auth_status.alg_err = rc_auth;
+	local_result.auth_status.hw_err = ODP_CRYPTO_HW_ERR_NONE;
+	local_result.ok =
+		(rc_cipher == ODP_CRYPTO_ALG_ERR_NONE) &&
+		(rc_auth == ODP_CRYPTO_ALG_ERR_NONE);
+
+	rte_crypto_op_free(op);
+
+	/* If specified during creation post event to completion queue */
+	if (ODP_QUEUE_INVALID != entry->compl_queue) {
+		odp_event_t completion_event;
+		odp_crypto_generic_op_result_t *op_result;
+
+		completion_event = odp_packet_to_event(params->out_pkt);
+		_odp_buffer_event_type_set(
+			odp_buffer_from_event(completion_event),
+			ODP_EVENT_CRYPTO_COMPL);
+		/* Asynchronous, build result (no HW so no errors) and send it*/
+		op_result = get_op_result_from_event(completion_event);
+		op_result->magic = OP_RESULT_MAGIC;
+		op_result->result = local_result;
+		if (odp_queue_enq(entry->compl_queue, completion_event)) {
+			odp_event_free(completion_event);
+			return -1;
+		}
+
+		/* Indicate to caller operation was async */
+		*posted = 1;
+	} else {
+		/* Synchronous, simply return results */
+		if (!result)
+			return -1;
+		*result = local_result;
+
+		/* Indicate to caller operation was sync */
+		*posted = 0;
+	}
+
+	return 0;
+}
+
+int odp_crypto_term_global(void)
+{
+	int rc = 0;
+	int ret;
+	int count = 0;
+	odp_dpdk_session_entry *session;
+
+	for (session = global->free; session != NULL; session = session->next)
+		count++;
+	if (count != MAX_SESSIONS) {
+		ODP_ERR("crypto sessions still active\n");
+		rc = -1;
+	}
+
+	ret = odp_shm_free(odp_shm_lookup("crypto_pool"));
+	if (ret < 0) {
+		ODP_ERR("shm free failed for crypto_pool\n");
+		rc = -1;
+	}
+
+	return rc;
+}
+
+int odp_crypto_term_local(void)
+{
+	return 0;
+}
+
+int32_t
+odp_random_data(uint8_t *buf, int32_t len, odp_bool_t use_entropy ODP_UNUSED)
+{
+	int32_t rc;
+
+	rc = RAND_bytes(buf, len);
+	return (1 == rc) ? len /*success*/ : -1 /*failure*/;
+}
+
+odp_crypto_compl_t odp_crypto_compl_from_event(odp_event_t ev)
+{
+	/* This check not mandated by the API specification */
+	if (odp_event_type(ev) != ODP_EVENT_CRYPTO_COMPL)
+		ODP_ABORT("Event not a crypto completion");
+	return (odp_crypto_compl_t)ev;
+}
+
+odp_event_t odp_crypto_compl_to_event(odp_crypto_compl_t completion_event)
+{
+	return (odp_event_t)completion_event;
+}
+
+void
+odp_crypto_compl_result(odp_crypto_compl_t completion_event,
+			odp_crypto_op_result_t *result)
+{
+	odp_event_t ev = odp_crypto_compl_to_event(completion_event);
+	odp_crypto_generic_op_result_t *op_result;
+
+	op_result = get_op_result_from_event(ev);
+
+	if (OP_RESULT_MAGIC != op_result->magic)
+		ODP_ABORT();
+
+	memcpy(result, &op_result->result, sizeof(*result));
+}
+
+void
+odp_crypto_compl_free(odp_crypto_compl_t completion_event)
+{
+	_odp_buffer_event_type_set(
+		odp_buffer_from_event((odp_event_t)completion_event),
+		ODP_EVENT_PACKET);
+}
diff --git a/platform/linux-dpdk/odp_init.c b/platform/linux-dpdk/odp_init.c
index d50c576..35398ad 100644
--- a/platform/linux-dpdk/odp_init.c
+++ b/platform/linux-dpdk/odp_init.c
@@ -555,10 +555,17 @@  int odp_init_local(odp_instance_t instance, odp_thread_type_t thr_type)
 	}
 	stage = POOL_INIT;
 
+	if (odp_crypto_init_local()) {
+		ODP_ERR("ODP crypto init failed.\n");
+		goto init_fail;
+	}
+	stage = CRYPTO_INIT;
+
 	if (sched_fn->init_local()) {
 		ODP_ERR("ODP schedule local init failed.\n");
 		goto init_fail;
 	}
 	/* stage = SCHED_INIT; */
 
 	return 0;
@@ -606,6 +613,13 @@  int _odp_term_local(enum init_stage stage)
 		}
 		/* Fall through */
 
+	case CRYPTO_INIT:
+		if (odp_crypto_term_local()) {
+			ODP_ERR("ODP crypto term failed.\n");
+			rc = -1;
+		}
+		/* Fall through */
+
 	default:
 		break;
 	}
diff --git a/platform/linux-generic/include/odp_internal.h b/platform/linux-generic/include/odp_internal.h
index 3429781..11e2c2f 100644
--- a/platform/linux-generic/include/odp_internal.h
+++ b/platform/linux-generic/include/odp_internal.h
@@ -104,6 +104,8 @@  int odp_queue_term_global(void);
 
 int odp_crypto_init_global(void);
 int odp_crypto_term_global(void);
+int odp_crypto_init_local(void);
+int odp_crypto_term_local(void);
 
 int odp_timer_init_global(void);
 int odp_timer_term_global(void);