[API-NEXT,v8,1/1] comp: compression spec

Message ID 1501851623-8731-2-git-send-email-odpbot@yandex.ru
State Superseded
Headers show
Series
  • comp: compression spec
Related show

Commit Message

Github ODP bot Aug. 4, 2017, 1 p.m.
From: Shally Verma <shally.verma@cavium.com>


Signed-off-by: Shally Verma <shally.verma@cavium.com>

Signed-off-by: Mahipal Challa <mahipal.challa@cavium.com> Cc PrasadAthreya.Narayana@cavium.com

---
/** Email created from pull request 102 (1234sv:api-next)
 ** https://github.com/Linaro/odp/pull/102
 ** Patch: https://github.com/Linaro/odp/pull/102.patch
 ** Base sha: 8390f890d4bd2babb63a24f7b15d2f4763e44050
 ** Merge commit sha: fbdff8c82a19f5b640ae299204b3bb1bbbefdccb
 **/
 include/odp/api/spec/comp.h | 815 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 815 insertions(+)
 create mode 100644 include/odp/api/spec/comp.h

Comments

Savolainen, Petri (Nokia - FI/Espoo) Aug. 8, 2017, 1:08 p.m. | #1
> -----Original Message-----

> From: lng-odp [mailto:lng-odp-bounces@lists.linaro.org] On Behalf Of

> Github ODP bot

> Sent: Friday, August 04, 2017 4:00 PM

> To: lng-odp@lists.linaro.org

> Subject: [lng-odp] [PATCH API-NEXT v8 1/1] comp: compression spec

> 

> From: Shally Verma <shally.verma@cavium.com>

> 

> Signed-off-by: Shally Verma <shally.verma@cavium.com>

> Signed-off-by: Mahipal Challa <mahipal.challa@cavium.com> Cc

> PrasadAthreya.Narayana@cavium.com

> ---

> /** Email created from pull request 102 (1234sv:api-next)

>  ** https://github.com/Linaro/odp/pull/102

>  ** Patch: https://github.com/Linaro/odp/pull/102.patch

>  ** Base sha: 8390f890d4bd2babb63a24f7b15d2f4763e44050

>  ** Merge commit sha: fbdff8c82a19f5b640ae299204b3bb1bbbefdccb

>  **/

>  include/odp/api/spec/comp.h | 815

> ++++++++++++++++++++++++++++++++++++++++++++

>  1 file changed, 815 insertions(+)

>  create mode 100644 include/odp/api/spec/comp.h

> 

> diff --git a/include/odp/api/spec/comp.h b/include/odp/api/spec/comp.h

> new file mode 100644

> index 00000000..2956094c

> --- /dev/null

> +++ b/include/odp/api/spec/comp.h

> @@ -0,0 +1,815 @@

> +/* Copyright (c) 2013, Linaro Limited


Year 2017

> +

> +/**

> + * Comp API hash algorithm

> + *

> + */

> +typedef enum {

> +	/** ODP_COMP_HASH_ALG_NONE*/


This kind of comment is not very helpful. Each enumeration needs an explanation - like odp_comp_alg_t below.

> +	ODP_COMP_HASH_ALG_NONE,

> +	/** ODP_COMP_HASH_ALG_SHA1*/

> +	ODP_COMP_HASH_ALG_SHA1,

> +	/**  ODP_COMP_HASH_ALG_SHA256*/

> +	ODP_COMP_HASH_ALG_SHA256

> +} odp_comp_hash_alg_t;

> +

> +/**

> + * Comp API compression algorithm

> + *

> + */

> +typedef enum {

> +	/** No algorithm specified.

> +	 * Means no compression, output == input.

> +	 * if provided, no operation (compression/decompression or

> hash)

> +	 * applied on input. Added for testing purpose.

> +	 */

> +	ODP_COMP_ALG_NULL,

> +	/** DEFLATE - RFC1951 */

> +	ODP_COMP_ALG_DEFLATE,

> +	/** ZLIB - RFC1950 */

> +	ODP_COMP_ALG_ZLIB,

> +	/** LZS */

> +	ODP_COMP_ALG_LZS

> +} odp_comp_alg_t;

> +

> +

> +/**

> + * Hash algorithms in a bit field structure

> + *

> + */

> +typedef union odp_comp_hash_algos_t {

> +	/** hash algorithms */

> +	struct {

> +		/** SHA-1 */

> +		uint32_t sha1  : 1;

> +

> +		/** SHA with 256 bits of Message Digest */

> +		uint32_t sha256 : 1;



Need to be more explicit in algorithm definition: SHA-1, SHA-256, ... algorithm (SHA-2 would also do, but we use SHA-256 in crypto API since it seems to be used by standards).

Actually, these explanations should go under the enum definitions, and then just refer to those enums here - like odp_comp_algos_t below.


> +

> +	} bit;

> +

> +	/** All bits of the bit field structure

> +	 *

> +	 * This field can be used to set/clear all flags, or bitwise

> +	 * operations over the entire structure.

> +	 */

> +	uint32_t all_bits;

> +} odp_comp_hash_algos_t;

> +

> +/**

> + * Comp algorithms in a bit field structure

> + *

> + */

> +typedef union odp_comp_algos_t {

> +	/** Compression algorithms */

> +	struct {

> +		/** ODP_COMP_ALG_NULL */

> +		uint32_t null       : 1;

> +

> +		/** ODP_COMP_ALG_DEFLATE */

> +		uint32_t deflate    : 1;

> +

> +		/** ODP_COMP_ALG_ZLIB */

> +		uint32_t zlib       : 1;

> +

> +		/** ODP_COMP_ALG_LZS */

> +		uint32_t lzs        :1;

> +	} bit;

> +

> +	/** All bits of the bit field structure

> +	 * This field can be used to set/clear all flags, or bitwise

> +	 * operations over the entire structure.

> +	 */

> +	uint32_t all_bits;

> +} odp_comp_algos_t;

> +

> +/**

> + * Compression Interface Capabilities

> + *

> + */

> +typedef struct odp_comp_capability_t {

> +	/** Maximum number of  sessions */

> +	uint32_t max_sessions;

> +

> +	/** Supported compression algorithms */

> +	odp_comp_algos_t comp_algs;


No need to save one character => comp_algos

> +

> +	/** Supported hash algorithms. */

> +	odp_comp_hash_algos_t hash_algs;


hash_algos

> +

> +	/* sync/async mode of operation support.

> +	 * Implementation should support atleast one of the mode.

> +	 */



"mode" field definition missing on this line ?


> +

> +	/** Support type for synchronous operation mode

> (ODP_COMP_SYNC).

> +	 *  User should set odp_comp_session_param_t:mode based on

> +	 *  support level as indicated by this param.

> +	 */

> +	odp_support_t sync;

> +

> +	/** Support type for asynchronous operation mode

> (ODP_COMP_ASYNC).

> +	 *  User should set odp_comp_session_param_t:mode param based

> on

> +	 *  support level as indicated by this param.

> +	 */

> +	odp_support_t async;

> +} odp_comp_capability_t;

> +

> +/**

> + * Hash algorithm capabilities

> + *

> + */

> +typedef struct odp_comp_hash_alg_capability_t {

> +	/** Digest length in bytes */

> +	uint32_t digest_len;

> +} odp_comp_hash_alg_capability_t;

> +

> +/**

> + * Compression algorithm capabilities structure for each algorithm.

> + *

> + */

> +typedef struct odp_comp_alg_capability_t {

> +	/** Enumeration indicating alg support for dictionary load */

> +	odp_support_t support_dict;

> +

> +	/** Optional Maximum length of dictionary supported

> +	 *   by implementation of the algorithm.

> +	 *

> +	 *   Invalid if support_dict == ODP_SUPPORT_NO.

> +	 *

> +	 *   Implementation use dictionary of length less than or equal

> to value

> +	 *   indicated by dict_len. if set to 0 and if support_dict ==

> +	 *   ODP_SUPPORT_YES, then implementation will use dictionary

> length

> +	 *   less than or equal to user input length in

> odp_comp_set_dict()

> +	 *   and update used dictionary length at output of the call.

> +	 *

> +	 */

> +	uint32_t dict_len;

> +

> +	/* Maximum compression level supported by implementation of

> this algo.

> +	 *  Indicates number of compression levels supported by

> implementation,

> +	 *

> +	 * where,

> +	 *

> +	 * 1 means fastest compression i.e. output produced at

> +	 * best possible speed at the expense of compression quality,

> and

> +	 *

> +	 * max_level means best compression i.e.output produced is best

> possible

> +	 * compressed content at the expense of speed.

> +	 *

> +	 * Example, if max_level = 4 , it means algorithm supports four

> levels

> +	 * of compression from value 1 up to 4. User can set this value

> from

> +	 * 1 (fastest compression) to 4 (best compression).

> +	 * See RFC1950 for an example explanation to level.

> +	 *

> +	 * Value 0 mean implementation use its default value.

> +	 *

> +	 */

> +	uint32_t max_level;

> +

> +	/* Supported hash algorithms */

> +	odp_comp_hash_algos_t hash_alg;

> +} odp_comp_alg_capability_t;

> +

> +/**

> + * Comp API dictionary type

> + *

> + */

> +typedef struct odp_comp_dict_t {

> +	/** pointer to character array */

> +	uint8_t *buf;


Is the data input only? If it is, then this should be const data pointer.

What is expected about the format of the data? Null terminated strings? If it's strings, then e.g. const char * would be better.


> +	/** length of the dictionary. */

> +	uint32_t len;

> +} odp_comp_dict_t;

> +

> +/**

> + * Comp API algorithm specific parameters

> + *

> + */

> +typedef struct odp_comp_alg_param_t {

> +	struct comp_alg_def_param {

> +		/** compression level where

> +		 * ODP_COMP_LEVEL_MIN <= level <= ODP_COMP_LEVEL_MAX

> +		 */

> +		odp_comp_level_t level;

> +		/** huffman code to use */

> +		odp_comp_huffman_code_t comp_code;

> +	} deflate;


Doxygen comment missing? Is this filled in only if algorithms is ALG_DEFLATE ?


> +	struct comp_alg_zlib_param {

> +			/** deflate algo params */

> +			struct comp_alg_def_param def;

> +	} zlib;


Same here.

> +} odp_comp_alg_param_t;

> +

> +/**

> + * Comp API data range specifier

> + *

> + */

> +typedef union odp_comp_data_t {

> +	struct {

> +		/** packet */

> +		odp_packet_t packet;

> +

> +		/** packet data range to operate on  */

> +		odp_packet_data_range_t data_range;



Barry has indicated a use case for defining multiple ranges per packet. How will that be handled?


> +	} pkt;

> +} odp_comp_data_t;



Packet is quite deep in parameter structures. E.g. in compress call

param.input.pkt.packet            = pkt;
param.input.pkt.data_range.offset = 0;
param.input.pkt.data_range.len    = len;

odp_comp_compress(&param, &result)



I'd pull up the packet handle(s) from param, and maybe have separate calls for full packets and ranges:

int odp_comp_packet(const odp_packet_t pkt_in[],
                    odp_packet_t pkt_out[],
                    const odp_comp_op_param_t param[],
                    int num_pkt);

typedef struct odp_comp_packet_range_t {
	int num_range;

	struct {
		uint32_t offset;
		uint32_t len;
	} range[];

} odp_comp_packet_range_t;

int odp_comp_packet_range(const odp_packet_t pkt_in[],
                          const odp_comp_packet_range_t range_in[],
                          odp_packet_t pkt_out[],
                          // also range_out needed ??
                          const odp_comp_op_param_t param[],
                          int num_pkt);


Then later on, direct memory address input/output would look something like this:

int odp_comp_mem(uint8_t *ptr_in[],
                 uint32_t len_in[],
                 uint8_t *ptr_out[],
                 uint32_t *len_out[],
                 const odp_comp_op_param_t param[],
                 int num_ptr);


> +

> + /**

> + * Comp API session creation parameters

> + *

> + */

> +typedef struct odp_comp_session_param_t {

> +	/** Compress vs. Decompress operation */

> +	odp_comp_op_t op;

> +

> +	/** Sync vs Async

> +	 *

> +	 * When mode = ODP_COMP_SYNC,

> odp_comp_compress()/odp_comp_decomp()

> +	 * should be called.

> +	 *

> +	 * When mode = ODP_COMP_ASYNC, odp_comp_compress_enq()/

> +	 * odp_comp_decomp_enq() should be called.

> +	 *

> +	 * Use odp_comp_capability() for supported mode.

> +	 *

> +	 */

> +	odp_comp_op_mode_t mode;

> +

> +	/** Compression algorithm

> +	 *

> +	 *  Use odp_comp_capability() for supported algorithms.

> +	 */

> +	odp_comp_alg_t comp_alg;

> +

> +	/** Hash algorithm

> +	 *

> +	 *  Use odp_comp_alg_capability() for supported hash algo for

> +	 *  compression algo given as comp_alg. Implementation should

> not

> +	 *  support hash only operation on data. output should always

> contain

> +	 *  data + hash.

> +	 *

> +	 */

> +	odp_comp_hash_alg_t hash_alg;

> +

> +	/** parameters specific to compression */

> +	odp_comp_alg_param_t alg_param;

> +

> +	/** Async mode completion event queue

> +	 *

> +	 * When mode = ODP_COMP_ASYNC, user should wait on

> ODP_EVENT_PACKET

> +	 * with subtype ODP_EVENT_PACKET_COMP on this queue.

> +	 *

> +	 * By default, implementation enques completion events in-

> order-of

> +	 * request submission and thus queue is considered ordered.

> +	 *

> +	 * Please note, behavior could be changed or enhanced

> +	 * to queue event in-order-of their completion to enable

> +	 * performance-oriented application to leverage hw offered

> parallelism.

> +	 * However, this will be subject to application requirement and

> more

> +	 * explicit defined use-case.

> +	 *

> +	 */

> +	odp_queue_t compl_queue;

> +} odp_comp_session_param_t;

> +

> +/**

> + * Comp API operation parameters.

> + * Called to process each data unit.

> + *

> + */

> +typedef struct odp_comp_op_param_t {

> +	/** Session handle from creation */

> +	odp_comp_session_t session;

> +

> +	/** User context */

> +	void *ctx;

> +

> +	/** Boolean indicating End of data, where

> +	 *

> +	 *   true : last chunk

> +	 *

> +	 *   false: more to follow

> +	 *

> +	 * If set to true, indicates this is the last chunk of

> +	 * data. After processing of last chunk of data is complete

> i.e.

> +	 * call returned with any error code except

> ODP_COMP_ERR_OUT_OF_SPACE,

> +	 * implementation should move algorithm to stateless mode

> +	 * for next of batch of operation i.e. reset history,

> +	 * insert 'End of Block' marker into compressed data stream(if

> +	 * supported by algo).See deflate/zlib for interpretation of

> +	 * stateless/stateful.

> +	 *

> +	 * For stateless compressions (ex ipcomp), last should be set

> to 'true'

> +	 * for every input packet processing call.

> +	 *

> +	 * For compression + hash, digest will be available after

> +	 * last chunk is processed completely. In case of

> +	 * ODP_COMP_ERR_OUT_OF_SPACE, application should keep on

> calling

> +	 * odp_comp_xxx() API with more output buffer unless call

> returns

> +	 * with ODP_COMP_ERR_NONE or other failure code except

> +	 *  ODP_COMP_ERR_OUT_OF_SPACE.

> +	 */

> +	odp_bool_t last;

> +

> +	/** Input data */

> +	odp_comp_data_t input;


I'd move this out of the struct, since it's not an operation parameter but the target of the operation. From call to call, everything else may remain constant, but the target is not constant.

> +

> +	/** placeholder for output data.

> +	 *

> +	 * For Compression/Decompression+hash session,

> +	 * output  will store both data and digest(with digest appended

> at

> +	 * end-of-data). User should pass packet of sufficiently large

> size

> +	 * to store digest.

> +	 *

> +	 */

> +	odp_comp_data_t output;


Same here.

> +} odp_comp_op_param_t;

> +

> +/**

> + * Comp API per operation result

> + *

> + */

> +typedef struct odp_comp_op_result_t {

> +	/** User context from request */

> +	void *ctx;

> +

> +	/** Operation Return Code */

> +	odp_comp_err_t err;

> +

> +	/** Pointer to output.Valid when odp_comp_err_t is

> +	 * ODP_COMP_ERR_NONE or ODP_COMP_ERR_OUT_OF_SPACE

> +	 *

> +	 * Contain data after compression/decompression operation,

> +	 * or data + digest for compression/decompression + hash

> operation.

> +	 *

> +	 */

> +	odp_comp_data_t output;



This would also go out of the struct.


> +} odp_comp_op_result_t;

> +


-Petri
shally verma Aug. 8, 2017, 4:51 p.m. | #2
Petri/Berry

As per discussion in today's call, this is what I summarize :

Two new requirements added:

1. Support compression/decompression of multiple ranges within one
single packet, and
2. Operating on multiple packets within one call, where each packet may
further carry multiple ranges.

To meet the 1st requirement, the current proposal suggests calling
odp_packet_compress/decomp() once per packet per range, as this allows
easy and clean management of the output buffer and also more flexibility
across various possible application use cases:
- the call can be made in stateful or stateless mode for each range, or
- headers can be modified according to each range's output,
- the application manages output buffers at its end,
- async notification on an out-of-space condition is easily maintained.

Currently, we do support file-based compression using ODP APIs, but
only on a stream of data bytes where 1 chunk occupies 1 packet (which
can be segmented/unsegmented), not on a stream of packets.
Is there a use-case for a file which consists of a stream of
packets (where each packet is, for example, an HTTP packet)?

The new proposal says "allow multiple ranges within one single call". A few
concerns were raised about this; the key ones include:

- How do we know whether each range should be compressed in stateful or
stateless mode, i.e. whether each range is independent or dependent?
- How do we handle an out_of_space error while operating on an individual
range, especially when the implementation tries to use HW
parallelization for better throughput?
- How do we support the same design in asynchronous mode?
- How do we see it - as a performance gain? Or ease-of-use? Or might it
end up introducing more overhead to the implementation?

Please provide your feedback on the design issues outlined above.

For now we can focus on requirement #1, as the design for requirement #2
will be based on the outcome of #1.

Thanks
Shally

On Tue, Aug 8, 2017 at 6:38 PM, Savolainen, Petri (Nokia - FI/Espoo)
<petri.savolainen@nokia.com> wrote:
>

>

>> -----Original Message-----

>> From: lng-odp [mailto:lng-odp-bounces@lists.linaro.org] On Behalf Of

>> Github ODP bot

>> Sent: Friday, August 04, 2017 4:00 PM

>> To: lng-odp@lists.linaro.org

>> Subject: [lng-odp] [PATCH API-NEXT v8 1/1] comp: compression spec

>>

>> From: Shally Verma <shally.verma@cavium.com>

>>

>> Signed-off-by: Shally Verma <shally.verma@cavium.com>

>> Signed-off-by: Mahipal Challa <mahipal.challa@cavium.com> Cc

>> PrasadAthreya.Narayana@cavium.com

>> ---

>> /** Email created from pull request 102 (1234sv:api-next)

>>  ** https://github.com/Linaro/odp/pull/102

>>  ** Patch: https://github.com/Linaro/odp/pull/102.patch

>>  ** Base sha: 8390f890d4bd2babb63a24f7b15d2f4763e44050

>>  ** Merge commit sha: fbdff8c82a19f5b640ae299204b3bb1bbbefdccb

>>  **/

>>  include/odp/api/spec/comp.h | 815

>> ++++++++++++++++++++++++++++++++++++++++++++

>>  1 file changed, 815 insertions(+)

>>  create mode 100644 include/odp/api/spec/comp.h

>>

>> diff --git a/include/odp/api/spec/comp.h b/include/odp/api/spec/comp.h

>> new file mode 100644

>> index 00000000..2956094c

>> --- /dev/null

>> +++ b/include/odp/api/spec/comp.h

>> @@ -0,0 +1,815 @@

>> +/* Copyright (c) 2013, Linaro Limited

>

> Year 2017

>

>> +

>> +/**

>> + * Comp API hash algorithm

>> + *

>> + */

>> +typedef enum {

>> +     /** ODP_COMP_HASH_ALG_NONE*/

>

> This kind of comment is not very helpful. Each enumeration needs explanation - like odp_comp_alg_t under.

>

>> +     ODP_COMP_HASH_ALG_NONE,

>> +     /** ODP_COMP_HASH_ALG_SHA1*/

>> +     ODP_COMP_HASH_ALG_SHA1,

>> +     /**  ODP_COMP_HASH_ALG_SHA256*/

>> +     ODP_COMP_HASH_ALG_SHA256

>> +} odp_comp_hash_alg_t;

>> +

>> +/**

>> + * Comp API compression algorithm

>> + *

>> + */

>> +typedef enum {

>> +     /** No algorithm specified.

>> +      * Means no compression, output == input.

>> +      * if provided, no operation (compression/decompression or

>> hash)

>> +      * applied on input. Added for testing purpose.

>> +      */

>> +     ODP_COMP_ALG_NULL,

>> +     /** DEFLATE - RFC1951 */

>> +     ODP_COMP_ALG_DEFLATE,

>> +     /** ZLIB - RFC1950 */

>> +     ODP_COMP_ALG_ZLIB,

>> +     /** LZS */

>> +     ODP_COMP_ALG_LZS

>> +} odp_comp_alg_t;

>> +

>> +

>> +/**

>> + * Hash algorithms in a bit field structure

>> + *

>> + */

>> +typedef union odp_comp_hash_algos_t {

>> +     /** hash algorithms */

>> +     struct {

>> +             /** SHA-1 */

>> +             uint32_t sha1  : 1;

>> +

>> +             /** SHA with 256 bits of Message Digest */

>> +             uint32_t sha256 : 1;

>

>

> Need to be more explicit in algorithm definition: SHA-1, SHA-256, ... algorithm (SHA-2 would also do, but we use SHA-256 in crypto API since it seems to be used by standards).

>

> Actually, these explanations should go under enum definitions and then just refer to those enums here - like odp_comp_algos_t under.

>

>

>> +

>> +     } bit;

>> +

>> +     /** All bits of the bit field structure

>> +      *

>> +      * This field can be used to set/clear all flags, or bitwise

>> +      * operations over the entire structure.

>> +      */

>> +     uint32_t all_bits;

>> +} odp_comp_hash_algos_t;

>> +

>> +/**

>> + * Comp algorithms in a bit field structure

>> + *

>> + */

>> +typedef union odp_comp_algos_t {

>> +     /** Compression algorithms */

>> +     struct {

>> +             /** ODP_COMP_ALG_NULL */

>> +             uint32_t null       : 1;

>> +

>> +             /** ODP_COMP_ALG_DEFLATE */

>> +             uint32_t deflate    : 1;

>> +

>> +             /** ODP_COMP_ALG_ZLIB */

>> +             uint32_t zlib       : 1;

>> +

>> +             /** ODP_COMP_ALG_LZS */

>> +             uint32_t lzs        :1;

>> +     } bit;

>> +

>> +     /** All bits of the bit field structure

>> +      * This field can be used to set/clear all flags, or bitwise

>> +      * operations over the entire structure.

>> +      */

>> +     uint32_t all_bits;

>> +} odp_comp_algos_t;

>> +

>> +/**

>> + * Compression Interface Capabilities

>> + *

>> + */

>> +typedef struct odp_comp_capability_t {

>> +     /** Maximum number of  sessions */

>> +     uint32_t max_sessions;

>> +

>> +     /** Supported compression algorithms */

>> +     odp_comp_algos_t comp_algs;

>

> No need to save one character => comp_algos

>

>> +

>> +     /** Supported hash algorithms. */

>> +     odp_comp_hash_algos_t hash_algs;

>

> hash_algos

>

>> +

>> +     /* sync/async mode of operation support.

>> +      * Implementation should support atleast one of the mode.

>> +      */

>

>

> "mode" field definition missing on this line ?

>

>

>> +

>> +     /** Support type for synchronous operation mode

>> (ODP_COMP_SYNC).

>> +      *  User should set odp_comp_session_param_t:mode based on

>> +      *  support level as indicated by this param.

>> +      */

>> +     odp_support_t sync;

>> +

>> +     /** Support type for asynchronous operation mode

>> (ODP_COMP_ASYNC).

>> +      *  User should set odp_comp_session_param_t:mode param based

>> on

>> +      *  support level as indicated by this param.

>> +      */

>> +     odp_support_t async;

>> +} odp_comp_capability_t;

>> +

>> +/**

>> + * Hash algorithm capabilities

>> + *

>> + */

>> +typedef struct odp_comp_hash_alg_capability_t {

>> +     /** Digest length in bytes */

>> +     uint32_t digest_len;

>> +} odp_comp_hash_alg_capability_t;

>> +

>> +/**

>> + * Compression algorithm capabilities structure for each algorithm.

>> + *

>> + */

>> +typedef struct odp_comp_alg_capability_t {

>> +     /** Enumeration indicating alg support for dictionary load */

>> +     odp_support_t support_dict;

>> +

>> +     /** Optional Maximum length of dictionary supported

>> +      *   by implementation of the algorithm.

>> +      *

>> +      *   Invalid if support_dict == ODP_SUPPORT_NO.

>> +      *

>> +      *   Implementation use dictionary of length less than or equal

>> to value

>> +      *   indicated by dict_len. if set to 0 and if support_dict ==

>> +      *   ODP_SUPPORT_YES, then implementation will use dictionary

>> length

>> +      *   less than or equal to user input length in

>> odp_comp_set_dict()

>> +      *   and update used dictionary length at output of the call.

>> +      *

>> +      */

>> +     uint32_t dict_len;

>> +

>> +     /* Maximum compression level supported by implementation of

>> this algo.

>> +      *  Indicates number of compression levels supported by

>> implementation,

>> +      *

>> +      * where,

>> +      *

>> +      * 1 means fastest compression i.e. output produced at

>> +      * best possible speed at the expense of compression quality,

>> and

>> +      *

>> +      * max_level means best compression i.e.output produced is best

>> possible

>> +      * compressed content at the expense of speed.

>> +      *

>> +      * Example, if max_level = 4 , it means algorithm supports four

>> levels

>> +      * of compression from value 1 up to 4. User can set this value

>> from

>> +      * 1 (fastest compression) to 4 (best compression).

>> +      * See RFC1950 for an example explanation to level.

>> +      *

>> +      * Value 0 mean implementation use its default value.

>> +      *

>> +      */

>> +     uint32_t max_level;

>> +

>> +     /* Supported hash algorithms */

>> +     odp_comp_hash_algos_t hash_alg;

>> +} odp_comp_alg_capability_t;

>> +

>> +/**

>> + * Comp API dictionary type

>> + *

>> + */

>> +typedef struct odp_comp_dict_t {

>> +     /** pointer to character array */

>> +     uint8_t *buf;

>

> Is the data input only? If it is, then this should be const data pointer.

>

> What is expected about the format of the data? Null terminated strings? If it's strings, then e.g. const char * would be better.

>

>

>> +     /** length of the dictionary. */

>> +     uint32_t len;

>> +} odp_comp_dict_t;

>> +

>> +/**

>> + * Comp API algorithm specific parameters

>> + *

>> + */

>> +typedef struct odp_comp_alg_param_t {

>> +     struct comp_alg_def_param {

>> +             /** compression level where

>> +              * ODP_COMP_LEVEL_MIN <= level <= ODP_COMP_LEVEL_MAX

>> +              */

>> +             odp_comp_level_t level;

>> +             /** huffman code to use */

>> +             odp_comp_huffman_code_t comp_code;

>> +     } deflate;

>

> Doxygen comment missing? Is this filled in only if algorithms is ALG_DEFLATE ?

>

>

>> +     struct comp_alg_zlib_param {

>> +                     /** deflate algo params */

>> +                     struct comp_alg_def_param def;

>> +     } zlib;

>

> Same here.

>

>> +} odp_comp_alg_param_t;

>> +

>> +/**

>> + * Comp API data range specifier

>> + *

>> + */

>> +typedef union odp_comp_data_t {

>> +     struct {

>> +             /** packet */

>> +             odp_packet_t packet;

>> +

>> +             /** packet data range to operate on  */

>> +             odp_packet_data_range_t data_range;

>

>

> Barry have indicated use case for defining multiple ranges per packet. How that will be handled?

>

>

>> +     } pkt;

>> +} odp_comp_data_t;

>

>

> Packet is quite deep in parameter structures. E.g. in compress call

>

> param.input.pkt.packet            = pkt;

> param.input.pkt.data_range.offset = 0;

> param.input.pkt.data_range.len    = len;

>

> odp_comp_compress(&param, &result)

>

>

>

> I'd pull up the packet handle(s) from param, and maybe have separate calls for full packets and ranges:

>

> int odp_comp_packet(const odp_packet_t pkt_in[],

>                     odp_packet_t pkt_out[],

>                     const odp_comp_op_param_t param[],

>                     int num_pkt);

>

> typedef struct odp_comp_packet_range_t {

>         int num_range;

>

>         struct {

>                 uint32_t offset;

>                 uint32_t len;

>         } range[];

>

> } odp_comp_packet_range_t;

>

> int odp_comp_packet_range(const odp_packet_t pkt_in[],

>                           const odp_comp_packet_range_t range_in[],

>                           odp_packet_t pkt_out[],

>                           // also range_out needed ??

>                           const odp_comp_op_param_t param[],

>                           int num_pkt);

>

>

> Then later on, direct memory address input/output would look something like this:

>

> int odp_comp_mem(uint8_t *ptr_in[],

>                  uint32_t len_in[],

>                  uint8_t *ptr_out[],

>                  uint32_t *len_out[],

>                  const odp_comp_op_param_t param[],

>                  int num_ptr);

>

>

>> +

>> + /**

>> + * Comp API session creation parameters

>> + *

>> + */

>> +typedef struct odp_comp_session_param_t {

>> +     /** Compress vs. Decompress operation */

>> +     odp_comp_op_t op;

>> +

>> +     /** Sync vs Async

>> +      *

>> +      * When mode = ODP_COMP_SYNC,

>> odp_comp_compress()/odp_comp_decomp()

>> +      * should be called.

>> +      *

>> +      * When mode = ODP_COMP_ASYNC, odp_comp_compress_enq()/

>> +      * odp_comp_decomp_enq() should be called.

>> +      *

>> +      * Use odp_comp_capability() for supported mode.

>> +      *

>> +      */

>> +     odp_comp_op_mode_t mode;

>> +

>> +     /** Compression algorithm

>> +      *

>> +      *  Use odp_comp_capability() for supported algorithms.

>> +      */

>> +     odp_comp_alg_t comp_alg;

>> +

>> +     /** Hash algorithm

>> +      *

>> +      *  Use odp_comp_alg_capability() for supported hash algo for

>> +      *  compression algo given as comp_alg. Implementation should

>> not

>> +      *  support hash only operation on data. output should always

>> contain

>> +      *  data + hash.

>> +      *

>> +      */

>> +     odp_comp_hash_alg_t hash_alg;

>> +

>> +     /** parameters specific to compression */

>> +     odp_comp_alg_param_t alg_param;

>> +

>> +     /** Async mode completion event queue

>> +      *

>> +      * When mode = ODP_COMP_ASYNC, user should wait on

>> ODP_EVENT_PACKET

>> +      * with subtype ODP_EVENT_PACKET_COMP on this queue.

>> +      *

>> +      * By default, implementation enques completion events in-

>> order-of

>> +      * request submission and thus queue is considered ordered.

>> +      *

>> +      * Please note, behavior could be changed or enhanced

>> +      * to queue event in-order-of their completion to enable

>> +      * performance-oriented application to leverage hw offered

>> parallelism.

>> +      * However, this will be subject to application requirement and

>> more

>> +      * explicit defined use-case.

>> +      *

>> +      */

>> +     odp_queue_t compl_queue;

>> +} odp_comp_session_param_t;

>> +

>> +/**

>> + * Comp API operation parameters.

>> + * Called to process each data unit.

>> + *

>> + */

>> +typedef struct odp_comp_op_param_t {

>> +     /** Session handle from creation */

>> +     odp_comp_session_t session;

>> +

>> +     /** User context */

>> +     void *ctx;

>> +

>> +     /** Boolean indicating End of data, where

>> +      *

>> +      *   true : last chunk

>> +      *

>> +      *   false: more to follow

>> +      *

>> +      * If set to true, indicates this is the last chunk of

>> +      * data. After processing of last chunk of data is complete

>> i.e.

>> +      * call returned with any error code except

>> ODP_COMP_ERR_OUT_OF_SPACE,

>> +      * implementation should move algorithm to stateless mode

>> +      * for next of batch of operation i.e. reset history,

>> +      * insert 'End of Block' marker into compressed data stream(if

>> +      * supported by algo).See deflate/zlib for interpretation of

>> +      * stateless/stateful.

>> +      *

>> +      * For stateless compressions (ex ipcomp), last should be set

>> to 'true'

>> +      * for every input packet processing call.

>> +      *

>> +      * For compression + hash, digest will be available after

>> +      * last chunk is processed completely. In case of

>> +      * ODP_COMP_ERR_OUT_OF_SPACE, application should keep on

>> calling

>> +      * odp_comp_xxx() API with more output buffer unless call

>> returns

>> +      * with ODP_COMP_ERR_NONE or other failure code except

>> +      *  ODP_COMP_ERR_OUT_OF_SPACE.

>> +      */

>> +     odp_bool_t last;

>> +

>> +     /** Input data */

>> +     odp_comp_data_t input;

>

> I'd move this out of the struct, since it's not operation parameter but the target of the operation. From call to call, everything else may remain constant, but the target is not constant.

>

>> +

>> +     /** placeholder for output data.

>> +      *

>> +      * For Compression/Decompression+hash session,

>> +      * output  will store both data and digest(with digest appended

>> at

>> +      * end-of-data). User should pass packet of sufficiently large

>> size

>> +      * to store digest.

>> +      *

>> +      */

>> +     odp_comp_data_t output;

>

> Same here.

>

>> +} odp_comp_op_param_t;

>> +

>> +/**

>> + * Comp API per operation result

>> + *

>> + */

>> +typedef struct odp_comp_op_result_t {

>> +     /** User context from request */

>> +     void *ctx;

>> +

>> +     /** Operation Return Code */

>> +     odp_comp_err_t err;

>> +

>> +     /** Pointer to output.Valid when odp_comp_err_t is

>> +      * ODP_COMP_ERR_NONE or ODP_COMP_ERR_OUT_OF_SPACE

>> +      *

>> +      * Contain data after compression/decompression operation,

>> +      * or data + digest for compression/decompression + hash

>> operation.

>> +      *

>> +      */

>> +     odp_comp_data_t output;

>

>

> This would also go out of the struct.

>

>

>> +} odp_comp_op_result_t;

>> +

>

> -Petri

>

>
shally verma Aug. 8, 2017, 4:52 p.m. | #3
Just a Resend.

On Tue, Aug 8, 2017 at 10:21 PM, shally verma
<shallyvermacavium@gmail.com> wrote:
> Petri/Berry

>

> As per discussion in today's call, this is what I summarize :

>

> Two new requirements added:

>

> 1. Support compression / decompression of multiple ranges with in one

> single packet and

> 2. Operating on multiple packets with in call where each packet may

> further carry multiple range.

>

> To meet 1st requirement, current proposal says, calling

> odp_packet_compress/decomp() for each packet per range as it allows

> easy and clean management of output buffer and also more flexibility

> per various possible  application use cases:.

> - call can be made in stateful or stateless mode per each range, or

> - Modify  headers according to each range output,

> - Application manage output buffers at their end,

> - Async notification on out of space condition  will be easily maintained.

>

> Currently , we do support file-based compression using ODP APIs, but

> i.e. on stream of data bytes where 1 chunk occupy - 1 packet (which

> can be segmented/unsegmented) not on stream of Packets.

> Is there a use-case to have a file which consists of stream of

> Packets? (where packet is of type HTTP Packet ?)

>

> New proposal says "allow multiple ranges with in one single call". Few

> concerns raised for this, key one include:

>

> - How do we know whether each range should be compressed in stateful or stateless

> mode, i.e. whether each range is independent or dependent?

> - How do we handle out_of_space error while operating on individual

> range? Especially for the case when Implementation try to use HW

> parallelization for better throughput?

> - How do we support same design in asynchronous mode ??

> - How do we see it  - as performance gain ? Or ease-of-use?  Or it may

> end up introducing more overhead to implementation.

>

> Please provide your feedback on the design issues outlined above.

>

> For now we can focus on requirement #1 as design for requirement # 2

> will be based on outcome of #1.

>

> Thanks

> Shally

>

> On Tue, Aug 8, 2017 at 6:38 PM, Savolainen, Petri (Nokia - FI/Espoo)

> <petri.savolainen@nokia.com> wrote:

>>

>>

>>> -----Original Message-----

>>> From: lng-odp [mailto:lng-odp-bounces@lists.linaro.org] On Behalf Of

>>> Github ODP bot

>>> Sent: Friday, August 04, 2017 4:00 PM

>>> To: lng-odp@lists.linaro.org

>>> Subject: [lng-odp] [PATCH API-NEXT v8 1/1] comp: compression spec

>>>

>>> From: Shally Verma <shally.verma@cavium.com>

>>>

>>> Signed-off-by: Shally Verma <shally.verma@cavium.com>

>>> Signed-off-by: Mahipal Challa <mahipal.challa@cavium.com> Cc

>>> PrasadAthreya.Narayana@cavium.com

>>> ---

>>> /** Email created from pull request 102 (1234sv:api-next)

>>>  ** https://github.com/Linaro/odp/pull/102

>>>  ** Patch: https://github.com/Linaro/odp/pull/102.patch

>>>  ** Base sha: 8390f890d4bd2babb63a24f7b15d2f4763e44050

>>>  ** Merge commit sha: fbdff8c82a19f5b640ae299204b3bb1bbbefdccb

>>>  **/

>>>  include/odp/api/spec/comp.h | 815

>>> ++++++++++++++++++++++++++++++++++++++++++++

>>>  1 file changed, 815 insertions(+)

>>>  create mode 100644 include/odp/api/spec/comp.h

>>>

>>> diff --git a/include/odp/api/spec/comp.h b/include/odp/api/spec/comp.h

>>> new file mode 100644

>>> index 00000000..2956094c

>>> --- /dev/null

>>> +++ b/include/odp/api/spec/comp.h

>>> @@ -0,0 +1,815 @@

>>> +/* Copyright (c) 2013, Linaro Limited

>>

>> Year 2017

>>

>>> +

>>> +/**

>>> + * Comp API hash algorithm

>>> + *

>>> + */

>>> +typedef enum {

>>> +     /** ODP_COMP_HASH_ALG_NONE*/

>>

>> This kind of comment is not very helpful. Each enumeration needs explanation - like odp_comp_alg_t under.

>>

>>> +     ODP_COMP_HASH_ALG_NONE,

>>> +     /** ODP_COMP_HASH_ALG_SHA1*/

>>> +     ODP_COMP_HASH_ALG_SHA1,

>>> +     /**  ODP_COMP_HASH_ALG_SHA256*/

>>> +     ODP_COMP_HASH_ALG_SHA256

>>> +} odp_comp_hash_alg_t;

>>> +

>>> +/**

>>> + * Comp API compression algorithm

>>> + *

>>> + */

>>> +typedef enum {

>>> +     /** No algorithm specified.

>>> +      * Means no compression, output == input.

>>> +      * if provided, no operation (compression/decompression or

>>> hash)

>>> +      * applied on input. Added for testing purpose.

>>> +      */

>>> +     ODP_COMP_ALG_NULL,

>>> +     /** DEFLATE - RFC1951 */

>>> +     ODP_COMP_ALG_DEFLATE,

>>> +     /** ZLIB - RFC1950 */

>>> +     ODP_COMP_ALG_ZLIB,

>>> +     /** LZS */

>>> +     ODP_COMP_ALG_LZS

>>> +} odp_comp_alg_t;

>>> +

>>> +

>>> +/**

>>> + * Hash algorithms in a bit field structure

>>> + *

>>> + */

>>> +typedef union odp_comp_hash_algos_t {

>>> +     /** hash algorithms */

>>> +     struct {

>>> +             /** SHA-1 */

>>> +             uint32_t sha1  : 1;

>>> +

>>> +             /** SHA with 256 bits of Message Digest */

>>> +             uint32_t sha256 : 1;

>>

>>

>> Need to be more explicit in algorithm definition: SHA-1, SHA-256, ... algorithm (SHA-2 would also do, but we use SHA-256 in crypto API since it seems to be used by standards).

>>

>> Actually, these explanations should go under enum definitions and then just refer to those enums here - like odp_comp_algos_t under.

>>

>>

>>> +

>>> +     } bit;

>>> +

>>> +     /** All bits of the bit field structure

>>> +      *

>>> +      * This field can be used to set/clear all flags, or bitwise

>>> +      * operations over the entire structure.

>>> +      */

>>> +     uint32_t all_bits;

>>> +} odp_comp_hash_algos_t;

>>> +

>>> +/**

>>> + * Comp algorithms in a bit field structure

>>> + *

>>> + */

>>> +typedef union odp_comp_algos_t {

>>> +     /** Compression algorithms */

>>> +     struct {

>>> +             /** ODP_COMP_ALG_NULL */

>>> +             uint32_t null       : 1;

>>> +

>>> +             /** ODP_COMP_ALG_DEFLATE */

>>> +             uint32_t deflate    : 1;

>>> +

>>> +             /** ODP_COMP_ALG_ZLIB */

>>> +             uint32_t zlib       : 1;

>>> +

>>> +             /** ODP_COMP_ALG_LZS */

>>> +             uint32_t lzs        :1;

>>> +     } bit;

>>> +

>>> +     /** All bits of the bit field structure

>>> +      * This field can be used to set/clear all flags, or bitwise

>>> +      * operations over the entire structure.

>>> +      */

>>> +     uint32_t all_bits;

>>> +} odp_comp_algos_t;

>>> +

>>> +/**

>>> + * Compression Interface Capabilities

>>> + *

>>> + */

>>> +typedef struct odp_comp_capability_t {

>>> +     /** Maximum number of  sessions */

>>> +     uint32_t max_sessions;

>>> +

>>> +     /** Supported compression algorithms */

>>> +     odp_comp_algos_t comp_algs;

>>

>> No need to save one character => comp_algos

>>

>>> +

>>> +     /** Supported hash algorithms. */

>>> +     odp_comp_hash_algos_t hash_algs;

>>

>> hash_algos

>>

>>> +

>>> +     /* sync/async mode of operation support.

>>> +      * Implementation should support atleast one of the mode.

>>> +      */

>>

>>

>> "mode" field definition missing on this line ?

>>

>>

>>> +

>>> +     /** Support type for synchronous operation mode

>>> (ODP_COMP_SYNC).

>>> +      *  User should set odp_comp_session_param_t:mode based on

>>> +      *  support level as indicated by this param.

>>> +      */

>>> +     odp_support_t sync;

>>> +

>>> +     /** Support type for asynchronous operation mode

>>> (ODP_COMP_ASYNC).

>>> +      *  User should set odp_comp_session_param_t:mode param based

>>> on

>>> +      *  support level as indicated by this param.

>>> +      */

>>> +     odp_support_t async;

>>> +} odp_comp_capability_t;

>>> +

>>> +/**

>>> + * Hash algorithm capabilities

>>> + *

>>> + */

>>> +typedef struct odp_comp_hash_alg_capability_t {

>>> +     /** Digest length in bytes */

>>> +     uint32_t digest_len;

>>> +} odp_comp_hash_alg_capability_t;

>>> +

>>> +/**

>>> + * Compression algorithm capabilities structure for each algorithm.

>>> + *

>>> + */

>>> +typedef struct odp_comp_alg_capability_t {

>>> +     /** Enumeration indicating alg support for dictionary load */

>>> +     odp_support_t support_dict;

>>> +

>>> +     /** Optional Maximum length of dictionary supported

>>> +      *   by implementation of the algorithm.

>>> +      *

>>> +      *   Invalid if support_dict == ODP_SUPPORT_NO.

>>> +      *

>>> +      *   Implementation use dictionary of length less than or equal

>>> to value

>>> +      *   indicated by dict_len. if set to 0 and if support_dict ==

>>> +      *   ODP_SUPPORT_YES, then implementation will use dictionary

>>> length

>>> +      *   less than or equal to user input length in

>>> odp_comp_set_dict()

>>> +      *   and update used dictionary length at output of the call.

>>> +      *

>>> +      */

>>> +     uint32_t dict_len;

>>> +

>>> +     /* Maximum compression level supported by implementation of

>>> this algo.

>>> +      *  Indicates number of compression levels supported by

>>> implementation,

>>> +      *

>>> +      * where,

>>> +      *

>>> +      * 1 means fastest compression i.e. output produced at

>>> +      * best possible speed at the expense of compression quality,

>>> and

>>> +      *

>>> +      * max_level means best compression i.e.output produced is best

>>> possible

>>> +      * compressed content at the expense of speed.

>>> +      *

>>> +      * Example, if max_level = 4 , it means algorithm supports four

>>> levels

>>> +      * of compression from value 1 up to 4. User can set this value

>>> from

>>> +      * 1 (fastest compression) to 4 (best compression).

>>> +      * See RFC1950 for an example explanation to level.

>>> +      *

>>> +      * Value 0 mean implementation use its default value.

>>> +      *

>>> +      */

>>> +     uint32_t max_level;

>>> +

>>> +     /* Supported hash algorithms */

>>> +     odp_comp_hash_algos_t hash_alg;

>>> +} odp_comp_alg_capability_t;

>>> +

>>> +/**

>>> + * Comp API dictionary type

>>> + *

>>> + */

>>> +typedef struct odp_comp_dict_t {

>>> +     /** pointer to character array */

>>> +     uint8_t *buf;

>>

>> Is the data input only? If it is, then this should be const data pointer.

>>

>> What is expected about the format of the data? Null terminated strings? If it's strings, then e.g. const char * would be better.

>>

>>

>>> +     /** length of the dictionary. */

>>> +     uint32_t len;

>>> +} odp_comp_dict_t;

>>> +

>>> +/**

>>> + * Comp API algorithm specific parameters

>>> + *

>>> + */

>>> +typedef struct odp_comp_alg_param_t {

>>> +     struct comp_alg_def_param {

>>> +             /** compression level where

>>> +              * ODP_COMP_LEVEL_MIN <= level <= ODP_COMP_LEVEL_MAX

>>> +              */

>>> +             odp_comp_level_t level;

>>> +             /** huffman code to use */

>>> +             odp_comp_huffman_code_t comp_code;

>>> +     } deflate;

>>

>> Doxygen comment missing? Is this filled in only if algorithms is ALG_DEFLATE ?

>>

>>

>>> +     struct comp_alg_zlib_param {

>>> +                     /** deflate algo params */

>>> +                     struct comp_alg_def_param def;

>>> +     } zlib;

>>

>> Same here.

>>

>>> +} odp_comp_alg_param_t;

>>> +

>>> +/**

>>> + * Comp API data range specifier

>>> + *

>>> + */

>>> +typedef union odp_comp_data_t {

>>> +     struct {

>>> +             /** packet */

>>> +             odp_packet_t packet;

>>> +

>>> +             /** packet data range to operate on  */

>>> +             odp_packet_data_range_t data_range;

>>

>>

>> Barry have indicated use case for defining multiple ranges per packet. How that will be handled?

>>

>>

>>> +     } pkt;

>>> +} odp_comp_data_t;

>>

>>

>> Packet is quite deep in parameter structures. E.g. in compress call

>>

>> param.input.pkt.packet            = pkt;

>> param.input.pkt.data_range.offset = 0;

>> param.input.pkt.data_range.len    = len;

>>

>> odp_comp_compress(&param, &result)

>>

>>

>>

>> I'd pull up the packet handle(s) from param, and maybe have separate calls for full packets and ranges:

>>

>> int odp_comp_packet(const odp_packet_t pkt_in[],

>>                     odp_packet_t pkt_out[],

>>                     const odp_comp_op_param_t param[],

>>                     int num_pkt);

>>

>> typedef struct odp_comp_packet_range_t {

>>         int num_range;

>>

>>         struct {

>>                 uint32_t offset;

>>                 uint32_t len;

>>         } range[];

>>

>> } odp_comp_packet_range_t;

>>

>> int odp_comp_packet_range(const odp_packet_t pkt_in[],

>>                           const odp_comp_packet_range_t range_in[],

>>                           odp_packet_t pkt_out[],

>>                           // also range_out needed ??

>>                           const odp_comp_op_param_t param[],

>>                           int num_pkt);

>>

>>

>> Then later on, direct memory address input/output would look something like this:

>>

>> int odp_comp_mem(uint8_t *ptr_in[],

>>                  uint32_t len_in[],

>>                  uint8_t *ptr_out[],

>>                  uint32_t *len_out[],

>>                  const odp_comp_op_param_t param[],

>>                  int num_ptr);

>>

>>

>>> +

>>> + /**

>>> + * Comp API session creation parameters

>>> + *

>>> + */

>>> +typedef struct odp_comp_session_param_t {

>>> +     /** Compress vs. Decompress operation */

>>> +     odp_comp_op_t op;

>>> +

>>> +     /** Sync vs Async

>>> +      *

>>> +      * When mode = ODP_COMP_SYNC,

>>> odp_comp_compress()/odp_comp_decomp()

>>> +      * should be called.

>>> +      *

>>> +      * When mode = ODP_COMP_ASYNC, odp_comp_compress_enq()/

>>> +      * odp_comp_decomp_enq() should be called.

>>> +      *

>>> +      * Use odp_comp_capability() for supported mode.

>>> +      *

>>> +      */

>>> +     odp_comp_op_mode_t mode;

>>> +

>>> +     /** Compression algorithm

>>> +      *

>>> +      *  Use odp_comp_capability() for supported algorithms.

>>> +      */

>>> +     odp_comp_alg_t comp_alg;

>>> +

>>> +     /** Hash algorithm

>>> +      *

>>> +      *  Use odp_comp_alg_capability() for supported hash algo for

>>> +      *  compression algo given as comp_alg. Implementation should

>>> not

>>> +      *  support hash only operation on data. output should always

>>> contain

>>> +      *  data + hash.

>>> +      *

>>> +      */

>>> +     odp_comp_hash_alg_t hash_alg;

>>> +

>>> +     /** parameters specific to compression */

>>> +     odp_comp_alg_param_t alg_param;

>>> +

>>> +     /** Async mode completion event queue

>>> +      *

>>> +      * When mode = ODP_COMP_ASYNC, user should wait on

>>> ODP_EVENT_PACKET

>>> +      * with subtype ODP_EVENT_PACKET_COMP on this queue.

>>> +      *

>>> +      * By default, implementation enques completion events in-

>>> order-of

>>> +      * request submission and thus queue is considered ordered.

>>> +      *

>>> +      * Please note, behavior could be changed or enhanced

>>> +      * to queue event in-order-of their completion to enable

>>> +      * performance-oriented application to leverage hw offered

>>> parallelism.

>>> +      * However, this will be subject to application requirement and

>>> more

>>> +      * explicit defined use-case.

>>> +      *

>>> +      */

>>> +     odp_queue_t compl_queue;

>>> +} odp_comp_session_param_t;

>>> +

>>> +/**

>>> + * Comp API operation parameters.

>>> + * Called to process each data unit.

>>> + *

>>> + */

>>> +typedef struct odp_comp_op_param_t {

>>> +     /** Session handle from creation */

>>> +     odp_comp_session_t session;

>>> +

>>> +     /** User context */

>>> +     void *ctx;

>>> +

>>> +     /** Boolean indicating End of data, where

>>> +      *

>>> +      *   true : last chunk

>>> +      *

>>> +      *   false: more to follow

>>> +      *

>>> +      * If set to true, indicates this is the last chunk of

>>> +      * data. After processing of last chunk of data is complete

>>> i.e.

>>> +      * call returned with any error code except

>>> ODP_COMP_ERR_OUT_OF_SPACE,

>>> +      * implementation should move algorithm to stateless mode

>>> +      * for next of batch of operation i.e. reset history,

>>> +      * insert 'End of Block' marker into compressed data stream(if

>>> +      * supported by algo).See deflate/zlib for interpretation of

>>> +      * stateless/stateful.

>>> +      *

>>> +      * For stateless compressions (ex ipcomp), last should be set

>>> to 'true'

>>> +      * for every input packet processing call.

>>> +      *

>>> +      * For compression + hash, digest will be available after

>>> +      * last chunk is processed completely. In case of

>>> +      * ODP_COMP_ERR_OUT_OF_SPACE, application should keep on

>>> calling

>>> +      * odp_comp_xxx() API with more output buffer unless call

>>> returns

>>> +      * with ODP_COMP_ERR_NONE or other failure code except

>>> +      *  ODP_COMP_ERR_OUT_OF_SPACE.

>>> +      */

>>> +     odp_bool_t last;

>>> +

>>> +     /** Input data */

>>> +     odp_comp_data_t input;

>>

>> I'd move this out of the struct, since it's not operation parameter but the target of the operation. From call to call, everything else may remain constant, but the target is not constant.

>>

>>> +

>>> +     /** placeholder for output data.

>>> +      *

>>> +      * For Compression/Decompression+hash session,

>>> +      * output  will store both data and digest(with digest appended

>>> at

>>> +      * end-of-data). User should pass packet of sufficiently large

>>> size

>>> +      * to store digest.

>>> +      *

>>> +      */

>>> +     odp_comp_data_t output;

>>

>> Same here.

>>

>>> +} odp_comp_op_param_t;

>>> +

>>> +/**

>>> + * Comp API per operation result

>>> + *

>>> + */

>>> +typedef struct odp_comp_op_result_t {

>>> +     /** User context from request */

>>> +     void *ctx;

>>> +

>>> +     /** Operation Return Code */

>>> +     odp_comp_err_t err;

>>> +

>>> +     /** Pointer to output.Valid when odp_comp_err_t is

>>> +      * ODP_COMP_ERR_NONE or ODP_COMP_ERR_OUT_OF_SPACE

>>> +      *

>>> +      * Contain data after compression/decompression operation,

>>> +      * or data + digest for compression/decompression + hash

>>> operation.

>>> +      *

>>> +      */

>>> +     odp_comp_data_t output;

>>

>>

>> This would also go out of the struct.

>>

>>

>>> +} odp_comp_op_result_t;

>>> +

>>

>> -Petri

>>

>>
Narayana Prasad Athreya Aug. 8, 2017, 6:06 p.m. | #4
Can someone explain what the use-case of #1 is, and why that use-case
cannot be met with the proposed API?

PRasad

On Tuesday 08 August 2017 10:21 PM, shally verma wrote:
> Petri/Berry

>

> As per discussion in today's call, this is what I summarize :

>

> Two new requirements added:

>

> 1. Support compression / decompression of multiple ranges with in one

> single packet and

> 2. Operating on multiple packets with in call where each packet may

> further carry multiple range.

>

> To meet 1st requirement, current proposal says, calling

> odp_packet_compress/decomp() for each packet per range as it allows

> easy and clean management of output buffer and also more flexibility

> per various possible  application use cases:.

> - call can be made in stateful or stateless mode per each range, or

> - Modify  headers according to each range output,

> - Application manage output buffers at their end,

> - Async notification on out of space condition  will be easily maintained.

>

> Currently , we do support file-based compression using ODP APIs, but

> i.e. on stream of data bytes where 1 chunk occupy - 1 packet (which

> can be segmented/unsegmented) not on stream of Packets.

> Is there a use-case to have a file which consists of stream of

> Packets? (where packet is of type HTTP Packet ?)

>

> New proposal says "allow multiple ranges with in one single call". Few

> concerns raised for this, key one include:

>

> - How do we know whether each range should be compressed in stateful or stateless

> mode, i.e. whether each range is independent or dependent?

> - How do we handle out_of_space error while operating on individual

> range? Especially for the case when Implementation try to use HW

> parallelization for better throughput?

> - How do we support same design in asynchronous mode ??

> - How do we see it  - as performance gain ? Or ease-of-use?  Or it may

> end up introducing more overhead to implementation.

>

> Please provide your feedback on the design issues outlined above.

>

> For now we can focus on requirement #1 as design for requirement # 2

> will be based on outcome of #1.

>

> Thanks

> Shally

>

> On Tue, Aug 8, 2017 at 6:38 PM, Savolainen, Petri (Nokia - FI/Espoo)

> <petri.savolainen@nokia.com> wrote:

>>

>>> -----Original Message-----

>>> From: lng-odp [mailto:lng-odp-bounces@lists.linaro.org] On Behalf Of

>>> Github ODP bot

>>> Sent: Friday, August 04, 2017 4:00 PM

>>> To: lng-odp@lists.linaro.org

>>> Subject: [lng-odp] [PATCH API-NEXT v8 1/1] comp: compression spec

>>>

>>> From: Shally Verma <shally.verma@cavium.com>

>>>

>>> Signed-off-by: Shally Verma <shally.verma@cavium.com>

>>> Signed-off-by: Mahipal Challa <mahipal.challa@cavium.com> Cc

>>> PrasadAthreya.Narayana@cavium.com

>>> ---

>>> /** Email created from pull request 102 (1234sv:api-next)

>>>   ** https://github.com/Linaro/odp/pull/102

>>>   ** Patch: https://github.com/Linaro/odp/pull/102.patch

>>>   ** Base sha: 8390f890d4bd2babb63a24f7b15d2f4763e44050

>>>   ** Merge commit sha: fbdff8c82a19f5b640ae299204b3bb1bbbefdccb

>>>   **/

>>>   include/odp/api/spec/comp.h | 815

>>> ++++++++++++++++++++++++++++++++++++++++++++

>>>   1 file changed, 815 insertions(+)

>>>   create mode 100644 include/odp/api/spec/comp.h

>>>

>>> diff --git a/include/odp/api/spec/comp.h b/include/odp/api/spec/comp.h

>>> new file mode 100644

>>> index 00000000..2956094c

>>> --- /dev/null

>>> +++ b/include/odp/api/spec/comp.h

>>> @@ -0,0 +1,815 @@

>>> +/* Copyright (c) 2013, Linaro Limited

>> Year 2017

>>

>>> +

>>> +/**

>>> + * Comp API hash algorithm

>>> + *

>>> + */

>>> +typedef enum {

>>> +     /** ODP_COMP_HASH_ALG_NONE*/

>> This kind of comment is not very helpful. Each enumeration needs explanation - like odp_comp_alg_t under.

>>

>>> +     ODP_COMP_HASH_ALG_NONE,

>>> +     /** ODP_COMP_HASH_ALG_SHA1*/

>>> +     ODP_COMP_HASH_ALG_SHA1,

>>> +     /**  ODP_COMP_HASH_ALG_SHA256*/

>>> +     ODP_COMP_HASH_ALG_SHA256

>>> +} odp_comp_hash_alg_t;

>>> +

>>> +/**

>>> + * Comp API compression algorithm

>>> + *

>>> + */

>>> +typedef enum {

>>> +     /** No algorithm specified.

>>> +      * Means no compression, output == input.

>>> +      * if provided, no operation (compression/decompression or

>>> hash)

>>> +      * applied on input. Added for testing purpose.

>>> +      */

>>> +     ODP_COMP_ALG_NULL,

>>> +     /** DEFLATE - RFC1951 */

>>> +     ODP_COMP_ALG_DEFLATE,

>>> +     /** ZLIB - RFC1950 */

>>> +     ODP_COMP_ALG_ZLIB,

>>> +     /** LZS */

>>> +     ODP_COMP_ALG_LZS

>>> +} odp_comp_alg_t;

>>> +

>>> +

>>> +/**

>>> + * Hash algorithms in a bit field structure

>>> + *

>>> + */

>>> +typedef union odp_comp_hash_algos_t {

>>> +     /** hash algorithms */

>>> +     struct {

>>> +             /** SHA-1 */

>>> +             uint32_t sha1  : 1;

>>> +

>>> +             /** SHA with 256 bits of Message Digest */

>>> +             uint32_t sha256 : 1;

>>

>> Need to be more explicit in algorithm definition: SHA-1, SHA-256, ... algorithm (SHA-2 would also do, but we use SHA-256 in crypto API since it seems to be used by standards).

>>

>> Actually, these explanations should go under enum definitions and then just refer to those enums here - like odp_comp_algos_t under.

>>

>>

>>> +

>>> +     } bit;

>>> +

>>> +     /** All bits of the bit field structure

>>> +      *

>>> +      * This field can be used to set/clear all flags, or bitwise

>>> +      * operations over the entire structure.

>>> +      */

>>> +     uint32_t all_bits;

>>> +} odp_comp_hash_algos_t;

>>> +

>>> +/**

>>> + * Comp algorithms in a bit field structure

>>> + *

>>> + */

>>> +typedef union odp_comp_algos_t {

>>> +     /** Compression algorithms */

>>> +     struct {

>>> +             /** ODP_COMP_ALG_NULL */

>>> +             uint32_t null       : 1;

>>> +

>>> +             /** ODP_COMP_ALG_DEFLATE */

>>> +             uint32_t deflate    : 1;

>>> +

>>> +             /** ODP_COMP_ALG_ZLIB */

>>> +             uint32_t zlib       : 1;

>>> +

>>> +             /** ODP_COMP_ALG_LZS */

>>> +             uint32_t lzs        :1;

>>> +     } bit;

>>> +

>>> +     /** All bits of the bit field structure

>>> +      * This field can be used to set/clear all flags, or bitwise

>>> +      * operations over the entire structure.

>>> +      */

>>> +     uint32_t all_bits;

>>> +} odp_comp_algos_t;

>>> +

>>> +/**

>>> + * Compression Interface Capabilities

>>> + *

>>> + */

>>> +typedef struct odp_comp_capability_t {

>>> +     /** Maximum number of  sessions */

>>> +     uint32_t max_sessions;

>>> +

>>> +     /** Supported compression algorithms */

>>> +     odp_comp_algos_t comp_algs;

>> No need to save one character => comp_algos

>>

>>> +

>>> +     /** Supported hash algorithms. */

>>> +     odp_comp_hash_algos_t hash_algs;

>> hash_algos

>>

>>> +

>>> +     /* sync/async mode of operation support.

>>> +      * Implementation should support atleast one of the mode.

>>> +      */

>>

>> "mode" field definition missing on this line ?

>>

>>

>>> +

>>> +     /** Support type for synchronous operation mode

>>> (ODP_COMP_SYNC).

>>> +      *  User should set odp_comp_session_param_t:mode based on

>>> +      *  support level as indicated by this param.

>>> +      */

>>> +     odp_support_t sync;

>>> +

>>> +     /** Support type for asynchronous operation mode

>>> (ODP_COMP_ASYNC).

>>> +      *  User should set odp_comp_session_param_t:mode param based

>>> on

>>> +      *  support level as indicated by this param.

>>> +      */

>>> +     odp_support_t async;

>>> +} odp_comp_capability_t;

>>> +

>>> +/**

>>> + * Hash algorithm capabilities

>>> + *

>>> + */

>>> +typedef struct odp_comp_hash_alg_capability_t {

>>> +     /** Digest length in bytes */

>>> +     uint32_t digest_len;

>>> +} odp_comp_hash_alg_capability_t;

>>> +

>>> +/**

>>> + * Compression algorithm capabilities structure for each algorithm.

>>> + *

>>> + */

>>> +typedef struct odp_comp_alg_capability_t {

>>> +     /** Enumeration indicating alg support for dictionary load */

>>> +     odp_support_t support_dict;

>>> +

>>> +     /** Optional Maximum length of dictionary supported

>>> +      *   by implementation of the algorithm.

>>> +      *

>>> +      *   Invalid if support_dict == ODP_SUPPORT_NO.

>>> +      *

>>> +      *   Implementation use dictionary of length less than or equal

>>> to value

>>> +      *   indicated by dict_len. if set to 0 and if support_dict ==

>>> +      *   ODP_SUPPORT_YES, then implementation will use dictionary

>>> length

>>> +      *   less than or equal to user input length in

>>> odp_comp_set_dict()

>>> +      *   and update used dictionary length at output of the call.

>>> +      *

>>> +      */

>>> +     uint32_t dict_len;

>>> +

>>> +     /* Maximum compression level supported by implementation of

>>> this algo.

>>> +      *  Indicates number of compression levels supported by

>>> implementation,

>>> +      *

>>> +      * where,

>>> +      *

>>> +      * 1 means fastest compression i.e. output produced at

>>> +      * best possible speed at the expense of compression quality,

>>> and

>>> +      *

>>> +      * max_level means best compression i.e.output produced is best

>>> possible

>>> +      * compressed content at the expense of speed.

>>> +      *

>>> +      * Example, if max_level = 4 , it means algorithm supports four

>>> levels

>>> +      * of compression from value 1 up to 4. User can set this value

>>> from

>>> +      * 1 (fastest compression) to 4 (best compression).

>>> +      * See RFC1950 for an example explanation to level.

>>> +      *

>>> +      * Value 0 mean implementation use its default value.

>>> +      *

>>> +      */

>>> +     uint32_t max_level;

>>> +

>>> +     /* Supported hash algorithms */

>>> +     odp_comp_hash_algos_t hash_alg;

>>> +} odp_comp_alg_capability_t;

>>> +

>>> +/**

>>> + * Comp API dictionary type

>>> + *

>>> + */

>>> +typedef struct odp_comp_dict_t {

>>> +     /** pointer to character array */

>>> +     uint8_t *buf;

>> Is the data input only? If it is, then this should be const data pointer.

>>

>> What is expected about the format of the data? Null terminated strings? If it's strings, then e.g. const char * would be better.

>>

>>

>>> +     /** length of the dictionary. */

>>> +     uint32_t len;

>>> +} odp_comp_dict_t;

>>> +

>>> +/**

>>> + * Comp API algorithm specific parameters

>>> + *

>>> + */

>>> +typedef struct odp_comp_alg_param_t {

>>> +     struct comp_alg_def_param {

>>> +             /** compression level where

>>> +              * ODP_COMP_LEVEL_MIN <= level <= ODP_COMP_LEVEL_MAX

>>> +              */

>>> +             odp_comp_level_t level;

>>> +             /** huffman code to use */

>>> +             odp_comp_huffman_code_t comp_code;

>>> +     } deflate;

>> Doxygen comment missing? Is this filled in only if algorithms is ALG_DEFLATE ?

>>

>>

>>> +     struct comp_alg_zlib_param {

>>> +                     /** deflate algo params */

>>> +                     struct comp_alg_def_param def;

>>> +     } zlib;

>> Same here.

>>

>>> +} odp_comp_alg_param_t;

>>> +

>>> +/**

>>> + * Comp API data range specifier

>>> + *

>>> + */

>>> +typedef union odp_comp_data_t {

>>> +     struct {

>>> +             /** packet */

>>> +             odp_packet_t packet;

>>> +

>>> +             /** packet data range to operate on  */

>>> +             odp_packet_data_range_t data_range;

>>

>> Barry have indicated use case for defining multiple ranges per packet. How that will be handled?

>>

>>

>>> +     } pkt;

>>> +} odp_comp_data_t;

>>

>> Packet is quite deep in parameter structures. E.g. in compress call

>>

>> param.input.pkt.packet            = pkt;

>> param.input.pkt.data_range.offset = 0;

>> param.input.pkt.data_range.len    = len;

>>

>> odp_comp_compress(&param, &result)

>>

>>

>>

>> I'd pull up the packet handle(s) from param, and maybe have separate calls for full packets and ranges:

>>

>> int odp_comp_packet(const odp_packet_t pkt_in[],

>>                      odp_packet_t pkt_out[],

>>                      const odp_comp_op_param_t param[],

>>                      int num_pkt);

>>

>> typedef struct odp_comp_packet_range_t {

>>          int num_range;

>>

>>          struct {

>>                  uint32_t offset;

>>                  uint32_t len;

>>          } range[];

>>

>> } odp_comp_packet_range_t;

>>

>> int odp_comp_packet_range(const odp_packet_t pkt_in[],

>>                            const odp_comp_packet_range_t range_in[],

>>                            odp_packet_t pkt_out[],

>>                            // also range_out needed ??

>>                            const odp_comp_op_param_t param[],

>>                            int num_pkt);

>>

>>

>> Then later on, direct memory address input/output would look something like this:

>>

>> int odp_comp_mem(uint8_t *ptr_in[],

>>                   uint32_t len_in[],

>>                   uint8_t *ptr_out[],

>>                   uint32_t *len_out[],

>>                   const odp_comp_op_param_t param[],

>>                   int num_ptr);

>>

>>

>>> +

>>> + /**

>>> + * Comp API session creation parameters

>>> + *

>>> + */

>>> +typedef struct odp_comp_session_param_t {

>>> +     /** Compress vs. Decompress operation */

>>> +     odp_comp_op_t op;

>>> +

>>> +     /** Sync vs Async

>>> +      *

>>> +      * When mode = ODP_COMP_SYNC,

>>> odp_comp_compress()/odp_comp_decomp()

>>> +      * should be called.

>>> +      *

>>> +      * When mode = ODP_COMP_ASYNC, odp_comp_compress_enq()/

>>> +      * odp_comp_decomp_enq() should be called.

>>> +      *

>>> +      * Use odp_comp_capability() for supported mode.

>>> +      *

>>> +      */

>>> +     odp_comp_op_mode_t mode;

>>> +

>>> +     /** Compression algorithm

>>> +      *

>>> +      *  Use odp_comp_capability() for supported algorithms.

>>> +      */

>>> +     odp_comp_alg_t comp_alg;

>>> +

>>> +     /** Hash algorithm

>>> +      *

>>> +      *  Use odp_comp_alg_capability() for supported hash algo for

>>> +      *  compression algo given as comp_alg. Implementation should

>>> not

>>> +      *  support hash only operation on data. output should always

>>> contain

>>> +      *  data + hash.

>>> +      *

>>> +      */

>>> +     odp_comp_hash_alg_t hash_alg;

>>> +

>>> +     /** parameters specific to compression */

>>> +     odp_comp_alg_param_t alg_param;

>>> +

>>> +     /** Async mode completion event queue

>>> +      *

>>> +      * When mode = ODP_COMP_ASYNC, user should wait on

>>> ODP_EVENT_PACKET

>>> +      * with subtype ODP_EVENT_PACKET_COMP on this queue.

>>> +      *

>>> +      * By default, implementation enqueues completion events in-

>>> order-of

>>> +      * request submission and thus queue is considered ordered.

>>> +      *

>>> +      * Please note, behavior could be changed or enhanced

>>> +      * to queue event in-order-of their completion to enable

>>> +      * performance-oriented application to leverage hw offered

>>> parallelism.

>>> +      * However, this will be subject to application requirement and

>>> more

>>> +      * explicit defined use-case.

>>> +      *

>>> +      */

>>> +     odp_queue_t compl_queue;

>>> +} odp_comp_session_param_t;

>>> +

>>> +/**

>>> + * Comp API operation parameters.

>>> + * Called to process each data unit.

>>> + *

>>> + */

>>> +typedef struct odp_comp_op_param_t {

>>> +     /** Session handle from creation */

>>> +     odp_comp_session_t session;

>>> +

>>> +     /** User context */

>>> +     void *ctx;

>>> +

>>> +     /** Boolean indicating End of data, where

>>> +      *

>>> +      *   true : last chunk

>>> +      *

>>> +      *   false: more to follow

>>> +      *

>>> +      * If set to true, indicates this is the last chunk of

>>> +      * data. After processing of last chunk of data is complete

>>> i.e.

>>> +      * call returned with any error code except

>>> ODP_COMP_ERR_OUT_OF_SPACE,

>>> +      * implementation should move algorithm to stateless mode

>>> +      * for the next batch of operations, i.e. reset history,

>>> +      * insert 'End of Block' marker into compressed data stream(if

>>> +      * supported by algo).See deflate/zlib for interpretation of

>>> +      * stateless/stateful.

>>> +      *

>>> +      * For stateless compressions (ex ipcomp), last should be set

>>> to 'true'

>>> +      * for every input packet processing call.

>>> +      *

>>> +      * For compression + hash, digest will be available after

>>> +      * last chunk is processed completely. In case of

>>> +      * ODP_COMP_ERR_OUT_OF_SPACE, application should keep on

>>> calling

>>> +      * odp_comp_xxx() API with more output buffer unless call

>>> returns

>>> +      * with ODP_COMP_ERR_NONE or other failure code except

>>> +      *  ODP_COMP_ERR_OUT_OF_SPACE.

>>> +      */

>>> +     odp_bool_t last;

>>> +

>>> +     /** Input data */

>>> +     odp_comp_data_t input;

>> I'd move this out of the struct, since it's not operation parameter but the target of the operation. From call to call, everything else may remain constant, but the target is not constant.

>>

>>> +

>>> +     /** placeholder for output data.

>>> +      *

>>> +      * For Compression/Decompression+hash session,

>>> +      * output  will store both data and digest(with digest appended

>>> at

>>> +      * end-of-data). User should pass packet of sufficiently large

>>> size

>>> +      * to store digest.

>>> +      *

>>> +      */

>>> +     odp_comp_data_t output;

>> Same here.

>>

>>> +} odp_comp_op_param_t;

>>> +

>>> +/**

>>> + * Comp API per operation result

>>> + *

>>> + */

>>> +typedef struct odp_comp_op_result_t {

>>> +     /** User context from request */

>>> +     void *ctx;

>>> +

>>> +     /** Operation Return Code */

>>> +     odp_comp_err_t err;

>>> +

>>> +     /** Pointer to output.Valid when odp_comp_err_t is

>>> +      * ODP_COMP_ERR_NONE or ODP_COMP_ERR_OUT_OF_SPACE

>>> +      *

>>> +      * Contain data after compression/decompression operation,

>>> +      * or data + digest for compression/decompression + hash

>>> operation.

>>> +      *

>>> +      */

>>> +     odp_comp_data_t output;

>>

>> This would also go out of the struct.

>>

>>

>>> +} odp_comp_op_result_t;

>>> +

>> -Petri

>>

>>

> .

>
shally verma Aug. 29, 2017, 7:26 a.m. | #5
Based on last discussion, I was reworking to add odp_comp_op_pkt ()
API based on Crypto design. Help me to answer with these questions:

1. Current crypto packet base API is not giving Error code as an
output to its sync version i.e. in int odp_crypto_op(const
odp_packet_t pkt_in[],......), I do not see where it is returning
odp_crypto_packet_result_t. Can anyone help?

2. Current crypto version of odp_crypto_op(odp_pkt_t pkt_in[] ...)
does not have two separate versions for encryption and decryption, whereas
in Compression, we added two: one for compress and another for
decompress.
So do we want to retain two separate flavor or unify like crypto
packet based api? Ex.
odp_comp_op_pkt ( ... ) OR
odp_comp_compress_pkt( ...),
odp_comp_decompress_pkt(),
odp_comp_compress_pkt_enq() and so on...?



Thanks
Shally

On Tue, Aug 8, 2017 at 11:36 PM, Narayana Prasad Athreya
<pathreya@caviumnetworks.com> wrote:
> Can someone explain what is the use-case of #1 and why the use-case cannot

> be met with proposed API?

>

> PRasad

>

> On Tuesday 08 August 2017 10:21 PM, shally verma wrote:

>

> Petri/Berry

>

> As per discussion in today's call, this is what I summarize :

>

> Two new requirements added:

>

> 1. Support compression / decompression of multiple ranges with in one

> single packet and

> 2. Operating on multiple packets with in call where each packet may

> further carry multiple range.

>

> To meet 1st requirement, current proposal says, calling

> odp_packet_compress/decomp() for each packet per range as it allows

> easy and clean management of output buffer and also more flexibility

> per various possible application use cases:

> - call can be made in statefull or stateless mode per each range, Or

> - Modify  headers according to each range output,

> - Application manage output buffers at their end,

> - Async notification on out of space condition  will be easily maintained.

>

> Currently, we do support file-based compression using ODP APIs, but

> i.e. on a stream of data bytes where 1 chunk occupies 1 packet (which

> can be segmented/unsegmented), not on a stream of Packets.

> Is there a use-case to have a file which consists of stream of

> Packets? (where packet is of type HTTP Packet ?)

>

> New proposal says "allow multiple ranges with in one single call". Few

> concerns raised for this, key one include:

>

> - How do we know each range be compressed in stateful  or stateless

> mode i.e. each range is independent Or dependent?

> - How do we handle out_of_space error while operating on individual

> range? Especially for the case when Implementation try to use HW

> parallelization for better throughput?

> - How do we support same design in asynchronous mode ??

> - How do we see it  - as performance gain ? Or ease-of-use?  Or it may

> end up introducing more overhead to implementation.

>

> Please feedback your inputs to design issues as envisioned.

>

> For now we can focus on requirement #1 as design for requirement # 2

> will be based on outcome of #1.

>

> Thanks

> Shally

>

> On Tue, Aug 8, 2017 at 6:38 PM, Savolainen, Petri (Nokia - FI/Espoo)

> <petri.savolainen@nokia.com> wrote:

>

> -----Original Message-----

> From: lng-odp [mailto:lng-odp-bounces@lists.linaro.org] On Behalf Of

> Github ODP bot

> Sent: Friday, August 04, 2017 4:00 PM

> To: lng-odp@lists.linaro.org

> Subject: [lng-odp] [PATCH API-NEXT v8 1/1] comp: compression spec

>

> From: Shally Verma <shally.verma@cavium.com>

>

> Signed-off-by: Shally Verma <shally.verma@cavium.com>

> Signed-off-by: Mahipal Challa <mahipal.challa@cavium.com> Cc

> PrasadAthreya.Narayana@cavium.com

> ---

> /** Email created from pull request 102 (1234sv:api-next)

>  ** https://github.com/Linaro/odp/pull/102

>  ** Patch: https://github.com/Linaro/odp/pull/102.patch

>  ** Base sha: 8390f890d4bd2babb63a24f7b15d2f4763e44050

>  ** Merge commit sha: fbdff8c82a19f5b640ae299204b3bb1bbbefdccb

>  **/

>  include/odp/api/spec/comp.h | 815

> ++++++++++++++++++++++++++++++++++++++++++++

>  1 file changed, 815 insertions(+)

>  create mode 100644 include/odp/api/spec/comp.h

>

> diff --git a/include/odp/api/spec/comp.h b/include/odp/api/spec/comp.h

> new file mode 100644

> index 00000000..2956094c

> --- /dev/null

> +++ b/include/odp/api/spec/comp.h

> @@ -0,0 +1,815 @@

> +/* Copyright (c) 2013, Linaro Limited

>

> Year 2017

>

> +

> +/**

> + * Comp API hash algorithm

> + *

> + */

> +typedef enum {

> +     /** ODP_COMP_HASH_ALG_NONE*/

>

> This kind of comment is not very helpful. Each enumeration needs explanation

> - like odp_comp_alg_t under.

>

> +     ODP_COMP_HASH_ALG_NONE,

> +     /** ODP_COMP_HASH_ALG_SHA1*/

> +     ODP_COMP_HASH_ALG_SHA1,

> +     /**  ODP_COMP_HASH_ALG_SHA256*/

> +     ODP_COMP_HASH_ALG_SHA256

> +} odp_comp_hash_alg_t;

> +

> +/**

> + * Comp API compression algorithm

> + *

> + */

> +typedef enum {

> +     /** No algorithm specified.

> +      * Means no compression, output == input.

> +      * if provided, no operation (compression/decompression or

> hash)

> +      * applied on input. Added for testing purpose.

> +      */

> +     ODP_COMP_ALG_NULL,

> +     /** DEFLATE - RFC1951 */

> +     ODP_COMP_ALG_DEFLATE,

> +     /** ZLIB - RFC1950 */

> +     ODP_COMP_ALG_ZLIB,

> +     /** LZS */

> +     ODP_COMP_ALG_LZS

> +} odp_comp_alg_t;

> +

> +

> +/**

> + * Hash algorithms in a bit field structure

> + *

> + */

> +typedef union odp_comp_hash_algos_t {

> +     /** hash algorithms */

> +     struct {

> +             /** SHA-1 */

> +             uint32_t sha1  : 1;

> +

> +             /** SHA with 256 bits of Message Digest */

> +             uint32_t sha256 : 1;

>

> Need to be more explicit in algorithm definition: SHA-1, SHA-256, ...

> algorithm (SHA-2 would also do, but we use SHA-256 in crypto API since it

> seems to be used by standards).

>

> Actually, these explanations should go under enum definitions and then just

> refer to those enums here - like odp_comp_algos_t under.

>

>

> +

> +     } bit;

> +

> +     /** All bits of the bit field structure

> +      *

> +      * This field can be used to set/clear all flags, or bitwise

> +      * operations over the entire structure.

> +      */

> +     uint32_t all_bits;

> +} odp_comp_hash_algos_t;

> +

> +/**

> + * Comp algorithms in a bit field structure

> + *

> + */

> +typedef union odp_comp_algos_t {

> +     /** Compression algorithms */

> +     struct {

> +             /** ODP_COMP_ALG_NULL */

> +             uint32_t null       : 1;

> +

> +             /** ODP_COMP_ALG_DEFLATE */

> +             uint32_t deflate    : 1;

> +

> +             /** ODP_COMP_ALG_ZLIB */

> +             uint32_t zlib       : 1;

> +

> +             /** ODP_COMP_ALG_LZS */

> +             uint32_t lzs        :1;

> +     } bit;

> +

> +     /** All bits of the bit field structure

> +      * This field can be used to set/clear all flags, or bitwise

> +      * operations over the entire structure.

> +      */

> +     uint32_t all_bits;

> +} odp_comp_algos_t;

> +

> +/**

> + * Compression Interface Capabilities

> + *

> + */

> +typedef struct odp_comp_capability_t {

> +     /** Maximum number of  sessions */

> +     uint32_t max_sessions;

> +

> +     /** Supported compression algorithms */

> +     odp_comp_algos_t comp_algs;

>

> No need to save one character => comp_algos

>

> +

> +     /** Supported hash algorithms. */

> +     odp_comp_hash_algos_t hash_algs;

>

> hash_algos

>

> +

> +     /* sync/async mode of operation support.

> +      * Implementation should support at least one of the modes.

> +      */

>

> "mode" field definition missing on this line ?

>

>

> +

> +     /** Support type for synchronous operation mode

> (ODP_COMP_SYNC).

> +      *  User should set odp_comp_session_param_t:mode based on

> +      *  support level as indicated by this param.

> +      */

> +     odp_support_t sync;

> +

> +     /** Support type for asynchronous operation mode

> (ODP_COMP_ASYNC).

> +      *  User should set odp_comp_session_param_t:mode param based

> on

> +      *  support level as indicated by this param.

> +      */

> +     odp_support_t async;

> +} odp_comp_capability_t;

> +

> +/**

> + * Hash algorithm capabilities

> + *

> + */

> +typedef struct odp_comp_hash_alg_capability_t {

> +     /** Digest length in bytes */

> +     uint32_t digest_len;

> +} odp_comp_hash_alg_capability_t;

> +

> +/**

> + * Compression algorithm capabilities structure for each algorithm.

> + *

> + */

> +typedef struct odp_comp_alg_capability_t {

> +     /** Enumeration indicating alg support for dictionary load */

> +     odp_support_t support_dict;

> +

> +     /** Optional Maximum length of dictionary supported

> +      *   by implementation of the algorithm.

> +      *

> +      *   Invalid if support_dict == ODP_SUPPORT_NO.

> +      *

> +      *   Implementation use dictionary of length less than or equal

> to value

> +      *   indicated by dict_len. if set to 0 and if support_dict ==

> +      *   ODP_SUPPORT_YES, then implementation will use dictionary

> length

> +      *   less than or equal to user input length in

> odp_comp_set_dict()

> +      *   and update used dictionary length at output of the call.

> +      *

> +      */

> +     uint32_t dict_len;

> +

> +     /* Maximum compression level supported by implementation of

> this algo.

> +      *  Indicates number of compression levels supported by

> implementation,

> +      *

> +      * where,

> +      *

> +      * 1 means fastest compression i.e. output produced at

> +      * best possible speed at the expense of compression quality,

> and

> +      *

> +      * max_level means best compression i.e.output produced is best

> possible

> +      * compressed content at the expense of speed.

> +      *

> +      * Example, if max_level = 4 , it means algorithm supports four

> levels

> +      * of compression from value 1 up to 4. User can set this value

> from

> +      * 1 (fastest compression) to 4 (best compression).

> +      * See RFC1950 for an example explanation to level.

> +      *

> +      * Value 0 mean implementation use its default value.

> +      *

> +      */

> +     uint32_t max_level;

> +

> +     /* Supported hash algorithms */

> +     odp_comp_hash_algos_t hash_alg;

> +} odp_comp_alg_capability_t;

> +

> +/**

> + * Comp API dictionary type

> + *

> + */

> +typedef struct odp_comp_dict_t {

> +     /** pointer to character array */

> +     uint8_t *buf;

>

> Is the data input only? If it is, then this should be const data pointer.

>

> What is expected about the format of the data? Null terminated strings? If

> it's strings, then e.g. const char * would be better.

>

>

> +     /** length of the dictionary. */

> +     uint32_t len;

> +} odp_comp_dict_t;

> +

> +/**

> + * Comp API algorithm specific parameters

> + *

> + */

> +typedef struct odp_comp_alg_param_t {

> +     struct comp_alg_def_param {

> +             /** compression level where

> +              * ODP_COMP_LEVEL_MIN <= level <= ODP_COMP_LEVEL_MAX

> +              */

> +             odp_comp_level_t level;

> +             /** huffman code to use */

> +             odp_comp_huffman_code_t comp_code;

> +     } deflate;

>

> Doxygen comment missing? Is this filled in only if algorithms is ALG_DEFLATE

> ?

>

>

> +     struct comp_alg_zlib_param {

> +                     /** deflate algo params */

> +                     struct comp_alg_def_param def;

> +     } zlib;

>

> Same here.

>

> +} odp_comp_alg_param_t;

> +

> +/**

> + * Comp API data range specifier

> + *

> + */

> +typedef union odp_comp_data_t {

> +     struct {

> +             /** packet */

> +             odp_packet_t packet;

> +

> +             /** packet data range to operate on  */

> +             odp_packet_data_range_t data_range;

>

> Barry have indicated use case for defining multiple ranges per packet. How

> that will be handled?

>

>

> +     } pkt;

> +} odp_comp_data_t;

>

> Packet is quite deep in parameter structures. E.g. in compress call

>

> param.input.pkt.packet            = pkt;

> param.input.pkt.data_range.offset = 0;

> param.input.pkt.data_range.len    = len;

>

> odp_comp_compress(&param, &result)

>

>

>

> I'd pull up the packet handle(s) from param, and maybe have separate calls

> for full packets and ranges:

>

> int odp_comp_packet(const odp_packet_t pkt_in[],

>                     odp_packet_t pkt_out[],

>                     const odp_comp_op_param_t param[],

>                     int num_pkt);

>

> typedef struct odp_comp_packet_range_t {

>         int num_range;

>

>         struct {

>                 uint32_t offset;

>                 uint32_t len;

>         } range[];

>

> } odp_comp_packet_range_t;

>

> int odp_comp_packet_range(const odp_packet_t pkt_in[],

>                           const odp_comp_packet_range_t range_in[],

>                           odp_packet_t pkt_out[],

>                           // also range_out needed ??

>                           const odp_comp_op_param_t param[],

>                           int num_pkt);

>

>

> Then later on, direct memory address input/output would look something like

> this:

>

> int odp_comp_mem(uint8_t *ptr_in[],

>                  uint32_t len_in[],

>                  uint8_t *ptr_out[],

>                  uint32_t *len_out[],

>                  const odp_comp_op_param_t param[],

>                  int num_ptr);

>

>

> +

> + /**

> + * Comp API session creation parameters

> + *

> + */

> +typedef struct odp_comp_session_param_t {

> +     /** Compress vs. Decompress operation */

> +     odp_comp_op_t op;

> +

> +     /** Sync vs Async

> +      *

> +      * When mode = ODP_COMP_SYNC,

> odp_comp_compress()/odp_comp_decomp()

> +      * should be called.

> +      *

> +      * When mode = ODP_COMP_ASYNC, odp_comp_compress_enq()/

> +      * odp_comp_decomp_enq() should be called.

> +      *

> +      * Use odp_comp_capability() for supported mode.

> +      *

> +      */

> +     odp_comp_op_mode_t mode;

> +

> +     /** Compression algorithm

> +      *

> +      *  Use odp_comp_capability() for supported algorithms.

> +      */

> +     odp_comp_alg_t comp_alg;

> +

> +     /** Hash algorithm

> +      *

> +      *  Use odp_comp_alg_capability() for supported hash algo for

> +      *  compression algo given as comp_alg. Implementation should

> not

> +      *  support hash only operation on data. output should always

> contain

> +      *  data + hash.

> +      *

> +      */

> +     odp_comp_hash_alg_t hash_alg;

> +

> +     /** parameters specific to compression */

> +     odp_comp_alg_param_t alg_param;

> +

> +     /** Async mode completion event queue

> +      *

> +      * When mode = ODP_COMP_ASYNC, user should wait on

> ODP_EVENT_PACKET

> +      * with subtype ODP_EVENT_PACKET_COMP on this queue.

> +      *

> +      * By default, implementation enqueues completion events in-

> order-of

> +      * request submission and thus queue is considered ordered.

> +      *

> +      * Please note, behavior could be changed or enhanced

> +      * to queue event in-order-of their completion to enable

> +      * performance-oriented application to leverage hw offered

> parallelism.

> +      * However, this will be subject to application requirement and

> more

> +      * explicit defined use-case.

> +      *

> +      */

> +     odp_queue_t compl_queue;

> +} odp_comp_session_param_t;

> +

> +/**

> + * Comp API operation parameters.

> + * Called to process each data unit.

> + *

> + */

> +typedef struct odp_comp_op_param_t {

> +     /** Session handle from creation */

> +     odp_comp_session_t session;

> +

> +     /** User context */

> +     void *ctx;

> +

> +     /** Boolean indicating End of data, where

> +      *

> +      *   true : last chunk

> +      *

> +      *   false: more to follow

> +      *

> +      * If set to true, indicates this is the last chunk of

> +      * data. After processing of last chunk of data is complete

> i.e.

> +      * call returned with any error code except

> ODP_COMP_ERR_OUT_OF_SPACE,

> +      * implementation should move algorithm to stateless mode

> +      * for the next batch of operations, i.e. reset history,

> +      * insert 'End of Block' marker into compressed data stream(if

> +      * supported by algo).See deflate/zlib for interpretation of

> +      * stateless/stateful.

> +      *

> +      * For stateless compressions (ex ipcomp), last should be set

> to 'true'

> +      * for every input packet processing call.

> +      *

> +      * For compression + hash, digest will be available after

> +      * last chunk is processed completely. In case of

> +      * ODP_COMP_ERR_OUT_OF_SPACE, application should keep on

> calling

> +      * odp_comp_xxx() API with more output buffer unless call

> returns

> +      * with ODP_COMP_ERR_NONE or other failure code except

> +      *  ODP_COMP_ERR_OUT_OF_SPACE.

> +      */

> +     odp_bool_t last;

> +

> +     /** Input data */

> +     odp_comp_data_t input;

>

> I'd move this out of the struct, since it's not operation parameter but the

> target of the operation. From call to call, everything else may remain

> constant, but the target is not constant.

>

> +

> +     /** placeholder for output data.

> +      *

> +      * For Compression/Decompression+hash session,

> +      * output  will store both data and digest(with digest appended

> at

> +      * end-of-data). User should pass packet of sufficiently large

> size

> +      * to store digest.

> +      *

> +      */

> +     odp_comp_data_t output;

>

> Same here.

>

> +} odp_comp_op_param_t;

> +

> +/**

> + * Comp API per operation result

> + *

> + */

> +typedef struct odp_comp_op_result_t {

> +     /** User context from request */

> +     void *ctx;

> +

> +     /** Operation Return Code */

> +     odp_comp_err_t err;

> +

> +     /** Pointer to output.Valid when odp_comp_err_t is

> +      * ODP_COMP_ERR_NONE or ODP_COMP_ERR_OUT_OF_SPACE

> +      *

> +      * Contain data after compression/decompression operation,

> +      * or data + digest for compression/decompression + hash

> operation.

> +      *

> +      */

> +     odp_comp_data_t output;

>

> This would also go out of the struct.

>

>

> +} odp_comp_op_result_t;

> +

>

> -Petri

>

>

> .

>

>
Savolainen, Petri (Nokia - FI/Espoo) Aug. 29, 2017, 11:32 a.m. | #6
> -----Original Message-----

> From: shally verma [mailto:shallyvermacavium@gmail.com]

> Sent: Tuesday, August 29, 2017 10:26 AM

> To: Narayana Prasad Athreya <pathreya@caviumnetworks.com>

> Cc: Savolainen, Petri (Nokia - FI/Espoo) <petri.savolainen@nokia.com>;

> Github ODP bot <odpbot@yandex.ru>; lng-odp@lists.linaro.org; Narayana,

> Prasad Athreya <PrasadAthreya.Narayana@cavium.com>; Mahipal Challa

> <mchalla@cavium.com>; Verma, Shally <shally.verma@cavium.com>

> Subject: Re: [lng-odp] [PATCH API-NEXT v8 1/1] comp: compression spec

> 

> Based on last discussion, I was reworking to add odp_comp_op_pkt ()

> API based on Crypto design. Help me to answer with these questions:

> 

> 1. Current crypto packet base API is not giving Error code as an

> output to its sync version i.e. in int odp_crypto_op(const

> odp_packet_t pkt_in[],......), I do not see where it is returning

> odp_crypto_packet_result_t. Can anyone help?


Error codes are part of operation results:

/**
 * Get crypto operation results from a crypto-processed packet
 *
 * Successful crypto operations of all types (SYNC and ASYNC) produce packets
 * which contain crypto result metadata. This function copies the operation
 * results from a crypto-processed packet. Event subtype of this kind of
 * packet is ODP_EVENT_PACKET_CRYPTO. Results are undefined if a non-crypto
 * processed packet is passed as input.
 *
 * @param         packet  An crypto processed packet (ODP_EVENT_PACKET_CRYPTO)
 * @param[out]    result  Pointer to operation result for output
 *
 * @retval  0     On success
 * @retval <0     On failure
 */
int odp_crypto_result(odp_crypto_packet_result_t *result,
		      odp_packet_t packet);

/**
 * Crypto packet API operation result
 */
typedef struct odp_crypto_packet_result_t {
	/** Request completed successfully */
	odp_bool_t  ok;

	/** Cipher status */
	odp_crypto_op_status_t cipher_status;

	/** Authentication status */
	odp_crypto_op_status_t auth_status;

} odp_crypto_packet_result_t;

/**
 * Cryto API per packet operation completion status
 */
typedef struct odp_crypto_op_status {
	/** Algorithm specific return code */
	odp_crypto_alg_err_t alg_err;

	/** Hardware specific return code */
	odp_crypto_hw_err_t  hw_err;

} odp_crypto_op_status_t;

/**
 * Crypto API algorithm return code
 */
typedef enum {
	/** Algorithm successful */
	ODP_CRYPTO_ALG_ERR_NONE,
	/** Invalid data block size */
	ODP_CRYPTO_ALG_ERR_DATA_SIZE,
	/** Key size invalid for algorithm */
	ODP_CRYPTO_ALG_ERR_KEY_SIZE,
	/** Computed ICV value mismatch */
	ODP_CRYPTO_ALG_ERR_ICV_CHECK,
	/** IV value not specified */
	ODP_CRYPTO_ALG_ERR_IV_INVALID,
} odp_crypto_alg_err_t;


> 

> 2. Current crypto version of odp_crypto_op(odp_pkt_t pkt_in[] ...)

> does not have two separate version for encryption and decryption where

> as in Compression, we added two. One for compress and another for

> decompress.

> So do we want to retain two separate flavor or unify like crypto

> packet based api? Ex.

> odp_comp_op_pkt ( ... ) OR

> odp_comp_compress_pkt( ...),

> odp_comp_decompress_pkt(),

> odp_comp_compress_pkt_enq() and so on...?


Crypto has single operation, IPSEC has two operations (inbound vs outbound). So, both styles are used today. Benefits of an operation per direction are:
* more readable code: odp_comp_compress() vs odp_comp_op()
* possibility to have different set of arguments (parameters) for each direction. E.g. IPSEC does IP fragmentation on output direction and thus needs extra parameters for that, those params are not defined on inbound direction.
* cleaner specification of different operations e.g. "... output of odp_comp_compress()..." vs "... output of odp_comp_op() in compress mode ...."
* easier to extend since a new feature can be added to one side without changing the spec for the other side


BTW, since most of our operations process packets, we don't need to highlight it with "pkt". I'd name odp_comp_compress() for packets, and then later on add odp_comp_compress_mem(), odp_comp_compress_from_mem(), etc for mem -> mem, pkt -> mem operations.

-Petri
shally verma Aug. 29, 2017, 12:36 p.m. | #7
On Tue, Aug 29, 2017 at 5:02 PM, Savolainen, Petri (Nokia - FI/Espoo)
<petri.savolainen@nokia.com> wrote:
>

>

>> -----Original Message-----

>> From: shally verma [mailto:shallyvermacavium@gmail.com]

>> Sent: Tuesday, August 29, 2017 10:26 AM

>> To: Narayana Prasad Athreya <pathreya@caviumnetworks.com>

>> Cc: Savolainen, Petri (Nokia - FI/Espoo) <petri.savolainen@nokia.com>;

>> Github ODP bot <odpbot@yandex.ru>; lng-odp@lists.linaro.org; Narayana,

>> Prasad Athreya <PrasadAthreya.Narayana@cavium.com>; Mahipal Challa

>> <mchalla@cavium.com>; Verma, Shally <shally.verma@cavium.com>

>> Subject: Re: [lng-odp] [PATCH API-NEXT v8 1/1] comp: compression spec

>>

>> Based on last discussion, I was reworking to add odp_comp_op_pkt ()

>> API based on Crypto design. Help me to answer with these questions:

>>

>> 1. Current crypto packet base API is not giving Error code as an

>> output to its sync version i.e. in int odp_crypto_op(const

>> odp_packet_t pkt_in[],......), I do not see where it is returning

>> odp_crypto_packet_result_t. Can anyone help?

>

> Error codes are part of operation results:

>

> /**

>  * Get crypto operation results from an crypto processed packet

>  *

>  * Successful crypto operations of all types (SYNC and ASYNC) produce packets

>  * which contain crypto result metadata. This function copies the operation

>  * results from an crypto processed packet. Event subtype of this kind of

>  * packet is ODP_EVENT_PACKET_crypto. Results are undefined if a non-crypto

>  * processed packet is passed as input.

>  *

>  * @param         packet  An crypto processed packet (ODP_EVENT_PACKET_CRYPTO)

>  * @param[out]    result  Pointer to operation result for output

>  *

>  * @retval  0     On success

>  * @retval <0     On failure

>  */

> int odp_crypto_result(odp_crypto_packet_result_t *result,

>                       odp_packet_t packet);


Ok. That seems user need to make explicit call to this API to get
result, if he want.
So this is optional call in crypto context?

>

> /**

>  * Crypto packet API operation result

>  */

> typedef struct odp_crypto_packet_result_t {

>         /** Request completed successfully */

>         odp_bool_t  ok;

>

>         /** Cipher status */

>         odp_crypto_op_status_t cipher_status;

>

>         /** Authentication status */

>         odp_crypto_op_status_t auth_status;

>

> } odp_crypto_packet_result_t;

>

> /**

>  * Cryto API per packet operation completion status

>  */

> typedef struct odp_crypto_op_status {

>         /** Algorithm specific return code */

>         odp_crypto_alg_err_t alg_err;

>

>         /** Hardware specific return code */

>         odp_crypto_hw_err_t  hw_err;

>

> } odp_crypto_op_status_t;

>

> /**

>  * Crypto API algorithm return code

>  */

> typedef enum {

>         /** Algorithm successful */

>         ODP_CRYPTO_ALG_ERR_NONE,

>         /** Invalid data block size */

>         ODP_CRYPTO_ALG_ERR_DATA_SIZE,

>         /** Key size invalid for algorithm */

>         ODP_CRYPTO_ALG_ERR_KEY_SIZE,

>         /** Computed ICV value mismatch */

>         ODP_CRYPTO_ALG_ERR_ICV_CHECK,

>         /** IV value not specified */

>         ODP_CRYPTO_ALG_ERR_IV_INVALID,

> } odp_crypto_alg_err_t;

>

>

>>

>> 2. Current crypto version of odp_crypto_op(odp_pkt_t pkt_in[] ...)

>> does not have two separate version for encryption and decryption where

>> as in Compression, we added two. One for compress and another for

>> decompress.

>> So do we want to retain two separate flavor or unify like crypto

>> packet based api? Ex.

>> odp_comp_op_pkt ( ... ) OR

>> odp_comp_compress_pkt( ...),

>> odp_comp_decompress_pkt(),

>> odp_comp_compress_pkt_enq() and so on...?

>

> Crypto has single operation, IPSEC has two operations (inbound vs outbound). So, both styles are used today. Benefits of an operation per direction are:

> * more readable code: odp_comp_compress() vs odp_comp_op()

> * possibility to have different set of arguments (parameters) for each direction. E.g. IPSEC does IP fragmentation on output direction and thus needs extra parameters for that, those params are not defined on inbound direction.

> * cleaner specification of different operations e.g. "... output of odp_comp_compress()..." vs "... output of odp_comp_op() in compress mode ...."

> * easier to extend since a new feature can be added to one side without changing the spec for the other side

>

>

> BTW, since most of our operations process packets, we don't need to highlight it with "pkt". I'd name odp_comp_compress() for packets, and then later on add odp_comp_compress_mem(), odp_comp_compress_from_mem(), etc for mem -> mem, pkt -> mem operations.


Ok. no issues. I will retain separate flavor and keep API name in sync
to crypto.

Thanks
Shally

>

> -Petri
shally verma Aug. 29, 2017, 4:18 p.m. | #8
On Tue, Aug 29, 2017 at 6:06 PM, shally verma
<shallyvermacavium@gmail.com> wrote:
> On Tue, Aug 29, 2017 at 5:02 PM, Savolainen, Petri (Nokia - FI/Espoo)

> <petri.savolainen@nokia.com> wrote:

>>

>>

>>> -----Original Message-----

>>> From: shally verma [mailto:shallyvermacavium@gmail.com]

>>> Sent: Tuesday, August 29, 2017 10:26 AM

>>> To: Narayana Prasad Athreya <pathreya@caviumnetworks.com>

>>> Cc: Savolainen, Petri (Nokia - FI/Espoo) <petri.savolainen@nokia.com>;

>>> Github ODP bot <odpbot@yandex.ru>; lng-odp@lists.linaro.org; Narayana,

>>> Prasad Athreya <PrasadAthreya.Narayana@cavium.com>; Mahipal Challa

>>> <mchalla@cavium.com>; Verma, Shally <shally.verma@cavium.com>

>>> Subject: Re: [lng-odp] [PATCH API-NEXT v8 1/1] comp: compression spec

>>>

>>> Based on last discussion, I was reworking to add odp_comp_op_pkt ()

>>> API based on Crypto design. Help me to answer with these questions:

>>>

>>> 1. Current crypto packet base API is not giving Error code as an

>>> output to its sync version i.e. in int odp_crypto_op(const

>>> odp_packet_t pkt_in[],......), I do not see where it is returning

>>> odp_crypto_packet_result_t. Can anyone help?

>>

>> Error codes are part of operation results:

>>

>> /**

>>  * Get crypto operation results from an crypto processed packet

>>  *

>>  * Successful crypto operations of all types (SYNC and ASYNC) produce packets

>>  * which contain crypto result metadata. This function copies the operation

>>  * results from an crypto processed packet. Event subtype of this kind of

>>  * packet is ODP_EVENT_PACKET_crypto. Results are undefined if a non-crypto

>>  * processed packet is passed as input.

>>  *

>>  * @param         packet  An crypto processed packet (ODP_EVENT_PACKET_CRYPTO)

>>  * @param[out]    result  Pointer to operation result for output

>>  *

>>  * @retval  0     On success

>>  * @retval <0     On failure

>>  */

>> int odp_crypto_result(odp_crypto_packet_result_t *result,

>>                       odp_packet_t packet);

>

> Ok. That seems user need to make explicit call to this API to get

> result, if he want.

> So this is optional call in crypto context?

>

Alongside a question of retrieving results for synchronous operations,
have one more questions - in crypto, I dont see separate output packet
data range. So, does it mean it writes to same offset and uses same
length as mentioned for input packet?

Thanks
Shally

>>

>> /**

>>  * Crypto packet API operation result

>>  */

>> typedef struct odp_crypto_packet_result_t {

>>         /** Request completed successfully */

>>         odp_bool_t  ok;

>>

>>         /** Cipher status */

>>         odp_crypto_op_status_t cipher_status;

>>

>>         /** Authentication status */

>>         odp_crypto_op_status_t auth_status;

>>

>> } odp_crypto_packet_result_t;

>>

>> /**

>>  * Cryto API per packet operation completion status

>>  */

>> typedef struct odp_crypto_op_status {

>>         /** Algorithm specific return code */

>>         odp_crypto_alg_err_t alg_err;

>>

>>         /** Hardware specific return code */

>>         odp_crypto_hw_err_t  hw_err;

>>

>> } odp_crypto_op_status_t;

>>

>> /**

>>  * Crypto API algorithm return code

>>  */

>> typedef enum {

>>         /** Algorithm successful */

>>         ODP_CRYPTO_ALG_ERR_NONE,

>>         /** Invalid data block size */

>>         ODP_CRYPTO_ALG_ERR_DATA_SIZE,

>>         /** Key size invalid for algorithm */

>>         ODP_CRYPTO_ALG_ERR_KEY_SIZE,

>>         /** Computed ICV value mismatch */

>>         ODP_CRYPTO_ALG_ERR_ICV_CHECK,

>>         /** IV value not specified */

>>         ODP_CRYPTO_ALG_ERR_IV_INVALID,

>> } odp_crypto_alg_err_t;

>>

>>

>>>

>>> 2. Current crypto version of odp_crypto_op(odp_pkt_t pkt_in[] ...)

>>> does not have two separate version for encryption and decryption where

>>> as in Compression, we added two. One for compress and another for

>>> decompress.

>>> So do we want to retain two separate flavor or unify like crypto

>>> packet based api? Ex.

>>> odp_comp_op_pkt ( ... ) OR

>>> odp_comp_compress_pkt( ...),

>>> odp_comp_decompress_pkt(),

>>> odp_comp_compress_pkt_enq() and so on...?

>>

>> Crypto has single operation, IPSEC has two operations (inbound vs outbound). So, both styles are used today. Benefits of an operation per direction are:

>> * more readable code: odp_comp_compress() vs odp_comp_op()

>> * possibility to have different set of arguments (parameters) for each direction. E.g. IPSEC does IP fragmentation on output direction and thus needs extra parameters for that, those params are not defined on inbound direction.

>> * cleaner specification of different operations e.g. "... output of odp_comp_compress()..." vs "... output of odp_comp_op() in compress mode ...."

>> * easier to extend since a new feature can be added to one side without changing the spec for the other side

>>

>>

>> BTW, since most of our operations process packets, we don't need to highlight it with "pkt". I'd name odp_comp_compress() for packets, and then later on add odp_comp_compress_mem(), odp_comp_compress_from_mem(), etc for mem -> mem, pkt -> mem operations.

>

> Ok. no issues. I will retain separate flavor and keep API name in sync

> to crypto.

>

> Thanks

> Shally

>

>>

>> -Petri
Dmitry Eremin-Solenikov Aug. 29, 2017, 5:11 p.m. | #9
On 29/08/17 19:18, shally verma wrote:
> Alongside a question of retrieving results for synchronous operations,

> have one more questions - in crypto, I dont see separate output packet

> data range. So, does it mean it writes to same offset and uses same

> length as mentioned for input packet?


Yes. Aside of padding, encryption does not change data length (and we do
not support padding ATM). Hash is written at hash_result_offset offset.

-- 
With best wishes
Dmitry
shally verma Aug. 30, 2017, 7:12 a.m. | #10
On Tue, Aug 29, 2017 at 10:41 PM, Dmitry Eremin-Solenikov
<dmitry.ereminsolenikov@linaro.org> wrote:
> On 29/08/17 19:18, shally verma wrote:

>> Alongside a question of retrieving results for synchronous operations,

>> have one more questions - in crypto, I dont see separate output packet

>> data range. So, does it mean it writes to same offset and uses same

>> length as mentioned for input packet?

>

> Yes. Aside of padding, encryption does not change data length (and we do

> not support padding ATM). Hash is written at hash_result_offset offset.

>

> --

Ok. and could you also clarify on this
" retrieving results for synchronous call odp_crypto_op(const pkt_in[]
...)" . How do user get result per packet after this call is over ?

Thanks
Shally

> With best wishes

> Dmitry

Patch

diff --git a/include/odp/api/spec/comp.h b/include/odp/api/spec/comp.h
new file mode 100644
index 00000000..2956094c
--- /dev/null
+++ b/include/odp/api/spec/comp.h
@@ -0,0 +1,815 @@ 
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier:	BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP Compression
+ */
+
+#ifndef ODP_API_COMP_H_
+#define ODP_API_COMP_H_
+
+#include <odp/visibility_begin.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/support.h>
+#include <odp/api/packet.h>
+
+/** @defgroup odp_compression ODP COMP
+ *  ODP Compression defines API set to compress/decompress along with hash
+ *  operations on data. Hash is calculated on plaintext.
+ *
+ *  if opcode = ODP_COMP_COMPRESS, then it will Compress and apply hash,
+ *  if opcode = ODP_COMP_DECOMPRESS, then it will Decompress and apply
+ *  hash.
+ *  Independent hash-only operations are not supported. Implementation should
+ *  perform hash along with valid compression algo.
+ *  Macros, enums, types and operations to utilize compression.
+ *  @{
+ */
+
+/**
+ * @def ODP_COMP_SESSION_INVALID
+ * Invalid session handle
+ */
+
+/**
+ * @typedef odp_comp_session_t
+ * Compression/Decompression session handle
+ */
+
+/**
+ * Compression API operation mode
+ */
+typedef enum {
+	/** Synchronous, return results immediately */
+	ODP_COMP_SYNC,
+	/** Asynchronous, return results via event queue */
+	ODP_COMP_ASYNC
+} odp_comp_op_mode_t;
+
+/**
+ * Comp API operation type.
+ *
+ */
+typedef enum {
+	/** Compress  */
+	ODP_COMP_OP_COMPRESS,
+	/** Decompress */
+	ODP_COMP_OP_DECOMPRESS
+} odp_comp_op_t;
+
+/**
+ * Comp API hash algorithm
+ *
+ */
+typedef enum {
+	/** ODP_COMP_HASH_ALG_NONE*/
+	ODP_COMP_HASH_ALG_NONE,
+	/** ODP_COMP_HASH_ALG_SHA1*/
+	ODP_COMP_HASH_ALG_SHA1,
+	/**  ODP_COMP_HASH_ALG_SHA256*/
+	ODP_COMP_HASH_ALG_SHA256
+} odp_comp_hash_alg_t;
+
+/**
+ * Comp API compression algorithm
+ *
+ */
+typedef enum {
+	/** No algorithm specified.
+	 * Means no compression, output == input.
+	 * If provided, no operation (compression/decompression or hash)
+	 * applied on input. Added for testing purpose.
+	 */
+	ODP_COMP_ALG_NULL,
+	/** DEFLATE - RFC1951 */
+	ODP_COMP_ALG_DEFLATE,
+	/** ZLIB - RFC1950 */
+	ODP_COMP_ALG_ZLIB,
+	/** LZS */
+	ODP_COMP_ALG_LZS
+} odp_comp_alg_t;
+
+/**
+ * Comp API session creation return code
+ *
+ */
+typedef enum {
+	/** Session created */
+	ODP_COMP_SES_CREATE_ERR_NONE,
+	/** Creation failed, no resources */
+	ODP_COMP_SES_CREATE_ERR_ENOMEM,
+	/** Creation failed, bad compression params */
+	ODP_COMP_SES_CREATE_ERR_INV_COMP,
+	/** Creation failed, bad hash params */
+	ODP_COMP_SES_CREATE_ERR_INV_HASH,
+	/** Creation failed,requested configuration not supported*/
+	ODP_COMP_SES_CREATE_ERR_NOT_SUPPORTED
+} odp_comp_ses_create_err_t;
+
+/**
+ * Comp API operation return codes
+ *
+ */
+typedef enum {
+	/** Operation completed successfully*/
+	ODP_COMP_ERR_NONE,
+	/** Operation paused due to insufficient output buffer.
+	*
+	* This is not an error condition. On seeing this situation,
+	* Implementation should maintain context of in-progress operation and
+	* application should call packet processing API again with valid
+	* output buffer but no other alteration to operation params
+	* (odp_comp_op_param_t).
+	*
+	* if using async mode, application should either make sure to
+	* provide sufficient output buffer size OR maintain relevant
+	* context (or ordering) information with respect to each input packet
+	* en-queued for processing.
+	*
+	*/
+	ODP_COMP_ERR_OUT_OF_SPACE,
+	/** Invalid user data pointers*/
+	ODP_COMP_ERR_DATA_PTR,
+	/** Invalid input data size*/
+	ODP_COMP_ERR_DATA_SIZE,
+	/**  Compression and/or hash Algo failure*/
+	ODP_COMP_ERR_ALGO_FAIL,
+	/** Error if operation has been requested in an invalid state */
+	ODP_COMP_ERR_INV_STATE,
+	/** Error if API does not support any of the operational parameter. */
+	ODP_COMP_ERR_NOT_SUPPORTED,
+	/** Error if session is invalid. */
+	ODP_COMP_ERR_INV_SESS
+} odp_comp_err_t;
+
+/**
+ * Comp API enumeration for preferred compression level/speed. Applicable
+ * only for compression operation not decompression.
+ * Value provided defines a trade-off between speed and compression ratio.
+ *
+ * If compression level == ODP_COMP_LEVEL_MIN, output will be produced at
+ * fastest possible rate,
+ *
+ * If compression level == ODP_COMP_LEVEL_MAX, output will be highest possible
+ * compression,
+ *
+ * compression level == ODP_COMP_LEVEL_DEFAULT means implementation will use
+ * its default choice of compression level.
+ *
+ */
+typedef enum {
+	/* Use implementation default */
+	ODP_COMP_LEVEL_DEFAULT = 0,
+	/** Minimum compression (fastest in speed) */
+	ODP_COMP_LEVEL_MIN,
+	/** Maximum compression (slowest in speed) */
+	ODP_COMP_LEVEL_MAX,
+} odp_comp_level_t;
+
+/**
+ * Comp API enumeration for huffman encoding. Valid for compression operation.
+ *
+ */
+typedef enum {
+	/** use implementation default to choose between compression codes  */
+	ODP_COMP_HUFFMAN_CODE_DEFAULT = 0,
+	/** use fixed huffman codes */
+	ODP_COMP_HUFFMAN_CODE_FIXED,
+	/** use dynamic huffman coding */
+	ODP_COMP_HUFFMAN_CODE_DYNAMIC,
+} odp_comp_huffman_code_t;
+
+/**
+ * Hash algorithms in a bit field structure
+ *
+ */
+typedef union odp_comp_hash_algos_t {
+	/** hash algorithms */
+	struct {
+		/** SHA-1 */
+		uint32_t sha1  : 1;
+
+		/** SHA with 256 bits of Message Digest */
+		uint32_t sha256 : 1;
+
+	} bit;
+
+	/** All bits of the bit field structure
+	 *
+	 * This field can be used to set/clear all flags, or bitwise
+	 * operations over the entire structure.
+	 */
+	uint32_t all_bits;
+} odp_comp_hash_algos_t;
+
+/**
+ * Comp algorithms in a bit field structure
+ *
+ */
+typedef union odp_comp_algos_t {
+	/** Compression algorithms */
+	struct {
+		/** ODP_COMP_ALG_NULL */
+		uint32_t null       : 1;
+
+		/** ODP_COMP_ALG_DEFLATE */
+		uint32_t deflate    : 1;
+
+		/** ODP_COMP_ALG_ZLIB */
+		uint32_t zlib       : 1;
+
+		/** ODP_COMP_ALG_LZS */
+		uint32_t lzs        :1;
+	} bit;
+
+	/** All bits of the bit field structure
+	 * This field can be used to set/clear all flags, or bitwise
+	 * operations over the entire structure.
+	 */
+	uint32_t all_bits;
+} odp_comp_algos_t;
+
+/**
+ * Compression Interface Capabilities
+ *
+ */
+typedef struct odp_comp_capability_t {
+	/** Maximum number of  sessions */
+	uint32_t max_sessions;
+
+	/** Supported compression algorithms */
+	odp_comp_algos_t comp_algs;
+
+	/** Supported hash algorithms. */
+	odp_comp_hash_algos_t hash_algs;
+
+	/* sync/async mode of operation support.
+	 * Implementation should support at least one of the modes.
+	 */
+
+	/** Support type for synchronous operation mode (ODP_COMP_SYNC).
+	 *  User should set odp_comp_session_param_t:mode based on
+	 *  support level as indicated by this param.
+	 */
+	odp_support_t sync;
+
+	/** Support type for asynchronous operation mode (ODP_COMP_ASYNC).
+	 *  User should set odp_comp_session_param_t:mode param based on
+	 *  support level as indicated by this param.
+	 */
+	odp_support_t async;
+} odp_comp_capability_t;
+
+/**
+ * Hash algorithm capabilities
+ *
+ */
+typedef struct odp_comp_hash_alg_capability_t {
+	/** Digest length in bytes */
+	uint32_t digest_len;
+} odp_comp_hash_alg_capability_t;
+
+/**
+ * Compression algorithm capabilities structure for each algorithm.
+ *
+ */
+typedef struct odp_comp_alg_capability_t {
+	/** Enumeration indicating alg support for dictionary load */
+	odp_support_t support_dict;
+
+	/** Optional Maximum length of dictionary supported
+	 *   by implementation of the algorithm.
+	 *
+	 *   Invalid if support_dict == ODP_SUPPORT_NO.
+	 *
+	 *   Implementation uses a dictionary of length less than or equal to the
+	 *   value indicated by dict_len. If set to 0 and if support_dict ==
+	 *   ODP_SUPPORT_YES, then implementation will use dictionary length
+	 *   less than or equal to user input length in odp_comp_set_dict()
+	 *   and update used dictionary length at output of the call.
+	 *
+	 */
+	uint32_t dict_len;
+
+	/* Maximum compression level supported by implementation of this algo.
+	 *  Indicates number of compression levels supported by implementation,
+	 *
+	 * where,
+	 *
+	 * 1 means fastest compression i.e. output produced at
+	 * best possible speed at the expense of compression quality, and
+	 *
+	 * max_level means best compression i.e. output produced is best possible
+	 * compressed content at the expense of speed.
+	 *
+	 * Example, if max_level = 4 , it means algorithm supports four levels
+	 * of compression from value 1 up to 4. User can set this value from
+	 * 1 (fastest compression) to 4 (best compression).
+	 * See RFC1950 for an example explanation to level.
+	 *
+	 * Value 0 means the implementation uses its default value.
+	 *
+	 */
+	uint32_t max_level;
+
+	/* Supported hash algorithms */
+	odp_comp_hash_algos_t hash_alg;
+} odp_comp_alg_capability_t;
+
+/**
+ * Comp API dictionary type
+ *
+ */
+typedef struct odp_comp_dict_t {
+	/** pointer to character array */
+	uint8_t *buf;
+	/** length of the dictionary. */
+	uint32_t len;
+} odp_comp_dict_t;
+
+/**
+ * Comp API algorithm specific parameters
+ *
+ */
+typedef struct odp_comp_alg_param_t {
+	struct comp_alg_def_param {
+		/** compression level where
+		 * ODP_COMP_LEVEL_MIN <= level <= ODP_COMP_LEVEL_MAX
+		 */
+		odp_comp_level_t level;
+		/** huffman code to use */
+		odp_comp_huffman_code_t comp_code;
+	} deflate;
+	struct comp_alg_zlib_param {
+			/** deflate algo params */
+			struct comp_alg_def_param def;
+	} zlib;
+} odp_comp_alg_param_t;
+
+/**
+ * Comp API data range specifier
+ *
+ */
+typedef union odp_comp_data_t {
+	struct {
+		/** packet */
+		odp_packet_t packet;
+
+		/** packet data range to operate on  */
+		odp_packet_data_range_t data_range;
+	} pkt;
+} odp_comp_data_t;
+
+ /**
+ * Comp API session creation parameters
+ *
+ */
+typedef struct odp_comp_session_param_t {
+	/** Compress vs. Decompress operation */
+	odp_comp_op_t op;
+
+	/** Sync vs Async
+	 *
+	 * When mode = ODP_COMP_SYNC, odp_comp_compress()/odp_comp_decomp()
+	 * should be called.
+	 *
+	 * When mode = ODP_COMP_ASYNC, odp_comp_compress_enq()/
+	 * odp_comp_decomp_enq() should be called.
+	 *
+	 * Use odp_comp_capability() for supported mode.
+	 *
+	 */
+	odp_comp_op_mode_t mode;
+
+	/** Compression algorithm
+	 *
+	 *  Use odp_comp_capability() for supported algorithms.
+	 */
+	odp_comp_alg_t comp_alg;
+
+	/** Hash algorithm
+	 *
+	 *  Use odp_comp_alg_capability() for supported hash algo for
+	 *  compression algo given as comp_alg. Implementation should not
+	 *  support hash only operation on data. output should always contain
+	 *  data + hash.
+	 *
+	 */
+	odp_comp_hash_alg_t hash_alg;
+
+	/** parameters specific to compression */
+	odp_comp_alg_param_t alg_param;
+
+	/** Async mode completion event queue
+	 *
+	 * When mode = ODP_COMP_ASYNC, user should wait on ODP_EVENT_PACKET
+	 * with subtype ODP_EVENT_PACKET_COMP on this queue.
+	 *
+	 * By default, implementation enqueues completion events in-order-of
+	 * request submission and thus queue is considered ordered.
+	 *
+	 * Please note, behavior could be changed or enhanced
+	 * to queue event in-order-of their completion to enable
+	 * performance-oriented application to leverage hw offered parallelism.
+	 * However, this will be subject to application requirement and more
+	 * explicit defined use-case.
+	 *
+	 */
+	odp_queue_t compl_queue;
+} odp_comp_session_param_t;
+
+/**
+ * Comp API operation parameters.
+ * Called to process each data unit.
+ *
+ */
+typedef struct odp_comp_op_param_t {
+	/** Session handle from creation */
+	odp_comp_session_t session;
+
+	/** User context */
+	void *ctx;
+
+	/** Boolean indicating End of data, where
+	 *
+	 *   true : last chunk
+	 *
+	 *   false: more to follow
+	 *
+	 * If set to true, indicates this is the last chunk of
+	 * data. After processing of last chunk of data is complete i.e.
+	 * call returned with any error code except ODP_COMP_ERR_OUT_OF_SPACE,
+	 * implementation should move algorithm to stateless mode
+	 * for the next batch of operations i.e. reset history,
+	 * insert 'End of Block' marker into compressed data stream (if
+	 * supported by algo). See deflate/zlib for interpretation of
+	 * stateless/stateful.
+	 *
+	 * For stateless compressions (ex ipcomp), last should be set to 'true'
+	 * for every input packet processing call.
+	 *
+	 * For compression + hash, digest will be available after
+	 * last chunk is processed completely. In case of
+	 * ODP_COMP_ERR_OUT_OF_SPACE, application should keep on calling
+	 * odp_comp_xxx() API with more output buffer unless call returns
+	 * with ODP_COMP_ERR_NONE or other failure code except
+	 *  ODP_COMP_ERR_OUT_OF_SPACE.
+	 */
+	odp_bool_t last;
+
+	/** Input data */
+	odp_comp_data_t input;
+
+	/** placeholder for output data.
+	 *
+	 * For Compression/Decompression+hash session,
+	 * output will store both data and digest (with digest appended at
+	 * end-of-data). User should pass packet of sufficiently large size
+	 * to store digest.
+	 *
+	 */
+	odp_comp_data_t output;
+} odp_comp_op_param_t;
+
+/**
+ * Comp API per operation result
+ *
+ */
+typedef struct odp_comp_op_result_t {
+	/** User context from request */
+	void *ctx;
+
+	/** Operation Return Code */
+	odp_comp_err_t err;
+
+	/** Pointer to output. Valid when odp_comp_err_t is
+	 * ODP_COMP_ERR_NONE or ODP_COMP_ERR_OUT_OF_SPACE
+	 *
+	 * Contain data after compression/decompression operation,
+	 * or data + digest for compression/decompression + hash operation.
+	 *
+	 */
+	odp_comp_data_t output;
+} odp_comp_op_result_t;
+
+/**
+ * Query comp capabilities
+ *
+ * Output comp capabilities on success.
+ *
+ * @param[out] capa   Pointer to capability structure for output
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int odp_comp_capability(odp_comp_capability_t *capa);
+
+/**
+ * Query supported compression algorithm capabilities
+ *
+ * Output algorithm capabilities.
+ *
+ * @param      comp     Compression algorithm
+ * @param[out] capa     Array of capability structures for output
+ * @param      num     Maximum number of capability structures to output
+ *
+ * @return Number of capability structures for the algorithm. If this is larger
+ *         than 'num', only 'num' first structures were output and application
+ *         may call the function again with a larger value of 'num'.
+ * @retval <0 on failure
+ */
+int odp_comp_alg_capability(odp_comp_alg_t comp,
+			    odp_comp_alg_capability_t capa[], int num);
+
+ /**
+  * Query supported hash algorithm capabilities
+  *
+  * Outputs all supported configuration options for the algorithm.
+  *
+  * @param      hash     Hash algorithm
+  * @param[out] capa     Array of capability structures for output
+  * @param      num     Maximum number of capability structures to output
+  *
+  * @return Number of capability structures for the algorithm. If this is larger
+  *	    than 'num', only 'num' first structures were output and application
+  *	    may call the function again with a larger value of 'num'.
+  * @retval <0 on failure
+  */
+int odp_comp_hash_alg_capability(odp_comp_hash_alg_t hash,
+				 odp_comp_hash_alg_capability_t capa[],
+				 int num);
+
+ /**
+  * Initialize comp session parameters
+  *
+  * Initialize an odp_comp_session_param_t to its default values for
+  * all fields.
+  *
+  * @param param   Pointer to odp_comp_session_param_t to be initialized
+  */
+void odp_comp_session_param_init(odp_comp_session_param_t *param);
+
+ /**
+  * Compression session creation
+  *
+  * Create a comp session according to the session parameters. Use
+  * odp_comp_session_param_init() to initialize parameters into their
+  * default values.
+  *
+  * @param param             Session parameters
+  * @param session           Created session else ODP_COMP_SESSION_INVALID
+  * @param status            Failure code if unsuccessful
+  *
+  * @retval 0 on success
+  * @retval <0 on failure
+  */
+int odp_comp_session_create(odp_comp_session_param_t *param,
+			    odp_comp_session_t *session,
+			    odp_comp_ses_create_err_t *status);
+
+ /**
+  * Comp session destroy
+  *
+  * Destroy an unused session. Result is undefined if session is being used
+  * (i.e. asynchronous operation is in progress).
+  *
+  * @param session           Session handle
+  *
+  * @retval 0 on success
+  * @retval <0 on failure
+  */
+int odp_comp_session_destroy(odp_comp_session_t session);
+
+/**
+ * Comp set dictionary
+ *
+ * Should be called when there is no operation in progress i.e.
+ * before initiating processing of first chunk of data and
+ * after processing of last chunk of data is complete.
+ *
+ * @param session           Session handle
+ * @param[in,out] dict   Pointer to dictionary.
+ *                          implementation should update length of dictionary
+ *                          used at output.
+ * @retval 0 on success
+ * @retval <0 on failure
+ *
+ * @note:
+ * Application should call odp_comp_alg_capability() to query 'support_dict'
+ * before making call to this API.
+ */
+int odp_comp_set_dict(odp_comp_session_t session,
+		      odp_comp_dict_t *dict);
+
+/**
+ * Comp compress data in synchronous mode
+ *
+ * If session is created in ODP_COMP_SYNC mode, this call waits for the
+ * operation to complete and updates the result at output.
+ *
+ * If session is created in ODP_COMP_ASYNC mode, this call fails and updates
+ * status code ODP_COMP_ERR_NOT_SUPPORTED.
+ *
+ * If operation returns ODP_COMP_ERR_OUT_OF_SPACE, then application should call
+ * API again with valid output buffer (and no-more input) until call completes
+ * with status code except ODP_COMP_ERR_OUT_OF_SPACE.
+ *
+ * For compression + hash, the call returns with hash appended to the end of
+ * last processed chunk of data.
+ * User should compute processed data len = total output len - digest_len, where
+ * digest_len queried through odp_comp_hash_alg_capability().
+ *
+ * @param param[in]         Operation parameters.
+ * @param result[out]       Result of operation.
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int odp_comp_compress(odp_comp_op_param_t   *param,
+		      odp_comp_op_result_t  *result);
+
+/**
+ * Comp compress data in asynchronous mode.
+ *
+ * If session is created in ODP_COMP_ASYNC mode, event will be queued
+ * to completion queue. Application should monitor ODP_EVENT_PACKET with
+ * subtype ODP_EVENT_PACKET_COMP on queue.
+ *
+ * If session is created in ODP_COMP_SYNC mode, call fails with status
+ * code ODP_COMP_ERR_NOT_SUPPORTED.
+ *
+ * For compression + hash, call returns with hash appended to the end of
+ * last processed chunk of data.
+ * User should compute processed data len = total output len - digest_len, where
+ * digest_len queried through odp_comp_hash_alg_capability().
+ *
+ * If operation updates result structure with status
+ * ODP_COMP_ERR_OUT_OF_SPACE then application
+ * should call API again with valid output buffer (and no-more input)
+ * until call completes with any other error code.
+ * Please note it is always recommended that application using async mode,
+ * provide sufficiently large buffer size to avoid ODP_COMP_ERR_OUT_OF_SPACE.
+ * Else it is recommended that application maintain relevant context
+ * with respect to each input processing request to correctly identify
+ * its corresponding enqueued event.
+ *
+ * @param param[in]          Operation parameters.
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int odp_comp_compress_enq(odp_comp_op_param_t *param);
+
+  /**
+   * Comp decompress data in synchronous mode
+   *
+   * If session is created in ODP_COMP_SYNC mode, this call wait for operation
+   * to complete and update result at output
+   *
+   * If session is created in ODP_COMP_ASYNC mode, this call fails and update
+   * status code ODP_COMP_ERR_NOT_SUPPORTED.
+   *
+   * If operation returns ODP_COMP_ERR_OUT_OF_SPACE, then application should
+   * call API again with valid output buffer (and no-more input) until call
+   * completes with status code except ODP_COMP_ERR_OUT_OF_SPACE.
+   *
+   * for decompression + hash, call returns with hash appended to the end of
+   * last processed chunk of data.User should compute processed data len =
+   * total output len - digest_len, where digest_len queried through
+   * odp_comp_hash_alg_capability().
+   *
+   * @param param[in]          Operation parameters.
+   * @param result[out]        Result of operation.
+   *
+   * @retval 0 on success
+   * @retval <0 on failure
+   */
+int odp_comp_decomp(odp_comp_op_param_t   *param,
+		    odp_comp_op_result_t  *result);
+
+ /**
+  * Comp decompress data in asynchronous mode.
+  *
+  * If session is created in ODP_COMP_ASYNC mode, result will be queued
+  * to completion queue. Application should monitor ODP_EVENT_PACKET
+  * with subtype ODP_EVENT_PACKET_COMP on queue.
+  *
+  * If session is created in ODP_COMP_SYNC mode, call fails with status
+  * code ODP_COMP_ERR_NOT_SUPPORTED.
+  *
+  * for decompression+hash, call returns with hash appended to the end of
+  * last processed chunk of data.
+  *
+  * User should compute processed data len = total output length - digest_len,
+  * where digest_len queried through odp_comp_hash_alg_capability().
+  *
+  * If operation updates result structure with status
+  * ODP_COMP_ERR_OUT_OF_SPACE then application
+  * should call API again with valid output buffer (and no-more input)
+  * until call completes with any other error code.
+  *
+  * Please note it is always recommended that application using async mode,
+  * provide sufficiently large buffer size to avoid ODP_COMP_ERR_OUT_OF_SPACE.
+  * Else it is recommended that application maintain required context
+  * to associate event to its respective input.
+  *
+  * @param param[in]          Operation parameters.
+  *
+  * @retval 0 on success
+  * @retval <0 on failure
+  */
+int odp_comp_decomp_enq(odp_comp_op_param_t *param);
+
+ /**
+  * Convert processed packet event to packet handle
+  *
+  * Get packet handle corresponding to processed packet event. Event subtype
+  * must be ODP_EVENT_PACKET_COMP. compression/decompression operation
+  * results can be examined with odp_comp_result().
+  *
+  * @param ev	    Event handle
+  *
+  * @return Valid Packet handle on success,
+  *	    ODP_PACKET_INVALID on failure
+  *
+  * @see odp_event_subtype(), odp_comp_result()
+  *
+  * @ Example Usage
+  * odp_event_t ev = odp_queue_deque(comple_q);
+  * odp_event_subtype_t subtype;
+  * if(ODP_PACKET_EVENT == odp_event_types(ev, &subtype)) {
+  * if(subtype == ODP_PACKET_EVENT_COMP) {
+  *  pkt = odp_comp_packet_from_event(ev);
+  *  odp_comp_op_result_t res;
+  *  odp_comp_result(packet, &res);
+  * }
+  * }
+  */
+odp_packet_t odp_comp_packet_from_event(odp_event_t event);
+
+ /**
+  * Convert processed packet handle to event
+  *
+  * The packet handle must be an output of an compression/decompression
+  * operation.
+  *
+  * @param pkt	    Packet handle from odp_comp_compress_enq()/
+  *		    odp_comp_decomp_enq()
+  *
+  * @return Event handle
+  */
+odp_event_t odp_comp_packet_to_event(odp_packet_t pkt);
+
+ /**
+  * Get compression/decompression operation results from an processed packet.
+  *
+  * Successful compression/decompression operations produce
+  * packets which contain operation result metadata. This function copies the
+  * operation results from an processed packet. Event subtype of this kind
+  * of packet is ODP_EVENT_PACKET_COMP. Results are undefined if input packet
+  * has not be processed by compression/decompression call.
+  *
+  * @param[out]    result  Pointer to operation result for output
+  * @param	   packet  An processed packet (ODP_EVENT_PACKET_COMP)
+  *
+  * @retval  0	   On success
+  * @retval <0	   On failure
+  *
+  * @see odp_comp_compress_enq(), odp_comp_decomp_enq(),
+  *	  odp_comp_packet_from_event()
+  */
+int odp_comp_result(odp_packet_t packet,
+		    odp_comp_op_result_t *result);
+
+/**
+ * Get printable value for an odp_comp_session_t
+ *
+ * @param hdl  odp_comp_session_t handle to be printed
+ * @return     uint64_t value that can be used to print/display this
+ *             handle
+ *
+ * @note This routine is intended to be used for diagnostic purposes
+ * to enable applications to generate a printable value that represents
+ * an odp_comp_session_t handle.
+ */
+uint64_t odp_comp_session_to_u64(odp_comp_session_t hdl);
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <odp/visibility_end.h>
+#endif