CMSIS-NN: Update comments about handling of scratch buffers (#1546)

The caller is responsible for clearing the scratch buffers
for security reasons, if applicable.
The test cases are updated in line with the comments.
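
As an illustration of the intended usage, below is a minimal sketch of how a caller
could size, allocate and clear the scratch buffer around one of the convolution
wrappers. The helper name, the heap allocation and the error code returned on
allocation failure are illustrative assumptions and not part of this change; an
application would typically use static or arena-allocated memory instead.

    #include <stdlib.h>
    #include <string.h>

    #include "arm_nnfunctions.h"

    /* Illustrative only: the convolution/quantization parameters, dimensions and
       data pointers are assumed to have been prepared by the caller elsewhere. */
    arm_cmsis_nn_status run_conv_s8_with_cleared_scratch(const cmsis_nn_conv_params *conv_params,
                                                         const cmsis_nn_per_channel_quant_params *quant_params,
                                                         const cmsis_nn_dims *input_dims,
                                                         const int8_t *input_data,
                                                         const cmsis_nn_dims *filter_dims,
                                                         const int8_t *filter_data,
                                                         const cmsis_nn_dims *bias_dims,
                                                         const int32_t *bias_data,
                                                         const cmsis_nn_dims *output_dims,
                                                         int8_t *output_data)
    {
        cmsis_nn_context ctx = {.buf = NULL, .size = 0};

        /* Ask the library how much scratch memory the selected kernel needs. */
        const int32_t buf_size =
            arm_convolve_wrapper_s8_get_buffer_size(conv_params, input_dims, filter_dims, output_dims);

        if (buf_size > 0)
        {
            ctx.buf = malloc((size_t)buf_size);
            if (ctx.buf == NULL)
            {
                return ARM_CMSIS_NN_ARG_ERROR; /* placeholder for an allocation-failure path */
            }
            /* The library does not clear the scratch buffer; the caller clears it,
               if applicable, for security reasons. */
            memset(ctx.buf, 0, (size_t)buf_size);
            ctx.size = buf_size;
        }

        const arm_cmsis_nn_status status = arm_convolve_wrapper_s8(&ctx,
                                                                   conv_params,
                                                                   quant_params,
                                                                   input_dims,
                                                                   input_data,
                                                                   filter_dims,
                                                                   filter_data,
                                                                   bias_dims,
                                                                   bias_data,
                                                                   output_dims,
                                                                   output_data);

        /* Clearing again before release avoids leaving intermediate data in memory. */
        if (ctx.buf != NULL)
        {
            memset(ctx.buf, 0, (size_t)buf_size);
            free(ctx.buf);
        }

        return status;
    }
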
diff --git a/CMSIS/NN/Include/arm_nnfunctions.h b/CMSIS/NN/Include/arm_nnfunctions.h
index 60d9872..60fa05d 100644
--- a/CMSIS/NN/Include/arm_nnfunctions.h
+++ b/CMSIS/NN/Include/arm_nnfunctions.h
@@ -21,8 +21,8 @@
  * Title:        arm_nnfunctions.h
  * Description:  Public header file for CMSIS NN Library
  *
- * $Date:        4 Aug 2022
- * $Revision:    V.10.1.1
+ * $Date:        7 Aug 2022
+ * $Revision:    V.10.1.2
  *
  * Target Processor:  Cortex-M CPUs
  * -------------------------------------------------------------------- */
@@ -175,11 +175,11 @@
 
 /**
  * @brief s8 convolution layer wrapper function with the main purpose to call the optimal kernel available in
- cmsis-nn
- *        to perform the convolution.
+ *        cmsis-nn to perform the convolution.
  *
  * @param[in, out] ctx            Function context that contains the additional buffer if required by the function.
-                                  arm_convolve_wrapper_s8_get_buffer_size will return the buffer_size if required
+ *                                arm_convolve_wrapper_s8_get_buffer_size will return the buffer_size if required.
+ *                                The caller is expected to clear the buffer, if applicable, for security reasons.
  * @param[in]      conv_params    Convolution parameters (e.g. strides, dilations, pads,...).
  *                                Range of conv_params->input_offset  : [-127, 128]
  *                                Range of conv_params->output_offset : [-128, 127]
@@ -233,11 +233,11 @@
 
 /**
  * @brief s16 convolution layer wrapper function with the main purpose to call the optimal kernel available in
- cmsis-nn
- *        to perform the convolution.
+ *        cmsis-nn to perform the convolution.
  *
  * @param[in, out] ctx            Function context that contains the additional buffer if required by the function.
-                                  arm_convolve_wrapper_s8_get_buffer_size will return the buffer_size if required
+ *                                arm_convolve_wrapper_s16_get_buffer_size will return the buffer_size if required.
+ *                                The caller is expected to clear the buffer, if applicable, for security reasons.
  * @param[in]      conv_params    Convolution parameters (e.g. strides, dilations, pads,...).
  *                                conv_params->input_offset  : Not used
  *                                conv_params->output_offset : Not used
@@ -292,7 +292,8 @@
 /**
  * @brief Basic s8 convolution function
  * @param[in, out] ctx            Function context that contains the additional buffer if required by the function.
-                                  arm_convolve_s8_get_buffer_size will return the buffer_size if required
+ *                                arm_convolve_s8_get_buffer_size will return the buffer_size if required.
+ *                                The caller is expected to clear the buffer, if applicable, for security reasons.
  * @param[in]      conv_params    Convolution parameters (e.g. strides, dilations, pads,...).
  *                                Range of conv_params->input_offset  : [-127, 128]
  *                                Range of conv_params->output_offset : [-128, 127]
@@ -342,7 +343,8 @@
 /**
  * @brief Basic s16 convolution function
  * @param[in, out] ctx            Function context that contains the additional buffer if required by the function.
-                                  arm_convolve_s16_get_buffer_size will return the buffer_size if required
+ *                                arm_convolve_s16_get_buffer_size will return the buffer_size if required.
+ *                                The caller is expected to clear the buffer, if applicable, for security reasons.
  * @param[in]      conv_params    Convolution parameters (e.g. strides, dilations, pads,...).
  *                                conv_params->input_offset  : Not used
  *                                conv_params->output_offset : Not used
@@ -380,7 +382,8 @@
 /**
  * @brief Optimized s16 convolution function
  * @param[in, out] ctx            Function context that contains the additional buffer if required by the function.
-                                  arm_convolve_fast_s16_get_buffer_size will return the buffer_size if required
+ *                                arm_convolve_fast_s16_get_buffer_size will return the buffer_size if required.
+ *                                The caller is expected to clear the buffer, if applicable, for security reasons.
  * @param[in]      conv_params    Convolution parameters (e.g. strides, dilations, pads,...).
  *                                conv_params->input_offset  : Not used
  *                                conv_params->output_offset : Not used
@@ -422,9 +425,9 @@
 /**
  * @brief Get the required buffer size for s16 convolution function
  *
- * @param[in]       input_dims            Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * @param[in]       filter_dims           Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK
- * are the spatial filter dimensions
+ * @param[in]       input_dims    Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
+ * @param[in]       filter_dims   Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK
+ *                                are the spatial filter dimensions
  * @return          The function returns  required buffer size(bytes)
  *
  */
@@ -433,9 +436,9 @@
 /**
  * @brief Get the required buffer size for fast s16 convolution function
  *
- * @param[in]       input_dims            Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * @param[in]       filter_dims           Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK
- * are the spatial filter dimensions
+ * @param[in]       input_dims    Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
+ * @param[in]       filter_dims   Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK
+ *                                are the spatial filter dimensions
  * @return          The function returns required buffer size(bytes)
  *
  */
@@ -655,26 +658,26 @@
 
 /**
  * @brief Fast Q7 version of 1x1 convolution (non-sqaure shape)
- * @param[in]       Im_in        pointer to input tensor
- * @param[in]       dim_im_in_x  input tensor dimension x
- * @param[in]       dim_im_in_y  input tensor dimension y
- * @param[in]       ch_im_in     number of input tensor channels
- * @param[in]       wt           pointer to kernel weights
- * @param[in]       ch_im_out    number of filters, i.e., output tensor channels
- * @param[in]       dim_kernel_x filter kernel size x
- * @param[in]       dim_kernel_y filter kernel size y
- * @param[in]       padding_x    padding size x
- * @param[in]       padding_y    padding size y
- * @param[in]       stride_x     convolution stride x
- * @param[in]       stride_y     convolution stride y
- * @param[in]       bias         pointer to bias
- * @param[in]       bias_shift   amount of left-shift for bias
- * @param[in]       out_shift    amount of right-shift for output
- * @param[in,out]   Im_out       pointer to output tensor
- * @param[in]       dim_im_out_x output tensor dimension x
- * @param[in]       dim_im_out_y output tensor dimension y
- * @param[in,out]   bufferA      pointer to buffer space for input
- * @param[in,out]   bufferB      pointer to buffer space for output
+ * @param[in]       Im_in         pointer to input tensor
+ * @param[in]       dim_im_in_x   input tensor dimension x
+ * @param[in]       dim_im_in_y   input tensor dimension y
+ * @param[in]       ch_im_in      number of input tensor channels
+ * @param[in]       wt            pointer to kernel weights
+ * @param[in]       ch_im_out     number of filters, i.e., output tensor channels
+ * @param[in]       dim_kernel_x  filter kernel size x
+ * @param[in]       dim_kernel_y  filter kernel size y
+ * @param[in]       padding_x     padding size x
+ * @param[in]       padding_y     padding size y
+ * @param[in]       stride_x      convolution stride x
+ * @param[in]       stride_y      convolution stride y
+ * @param[in]       bias          pointer to bias
+ * @param[in]       bias_shift    amount of left-shift for bias
+ * @param[in]       out_shift     amount of right-shift for output
+ * @param[in,out]   Im_out        pointer to output tensor
+ * @param[in]       dim_im_out_x  output tensor dimension x
+ * @param[in]       dim_im_out_y  output tensor dimension y
+ * @param[in,out]   bufferA       pointer to buffer space for input
+ * @param[in,out]   bufferB       pointer to buffer space for output
  * @return     The function returns either
  *                          <code>ARM_CMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
  *                          <code>ARM_CMSIS_NN_SUCCESS</code> on successful completion.
@@ -712,21 +715,22 @@
 /**
  * @brief Fast s8 version for 1x1 convolution (non-square shape)
  *
- * @param[in, out] ctx            Function context that contains the additional buffer if required by the function.
-                                  arm_convolve_1x1_s8_fast_get_buffer_size will return the buffer_size if required
- * @param[in]      conv_params    Convolution parameters (e.g. strides, dilations, pads,...).
- *                                Range of conv_params->input_offset  : [-127, 128]
- *                                Range of conv_params->output_offset : [-128, 127]
- * @param[in]      quant_params   Per-channel quantization info.
- *                                It contains the multiplier and shift values to be applied to each output channel
- * @param[in]      input_dims     Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * @param[in]      input_data     Input (activation) data pointer. Data type: int8
- * @param[in]      filter_dims    Filter tensor dimensions. Format: [C_OUT, 1, 1, C_IN]
- * @param[in]      filter_data    Filter data pointer. Data type: int8
- * @param[in]      bias_dims      Bias tensor dimensions. Format: [C_OUT]
- * @param[in]      bias_data      Optional bias data pointer. Data type: int32
- * @param[in]      output_dims    Output tensor dimensions. Format: [N, H, W, C_OUT]
- * @param[out]     output_data    Output data pointer. Data type: int8
+ * @param[in, out] ctx           Function context that contains the additional buffer if required by the function.
+ *                               arm_convolve_1x1_s8_fast_get_buffer_size will return the buffer_size if required.
+ *                               The caller is expected to clear the buffer, if applicable, for security reasons.
+ * @param[in]      conv_params   Convolution parameters (e.g. strides, dilations, pads,...).
+ *                               Range of conv_params->input_offset  : [-127, 128]
+ *                               Range of conv_params->output_offset : [-128, 127]
+ * @param[in]      quant_params  Per-channel quantization info.
+ *                               It contains the multiplier and shift values to be applied to each output channel
+ * @param[in]      input_dims    Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
+ * @param[in]      input_data    Input (activation) data pointer. Data type: int8
+ * @param[in]      filter_dims   Filter tensor dimensions. Format: [C_OUT, 1, 1, C_IN]
+ * @param[in]      filter_data   Filter data pointer. Data type: int8
+ * @param[in]      bias_dims     Bias tensor dimensions. Format: [C_OUT]
+ * @param[in]      bias_data     Optional bias data pointer. Data type: int32
+ * @param[in]      output_dims   Output tensor dimensions. Format: [N, H, W, C_OUT]
+ * @param[out]     output_data   Output data pointer. Data type: int8
  *
  * @return     The function returns either
  *                  <code>ARM_CMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
@@ -764,22 +768,23 @@
 /**
  * @brief 1xn convolution
  *
- * @param[in, out] ctx            Function context that contains the additional buffer if required by the function.
-                                  arm_convolve_1_x_n_s8_get_buffer_size will return the buffer_size if required
- * @param[in]      conv_params    Convolution parameters (e.g. strides, dilations, pads,...).
- *                                Range of conv_params->input_offset  : [-127, 128]
- *                                Range of conv_params->output_offset : [-128, 127]
- * @param[in]      quant_params   Per-channel quantization info.
- *                                It contains the multiplier and shift values to be applied to each output channel
- * @param[in]      input_dims     Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- * @param[in]      input_data     Input (activation) data pointer. Data type: int8
- * @param[in]      filter_dims    Filter tensor dimensions. Format: [C_OUT, 1, WK, C_IN] where WK is the horizontal
- *                                spatial filter dimension
- * @param[in]      filter_data    Filter data pointer. Data type: int8
- * @param[in]      bias_dims      Bias tensor dimensions. Format: [C_OUT]
- * @param[in]      bias_data      Optional bias data pointer. Data type: int32
- * @param[in]      output_dims    Output tensor dimensions. Format: [N, H, W, C_OUT]
- * @param[out]     output_data    Output data pointer. Data type: int8
+ * @param[in, out] ctx           Function context that contains the additional buffer if required by the function.
+ *                               arm_convolve_1_x_n_s8_get_buffer_size will return the buffer_size if required.
+ *                               The caller is expected to clear the buffer, if applicable, for security reasons.
+ * @param[in]      conv_params   Convolution parameters (e.g. strides, dilations, pads,...).
+ *                               Range of conv_params->input_offset  : [-127, 128]
+ *                               Range of conv_params->output_offset : [-128, 127]
+ * @param[in]      quant_params  Per-channel quantization info.
+ *                               It contains the multiplier and shift values to be applied to each output channel
+ * @param[in]      input_dims    Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
+ * @param[in]      input_data    Input (activation) data pointer. Data type: int8
+ * @param[in]      filter_dims   Filter tensor dimensions. Format: [C_OUT, 1, WK, C_IN] where WK is the horizontal
+ *                               spatial filter dimension
+ * @param[in]      filter_data   Filter data pointer. Data type: int8
+ * @param[in]      bias_dims     Bias tensor dimensions. Format: [C_OUT]
+ * @param[in]      bias_data     Optional bias data pointer. Data type: int32
+ * @param[in]      output_dims   Output tensor dimensions. Format: [N, H, W, C_OUT]
+ * @param[out]     output_data   Output data pointer. Data type: int8
  *
  * @return     The function returns either
  *                  <code>ARM_CMSIS_NN_ARG_ERROR</code> if argument constraints fail. or,
@@ -1067,26 +1072,27 @@
 /**
  * @brief Wrapper function to pick the right optimized s8 depthwise convolution function
  *
- * @param[in, out] ctx            Function context (e.g. temporary buffer). Check the function
- *                                definition file to see if an additional buffer is required.
- *                                Optional function {API}_get_buffer_size() provides the buffer
- *                                size if required.
- * @param[in]      dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...)
- *                                dw_conv_params->dilation is not used.
- *                                Range of dw_conv_params->input_offset : [-127, 128]
- *                                Range of dw_conv_params->output_offset : [-128, 127]
- * @param[in]      quant_params   Per-channel quantization info.
- *                               It contains the multiplier and shift values to be applied to each
- *                               output channel
- * @param[in]      input_dims     Input (activation) tensor dimensions. Format: [H, W, C_IN]
- *                                Batch argument N is not used and assumed to be 1.
- * @param[in]      input_data     Input (activation) data pointer. Data type: int8
- * @param[in]      filter_dims    Filter tensor dimensions. Format: [1, H, W, C_OUT]
- * @param[in]      filter_data    Filter data pointer. Data type: int8
- * @param[in]      bias_dims      Bias tensor dimensions. Format: [C_OUT]
- * @param[in]      bias_data      Bias data pointer. Data type: int32
- * @param[in]      output_dims    Output tensor dimensions. Format: [1, H, W, C_OUT]
- * @param[in, out] output_data    Output data pointer. Data type: int8
+ * @param[in, out] ctx             Function context (e.g. temporary buffer). Check the function
+ *                                 definition file to see if an additional buffer is required.
+ *                                 Optional function {API}_get_buffer_size() provides the buffer
+ *                                 size if required.
+ *                                 The caller is expected to clear the buffer, if applicable, for security reasons.
+ * @param[in]      dw_conv_params  Depthwise convolution parameters (e.g. strides, dilations, pads,...)
+ *                                 dw_conv_params->dilation is not used.
+ *                                 Range of dw_conv_params->input_offset : [-127, 128]
+ *                                 Range of dw_conv_params->output_offset : [-128, 127]
+ * @param[in]      quant_params    Per-channel quantization info.
+ *                                 It contains the multiplier and shift values to be applied to each
+ *                                 output channel
+ * @param[in]      input_dims      Input (activation) tensor dimensions. Format: [H, W, C_IN]
+ *                                 Batch argument N is not used and assumed to be 1.
+ * @param[in]      input_data      Input (activation) data pointer. Data type: int8
+ * @param[in]      filter_dims     Filter tensor dimensions. Format: [1, H, W, C_OUT]
+ * @param[in]      filter_data     Filter data pointer. Data type: int8
+ * @param[in]      bias_dims       Bias tensor dimensions. Format: [C_OUT]
+ * @param[in]      bias_data       Bias data pointer. Data type: int32
+ * @param[in]      output_dims     Output tensor dimensions. Format: [1, H, W, C_OUT]
+ * @param[in, out] output_data     Output data pointer. Data type: int8
  * @return     The function returns
  *                <code>ARM_CMSIS_NN_SUCCESS</code>   -  Successful completion.
  *
@@ -1115,14 +1121,14 @@
 /**
  * @brief Get size of additional buffer required by arm_depthwise_conv_wrapper_s8()
  *
- * @param[in]      dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...)
- *                                Range of dw_conv_params->input_offset : [-127, 128]
- *                                Range of dw_conv_params->input_offset : [-128, 127]
- * @param[in]      input_dims     Input (activation) tensor dimensions. Format: [H, W, C_IN]
- *                                Batch argument N is not used and assumed to be 1.
- * @param[in]      filter_dims    Filter tensor dimensions. Format: [1, H, W, C_OUT]
- * @param[in]      output_dims    Output tensor dimensions. Format: [1, H, W, C_OUT]
- * @return                        Size of additional memory required for optimizations in bytes.
+ * @param[in]      dw_conv_params  Depthwise convolution parameters (e.g. strides, dilations, pads,...)
+ *                                 Range of dw_conv_params->input_offset : [-127, 128]
+ *                                 Range of dw_conv_params->output_offset : [-128, 127]
+ * @param[in]      input_dims      Input (activation) tensor dimensions. Format: [H, W, C_IN]
+ *                                 Batch argument N is not used and assumed to be 1.
+ * @param[in]      filter_dims     Filter tensor dimensions. Format: [1, H, W, C_OUT]
+ * @param[in]      output_dims     Output tensor dimensions. Format: [1, H, W, C_OUT]
+ * @return                         Size of additional memory required for optimizations in bytes.
  *
  */
 int32_t arm_depthwise_conv_wrapper_s8_get_buffer_size(const cmsis_nn_dw_conv_params *dw_conv_params,
@@ -1133,27 +1139,27 @@
 /**
  * @brief Basic s8 depthwise convolution function that doesn't have any constraints on the input dimensions.
  *
- * @param[in, out] ctx            Function context (e.g. temporary buffer). Check the function
- *                                definition file to see if an additional buffer is required.
- *                                Optional function {API}_get_buffer_size() provides the buffer
- *                                size if an additional buffer is required.
- *                                exists if additional memory is.
- * @param[in]      dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...)
- *                                dw_conv_params->dilation is not used.
- *                                Range of dw_conv_params->input_offset : [-127, 128]
- *                                Range of dw_conv_params->input_offset : [-128, 127]
- * @param[in]      quant_params   Per-channel quantization info.
- *                               It contains the multiplier and shift values to be applied to each
- *                               output channel
- * @param[in]      input_dims     Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- *                                Batch argument N is not used.
- * @param[in]      input_data     Input (activation) data pointer. Data type: int8
- * @param[in]      filter_dims    Filter tensor dimensions. Format: [1, H, W, C_OUT]
- * @param[in]      filter_data    Filter data pointer. Data type: int8
- * @param[in]      bias_dims      Bias tensor dimensions. Format: [C_OUT]
- * @param[in]      bias_data      Bias data pointer. Data type: int32
- * @param[in]      output_dims    Output tensor dimensions. Format: [N, H, W, C_OUT]
- * @param[in, out] output_data    Output data pointer. Data type: int8
+ * @param[in, out] ctx             Function context (e.g. temporary buffer). Check the function
+ *                                 definition file to see if an additional buffer is required.
+ *                                 Optional function {API}_get_buffer_size() provides the buffer
+ *                                 size if an additional buffer is required.
+ *                                 The caller is expected to clear the buffer, if applicable, for security reasons.
+ * @param[in]      dw_conv_params  Depthwise convolution parameters (e.g. strides, dilations, pads,...)
+ *                                 dw_conv_params->dilation is not used.
+ *                                 Range of dw_conv_params->input_offset : [-127, 128]
+ *                                 Range of dw_conv_params->output_offset : [-128, 127]
+ * @param[in]      quant_params    Per-channel quantization info.
+ *                                 It contains the multiplier and shift values to be applied to each
+ *                                 output channel
+ * @param[in]      input_dims      Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
+ *                                 Batch argument N is not used.
+ * @param[in]      input_data      Input (activation) data pointer. Data type: int8
+ * @param[in]      filter_dims     Filter tensor dimensions. Format: [1, H, W, C_OUT]
+ * @param[in]      filter_data     Filter data pointer. Data type: int8
+ * @param[in]      bias_dims       Bias tensor dimensions. Format: [C_OUT]
+ * @param[in]      bias_data       Bias data pointer. Data type: int32
+ * @param[in]      output_dims     Output tensor dimensions. Format: [N, H, W, C_OUT]
+ * @param[in, out] output_data     Output data pointer. Data type: int8
  * @return     The function returns <code>ARM_CMSIS_NN_SUCCESS</code>
  *
  * @details
@@ -1175,26 +1181,27 @@
 /**
  * @brief Basic s16 depthwise convolution function that doesn't have any constraints on the input dimensions.
  *
- * @param[in, out] ctx            Function context (e.g. temporary buffer). Check the function
- *                                definition file to see if an additional buffer is required.
- *                                Optional function {API}_get_buffer_size() provides the buffer
- *                                size if an additional buffer is required.
- *                                exists if additional memory is.
- * @param[in]      dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...)
- *                                conv_params->input_offset  : Not used
- *                                conv_params->output_offset : Not used
- * @param[in]      quant_params   Per-channel quantization info.
- *                               It contains the multiplier and shift values to be applied to each
- *                               output channel
- * @param[in]      input_dims     Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- *                                Batch argument N is not used.
- * @param[in]      input_data     Input (activation) data pointer. Data type: int8
- * @param[in]      filter_dims    Filter tensor dimensions. Format: [1, H, W, C_OUT]
- * @param[in]      filter_data    Filter data pointer. Data type: int8
- * @param[in]      bias_dims      Bias tensor dimensions. Format: [C_OUT]
- * @param[in]      bias_data      Bias data pointer. Data type: int64
- * @param[in]      output_dims    Output tensor dimensions. Format: [N, H, W, C_OUT]
- * @param[in, out] output_data    Output data pointer. Data type: int16
+ * @param[in, out] ctx             Function context (e.g. temporary buffer). Check the function
+ *                                 definition file to see if an additional buffer is required.
+ *                                 Optional function {API}_get_buffer_size() provides the buffer
+ *                                 size if an additional buffer is required.
+ *                                 The caller is expected to clear the buffer, if applicable,
+ *                                 for security reasons.
+ * @param[in]      dw_conv_params  Depthwise convolution parameters (e.g. strides, dilations, pads,...)
+ *                                 conv_params->input_offset  : Not used
+ *                                 conv_params->output_offset : Not used
+ * @param[in]      quant_params    Per-channel quantization info.
+ *                                 It contains the multiplier and shift values to be applied to each
+ *                                 output channel
+ * @param[in]      input_dims      Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
+ *                                 Batch argument N is not used.
+ * @param[in]      input_data      Input (activation) data pointer. Data type: int8
+ * @param[in]      filter_dims     Filter tensor dimensions. Format: [1, H, W, C_OUT]
+ * @param[in]      filter_data     Filter data pointer. Data type: int8
+ * @param[in]      bias_dims       Bias tensor dimensions. Format: [C_OUT]
+ * @param[in]      bias_data       Bias data pointer. Data type: int64
+ * @param[in]      output_dims     Output tensor dimensions. Format: [N, H, W, C_OUT]
+ * @param[in, out] output_data     Output data pointer. Data type: int16
  * @return     The function returns <code>ARM_CMSIS_NN_SUCCESS</code>
  *
  * @details
@@ -1216,26 +1223,27 @@
 /**
  * @brief Wrapper function to pick the right optimized s16 depthwise convolution function
  *
- * @param[in, out] ctx            Function context (e.g. temporary buffer). Check the function
- *                                definition file to see if an additional buffer is required.
- *                                Optional function {API}_get_buffer_size() provides the buffer
- *                                size if required.
- * @param[in]      dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...)
- *                                dw_conv_params->dilation is not used.
- *                                Range of dw_conv_params->input_offset : Not used
- *                                Range of dw_conv_params->output_offset : Not used
- * @param[in]      quant_params   Per-channel quantization info.
- *                                It contains the multiplier and shift values to be applied to each
- *                                output channel
- * @param[in]      input_dims     Input (activation) tensor dimensions. Format: [H, W, C_IN]
- *                                Batch argument N is not used and assumed to be 1.
- * @param[in]      input_data     Input (activation) data pointer. Data type: int16
- * @param[in]      filter_dims    Filter tensor dimensions. Format: [1, H, W, C_OUT]
- * @param[in]      filter_data    Filter data pointer. Data type: int8
- * @param[in]      bias_dims      Bias tensor dimensions. Format: [C_OUT]
- * @param[in]      bias_data      Bias data pointer. Data type: int64
- * @param[in]      output_dims    Output tensor dimensions. Format: [1, H, W, C_OUT]
- * @param[in, out] output_data    Output data pointer. Data type: int16
+ * @param[in, out] ctx             Function context (e.g. temporary buffer). Check the function
+ *                                 definition file to see if an additional buffer is required.
+ *                                 Optional function {API}_get_buffer_size() provides the buffer
+ *                                 size if required.
+ *                                 The caller is expected to clear the buffer, if applicable, for security reasons.
+ * @param[in]      dw_conv_params  Depthwise convolution parameters (e.g. strides, dilations, pads,...)
+ *                                 dw_conv_params->dilation is not used.
+ *                                 Range of dw_conv_params->input_offset : Not used
+ *                                 Range of dw_conv_params->output_offset : Not used
+ * @param[in]      quant_params    Per-channel quantization info.
+ *                                 It contains the multiplier and shift values to be applied to each
+ *                                 output channel
+ * @param[in]      input_dims      Input (activation) tensor dimensions. Format: [H, W, C_IN]
+ *                                 Batch argument N is not used and assumed to be 1.
+ * @param[in]      input_data      Input (activation) data pointer. Data type: int16
+ * @param[in]      filter_dims     Filter tensor dimensions. Format: [1, H, W, C_OUT]
+ * @param[in]      filter_data     Filter data pointer. Data type: int8
+ * @param[in]      bias_dims       Bias tensor dimensions. Format: [C_OUT]
+ * @param[in]      bias_data       Bias data pointer. Data type: int64
+ * @param[in]      output_dims     Output tensor dimensions. Format: [1, H, W, C_OUT]
+ * @param[in, out] output_data     Output data pointer. Data type: int16
  * @return     The function returns
  *                <code>ARM_CMSIS_NN_SUCCESS</code>   -  Successful completion.
  *
@@ -1261,14 +1269,14 @@
 /**
  * @brief Get size of additional buffer required by arm_depthwise_conv_wrapper_s16()
  *
- * @param[in]      dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...)
- *                                Range of dw_conv_params->input_offset : Not used
- *                                Range of dw_conv_params->input_offset : Not used
- * @param[in]      input_dims     Input (activation) tensor dimensions. Format: [H, W, C_IN]
- *                                Batch argument N is not used and assumed to be 1.
- * @param[in]      filter_dims    Filter tensor dimensions. Format: [1, H, W, C_OUT]
- * @param[in]      output_dims    Output tensor dimensions. Format: [1, H, W, C_OUT]
- * @return                        Size of additional memory required for optimizations in bytes.
+ * @param[in]      dw_conv_params  Depthwise convolution parameters (e.g. strides, dilations, pads,...)
+ *                                 Range of dw_conv_params->input_offset : Not used
+ *                                 Range of dw_conv_params->output_offset : Not used
+ * @param[in]      input_dims      Input (activation) tensor dimensions. Format: [H, W, C_IN]
+ *                                 Batch argument N is not used and assumed to be 1.
+ * @param[in]      filter_dims     Filter tensor dimensions. Format: [1, H, W, C_OUT]
+ * @param[in]      output_dims     Output tensor dimensions. Format: [1, H, W, C_OUT]
+ * @return                         Size of additional memory required for optimizations in bytes.
  *
  */
 int32_t arm_depthwise_conv_wrapper_s16_get_buffer_size(const cmsis_nn_dw_conv_params *dw_conv_params,
@@ -1311,9 +1319,9 @@
 /**
  * @brief Get the required buffer size for optimized s16 depthwise convolution
  * function with constraint that in_channel equals out_channel.
- * @param[in]       input_dims     Input (activation) tensor dimensions. Format: [1, H, W, C_IN]
- *                                 Batch argument N is not used.
- * @param[in]       filter_dims    Filter tensor dimensions. Format: [1, H, W, C_OUT]
+ * @param[in]       input_dims   Input (activation) tensor dimensions. Format: [1, H, W, C_IN]
+ *                               Batch argument N is not used.
+ * @param[in]       filter_dims  Filter tensor dimensions. Format: [1, H, W, C_OUT]
  * @return          The function returns  required buffer size in bytes
  *
  */
@@ -1387,9 +1395,9 @@
 /**
  * @brief Get the required buffer size for optimized s8 depthwise convolution
  * function with constraint that in_channel equals out_channel.
- * @param[in]       input_dims     Input (activation) tensor dimensions. Format: [1, H, W, C_IN]
- *                                 Batch argument N is not used.
- * @param[in]       filter_dims    Filter tensor dimensions. Format: [1, H, W, C_OUT]
+ * @param[in]       input_dims   Input (activation) tensor dimensions. Format: [1, H, W, C_IN]
+ *                               Batch argument N is not used.
+ * @param[in]       filter_dims  Filter tensor dimensions. Format: [1, H, W, C_OUT]
  * @return          The function returns  required buffer size in bytes
  *
  */
@@ -1439,31 +1447,32 @@
 /**
  * @brief Basic s8 Fully Connected function.
  *
- * @param[in, out] ctx            Function context (e.g. temporary buffer). Check the function
- *                                definition file to see if an additional buffer is required.
- *                                Optional function {API}_get_buffer_size() provides the buffer
- *                                size if an additional buffer is required.
- * @param[in]      fc_params      Fully Connected layer parameters.
- *                                Range of fc_params->input_offset  : [-127, 128]
- *                                fc_params->filter_offset : 0
- *                                Range of fc_params->output_offset : [-128, 127]
- * @param[in]      quant_params   Per-tensor quantization info.
- *                                It contains the multiplier and shift values to be applied to the output tensor.
- * @param[in]      input_dims     Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- *                                Input dimension is taken as Nx(H * W * C_IN)
- * @param[in]      input_data     Input (activation) data pointer. Data type: int8
- * @param[in]      filter_dims    Two dimensional filter dimensions. Format: [N, C]
- *                                N : accumulation depth and equals (H * W * C_IN) from input_dims
- *                                C : output depth and equals C_OUT in output_dims
- *                                H & W : Not used
- * @param[in]      filter_data    Filter data pointer. Data type: int8
- * @param[in]      bias_dims      Bias tensor dimensions. Format: [C_OUT]
- *                                N, H, W : Not used
- * @param[in]      bias_data      Bias data pointer. Data type: int32
- * @param[in]      output_dims    Output tensor dimensions. Format: [N, C_OUT]
- *                                N : Batches
- *                                C_OUT : Output depth
- *                                H & W : Not used.
+ * @param[in, out] ctx           Function context (e.g. temporary buffer). Check the function
+ *                               definition file to see if an additional buffer is required.
+ *                               Optional function {API}_get_buffer_size() provides the buffer
+ *                               size if an additional buffer is required.
+ *                               The caller is expected to clear the buffer, if applicable, for security reasons.
+ * @param[in]      fc_params     Fully Connected layer parameters.
+ *                               Range of fc_params->input_offset  : [-127, 128]
+ *                               fc_params->filter_offset : 0
+ *                               Range of fc_params->output_offset : [-128, 127]
+ * @param[in]      quant_params  Per-tensor quantization info.
+ *                               It contains the multiplier and shift values to be applied to the output tensor.
+ * @param[in]      input_dims    Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
+ *                               Input dimension is taken as Nx(H * W * C_IN)
+ * @param[in]      input_data    Input (activation) data pointer. Data type: int8
+ * @param[in]      filter_dims   Two dimensional filter dimensions. Format: [N, C]
+ *                               N : accumulation depth and equals (H * W * C_IN) from input_dims
+ *                               C : output depth and equals C_OUT in output_dims
+ *                               H & W : Not used
+ * @param[in]      filter_data   Filter data pointer. Data type: int8
+ * @param[in]      bias_dims     Bias tensor dimensions. Format: [C_OUT]
+ *                               N, H, W : Not used
+ * @param[in]      bias_data     Bias data pointer. Data type: int32
+ * @param[in]      output_dims   Output tensor dimensions. Format: [N, C_OUT]
+ *                               N : Batches
+ *                               C_OUT : Output depth
+ *                               H & W : Not used.
  * @param[in, out] output_data    Output data pointer. Data type: int8
  * @return     The function returns <code>ARM_CMSIS_NN_SUCCESS</code>
  *
@@ -1495,31 +1504,32 @@
 /**
  * @brief Basic s16 Fully Connected function.
  *
- * @param[in, out] ctx            Function context (e.g. temporary buffer). Check the function
- *                                definition file to see if an additional buffer is required.
- *                                Optional function {API}_get_buffer_size() provides the buffer
- *                                size if an additional buffer is required.
- * @param[in]      fc_params      Fully Connected layer parameters.
- *                                fc_params->input_offset  : 0
- *                                fc_params->filter_offset : 0
- *                                fc_params->output_offset : 0
- * @param[in]      quant_params   Per-tensor quantization info.
- *                                It contains the multiplier and shift values to be applied to the output tensor.
- * @param[in]      input_dims     Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
- *                                Input dimension is taken as Nx(H * W * C_IN)
- * @param[in]      input_data     Input (activation) data pointer. Data type: int16
- * @param[in]      filter_dims    Two dimensional filter dimensions. Format: [N, C]
- *                                N : accumulation depth and equals (H * W * C_IN) from input_dims
- *                                C : output depth and equals C_OUT in output_dims
- *                                H & W : Not used
- * @param[in]      filter_data    Filter data pointer. Data type: int8
- * @param[in]      bias_dims      Bias tensor dimensions. Format: [C_OUT]
- *                                N, H, W : Not used
- * @param[in]      bias_data      Bias data pointer. Data type: int64
- * @param[in]      output_dims    Output tensor dimensions. Format: [N, C_OUT]
- *                                N : Batches
- *                                C_OUT : Output depth
- *                                H & W : Not used.
+ * @param[in, out] ctx           Function context (e.g. temporary buffer). Check the function
+ *                               definition file to see if an additional buffer is required.
+ *                               Optional function {API}_get_buffer_size() provides the buffer
+ *                               size if an additional buffer is required.
+ *                               The caller is expected to clear the buffer, if applicable, for security reasons.
+ * @param[in]      fc_params     Fully Connected layer parameters.
+ *                               fc_params->input_offset  : 0
+ *                               fc_params->filter_offset : 0
+ *                               fc_params->output_offset : 0
+ * @param[in]      quant_params  Per-tensor quantization info.
+ *                               It contains the multiplier and shift values to be applied to the output tensor.
+ * @param[in]      input_dims    Input (activation) tensor dimensions. Format: [N, H, W, C_IN]
+ *                               Input dimension is taken as Nx(H * W * C_IN)
+ * @param[in]      input_data    Input (activation) data pointer. Data type: int16
+ * @param[in]      filter_dims   Two dimensional filter dimensions. Format: [N, C]
+ *                               N : accumulation depth and equals (H * W * C_IN) from input_dims
+ *                               C : output depth and equals C_OUT in output_dims
+ *                               H & W : Not used
+ * @param[in]      filter_data   Filter data pointer. Data type: int8
+ * @param[in]      bias_dims     Bias tensor dimensions. Format: [C_OUT]
+ *                               N, H, W : Not used
+ * @param[in]      bias_data     Bias data pointer. Data type: int64
+ * @param[in]      output_dims   Output tensor dimensions. Format: [N, C_OUT]
+ *                               N : Batches
+ *                               C_OUT : Output depth
+ *                               H & W : Not used.
  * @param[in, out] output_data    Output data pointer. Data type: int16
  * @return     The function returns <code>ARM_CMSIS_NN_SUCCESS</code>
  *
@@ -1730,22 +1740,22 @@
 
 /**
  * @brief s8 elementwise add of two vectors
- * @param[in]       input_1_vect            pointer to input vector 1
- * @param[in]       input_2_vect            pointer to input vector 2
- * @param[in]       input_1_offset          offset for input 1. Range: -127 to 128
- * @param[in]       input_1_mult            multiplier for input 1
- * @param[in]       input_1_shift           shift for input 1
- * @param[in]       input_2_offset          offset for input 2. Range: -127 to 128
- * @param[in]       input_2_mult            multiplier for input 2
- * @param[in]       input_2_shift           shift for input 2
- * @param[in]       left_shift              input left shift
- * @param[in,out]   output                  pointer to output vector
- * @param[in]       out_offset              output offset.  Range: -128 to 127
- * @param[in]       out_mult                output multiplier
- * @param[in]       out_shift               output shift
- * @param[in]       out_activation_min      minimum value to clamp output to. Min: -128
- * @param[in]       out_activation_max      maximum value to clamp output to. Max: 127
- * @param[in]       block_size              number of samples
+ * @param[in]       input_1_vect        pointer to input vector 1
+ * @param[in]       input_2_vect        pointer to input vector 2
+ * @param[in]       input_1_offset      offset for input 1. Range: -127 to 128
+ * @param[in]       input_1_mult        multiplier for input 1
+ * @param[in]       input_1_shift       shift for input 1
+ * @param[in]       input_2_offset      offset for input 2. Range: -127 to 128
+ * @param[in]       input_2_mult        multiplier for input 2
+ * @param[in]       input_2_shift       shift for input 2
+ * @param[in]       left_shift          input left shift
+ * @param[in,out]   output              pointer to output vector
+ * @param[in]       out_offset          output offset.  Range: -128 to 127
+ * @param[in]       out_mult            output multiplier
+ * @param[in]       out_shift           output shift
+ * @param[in]       out_activation_min  minimum value to clamp output to. Min: -128
+ * @param[in]       out_activation_max  maximum value to clamp output to. Max: 127
+ * @param[in]       block_size          number of samples
  * @return          The function returns    ARM_CMSIS_NN_SUCCESS
  */
 arm_cmsis_nn_status arm_elementwise_add_s8(const int8_t *input_1_vect,
@@ -1767,23 +1777,23 @@
 
 /**
  * @brief s16 elementwise add of two vectors
- * @param[in]       input_1_vect            pointer to input vector 1
- * @param[in]       input_2_vect            pointer to input vector 2
- * @param[in]       input_1_offset          offset for input 1. Not used.
- * @param[in]       input_1_mult            multiplier for input 1
- * @param[in]       input_1_shift           shift for input 1
- * @param[in]       input_2_offset          offset for input 2. Not used.
- * @param[in]       input_2_mult            multiplier for input 2
- * @param[in]       input_2_shift           shift for input 2
- * @param[in]       left_shift              input left shift
- * @param[in,out]   output                  pointer to output vector
- * @param[in]       out_offset              output offset. Not used.
- * @param[in]       out_mult                output multiplier
- * @param[in]       out_shift               output shift
- * @param[in]       out_activation_min      minimum value to clamp output to. Min: -32768
- * @param[in]       out_activation_max      maximum value to clamp output to. Max: 32767
- * @param[in]       block_size              number of samples
- * @return          The function returns    ARM_CMSIS_NN_SUCCESS
+ * @param[in]       input_1_vect        pointer to input vector 1
+ * @param[in]       input_2_vect        pointer to input vector 2
+ * @param[in]       input_1_offset      offset for input 1. Not used.
+ * @param[in]       input_1_mult        multiplier for input 1
+ * @param[in]       input_1_shift       shift for input 1
+ * @param[in]       input_2_offset      offset for input 2. Not used.
+ * @param[in]       input_2_mult        multiplier for input 2
+ * @param[in]       input_2_shift       shift for input 2
+ * @param[in]       left_shift          input left shift
+ * @param[in,out]   output              pointer to output vector
+ * @param[in]       out_offset          output offset. Not used.
+ * @param[in]       out_mult            output multiplier
+ * @param[in]       out_shift           output shift
+ * @param[in]       out_activation_min  minimum value to clamp output to. Min: -32768
+ * @param[in]       out_activation_max  maximum value to clamp output to. Max: 32767
+ * @param[in]       block_size          number of samples
+ * @return          The function returns ARM_CMSIS_NN_SUCCESS
  */
 arm_cmsis_nn_status arm_elementwise_add_s16(const int16_t *input_1_vect,
                                             const int16_t *input_2_vect,
@@ -1804,18 +1814,18 @@
 
 /**
  * @brief s8 elementwise multiplication
- * @param[in]       input_1_vect            pointer to input vector 1
- * @param[in]       input_2_vect            pointer to input vector 2
- * @param[in]       input_1_offset          offset for input 1. Range: -127 to 128
- * @param[in]       input_2_offset          offset for input 2. Range: -127 to 128
- * @param[in,out]   output                  pointer to output vector
- * @param[in]       out_offset              output offset. Range: -128 to 127
- * @param[in]       out_mult                output multiplier
- * @param[in]       out_shift               output shift
- * @param[in]       out_activation_min      minimum value to clamp output to. Min: -128
- * @param[in]       out_activation_max      maximum value to clamp output to. Max: 127
- * @param[in]       block_size              number of samples
- * @return          The function returns    ARM_CMSIS_NN_SUCCESS
+ * @param[in]       input_1_vect        pointer to input vector 1
+ * @param[in]       input_2_vect        pointer to input vector 2
+ * @param[in]       input_1_offset      offset for input 1. Range: -127 to 128
+ * @param[in]       input_2_offset      offset for input 2. Range: -127 to 128
+ * @param[in,out]   output              pointer to output vector
+ * @param[in]       out_offset          output offset. Range: -128 to 127
+ * @param[in]       out_mult            output multiplier
+ * @param[in]       out_shift           output shift
+ * @param[in]       out_activation_min  minimum value to clamp output to. Min: -128
+ * @param[in]       out_activation_max  maximum value to clamp output to. Max: 127
+ * @param[in]       block_size          number of samples
+ * @return          The function returns ARM_CMSIS_NN_SUCCESS
  *
  * @details   Supported framework: TensorFlow Lite micro
  */
@@ -1833,18 +1843,18 @@
 
 /**
  * @brief s16 elementwise multiplication
- * @param[in]       input_1_vect            pointer to input vector 1
- * @param[in]       input_2_vect            pointer to input vector 2
- * @param[in]       input_1_offset          offset for input 1. Not used.
- * @param[in]       input_2_offset          offset for input 2. Not used.
- * @param[in,out]   output                  pointer to output vector
- * @param[in]       out_offset              output offset. Not used.
- * @param[in]       out_mult                output multiplier
- * @param[in]       out_shift               output shift
- * @param[in]       out_activation_min      minimum value to clamp output to. Min: -32768
- * @param[in]       out_activation_max      maximum value to clamp output to. Max: 32767
- * @param[in]       block_size              number of samples
- * @return          The function returns    ARM_CMSIS_NN_SUCCESS
+ * @param[in]       input_1_vect        pointer to input vector 1
+ * @param[in]       input_2_vect        pointer to input vector 2
+ * @param[in]       input_1_offset      offset for input 1. Not used.
+ * @param[in]       input_2_offset      offset for input 2. Not used.
+ * @param[in,out]   output              pointer to output vector
+ * @param[in]       out_offset          output offset. Not used.
+ * @param[in]       out_mult            output multiplier
+ * @param[in]       out_shift           output shift
+ * @param[in]       out_activation_min  minimum value to clamp output to. Min: -32768
+ * @param[in]       out_activation_max  maximum value to clamp output to. Max: 32767
+ * @param[in]       block_size          number of samples
+ * @return          The function returns ARM_CMSIS_NN_SUCCESS
  *
  * @details   Supported framework: TensorFlow Lite micro
  */
@@ -1978,22 +1988,23 @@
 /**
  * @brief s8 average pooling function.
  *
- * @param[in, out] ctx            Function context (e.g. temporary buffer). Check the function
- *                                definition file to see if an additional buffer is required.
- *                                Optional function {API}_get_buffer_size() provides the buffer
- *                                size if an additional buffer is required.
- * @param[in]      pool_params    Pooling parameters
- * @param[in]      input_dims     Input (activation) tensor dimensions. Format: [H, W, C_IN]
- *                                Argument 'N' is not used.
- * @param[in]      input_data     Input (activation) data pointer. Data type: int8
- * @param[in]      filter_dims    Filter tensor dimensions. Format: [H, W]
- *                                Argument N and C are not used.
- * @param[in]      output_dims    Output tensor dimensions. Format: [H, W, C_OUT]
- *                                Argument N is not used.
- *                                C_OUT equals C_IN.
- * @param[in, out] output_data    Output data pointer. Data type: int8
- * @return                        The function returns
- *                                    <code>ARM_CMSIS_NN_SUCCESS</code> - Successful operation
+ * @param[in, out] ctx          Function context (e.g. temporary buffer). Check the function
+ *                              definition file to see if an additional buffer is required.
+ *                              Optional function {API}_get_buffer_size() provides the buffer
+ *                              size if an additional buffer is required.
+ *                              The caller is expected to clear the buffer, if applicable, for security reasons.
+ * @param[in]      pool_params  Pooling parameters
+ * @param[in]      input_dims   Input (activation) tensor dimensions. Format: [H, W, C_IN]
+ *                              Argument 'N' is not used.
+ * @param[in]      input_data   Input (activation) data pointer. Data type: int8
+ * @param[in]      filter_dims  Filter tensor dimensions. Format: [H, W]
+ *                              Argument N and C are not used.
+ * @param[in]      output_dims  Output tensor dimensions. Format: [H, W, C_OUT]
+ *                              Argument N is not used.
+ *                              C_OUT equals C_IN.
+ * @param[in, out] output_data  Output data pointer. Data type: int8
+ * @return                      The function returns
+ *                                  <code>ARM_CMSIS_NN_SUCCESS</code> - Successful operation
  *
  * @details
  *    - Supported Framework: TensorFlow Lite
@@ -2019,20 +2030,21 @@
 /**
  * @brief s16 average pooling function.
  *
- * @param[in, out] ctx            Function context (e.g. temporary buffer). Check the function
- *                                definition file to see if an additional buffer is required.
- *                                Optional function {API}_get_buffer_size() provides the buffer
- *                                size if an additional buffer is required.
- * @param[in]      pool_params    Pooling parameters
- * @param[in]      input_dims     Input (activation) tensor dimensions. Format: [H, W, C_IN]
- *                                Argument 'N' is not used.
- * @param[in]      input_data     Input (activation) data pointer. Data type: int16
- * @param[in]      filter_dims    Filter tensor dimensions. Format: [H, W]
- *                                Argument N and C are not used.
- * @param[in]      output_dims    Output tensor dimensions. Format: [H, W, C_OUT]
- *                                Argument N is not used.
- *                                C_OUT equals C_IN.
- * @param[in, out] output_data    Output data pointer. Data type: int16
+ * @param[in, out] ctx          Function context (e.g. temporary buffer). Check the function
+ *                              definition file to see if an additional buffer is required.
+ *                              Optional function {API}_get_buffer_size() provides the buffer
+ *                              size if an additional buffer is required.
+ *                              The caller is expected to clear the buffer, if applicable, for security reasons.
+ * @param[in]      pool_params  Pooling parameters
+ * @param[in]      input_dims   Input (activation) tensor dimensions. Format: [H, W, C_IN]
+ *                              Argument 'N' is not used.
+ * @param[in]      input_data   Input (activation) data pointer. Data type: int16
+ * @param[in]      filter_dims  Filter tensor dimensions. Format: [H, W]
+ *                              Argument N and C are not used.
+ * @param[in]      output_dims  Output tensor dimensions. Format: [H, W, C_OUT]
+ *                              Argument N is not used.
+ *                              C_OUT equals C_IN.
+ * @param[in, out] output_data  Output data pointer. Data type: int16
  * @return                        The function returns
  *                                    <code>ARM_CMSIS_NN_SUCCESS</code> - Successful operation
  *                                    <code>ARM_CMSIS_NN_ARG_ERROR</code> - In case of invalid arguments
@@ -2061,20 +2073,21 @@
 /**
  * @brief s8 max pooling function.
  *
- * @param[in, out] ctx            Function context (e.g. temporary buffer). Check the function
- *                                definition file to see if an additional buffer is required.
- *                                Optional function {API}_get_buffer_size() provides the buffer
- *                                size if an additional buffer is required.
- * @param[in]      pool_params    Pooling parameters
- * @param[in]      input_dims     Input (activation) tensor dimensions. Format: [H, W, C_IN]
- *                                Argument 'N' is not used.
- * @param[in]      input_data     Input (activation) data pointer. The input tensor must not
- *                                overlap with the output tensor. Data type: int8
- * @param[in]      filter_dims    Filter tensor dimensions. Format: [H, W]
- *                                Argument N and C are not used.
- * @param[in]      output_dims    Output tensor dimensions. Format: [H, W, C_OUT]
- *                                Argument N is not used.
- *                                C_OUT equals C_IN.
+ * @param[in, out] ctx          Function context (e.g. temporary buffer). Check the function
+ *                              definition file to see if an additional buffer is required.
+ *                              Optional function {API}_get_buffer_size() provides the buffer
+ *                              size if an additional buffer is required.
+ *                              The caller is expected to clear the buffer, if applicable, for security reasons.
+ * @param[in]      pool_params  Pooling parameters
+ * @param[in]      input_dims   Input (activation) tensor dimensions. Format: [H, W, C_IN]
+ *                              Argument 'N' is not used.
+ * @param[in]      input_data   Input (activation) data pointer. The input tensor must not
+ *                              overlap with the output tensor. Data type: int8
+ * @param[in]      filter_dims  Filter tensor dimensions. Format: [H, W]
+ *                              Argument N and C are not used.
+ * @param[in]      output_dims  Output tensor dimensions. Format: [H, W, C_OUT]
+ *                              Argument N is not used.
+ *                              C_OUT equals C_IN.
  * @param[in, out] output_data    Output data pointer. Data type: int8
  * @return                        The function returns
  *                                    <code>ARM_CMSIS_NN_SUCCESS</code> - Successful operation
@@ -2094,21 +2107,22 @@
 /**
  * @brief s16 max pooling function.
  *
- * @param[in, out] ctx            Function context (e.g. temporary buffer). Check the function
- *                                definition file to see if an additional buffer is required.
- *                                Optional function {API}_get_buffer_size() provides the buffer
- *                                size if an additional buffer is required.
- * @param[in]      pool_params    Pooling parameters
- * @param[in]      input_dims     Input (activation) tensor dimensions. Format: [H, W, C_IN]
- *                                Argument 'N' is not used.
- * @param[in]      src            Input (activation) data pointer. The input tensor must not
- *                                overlap with the output tensor. Data type: int16
- * @param[in]      filter_dims    Filter tensor dimensions. Format: [H, W]
- *                                Argument N and C are not used.
- * @param[in]      output_dims    Output tensor dimensions. Format: [H, W, C_OUT]
- *                                Argument N is not used.
- *                                C_OUT equals C_IN.
- * @param[in, out] dst            Output data pointer. Data type: int16
+ * @param[in, out] ctx          Function context (e.g. temporary buffer). Check the function
+ *                              definition file to see if an additional buffer is required.
+ *                              Optional function {API}_get_buffer_size() provides the buffer
+ *                              size if an additional buffer is required.
+ *                              The caller is expected to clear the buffer, if applicable, for security reasons.
+ * @param[in]      pool_params  Pooling parameters
+ * @param[in]      input_dims   Input (activation) tensor dimensions. Format: [H, W, C_IN]
+ *                              Argument 'N' is not used.
+ * @param[in]      src          Input (activation) data pointer. The input tensor must not
+ *                              overlap with the output tensor. Data type: int16
+ * @param[in]      filter_dims  Filter tensor dimensions. Format: [H, W]
+ *                              Argument N and C are not used.
+ * @param[in]      output_dims  Output tensor dimensions. Format: [H, W, C_OUT]
+ *                              Argument N is not used.
+ *                              C_OUT equals C_IN.
+ * @param[in, out] dst          Output data pointer. Data type: int16
  * @return                        The function returns
  *                                    <code>ARM_CMSIS_NN_SUCCESS</code> - Successful operation
  *
@@ -2537,25 +2551,27 @@
 /**
  * @brief s8 SVDF function with 8 bit state tensor and 8 bit time weights
  *
- * @param[in]   input_ctx Temporary scratch buffer
- * @param[in]   output_ctx Temporary output scratch buffer
- * @param[in]   svdf_params SVDF Parameters
- *              Range of svdf_params->input_offset  : [-128, 127]
- *              Range of svdf_params->output_offset  : [-128, 127]
- * @param[in]   input_quant_params Input quantization parameters
- * @param[in]   output_quant_params Output quantization parameters
- * @param[in]   input_dims Input tensor dimensions
- * @param[in]   input_data Pointer to input tensor
- * @param[in]   state_dims State tensor dimensions
- * @param[in]   state_data Pointer to state tensor
- * @param[in]   weights_feature_dims Weights (feature) tensor dimensions
- * @param[in]   weights_feature_data Pointer to the weights (feature) tensor
- * @param[in]   weights_time_dims Weights (time) tensor dimensions
- * @param[in]   weights_time_data Pointer to the weights (time) tensor
- * @param[in]   bias_dims Bias tensor dimensions
- * @param[in]   bias_data Pointer to bias tensor
- * @param[in]   output_dims Output tensor dimensions
- * @param[out]  output_data Pointer to the output tensor
+ * @param[in]   input_ctx             Temporary scratch buffer
+ *                                    The caller is expected to clear the buffer, if applicable, for security reasons.
+ * @param[in]   output_ctx            Temporary output scratch buffer
+ *                                    The caller is expected to clear the buffer, if applicable, for security reasons.
+ * @param[in]   svdf_params           SVDF Parameters
+ *                                    Range of svdf_params->input_offset  : [-128, 127]
+ *                                    Range of svdf_params->output_offset  : [-128, 127]
+ * @param[in]   input_quant_params    Input quantization parameters
+ * @param[in]   output_quant_params   Output quantization parameters
+ * @param[in]   input_dims            Input tensor dimensions
+ * @param[in]   input_data            Pointer to input tensor
+ * @param[in]   state_dims            State tensor dimensions
+ * @param[in]   state_data            Pointer to state tensor
+ * @param[in]   weights_feature_dims  Weights (feature) tensor dimensions
+ * @param[in]   weights_feature_data  Pointer to the weights (feature) tensor
+ * @param[in]   weights_time_dims     Weights (time) tensor dimensions
+ * @param[in]   weights_time_data     Pointer to the weights (time) tensor
+ * @param[in]   bias_dims             Bias tensor dimensions
+ * @param[in]   bias_data             Pointer to bias tensor
+ * @param[in]   output_dims           Output tensor dimensions
+ * @param[out]  output_data           Pointer to the output tensor
  *
  * @return     The function returns <code>ARM_CMSIS_NN_SUCCESS</code>
  *
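
Since the s8 SVDF kernels take two scratch contexts, both need the same treatment on the caller side. A small helper of the following shape is one way to express it; svdf_release_scratch is a hypothetical name, and buf/size are the cmsis_nn_context fields also used by the unit tests in this patch.

    #include <stdlib.h>
    #include <string.h>
    #include <arm_nn_types.h>

    /* Clears and releases both SVDF scratch buffers before they go back
     * to the allocator, as the updated parameter documentation requires. */
    void svdf_release_scratch(cmsis_nn_context *input_ctx, cmsis_nn_context *output_ctx)
    {
        if (input_ctx->buf)
        {
            memset(input_ctx->buf, 0, input_ctx->size);
            free(input_ctx->buf);
            input_ctx->buf = NULL;
        }
        if (output_ctx->buf)
        {
            memset(output_ctx->buf, 0, output_ctx->size);
            free(output_ctx->buf);
            output_ctx->buf = NULL;
        }
    }
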
@@ -2585,25 +2601,27 @@
 /**
  * @brief s8 SVDF function with 16 bit state tensor and 16 bit time weights
  *
- * @param[in]   input_ctx Temporary scratch buffer
- * @param[in]   output_ctx Temporary output scratch buffer
- * @param[in]   svdf_params SVDF Parameters
- *              Range of svdf_params->input_offset  : [-128, 127]
- *              Range of svdf_params->output_offset  : [-128, 127]
- * @param[in]   input_quant_params Input quantization parameters
- * @param[in]   output_quant_params Output quantization parameters
- * @param[in]   input_dims Input tensor dimensions
- * @param[in]   input_data Pointer to input tensor
- * @param[in]   state_dims State tensor dimensions
- * @param[in]   state_data Pointer to state tensor
- * @param[in]   weights_feature_dims Weights (feature) tensor dimensions
- * @param[in]   weights_feature_data Pointer to the weights (feature) tensor
- * @param[in]   weights_time_dims Weights (time) tensor dimensions
- * @param[in]   weights_time_data Pointer to the weights (time) tensor
- * @param[in]   bias_dims Bias tensor dimensions
- * @param[in]   bias_data Pointer to bias tensor
- * @param[in]   output_dims Output tensor dimensions
- * @param[out]  output_data Pointer to the output tensor
+ * @param[in]   input_ctx             Temporary scratch buffer
+ *                                    The caller is expected to clear the buffer, if applicable, for security reasons.
+ * @param[in]   output_ctx            Temporary output scratch buffer
+ *                                    The caller is expected to clear the buffer, if applicable, for security reasons.
+ * @param[in]   svdf_params           SVDF Parameters
+ *                                    Range of svdf_params->input_offset  : [-128, 127]
+ *                                    Range of svdf_params->output_offset  : [-128, 127]
+ * @param[in]   input_quant_params    Input quantization parameters
+ * @param[in]   output_quant_params   Output quantization parameters
+ * @param[in]   input_dims            Input tensor dimensions
+ * @param[in]   input_data            Pointer to input tensor
+ * @param[in]   state_dims            State tensor dimensions
+ * @param[in]   state_data            Pointer to state tensor
+ * @param[in]   weights_feature_dims  Weights (feature) tensor dimensions
+ * @param[in]   weights_feature_data  Pointer to the weights (feature) tensor
+ * @param[in]   weights_time_dims     Weights (time) tensor dimensions
+ * @param[in]   weights_time_data     Pointer to the weights (time) tensor
+ * @param[in]   bias_dims             Bias tensor dimensions
+ * @param[in]   bias_data             Pointer to bias tensor
+ * @param[in]   output_dims           Output tensor dimensions
+ * @param[out]  output_data           Pointer to the output tensor
  *
  * @return     The function returns <code>ARM_CMSIS_NN_SUCCESS</code>
  *
diff --git a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_avgpool_s16/test_arm_avgpool_s16.c b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_avgpool_s16/test_arm_avgpool_s16.c
index 87880ef..c226afd 100644
--- a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_avgpool_s16/test_arm_avgpool_s16.c
+++ b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_avgpool_s16/test_arm_avgpool_s16.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010-2022 Arm Limited or its affiliates.
+ * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -62,7 +62,12 @@
     arm_cmsis_nn_status result =
         arm_avgpool_s16(&ctx, &pool_params, &input_dims, input_data, &filter_dims, &output_dims, output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        // The caller is responsible for clearing the scratch buffers for security reasons, if applicable.
+        memset(ctx.buf, 0, ctx.size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, avgpooling_int16_output_ref, AVGPOOLING_INT16_DST_SIZE));
 }
@@ -104,7 +109,11 @@
     arm_cmsis_nn_status result =
         arm_avgpool_s16(&ctx, &pool_params, &input_dims, input_data, &filter_dims, &output_dims, output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, ctx.size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, avgpooling_int16_1_output_ref, AVGPOOLING_INT16_1_DST_SIZE));
 }
@@ -146,7 +155,11 @@
     arm_cmsis_nn_status result =
         arm_avgpool_s16(&ctx, &pool_params, &input_dims, input_data, &filter_dims, &output_dims, output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, ctx.size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, avgpooling_int16_2_output_ref, AVGPOOLING_INT16_2_DST_SIZE));
 }
@@ -188,7 +201,11 @@
     arm_cmsis_nn_status result =
         arm_avgpool_s16(&ctx, &pool_params, &input_dims, input_data, &filter_dims, &output_dims, output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, ctx.size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, avgpooling_int16_3_output_ref, AVGPOOLING_INT16_3_DST_SIZE));
 }
\ No newline at end of file
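
A caveat about the clear-and-free pattern used in these tests: a memset() immediately followed by free() is a dead store, so an optimizing compiler is allowed to drop it. That is fine for the unit tests, but a caller that relies on the clear for security would want a form the compiler cannot elide, for example the hypothetical helper below (a sketch, not part of this patch).

    #include <stddef.h>

    /* Writes through a volatile-qualified pointer are treated as observable
     * side effects, so the buffer really is overwritten before release. */
    void secure_clear(void *buf, size_t size)
    {
        volatile unsigned char *p = (volatile unsigned char *)buf;
        while (size--)
        {
            *p++ = 0;
        }
    }

Calling secure_clear(ctx.buf, ctx.size) in place of memset() gives the same result without the risk of the store being optimized away.
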
diff --git a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_avgpool_s8/test_arm_avgpool_s8.c b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_avgpool_s8/test_arm_avgpool_s8.c
index 98b30b2..3726d27 100644
--- a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_avgpool_s8/test_arm_avgpool_s8.c
+++ b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_avgpool_s8/test_arm_avgpool_s8.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010-2022 Arm Limited or its affiliates.
+ * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -64,7 +64,12 @@
     arm_cmsis_nn_status result =
         arm_avgpool_s8(&ctx, &pool_params, &input_dims, input_data, &filter_dims, &output_dims, output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        // The caller is responsible for clearing the scratch buffers for security reasons, if applicable.
+        memset(ctx.buf, 0, ctx.size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, avgpooling_output_ref, AVGPOOLING_DST_SIZE));
 }
@@ -106,7 +111,11 @@
     arm_cmsis_nn_status result =
         arm_avgpool_s8(&ctx, &pool_params, &input_dims, input_data, &filter_dims, &output_dims, output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, ctx.size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, avgpooling_1_output_ref, AVGPOOLING_1_DST_SIZE));
 }
@@ -148,7 +157,11 @@
     arm_cmsis_nn_status result =
         arm_avgpool_s8(&ctx, &pool_params, &input_dims, input_data, &filter_dims, &output_dims, output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, ctx.size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, avgpooling_2_output_ref, AVGPOOLING_2_DST_SIZE));
 }
@@ -190,7 +203,11 @@
     arm_cmsis_nn_status result =
         arm_avgpool_s8(&ctx, &pool_params, &input_dims, input_data, &filter_dims, &output_dims, output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, ctx.size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, avgpooling_3_output_ref, AVGPOOLING_3_DST_SIZE));
 }
@@ -232,7 +249,11 @@
     arm_cmsis_nn_status result =
         arm_avgpool_s8(&ctx, &pool_params, &input_dims, input_data, &filter_dims, &output_dims, output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, ctx.size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, avgpooling_4_output_ref, AVGPOOLING_4_DST_SIZE));
 }
@@ -274,7 +295,11 @@
     arm_cmsis_nn_status result =
         arm_avgpool_s8(&ctx, &pool_params, &input_dims, input_data, &filter_dims, &output_dims, output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, ctx.size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, avgpooling_5_output_ref, AVGPOOLING_5_DST_SIZE));
 }
diff --git a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_convolve_1x1_s8_fast/test_arm_convolve_1x1_s8_fast.c b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_convolve_1x1_s8_fast/test_arm_convolve_1x1_s8_fast.c
index 18ce8a5..25ac807 100644
--- a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_convolve_1x1_s8_fast/test_arm_convolve_1x1_s8_fast.c
+++ b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_convolve_1x1_s8_fast/test_arm_convolve_1x1_s8_fast.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010-2022 Arm Limited or its affiliates.
+ * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -77,7 +77,12 @@
                                                           &output_dims,
                                                           output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        // The caller is responsible for clearing the scratch buffers for security reasons, if applicable.
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, kernel1x1_output_ref, KERNEL1X1_DST_SIZE));
 }
diff --git a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_convolve_fast_s16/test_arm_convolve_fast_s16.c b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_convolve_fast_s16/test_arm_convolve_fast_s16.c
index b4a4476..ba175fd 100644
--- a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_convolve_fast_s16/test_arm_convolve_fast_s16.c
+++ b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_convolve_fast_s16/test_arm_convolve_fast_s16.c
@@ -1,5 +1,6 @@
 /*
- * Copyright (C) 2010-2022 Arm Limited or its affiliates. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates <open-source-office@arm.com> All rights
+ * reserved.
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -81,7 +82,12 @@
                                                           bias_data,
                                                           &output_dims,
                                                           output);
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        // The caller is responsible for clearing the scratch buffers for security reasons, if applicable.
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
 
     TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
@@ -100,7 +106,11 @@
                                    bias_data,
                                    &output_dims,
                                    output);
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
 #if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI)
     TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
@@ -166,7 +176,11 @@
                                                           &output_dims,
                                                           output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 
@@ -184,7 +198,11 @@
                                    bias_data,
                                    &output_dims,
                                    output);
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
 #if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI)
     TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
diff --git a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_convolve_s16/test_arm_convolve_s16.c b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_convolve_s16/test_arm_convolve_s16.c
index 5eb35ea..06406a3 100644
--- a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_convolve_s16/test_arm_convolve_s16.c
+++ b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_convolve_s16/test_arm_convolve_s16.c
@@ -1,5 +1,6 @@
 /*
- * Copyright (C) 2010-2022 Arm Limited or its affiliates. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates <open-source-office@arm.com> All rights
+ * reserved.
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -84,7 +85,12 @@
                                                   bias_data,
                                                   &output_dims,
                                                   output);
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        // The caller is responsible for clearing the scratch buffers for security reasons, if applicable.
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 
@@ -103,7 +109,11 @@
                                       &output_dims,
                                       output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 }
@@ -164,7 +174,11 @@
                                                   bias_data,
                                                   &output_dims,
                                                   output);
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 
@@ -183,7 +197,11 @@
                                       &output_dims,
                                       output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 }
@@ -244,7 +262,11 @@
                                                   bias_data,
                                                   &output_dims,
                                                   output);
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 
@@ -263,7 +285,11 @@
                                       &output_dims,
                                       output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 }
@@ -324,7 +350,11 @@
                                                   bias_data,
                                                   &output_dims,
                                                   output);
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 
@@ -343,7 +373,11 @@
                                       &output_dims,
                                       output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 }
@@ -404,7 +438,11 @@
                                                   bias_data,
                                                   &output_dims,
                                                   output);
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 
@@ -423,7 +461,11 @@
                                       &output_dims,
                                       output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 }
\ No newline at end of file
diff --git a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_convolve_s8/test_arm_convolve_s8.c b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_convolve_s8/test_arm_convolve_s8.c
index 28a2711..c91a401 100644
--- a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_convolve_s8/test_arm_convolve_s8.c
+++ b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_convolve_s8/test_arm_convolve_s8.c
@@ -98,7 +98,12 @@
                                                  &output_dims,
                                                  output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        // The caller is responsible for clearing the scratch buffers for security reasons, if applicable.
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 
@@ -118,7 +123,11 @@
                                      &output_dims,
                                      output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 }
@@ -182,7 +191,11 @@
                                                  &output_dims,
                                                  output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 
@@ -202,7 +215,11 @@
                                      &output_dims,
                                      output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 }
@@ -266,7 +283,11 @@
                                                  &output_dims,
                                                  output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 
@@ -286,7 +307,11 @@
                                      &output_dims,
                                      output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 }
@@ -350,7 +375,11 @@
                                                  &output_dims,
                                                  output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 
@@ -370,7 +399,11 @@
                                      &output_dims,
                                      output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 }
@@ -434,7 +467,11 @@
                                                  &output_dims,
                                                  output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 
@@ -454,7 +491,11 @@
                                      &output_dims,
                                      output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 }
@@ -518,7 +559,11 @@
                                                        &output_dims,
                                                        output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 
@@ -536,7 +581,11 @@
                              bias_data,
                              &output_dims,
                              output);
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 }
@@ -600,7 +649,11 @@
                                                        &output_dims,
                                                        output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
 
     buf_size = arm_convolve_s8_get_buffer_size(&input_dims, &filter_dims);
@@ -617,7 +670,11 @@
                                      bias_data,
                                      &output_dims,
                                      output);
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 }
@@ -681,7 +738,11 @@
                                                        &output_dims,
                                                        output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
 
     buf_size = arm_convolve_s8_get_buffer_size(&input_dims, &filter_dims);
@@ -698,7 +759,11 @@
                                      bias_data,
                                      &output_dims,
                                      output);
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 }
@@ -759,7 +824,11 @@
                                                          bias_data,
                                                          &output_dims,
                                                          output);
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 }
@@ -823,7 +892,11 @@
                                                  &output_dims,
                                                  output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 
@@ -843,7 +916,11 @@
                                      &output_dims,
                                      output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 }
@@ -905,7 +982,11 @@
                                                  bias_data,
                                                  &output_dims,
                                                  output);
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 
@@ -925,7 +1006,11 @@
                                      &output_dims,
                                      output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 }
@@ -987,7 +1072,11 @@
                                                  bias_data,
                                                  &output_dims,
                                                  output);
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 
@@ -1007,7 +1096,11 @@
                                      &output_dims,
                                      output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 }
@@ -1069,7 +1162,11 @@
                                                  bias_data,
                                                  &output_dims,
                                                  output);
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 
@@ -1089,7 +1186,11 @@
                                      &output_dims,
                                      output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 }
@@ -1151,7 +1252,11 @@
                                                  bias_data,
                                                  &output_dims,
                                                  output);
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 
@@ -1171,7 +1276,11 @@
                                      &output_dims,
                                      output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 }
@@ -1233,7 +1342,11 @@
                                                  bias_data,
                                                  &output_dims,
                                                  output);
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 
@@ -1253,7 +1366,11 @@
                                      &output_dims,
                                      output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 }
@@ -1317,7 +1434,11 @@
                                                  &output_dims,
                                                  output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 
@@ -1337,7 +1458,11 @@
                                      &output_dims,
                                      output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 }
diff --git a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_depthwise_conv_3x3_s8/test_arm_depthwise_conv_3x3_s8.c b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_depthwise_conv_3x3_s8/test_arm_depthwise_conv_3x3_s8.c
index bd30818..d6d2c3e 100644
--- a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_depthwise_conv_3x3_s8/test_arm_depthwise_conv_3x3_s8.c
+++ b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_depthwise_conv_3x3_s8/test_arm_depthwise_conv_3x3_s8.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010-2022 Arm Limited or its affiliates.
+ * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -96,7 +96,12 @@
                                                            &output_dims,
                                                            output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        // The caller is responsible for clearing the scratch buffers for security reasons, if applicable.
+        memset(ctx.buf, 0, ctx.size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, depthwise_kernel_3x3_output_ref, DEPTHWISE_KERNEL_3X3_DST_SIZE));
 
@@ -116,7 +121,11 @@
                                            &output_dims,
                                            output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, depthwise_kernel_3x3_output_ref, DEPTHWISE_KERNEL_3X3_DST_SIZE));
 }
@@ -180,7 +189,11 @@
                                                            &output_dims,
                                                            output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, ctx.size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
 
     const arm_cmsis_nn_status expected_wrapper = ARM_CMSIS_NN_SUCCESS;
@@ -201,7 +214,11 @@
                                            &output_dims,
                                            output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected_wrapper, result);
 }
 
@@ -264,7 +281,11 @@
                                                            &output_dims,
                                                            output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, ctx.size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(
         validate(output, depthwise_kernel_3x3_null_bias_output_ref, DEPTHWISE_KERNEL_3X3_NULL_BIAS_DST_SIZE));
@@ -287,7 +308,11 @@
                                            &output_dims,
                                            output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected_wrapper, result);
     TEST_ASSERT_TRUE(
         validate(output, depthwise_kernel_3x3_null_bias_output_ref, DEPTHWISE_KERNEL_3X3_NULL_BIAS_DST_SIZE));
diff --git a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_depthwise_conv_fast_s16/test_arm_depthwise_conv_fast_s16.c b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_depthwise_conv_fast_s16/test_arm_depthwise_conv_fast_s16.c
index daeddfa..26fff52 100644
--- a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_depthwise_conv_fast_s16/test_arm_depthwise_conv_fast_s16.c
+++ b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_depthwise_conv_fast_s16/test_arm_depthwise_conv_fast_s16.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010-2022 Arm Limited or its affiliates.
+ * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -104,7 +104,12 @@
                                                              &output_dims,
                                                              output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        // The caller is responsible for clearing the scratch buffers for security reasons, if applicable.
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 
@@ -123,7 +128,11 @@
                                             &output_dims,
                                             output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 }
@@ -188,7 +197,11 @@
                                                              &output_dims,
                                                              output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 
@@ -207,7 +220,11 @@
                                             &output_dims,
                                             output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 }
@@ -272,7 +289,11 @@
                                                              &output_dims,
                                                              output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 
@@ -291,7 +312,11 @@
                                             &output_dims,
                                             output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 }
@@ -357,7 +382,11 @@
                                                              &output_dims,
                                                              output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 
@@ -376,7 +405,11 @@
                                             &output_dims,
                                             output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 }
@@ -442,7 +475,11 @@
                                                              &output_dims,
                                                              output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 
@@ -461,7 +498,11 @@
                                             &output_dims,
                                             output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 }
@@ -527,7 +568,11 @@
                                                              &output_dims,
                                                              output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 
@@ -546,7 +591,11 @@
                                             &output_dims,
                                             output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 }
@@ -612,7 +661,11 @@
                                                              &output_dims,
                                                              output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 
@@ -631,7 +684,11 @@
                                             &output_dims,
                                             output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 }
@@ -697,7 +754,11 @@
                                                              &output_dims,
                                                              output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 
@@ -716,7 +777,11 @@
                                             &output_dims,
                                             output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 }
@@ -782,7 +847,11 @@
                                                              &output_dims,
                                                              output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 
@@ -801,7 +870,11 @@
                                             &output_dims,
                                             output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 }
diff --git a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_depthwise_conv_s16/test_arm_depthwise_conv_s16.c b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_depthwise_conv_s16/test_arm_depthwise_conv_s16.c
index df20780..f53f1e2 100644
--- a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_depthwise_conv_s16/test_arm_depthwise_conv_s16.c
+++ b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_depthwise_conv_s16/test_arm_depthwise_conv_s16.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010-2022 Arm Limited or its affiliates.
+ * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -83,7 +83,12 @@
                                                         bias_data,
                                                         &output_dims,
                                                         output);
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        // The caller is responsible for clearing the scratch buffers for security reasons, if applicable.
+        memset(ctx.buf, 0, ctx.size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 
@@ -103,7 +108,11 @@
                                             &output_dims,
                                             output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 }
@@ -168,7 +177,11 @@
                                                         &output_dims,
                                                         output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, ctx.size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 
@@ -188,7 +201,11 @@
                                             &output_dims,
                                             output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 }
@@ -253,7 +270,11 @@
                                                         &output_dims,
                                                         output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, ctx.size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 
@@ -273,7 +294,11 @@
                                             &output_dims,
                                             output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 }
diff --git a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_depthwise_conv_s8/TestRunner/test_arm_depthwise_conv_s8_runner.c b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_depthwise_conv_s8/TestRunner/test_arm_depthwise_conv_s8_runner.c
deleted file mode 100644
index 001fe5a..0000000
--- a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_depthwise_conv_s8/TestRunner/test_arm_depthwise_conv_s8_runner.c
+++ /dev/null
@@ -1,72 +0,0 @@
-/* AUTOGENERATED FILE. DO NOT EDIT. */
-
-/*=======Automagically Detected Files To Include=====*/
-#include "Utils/validate.h"
-#include "basic/test_data.h"
-#include "stride2pad1/test_data.h"
-#include "unity.h"
-#include <arm_nnfunctions.h>
-
-/*=======External Functions This Runner Calls=====*/
-extern void setUp(void);
-extern void tearDown(void);
-
-/*=======Mock Management=====*/
-static void CMock_Init(void) {}
-static void CMock_Verify(void) {}
-static void CMock_Destroy(void) {}
-
-/*=======Setup (stub)=====*/
-void setUp(void) {}
-
-/*=======Teardown (stub)=====*/
-void tearDown(void) {}
-
-/*=======Test Reset Options=====*/
-void resetTest(void);
-void resetTest(void)
-{
-    tearDown();
-    CMock_Verify();
-    CMock_Destroy();
-    CMock_Init();
-    setUp();
-}
-void verifyTest(void);
-void verifyTest(void) { CMock_Verify(); }
-/*=======Test Runner Used To Run Each Test=====*/
-static void run_test(UnityTestFunction func, const char* name, int line_num)
-{
-    Unity.CurrentTestName = name;
-    Unity.CurrentTestLineNumber = line_num;
-#ifdef UNITY_USE_COMMAND_LINE_ARGS
-    if (!UnityTestMatches())
-        return;
-#endif
-    Unity.NumberOfTests++;
-    UNITY_CLR_DETAILS();
-    UNITY_EXEC_TIME_START();
-    CMock_Init();
-    if (TEST_PROTECT())
-    {
-
-        setUp();
-        func();
-    }
-    if (TEST_PROTECT())
-    {
-        tearDown();
-        CMock_Verify();
-    }
-    CMock_Destroy();
-    UNITY_EXEC_TIME_STOP();
-    UnityConcludeTest();
-}
-
-/*=======MAIN=====*/
-int main(void)
-{
-    UnityBegin("TestCases/test_arm_depthwise_conv_s8/test_arm_depthwise_conv_s8.c");
-
-    return UnityEnd();
-}
diff --git a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_depthwise_conv_s8/test_arm_depthwise_conv_s8.c b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_depthwise_conv_s8/test_arm_depthwise_conv_s8.c
index da7824b..bb9b92c 100644
--- a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_depthwise_conv_s8/test_arm_depthwise_conv_s8.c
+++ b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_depthwise_conv_s8/test_arm_depthwise_conv_s8.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010-2022 Arm Limited or its affiliates.
+ * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -100,7 +100,12 @@
                                                        &output_dims,
                                                        output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        // The caller is responsible for clearing the scratch buffers for security reasons, if applicable.
+        memset(ctx.buf, 0, ctx.size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, basic_output_ref, BASIC_DST_SIZE));
 
@@ -120,7 +125,11 @@
                                            &output_dims,
                                            output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, basic_output_ref, BASIC_DST_SIZE));
 }
@@ -183,7 +192,11 @@
                                                        &output_dims,
                                                        output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, ctx.size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, stride2pad1_output_ref, STRIDE2PAD1_DST_SIZE));
 
@@ -203,7 +216,11 @@
                                            &output_dims,
                                            output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, stride2pad1_output_ref, STRIDE2PAD1_DST_SIZE));
 }
@@ -266,7 +283,11 @@
                                                        &output_dims,
                                                        output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, ctx.size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, depthwise_2_output_ref, DEPTHWISE_2_DST_SIZE));
 
@@ -287,7 +308,11 @@
                                            &output_dims,
                                            output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, depthwise_2_output_ref, DEPTHWISE_2_DST_SIZE));
 }
@@ -349,7 +374,11 @@
                                                        &output_dims,
                                                        output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, ctx.size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, depthwise_out_activation_output_ref, DEPTHWISE_OUT_ACTIVATION_DST_SIZE));
 
@@ -368,7 +397,11 @@
                                    &output_dims,
                                    output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, ctx.size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, depthwise_out_activation_output_ref, DEPTHWISE_OUT_ACTIVATION_DST_SIZE));
 }
@@ -431,7 +464,11 @@
                                                        &output_dims,
                                                        output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, ctx.size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, depthwise_mult_batches_output_ref, DEPTHWISE_MULT_BATCHES_DST_SIZE));
 
@@ -450,7 +487,11 @@
                                    &output_dims,
                                    output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, ctx.size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, depthwise_mult_batches_output_ref, DEPTHWISE_MULT_BATCHES_DST_SIZE));
 }
@@ -513,7 +554,11 @@
                                                        &output_dims,
                                                        output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, ctx.size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, depthwise_null_bias_0_output_ref, DEPTHWISE_NULL_BIAS_0_DST_SIZE));
 }
@@ -576,7 +621,11 @@
                                                        &output_dims,
                                                        output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, ctx.size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, depthwise_null_bias_1_output_ref, DEPTHWISE_NULL_BIAS_1_DST_SIZE));
 }
@@ -639,7 +688,11 @@
                                                        &output_dims,
                                                        output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, ctx.size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, depthwise_dilation_output_ref, DEPTHWISE_DILATION_DST_SIZE));
 
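The clear-then-free pattern above recurs in every test case of this file. As a minimal sketch (not part of the patch), it could be factored into a small helper; the helper name is hypothetical, and it assumes the context's size field was set to the allocated size, which only some of the tests do:

    #include <stdlib.h>
    #include <string.h>

    #include <arm_nn_types.h> /* cmsis_nn_context: { void *buf; int32_t size; } */

    /* Hypothetical helper, for illustration only: CMSIS-NN leaves clearing of
     * the scratch buffer to the caller, so wipe it before releasing it. */
    static void release_scratch_buffer(cmsis_nn_context *ctx)
    {
        if (ctx->buf)
        {
            memset(ctx->buf, 0, ctx->size); /* clear intermediate data for security reasons */
            free(ctx->buf);
            ctx->buf = NULL;
            ctx->size = 0;
        }
    }

The tests keep the check inline instead, so each case can clear either ctx.size bytes or the buf_size returned by the corresponding get-buffer-size call.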
diff --git a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_depthwise_conv_s8_opt/TestRunner/test_arm_depthwise_conv_s8_opt_runner.c b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_depthwise_conv_s8_opt/TestRunner/test_arm_depthwise_conv_s8_opt_runner.c
deleted file mode 100644
index d2663f9..0000000
--- a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_depthwise_conv_s8_opt/TestRunner/test_arm_depthwise_conv_s8_opt_runner.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/* AUTOGENERATED FILE. DO NOT EDIT. */
-
-/*=======Automagically Detected Files To Include=====*/
-#include "Utils/validate.h"
-#include "basic/test_data.h"
-#include "stride2pad1/test_data.h"
-#include "unity.h"
-#include <arm_nnfunctions.h>
-#include <stdlib.h>
-
-/*=======External Functions This Runner Calls=====*/
-extern void setUp(void);
-extern void tearDown(void);
-
-/*=======Mock Management=====*/
-static void CMock_Init(void) {}
-static void CMock_Verify(void) {}
-static void CMock_Destroy(void) {}
-
-/*=======Setup (stub)=====*/
-void setUp(void) {}
-
-/*=======Teardown (stub)=====*/
-void tearDown(void) {}
-
-/*=======Test Reset Options=====*/
-void resetTest(void);
-void resetTest(void)
-{
-    tearDown();
-    CMock_Verify();
-    CMock_Destroy();
-    CMock_Init();
-    setUp();
-}
-void verifyTest(void);
-void verifyTest(void) { CMock_Verify(); }
-/*=======Test Runner Used To Run Each Test=====*/
-static void run_test(UnityTestFunction func, const char* name, int line_num)
-{
-    Unity.CurrentTestName = name;
-    Unity.CurrentTestLineNumber = line_num;
-#ifdef UNITY_USE_COMMAND_LINE_ARGS
-    if (!UnityTestMatches())
-        return;
-#endif
-    Unity.NumberOfTests++;
-    UNITY_CLR_DETAILS();
-    UNITY_EXEC_TIME_START();
-    CMock_Init();
-    if (TEST_PROTECT())
-    {
-
-        setUp();
-        func();
-    }
-    if (TEST_PROTECT())
-    {
-        tearDown();
-        CMock_Verify();
-    }
-    CMock_Destroy();
-    UNITY_EXEC_TIME_STOP();
-    UnityConcludeTest();
-}
-
-/*=======MAIN=====*/
-int main(void)
-{
-    UnityBegin("TestCases/test_arm_depthwise_conv_s8_opt/test_arm_depthwise_conv_s8_opt.c");
-
-    return UnityEnd();
-}
diff --git a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_depthwise_conv_s8_opt/test_arm_depthwise_conv_s8_opt.c b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_depthwise_conv_s8_opt/test_arm_depthwise_conv_s8_opt.c
index 51d0087..bfe6e0d 100644
--- a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_depthwise_conv_s8_opt/test_arm_depthwise_conv_s8_opt.c
+++ b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_depthwise_conv_s8_opt/test_arm_depthwise_conv_s8_opt.c
@@ -98,7 +98,12 @@
                                                            &output_dims,
                                                            output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        // The caller is responsible for clearing the scratch buffer for security reasons, if applicable.
+        memset(ctx.buf, 0, ctx.size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, basic_output_ref, BASIC_DST_SIZE));
 }
@@ -161,7 +166,11 @@
                                                            &output_dims,
                                                            output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, ctx.size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, stride2pad1_output_ref, STRIDE2PAD1_DST_SIZE));
 }
@@ -224,7 +233,11 @@
                                                            &output_dims,
                                                            output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, ctx.size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, depthwise_eq_in_out_ch_output_ref, DEPTHWISE_EQ_IN_OUT_CH_DST_SIZE));
 }
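For context, the buffer being cleared is the scratch area sized by the corresponding get-buffer-size query. A hedged sketch of the full allocate/use/clear sequence for the s8 depthwise wrapper follows; the function below is illustrative only and assumes the parameter structs have been filled in the same way the tests do:

    #include <stdlib.h>
    #include <string.h>

    #include <arm_nnfunctions.h>

    /* Illustrative only: mirrors what the updated tests do inline. */
    static void dw_conv_s8_with_cleared_scratch(const cmsis_nn_dw_conv_params *dw_conv_params,
                                                const cmsis_nn_per_channel_quant_params *quant_params,
                                                const cmsis_nn_dims *input_dims, const int8_t *input,
                                                const cmsis_nn_dims *filter_dims, const int8_t *kernel,
                                                const cmsis_nn_dims *bias_dims, const int32_t *bias,
                                                const cmsis_nn_dims *output_dims, int8_t *output)
    {
        cmsis_nn_context ctx;
        const int32_t buf_size =
            arm_depthwise_conv_wrapper_s8_get_buffer_size(dw_conv_params, input_dims, filter_dims, output_dims);
        ctx.buf = malloc(buf_size);
        ctx.size = buf_size;

        arm_depthwise_conv_wrapper_s8(&ctx, dw_conv_params, quant_params, input_dims, input, filter_dims, kernel,
                                      bias_dims, bias, output_dims, output);

        if (ctx.buf)
        {
            /* The caller, not the library, is responsible for clearing the scratch buffer. */
            memset(ctx.buf, 0, buf_size);
            free(ctx.buf);
        }
    }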
diff --git a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_elementwise_mul_s16/test_arm_elementwise_mul_s16.c b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_elementwise_mul_s16/test_arm_elementwise_mul_s16.c
index 6648487..49bd3f4 100644
--- a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_elementwise_mul_s16/test_arm_elementwise_mul_s16.c
+++ b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_elementwise_mul_s16/test_arm_elementwise_mul_s16.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2022 Arm Limited or its affiliates.
+ * SPDX-FileCopyrightText: Copyright 2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
  *
  * SPDX-License-Identifier: Apache-2.0
  *
diff --git a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_fully_connected_s16/test_arm_fully_connected_s16.c b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_fully_connected_s16/test_arm_fully_connected_s16.c
index 88ddcb4..2524ebb 100644
--- a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_fully_connected_s16/test_arm_fully_connected_s16.c
+++ b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_fully_connected_s16/test_arm_fully_connected_s16.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010-2022 Arm Limited or its affiliates.
+ * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -80,7 +80,12 @@
                                                          &output_dims,
                                                          output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        // The caller is responsible for clearing the scratch buffer for security reasons, if applicable.
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 }
@@ -140,7 +145,11 @@
                                                          &output_dims,
                                                          output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 }
@@ -200,7 +209,11 @@
                                                          &output_dims,
                                                          output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate_s16(output, output_ref, output_ref_size));
 }
diff --git a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_fully_connected_s8/test_arm_fully_connected_s8.c b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_fully_connected_s8/test_arm_fully_connected_s8.c
index b525c38..f09b7b9 100644
--- a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_fully_connected_s8/test_arm_fully_connected_s8.c
+++ b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_fully_connected_s8/test_arm_fully_connected_s8.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010-2022 Arm Limited or its affiliates.
+ * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -80,7 +80,12 @@
                                                         &output_dims,
                                                         output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        // The caller is responsible for clearing the scratch buffer for security reasons, if applicable.
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 }
@@ -132,7 +137,11 @@
                                                         &output_dims,
                                                         output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 }
@@ -184,7 +193,11 @@
                                                         &output_dims,
                                                         output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 }
@@ -247,7 +260,11 @@
                                                         &output_dims,
                                                         output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 }
@@ -299,7 +316,11 @@
                                                         &output_dims,
                                                         output);
 
-    free(ctx.buf);
+    if (ctx.buf)
+    {
+        memset(ctx.buf, 0, buf_size);
+        free(ctx.buf);
+    }
     TEST_ASSERT_EQUAL(expected, result);
     TEST_ASSERT_TRUE(validate(output, output_ref, output_ref_size));
 }
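One caveat for callers that rely on the wipe for security: a memset() immediately followed by free() is, in principle, a dead store that an optimizing compiler may remove. The test code above is fine as documentation of the contract, but a caller that must guarantee the wipe could use a clear the compiler cannot elide. A sketch under that assumption (the helper name is hypothetical):

    #include <stddef.h>
    #include <stdlib.h>

    /* Hypothetical helper: the volatile qualification forces every store to be
     * performed, so the wipe cannot be optimized away before free(). */
    static void secure_clear_and_free(void *buf, size_t size)
    {
        if (buf)
        {
            volatile unsigned char *p = (volatile unsigned char *)buf;
            while (size--)
            {
                *p++ = 0;
            }
            free(buf);
        }
    }

Where the C library provides C11 Annex K, memset_s() offers the same guarantee.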
diff --git a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_max_pool_s8/test_arm_max_pool_s8.c b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_max_pool_s8/test_arm_max_pool_s8.c
index fa5aef1..49fb5c4 100644
--- a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_max_pool_s8/test_arm_max_pool_s8.c
+++ b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_max_pool_s8/test_arm_max_pool_s8.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010-2022 Arm Limited or its affiliates.
+ * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
  *
  * SPDX-License-Identifier: Apache-2.0
  *
diff --git a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_softmax_s16/test_arm_softmax_s16.c b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_softmax_s16/test_arm_softmax_s16.c
index aa816f3..a3f61fb 100644
--- a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_softmax_s16/test_arm_softmax_s16.c
+++ b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_softmax_s16/test_arm_softmax_s16.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010-2022 Arm Limited or its affiliates.
+ * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
  *
  * SPDX-License-Identifier: Apache-2.0
  *
diff --git a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_softmax_s8/test_arm_softmax_s8.c b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_softmax_s8/test_arm_softmax_s8.c
index 4770553..1d5dd1b 100644
--- a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_softmax_s8/test_arm_softmax_s8.c
+++ b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_softmax_s8/test_arm_softmax_s8.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010-2022 Arm Limited or its affiliates.
+ * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
  *
  * SPDX-License-Identifier: Apache-2.0
  *
diff --git a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_softmax_s8_s16/test_arm_softmax_s8_s16.c b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_softmax_s8_s16/test_arm_softmax_s8_s16.c
index a7c35b5..3ddab6d 100644
--- a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_softmax_s8_s16/test_arm_softmax_s8_s16.c
+++ b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_softmax_s8_s16/test_arm_softmax_s8_s16.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010-2022 Arm Limited or its affiliates.
+ * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
  *
  * SPDX-License-Identifier: Apache-2.0
  *
diff --git a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_svdf_state_s16_s8/test_arm_svdf_state_s16_s8.c b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_svdf_state_s16_s8/test_arm_svdf_state_s16_s8.c
index ea52234..5aa4b05 100644
--- a/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_svdf_state_s16_s8/test_arm_svdf_state_s16_s8.c
+++ b/CMSIS/NN/Tests/UnitTest/TestCases/test_arm_svdf_state_s16_s8/test_arm_svdf_state_s16_s8.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010-2022 Arm Limited or its affiliates.
+ * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
  *
  * SPDX-License-Identifier: Apache-2.0
  *