1 /* ----------------------------------------------------------------------
2 * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
4 * $Date: 17. January 2013
7 * Project: CMSIS DSP Library
8 * Title: arm_conv_opt_q15.c
10 * Description: Convolution of Q15 sequences.
12 * Target Processor: Cortex-M4/Cortex-M3/Cortex-M0
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are met:
17 * - Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the
23 * - Neither the name of ARM LIMITED nor the names of its contributors
24 * may be used to endorse or promote products derived from this
25 * software without specific prior written permission.
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
30 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
31 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
32 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
33 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
34 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
35 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
37 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38 * POSSIBILITY OF SUCH DAMAGE.
39 * -------------------------------------------------------------------- */
44 * @ingroup groupFilters
53 * @brief Convolution of Q15 sequences.
54 * @param[in] *pSrcA points to the first input sequence.
55 * @param[in] srcALen length of the first input sequence.
56 * @param[in] *pSrcB points to the second input sequence.
57 * @param[in] srcBLen length of the second input sequence.
58 * @param[out] *pDst points to the location where the output result is written. Length srcALen+srcBLen-1.
59 * @param[in] *pScratch1 points to scratch buffer of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2.
60 * @param[in] *pScratch2 points to scratch buffer of size min(srcALen, srcBLen).
64 * If the silicon does not support unaligned memory access, enable the macro UNALIGNED_SUPPORT_DISABLE.
65 * In this case the input, output, scratch1 and scratch2 buffers should be 32-bit aligned
69 * <b>Scaling and Overflow Behavior:</b>
72 * The function is implemented using a 64-bit internal accumulator.
73 * Both inputs are in 1.15 format and multiplications yield a 2.30 result.
74 * The 2.30 intermediate results are accumulated in a 64-bit accumulator in 34.30 format.
75 * This approach provides 33 guard bits and there is no risk of overflow.
76 * The 34.30 result is then truncated to 34.15 format by discarding the low 15 bits and then saturated to 1.15 format.
80 * Refer to <code>arm_conv_fast_q15()</code> for a faster but less precise version of this function for Cortex-M3 and Cortex-M4.
/* arm_conv_opt_q15: convolution of two Q15 sequences using two scratch buffers.
 * The shorter sequence is copied in reverse order into scratch2; the longer one
 * is copied into scratch1 padded with (srcBLen - 1) zeros on both sides, so the
 * inner loops can run a straight multiply-accumulate without edge tests.
 * NOTE(review): this view is an elided extract — the parameter list and several
 * statements between the numbered lines are not visible here; code below is
 * kept byte-identical, only comments were corrected. */
85 void arm_conv_opt_q15(
94 q63_t acc0, acc1, acc2, acc3; /* Accumulators for the four output samples computed per pass */
95 q31_t x1, x2, x3; /* Temporary variables to hold state and coefficient values */
96 q31_t y1, y2; /* State variables */
97 q15_t *pOut = pDst; /* output pointer */
98 q15_t *pScr1 = pScratch1; /* Temporary pointer for scratch1 */
99 q15_t *pScr2 = pScratch2; /* Temporary pointer for scratch2 */
100 q15_t *pIn1; /* inputA pointer */
101 q15_t *pIn2; /* inputB pointer */
102 q15_t *px; /* Intermediate inputA pointer */
103 q15_t *py; /* Intermediate inputB pointer */
104 uint32_t j, k, blkCnt; /* loop counter */
105 uint32_t tapCnt; /* loop count */
106 #ifdef UNALIGNED_SUPPORT_DISABLE
110 #endif /* #ifdef UNALIGNED_SUPPORT_DISABLE */
112 /* The algorithm implementation is based on the lengths of the inputs. */
113 /* srcB is always made to slide across srcA. */
114 /* So srcBLen is always considered as shorter or equal to srcALen */
115 if(srcALen >= srcBLen)
117 /* Initialization of inputA pointer */
120 /* Initialization of inputB pointer */
126 /* Initialization of inputA pointer (inputs swapped so the shorter one slides) */
129 /* Initialization of inputB pointer */
132 /* srcBLen is always considered as shorter or equal to srcALen */
138 /* pointer to the end of the scratch2 buffer */
139 pScr2 = pScratch2 + srcBLen - 1;
141 /* points to the smaller length sequence */
144 /* Apply loop unrolling and do 4 copies simultaneously. */
147 /* First part of the processing with loop unrolling copies 4 data points at a time.
148 ** a second loop below copies the remaining 1 to 3 samples. */
149 /* Copy the smaller length input sequence in reverse order into the second scratch buffer */
152 /* copy second buffer in reverse order */
158 /* Decrement the loop counter */
162 /* If the count is not a multiple of 4, copy remaining samples here.
163 ** No loop unrolling is used. */
168 /* copy second buffer in reverse order for the remaining samples */
171 /* Decrement the loop counter */
175 /* Initialize temporary scratch pointer */
178 /* Scratch1 buffer is assumed to be 32-bit aligned — TODO confirm against caller */
179 /* Fill (srcBLen - 1u) zeros in scratch buffer (left zero padding) */
180 arm_fill_q15(0, pScr1, (srcBLen - 1u));
182 /* Update temporary scratch pointer */
183 pScr1 += (srcBLen - 1u);
185 /* Copy the bigger length sequence (srcALen) samples into the scratch1 buffer */
187 #ifndef UNALIGNED_SUPPORT_DISABLE
189 /* Copy (srcALen) samples in scratch buffer */
190 arm_copy_q15(pIn1, pScr1, srcALen);
192 /* Update pointers */
197 /* Apply loop unrolling and do 4 copies simultaneously. */
200 /* First part of the processing with loop unrolling copies 4 data points at a time.
201 ** a second loop below copies the remaining 1 to 3 samples. */
204 /* copy second buffer in reverse order */
210 /* Decrement the loop counter */
214 /* If the count is not a multiple of 4, copy remaining samples here.
215 ** No loop unrolling is used. */
220 /* copy second buffer in reverse order for the remaining samples */
223 /* Decrement the loop counter */
230 #ifndef UNALIGNED_SUPPORT_DISABLE
232 /* Fill (srcBLen - 1u) zeros at the end of the scratch buffer (right zero padding) */
233 arm_fill_q15(0, pScr1, (srcBLen - 1u));
236 pScr1 += (srcBLen - 1u);
240 /* Apply loop unrolling and do 4 copies simultaneously. */
241 k = (srcBLen - 1u) >> 2u;
243 /* First part of the processing with loop unrolling copies 4 data points at a time.
244 ** a second loop below copies the remaining 1 to 3 samples. */
247 /* copy second buffer in reverse order */
253 /* Decrement the loop counter */
257 /* If the count is not a multiple of 4, copy remaining samples here.
258 ** No loop unrolling is used. */
259 k = (srcBLen - 1u) % 0x4u;
263 /* copy second buffer in reverse order for the remaining samples */
266 /* Decrement the loop counter */
272 /* Temporary pointer for scratch2 */
276 /* Initialization of pIn2 pointer */
279 /* First part of the processing with loop unrolling processes 4 output points at a time.
280 ** a second loop below processes the remaining 1 to 3 samples. */
282 /* Actual convolution process starts here */
283 blkCnt = (srcALen + srcBLen - 1u) >> 2;
287 /* Initialize temporary scratch pointer as scratch1 */
290 /* Clear accumulators */
296 /* Read two samples from scratch1 buffer */
297 x1 = *__SIMD32(pScr1)++;
299 /* Read next two samples from scratch1 buffer */
300 x2 = *__SIMD32(pScr1)++;
302 tapCnt = (srcBLen) >> 2u;
307 #ifndef UNALIGNED_SUPPORT_DISABLE
309 /* Read four samples from the smaller (reversed) buffer */
310 y1 = _SIMD32_OFFSET(pIn2);
311 y2 = _SIMD32_OFFSET(pIn2 + 2u);
313 /* multiply and accumulate */
314 acc0 = __SMLALD(x1, y1, acc0);
315 acc2 = __SMLALD(x2, y1, acc2);
317 /* pack input data (cross-pair for the odd-index outputs) */
318 #ifndef ARM_MATH_BIG_ENDIAN
319 x3 = __PKHBT(x2, x1, 0);
321 x3 = __PKHBT(x1, x2, 0);
324 /* multiply and accumulate */
325 acc1 = __SMLALDX(x3, y1, acc1);
327 /* Read next two samples from scratch1 buffer */
328 x1 = _SIMD32_OFFSET(pScr1);
330 /* multiply and accumulate */
331 acc0 = __SMLALD(x2, y2, acc0);
332 acc2 = __SMLALD(x1, y2, acc2);
334 /* pack input data */
335 #ifndef ARM_MATH_BIG_ENDIAN
336 x3 = __PKHBT(x1, x2, 0);
338 x3 = __PKHBT(x2, x1, 0);
341 acc3 = __SMLALDX(x3, y1, acc3);
342 acc1 = __SMLALDX(x3, y2, acc1);
344 x2 = _SIMD32_OFFSET(pScr1 + 2u);
346 #ifndef ARM_MATH_BIG_ENDIAN
347 x3 = __PKHBT(x2, x1, 0);
349 x3 = __PKHBT(x1, x2, 0);
352 acc3 = __SMLALDX(x3, y2, acc3);
356 /* Read four samples from the smaller buffer (unaligned path: byte loads packed below) */
360 #ifndef ARM_MATH_BIG_ENDIAN
361 y1 = __PKHBT(a, b, 16);
363 y1 = __PKHBT(b, a, 16);
368 #ifndef ARM_MATH_BIG_ENDIAN
369 y2 = __PKHBT(a, b, 16);
371 y2 = __PKHBT(b, a, 16);
374 acc0 = __SMLALD(x1, y1, acc0);
376 acc2 = __SMLALD(x2, y1, acc2);
378 #ifndef ARM_MATH_BIG_ENDIAN
379 x3 = __PKHBT(x2, x1, 0);
381 x3 = __PKHBT(x1, x2, 0);
384 acc1 = __SMLALDX(x3, y1, acc1);
389 #ifndef ARM_MATH_BIG_ENDIAN
390 x1 = __PKHBT(a, b, 16);
392 x1 = __PKHBT(b, a, 16);
395 acc0 = __SMLALD(x2, y2, acc0);
397 acc2 = __SMLALD(x1, y2, acc2);
399 #ifndef ARM_MATH_BIG_ENDIAN
400 x3 = __PKHBT(x1, x2, 0);
402 x3 = __PKHBT(x2, x1, 0);
405 acc3 = __SMLALDX(x3, y1, acc3);
407 acc1 = __SMLALDX(x3, y2, acc1);
412 #ifndef ARM_MATH_BIG_ENDIAN
413 x2 = __PKHBT(a, b, 16);
415 x2 = __PKHBT(b, a, 16);
418 #ifndef ARM_MATH_BIG_ENDIAN
419 x3 = __PKHBT(x2, x1, 0);
421 x3 = __PKHBT(x1, x2, 0);
424 acc3 = __SMLALDX(x3, y2, acc3);
426 #endif /* #ifndef UNALIGNED_SUPPORT_DISABLE */
432 /* Decrement the loop counter */
436 /* Update scratch pointer for the remaining samples of the smaller length sequence */
439 /* apply the same as above for the remaining samples of the smaller length sequence */
440 tapCnt = (srcBLen) & 3u;
445 /* accumulate the results */
446 acc0 += (*pScr1++ * *pIn2);
447 acc1 += (*pScr1++ * *pIn2);
448 acc2 += (*pScr1++ * *pIn2);
449 acc3 += (*pScr1++ * *pIn2++);
453 /* Decrement the loop counter */
460 /* Store the results from the accumulators in the destination buffer,
    saturated from 34.30 down to packed 1.15 pairs. */
462 #ifndef ARM_MATH_BIG_ENDIAN
465 __PKHBT(__SSAT((acc0 >> 15), 16), __SSAT((acc1 >> 15), 16), 16);
468 __PKHBT(__SSAT((acc2 >> 15), 16), __SSAT((acc3 >> 15), 16), 16);
473 __PKHBT(__SSAT((acc1 >> 15), 16), __SSAT((acc0 >> 15), 16), 16);
476 __PKHBT(__SSAT((acc3 >> 15), 16), __SSAT((acc2 >> 15), 16), 16);
479 #endif /* #ifndef ARM_MATH_BIG_ENDIAN */
481 /* Initialization of inputB pointer */
489 blkCnt = (srcALen + srcBLen - 1u) & 0x3;
491 /* Calculate convolution for the remaining samples of the bigger length sequence */
494 /* Initialize temporary scratch pointer as scratch1 */
497 /* Clear accumulators */
500 tapCnt = (srcBLen) >> 1u;
505 /* Read next two samples from scratch1 buffer */
506 acc0 += (*pScr1++ * *pIn2++);
507 acc0 += (*pScr1++ * *pIn2++);
509 /* Decrement the loop counter */
513 tapCnt = (srcBLen) & 1u;
515 /* apply the same as above for the remaining samples of the smaller length sequence */
519 /* accumulate the results */
520 acc0 += (*pScr1++ * *pIn2++);
522 /* Decrement the loop counter */
528 /* The result is in 2.30 format. Convert to 1.15 with saturation.
529 ** Then store the output in the destination buffer. */
530 *pOut++ = (q15_t) (__SSAT((acc0 >> 15), 16));
533 /* Initialization of inputB pointer */
544 * @} end of Conv group