/AliOS-Things-master/components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/
A D | sse_tensor_utils.cc |
    98  const int8_t* __restrict__ vectors,    in SseMatrixBatchVectorMultiplyAccumulateImpl() argument
   163  sum += row_ptr[col] * vectors[col];    in SseMatrixBatchVectorMultiplyAccumulateImpl()
   172  vectors += m_cols;    in SseMatrixBatchVectorMultiplyAccumulateImpl()
   210  const int8_t* __restrict__ vectors,    in SseMatrixBatchVectorMultiplyAccumulate() argument
   221  const int8_t* __restrict__ vectors,    in SseMatrixBatchVectorMultiplyAccumulate() argument
   272  const int8_t* __restrict__ vectors,    in SseMatrixBatchVectorMultiplyAccumulate() argument
   332  const int8_t* __restrict__ vector0 = vectors + 0 * m_cols;    in SseSparseMatrix4VectorsMultiplyAccumulate()
   333  const int8_t* __restrict__ vector1 = vectors + 1 * m_cols;    in SseSparseMatrix4VectorsMultiplyAccumulate()
   334  const int8_t* __restrict__ vector2 = vectors + 2 * m_cols;    in SseSparseMatrix4VectorsMultiplyAccumulate()
   405  vectors += kBatchSize4 * m_cols;    in SseSparseMatrixBatchVectorMultiplyAccumulate()
   [all …]
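These hits are the SSE path of TFLite Micro's hybrid matrix-times-batch-of-vectors kernel: int8 weights and int8 quantized inputs are multiplied and accumulated as integers, then scaled back to float with one scaling factor per batch. A rough scalar model of what the kernel computes, using the names visible in the snippets (illustrative sketch only, not the SSE implementation; the batch-major result layout is assumed from the reference code):

    #include <cstdint>

    // Row-major matrix of shape m_rows x m_cols; `vectors` holds n_batch
    // input vectors of length m_cols back to back.
    void MatrixBatchVectorMultiplyAccumulateSketch(
        const int8_t* matrix, int m_rows, int m_cols,
        const int8_t* vectors, const float* scaling_factors,
        int n_batch, float* result) {
      for (int batch = 0; batch < n_batch; ++batch, vectors += m_cols) {
        for (int row = 0; row < m_rows; ++row) {
          const int8_t* row_ptr = matrix + row * m_cols;
          int32_t sum = 0;
          for (int col = 0; col < m_cols; ++col) {
            sum += row_ptr[col] * vectors[col];  // int8 x int8 -> int32
          }
          // One scaling factor per batch converts the integer sum to float.
          result[batch * m_rows + row] += sum * scaling_factors[batch];
        }
      }
    }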
|
A D | sse_tensor_utils_impl.h |
    34  const int8_t* __restrict__ vectors,
    42  const int8_t* __restrict__ vectors,
    49  const int8_t* __restrict__ vectors,
    59  const int m_rows, const int m_cols, const int8_t* __restrict__ vectors,
|
A D | neon_tensor_utils.h |
    36  const int8_t* __restrict__ vectors,    in MatrixBatchVectorMultiplyAccumulate() argument
    41  vectors, scaling_factors, n_batch, result);    in MatrixBatchVectorMultiplyAccumulate()
    46  const int8_t* __restrict__ vectors,    in MatrixBatchVectorMultiplyAccumulate() argument
    52  vectors, scaling_factors, n_batch, scratch, result, context);    in MatrixBatchVectorMultiplyAccumulate()
    57  const int8_t* __restrict__ vectors, const float* scaling_factors,    in MatrixBatchVectorMultiplyAccumulate() argument
    62  vectors, scaling_factors, n_batch, result, per_channel_scale,    in MatrixBatchVectorMultiplyAccumulate()
    84  const int m_cols, const int8_t* __restrict__ vectors,    in SparseMatrixBatchVectorMultiplyAccumulate() argument
    87  m_rows, m_cols, vectors, scaling_factors, n_batch, result);    in SparseMatrixBatchVectorMultiplyAccumulate()
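The neon_tensor_utils.h and sse_tensor_utils.h hits are thin dispatch wrappers: each MatrixBatchVectorMultiplyAccumulate overload simply forwards `vectors`, `scaling_factors`, `n_batch` and the other arguments either to the SIMD kernel (NEON or SSE) or to the portable reference implementation. A generic sketch of that pattern, with stand-in kernel declarations and an assumed USE_NEON guard:

    #include <cstdint>

    // Stand-in declarations; the real kernels live in the *_tensor_utils_impl.h
    // and portable_tensor_utils_impl.h headers listed in this directory.
    void NeonMatrixBatchVectorMultiplyAccumulate(const int8_t* matrix, int m_rows,
                                                 int m_cols, const int8_t* vectors,
                                                 const float* scaling_factors,
                                                 int n_batch, float* result);
    void PortableMatrixBatchVectorMultiplyAccumulate(
        const int8_t* matrix, int m_rows, int m_cols, const int8_t* vectors,
        const float* scaling_factors, int n_batch, float* result);

    inline void MatrixBatchVectorMultiplyAccumulate(
        const int8_t* __restrict__ matrix, int m_rows, int m_cols,
        const int8_t* __restrict__ vectors, const float* scaling_factors,
        int n_batch, float* __restrict__ result) {
    #ifdef USE_NEON
      NeonMatrixBatchVectorMultiplyAccumulate(matrix, m_rows, m_cols, vectors,
                                              scaling_factors, n_batch, result);
    #else
      PortableMatrixBatchVectorMultiplyAccumulate(matrix, m_rows, m_cols, vectors,
                                                  scaling_factors, n_batch, result);
    #endif
    }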
|
A D | sse_tensor_utils.h |
    46  const int8_t* __restrict__ vectors,    in MatrixBatchVectorMultiplyAccumulate() argument
    50  vectors, scaling_factors, n_batch, result);    in MatrixBatchVectorMultiplyAccumulate()
    55  const int8_t* __restrict__ vectors, const float* scaling_factors,    in MatrixBatchVectorMultiplyAccumulate() argument
    60  vectors, scaling_factors, n_batch, result, per_channel_scale,    in MatrixBatchVectorMultiplyAccumulate()
    66  const int8_t* __restrict__ vectors,    in MatrixBatchVectorMultiplyAccumulate() argument
    71  vectors, scaling_factors, n_batch, scratch, result, context);    in MatrixBatchVectorMultiplyAccumulate()
    92  const int m_rows, const int m_cols, const int8_t* __restrict__ vectors,    in SparseMatrixBatchVectorMultiplyAccumulate() argument
    96  m_rows, m_cols, vectors, scaling_factors, n_batch, result);    in SparseMatrixBatchVectorMultiplyAccumulate()
|
A D | neon_tensor_utils_impl.h |
    39  const int8_t* __restrict__ vectors,
    48  const int8_t* __restrict__ vectors,
    57  const int8_t* __restrict__ vectors, const float* scaling_factors,
   122  const int m_cols, const int8_t* __restrict__ vectors,
|
A D | neon_tensor_utils.cc |
   249  const int8_t* ShuffleVectors(const int8_t* vectors, const int n_batch,    in ShuffleVectors() argument
   257  reinterpret_cast<const int8*>(vectors) + (i * m_cols);    in ShuffleVectors()
   259  reinterpret_cast<const int8*>(vectors) + ((i + 1) * m_cols);    in ShuffleVectors()
   261  reinterpret_cast<const int8*>(vectors) + ((i + 2) * m_cols);    in ShuffleVectors()
   263  reinterpret_cast<const int8*>(vectors) + ((i + 3) * m_cols);    in ShuffleVectors()
   304  const int8_t* vectors, const float* scaling_factors, int n_batch,    in DotprodMatrixBatchFourVectorMultiplyAccumulate() argument
   432  const int8_t* vectors, const float* scaling_factors, int n_batch,    in DotprodMatrixBatchFourVectorMultiplyAccumulate() argument
   622  memcpy(padded_vectors, vectors, n_batch * m_cols);    in DotprodMatrixBatchPaddedFourVectorMultiplyAccumulate()
   677  const int m_cols, const int8_t* __restrict__ vectors,    in DotprodSparseMatrixBatchVectorMultiplyAccumulate() argument
   690  const int8* vec_ptr = vectors + (batch * m_cols);    in DotprodSparseMatrixBatchVectorMultiplyAccumulate()
   [all …]
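ShuffleVectors gathers four batch vectors at a time (the offsets i*m_cols through (i+3)*m_cols above) and interleaves them so the Arm dot-product kernels can walk all four with consecutive loads; the padded variant at line 622 first copies the batch into a zero-padded buffer so n_batch becomes a multiple of four. A scalar sketch of the interleaving, with the 16-byte chunk size assumed for illustration (the NEON code does this with vector loads and stores):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Interleave four vectors of length m_cols (assumed to be a multiple of
    // kChunk) into chunk-sized groups: [v0 c0][v1 c0][v2 c0][v3 c0][v0 c1]...
    std::vector<int8_t> ShuffleFourVectorsSketch(const int8_t* vectors, int m_cols) {
      constexpr int kChunk = 16;  // assumed chunk width
      std::vector<int8_t> shuffled(4 * m_cols);
      const int8_t* vec[4] = {vectors, vectors + m_cols, vectors + 2 * m_cols,
                              vectors + 3 * m_cols};
      int8_t* out = shuffled.data();
      for (int col = 0; col < m_cols; col += kChunk) {
        for (int v = 0; v < 4; ++v) {
          std::memcpy(out, vec[v] + col, kChunk);
          out += kChunk;
        }
      }
      return shuffled;
    }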
|
/AliOS-Things-master/components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/
A D | tensor_utils.h |
    44  const int8_t* __restrict__ vectors,
    52  const int8_t* __restrict__ vectors, const float* scaling_factors,
    62  const int8_t* __restrict__ vectors, const float matrix_scaling_factor,    in MatrixBatchVectorMultiplyAccumulate() argument
    72  MatrixBatchVectorMultiplyAccumulate(matrix, m_rows, m_cols, vectors,    in MatrixBatchVectorMultiplyAccumulate()
|
/AliOS-Things-master/components/csi/CMSIS/Core/Include/ |
A D | core_cm0plus.h |
   953  uint32_t *vectors = (uint32_t *)SCB->VTOR;    in __NVIC_SetVector() local
   954  vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector;    in __NVIC_SetVector()
   956  …uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */    in __NVIC_SetVector()
   957  …*(vectors + (int32_t)IRQn) = vector; /* use pointer arithmetic to acc…    in __NVIC_SetVector()
   974  uint32_t *vectors = (uint32_t *)SCB->VTOR;    in __NVIC_GetVector() local
   975  return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET];    in __NVIC_GetVector()
   977  …uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */    in __NVIC_GetVector()
   978  …return *(vectors + (int32_t)IRQn); /* use pointer arithmetic to acc…    in __NVIC_GetVector()
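All of the CMSIS core_*.h hits implement the same idea: the interrupt vector table is an array of 32-bit handler addresses, the first NVIC_USER_IRQ_OFFSET (16) entries belong to system exceptions, and device interrupt IRQn lives at index IRQn + 16. When the core has a vector table offset register the base is read from SCB->VTOR; otherwise a fixed base address is used. A stand-alone sketch of the index arithmetic, with a plain array standing in for the hardware table:

    #include <cstdint>

    constexpr int32_t kUserIrqOffset = 16;  // NVIC_USER_IRQ_OFFSET in CMSIS

    // Model of __NVIC_SetVector(): store the handler address for IRQn in the
    // word-indexed table, past the 16 system-exception entries.
    void SetVectorSketch(uint32_t* vector_table, int32_t irqn, uint32_t handler) {
      vector_table[irqn + kUserIrqOffset] = handler;
    }

    // Model of __NVIC_GetVector(): read the same entry back.
    uint32_t GetVectorSketch(const uint32_t* vector_table, int32_t irqn) {
      return vector_table[irqn + kUserIrqOffset];
    }

On real hardware `vector_table` would be `(uint32_t *)SCB->VTOR`, and the table has to sit in writable, suitably aligned memory before handlers can be patched at run time.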
|
A D | core_cm0.h |
   834  …uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */    in __NVIC_SetVector() local
   835  …*(vectors + (int32_t)IRQn) = vector; /* use pointer arithmetic to acc…    in __NVIC_SetVector()
   850  …uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */    in __NVIC_GetVector() local
   851  …return *(vectors + (int32_t)IRQn); /* use pointer arithmetic to acc…    in __NVIC_GetVector()
|
A D | core_sc000.h |
   912  uint32_t *vectors = (uint32_t *)SCB->VTOR;    in __NVIC_SetVector() local
   913  vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector;    in __NVIC_SetVector()
   928  uint32_t *vectors = (uint32_t *)SCB->VTOR;    in __NVIC_GetVector() local
   929  return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET];    in __NVIC_GetVector()
|
/AliOS-Things-master/components/ai_agent/src/engine/tflite-micro/third_party/cmsis/CMSIS/Core/Include/ |
A D | core_cm0plus.h |
   953  uint32_t *vectors = (uint32_t *)SCB->VTOR;    in __NVIC_SetVector() local
   954  vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector;    in __NVIC_SetVector()
   956  …uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */    in __NVIC_SetVector()
   957  …*(vectors + (int32_t)IRQn) = vector; /* use pointer arithmetic to acc…    in __NVIC_SetVector()
   974  uint32_t *vectors = (uint32_t *)SCB->VTOR;    in __NVIC_GetVector() local
   975  return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET];    in __NVIC_GetVector()
   977  …uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */    in __NVIC_GetVector()
   978  …return *(vectors + (int32_t)IRQn); /* use pointer arithmetic to acc…    in __NVIC_GetVector()
|
A D | core_cm0.h |
   834  …uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */    in __NVIC_SetVector() local
   835  …*(vectors + (int32_t)IRQn) = vector; /* use pointer arithmetic to acc…    in __NVIC_SetVector()
   850  …uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */    in __NVIC_GetVector() local
   851  …return *(vectors + (int32_t)IRQn); /* use pointer arithmetic to acc…    in __NVIC_GetVector()
|
/AliOS-Things-master/components/ai_agent/src/engine/tflite-micro/tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/Core/Include/ |
A D | core_cm0plus.h |
   953  uint32_t *vectors = (uint32_t *)SCB->VTOR;    in __NVIC_SetVector() local
   954  vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector;    in __NVIC_SetVector()
   956  …uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */    in __NVIC_SetVector()
   957  …*(vectors + (int32_t)IRQn) = vector; /* use pointer arithmetic to acc…    in __NVIC_SetVector()
   974  uint32_t *vectors = (uint32_t *)SCB->VTOR;    in __NVIC_GetVector() local
   975  return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET];    in __NVIC_GetVector()
   977  …uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */    in __NVIC_GetVector()
   978  …return *(vectors + (int32_t)IRQn); /* use pointer arithmetic to acc…    in __NVIC_GetVector()
|
A D | core_cm0.h |
   834  …uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */    in __NVIC_SetVector() local
   835  …*(vectors + (int32_t)IRQn) = vector; /* use pointer arithmetic to acc…    in __NVIC_SetVector()
   850  …uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */    in __NVIC_GetVector() local
   851  …return *(vectors + (int32_t)IRQn); /* use pointer arithmetic to acc…    in __NVIC_GetVector()
|
/AliOS-Things-master/hardware/chip/haas1000/drivers/platform/cmsis/inc/ |
A D | core_cm0plus.h |
   951  uint32_t vectors = SCB->VTOR;    in __NVIC_SetVector() local
   953  uint32_t vectors = 0x0U;    in __NVIC_SetVector()
   955  (* (int *) (vectors + ((int32_t)IRQn + NVIC_USER_IRQ_OFFSET) * 4)) = vector;    in __NVIC_SetVector()
   970  uint32_t vectors = SCB->VTOR;    in __NVIC_GetVector() local
   972  uint32_t vectors = 0x0U;    in __NVIC_GetVector()
   974  return (uint32_t)(* (int *) (vectors + ((int32_t)IRQn + NVIC_USER_IRQ_OFFSET) * 4));    in __NVIC_GetVector()
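The haas1000 and py_engine copies keep `vectors` as a raw 32-bit address (SCB->VTOR when the core has it, the fixed value 0x0U on Cortex-M0) and reach the entry with explicit byte arithmetic instead of a uint32_t pointer; the slot that gets written is the same either way. A minimal equivalence sketch (uintptr_t is used so the sketch also builds on 64-bit hosts; the MCU code uses uint32_t):

    #include <cstdint>

    // Word-indexed form, as in the CSI/CMSIS copies of __NVIC_SetVector():
    uint32_t* WordIndexed(uint32_t* table, int32_t irqn) {
      return &table[irqn + 16];  // table[IRQn + NVIC_USER_IRQ_OFFSET]
    }

    // Byte-arithmetic form, as in the haas1000 copies:
    uint32_t* ByteIndexed(uintptr_t table_base, int32_t irqn) {
      return reinterpret_cast<uint32_t*>(table_base + (irqn + 16) * 4);
    }

With `table_base` equal to the address of `table`, both functions return a pointer to the same slot.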
|
A D | core_cm0.h |
   832  uint32_t vectors = 0x0U;    in __NVIC_SetVector() local
   833  (* (int *) (vectors + ((int32_t)IRQn + NVIC_USER_IRQ_OFFSET) * 4)) = vector;    in __NVIC_SetVector()
   847  uint32_t vectors = 0x0U;    in __NVIC_GetVector() local
   848  return (uint32_t)(* (int *) (vectors + ((int32_t)IRQn + NVIC_USER_IRQ_OFFSET) * 4));    in __NVIC_GetVector()
|
A D | core_cm1.h |
   859  uint32_t *vectors = (uint32_t *)0x0U;    in __NVIC_SetVector() local
   860  vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector;    in __NVIC_SetVector()
   874  uint32_t *vectors = (uint32_t *)0x0U;    in __NVIC_GetVector() local
   875  return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET];    in __NVIC_GetVector()
|
A D | core_sc000.h |
   905  uint32_t *vectors = (uint32_t *)SCB->VTOR;    in __NVIC_SetVector() local
   906  vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector;    in __NVIC_SetVector()
   920  uint32_t *vectors = (uint32_t *)SCB->VTOR;    in __NVIC_GetVector() local
   921  return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET];    in __NVIC_GetVector()
|
/AliOS-Things-master/components/py_engine/engine/lib/cmsis/inc/ |
A D | core_cm0plus.h |
   951  uint32_t vectors = SCB->VTOR;    in __NVIC_SetVector() local
   953  uint32_t vectors = 0x0U;    in __NVIC_SetVector()
   955  (* (int *) (vectors + ((int32_t)IRQn + NVIC_USER_IRQ_OFFSET) * 4)) = vector;    in __NVIC_SetVector()
   970  uint32_t vectors = SCB->VTOR;    in __NVIC_GetVector() local
   972  uint32_t vectors = 0x0U;    in __NVIC_GetVector()
   974  return (uint32_t)(* (int *) (vectors + ((int32_t)IRQn + NVIC_USER_IRQ_OFFSET) * 4));    in __NVIC_GetVector()
|
A D | core_cm0.h |
   832  uint32_t vectors = 0x0U;    in __NVIC_SetVector() local
   833  (* (int *) (vectors + ((int32_t)IRQn + NVIC_USER_IRQ_OFFSET) * 4)) = vector;    in __NVIC_SetVector()
   847  uint32_t vectors = 0x0U;    in __NVIC_GetVector() local
   848  return (uint32_t)(* (int *) (vectors + ((int32_t)IRQn + NVIC_USER_IRQ_OFFSET) * 4));    in __NVIC_GetVector()
|
A D | core_cm1.h |
   859  uint32_t *vectors = (uint32_t *)0x0U;    in __NVIC_SetVector() local
   860  vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector;    in __NVIC_SetVector()
   874  uint32_t *vectors = (uint32_t *)0x0U;    in __NVIC_GetVector() local
   875  return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET];    in __NVIC_GetVector()
|
/AliOS-Things-master/components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/ |
A D | portable_tensor_utils.cc |
   140  const int8_t* __restrict__ vectors, const float* scaling_factors,    in PortableMatrixBatchVectorMultiplyAccumulate() argument
   142  for (int batch = 0; batch < n_batch; ++batch, vectors += m_cols) {    in PortableMatrixBatchVectorMultiplyAccumulate()
   155  dotprod += (*row_ptr) * (vectors[col]);    in PortableMatrixBatchVectorMultiplyAccumulate()
   165  const int8_t* __restrict__ vectors, const float* scaling_factors,    in PortableMatrixBatchVectorMultiplyAccumulate() argument
   171  matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result);    in PortableMatrixBatchVectorMultiplyAccumulate()
   181  for (int batch = 0; batch < n_batch; ++batch, vectors += m_cols) {    in PortableMatrixBatchVectorMultiplyAccumulate()
   197  dotprod += (*row_ptr) * vectors[col];    in PortableMatrixBatchVectorMultiplyAccumulate()
   261  const int m_cols, const int8_t* __restrict__ vectors,    in PortableSparseMatrixBatchVectorMultiplyAccumulate() argument
   266  for (int batch = 0; batch < n_batch; ++batch, vectors += m_cols) {    in PortableSparseMatrixBatchVectorMultiplyAccumulate()
   282  const int8_t* vector_block_ptr = vectors + block_start_index;    in PortableSparseMatrixBatchVectorMultiplyAccumulate()
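Lines 140–197 are the portable form of the dense loop sketched earlier. The PortableSparseMatrixBatchVectorMultiplyAccumulate hits walk a block-compressed weight matrix instead: only the nonzero column blocks of each row are stored, and `vectors + block_start_index` picks out the matching slice of the input vector. A scalar sketch of that access pattern, with a deliberately simplified ledger layout (one block count per row followed by block indices, 16-wide blocks); the actual TFLite ledger format may differ:

    #include <cstdint>

    constexpr int kBlockSize = 16;  // assumed block width, for illustration only

    // `matrix` stores only the nonzero blocks, back to back; `ledger` describes
    // which column blocks of each row they correspond to.
    void SparseMatrixBatchVectorMultiplyAccumulateSketch(
        const int8_t* matrix, const uint8_t* ledger, int m_rows, int m_cols,
        const int8_t* vectors, const float* scaling_factors, int n_batch,
        float* result) {
      for (int batch = 0; batch < n_batch; ++batch, vectors += m_cols) {
        const int8_t* row_values = matrix;
        const uint8_t* row_ledger = ledger;
        for (int row = 0; row < m_rows; ++row) {
          int32_t dotprod = 0;
          const int num_nonzero_blocks = *row_ledger++;
          for (int i = 0; i < num_nonzero_blocks; ++i) {
            const int block_start_index = (*row_ledger++) * kBlockSize;
            const int8_t* vector_block_ptr = vectors + block_start_index;
            for (int c = 0; c < kBlockSize; ++c) {
              dotprod += row_values[c] * vector_block_ptr[c];
            }
            row_values += kBlockSize;  // advance past the stored block
          }
          result[batch * m_rows + row] += dotprod * scaling_factors[batch];
        }
      }
    }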
|
A D | portable_tensor_utils_impl.h |
    64  const int8_t* __restrict__ vectors, const float* scaling_factors,
    69  const int8_t* __restrict__ vectors, const float* scaling_factors,
    92  const int m_cols, const int8_t* __restrict__ vectors,
|
/AliOS-Things-master/components/csi/csi2/include/core/cmsis/ |
A D | core_cm0.h |
   832  uint32_t vectors = 0x0U;    in __NVIC_SetVector() local
   833  (* (int *) (vectors + ((int32_t)IRQn + NVIC_USER_IRQ_OFFSET) * 4)) = vector;    in __NVIC_SetVector()
   847  uint32_t vectors = 0x0U;    in __NVIC_GetVector() local
   848  return (uint32_t)(* (int *) (vectors + ((int32_t)IRQn + NVIC_USER_IRQ_OFFSET) * 4));    in __NVIC_GetVector()
|
/AliOS-Things-master/components/csi/csi1/include/core/ |
A D | core_rv32.h |
   536  uint32_t *vectors = (uint32_t *)__get_MTVT();    in csi_vic_set_vector() local
   537  vectors[IRQn] = handler;    in csi_vic_set_vector()
   549  uint32_t *vectors = (uint32_t *)__get_MTVT();    in csi_vic_get_vector() local
   550  return (uint32_t)vectors[IRQn];    in csi_vic_get_vector()
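The RISC-V CSI core follows the same pattern with a different base: __get_MTVT() returns the base address of the vectored interrupt table (presumably the machine trap vector table register), and the entry is indexed directly by IRQn, with none of the 16-entry system-exception offset seen in the Cortex-M headers above. A stand-in sketch with a plain array in place of the hardware table:

    #include <cstdint>

    // Model of csi_vic_set_vector()/csi_vic_get_vector(): the table returned by
    // __get_MTVT() is indexed directly by the interrupt number.
    void VicSetVectorSketch(uint32_t* mtvt_table, int irqn, uint32_t handler) {
      mtvt_table[irqn] = handler;
    }

    uint32_t VicGetVectorSketch(const uint32_t* mtvt_table, int irqn) {
      return mtvt_table[irqn];
    }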
|