/AliOS-Things-master/solutions/tflite_micro_speech_demo/micro_speech/train/speech_commands/

generate_streaming_test_wav.py
    89:  len(words_list), FLAGS.sample_rate, FLAGS.clip_duration_ms,
    97:  output_audio_sample_count = FLAGS.sample_rate * FLAGS.test_duration_seconds
    105: (background_segment_duration_ms * FLAGS.sample_rate) / 1000)
    107: (FLAGS.clip_duration_ms * FLAGS.sample_rate) / 1000)
    109: ((background_crossover_ms / 2) * FLAGS.sample_rate) / 1000)
    129: word_stride_samples = int((word_stride_ms * FLAGS.sample_rate) / 1000)
    131: (FLAGS.clip_duration_ms * FLAGS.sample_rate) / 1000)
    132: word_gap_samples = int((FLAGS.word_gap_ms * FLAGS.sample_rate) / 1000)
    140: output_offset_ms = (output_offset * 1000) / FLAGS.sample_rate
    162: FLAGS.sample_rate)

freeze.py
    63:  def create_inference_graph(wanted_words, sample_rate, clip_duration_ms,   [argument]
    92:  len(words_list), sample_rate, clip_duration_ms, window_size_ms,
    119: sample_rate,
    128: sample_rate = model_settings['sample_rate']
    130: 1000) / sample_rate
    132: 1000) / sample_rate
    137: sample_rate=sample_rate,
    226: FLAGS.wanted_words, FLAGS.sample_rate, FLAGS.clip_duration_ms,

freeze_test.py
    37:  sample_rate=16000,
    57:  sample_rate=16000,
    77:  sample_rate=16000,
    95:  sample_rate=16000,
    117: sample_rate=16000,

test_streaming_accuracy.py
    108: return res.sample_rate, res.audio.flatten()
    113: sample_rate, data = read_wav_file(FLAGS.wav)
    128: clip_duration_samples = int(FLAGS.clip_duration_ms * sample_rate / 1000)
    129: clip_stride_samples = int(FLAGS.clip_stride_ms * sample_rate / 1000)
    152: sample_rate
    155: current_time_ms = int(audio_data_offset * 1000 / sample_rate)
test_streaming_accuracy.cc
    220: uint32 sample_rate;   [in main(), local]
    222: wav_string, &audio_data, &sample_count, &channel_count, &sample_rate);   [in main()]
    233: const int64 clip_duration_samples = (clip_duration_ms * sample_rate) / 1000;   [in main()]
    234: const int64 clip_stride_samples = (clip_stride_ms * sample_rate) / 1000;   [in main()]
    239: sample_rate_tensor.scalar<int32>()() = sample_rate;   [in main()]
    264: const int64 current_time_ms = (audio_data_offset * 1000) / sample_rate;   [in main()]
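The Python and C++ streaming-accuracy tools above share the same millisecond/sample-count arithmetic. A minimal standalone C sketch of that conversion (the helper names and the 16 kHz example values are illustrative, not taken from the tools):

    #include <stdint.h>
    #include <stdio.h>

    /* Duration in milliseconds -> number of samples at the given rate. */
    static int64_t ms_to_samples(int64_t duration_ms, int64_t sample_rate)
    {
        return (duration_ms * sample_rate) / 1000;
    }

    /* Sample offset -> elapsed milliseconds at the given rate. */
    static int64_t samples_to_ms(int64_t sample_offset, int64_t sample_rate)
    {
        return (sample_offset * 1000) / sample_rate;
    }

    int main(void)
    {
        int64_t sample_rate = 16000;                               /* 16 kHz input audio */
        int64_t clip_duration = ms_to_samples(1000, sample_rate);  /* 16000 samples */
        int64_t clip_stride = ms_to_samples(30, sample_rate);      /* 480 samples */
        printf("clip=%lld stride=%lld, offset 8000 -> %lld ms\n",
               (long long)clip_duration, (long long)clip_stride,
               (long long)samples_to_ms(8000, sample_rate));
        return 0;
    }

Integer division truncates here, matching how clip_duration_samples and clip_stride_samples are computed in both tools.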
wav_to_features.py
    47:  def wav_to_features(sample_rate, clip_duration_ms, window_size_ms,   [argument]
    68:  0, sample_rate, clip_duration_ms, window_size_ms, window_stride_ms,
    83:  f.write(' * --sample_rate=%d \\\n' % sample_rate)
    128: wav_to_features(FLAGS.sample_rate, FLAGS.clip_duration_ms,

input_data.py
    136: def save_wav_file(filename, wav_data, sample_rate):   [argument]
    155: sample_rate_placeholder: sample_rate,
    457: wav_decoder.sample_rate,
    467: sample_rate = model_settings['sample_rate']
    469: 1000) / sample_rate
    471: 1000) / sample_rate
    475: sample_rate=sample_rate,
/AliOS-Things-master/components/uvoice/application/alicloudtts/

alicloudtts.c
    118: …if ((config->sample_rate != TTS_ALIYUN_SAMPLE_RATE_8K) && (config->sample_rate != TTS_ALIYUN_SAMPL…   [in uvoice_tts_aliyun_init()]
    119: (config->sample_rate != TTS_ALIYUN_SAMPLE_RATE_24K)) {   [in uvoice_tts_aliyun_init()]
    120: M_LOGE("sample rate %d is not supported !", config->sample_rate);   [in uvoice_tts_aliyun_init()]
    142: …if ((voice_spec[i].sample_rate[j] == config->sample_rate) && (voice_spec[i].sample_rate[j] != 0)) {   [in uvoice_tts_aliyun_init()]
    154: M_LOGE("sample rate %d not match %s !", config->sample_rate, config->voice);   [in uvoice_tts_aliyun_init()]
    164: g_tts_config.sample_rate = config->sample_rate; /* support 8000Hz, 16000Hz */   [in uvoice_tts_aliyun_init()]
    281: … g_tts_config.sample_rate, g_tts_config.voice, g_tts_config.volume, g_tts_config.speech_rate,   [in uvoice_tts_aliyun_request()]
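uvoice_tts_aliyun_init() rejects any rate outside the small set the cloud TTS service accepts. A hedged sketch of that kind of whitelist check; the enum values and helper name below are illustrative stand-ins, not the component's real TTS_ALIYUN_SAMPLE_RATE_* definitions:

    #include <stdio.h>

    /* Illustrative stand-ins for the supported-rate constants. */
    enum {
        TTS_SAMPLE_RATE_8K  = 8000,
        TTS_SAMPLE_RATE_16K = 16000,
        TTS_SAMPLE_RATE_24K = 24000,
    };

    static int tts_sample_rate_supported(int sample_rate)
    {
        return sample_rate == TTS_SAMPLE_RATE_8K ||
               sample_rate == TTS_SAMPLE_RATE_16K ||
               sample_rate == TTS_SAMPLE_RATE_24K;
    }

    int main(void)
    {
        int requested = 44100;
        if (!tts_sample_rate_supported(requested)) {
            fprintf(stderr, "sample rate %d is not supported!\n", requested);
            return -1;
        }
        return 0;
    }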
/AliOS-Things-master/components/uvoice/tools/

gen_voice.py
    12: sample_rate = 16000   [variable]
    19: unit_sample = int(sample_rate*times)
    20: theta_inc = 2 * math.pi * freq / sample_rate
    21: df = sample_rate / (unit_sample - 1)
    72: f.setframerate(sample_rate)
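gen_voice.py advances the oscillator phase by 2*pi*freq/sample_rate for every sample it emits. The same arithmetic sketched in C, with assumed tone parameters and the WAV writing omitted:

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const double pi = 3.14159265358979323846;
        const int sample_rate = 16000;     /* matches gen_voice.py's default */
        const double freq = 440.0;         /* illustrative tone frequency */
        const double duration_s = 0.5;

        int num_samples = (int)(sample_rate * duration_s);
        double theta = 0.0;
        double theta_inc = 2.0 * pi * freq / sample_rate;  /* phase step per sample */

        for (int i = 0; i < num_samples; i++) {
            /* Half-scale 16-bit PCM sample; a real tool would write it to a WAV file. */
            int16_t sample = (int16_t)(0.5 * 32767.0 * sin(theta));
            theta += theta_inc;
            (void)sample;
        }
        printf("generated %d samples at %d Hz\n", num_samples, sample_rate);
        return 0;
    }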
/AliOS-Things-master/components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/python/kernel_tests/

audio_microfrontend_op_test.py
    48:  sample_rate=SAMPLE_RATE,
    66:  sample_rate=SAMPLE_RATE,
    87:  sample_rate=SAMPLE_RATE,
    107: sample_rate=SAMPLE_RATE,
    129: sample_rate=SAMPLE_RATE,
    149: sample_rate=SAMPLE_RATE,
/AliOS-Things-master/hardware/chip/rtl872xd/sdk/component/common/drivers/si/

rl6548.c
    136: void CODEC_Init(u32 sample_rate, u32 word_len, u32 mono_stereo, u32 application)   [in CODEC_Init(), argument]
    198: AUDIO_SI_WriteReg(DAC_ADC_SR_CTRL, (sample_rate << 4) | sample_rate);   [in CODEC_Init()]
    347: switch(sample_rate){   [in CODEC_Init()]
    380: AUDIO_SI_WriteReg(DAC_ADC_SR_CTRL, (sample_rate << 4) | sample_rate);   [in CODEC_Init()]
    435: if ((sample_rate == SR_96K) || (sample_rate == SR_88P2K)){   [in CODEC_Init()]
    524: void CODEC_SetSr(u32 sample_rate){   [in CODEC_SetSr(), argument]
    527: if ((sample_rate == SR_96K) || (sample_rate == SR_88P2K)){   [in CODEC_SetSr()]
    530: reg_value |= (0x0800|(sample_rate << 4) | sample_rate);   [in CODEC_SetSr()]
    540: reg_value |= ((sample_rate << 4) | sample_rate);   [in CODEC_SetSr()]
    860: void CODEC_DacEqConfig(u32 sample_rate)   [in CODEC_DacEqConfig(), argument]
    [all …]
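The CODEC_Init()/CODEC_SetSr() lines above pack one sample-rate code into two 4-bit fields of DAC_ADC_SR_CTRL via (sample_rate << 4) | sample_rate, presumably the DAC and ADC rate selects. A hedged sketch of that bit packing; the SR_* codes and the field layout here are assumptions for illustration, not the RL6548 register map:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative rate codes; the driver uses SR_8K ... SR_96K enumerators. */
    enum { SR_8K = 0x0, SR_16K = 0x1, SR_48K = 0x5, SR_96K = 0x7 };

    /* Mirror the (sample_rate << 4) | sample_rate expression: the same 4-bit
     * rate code lands in both the high and low nibbles of the control value. */
    static uint32_t dac_adc_sr_value(uint32_t sample_rate_code)
    {
        return (sample_rate_code << 4) | sample_rate_code;
    }

    int main(void)
    {
        printf("DAC_ADC_SR_CTRL = 0x%02x\n", (unsigned)dac_adc_sr_value(SR_48K));
        return 0;
    }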
/AliOS-Things-master/components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/

window_util.c
    33: struct WindowState* state, int sample_rate) {   [in WindowPopulateState(), argument]
    34: state->size = config->size_ms * sample_rate / 1000;   [in WindowPopulateState()]
    35: state->step = config->step_size_ms * sample_rate / 1000;   [in WindowPopulateState()]
frontend_util.c
    31: struct FrontendState* state, int sample_rate) {   [in FrontendPopulateState(), argument]
    34: if (!WindowPopulateState(&config->window, &state->window, sample_rate)) {   [in FrontendPopulateState()]
    46: sample_rate, state->fft.fft_size / 2 + 1)) {   [in FrontendPopulateState()]

frontend_memmap_generator.c
    32: int sample_rate = 16000;   [in main(), local]
    34: if (!FrontendPopulateState(&frontend_config, &frontend_state, sample_rate)) {   [in main()]

frontend_main.c
    25: int sample_rate = 16000;   [in main(), local]
    28: if (!FrontendPopulateState(&frontend_config, &frontend_state, sample_rate)) {   [in main()]
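frontend_memmap_generator.c and frontend_main.c both populate the microfrontend state for 16 kHz input before feeding it samples. A minimal usage sketch along those lines, assuming the library headers are on the include path and using a zero-filled buffer in place of real microphone data:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"
    #include "tensorflow/lite/experimental/microfrontend/lib/frontend_util.h"

    int main(void)
    {
        int sample_rate = 16000;
        struct FrontendConfig config;
        struct FrontendState state;

        FrontendFillConfigWithDefaults(&config);
        if (!FrontendPopulateState(&config, &state, sample_rate)) {
            fprintf(stderr, "FrontendPopulateState failed\n");
            return 1;
        }

        int16_t samples[512] = {0};   /* stand-in for real audio */
        size_t num_read = 0;
        struct FrontendOutput output =
            FrontendProcessSamples(&state, samples, 512, &num_read);
        printf("consumed %zu samples, produced %zu feature values\n",
               num_read, output.size);

        FrontendFreeStateContents(&state);
        return 0;
    }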
/AliOS-Things-master/hardware/chip/haas1000/drivers/services/wifi_app/alsa_adapter/

btpcm_config.h
    32: void btpcm_capture_start(SCO_SAMPRATE_T sample_rate,uint8_t*sco_buff,uint32_t data_size);
    33: void btpcm_playback_start(SCO_SAMPRATE_T sample_rate,uint8_t*sco_buff,uint32_t data_size);
/AliOS-Things-master/hardware/chip/rtl872xd/sdk/component/common/audio/core/

audio_internel.c
    274: switch(psp_obj->sample_rate){   [in sp_init_hal()]
    308: CODEC_Init(psp_obj->sample_rate, psp_obj->word_len, psp_obj->mono_stereo, psp_obj->direction);   [in sp_init_hal()]
    390: psp_obj->sample_rate = SR_48K;   [in rl6548_audio_init()]
    393: psp_obj->sample_rate = SR_96K;   [in rl6548_audio_init()]
    396: psp_obj->sample_rate = SR_32K;   [in rl6548_audio_init()]
    399: psp_obj->sample_rate = SR_16K;   [in rl6548_audio_init()]
    402: psp_obj->sample_rate = SR_8K;   [in rl6548_audio_init()]
    405: psp_obj->sample_rate = SR_44P1K;   [in rl6548_audio_init()]
    408: psp_obj->sample_rate = SR_88P2K;   [in rl6548_audio_init()]
/AliOS-Things-master/components/uvoice/codec/

pvmp3_decoder.c
    75:  int sample_rate = rate_v1[rate_idx];   [in pvmp3_parse_header(), local]
    77:  sample_rate /= 2;   [in pvmp3_parse_header()]
    79:  sample_rate /= 4;   [in pvmp3_parse_header()]
    98:  decoder->frame_size = (12000 * decoder->bitrate / sample_rate + padding) * 4;   [in pvmp3_parse_header()]
    133: decoder->frame_size = 144000 * decoder->bitrate / sample_rate + padding;   [in pvmp3_parse_header()]
    141: decoder->frame_size = tmp * decoder->bitrate / sample_rate + padding;   [in pvmp3_parse_header()]
    149: decoder->rate = sample_rate;   [in pvmp3_parse_header()]
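pvmp3_parse_header() derives the frame length from the decoded bitrate and sample rate; line 133 above is the MPEG-1 Layer III case. A sketch of that formula with the bitrate in kbit/s, as implied by the 144000 multiplier (the 128 kbit/s, 44.1 kHz values are only an example):

    #include <stdio.h>

    /* MPEG-1 Layer III frame length in bytes:
     *   144 * bitrate_bps / sample_rate + padding
     * which is 144000 * bitrate_kbps / sample_rate + padding. */
    static int mp3_layer3_frame_size(int bitrate_kbps, int sample_rate, int padding)
    {
        return 144000 * bitrate_kbps / sample_rate + padding;
    }

    int main(void)
    {
        /* 128 kbit/s at 44100 Hz with no padding bit -> 417 bytes per frame. */
        printf("frame size = %d bytes\n", mp3_layer3_frame_size(128, 44100, 0));
        return 0;
    }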
/AliOS-Things-master/components/uvoice/internal/

uvoice_wave.h
    36: uint32_t sample_rate;   [member]
    50: uint32_t sample_rate;   [member]
/AliOS-Things-master/components/uvoice/audio/hal/linux/

uvoice_pcm.c
    27:  unsigned int sample_rate;   [in alsa_pcm_open(), local]
    95:  sample_rate = config->rate;   [in alsa_pcm_open()]
    97:  ret = snd_pcm_hw_params_set_rate_near(pcm_handle, hw_params, &sample_rate, &dir);   [in alsa_pcm_open()]
    148: ret = snd_pcm_hw_params_get_rate(hw_params, &sample_rate, &dir);   [in alsa_pcm_open()]
    153: config->rate = sample_rate;   [in alsa_pcm_open()]
    154: snd_info("sample_rate %u\n", sample_rate);   [in alsa_pcm_open()]
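alsa_pcm_open() requests the configured rate with snd_pcm_hw_params_set_rate_near() and then reads back whatever the device actually granted, so config->rate ends up holding the effective rate. A minimal sketch of that negotiation against ALSA's default playback device (device name, format, and channel count are assumptions):

    #include <alsa/asoundlib.h>
    #include <stdio.h>

    int main(void)
    {
        snd_pcm_t *pcm = NULL;
        snd_pcm_hw_params_t *hw = NULL;
        unsigned int sample_rate = 16000;   /* requested rate */
        int dir = 0;

        if (snd_pcm_open(&pcm, "default", SND_PCM_STREAM_PLAYBACK, 0) < 0)
            return 1;

        snd_pcm_hw_params_alloca(&hw);
        snd_pcm_hw_params_any(pcm, hw);
        snd_pcm_hw_params_set_access(pcm, hw, SND_PCM_ACCESS_RW_INTERLEAVED);
        snd_pcm_hw_params_set_format(pcm, hw, SND_PCM_FORMAT_S16_LE);
        snd_pcm_hw_params_set_channels(pcm, hw, 1);

        /* Ask for the nearest supported rate; sample_rate is updated in place. */
        snd_pcm_hw_params_set_rate_near(pcm, hw, &sample_rate, &dir);
        snd_pcm_hw_params(pcm, hw);

        /* Read back the rate the hardware actually accepted. */
        snd_pcm_hw_params_get_rate(hw, &sample_rate, &dir);
        printf("sample_rate %u\n", sample_rate);

        snd_pcm_close(pcm);
        return 0;
    }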
/AliOS-Things-master/hardware/chip/haas1000/drivers/services/multimedia/speech/inc/

speech_utils.h
    62: inline float speech_frame_size2time_ms(int sample_rate, int frame_size)   [in speech_frame_size2time_ms(), argument]
    66: frame_time = (float)frame_size / (sample_rate / 1000);   [in speech_frame_size2time_ms()]
/AliOS-Things-master/hardware/chip/haas1000/drivers/platform/hal/

hal_tdm.h
    72: uint32_t sample_rate,
    76: uint32_t sample_rate);
/AliOS-Things-master/components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/python/ops/

audio_microfrontend_op.py
    35:  sample_rate=16000,   [argument]
    109: audio, sample_rate, window_size, window_step, num_channels,
/AliOS-Things-master/hardware/chip/haas1000/drivers/services/bt_app/

app_bt_stream.h
    146: void bt_store_sbc_sample_rate(enum AUD_SAMPRATE_T sample_rate);
    178: struct APP_RESAMPLE_T *app_playback_resample_open(enum AUD_SAMPRATE_T sample_rate, enum AUD_CHANNEL…
    189: struct APP_RESAMPLE_T *app_capture_resample_open(enum AUD_SAMPRATE_T sample_rate, enum AUD_CHANNEL_…
/AliOS-Things-master/components/uvoice/media/

uvoice_wave.c
    44:  header->sample_rate = rate;   [in wave_header_init()]
    126: M_LOGD("sample_rate %u\n", chunk_fmt.sample_rate);   [in wave_info_parse()]
    143: pcm_info.rate = chunk_fmt.sample_rate;   [in wave_info_parse()]
    151: wave->rate = chunk_fmt.sample_rate;   [in wave_info_parse()]
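wave_info_parse() reads the rate out of the RIFF 'fmt ' chunk and copies it into the PCM configuration. A hedged sketch using the canonical PCM fmt-chunk layout; this struct is a generic illustration, not the definition from uvoice_wave.h:

    #include <stdint.h>
    #include <stdio.h>

    /* Canonical PCM 'fmt ' chunk body (little-endian on disk). */
    struct wav_fmt_chunk {
        uint16_t audio_format;     /* 1 = PCM */
        uint16_t num_channels;
        uint32_t sample_rate;      /* frames per second, e.g. 16000 */
        uint32_t byte_rate;        /* sample_rate * num_channels * bits_per_sample / 8 */
        uint16_t block_align;
        uint16_t bits_per_sample;
    };

    int main(void)
    {
        struct wav_fmt_chunk chunk_fmt = { 1, 1, 16000, 32000, 2, 16 };
        uint32_t rate = chunk_fmt.sample_rate;   /* the field wave_info_parse() copies out */
        printf("sample_rate %u, byte_rate %u\n",
               (unsigned)rate, (unsigned)chunk_fmt.byte_rate);
        return 0;
    }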