This article collects typical usage examples of the pyaudio.PyAudio method in Python. If you are wondering what pyaudio.PyAudio does, how to call it, or what real-world code that uses it looks like, the curated method examples below may help. You can also explore further usage examples from the pyaudio module.
Twenty code examples of pyaudio.PyAudio are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
Example 1: __enter__
Upvotes: 7
# Required import: import pyaudio [as alias]
# Or: from pyaudio import PyAudio [as alias]
def __enter__(self):
    self._audio_interface = pyaudio.PyAudio()
    self._audio_stream = self._audio_interface.open(
        # format=pyaudio.paInt16,
        format=pyaudio.paFloat32,
        # The API currently only supports 1-channel (mono) audio
        # https://goo.gl/z757pE
        channels=1,
        rate=self._rate,
        input=True,
        frames_per_buffer=self._chunk,
        input_device_index=self._device,
        # Run the audio stream asynchronously to fill the buffer object.
        # This is necessary so that the input device's buffer doesn't
        # overflow while the calling thread makes network requests, etc.
        stream_callback=self._fill_buffer,
    )
    self.closed = False
    return self
Developer: pytorch, Project: audio, Lines: 23
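For orientation, here is a minimal usage sketch of such a context-managed microphone stream. The class name MicrophoneStream, its constructor arguments, and the generator() method are assumptions for illustration and are not part of the excerpt above:
# Hypothetical usage; assumes the class also defines __exit__ to close the
# stream and a generator() method that yields chunks from the filled buffer.
with MicrophoneStream(rate=16000, chunk=1600, device=None) as stream:
    for chunk in stream.generator():
        process_chunk(chunk)  # e.g. forward to a streaming speech-to-text API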
Example 2: play_audio_file
Upvotes: 6
# Required import: import pyaudio [as alias]
# Or: from pyaudio import PyAudio [as alias]
def play_audio_file(fname=DETECT_DING):
    """Simple callback function to play a wave file. By default it plays
    a Ding sound.

    :param str fname: wave file name
    :return: None
    """
    ding_wav = wave.open(fname, 'rb')
    ding_data = ding_wav.readframes(ding_wav.getnframes())
    audio = pyaudio.PyAudio()
    stream_out = audio.open(
        format=audio.get_format_from_width(ding_wav.getsampwidth()),
        channels=ding_wav.getnchannels(),
        rate=ding_wav.getframerate(), input=False, output=True)
    stream_out.start_stream()
    stream_out.write(ding_data)
    time.sleep(0.2)
    stream_out.stop_stream()
    stream_out.close()
    audio.terminate()
Developer: warchildmd, Project: google-assistant-hotword-raspi, Lines: 22
Example 3: valid_input_devices
Upvotes: 6
# Required import: import pyaudio [as alias]
# Or: from pyaudio import PyAudio [as alias]
def valid_input_devices(self):
    """
    See which devices can be opened for microphone input.
    Call this when no PyAudio object is loaded.
    """
    mics = []
    for device in range(self.p.get_device_count()):
        if self.valid_test(device):
            mics.append(device)
    if len(mics) == 0:
        print("no microphone devices found!")
    else:
        print("found %d microphone devices: %s" % (len(mics), mics))
    return mics

### SETUP AND SHUTDOWN
Developer: swharden, Project: Python-GUI-examples, Lines: 18
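valid_input_devices() relies on a valid_test() helper that is not part of the excerpt. A minimal sketch of such a helper, assumed here for illustration rather than taken from the project, simply tries to open the device for input and reports whether that succeeds:
def valid_test(self, device, rate=44100):
    # Assumed helper: return True if the device index can be opened for input.
    try:
        stream = self.p.open(format=pyaudio.paInt16, channels=1, rate=rate,
                             input=True, input_device_index=device,
                             frames_per_buffer=1024)
        stream.close()
        return True
    except Exception:
        return False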
Example 4: play_wav
Upvotes: 6
# Required import: import pyaudio [as alias]
# Or: from pyaudio import PyAudio [as alias]
def play_wav(fname, chunk=CHUNK):
    # create an audio object
    wf = wave.open(fname, 'rb')
    p = pyaudio.PyAudio()
    # open stream based on the wave object which has been input
    stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                    channels=wf.getnchannels(),
                    rate=wf.getframerate(),
                    output=True)
    # read data (based on the chunk size)
    data = wf.readframes(chunk)
    # play stream (looping from beginning of file to the end)
    while len(data) > 0:
        # writing to the stream is what *actually* plays the sound.
        stream.write(data)
        data = wf.readframes(chunk)
    # cleanup
    stream.close()
    p.terminate()
Developer: gigagenie, Project: ai-makers-kit, Lines: 25
Example 5: play_file
Upvotes: 6
# Required import: import pyaudio [as alias]
# Or: from pyaudio import PyAudio [as alias]
def play_file(fname):
    # create an audio object
    wf = wave.open(fname, 'rb')
    p = pyaudio.PyAudio()
    chunk = 1024
    # open stream based on the wave object which has been input
    stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                    channels=wf.getnchannels(),
                    rate=wf.getframerate(),
                    output=True)
    # read data (based on the chunk size)
    data = wf.readframes(chunk)
    # play stream (looping from beginning of file to the end)
    while len(data) > 0:
        # writing to the stream is what *actually* plays the sound.
        stream.write(data)
        data = wf.readframes(chunk)
    # cleanup
    stream.close()
    p.terminate()
Developer: gigagenie, Project: ai-makers-kit, Lines: 26
Example 6: __enter__
Upvotes: 6
# Required import: import pyaudio [as alias]
# Or: from pyaudio import PyAudio [as alias]
def __enter__(self):
    self._audio_interface = pyaudio.PyAudio()
    self._audio_stream = self._audio_interface.open(
        format=pyaudio.paInt16,
        channels=1, rate=self._rate,
        input=True, frames_per_buffer=self._chunk,
        # Run the audio stream asynchronously to fill the buffer object.
        # This is necessary so that the input device's buffer doesn't
        # overflow while the calling thread makes network requests, etc.
        stream_callback=self._fill_buffer,
    )
    self.closed = False
    return self

# def __exit__(self, type, value, traceback):
Developer: gigagenie, Project: ai-makers-kit, Lines: 19
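Several of the __enter__ examples on this page (Examples 1, 6, and 20) pass a _fill_buffer method as stream_callback without showing it. A minimal sketch of such a callback, assuming the surrounding class keeps a queue in self._buff, follows PyAudio's standard callback signature and simply enqueues each incoming chunk:
def _fill_buffer(self, in_data, frame_count, time_info, status_flags):
    # PyAudio calls back with (in_data, frame_count, time_info, status);
    # store the raw bytes and tell PortAudio to keep the stream running.
    self._buff.put(in_data)
    return None, pyaudio.paContinue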
Example 7: play_file
Upvotes: 6
# Required import: import pyaudio [as alias]
# Or: from pyaudio import PyAudio [as alias]
def play_file(fname):
    # create an audio object
    wf = wave.open(fname, 'rb')
    p = pyaudio.PyAudio()
    chunk = 1024
    # open stream based on the wave object which has been input
    stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                    channels=wf.getnchannels(),
                    rate=wf.getframerate(),
                    output=True)
    # read data (based on the chunk size)
    data = wf.readframes(chunk)
    # play stream (looping from beginning of file to the end)
    while len(data) > 0:
        # writing to the stream is what *actually* plays the sound.
        stream.write(data)
        data = wf.readframes(chunk)
    # cleanup
    stream.close()
    p.terminate()
Developer: gigagenie, Project: ai-makers-kit, Lines: 26
Example 8: _play_audio
Upvotes: 6
# Required import: import pyaudio [as alias]
# Or: from pyaudio import PyAudio [as alias]
def _play_audio(path, delay):
    try:
        time.sleep(delay)
        wf = wave.open(path, 'rb')
        p = pyaudio.PyAudio()
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate=wf.getframerate(),
                        output=True)
        data = wf.readframes(TextToSpeech.CHUNK)
        while data:
            stream.write(data)
            data = wf.readframes(TextToSpeech.CHUNK)
        stream.stop_stream()
        stream.close()
        p.terminate()
        return
    except:
        pass
Developer: junzew, Project: HanTTS, Lines: 25
Example 9: play
Upvotes: 6
# Required import: import pyaudio [as alias]
# Or: from pyaudio import PyAudio [as alias]
def play(self, file_):
    wf = wave.open(file_, 'rb')
    p = pyaudio.PyAudio()
    stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                    channels=wf.getnchannels(),
                    rate=wf.getframerate(),
                    output=True)
    data = wf.readframes(self.CHUNK)
    # readframes() returns bytes, so test truthiness instead of comparing to ''
    while data:
        stream.write(data)
        data = wf.readframes(self.CHUNK)
    stream.stop_stream()
    stream.close()
    p.terminate()
Developer: namco1992, Project: voicetools, Lines: 20
Example 10: __init__
Upvotes: 6
# Required import: import pyaudio [as alias]
# Or: from pyaudio import PyAudio [as alias]
def __init__(self, config, verbose, logger):
    """Initialize an MQTT client.

    Args:
        config (:class:`.ServerConfig`): The configuration of the MQTT client.
        verbose (bool): Whether or not the MQTT client runs in verbose mode.
        logger (:class:`logging.Logger`): The Logger object for logging messages.
    """
    self.config = config
    self.verbose = verbose
    self.logger = logger
    self.mqtt = Client()
    self.logger.debug('Using %s', pyaudio.get_portaudio_version_text())
    self.logger.debug('Creating PyAudio object...')
    self.audio = pyaudio.PyAudio()
    self.initialize()
    self.mqtt.on_connect = self.on_connect
    self.mqtt.on_disconnect = self.on_disconnect
    self.connect()
Developer: koenvervloesem, Project: hermes-audio-server, Lines: 26
Example 11: __init__
Upvotes: 6
# Required import: import pyaudio [as alias]
# Or: from pyaudio import PyAudio [as alias]
def __init__(self):
    self.interrupted = False
    self.detector = None
    rpack = RosPack()
    # UMDL or PMDL file paths along with audio files
    pkg_path = rpack.get_path('dialogflow_ros')
    self.model_path = pkg_path + '/scripts/snowboy/resources/jarvis.umdl'
    ding_path = pkg_path + '/scripts/snowboy/resources/ding.wav'
    # Setup df
    self.df_client = None
    # Setup audio output
    ding = wave.open(ding_path, 'rb')
    self.ding_data = ding.readframes(ding.getnframes())
    self.audio = pyaudio.PyAudio()
    self.stream_out = self.audio.open(
        format=self.audio.get_format_from_width(ding.getsampwidth()),
        channels=ding.getnchannels(), rate=ding.getframerate(),
        input=False, output=True)
    self.last_contexts = []
    rospy.loginfo("HOTWORD_CLIENT: Ready!")
Developer: piraka9011, Project: dialogflow_ros, Lines: 22
Example 12: __init__
Upvotes: 6
# Required import: import pyaudio [as alias]
# Or: from pyaudio import PyAudio [as alias]
def __init__(self):
    # Audio stream input setup
    FORMAT = pyaudio.paInt16
    CHANNELS = 1
    RATE = 16000
    self.CHUNK = 4096
    self.audio = pyaudio.PyAudio()
    self.stream = self.audio.open(format=FORMAT, channels=CHANNELS,
                                  rate=RATE, input=True,
                                  frames_per_buffer=self.CHUNK,
                                  stream_callback=self.get_data)
    self._buff = Queue.Queue()  # Buffer to hold audio data
    self.closed = False
    # ROS Text Publisher
    self.text_pub = rospy.Publisher('/google_client/text', String, queue_size=10)
    # Context clues in yaml file
    rospack = rospkg.RosPack()
    yamlFileDir = rospack.get_path('dialogflow_ros') + '/config/context.yaml'
    with open(yamlFileDir, 'r') as f:
        # Note: newer PyYAML requires an explicit Loader, e.g. yaml.safe_load(f)
        self.context = yaml.load(f)
Developer: piraka9011, Project: dialogflow_ros, Lines: 24
Example 13: __init__
Upvotes: 6
# Required import: import pyaudio [as alias]
# Or: from pyaudio import PyAudio [as alias]
def __init__(self):
    FORMAT = pyaudio.paInt16
    CHANNELS = 1
    RATE = 16000
    CHUNK = 4096
    self.audio = pyaudio.PyAudio()
    self.stream = self.audio.open(format=FORMAT, channels=CHANNELS, rate=RATE,
                                  input=True, frames_per_buffer=CHUNK,
                                  stream_callback=self._callback)
    self.serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.read_list = [self.serversocket]
    self._server_name = rospy.get_param('/dialogflow_client/server_name',
                                        '127.0.0.1')
    self._port = rospy.get_param('/dialogflow_client/port', 4444)
    rospy.loginfo("DF_CLIENT: Audio Server Started!")
Developer: piraka9011, Project: dialogflow_ros, Lines: 19
Example 14: _record
Upvotes: 6
# Required import: import pyaudio [as alias]
# Or: from pyaudio import PyAudio [as alias]
def _record(self):
    # Start recording audio on the current thread until stop() is called.
    p = pyaudio.PyAudio()
    channels, rate = self.config.CHANNELS, self.config.RATE
    frames_per_buffer = self.config.FRAMES_PER_BUFFER
    pa_format = pyaudio.get_format_from_width(self.config.SAMPLE_WIDTH)
    stream = p.open(input=True, format=pa_format, channels=channels,
                    rate=rate, frames_per_buffer=frames_per_buffer)
    # Start recognising in a loop
    stream.start_stream()
    while self._recording:
        with self._condition:
            self._buffers.append(stream.read(frames_per_buffer))
            # Notify waiting threads (if any).
            self._condition.notifyAll()
        # This improves the performance; we don't need to process as
        # much audio as the device can read.
        time.sleep(self.read_interval)
    stream.close()
    p.terminate()
Developer: dictation-toolbox, Project: dragonfly, Lines: 27
Example 15: _get_pa_instance
Upvotes: 6
# Required import: import pyaudio [as alias]
# Or: from pyaudio import PyAudio [as alias]
def _get_pa_instance():
    # Suppress initial ALSA messages if using ALSA.
    # Got this from: https://stackoverflow.com/a/17673011/12157649
    try:
        asound = cdll.LoadLibrary('libasound.so')
        c_error_handler = ERROR_HANDLER_FUNC(
            lambda filename, line, function, err, fmt: None
        )
        asound.snd_lib_error_set_handler(c_error_handler)
    except:
        # We'll most likely get here if the Port Audio host API isn't ALSA.
        asound = None
    # Create the pa instance.
    pa = pyaudio.PyAudio()
    # If necessary, restore the original error handler.
    if asound:
        asound.snd_lib_error_set_handler(None)
    return pa
Developer: dictation-toolbox, Project: dragonfly, Lines: 22
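The snippet uses ERROR_HANDLER_FUNC without defining it. Following the Stack Overflow answer linked in the comment, a typical definition (a sketch, not necessarily the project's exact code) is a ctypes callback type matching ALSA's error-handler signature:
from ctypes import CFUNCTYPE, c_char_p, c_int

# ALSA error handler arguments: (filename, line, function, err, fmt)
ERROR_HANDLER_FUNC = CFUNCTYPE(None, c_char_p, c_int, c_char_p, c_int, c_char_p)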
Example 16: __init__
Upvotes: 6
# Required import: import pyaudio [as alias]
# Or: from pyaudio import PyAudio [as alias]
def __init__(self, speaker, passive_stt_engine, active_stt_engine):
    """
    Initiates the pocketsphinx instance.

    Arguments:
        speaker -- handles platform-independent audio output
        passive_stt_engine -- performs STT while Jasper is in passive listen mode
        active_stt_engine -- performs STT while Jasper is in active listen mode
    """
    self._logger = logging.getLogger(__name__)
    self.speaker = speaker
    self.passive_stt_engine = passive_stt_engine
    self.active_stt_engine = active_stt_engine
    self._logger.info("Initializing PyAudio. ALSA/Jack error messages " +
                      "that pop up during this process are normal and " +
                      "can usually be safely ignored.")
    self._audio = pyaudio.PyAudio()
    self._logger.info("Initialization of PyAudio completed.")
Developer: jjwang, Project: laibot-client, Lines: 21
Example 17: __init__
Upvotes: 6
# Required import: import pyaudio [as alias]
# Or: from pyaudio import PyAudio [as alias]
def __init__(self, model):
    import speech_recognition  # lazy loading
    import pyaudio
    device_count = pyaudio.PyAudio().get_device_count() - 1
    print("\ndevice_count: " + str(device_count))
    self._r = speech_recognition.Recognizer()
    try:
        mics = speech_recognition.Microphone.list_microphone_names()
        print("mics: " + str(mics))
        index = mics.index(registered_mic)
    except:
        index = -1
    print("index: " + str(index))
    try:
        if index == -1:
            self._m = speech_recognition.Microphone()
        else:
            self._m = speech_recognition.Microphone(device_index=index)
    except:
        self._m = None
        print("SpeechRecognizer_Common, no mic detected!")
    self._model = model
Developer: richmondu, Project: libfaceid, Lines: 24
Example 18: run
Upvotes: 6
# Required import: import pyaudio [as alias]
# Or: from pyaudio import PyAudio [as alias]
def run(self):
    self.logger.debug("Start to recording...")
    self.logger.debug(" Time = %s" % self.time)
    self.logger.debug(" Sample Rate = %s" % self.sr)
    self.start_time = time.time()
    pa = PyAudio()
    stream = pa.open(format=paInt16, channels=1, rate=self.sr, input=True,
                     frames_per_buffer=self.frames_per_buffer)
    my_buf = []
    count = 0
    if self.time is None:
        total_count = 1e10
    else:
        total_count = self.time * self.sr / self.batch_num
    while count < total_count and self.__running.isSet():
        datawav = stream.read(self.batch_num, exception_on_overflow=True)
        # np.frombuffer replaces the deprecated np.fromstring for binary data
        datause = np.frombuffer(datawav, dtype=np.short)
        for w in datause:
            self.buffer.put(w)
        count += 1
    stream.close()
Developer: mhy12345, Project: rcaudio, Lines: 22
Example 19: __init__
Upvotes: 6
# Required import: import pyaudio [as alias]
# Or: from pyaudio import PyAudio [as alias]
def __init__(self):
    self.open = True
    self.rate = 44100
    self.frames_per_buffer = 1024
    self.channels = 2
    self.format = pyaudio.paInt16
    self.audio_filename = "temp_audio.wav"
    self.audio = pyaudio.PyAudio()
    self.stream = self.audio.open(format=self.format,
                                  channels=self.channels,
                                  rate=self.rate,
                                  input=True,
                                  frames_per_buffer=self.frames_per_buffer)
    self.audio_frames = []

# Audio starts being recorded
Developer: JRodrigoF, Project: AVrecordeR, Lines: 20
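The excerpt stops where recording would begin. A minimal sketch of the record/stop pair such a class typically needs, assumed here from the attributes initialized above (it requires import wave and is not the project's exact code), reads chunks while self.open is True and then writes them to self.audio_filename:
def record(self):
    # Keep pulling chunks from the input stream until stop() clears self.open.
    while self.open:
        data = self.stream.read(self.frames_per_buffer)
        self.audio_frames.append(data)

def stop(self):
    # Stop recording and dump the collected frames to a wave file.
    if self.open:
        self.open = False
        self.stream.stop_stream()
        self.stream.close()
        sample_width = self.audio.get_sample_size(self.format)
        self.audio.terminate()
        wf = wave.open(self.audio_filename, 'wb')
        wf.setnchannels(self.channels)
        wf.setsampwidth(sample_width)
        wf.setframerate(self.rate)
        wf.writeframes(b''.join(self.audio_frames))
        wf.close()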
Example 20: __enter__
Upvotes: 6
# Required import: import pyaudio [as alias]
# Or: from pyaudio import PyAudio [as alias]
def __enter__(self):
    with SuperManager.getInstance().commons.shutUpAlsaFFS():
        self._audio_interface = pyaudio.PyAudio()
    self._audio_stream = self._audio_interface.open(
        format=pyaudio.paInt16,
        # The API currently only supports 1-channel (mono) audio
        # https://goo.gl/z757pE
        channels=1, rate=self._rate,
        input=True, frames_per_buffer=self._chunk,
        # Run the audio stream asynchronously to fill the buffer object.
        # This is necessary so that the input device's buffer doesn't
        # overflow while the calling thread makes network requests, etc.
        stream_callback=self._fill_buffer,
    )
    self.closed = False
    return self
Developer: project-alice-assistant, Project: ProjectAlice, Lines: 21
Note: The pyaudio.PyAudio method examples in this article were collected from open-source projects hosted on GitHub, MSDocs, and other source-code and documentation platforms. The code snippets come from projects contributed by open-source developers, and the copyright of the source code belongs to the original authors. Please consult each project's license before redistributing or reusing the code, and do not republish this article without permission.