Raspberry Pi Python 3.7 voice assistant development notes (5)
Next up is the chat module, which is the Turing Robot module from another developer's post, used here with only minor changes.
def Turing(text_words=""):
    req = {
        "reqType": 0,
        "perception": {
            "inputText": {
                "text": text_words
            },
            "selfInfo": {
                "location": {
                    "city": "Wuhan",
                    "province": "Hubei",
                    "street": "Hongshan District"
                }
            }
        },
        "userInfo": {
            "apiKey": turing_api_key,
            "userId": "Your Turing ID"
        }
    }
    req["perception"]["inputText"]["text"] = text_words
    response = requests.request("post", api_url, json=req, headers=headers)
    response_dict = json.loads(response.text)
    result = response_dict["results"][0]["values"]["text"]
    print("AI Robot said:", result)
    return result
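For reference, the parsing above assumes the service returns JSON of roughly the following shape. This is only an illustration built from the fields the code reads, not an official Turing API sample:

# Illustrative only: the shape implied by response_dict["results"][0]["values"]["text"]
sample_response = {
    "results": [
        {"values": {"text": "Hello, how can I help you?"}}
    ]
}
print(sample_response["results"][0]["values"]["text"])  # -> Hello, how can I help you?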
In the end it works just like the original author's version.
But what we are building is a smart home system, and my part is the host computer, so it has to send signals to the lower computer. The lower computer uses an ESP8266 board, and serial communication is achieved through level conversion in between (I may write up the Arduino side next summer vacation; with the competition and schoolwork there is probably no time to write it now).
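The post does not include that serial code itself, but as a rough sketch of how the host side could push a command string down to the ESP8266, something like the following would work. It assumes the pyserial package and a hypothetical device path /dev/ttyUSB0; the port name, baud rate, and message format are my assumptions, not values from the original project:

import serial  # pyserial

# Minimal sketch: send a command pattern to the lower computer over serial.
ser = serial.Serial("/dev/ttyUSB0", 9600, timeout=1)  # assumed port and baud rate
ser.write(b"1010\n")  # e.g. the same "1010"-style on/off pattern used for the LEDs below
ser.close()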
Let's start with filtering. Open the enhance_speech.py file and wrap all of its code into a single function whose parameter is our file path. Change the earlier code to:
def Filter(path):
    # Open WAV document
    f = wave.open(path)
    # Read format information
    # (nchannels, sampwidth, framerate, nframes, comptype, compname)
    params = f.getparams()
    nchannels, sampwidth, framerate, nframes = params[:4]
Then save it and open our Xiaobai.py:
from enhance_speech import enhance_speech as es
# The Raspberry Pi differs from Ubuntu, so this import may cause problems on the Pi; adjust it yourself if needed.

while True:
    record_voice()
    es.Filter("/voices/myvoices.wav")
    request = listen()
This lets us denoise the recording and store what we said in a variable.
Because what I need to do is output high and low levels for the serial link, I connected four external LEDs and control the bulbs in a binary way.
To reduce the number of lines in the main program, I encapsulated a Light class in its own .py file, following the example from the Raspberry Pi website:
# Light.py
import RPi.GPIO as GPIO
import time

class Light(object):
    def __init__(self, port):
        self.port = port
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(self.port, GPIO.OUT)
        self.on_state = GPIO.HIGH
        self.off_state = not self.on_state

    def set_on(self):
        GPIO.output(self.port, self.on_state)

    def set_off(self):
        GPIO.output(self.port, self.off_state)

    def is_on(self):
        return GPIO.input(self.port) == self.on_state

    def is_off(self):
        return GPIO.input(self.port) == self.off_state

    def toggle(self):
        if self.is_on():
            self.set_off()
        else:
            self.set_on()

    def blink(self, t=0.3):
        self.set_off()
        self.set_on()
        time.sleep(t)
        self.set_off()
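As a quick sanity check, the class can be used on its own like this (BCM pin 17 is just one of the pins used later in Bright.py):

import Light

led = Light.Light(17)  # LED on BCM pin 17
led.set_on()
led.toggle()           # now off again
led.blink(0.5)         # off, on for 0.5 s, then off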
Save it. Then there is another problem: Python code runs from top to bottom, so the bulbs turn on one after another, not at the same time. To fix this we add another library, threading, and use multiple threads.
# Bright.py
import threading
import Light

def bright(t):
    # The parameter is a str like "1010": "1" means on, "0" means off
    threads = []            # thread pool
    li = [17, 18, 22, 27]   # BCM pin numbers of the Raspberry Pi where the lights are connected
    o = []                  # records the pin numbers of the lights to turn on

    def output(digital):
        # Threading task; the argument is a pin number
        Light.Light(digital).blink(1)

    array = list(t)  # turn the string into a list (this seems to work without the explicit list() too)
    counter = 0      # counter
    for j in array:
        if j == "1":
            o.append(li[counter])
        counter += 1
    for num in o:
        th = threading.Thread(target=output, args=(num,))  # add the task to the thread pool
        th.start()
        threads.append(th)
    for th in threads:
        th.join()
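So, as a usage example, the pattern maps position by position onto the pin list:

import Bright

Bright.bright("1010")  # blinks the LEDs on BCM pins 17 and 22 in parallel; 18 and 27 stay off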
Then go back to Xiaobai.py
while True:
    record_voice()
    es.Filter("/voices/myvoices.wav")
    request = listen()
    if "open" in request and "lamp" in request:  # regular expressions could also be used to filter
        t = "1010"
        Bright.bright(t)
        os.system("aplay /voices/Light_on.wav")  # Because of the network I keep as much offline as possible; these files are all pre-synthesized.
        break
    # Add the Turing Robot chat module from the original post and use keyword matching to switch into it
    elif "chat" in request and "day" in request and "with" in request and "I" in request:
        response = Turing(request)
        vo = voice(response)
        os.system("aplay " + vo)
        while True:
            record_voice()
            es.Filter("/voices/myvoices.wav")
            request = listen()
            if "No" in request and "chat" in request and "Yes" in request:
                os.system("aplay /voices/OK.wav")
                break
            else:
                response = Turing(request)
                vo = voice(response)
                os.system("aplay " + vo)
                engine.runAndWait()
        break
    else:
        os.system("aplay /voices/Error.wav")  # response played after an unrecognized command
        break
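As the comment above notes, the keyword checks could also be done with a regular expression. A small illustration of that alternative (not the code the post actually uses):

import re

# Matches "open ... lamp" or "lamp ... open" anywhere in the recognized text
if re.search(r"open.*lamp|lamp.*open", request):
    Bright.bright("1010")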
Putting these pieces together gives us our Xiaobai voice assistant.
Those are roughly all the files. Interface is my GUI, which the product needs; it is implemented with the Tkinter library.
How to install Tk:
#apt install python3-tk
Then go to the Python website and, under the Downloads section, download the source code for the same version number you are running.
After the download is complete
#tar zxvf Your Package
#cd Your Package
#./configure --prefix=/usr/local/python37
#make
#make install
Then re-create the soft link:
#unlink /usr/bin/python3
#ln -s /usr/bin/python3.7 /usr/bin/python3
Note that the link should point back to the original /usr/bin/python3.7, not the copy installed under /usr/local; the new build is compiled only so that Tkinter gets configured.
Here is my code:
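After relinking, you can quickly confirm that the interpreter now sees Tkinter. This is just a sanity check, not a required step:

# Run with the relinked python3
import tkinter
print(tkinter.TkVersion)  # prints e.g. 8.6 if Tkinter was configured correctly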
import tkinter as tk
import tkinter.messagebox
import Bright

window = tk.Tk()
window.title("Command control center")
window.geometry(str(window.winfo_screenwidth()) + "x" + str(window.winfo_screenheight()))
print(window.winfo_screenheight())
print(window.winfo_screenwidth())

l = tk.Label(window, text="Control Center", width=window.winfo_screenwidth(), height=3, bg="green")
l.pack(side="top")

def Disinfect():
    Bright.bright("1010")
    tkinter.messagebox.showinfo(title="Success!", message="Success!")

def Cloth_disinfect():
    Bright.bright("1100")
    tkinter.messagebox.showinfo(title="Success!", message="Success!")

def Bed_preheat():
    Bright.bright("0011")
    tkinter.messagebox.showinfo(title="Success!", message="Success!")

Img1 = tk.PhotoImage(file="/root/python/V1.4(Ubuntu)/images/Disinfect.png")
b1 = tk.Button(window, font=("Arial", 12), image=Img1, width=100, height=100, command=Disinfect)
b1.place(x=100, y=100)

Img2 = tk.PhotoImage(file="/root/python/V1.4(Ubuntu)/images/Cloth_disinfect.png")
b2 = tk.Button(window, font=("Arial", 12), image=Img2, width=100, height=100, command=Cloth_disinfect)
b2.place(x=250, y=100)

Img3 = tk.PhotoImage(file="/root/python/V1.4(Ubuntu)/images/Bed_preheat.png")
b3 = tk.Button(window, font=("Arial", 12), image=Img3, width=100, height=100, command=Bed_preheat)
b3.place(x=400, y=100)

window.mainloop()
For Tkinter in more depth, see the tutorial linked in the original post.
That is how far Xiaobai's development has come; I will upload it to GitHub to share after a while.