subDesTagesMitExtraKaese, 3 years ago
commit c657d9dcef

2 changed files with 129 additions and 128 deletions:
  1. .gitignore  +1    -0
  2. main.py     +128  -128

.gitignore  (+1, -0)

@@ -0,0 +1 @@
+__pycache__/

main.py  (+128, -128)

@@ -16,33 +16,33 @@ brightness = 15
 filename = None
 
 try:
-	opts, args = getopt.getopt(sys.argv[1:],"hsif:t:b:w:",["shadow=","input=","framerate=","time=","brightness=","write="])
+  opts, args = getopt.getopt(sys.argv[1:],"hsif:t:b:w:",["shadow=","input=","framerate=","time=","brightness=","write="])
 except getopt.GetoptError:
-	print('main.py -s (shadow) -i (use input) -f --framerate <f=60>\n        -t --time <t=0.033> -b --brightness <b=15> -w --write <filename.wav>')
-	sys.exit(2)
+  print('main.py -s (shadow) -i (use input) -f --framerate <n=60>\n        -t --time <t=0.033> -b --brightness <b=15> -w --write <filename.wav>')
+  sys.exit(2)
 for opt, arg in opts:
-	if opt == '-h':
-		 print('main.py -s (shadow) -i (use input) -f --framerate <f=60>\n        -t --time <t=0.033> -b --brightness <b=15> -w --write <filename.wav>')
-		 sys.exit()
-	elif opt in ("-s", "--shadow"):
-		graphs = False
-	elif opt in ("-i", "--input"):
-		outDev = False
-	elif opt in ("-f", "--framerate"):
-		 framerate = float(arg)
-	elif opt in ("-t", "--time"):
-		 dataTime = float(arg)
-	elif opt in ("-b", "--brightness"):
-		brightness = int(arg)
-		if brightness > 15:
-			brightness = 15
-		elif brightness < 0:
-				brightness = 0
-	elif opt in ("-w", "--write"):
-		filename = arg
-		import wave
-		import pyaudio
-		
+  if opt == '-h':
+    print('main.py -s (shadow) -i (use input) -f --framerate <n=60>\n        -t --time <t=0.033> -b --brightness <b=15> -w --write <filename.wav>')
+    sys.exit()
+  elif opt in ("-s", "--shadow"):
+    graphs = False
+  elif opt in ("-i", "--input"):
+    outDev = False
+  elif opt in ("-f", "--framerate"):
+     framerate = float(arg)
+  elif opt in ("-t", "--time"):
+     dataTime = float(arg)
+  elif opt in ("-b", "--brightness"):
+    brightness = int(arg)
+    if brightness > 15:
+      brightness = 15
+    elif brightness < 0:
+        brightness = 0
+  elif opt in ("-w", "--write"):
+    filename = arg
+    import wave
+    import pyaudio
+    
 audio = audioHandler.Listener(dataTime, 30, not outDev)
 cube = serialHandler.SerialConnection()
 vis = serialHandler.Visualization()
@@ -52,12 +52,12 @@ lamp = mqttHandler.RgbLamp("<mqtt server ip>")
 intervalTime = time.time()
 
 if graphs:
-	import displayHandler
-	window = displayHandler.Plot(count=5)
-	window.addLabel(0, "x = f(t)", "t in s")
-	window.addLabel(1, "x = f(f)", "f in Hz")
-	window.addLabel(3, "Beat detection", "t in s")
-	window.addLabel(4, "", "bpm")
+  import displayHandler
+  window = displayHandler.Plot(count=5)
+  window.addLabel(0, "x = f(t)", "t in s")
+  window.addLabel(1, "x = f(f)", "f in Hz")
+  window.addLabel(3, "Beat detection", "t in s")
+  window.addLabel(4, "", "bpm")
 
 
 pitches = [(2**(1/12))**n * 220 for n in range(-12*2, 12*5)]
@@ -73,30 +73,30 @@ cube.connect()
 cube.setBrightness(brightness)
 
 if filename:
-	wf = wave.open(filename, 'wb')
-	wf.setnchannels(audio._device["channels"])
-	wf.setsampwidth(audio.p.get_sample_size(pyaudio.paInt16))
-	wf.setframerate(audio.getSampleRate())
+  wf = wave.open(filename, 'wb')
+  wf.setnchannels(audio._device["channels"])
+  wf.setsampwidth(audio.p.get_sample_size(pyaudio.paInt16))
+  wf.setframerate(audio.getSampleRate())
 
 if graphs:
 
-	x = [x / audio.getSampleRate() for x in range(len(audio.left ))]
-	lineAmpL = window.addLine(0, x, -1.5, 1.5, 'r-')
-	lineAmpR = window.addLine(0, x, -1.5, 1.5, 'g-')
+  x = [x / audio.getSampleRate() for x in range(len(audio.left ))]
+  lineAmpL = window.addLine(0, x, -1.5, 1.5, 'r-')
+  lineAmpR = window.addLine(0, x, -1.5, 1.5, 'g-')
 
-	x = [x * audio.getSampleRate() / nFFT for x in range(audio.fftMin, audio.fftMax)]
-	lineFft = window.addLine(1, x, 0, 1, 'r-', 'log')
+  x = [x * audio.getSampleRate() / nFFT for x in range(audio.fftMin, audio.fftMax)]
+  lineFft = window.addLine(1, x, 0, 1, 'r-', 'log')
 
-	x = ["{:.0f}\n{:.0f}".format(a, b) for a,b in zip(octaves, octaves[1:])]
-	#barsFft = window.addBars(2, x, 0, 1, 0.5, -0.25, 'r')
+  x = ["{:.0f}\n{:.0f}".format(a, b) for a,b in zip(octaves, octaves[1:])]
+  #barsFft = window.addBars(2, x, 0, 1, 0.5, -0.25, 'r')
 
-	x = ['as','h','c','cis','d','es', 'e', 'f', 'fis', 'g', 'gis', 'a']
-	colors = ['r', 'g', 'b', 'y']
-	barsPitch = [window.addBars(2, x, 0, 1, 0.25, -0.5+1/4*(i+1), colors[i]) for i in range(4)]
-	
-	lineAgc = window.addLine(3, [x * audio._dataTime for x in range(audio._agcLen)], 0, 2)
-	x = [x / audio._dataTime / audio.beatnFFT * 60 for x in range(audio.beatFftMin, audio.beatFftMax)]
-	lineAgcFft = window.addLine(4, x, 0, 1, 'r-')
+  x = ['as','h','c','cis','d','es', 'e', 'f', 'fis', 'g', 'gis', 'a']
+  colors = ['r', 'g', 'b', 'y']
+  barsPitch = [window.addBars(2, x, 0, 1, 0.25, -0.5+1/4*(i+1), colors[i]) for i in range(4)]
+  
+  lineAgc = window.addLine(3, [x * audio._dataTime for x in range(audio._agcLen)], 0, 2)
+  x = [x / audio._dataTime / audio.beatnFFT * 60 for x in range(audio.beatFftMin, audio.beatFftMax)]
+  lineAgcFft = window.addLine(4, x, 0, 1, 'r-')
 
 #delay to gather sound data
 time.sleep(0.1)
@@ -106,85 +106,85 @@ mean = (0,0)
 avg = [0]*len(octaves)
 
 def loop():
-	global intervalTime, avg, sums, mean
-	
-	if audio.hasNewData():
-	
-		left, right, fft = np.copy(audio.left), np.copy(audio.right), np.copy(audio.fft)
-		
-		avg = audio.fftGroup(fft, octaves)
-		mean = np.mean(np.absolute(left)),np.mean(np.absolute(right))
-		
-		if filename:
-			wf.writeframes(audio._buf)
-		
-		if graphs:
-			lineAmpL.set_ydata(left)
-			lineAmpR.set_ydata(right)
-			lineFft.set_ydata(fft[audio.fftMin-1:audio.fftMax])
-			#for b in range(octCount - 1):
-				#barsFft[b].set_height(avg[b])
-				
-			lineAgc.set_ydata(audio._agcMaxima / audio.meanAmp)
-			lineAgcFft.set_ydata(np.abs(audio.agcFFT[audio.beatFftMin:audio.beatFftMax]) / audio.meanAmp * 10)
-			
-		sums = np.zeros((4, 12))
-		
-		for i, p in enumerate(audio.fftGroup(fft, pitches)):
-			sums[int(i/12*4/octCount)][(i+1) % 12] += p
-			
-		#order = np.argsort(np.abs(audio.agcFFT[audio.beatFftMin:audio.beatFftMax]))
-		#print(order)
-	
-	vis.matrix *= 0.7
-	
-	
-	for k in range(4):
-		#mn = min(sums[k])
-		#diff = max(sums[k]) - mn
-		#sums[k] = (sums[k] - mn)
-	
-		if graphs:
-			for i in range(12):
-				barsPitch[k][i].set_height(sums[k][i])
-		
-		vis.matrix += vis.noteCircle(sums[k], 3-k)
-		vis.matrix += vis.ampPyramid((avg[0]*2,avg[1]*2,*mean))
-		np.clip(vis.matrix, 0, 1, out=vis.matrix)
-	cube.send(vis.matrix)
-		
-	if time.time() - intervalTime > 60:
-		if not cube.isConnected():
-			print("lost serial connection, retrying to connect...")
-			cube.connect()
-			
-		audio.stop()
-		audio.start()
-		intervalTime = time.time()
-		
-		
+  global intervalTime, avg, sums, mean
+  
+  if audio.hasNewData():
+  
+    left, right, fft = np.copy(audio.left), np.copy(audio.right), np.copy(audio.fft)
+    
+    avg = audio.fftGroup(fft, octaves)
+    mean = np.mean(np.absolute(left)),np.mean(np.absolute(right))
+    
+    if filename:
+      wf.writeframes(audio._buf)
+    
+    if graphs:
+      lineAmpL.set_ydata(left)
+      lineAmpR.set_ydata(right)
+      lineFft.set_ydata(fft[audio.fftMin-1:audio.fftMax])
+      #for b in range(octCount - 1):
+        #barsFft[b].set_height(avg[b])
+        
+      lineAgc.set_ydata(audio._agcMaxima / audio.meanAmp)
+      lineAgcFft.set_ydata(np.abs(audio.agcFFT[audio.beatFftMin:audio.beatFftMax]) / audio.meanAmp * 10)
+      
+    sums = np.zeros((4, 12))
+    
+    for i, p in enumerate(audio.fftGroup(fft, pitches)):
+      sums[int(i/12*4/octCount)][(i+1) % 12] += p
+      
+    #order = np.argsort(np.abs(audio.agcFFT[audio.beatFftMin:audio.beatFftMax]))
+    #print(order)
+  
+  vis.matrix *= 0.7
+  
+  
+  for k in range(4):
+    #mn = min(sums[k])
+    #diff = max(sums[k]) - mn
+    #sums[k] = (sums[k] - mn)
+  
+    if graphs:
+      for i in range(12):
+        barsPitch[k][i].set_height(sums[k][i])
+    
+    vis.matrix += vis.noteCircle(sums[k], 3-k)
+    vis.matrix += vis.ampPyramid((avg[0]*2,avg[1]*2,*mean))
+    np.clip(vis.matrix, 0, 1, out=vis.matrix)
+  cube.send(vis.matrix)
+    
+  if time.time() - intervalTime > 60:
+    if not cube.isConnected():
+      print("lost serial connection, retrying to connect...")
+      cube.connect()
+      
+    audio.stop()
+    audio.start()
+    intervalTime = time.time()
+    
+    
 try:
-	if graphs:
-		window.show(loop, framerate)
-	
-	else:
-		i = 0
-		while True:
-			startTime = time.time()
-			loop()
-			runTime = time.time() - startTime
-			if runTime < 1/framerate:
-				time.sleep(1/framerate-runTime)
-			i+=1
-			if i >= framerate/4:
-				print("agc: {:6.1%} \tvolume: {:7.1%}".format(audio.getAgc(), audio.getVolume()))
-				i = 1
-		
+  if graphs:
+    window.show(loop, framerate)
+  
+  else:
+    i = 0
+    while True:
+      startTime = time.time()
+      loop()
+      runTime = time.time() - startTime
+      if runTime < 1/framerate:
+        time.sleep(1/framerate-runTime)
+      i+=1
+      if i >= framerate*30:
+        print("agc: {:6.1%} \tvolume: {:7.1%}".format(audio.getAgc(), audio.getVolume()))
+        i = 1
+    
 except KeyboardInterrupt:
-	audio.closeStream()
-	if filename:
-		wf.close()
+  audio.closeStream()
+  if filename:
+    wf.close()
 except:
-	print("Unexpected error:", sys.exc_info()[0])
-	input("Press enter to continue...")
-	raise
+  print("Unexpected error:", sys.exc_info()[0])
+  input("Press enter to continue...")
+  raise
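For reference, the help text in the hunks above (getopt spec "hsif:t:b:w:") implies invocations like the following. This is an illustrative sketch only: the flag values are made up, and running it additionally assumes the repository's audioHandler, serialHandler and mqttHandler modules plus a configured "<mqtt server ip>".

    python3 main.py -s -i -f 30 -b 10 -w out.wav   # no plot window, use an input device, 30 fps, brightness 10, record audio to out.wav
    python3 main.py -h                             # print the usage string shown in the diff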