more comments + warnings for outdated dependencies

This commit is contained in:
tassaron 2017-07-09 01:10:06 -04:00
parent f027fd4353
commit 94d4acc1f4
13 changed files with 148 additions and 77 deletions

View File

@ -1,2 +0,0 @@
# file GENERATED by distutils, do NOT edit
freeze.py

View File

@ -3,7 +3,7 @@ audio-visualizer-python
This is a little GUI tool which creates an audio visualization video from an input audio file. Different components can be added and layered to change the resulting video and add images, videos, gradients, text, etc. The component setup can be saved as a Project and exporting can be automated using commandline options.
The program works on Linux (Ubuntu 16.04), Windows (Windows 7), and Mac OS X. If you encounter problems running it or have other bug reports or features that you wish to see implemented, please fork the project and send me a pull request and/or file an issue on this project.
The program works on Linux, macOS, and Windows. If you encounter problems running it or have other bug reports or features that you wish to see implemented, please fork the project and send me a pull request and/or file an issue on this project.
I also need a good name that is not as generic as "audio-visualizer-python"!
@ -11,6 +11,8 @@ Dependencies
------------
Python 3, PyQt5, pillow-simd, numpy, and ffmpeg 3.3
**Note:** Pillow may be used as a drop-in replacement for Pillow-SIMD if problems are encountered during installation. However, this will result in much slower video export times.
Installation
------------
### Manual installation on Ubuntu 16.04
@ -23,7 +25,7 @@ Installation
Download audio-visualizer-python from this repository and run it with `python3 main.py`.
### Manual installation on Windows
* **Not Recommended.** [Compiling Pillow is difficult on Windows](http://pillow.readthedocs.io/en/3.1.x/installation.html#building-on-windows) and required for a manual installation.
* **Warning:** [Compiling Pillow is difficult on Windows](http://pillow.readthedocs.io/en/3.1.x/installation.html#building-on-windows) and required for the best experience.
* Download and install Python 3.6 from [https://www.python.org/downloads/windows/](https://www.python.org/downloads/windows/)
* Add Python to your system PATH (it will ask during the installation process).
* Brave the treacherous valley of getting prerequisites to [compile Pillow on Windows](https://www.pypkg.com/pypi/pillow-simd/f/winbuild/README.md). This is necessary because binary builds for Pillow-SIMD are not available.
@ -34,7 +36,7 @@ Download audio-visualizer-python from this repository and run it with `python3 m
Download audio-visualizer-python from this repository and run it from the command line with `python main.py`.
### Manual installation on macOS [Outdated]
### Manual installation on macOS **[Outdated]**
* Install [Homebrew](http://brew.sh/)
* Use the following commands to install the needed dependencies:

View File

@ -6,8 +6,11 @@ import os
class Component(QtCore.QObject):
''' A class for components to inherit.'''
# modified = QtCore.pyqtSignal(int, bool)
'''
A class for components to inherit. Read comments for documentation
on making a valid component. All subclasses must implement this signal:
modified = QtCore.pyqtSignal(int, bool)
'''
def __init__(self, moduleIndex, compPos, core):
super().__init__()
@ -36,30 +39,32 @@ class Component(QtCore.QObject):
# read your widget values, then call super().update()
def loadPreset(self, presetDict, presetName):
'''Subclasses take (presetDict, presetName=None) as args.
Must use super().loadPreset(presetDict, presetName) first,
then update self.page widgets using the preset dict.
'''
Subclasses take (presetDict, presetName=None) as args.
Must use super().loadPreset(presetDict, presetName) first,
then update self.page widgets using the preset dict.
'''
self.currentPreset = presetName \
if presetName is not None else presetDict['preset']
def preFrameRender(self, **kwargs):
'''Triggered only before a video is exported (video_thread.py)
self.worker = the video thread worker
self.completeAudioArray = a list of audio samples
self.sampleSize = number of audio samples per video frame
self.progressBarUpdate = signal to set progress bar number
self.progressBarSetText = signal to set progress bar text
Use the latter two signals to update the MainProgram if needed
for a long initialization procedure (i.e., for a visualizer)
''' Triggered only before a video is exported (video_thread.py)
self.worker = the video thread worker
self.completeAudioArray = a list of audio samples
self.sampleSize = number of audio samples per video frame
self.progressBarUpdate = signal to set progress bar number
self.progressBarSetText = signal to set progress bar text
Use the latter two signals to update the MainWindow if needed
for a long initialization procedure (i.e., for a visualizer)
'''
for var, value in kwargs.items():
exec('self.%s = value' % var)
def command(self, arg):
'''Configure a component using argument from the commandline.
Use super().command(arg) at the end of a subclass's method,
if no arguments are found in that method first
'''
Configure a component using argument from the commandline.
Use super().command(arg) at the end of a subclass's method,
if no arguments are found in that method first
'''
if arg.startswith('preset='):
_, preset = arg.split('=', 1)
@ -84,9 +89,10 @@ class Component(QtCore.QObject):
'''Print help text for this Component's commandline arguments'''
def pickColor(self):
'''Use color picker to get color input from the user,
and return this as an RGB string and QPushButton stylesheet.
In a subclass apply stylesheet to any color selection widgets
'''
Use color picker to get color input from the user,
and return this as an RGB string and QPushButton stylesheet.
In a subclass apply stylesheet to any color selection widgets
'''
dialog = QtWidgets.QColorDialog()
dialog.setOption(QtWidgets.QColorDialog.ShowAlphaChannel, True)
@ -101,7 +107,7 @@ class Component(QtCore.QObject):
return None, None
def RGBFromString(self, string):
''' Turns an RGB string like "255, 255, 255" into a tuple '''
'''Turns an RGB string like "255, 255, 255" into a tuple'''
try:
tup = tuple([int(i) for i in string.split(',')])
if len(tup) != 3:
@ -135,13 +141,16 @@ class Component(QtCore.QObject):
def previewRender(self, previewWorker):
width = int(previewWorker.core.settings.value('outputWidth'))
height = int(previewWorker.core.settings.value('outputHeight'))
image = Image.new("RGBA", (width, height), (0,0,0,0))
from frame import BlankFrame
image = BlankFrame(width, height)
return image
def frameRender(self, moduleNo, frameNo):
def frameRender(self, layerNo, frameNo):
audioArrayIndex = frameNo * self.sampleSize
width = int(self.worker.core.settings.value('outputWidth'))
height = int(self.worker.core.settings.value('outputHeight'))
image = Image.new("RGBA", (width, height), (0,0,0,0))
from frame import BlankFrame
image = BlankFrame(width, height)
return image
@classmethod

View File

@ -104,6 +104,9 @@ class Component(Component):
self.page.checkBox_trans.setEnabled(True)
self.page.checkBox_stretch.setEnabled(True)
self.page.comboBox_spread.setEnabled(True)
if self.trans:
self.page.lineEdit_color2.setEnabled(False)
self.page.pushButton_color2.setEnabled(False)
self.page.fillWidget.setCurrentIndex(self.fillType)
self.parent.drawPreview()
@ -118,7 +121,7 @@ class Component(Component):
super().preFrameRender(**kwargs)
return ['static']
def frameRender(self, moduleNo, arrayNo, frameNo):
def frameRender(self, layerNo, frameNo):
width = int(self.worker.core.settings.value('outputWidth'))
height = int(self.worker.core.settings.value('outputHeight'))
return self.drawFrame(width, height)

View File

@ -51,7 +51,7 @@ class Component(Component):
super().preFrameRender(**kwargs)
return ['static']
def frameRender(self, moduleNo, arrayNo, frameNo):
def frameRender(self, layerNo, frameNo):
width = int(self.worker.core.settings.value('outputWidth'))
height = int(self.worker.core.settings.value('outputHeight'))
return self.drawFrame(width, height)

View File

@ -107,7 +107,8 @@ class Component(Component):
self.progressBarSetText.emit(pStr)
self.progressBarUpdate.emit(int(progress))
def frameRender(self, moduleNo, arrayNo, frameNo):
def frameRender(self, layerNo, frameNo):
arrayNo = frameNo * self.sampleSize
return self.drawBars(
self.width, self.height,
self.spectrumArray[arrayNo],

View File

@ -123,7 +123,7 @@ class Component(Component):
super().preFrameRender(**kwargs)
return ['static']
def frameRender(self, moduleNo, arrayNo, frameNo):
def frameRender(self, layerNo, frameNo):
width = int(self.worker.core.settings.value('outputWidth'))
height = int(self.worker.core.settings.value('outputHeight'))
return self.addText(width, height)

View File

@ -165,7 +165,7 @@ class Component(Component):
component=self, scale=self.scale
) if os.path.exists(self.videoPath) else None
def frameRender(self, moduleNo, arrayNo, frameNo):
def frameRender(self, layerNo, frameNo):
if self.video:
return self.video.frame(frameNo)
else:

View File

@ -449,15 +449,15 @@ class Core:
else:
if sys.platform == "win32":
return "ffmpeg.exe"
return "ffmpeg"
else:
try:
with open(os.devnull, "w") as f:
sp.check_call(
['ffmpeg', '-version'], stdout=f, stderr=f
toolkit.checkOutput(
['ffmpeg', '-version'], stderr=f
)
return "ffmpeg"
except:
except sp.CalledProcessError:
return "avconv"
def readAudioFile(self, filename, parent):

View File

@ -8,17 +8,17 @@ import sys
class FramePainter(QtGui.QPainter):
'''
A QPainter for a blank frame, which can be converted into a
Pillow image with finalize()
'''
def __init__(self, width, height):
image = BlankFrame(width, height)
self.image = ImageQt(image)
super().__init__(self.image)
def setPen(self, RgbTuple):
if sys.byteorder == 'big':
color = QtGui.QColor(*RgbTuple)
else:
color = QtGui.QColor(*RgbTuple[::-1])
super().setPen(QtGui.QColor(color))
super().setPen(PaintColor(*RgbTuple))
def finalize(self):
self.end()
@ -28,15 +28,20 @@ class FramePainter(QtGui.QPainter):
'RGBA', (self.image.width(), self.image.height()), imBytes
)
class PaintColor(QtGui.QColor):
    '''
    A QColor that compensates for hardware byte order: on little-endian
    machines the red and blue channels are stored reversed, so they are
    swapped before the underlying QColor is constructed.
    '''
    def __init__(self, r, g, b, a=255):
        # Choose the channel order once, then delegate to QColor.
        channels = (r, g, b, a) if sys.byteorder == 'big' else (b, g, r, a)
        super().__init__(*channels)
def FloodFrame(width, height, RgbaTuple):
    '''Return a new width x height RGBA Pillow image filled with RgbaTuple.'''
    return Image.new("RGBA", (width, height), RgbaTuple)
def BlankFrame(width, height):
    '''
    The base frame used by each component to start drawing:
    a fully transparent canvas of the requested size.
    '''
    transparent = (0, 0, 0, 0)
    return FloodFrame(width, height, transparent)

View File

@ -6,6 +6,7 @@
'''
from PyQt5 import QtCore, QtGui, uic, QtWidgets
from PyQt5.QtWidgets import QMenu, QShortcut
from PIL import Image
from queue import Queue
import sys
import os
@ -17,7 +18,7 @@ import core
import preview_thread
import video_thread
from presetmanager import PresetManager
from toolkit import LoadDefaultSettings, disableWhenEncoding
from toolkit import LoadDefaultSettings, disableWhenEncoding, checkOutput
class PreviewWindow(QtWidgets.QLabel):
@ -269,6 +270,37 @@ class MainWindow(QtWidgets.QMainWindow):
self.openProject(self.currentProject, prompt=False)
self.drawPreview(True)
# verify Pillow version
if not self.settings.value("pilMsgShown") \
and 'post' not in Image.PILLOW_VERSION:
self.showMessage(
msg="You are using the standard version of the "
"Python imaging library (Pillow %s). Upgrade "
"to the Pillow-SIMD fork to enable hardware accelerations "
"and export videos faster." % Image.PILLOW_VERSION
)
self.settings.setValue("pilMsgShown", True)
# verify Ffmpeg version
if not self.settings.value("ffmpegMsgShown"):
try:
with open(os.devnull, "w") as f:
ffmpegVers = checkOutput(
['ffmpeg', '-version'], stderr=f
)
goodVersion = str(ffmpegVers).split()[2].startswith('3')
except:
goodVersion = False
else:
goodVersion = True
if not goodVersion:
self.showMessage(
msg="You're using an old version of Ffmpeg. "
"Some features may not work as expected."
)
self.settings.setValue("ffmpegMsgShown", True)
# Setup Hotkeys
QtWidgets.QShortcut("Ctrl+S", self.window, self.saveCurrentProject)
QtWidgets.QShortcut("Ctrl+A", self.window, self.openSaveProjectDialog)

View File

@ -9,7 +9,8 @@ from PIL.ImageQt import ImageQt
import core
from queue import Queue, Empty
import os
from copy import copy
from frame import FloodFrame
class Worker(QtCore.QObject):
@ -22,11 +23,13 @@ class Worker(QtCore.QObject):
parent.newTask.connect(self.createPreviewImage)
parent.processTask.connect(self.process)
self.parent = parent
self.core = core.Core()
self.core = self.parent.core
self.queue = queue
self.core.settings = parent.settings
self.stackedWidget = parent.window.stackedWidget
self.background = Image.new("RGBA", (1920, 1080), (0, 0, 0, 0))
# create checkerboard background to represent transparency
self.background = FloodFrame(1920, 1080, (0, 0, 0, 0))
self.background.paste(Image.open(os.path.join(
self.core.wd, "background.png")))
@ -49,7 +52,7 @@ class Worker(QtCore.QObject):
width = int(self.core.settings.value('outputWidth'))
height = int(self.core.settings.value('outputHeight'))
frame = copy(self.background)
frame = self.background.copy()
frame = frame.resize((width, height))
components = nextPreviewInformation["components"]
@ -58,6 +61,7 @@ class Worker(QtCore.QObject):
frame = Image.alpha_composite(
frame, component.previewRender(self)
)
except ValueError as e:
self.parent.showMessage(
msg="Bad frame returned by %s's previewRender method. "

View File

@ -3,7 +3,7 @@
an input file, output path, and component list. During export multiple
threads are created to render the video as quickly as possible. Signals
are emitted to update MainWindow's progress bar, detail text, and preview.
Export can be cancelled with cancel() + reset()
Export can be cancelled with cancel()
'''
from PyQt5 import QtCore, QtGui, uic
from PyQt5.QtCore import pyqtSignal, pyqtSlot
@ -16,11 +16,11 @@ import os
from queue import Queue, PriorityQueue
from threading import Thread, Event
import time
from copy import copy
import signal
import core
from toolkit import openPipe
from frame import FloodFrame
class Worker(QtCore.QObject):
@ -44,49 +44,65 @@ class Worker(QtCore.QObject):
self.stopped = False
def renderNode(self):
'''
Grabs audio data indices at frames to export, from compositeQueue.
Sends it to the components' frameRender methods in layer order
to create subframes & composite them into the final frame.
The resulting frames are collected in the renderQueue
'''
while not self.stopped:
i = self.compositeQueue.get()
audioI = self.compositeQueue.get()
bgI = int(audioI / self.sampleSize)
frame = None
for compNo, comp in reversed(list(enumerate(self.components))):
if compNo in self.staticComponents and \
self.staticComponents[compNo] is not None:
if frame is None:
# static component
if frame is None: # bottom-most layer
frame = self.staticComponents[compNo]
else:
frame = Image.alpha_composite(
frame, self.staticComponents[compNo])
frame, self.staticComponents[compNo]
)
else:
if frame is None:
frame = comp.frameRender(compNo, i[0], i[1])
# animated component
if frame is None: # bottom-most layer
frame = comp.frameRender(compNo, bgI)
else:
frame = Image.alpha_composite(
frame, comp.frameRender(compNo, i[0], i[1]))
frame, comp.frameRender(compNo, bgI)
)
self.renderQueue.put([i[0], frame])
self.renderQueue.put([audioI, frame])
self.compositeQueue.task_done()
def renderDispatch(self):
'''
Places audio data indices in the compositeQueue, to be used
by a renderNode later. All indices are multiples of self.sampleSize
sampleSize * frameNo = audioI, AKA audio data starting at frameNo
'''
print('Dispatching Frames for Compositing...')
for i in range(0, len(self.completeAudioArray), self.sampleSize):
self.compositeQueue.put([i, self.bgI])
# increment tracked video frame for next iteration
self.bgI += 1
for audioI in range(0, len(self.completeAudioArray), self.sampleSize):
self.compositeQueue.put(audioI)
def previewDispatch(self):
background = Image.new("RGBA", (1920, 1080), (0, 0, 0, 0))
'''
Grabs frames from the previewQueue, adds them to the checkerboard
and emits a final QImage to the MainWindow for the live preview
'''
background = FloodFrame(1920, 1080, (0, 0, 0, 0))
background.paste(Image.open(os.path.join(
self.core.wd, "background.png")))
background = background.resize((self.width, self.height))
while not self.stopped:
i = self.previewQueue.get()
if time.time() - self.lastPreview >= 0.06 or i[0] == 0:
image = copy(background)
image = Image.alpha_composite(image, i[1])
self._image = ImageQt(image)
self.imageCreated.emit(QtGui.QImage(self._image))
audioI, frame = self.previewQueue.get()
if time.time() - self.lastPreview >= 0.06 or audioI == 0:
image = Image.alpha_composite(background.copy(), frame)
self.imageCreated.emit(ImageQt(image))
self.lastPreview = time.time()
self.previewQueue.task_done()
@ -99,7 +115,6 @@ class Worker(QtCore.QObject):
self.reset()
self.bgI = 0 # tracked video frame
self.width = int(self.core.settings.value('outputWidth'))
self.height = int(self.core.settings.value('outputHeight'))
progressBarValue = 0
@ -194,8 +209,8 @@ class Worker(QtCore.QObject):
)
if properties and 'static' in properties:
self.staticComponents[compNo] = copy(
comp.frameRender(compNo, 0, 0))
self.staticComponents[compNo] = \
comp.frameRender(compNo, 0).copy()
self.progressBarUpdate.emit(100)
# Create ffmpeg pipe and queues for frames
@ -231,9 +246,10 @@ class Worker(QtCore.QObject):
pStr = "Exporting video..."
self.progressBarSetText.emit(pStr)
if not self.canceled:
for i in range(0, len(self.completeAudioArray), self.sampleSize):
for audioI in range(
0, len(self.completeAudioArray), self.sampleSize):
while True:
if i in frameBuffer or self.canceled:
if audioI in frameBuffer or self.canceled:
# if frame's in buffer, pipe it to ffmpeg
break
# else fetch the next frame & add to the buffer
@ -244,15 +260,16 @@ class Worker(QtCore.QObject):
break
try:
self.out_pipe.stdin.write(frameBuffer[i].tobytes())
self.previewQueue.put([i, frameBuffer[i]])
del frameBuffer[i]
self.out_pipe.stdin.write(frameBuffer[audioI].tobytes())
self.previewQueue.put([audioI, frameBuffer[audioI]])
del frameBuffer[audioI]
except:
break
# increase progress bar value
if progressBarValue + 1 <= (i / len(self.completeAudioArray)) \
* 100:
if progressBarValue + 1 <= (
audioI / len(self.completeAudioArray)
) * 100:
progressBarValue = numpy.floor(
(i / len(self.completeAudioArray)) * 100)
self.progressBarUpdate.emit(progressBarValue)