diff --git a/README.md b/README.md index 658a22d..9149b4f 100644 --- a/README.md +++ b/README.md @@ -1,28 +1,31 @@ audio-visualizer-python ======================= +**We need a good name that is not as generic as "audio-visualizer-python"!** -This is a little GUI tool which creates an audio visualization video from an input audio file. Different components can be added and layered to change the resulting video and add images, videos, gradients, text, etc. The component setup can be saved as a Project and exporting can be automated using commandline options. +This is a little GUI tool which creates an audio visualization video from an input audio file. Different components can be added and layered to change the resulting video and add images, videos, gradients, text, etc. Encoding options can be changed and a variety of output containers are supported. -The program works on Linux, macOS, and Windows. If you encounter problems running it or have other bug reports or features that you wish to see implemented, please fork the project and send me a pull request and/or file an issue on this project. +Projects can be created from the GUI and used in commandline mode for easy automation of video production. Create a template project named `template` with your typical visualizers and watermarks, and add text to the top layer from the commandline: +`avp template -c 99 text "title=Episode 371" -i /this/weeks/audio.ogg -o out` -I also need a good name that is not as generic as "audio-visualizer-python"! +For more information use `avp --help`, or for help with a particular component use `avp -c 0 componentName help`. + +The program works on Linux, macOS, and Windows. If you encounter problems running it or have other bug reports or features that you wish to see implemented, please fork the project and submit a pull request and/or file an issue on this project. Dependencies ------------ -Python 3, PyQt5, pillow-simd, numpy, and ffmpeg 3.3 +Python 3.4, FFmpeg 3.3, PyQt5, Pillow-SIMD, NumPy -**Note:** Pillow may be used as a drop-in replacement for Pillow-SIMD if problems are encountered installing. However this will result in much slower video export times. +**Note:** Pillow may be used as a drop-in replacement for Pillow-SIMD if problems are encountered installing. However, this will result in much slower video export times. For help troubleshooting installation problems, see the Pillow installation guide linked below. * For any problems with installing Pillow-SIMD, see the [Pillow installation guide](http://pillow.readthedocs.io/en/3.1.x/installation.html). Installation ------------ ### Manual installation on Ubuntu 16.04 * Install pip: `sudo apt-get install python3-pip` -* Install [prerequisites to compile Pillow](http://pillow.readthedocs.io/en/3.1.x/installation.html#building-on-linux):`sudo apt-get install python3-dev python3-setuptools libtiff5-dev libjpeg8-dev zlib1g-dev libfreetype6-dev liblcms2-dev libwebp-dev tcl8.6-dev tk8.6-dev python-tk` -* Prerequisites on **Fedora**:`sudo dnf install python3-devel redhat-rpm-config libtiff-devel libjpeg-devel libzip-devel freetype-devel lcms2-devel libwebp-devel tcl-devel tk-devel` -* Install dependencies from PyPI: `sudo pip3 install pyqt5 numpy pillow-simd` +* If Pillow is installed, it must be removed. Nothing should break because Pillow-SIMD is simply a drop-in replacement with better performance. +* Download audio-visualizer-python from this repository and run `sudo pip3 install .` in this directory. * Install `ffmpeg` from the [website](http://ffmpeg.org/) or from a PPA (e.g. 
[https://launchpad.net/~jonathonf/+archive/ubuntu/ffmpeg-3](https://launchpad.net/~jonathonf/+archive/ubuntu/ffmpeg-3)). NOTE: `ffmpeg` in the standard repos is too old (v2.8). Old versions and `avconv` may be used but full functionality is only guaranteed with `ffmpeg` 3.3 or higher. -Download audio-visualizer-python from this repository and run it with `python3 main.py`. +Run the program with `avp` or `python3 -m avpython` ### Manual installation on Windows * **Warning:** [Compiling Pillow is difficult on Windows](http://pillow.readthedocs.io/en/3.1.x/installation.html#building-on-windows) and required for the best experience. diff --git a/setup.py b/setup.py index fde3461..6ef688a 100644 --- a/setup.py +++ b/setup.py @@ -1,19 +1,48 @@ -+from setuptools import setup, find_packages - - -# Dependencies are automatically detected, but it might need +setup(name='audio_visualizer_python', - -# fine tuning. + version='1.0', - -buildOptions = dict(packages = [], excludes = [ + description='a little GUI tool to render visualization \ - - "apport", + videos of audio files', - - "apt", + license='MIT', - - "ctypes", + url='https://github.com/djfun/audio-visualizer-python', - - "curses", + packages=find_packages(), - - "distutils", + package_data={ - - "email", + 'src': ['*'], - - "html", + }, - - "http", + install_requires=['pillow-simd', 'numpy', ''], - - "json", + entry_points={ - - "xmlrpc", + 'gui_scripts': [ - - "nose" + 'audio-visualizer-python = avpython.main:main' - - ], include_files = ["main.ui"]) + ] - - + } - -import sys + ) \ No newline at end of file +from setuptools import setup +import os + + +def package_files(directory): + paths = [] + for (path, directories, filenames) in os.walk(directory): + for filename in filenames: + paths.append(os.path.join('..', path, filename)) + return paths + + +setup( + name='audio_visualizer_python', + version='2.0.0rc1', + url='https://github.com/djfun/audio-visualizer-python/tree/feature-newgui', + license='MIT', + description='Create audio visualization videos from a GUI or commandline', + long_description="Create customized audio visualization videos and save " + "them as Projects to continue editing later. Different components can " + "be added and layered to add visualizers, images, videos, gradients, " + "text, etc. 
Use Projects created in the GUI with commandline mode to " + "automate your video production workflow without learning any complex " + "syntax.", + classifiers=[ + 'Development Status :: 4 - Beta', + 'License :: OSI Approved :: MIT License', + 'Programming Language :: Python :: 3 :: Only', + 'Intended Audience :: End Users/Desktop', + 'Topic :: Multimedia :: Video :: Non-Linear Editor', + ], + keywords=['visualizer', 'visualization', 'commandline video', + 'video editor', 'ffmpeg', 'podcast'], + packages=[ + 'avpython', + 'avpython.components' + ], + package_dir={'avpython': 'src'}, + package_data={ + 'avpython': package_files('src'), + }, + install_requires=['Pillow-SIMD', 'PyQt5', 'numpy'], + entry_points={ + 'gui_scripts': [ + 'avp = avpython.main:main' + ], + } +) diff --git a/src/__init__.py b/src/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/__main__.py b/src/__main__.py new file mode 100644 index 0000000..a68739e --- /dev/null +++ b/src/__main__.py @@ -0,0 +1,3 @@ +from avpython.main import main + +main() \ No newline at end of file diff --git a/src/component.py b/src/component.py index 2b297d1..adb170e 100644 --- a/src/component.py +++ b/src/component.py @@ -178,8 +178,9 @@ class Component(QtCore.QObject): The first element can be: - A string (path to audio file), - Or an object that returns audio data through a pipe - The second element must be a dictionary of ffmpeg parameters - to apply to the input stream. + The second element must be a dictionary of ffmpeg filters/options + to apply to the input stream. See the filter docs for ideas: + https://ffmpeg.org/ffmpeg-filters.html \''' @classmethod diff --git a/src/components/sound.py b/src/components/sound.py index 4a5714b..bd7d002 100644 --- a/src/components/sound.py +++ b/src/components/sound.py @@ -17,12 +17,18 @@ class Component(Component): page.lineEdit_sound.textChanged.connect(self.update) page.pushButton_sound.clicked.connect(self.pickSound) + page.checkBox_chorus.stateChanged.connect(self.update) + page.spinBox_delay.valueChanged.connect(self.update) + page.spinBox_volume.valueChanged.connect(self.update) self.page = page return page def update(self): self.sound = self.page.lineEdit_sound.text() + self.delay = self.page.spinBox_delay.value() + self.volume = self.page.spinBox_volume.value() + self.chorus = self.page.checkBox_chorus.isChecked() super().update() def previewRender(self, previewWorker): @@ -46,7 +52,16 @@ class Component(Component): return "The audio file selected no longer exists!" 
def audio(self): - return (self.sound, {}) + params = {} + if self.delay != 0.0: + params['adelay'] = '=%s' % str(int(self.delay * 1000.00)) + if self.chorus: + params['chorus'] = \ + '=0.5:0.9:50|60|40:0.4|0.32|0.3:0.25|0.4|0.3:2|2.3|1.3' + if self.volume != 1.0: + params['volume'] = '=%s:replaygain_noclip=0' % str(self.volume) + + return (self.sound, params) def pickSound(self): sndDir = self.settings.value("componentDir", os.path.expanduser("~")) @@ -66,10 +81,16 @@ class Component(Component): def loadPreset(self, pr, presetName=None): super().loadPreset(pr, presetName) self.page.lineEdit_sound.setText(pr['sound']) + self.page.checkBox_chorus.setChecked(pr['chorus']) + self.page.spinBox_delay.setValue(pr['delay']) + self.page.spinBox_volume.setValue(pr['volume']) def savePreset(self): return { 'sound': self.sound, + 'chorus': self.chorus, + 'delay': self.delay, + 'volume': self.volume, } def commandHelp(self): diff --git a/src/components/sound.ui b/src/components/sound.ui index 5fc00c1..4c11332 100644 --- a/src/components/sound.ui +++ b/src/components/sound.ui @@ -87,6 +87,29 @@ + + + + Volume + + + + + + + x + + + 10.000000000000000 + + + 0.100000000000000 + + + 1.000000000000000 + + + @@ -100,6 +123,33 @@ + + + + Delay + + + + + + + s + + + 9999999.990000000223517 + + + 0.500000000000000 + + + + + + + Chorus + + + diff --git a/src/components/video.py b/src/components/video.py index 0b93293..9e3db30 100644 --- a/src/components/video.py +++ b/src/components/video.py @@ -45,7 +45,7 @@ class Video: '-i', self.videoPath, '-f', 'image2pipe', '-pix_fmt', 'rgba', - '-filter:v', 'scale=%s:%s' % scale( + '-filter_complex', '[0:v] scale=%s:%s' % scale( self.scale, self.width, self.height, str), '-vcodec', 'rawvideo', '-', ] @@ -127,6 +127,7 @@ class Component(Component): page.checkBox_distort.stateChanged.connect(self.update) page.checkBox_useAudio.stateChanged.connect(self.update) page.spinBox_scale.valueChanged.connect(self.update) + page.spinBox_volume.valueChanged.connect(self.update) page.spinBox_x.valueChanged.connect(self.update) page.spinBox_y.valueChanged.connect(self.update) @@ -139,9 +140,17 @@ class Component(Component): self.useAudio = self.page.checkBox_useAudio.isChecked() self.distort = self.page.checkBox_distort.isChecked() self.scale = self.page.spinBox_scale.value() + self.volume = self.page.spinBox_volume.value() self.xPosition = self.page.spinBox_x.value() self.yPosition = self.page.spinBox_y.value() + if self.useAudio: + self.page.label_volume.setEnabled(True) + self.page.spinBox_volume.setEnabled(True) + else: + self.page.label_volume.setEnabled(False) + self.page.spinBox_volume.setEnabled(False) + super().update() def previewRender(self, previewWorker): @@ -193,7 +202,10 @@ class Component(Component): self.badAudio = False def audio(self): - return (self.videoPath, {'map': '-v'}) + params = {} + if self.volume != 1.0: + params['volume'] = '=%s:replaygain_noclip=0' % str(self.volume) + return (self.videoPath, params) def preFrameRender(self, **kwargs): super().preFrameRender(**kwargs) @@ -222,6 +234,7 @@ class Component(Component): self.page.checkBox_useAudio.setChecked(pr['useAudio']) self.page.checkBox_distort.setChecked(pr['distort']) self.page.spinBox_scale.setValue(pr['scale']) + self.page.spinBox_volume.setValue(pr['volume']) self.page.spinBox_x.setValue(pr['x']) self.page.spinBox_y.setValue(pr['y']) @@ -233,6 +246,7 @@ class Component(Component): 'useAudio': self.useAudio, 'distort': self.distort, 'scale': self.scale, + 'volume': self.volume, 'x': self.xPosition, 
'y': self.yPosition, } @@ -258,7 +272,7 @@ class Component(Component): '-i', self.videoPath, '-f', 'image2pipe', '-pix_fmt', 'rgba', - '-filter:v', 'scale=%s:%s' % scale( + '-filter_complex', '[0:v] scale=%s:%s' % scale( self.scale, width, height, str), '-vcodec', 'rawvideo', '-', '-ss', '90', diff --git a/src/components/video.ui b/src/components/video.ui index 97b7d6f..08d15d3 100644 --- a/src/components/video.ui +++ b/src/components/video.ui @@ -10,6 +10,18 @@ 197 + + + 0 + 0 + + + + + 0 + 197 + + Form @@ -189,13 +201,6 @@ - - - - Use Audio - - - @@ -247,6 +252,62 @@ + + + + + + Use Audio + + + + + + + Volume + + + + + + + + 0 + 0 + + + + x + + + 0.000000000000000 + + + 10.000000000000000 + + + 0.100000000000000 + + + 1.000000000000000 + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + diff --git a/src/core.py b/src/core.py index 4c12209..a0a028b 100644 --- a/src/core.py +++ b/src/core.py @@ -468,7 +468,8 @@ class Core: ''' Constructs the major ffmpeg command used to export the video ''' - duration = str(duration) + safeDuration = "{0:.3f}".format(duration - 0.05) # used by filters + duration = "{0:.3f}".format(duration + 0.1) # used by input sources # Test if user has libfdk_aac encoders = toolkit.checkOutput( @@ -526,35 +527,103 @@ class Core: '-i', inputFile ] + # Add extra audio inputs and any needed avfilters + # NOTE: Global filters are currently hard-coded here for debugging use + globalFilters = 0 # increase to add global filters extraAudio = [ comp.audio() for comp in self.selectedComponents if 'audio' in comp.properties() ] - if extraAudio: - unwantedVideoStreams = [] - for streamNo, params in enumerate(extraAudio): + if extraAudio or globalFilters > 0: + # Add -i options for extra input files + extraFilters = {} + for streamNo, params in enumerate(reversed(extraAudio)): extraInputFile, params = params ffmpegCommand.extend([ - '-t', duration, + '-t', safeDuration, + # Tell ffmpeg about shorter clips (seemingly not needed) + # streamDuration = self.getAudioDuration(extraInputFile) + # if streamDuration > float(safeDuration) + # else "{0:.3f}".format(streamDuration), '-i', extraInputFile ]) - if 'map' in params and params['map'] == '-v': - # a video stream to remove - unwantedVideoStreams.append(streamNo + 1) + # Construct dataset of extra filters we'll need to add later + for ffmpegFilter in params: + if streamNo + 2 not in extraFilters: + extraFilters[streamNo + 2] = [] + extraFilters[streamNo + 2].append(( + ffmpegFilter, params[ffmpegFilter] + )) - if unwantedVideoStreams: - ffmpegCommand.extend(['-map', '0']) - for streamNo in unwantedVideoStreams: - ffmpegCommand.extend([ - '-map', '-%s:v' % str(streamNo) - ]) + # Start creating avfilters! Popen-style, so don't use semicolons; + extraFilterCommand = [] + + if globalFilters <= 0: + # Dictionary of last-used tmp labels for a given stream number + tmpInputs = {streamNo: -1 for streamNo in extraFilters} + else: + # Insert blank entries for global filters into extraFilters + # so the per-stream filters know what input to source later + for streamNo in range(len(extraAudio), 0, -1): + if streamNo + 1 not in extraFilters: + extraFilters[streamNo + 1] = [] + # Also filter the primary audio track + extraFilters[1] = [] + tmpInputs = { + streamNo: globalFilters - 1 + for streamNo in extraFilters + } + + # Add the global filters! 
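+            # Global filters read each stream's [N:a] input and write an
+            # [Ntmp0] label; the per-stream filters below then chain off the
+            # latest [NtmpX] label (or [N:a] when no global filters ran).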
+ # NOTE: list length must = globalFilters, currently hardcoded + if tmpInputs: + extraFilterCommand.extend([ + '[%s:a] ashowinfo [%stmp0]' % ( + str(streamNo), + str(streamNo) + ) + for streamNo in tmpInputs + ]) + + # Now add the per-stream filters! + for streamNo, paramList in extraFilters.items(): + for param in paramList: + source = '[%s:a]' % str(streamNo) \ + if tmpInputs[streamNo] == -1 else \ + '[%stmp%s]' % ( + str(streamNo), str(tmpInputs[streamNo]) + ) + tmpInputs[streamNo] = tmpInputs[streamNo] + 1 + extraFilterCommand.append( + '%s %s%s [%stmp%s]' % ( + source, param[0], param[1], str(streamNo), + str(tmpInputs[streamNo]) + ) + ) + + # Join all the filters together and combine into 1 stream + extraFilterCommand = "; ".join(extraFilterCommand) + '; ' \ + if tmpInputs else '' ffmpegCommand.extend([ '-filter_complex', - 'amix=inputs=%s:duration=first:dropout_transition=3' % str( - len(extraAudio) + 1 + extraFilterCommand + + '%s amix=inputs=%s:duration=first [a]' + % ( + "".join([ + '[%stmp%s]' % (str(i), tmpInputs[i]) + if i in extraFilters else '[%s:a]' % str(i) + for i in range(1, len(extraAudio) + 2) + ]), + str(len(extraAudio) + 1) ), ]) + # Only map audio from the filters, and video from the pipe + ffmpegCommand.extend([ + '-map', '0:v', + '-map', '[a]', + ]) + ffmpegCommand.extend([ # OUTPUT '-vcodec', vencoder, @@ -573,7 +642,7 @@ class Core: ffmpegCommand.append(outputFile) return ffmpegCommand - def readAudioFile(self, filename, parent): + def getAudioDuration(self, filename): command = [self.FFMPEG_BIN, '-i', filename] try: @@ -588,6 +657,10 @@ class Core: d = d.split(' ')[3] d = d.split(':') duration = float(d[0])*3600 + float(d[1])*60 + float(d[2]) + return duration + + def readAudioFile(self, filename, parent): + duration = self.getAudioDuration(filename) command = [ self.FFMPEG_BIN, diff --git a/src/main.py b/src/main.py index 2216d2a..6a9a25e 100644 --- a/src/main.py +++ b/src/main.py @@ -2,12 +2,18 @@ from PyQt5 import uic, QtWidgets import sys import os -import core -import preview_thread -import video_thread +def main(): + if getattr(sys, 'frozen', False): + # frozen + wd = os.path.dirname(sys.executable) + else: + # unfrozen + wd = os.path.dirname(os.path.realpath(__file__)) + + # make local imports work everywhere + sys.path.insert(0, wd) -if __name__ == "__main__": mode = 'GUI' if len(sys.argv) > 2: mode = 'commandline' @@ -28,22 +34,15 @@ if __name__ == "__main__": # app.setOrganizationName("audio-visualizer") if mode == 'commandline': - from command import * + from command import Command main = Command() elif mode == 'GUI': - from mainwindow import * + from mainwindow import MainWindow import atexit import signal - if getattr(sys, 'frozen', False): - # frozen - wd = os.path.dirname(sys.executable) - else: - # unfrozen - wd = os.path.dirname(os.path.realpath(__file__)) - window = uic.loadUi(os.path.join(wd, "mainwindow.ui")) # window.adjustSize() desc = QtWidgets.QDesktopWidget() @@ -64,3 +63,7 @@ if __name__ == "__main__": # applicable to both modes sys.exit(app.exec_()) + + +if __name__ == "__main__": + main() diff --git a/src/mainwindow.py b/src/mainwindow.py index 76ed179..ca8e697 100644 --- a/src/mainwindow.py +++ b/src/mainwindow.py @@ -305,7 +305,12 @@ class MainWindow(QtWidgets.QMainWindow): QtWidgets.QShortcut("Ctrl+A", self.window, self.openSaveProjectDialog) QtWidgets.QShortcut("Ctrl+O", self.window, self.openOpenProjectDialog) QtWidgets.QShortcut("Ctrl+N", self.window, self.createNewProject) - QtWidgets.QShortcut("Ctrl+Alt+Shift+R", 
self.window, self.drawPreview) +        QtWidgets.QShortcut( +            "Ctrl+Alt+Shift+R", self.window, self.drawPreview +        ) +        QtWidgets.QShortcut( +            "Ctrl+Alt+Shift+F", self.window, self.showFfmpegCommand +        ) QtWidgets.QShortcut( "Ctrl+T", self.window, @@ -580,6 +585,18 @@ class MainWindow(QtWidgets.QMainWindow): def showPreviewImage(self, image): self.previewWindow.changePixmap(image) +    def showFfmpegCommand(self): +        from textwrap import wrap +        command = self.core.createFfmpegCommand( +            self.window.lineEdit_audioFile.text(), +            self.window.lineEdit_outputFile.text(), +            self.core.getAudioDuration(self.window.lineEdit_audioFile.text()) +        ) +        lines = wrap(" ".join(command), 49) +        self.showMessage( +            msg="Current FFmpeg command:\n\n %s" % "\n".join(lines) +        ) + def insertComponent(self, index): componentList = self.window.listWidget_componentList stackedWidget = self.window.stackedWidget diff --git a/src/presetmanager.py b/src/presetmanager.py index 0028203..6e003a1 100644 --- a/src/presetmanager.py +++ b/src/presetmanager.py @@ -6,7 +6,6 @@ from PyQt5 import QtCore, QtWidgets import string import os -import core import toolkit diff --git a/src/preview_thread.py b/src/preview_thread.py index 4ffb7f6..6c33aff 100644 --- a/src/preview_thread.py +++ b/src/preview_thread.py @@ -6,7 +6,6 @@ from PyQt5 import QtCore, QtGui, uic from PyQt5.QtCore import pyqtSignal, pyqtSlot from PIL import Image from PIL.ImageQt import ImageQt -import core from queue import Queue, Empty import os diff --git a/src/toolkit.py b/src/toolkit.py index 589d8e6..5493f37 100644 --- a/src/toolkit.py +++ b/src/toolkit.py @@ -13,11 +13,14 @@ def badName(name): return any([letter in string.punctuation for letter in name]) +def alphabetizeDict(dictionary): +    '''Alphabetizes a dict into OrderedDict ''' +    return OrderedDict(sorted(dictionary.items(), key=lambda t: t[0])) + + def presetToString(dictionary): -    '''Alphabetizes a dict into OrderedDict & returns string repr''' -    return repr( -        OrderedDict(sorted(dictionary.items(), key=lambda t: t[0])) -    ) +    '''Returns string repr of a preset''' +    return repr(alphabetizeDict(dictionary)) def presetFromString(string): diff --git a/src/video_thread.py b/src/video_thread.py index 674765a..60db99f 100644 --- a/src/video_thread.py +++ b/src/video_thread.py @@ -18,7 +18,6 @@ from threading import Thread, Event import time import signal -import core from toolkit import openPipe from frame import Checkerboard