'''
Thread created to export a video. It has a slot to begin export using
an input file, output path, and component list. During export, multiple
threads are created to render the video as quickly as possible. Signals
are emitted to update MainWindow's progress bar, detail text, and preview.
Export can be cancelled with cancel().
'''
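# A minimal sketch of how a MainWindow might drive this worker, assuming
# the usual moveToThread() pattern and a 'videoTask' signal on the parent;
# names other than Worker's own signals are illustrative:
#
#   thread = QtCore.QThread()
#   worker = Worker(parent=mainWindow)
#   worker.moveToThread(thread)
#   worker.progressBarUpdate.connect(mainWindow.progressBar.setValue)
#   worker.imageCreated.connect(mainWindow.showPreviewImage)
#   thread.start()
#   mainWindow.videoTask.emit(inputFile, outputFile, componentList)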
from PyQt5 import QtCore, QtGui, uic
from PyQt5.QtCore import pyqtSignal, pyqtSlot
from PIL import Image, ImageDraw, ImageFont
from PIL.ImageQt import ImageQt
import numpy
import subprocess as sp
import sys
import os
from queue import Queue, PriorityQueue
from threading import Thread, Event
import time
import signal
import core
from toolkit import openPipe, checkOutput
from frame import Checkerboard


class Worker(QtCore.QObject):
    imageCreated = pyqtSignal(['QImage'])
    videoCreated = pyqtSignal()
    progressBarUpdate = pyqtSignal(int)
    progressBarSetText = pyqtSignal(str)
    encoding = pyqtSignal(bool)

    def __init__(self, parent=None):
        QtCore.QObject.__init__(self)
        self.core = parent.core
        self.settings = parent.core.settings
        self.modules = parent.core.modules
        self.parent = parent
        parent.videoTask.connect(self.createVideo)
        self.sampleSize = 1470  # 44100 Hz / 30 fps = 1470 samples per frame
        self.canceled = False
        self.error = False
        self.stopped = False

    def renderNode(self):
        '''
        Grabs audio data indices for the frames to export from the
        compositeQueue, sends them to the components' frameRender methods
        in layer order to create subframes, and composites the subframes
        into the final frame. The resulting frames are collected in the
        renderQueue.
        '''
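        # Compositing order: reversed() walks self.components from the end,
        # so the last component in the list becomes the bottom-most layer
        # and earlier components are drawn on top of it.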
        while not self.stopped:
            audioI = self.compositeQueue.get()
            bgI = int(audioI / self.sampleSize)
            frame = None

            for compNo, comp in reversed(list(enumerate(self.components))):
                if compNo in self.staticComponents:
                    if self.staticComponents[compNo] is None:
                        # this layer was merged into a following layer
                        continue
                    # static component
                    if frame is None:  # bottom-most layer
                        frame = self.staticComponents[compNo]
                    else:
                        frame = Image.alpha_composite(
                            frame, self.staticComponents[compNo]
                        )
                else:
                    # animated component
                    if frame is None:  # bottom-most layer
                        frame = comp.frameRender(compNo, bgI)
                    else:
                        frame = Image.alpha_composite(
                            frame, comp.frameRender(compNo, bgI)
                        )

            self.renderQueue.put([audioI, frame])
            self.compositeQueue.task_done()

    def renderDispatch(self):
        '''
        Places audio data indices in the compositeQueue, to be consumed
        by a renderNode later. All indices are multiples of self.sampleSize:
        audioI = sampleSize * frameNo, i.e. the audio data starting at
        frame frameNo.
        '''
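        # Worked example with the default sampleSize of 1470
        # (44100 Hz / 30 fps): frame 0 -> audioI 0, frame 1 -> audioI 1470,
        # frame 2 -> audioI 2940, and so on to the end of the audio array.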
        print('Dispatching Frames for Compositing...')
        for audioI in range(0, len(self.completeAudioArray), self.sampleSize):
            self.compositeQueue.put(audioI)

    def previewDispatch(self):
        '''
        Grabs frames from the previewQueue, composites them over the
        checkerboard background and emits the final QImage to the
        MainWindow for the live preview.
        '''
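        # Preview updates are throttled below: a new QImage is emitted at
        # most once every 0.06 s (roughly 16 fps), except for the first frame.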
        background = Checkerboard(self.width, self.height)

        while not self.stopped:
            audioI, frame = self.previewQueue.get()
            if time.time() - self.lastPreview >= 0.06 or audioI == 0:
                image = Image.alpha_composite(background.copy(), frame)
                self.imageCreated.emit(QtGui.QImage(ImageQt(image)))
                self.lastPreview = time.time()

            self.previewQueue.task_done()

    @pyqtSlot(str, str, list)
    def createVideo(self, inputFile, outputFile, components):
        numpy.seterr(divide='ignore')
        self.encoding.emit(True)
        self.components = components
        self.outputFile = outputFile
        self.extraAudio = []
        self.width = int(self.settings.value('outputWidth'))
        self.height = int(self.settings.value('outputHeight'))

        self.compositeQueue = Queue()
        self.compositeQueue.maxsize = 20
        self.renderQueue = PriorityQueue()
        self.renderQueue.maxsize = 20
        self.previewQueue = PriorityQueue()

        self.reset()
        progressBarValue = 0
        self.progressBarUpdate.emit(progressBarValue)
        # =~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~==~=~=~=~=~=~=~=~=~=~=~=~=~=~
        # READ AUDIO, INITIALIZE COMPONENTS, OPEN A PIPE TO FFMPEG
        # =~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~==~=~=~=~=~=~=~=~=~=~=~=~=~=~
        self.progressBarSetText.emit("Loading audio file...")
        self.completeAudioArray = self.core.readAudioFile(inputFile, self)

        self.progressBarUpdate.emit(0)
        self.progressBarSetText.emit("Starting components...")
        print('Loaded Components:', ", ".join([
            "%s) %s" % (num, str(component))
            for num, component in enumerate(reversed(self.components))
        ]))
        self.staticComponents = {}
        numComps = len(self.components)
        for compNo, comp in enumerate(reversed(self.components)):
            comp.preFrameRender(
                worker=self,
                completeAudioArray=self.completeAudioArray,
                sampleSize=self.sampleSize,
                progressBarUpdate=self.progressBarUpdate,
                progressBarSetText=self.progressBarSetText
            )
            if 'error' in comp.properties():
                self.cancel()
                self.canceled = True
                if comp.error() is None:
                    errMsg = "Component #%s encountered an error!" % compNo
                else:
                    errMsg = 'Component #%s (%s): %s' % (
                        str(compNo), str(comp), comp.error()
                    )
                self.parent.showMessage(
                    msg=errMsg,
                    icon='Warning',
                    parent=None  # MainWindow is in a different thread
                )
                break

            if 'static' in comp.properties():
                self.staticComponents[compNo] = \
                    comp.frameRender(compNo, 0).copy()
        if self.canceled:
            print('Export cancelled by component #%s (%s): %s' % (
                compNo, str(comp), comp.error()
            ))
            self.progressBarSetText.emit('Export Canceled')
            self.encoding.emit(False)
            self.videoCreated.emit()
            return
        # Merge consecutive static component frames together
        for compNo in range(len(self.components)):
            if compNo not in self.staticComponents \
                    or compNo + 1 not in self.staticComponents:
                continue
            self.staticComponents[compNo + 1] = Image.alpha_composite(
                self.staticComponents.pop(compNo),
                self.staticComponents[compNo + 1]
            )
            self.staticComponents[compNo] = None
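        # e.g. if layers 0, 1 and 2 are all static, 0 is folded into 1 and
        # then 1 into 2, leaving a single composite image for layer 2 and
        # None markers for 0 and 1, which renderNode skips.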
        ffmpegCommand = self.core.createFfmpegCommand(inputFile, outputFile)
        print('###### FFMPEG COMMAND ######\n%s' % " ".join(ffmpegCommand))
        print('############################')
        self.out_pipe = openPipe(
            ffmpegCommand, stdin=sp.PIPE, stdout=sys.stdout, stderr=sys.stdout
        )
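        # Each composited PIL frame is written to ffmpeg's stdin as raw
        # bytes via Image.tobytes() below; the matching pixel format and
        # frame size are presumably set in core.createFfmpegCommand().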
        # =~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~==~=~=~=~=~=~=~=~=~=~=~=~=~=~
        # START CREATING THE VIDEO
        # =~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~==~=~=~=~=~=~=~=~=~=~=~=~=~=~
        # Make three renderNodes in new threads to create the frames
        self.renderThreads = []
        for i in range(3):
            self.renderThreads.append(
                Thread(target=self.renderNode, name="Render Thread"))
            self.renderThreads[i].daemon = True
            self.renderThreads[i].start()
        self.dispatchThread = Thread(
            target=self.renderDispatch, name="Render Dispatch Thread")
        self.dispatchThread.daemon = True
        self.dispatchThread.start()

        self.lastPreview = 0.0
        # keep the thread under its own name so it doesn't shadow the
        # previewDispatch method it runs
        self.previewDispatchThread = Thread(
            target=self.previewDispatch, name="Preview Dispatch Thread")
        self.previewDispatchThread.daemon = True
        self.previewDispatchThread.start()
        # Begin piping into ffmpeg!
        frameBuffer = {}
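        # The three render threads finish frames out of order; buffer them
        # by audio index so they are piped to ffmpeg strictly in sequence.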
        progressBarValue = 0
        self.progressBarUpdate.emit(progressBarValue)
        self.progressBarSetText.emit("Exporting video...")
        if not self.canceled:
            for audioI in range(
                    0, len(self.completeAudioArray), self.sampleSize):
                while True:
                    if audioI in frameBuffer or self.canceled:
                        # if the frame's in the buffer, pipe it to ffmpeg
                        break
                    # else fetch the next finished frame & add it to the buffer
                    audioI_, frame = self.renderQueue.get()
                    frameBuffer[audioI_] = frame
                    self.renderQueue.task_done()
                if self.canceled:
                    break

                try:
                    self.out_pipe.stdin.write(frameBuffer[audioI].tobytes())
                    self.previewQueue.put([audioI, frameBuffer.pop(audioI)])
                except Exception:
                    # ffmpeg exited or the pipe broke; stop exporting
                    break

                # increase progress bar value
                completion = (audioI / len(self.completeAudioArray)) * 100
                if progressBarValue + 1 <= completion:
                    progressBarValue = int(numpy.floor(completion))
                    self.progressBarUpdate.emit(progressBarValue)
                    self.progressBarSetText.emit(
                        "Exporting video: %s%%" % str(progressBarValue)
                    )
        numpy.seterr(all='print')
        self.out_pipe.stdin.close()
        if self.out_pipe.stderr is not None:
            print(self.out_pipe.stderr.read())
            self.out_pipe.stderr.close()
            self.error = True
        # out_pipe.terminate()  # don't terminate ffmpeg too early
        self.out_pipe.wait()
        if self.canceled:
            print("Export Canceled")
            try:
                os.remove(self.outputFile)
            except OSError:
                pass
            self.progressBarUpdate.emit(0)
            self.progressBarSetText.emit('Export Canceled')
        else:
            if self.error:
                print("Export Failed")
                self.progressBarUpdate.emit(0)
                self.progressBarSetText.emit('Export Failed')
            else:
                print("Export Complete")
                self.progressBarUpdate.emit(100)
                self.progressBarSetText.emit('Export Complete')
        self.error = False
        self.canceled = False
        self.stopped = True
        self.encoding.emit(False)
        self.videoCreated.emit()

    def updateProgress(self, pStr, pVal):
        self.progressBarUpdate.emit(pVal)
        self.progressBarSetText.emit(pStr)

    def cancel(self):
        self.canceled = True
        self.stopped = True
        self.core.cancel()

        for comp in self.components:
            comp.cancel()

        try:
            self.out_pipe.send_signal(signal.SIGINT)
        except Exception:
            # the pipe may not exist yet, or ffmpeg may have already exited
            pass

    def reset(self):
        self.core.reset()
        self.canceled = False
        for comp in self.components:
            comp.reset()