2015-03-03 14:11:55 -05:00
|
|
|
from PyQt4 import QtCore, QtGui, uic
|
|
|
|
from PyQt4.QtCore import pyqtSignal, pyqtSlot
|
|
|
|
from PIL import Image, ImageDraw, ImageFont
|
|
|
|
from PIL.ImageQt import ImageQt
|
|
|
|
import core
|
|
|
|
import numpy
|
|
|
|
import subprocess as sp
|
|
|
|
import sys
|
2017-06-02 00:24:13 -04:00
|
|
|
import os
|
2017-05-31 05:01:18 -04:00
|
|
|
from queue import Queue, PriorityQueue
|
2017-06-02 09:14:04 -04:00
|
|
|
from threading import Thread, Event
|
2017-05-31 03:15:09 -04:00
|
|
|
import time
|
2017-06-01 10:52:40 -04:00
|
|
|
from copy import copy
|
2017-06-02 00:24:13 -04:00
|
|
|
import signal
|
2015-03-03 14:11:55 -05:00
|
|
|
|
|
|
|
class Worker(QtCore.QObject):
    """Background renderer.

    Composites per-frame images from a list of components over an audio
    track and pipes the raw RGBA frames into an ffmpeg subprocess,
    reporting progress back to the GUI thread via Qt signals.
    """

    # a throttled preview frame is ready for display in the GUI
    imageCreated = pyqtSignal(['QImage'])
    # the export finished (successfully, failed, or canceled)
    videoCreated = pyqtSignal()
    # progress bar percentage, 0-100
    progressBarUpdate = pyqtSignal(int)
    # progress bar status text
    progressBarSetText = pyqtSignal(str)
    # True while an export is running, False when it ends
    encoding = pyqtSignal(bool)
|
2017-05-31 03:15:09 -04:00
|
|
|
|
|
|
|
def __init__(self, parent=None):
    """Wire the worker to the main GUI controller.

    parent: presumably the main-window controller — it must provide
    settings, modules, window.stackedWidget, a videoTask signal and
    drawPreview() (NOTE(review): confirm against the caller).
    """
    QtCore.QObject.__init__(self)
    self.core = core.Core()
    # share the GUI's settings and loaded modules with the core renderer
    self.core.settings = parent.settings
    self.modules = parent.modules
    self.stackedWidget = parent.window.stackedWidget
    self.parent = parent
    # the GUI requests an export by emitting videoTask(input, output, comps)
    parent.videoTask.connect(self.createVideo)
    # audio samples consumed per video frame
    self.sampleSize = 1470
    # run-state flags, reset at the start of each export
    self.canceled = False
    self.error = False
    self.stopped = False
|
2017-05-31 03:15:09 -04:00
|
|
|
|
|
|
|
def renderNode(self):
    """Render-thread target: composite one frame per queue item.

    Pulls [sampleStart, videoFrameNo] pairs from compositeQueue,
    alpha-composites every component's layer (bottom layer last in the
    components list, so iterate in reverse), and puts [sampleStart,
    frame] on renderQueue for the writer loop. Runs until self.stopped.

    Fix over the original: `!= None` replaced with `is not None`, and the
    duplicated static/dynamic compositing branches collapsed — a static
    component stored as None falls through to a live frameRender() call,
    exactly as before.
    """
    while not self.stopped:
        i = self.compositeQueue.get()

        frame = None
        for compNo, comp in reversed(list(enumerate(self.components))):
            # Use the pre-rendered layer when one was cached, otherwise
            # render this component live for this sample window.
            layer = self.staticComponents.get(compNo)
            if layer is None:
                layer = comp.frameRender(compNo, i[0], i[1])

            if frame is None:
                frame = layer
            else:
                frame = Image.alpha_composite(frame, layer)

        self.renderQueue.put([i[0], frame])
        self.compositeQueue.task_done()
|
|
|
|
|
|
|
|
def renderDispatch(self):
    """Enqueue every audio sample window for the render threads."""
    print('Dispatching Frames for Compositing...')

    audioLength = len(self.completeAudioArray)
    for sampleStart in range(0, audioLength, self.sampleSize):
        self.compositeQueue.put([sampleStart, self.bgI])
        # advance the tracked video frame for the next iteration
        self.bgI += 1
|
2017-05-31 03:15:09 -04:00
|
|
|
|
|
|
|
def previewDispatch(self):
    """Preview-thread target: push throttled frames to the GUI.

    Consumes [sampleStart, frame] pairs from previewQueue and emits at
    most one preview image every 0.06 s; the very first frame
    (sampleStart == 0) is always shown. Runs until self.stopped.
    """
    while not self.stopped:
        item = self.previewQueue.get()

        firstFrame = item[0] == 0
        throttleExpired = time.time() - self.lastPreview >= 0.06
        if throttleExpired or firstFrame:
            self._image = ImageQt(item[1])
            self.imageCreated.emit(QtGui.QImage(self._image))
            self.lastPreview = time.time()

        self.previewQueue.task_done()
|
|
|
|
|
2017-06-04 13:00:36 -04:00
|
|
|
@pyqtSlot(str, str, list)
def createVideo(self, inputFile, outputFile, components):
    """Export a video: render `components` over the audio in `inputFile`.

    Reads the audio, pre-renders static components, spins up render /
    dispatch / preview threads, and pipes raw RGBA frames into an ffmpeg
    subprocess that muxes them with the audio into `outputFile`.
    Emits encoding/progress signals throughout and videoCreated at the end.

    Fixes over the original:
    - the preview Thread was stored as `self.previewDispatch`, clobbering
      the previewDispatch() method and breaking any subsequent export;
      it is now `self.previewDispatchThread`.
    - bare `except:` clauses narrowed to OSError.
    - progress value cast to int before emitting through pyqtSignal(int)
      (numpy.floor returns a float).
    - unused local `numComps` removed; preview thread given its own name.
    """
    self.encoding.emit(True)
    self.components = components
    self.outputFile = outputFile
    self.bgI = 0  # tracked video frame
    self.reset()
    self.width = int(self.core.settings.value('outputWidth'))
    self.height = int(self.core.settings.value('outputHeight'))

    progressBarValue = 0
    self.progressBarUpdate.emit(progressBarValue)

    self.progressBarSetText.emit('Loading audio file...')
    self.completeAudioArray = self.core.readAudioFile(inputFile, self)

    # test if user has libfdk_aac
    # NOTE(review): shell=True with a string command — acceptable only
    # because FFMPEG_BIN is app-controlled; never interpolate user input.
    encoders = sp.check_output(self.core.FFMPEG_BIN + " -encoders -hide_banner", shell=True)
    acodec = self.core.settings.value('outputAudioCodec')

    if b'libfdk_aac' in encoders and acodec == 'aac':
        acodec = 'libfdk_aac'

    ffmpegCommand = [
        self.core.FFMPEG_BIN,
        '-thread_queue_size', '512',
        '-y',  # (optional) means overwrite the output file if it already exists.
        '-f', 'rawvideo',
        '-vcodec', 'rawvideo',
        '-s', str(self.width)+'x'+str(self.height),  # size of one frame
        '-pix_fmt', 'rgba',
        '-r', self.core.settings.value('outputFrameRate'),  # frames per second
        '-i', '-',  # The input comes from a pipe
        '-an',
        '-i', inputFile,
        '-acodec', acodec,  # output audio codec
        '-b:a', self.core.settings.value('outputAudioBitrate'),
        '-vcodec', self.core.settings.value('outputVideoCodec'),
        '-pix_fmt', self.core.settings.value('outputVideoFormat'),
        '-preset', self.core.settings.value('outputPreset'),
        '-f', self.core.settings.value('outputFormat')
    ]

    if acodec == 'aac':
        # the native aac encoder is still experimental in older ffmpeg
        ffmpegCommand.append('-strict')
        ffmpegCommand.append('-2')

    ffmpegCommand.append(outputFile)

    self.out_pipe = sp.Popen(ffmpegCommand, stdin=sp.PIPE, stdout=sys.stdout, stderr=sys.stdout)

    # create video for output
    numpy.seterr(divide='ignore')

    # initialize components
    print('loaded components:',
          ["%s%s" % (num, str(component)) for num, component in enumerate(self.components)])
    self.staticComponents = {}
    for compNo, comp in enumerate(self.components):
        pStr = "Analyzing audio..."
        self.progressBarSetText.emit(pStr)
        properties = comp.preFrameRender(
            worker=self,
            completeAudioArray=self.completeAudioArray,
            sampleSize=self.sampleSize,
            progressBarUpdate=self.progressBarUpdate,
            progressBarSetText=self.progressBarSetText
        )

        # components that never change are rendered once and cached
        if properties and 'static' in properties:
            self.staticComponents[compNo] = copy(comp.frameRender(compNo, 0, 0))
        self.progressBarUpdate.emit(100)

    # bounded queues keep memory flat: renderers stall instead of racing ahead
    self.compositeQueue = Queue()
    self.compositeQueue.maxsize = 20
    self.renderQueue = PriorityQueue()
    self.renderQueue.maxsize = 20
    self.previewQueue = PriorityQueue()

    self.renderThreads = []
    # create threads to render frames and send them back here for piping out
    for i in range(3):
        self.renderThreads.append(Thread(target=self.renderNode, name="Render Thread"))
        self.renderThreads[i].daemon = True
        self.renderThreads[i].start()

    self.dispatchThread = Thread(target=self.renderDispatch, name="Render Dispatch Thread")
    self.dispatchThread.daemon = True
    self.dispatchThread.start()

    # stored under its own attribute so the previewDispatch() method survives
    self.previewDispatchThread = Thread(target=self.previewDispatch, name="Preview Dispatch Thread")
    self.previewDispatchThread.daemon = True
    self.previewDispatchThread.start()

    frameBuffer = {}
    self.lastPreview = 0.0
    self.progressBarUpdate.emit(0)
    pStr = "Exporting video..."
    self.progressBarSetText.emit(pStr)
    if not self.canceled:
        for i in range(0, len(self.completeAudioArray), self.sampleSize):
            while True:
                if i in frameBuffer:
                    # if frame's in buffer, pipe it to ffmpeg
                    break
                # else fetch the next frame & add to the buffer
                data = self.renderQueue.get()
                frameBuffer[data[0]] = data[1]
                self.renderQueue.task_done()

            try:
                self.out_pipe.stdin.write(frameBuffer[i].tobytes())
                self.previewQueue.put([i, frameBuffer[i]])
                del frameBuffer[i]
            except OSError:
                # ffmpeg exited or closed its stdin (e.g. on cancel)
                break

            # increase progress bar value
            if progressBarValue + 1 <= (i / len(self.completeAudioArray)) * 100:
                # int(): pyqtSignal(int) must not receive numpy floats
                progressBarValue = int(numpy.floor((i / len(self.completeAudioArray)) * 100))
                self.progressBarUpdate.emit(progressBarValue)
                pStr = "Exporting video: " + str(int(progressBarValue)) + "%"
                self.progressBarSetText.emit(pStr)

    numpy.seterr(all='print')

    self.out_pipe.stdin.close()
    if self.out_pipe.stderr is not None:
        # stderr is only a pipe when Popen was changed to capture it;
        # anything there means ffmpeg reported a problem
        print(self.out_pipe.stderr.read())
        self.out_pipe.stderr.close()
        self.error = True
    # out_pipe.terminate() # don't terminate ffmpeg too early
    self.out_pipe.wait()
    if self.canceled:
        print("Export Canceled")
        try:
            os.remove(self.outputFile)
        except OSError:
            # partial file may never have been created
            pass
        self.progressBarUpdate.emit(0)
        self.progressBarSetText.emit('Export Canceled')
    else:
        if self.error:
            print("Export Failed")
            self.progressBarUpdate.emit(0)
            self.progressBarSetText.emit('Export Failed')
        else:
            print("Export Complete")
            self.progressBarUpdate.emit(100)
            self.progressBarSetText.emit('Export Complete')

    self.error = False
    self.canceled = False
    self.parent.drawPreview()
    self.stopped = True
    self.encoding.emit(False)
    self.videoCreated.emit()
|
2017-06-02 01:30:44 -04:00
|
|
|
|
2017-06-02 04:30:51 -04:00
|
|
|
def updateProgress(self, pStr, pVal):
    """Forward a progress update (text + percentage) to the GUI.

    Fix: the original emitted `self.progressBarValue`, a signal that does
    not exist on Worker and would raise AttributeError at runtime; the
    declared int-progress signal is `progressBarUpdate`.
    """
    self.progressBarUpdate.emit(pVal)
    self.progressBarSetText.emit(pStr)
|
|
|
|
|
2017-06-02 01:30:44 -04:00
|
|
|
def cancel(self):
    """Abort an in-progress export.

    Flags the worker, cancels the core and every component, and asks
    ffmpeg to stop with SIGINT so it finalizes/closes cleanly.

    Fix: bare `except:` narrowed — AttributeError covers "no out_pipe
    yet", OSError (incl. ProcessLookupError) covers "ffmpeg already
    exited"; anything else should surface.
    """
    self.canceled = True
    self.core.cancel()

    for comp in self.components:
        comp.cancel()

    try:
        self.out_pipe.send_signal(signal.SIGINT)
    except (AttributeError, OSError):
        # nothing to interrupt — best-effort, same as before
        pass
|
|
|
|
|
|
|
|
def reset(self):
    """Return the core, this worker and every component to a fresh,
    uncanceled state before a new export."""
    self.core.reset()
    self.canceled = False
    for component in self.components:
        component.reset()
|