# pyaudviz/src/video_thread.py — background video rendering/encoding worker.
# NOTE: upstream repository was archived on 2020-08-22 (read-only); this
# copy was captured from a web view and reformatted.
from PyQt5 import QtCore, QtGui, uic
from PyQt5.QtCore import pyqtSignal, pyqtSlot
2015-03-03 14:11:55 -05:00
from PIL import Image, ImageDraw, ImageFont
from PIL.ImageQt import ImageQt
import core
import numpy
import subprocess as sp
import sys
2017-06-02 00:24:13 -04:00
import os
2017-05-31 05:01:18 -04:00
from queue import Queue, PriorityQueue
2017-06-02 09:14:04 -04:00
from threading import Thread, Event
import time
from copy import copy
2017-06-02 00:24:13 -04:00
import signal
2015-03-03 14:11:55 -05:00
2017-06-06 11:14:39 -04:00
2015-03-03 14:11:55 -05:00
class Worker(QtCore.QObject):
    """Background worker that composites frames and pipes them to ffmpeg."""

    imageCreated = pyqtSignal(['QImage'])  # carries each preview frame
    videoCreated = pyqtSignal()            # fired when an export finishes
    progressBarUpdate = pyqtSignal(int)
    progressBarSetText = pyqtSignal(str)
    encoding = pyqtSignal(bool)            # True while an export is running

    def __init__(self, parent=None):
        """Wire this worker to *parent*'s videoTask signal.

        *parent* is expected to expose ``settings``, ``core.modules`` and a
        ``videoTask`` signal emitting (inputFile, outputFile, components).
        """
        QtCore.QObject.__init__(self)
        self.core = core.Core()
        self.core.settings = parent.settings
        self.modules = parent.core.modules
        self.parent = parent
        parent.videoTask.connect(self.createVideo)
        # Audio samples consumed per video frame: 44100 Hz / 30 fps = 1470.
        self.sampleSize = 1470
        self.canceled = False
        self.error = False
        self.stopped = False
def renderNode(self):
2017-06-02 09:14:04 -04:00
while not self.stopped:
i = self.compositeQueue.get()
frame = None
for compNo, comp in reversed(list(enumerate(self.components))):
2017-06-06 11:14:39 -04:00
if compNo in self.staticComponents and \
self.staticComponents[compNo] is not None:
if frame is None:
frame = self.staticComponents[compNo]
else:
2017-06-06 11:14:39 -04:00
frame = Image.alpha_composite(
frame, self.staticComponents[compNo])
else:
if frame is None:
frame = comp.frameRender(compNo, i[0], i[1])
else:
2017-06-06 11:14:39 -04:00
frame = Image.alpha_composite(
frame, comp.frameRender(compNo, i[0], i[1]))
2015-03-03 14:11:55 -05:00
self.renderQueue.put([i[0], frame])
self.compositeQueue.task_done()
def renderDispatch(self):
print('Dispatching Frames for Compositing...')
for i in range(0, len(self.completeAudioArray), self.sampleSize):
self.compositeQueue.put([i, self.bgI])
# increment tracked video frame for next iteration
self.bgI += 1
def previewDispatch(self):
2017-06-06 11:14:39 -04:00
background = Image.new("RGBA", (1920, 1080), (0, 0, 0, 0))
background.paste(Image.open(os.path.join(
2017-06-23 03:39:56 -04:00
self.core.wd, "background.png")))
2017-06-06 05:04:42 -04:00
background = background.resize((self.width, self.height))
2017-06-02 09:14:04 -04:00
while not self.stopped:
i = self.previewQueue.get()
if time.time() - self.lastPreview >= 0.06 or i[0] == 0:
2017-06-06 05:04:42 -04:00
image = copy(background)
image = Image.alpha_composite(image, i[1])
self._image = ImageQt(image)
self.imageCreated.emit(QtGui.QImage(self._image))
2017-06-01 09:05:20 -04:00
self.lastPreview = time.time()
self.previewQueue.task_done()
@pyqtSlot(str, str, list)
def createVideo(self, inputFile, outputFile, components):
self.encoding.emit(True)
self.components = components
2017-06-02 00:24:13 -04:00
self.outputFile = outputFile
2017-06-06 11:14:39 -04:00
self.bgI = 0 # tracked video frame
self.reset()
self.width = int(self.core.settings.value('outputWidth'))
self.height = int(self.core.settings.value('outputHeight'))
progressBarValue = 0
self.progressBarUpdate.emit(progressBarValue)
2017-06-02 04:30:51 -04:00
self.progressBarSetText.emit('Loading audio file...')
self.completeAudioArray = self.core.readAudioFile(inputFile, self)
# test if user has libfdk_aac
2017-06-06 11:14:39 -04:00
encoders = sp.check_output(
self.core.FFMPEG_BIN + " -encoders -hide_banner",
shell=True)
encoders = encoders.decode("utf-8")
acodec = self.core.settings.value('outputAudioCodec')
options = self.core.encoder_options
containerName = self.core.settings.value('outputContainer')
vcodec = self.core.settings.value('outputVideoCodec')
2017-06-07 13:33:22 -04:00
vbitrate = str(self.core.settings.value('outputVideoBitrate'))+'k'
acodec = self.core.settings.value('outputAudioCodec')
2017-06-07 13:33:22 -04:00
abitrate = str(self.core.settings.value('outputAudioBitrate'))+'k'
2017-06-06 11:14:39 -04:00
for cont in options['containers']:
if cont['name'] == containerName:
container = cont['container']
break
vencoders = options['video-codecs'][vcodec]
aencoders = options['audio-codecs'][acodec]
for encoder in vencoders:
if encoder in encoders:
vencoder = encoder
break
for encoder in aencoders:
if encoder in encoders:
aencoder = encoder
break
ffmpegCommand = [
self.core.FFMPEG_BIN,
'-thread_queue_size', '512',
2017-06-06 11:14:39 -04:00
'-y', # overwrite the output file if it already exists.
'-f', 'rawvideo',
'-vcodec', 'rawvideo',
'-s', str(self.width)+'x'+str(self.height), # size of one frame
'-pix_fmt', 'rgba',
2017-06-06 11:14:39 -04:00
# frames per second
'-r', self.core.settings.value('outputFrameRate'),
'-i', '-', # The input comes from a pipe
'-an',
'-i', inputFile,
'-vcodec', vencoder,
'-acodec', aencoder, # output audio codec
2017-06-07 13:33:22 -04:00
'-b:v', vbitrate,
'-b:a', abitrate,
'-pix_fmt', self.core.settings.value('outputVideoFormat'),
'-preset', self.core.settings.value('outputPreset'),
'-f', container
]
if acodec == 'aac':
ffmpegCommand.append('-strict')
ffmpegCommand.append('-2')
ffmpegCommand.append(outputFile)
2017-06-02 00:24:13 -04:00
# ### Now start creating video for output ###
numpy.seterr(divide='ignore')
# Call preFrameRender on all components
2017-06-23 23:00:24 -04:00
print('Loaded Components:', ", ".join([
"%s) %s" % (num, str(component))
for num, component in enumerate(reversed(self.components))
]))
2017-06-01 09:05:20 -04:00
self.staticComponents = {}
2017-06-02 04:30:51 -04:00
numComps = len(self.components)
for compNo, comp in enumerate(self.components):
2017-06-02 04:30:51 -04:00
pStr = "Analyzing audio..."
self.progressBarSetText.emit(pStr)
2017-06-01 09:05:20 -04:00
properties = None
properties = comp.preFrameRender(
worker=self,
completeAudioArray=self.completeAudioArray,
sampleSize=self.sampleSize,
2017-06-02 04:30:51 -04:00
progressBarUpdate=self.progressBarUpdate,
progressBarSetText=self.progressBarSetText
2017-06-01 09:05:20 -04:00
)
if properties and 'static' in properties:
2017-06-06 11:14:39 -04:00
self.staticComponents[compNo] = copy(
comp.frameRender(compNo, 0, 0))
2017-06-02 09:14:04 -04:00
self.progressBarUpdate.emit(100)
2017-06-01 09:05:20 -04:00
# Create ffmpeg pipe and queues for frames
self.out_pipe = sp.Popen(
ffmpegCommand, stdin=sp.PIPE, stdout=sys.stdout, stderr=sys.stdout)
self.compositeQueue = Queue()
self.compositeQueue.maxsize = 20
2017-05-31 05:01:18 -04:00
self.renderQueue = PriorityQueue()
self.renderQueue.maxsize = 20
2017-05-31 05:01:18 -04:00
self.previewQueue = PriorityQueue()
2017-06-06 11:14:39 -04:00
# Threads to render frames and send them back here for piping out
self.renderThreads = []
for i in range(3):
2017-06-06 11:14:39 -04:00
self.renderThreads.append(
Thread(target=self.renderNode, name="Render Thread"))
2017-06-02 09:14:04 -04:00
self.renderThreads[i].daemon = True
self.renderThreads[i].start()
2017-06-06 11:14:39 -04:00
self.dispatchThread = Thread(
target=self.renderDispatch, name="Render Dispatch Thread")
self.dispatchThread.daemon = True
self.dispatchThread.start()
2017-06-06 11:14:39 -04:00
self.previewDispatch = Thread(
target=self.previewDispatch, name="Render Dispatch Thread")
self.previewDispatch.daemon = True
self.previewDispatch.start()
frameBuffer = {}
self.lastPreview = 0.0
2017-06-02 09:14:04 -04:00
self.progressBarUpdate.emit(0)
pStr = "Exporting video..."
self.progressBarSetText.emit(pStr)
if not self.canceled:
for i in range(0, len(self.completeAudioArray), self.sampleSize):
while True:
if i in frameBuffer or self.canceled:
# if frame's in buffer, pipe it to ffmpeg
break
# else fetch the next frame & add to the buffer
data = self.renderQueue.get()
frameBuffer[data[0]] = data[1]
self.renderQueue.task_done()
if self.canceled:
break
try:
self.out_pipe.stdin.write(frameBuffer[i].tobytes())
self.previewQueue.put([i, frameBuffer[i]])
del frameBuffer[i]
except:
2017-05-31 05:01:18 -04:00
break
# increase progress bar value
2017-06-06 11:14:39 -04:00
if progressBarValue + 1 <= (i / len(self.completeAudioArray)) \
* 100:
progressBarValue = numpy.floor(
(i / len(self.completeAudioArray)) * 100)
self.progressBarUpdate.emit(progressBarValue)
2017-06-06 11:14:39 -04:00
pStr = "Exporting video: " + str(int(progressBarValue)) \
+ "%"
2017-06-02 04:30:51 -04:00
self.progressBarSetText.emit(pStr)
numpy.seterr(all='print')
2017-06-02 00:24:13 -04:00
self.out_pipe.stdin.close()
if self.out_pipe.stderr is not None:
print(self.out_pipe.stderr.read())
self.out_pipe.stderr.close()
self.error = True
# out_pipe.terminate() # don't terminate ffmpeg too early
2017-06-02 00:24:13 -04:00
self.out_pipe.wait()
if self.canceled:
print("Export Canceled")
try:
os.remove(self.outputFile)
except:
pass
2017-06-02 00:24:13 -04:00
self.progressBarUpdate.emit(0)
self.progressBarSetText.emit('Export Canceled')
2017-06-06 11:14:39 -04:00
2017-06-02 00:24:13 -04:00
else:
if self.error:
print("Export Failed")
self.progressBarUpdate.emit(0)
self.progressBarSetText.emit('Export Failed')
else:
print("Export Complete")
self.progressBarUpdate.emit(100)
self.progressBarSetText.emit('Export Complete')
2017-06-06 11:14:39 -04:00
2017-06-02 00:24:13 -04:00
self.error = False
self.canceled = False
2017-06-02 09:14:04 -04:00
self.stopped = True
self.encoding.emit(False)
self.videoCreated.emit()
2017-06-06 11:14:39 -04:00
2017-06-02 04:30:51 -04:00
def updateProgress(self, pStr, pVal):
self.progressBarValue.emit(pVal)
self.progressBarSetText.emit(pStr)
def cancel(self):
self.canceled = True
self.core.cancel()
2017-06-06 11:14:39 -04:00
for comp in self.components:
comp.cancel()
2017-06-06 11:14:39 -04:00
try:
self.out_pipe.send_signal(signal.SIGINT)
except:
pass
def reset(self):
self.core.reset()
self.canceled = False
for comp in self.components:
comp.reset()