Merge pull request #6 from IamDH4/feature-rendering-engine

A rendering engine that uses more threads
Brianna, 2017-06-01 12:07:20 -04:00 (committed by GitHub)
commit 11e5ec0439
4 changed files with 263 additions and 184 deletions
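Note on the design: the new engine replaces the old single-threaded render loop with a small pipeline. A dispatch thread queues frame jobs, three render threads composite frames in parallel, and the main loop reorders finished frames before piping them to ffmpeg. A minimal, self-contained sketch of that shape (names, sizes, and the render() stand-in are illustrative, not the project's API):

    from queue import Queue, PriorityQueue
    from threading import Thread

    FRAMES = 100

    def render(index):
        # stand-in for the real per-frame compositing work
        return ('frame %d' % index).encode()

    jobs = Queue(maxsize=20)            # frame indices waiting to be composited
    done = PriorityQueue(maxsize=20)    # finished frames, sortable by index

    def worker():
        while True:
            index = jobs.get()
            done.put((index, render(index)))
            jobs.task_done()

    def dispatch():
        for index in range(FRAMES):
            jobs.put(index)             # blocks while the pool is saturated

    for _ in range(3):                  # the PR likewise starts three render threads
        Thread(target=worker, daemon=True).start()
    Thread(target=dispatch, daemon=True).start()

    # writer: reassemble frames in index order, like the PR's frameBuffer dict
    buffered, written = {}, 0
    while written < FRAMES:
        index, frame = done.get()
        buffered[index] = frame
        while written in buffered:
            buffered.pop(written)       # the real code pipes this to ffmpeg
            written += 1

The bounded queues are what keep memory flat: a fast dispatcher blocks instead of buffering the whole video.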

File 1 of 4

@@ -4,6 +4,8 @@ from PyQt4 import uic, QtGui
 from PyQt4.QtGui import QColor
 import os, random
 from . import __base__
+import time
+from copy import copy
 
 class Component(__base__.Component):
@@ -47,20 +49,24 @@ class Component(__base__.Component):
         spectrum = numpy.fromfunction(lambda x: 0.008*(x-128)**2, (255,), dtype="int16")
         width = int(previewWorker.core.settings.value('outputWidth'))
         height = int(previewWorker.core.settings.value('outputHeight'))
-        return drawBars(width, height, spectrum, self.visColor, self.layout)
+        return self.drawBars(width, height, spectrum, self.visColor, self.layout)
 
     def preFrameRender(self, **kwargs):
         super().preFrameRender(**kwargs)
         self.smoothConstantDown = 0.08
         self.smoothConstantUp = 0.8
         self.lastSpectrum = None
+        self.spectrumArray = {}
+
+        for i in range(0, len(self.completeAudioArray), self.sampleSize):
+            self.lastSpectrum = self.transformData(i, self.completeAudioArray, self.sampleSize,
+                self.smoothConstantDown, self.smoothConstantUp, self.lastSpectrum)
+            self.spectrumArray[i] = copy(self.lastSpectrum)
 
     def frameRender(self, moduleNo, frameNo):
-        self.lastSpectrum = transformData(frameNo, self.completeAudioArray, self.sampleSize,
-            self.smoothConstantDown, self.smoothConstantUp, self.lastSpectrum)
         width = int(self.worker.core.settings.value('outputWidth'))
         height = int(self.worker.core.settings.value('outputHeight'))
-        return drawBars(width, height, self.lastSpectrum, self.visColor, self.layout)
+        return self.drawBars(width, height, self.spectrumArray[frameNo], self.visColor, self.layout)
 
     def pickColor(self):
         RGBstring, btnStyle = super().pickColor()
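Note: preFrameRender now computes every spectrum up front in one sequential pass, because the smoothing makes each chunk depend on the previous one, while frameRender, which the new engine calls from several threads in arbitrary order, only reads spectrumArray[frameNo]. A toy version of this precompute-then-lookup move (transform() here is a stand-in, not the project's transformData):

    import numpy

    def transform(i, audio, sampleSize, last):
        # stand-in: chunk i's result depends on the smoothed result of chunk i-1
        y = numpy.abs(audio[i:i + sampleSize])
        return y if last is None else 0.8 * y + 0.2 * last[:len(y)]

    sampleSize = 1470
    audio = numpy.random.rand(sampleSize * 5)

    # one ordered pass keeps the inter-frame dependency intact...
    spectrumArray, last = {}, None
    for i in range(0, len(audio), sampleSize):
        last = transform(i, audio, sampleSize, last)
        spectrumArray[i] = last.copy()

    # ...so render threads can later read spectrumArray[i] in any order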
@@ -69,71 +75,70 @@ class Component(__base__.Component):
         self.page.lineEdit_visColor.setText(RGBstring)
         self.page.pushButton_visColor.setStyleSheet(btnStyle)
 
-def transformData(i, completeAudioArray, sampleSize, smoothConstantDown, smoothConstantUp, lastSpectrum):
-    if len(completeAudioArray) < (i + sampleSize):
-        sampleSize = len(completeAudioArray) - i
-    numpy.seterr(divide='ignore')
-    window = numpy.hanning(sampleSize)
-    data = completeAudioArray[i:i+sampleSize][::1] * window
-    paddedSampleSize = 2048
-    paddedData = numpy.pad(data, (0, paddedSampleSize - sampleSize), 'constant')
-    spectrum = numpy.fft.fft(paddedData)
-    sample_rate = 44100
-    frequencies = numpy.fft.fftfreq(len(spectrum), 1./sample_rate)
-    y = abs(spectrum[0:int(paddedSampleSize/2) - 1])
-    # filter the noise away
-    # y[y<80] = 0
-    y = 20 * numpy.log10(y)
-    y[numpy.isinf(y)] = 0
-    if lastSpectrum is not None:
-        lastSpectrum[y < lastSpectrum] = y[y < lastSpectrum] * smoothConstantDown + lastSpectrum[y < lastSpectrum] * (1 - smoothConstantDown)
-        lastSpectrum[y >= lastSpectrum] = y[y >= lastSpectrum] * smoothConstantUp + lastSpectrum[y >= lastSpectrum] * (1 - smoothConstantUp)
-    else:
-        lastSpectrum = y
-    x = frequencies[0:int(paddedSampleSize/2) - 1]
-    return lastSpectrum
-
-def drawBars(width, height, spectrum, color, layout):
-    vH = height-height/8
-    bF = width / 64
-    bH = bF / 2
-    bQ = bF / 4
-    imTop = Image.new("RGBA", (width, height),(0,0,0,0))
-    draw = ImageDraw.Draw(imTop)
-    r, g, b = color
-    color2 = (r, g, b, 125)
-    bP = height / 1200
-    for j in range(0, 63):
-        draw.rectangle((bH + j * bF, vH+bQ, bH + j * bF + bF, vH + bQ - spectrum[j * 4] * bP - bH), fill=color2)
-        draw.rectangle((bH + bQ + j * bF, vH , bH + bQ + j * bF + bH, vH - spectrum[j * 4] * bP), fill=color)
-    imBottom = imTop.transpose(Image.FLIP_TOP_BOTTOM)
-    im = Image.new("RGBA", (width, height),(0,0,0,0))
-    if layout == 0:
-        y = 0 - int(height/100*43)
-        im.paste(imTop, (0, y), mask=imTop)
-        y = 0 + int(height/100*43)
-        im.paste(imBottom, (0, y), mask=imBottom)
-    if layout == 1:
-        y = 0 + int(height/100*10)
-        im.paste(imTop, (0, y), mask=imTop)
-        y = 0 - int(height/100*10)
-        im.paste(imBottom, (0, y), mask=imBottom)
-    if layout == 2:
-        y = 0 + int(height/100*10)
-        im.paste(imTop, (0, y), mask=imTop)
-    return im
+    def transformData(self, i, completeAudioArray, sampleSize, smoothConstantDown, smoothConstantUp, lastSpectrum):
+        if len(completeAudioArray) < (i + sampleSize):
+            sampleSize = len(completeAudioArray) - i
+        window = numpy.hanning(sampleSize)
+        data = completeAudioArray[i:i+sampleSize][::1] * window
+        paddedSampleSize = 2048
+        paddedData = numpy.pad(data, (0, paddedSampleSize - sampleSize), 'constant')
+        spectrum = numpy.fft.fft(paddedData)
+        sample_rate = 44100
+        frequencies = numpy.fft.fftfreq(len(spectrum), 1./sample_rate)
+        y = abs(spectrum[0:int(paddedSampleSize/2) - 1])
+        # filter the noise away
+        # y[y<80] = 0
+        y = 20 * numpy.log10(y)
+        y[numpy.isinf(y)] = 0
+        if lastSpectrum is not None:
+            lastSpectrum[y < lastSpectrum] = y[y < lastSpectrum] * smoothConstantDown + lastSpectrum[y < lastSpectrum] * (1 - smoothConstantDown)
+            lastSpectrum[y >= lastSpectrum] = y[y >= lastSpectrum] * smoothConstantUp + lastSpectrum[y >= lastSpectrum] * (1 - smoothConstantUp)
+        else:
+            lastSpectrum = y
+        x = frequencies[0:int(paddedSampleSize/2) - 1]
+        return lastSpectrum
+
+    def drawBars(self, width, height, spectrum, color, layout):
+        vH = height-height/8
+        bF = width / 64
+        bH = bF / 2
+        bQ = bF / 4
+        imTop = Image.new("RGBA", (width, height),(0,0,0,0))
+        draw = ImageDraw.Draw(imTop)
+        r, g, b = color
+        color2 = (r, g, b, 125)
+        bP = height / 1200
+        for j in range(0, 63):
+            draw.rectangle((bH + j * bF, vH+bQ, bH + j * bF + bF, vH + bQ - spectrum[j * 4] * bP - bH), fill=color2)
+            draw.rectangle((bH + bQ + j * bF, vH , bH + bQ + j * bF + bH, vH - spectrum[j * 4] * bP), fill=color)
+        imBottom = imTop.transpose(Image.FLIP_TOP_BOTTOM)
+        im = Image.new("RGBA", (width, height),(0,0,0,0))
+        if layout == 0:
+            y = 0 - int(height/100*43)
+            im.paste(imTop, (0, y), mask=imTop)
+            y = 0 + int(height/100*43)
+            im.paste(imBottom, (0, y), mask=imBottom)
+        if layout == 1:
+            y = 0 + int(height/100*10)
+            im.paste(imTop, (0, y), mask=imTop)
+            y = 0 - int(height/100*10)
+            im.paste(imBottom, (0, y), mask=imBottom)
+        if layout == 2:
+            y = 0 + int(height/100*10)
+            im.paste(imTop, (0, y), mask=imTop)
+        return im
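Note on the smoothing constants used above: they give the bars a fast attack and a slow release. Where the new magnitude y is above the previous frame's value, the spectrum moves 80% of the way up (smoothConstantUp = 0.8); where it is below, it sinks only 8% of the way down (smoothConstantDown = 0.08). Worked through for a single bin of the blend y * c + last * (1 - c), with made-up dB values:

    last, y = 20.0, 60.0                 # level rises
    up = y * 0.8 + last * (1 - 0.8)      # 48.0 + 4.0 = 52.0 -> jumps most of the way
    last, y = 60.0, 20.0                 # level falls
    down = y * 0.08 + last * (1 - 0.08)  # 1.6 + 55.2 = 56.8 -> decays slowly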

File 2 of 4

@@ -108,19 +108,16 @@ class Component(__base__.Component):
         im = Image.new("RGBA", (width, height),(0,0,0,0))
         image = ImageQt(im)
-        image1 = QtGui.QImage(image)
-        painter = QPainter(image1)
+        painter = QPainter(image)
         self.titleFont.setPixelSize(self.fontSize)
         painter.setFont(self.titleFont)
         painter.setPen(QColor(*self.textColor))
-        fm = QtGui.QFontMetrics(self.titleFont)
         painter.drawText(self.xPosition, self.yPosition, self.title)
         painter.end()
 
         buffer = QtCore.QBuffer()
         buffer.open(QtCore.QIODevice.ReadWrite)
-        image1.save(buffer, "PNG")
+        image.save(buffer, "PNG")
 
         strio = io.BytesIO()
         strio.write(buffer.data())
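Note: ImageQt is itself a QImage subclass, so the component can hand it straight to QPainter and save it directly, dropping the intermediate QtGui.QImage copy and the unused QFontMetrics object. A minimal sketch of the pattern, assuming a running QApplication as in the app itself:

    from PIL import Image
    from PIL.ImageQt import ImageQt
    from PyQt4.QtGui import QPainter, QColor

    im = Image.new("RGBA", (320, 240), (0, 0, 0, 0))
    image = ImageQt(im)                # QImage view over the PIL image
    painter = QPainter(image)          # paint on it directly, no copy
    painter.setPen(QColor(255, 255, 255))
    painter.drawText(20, 20, "title text")
    painter.end()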

File 3 of 4

@@ -243,6 +243,7 @@ class Main(QtCore.QObject):
         self.videoWorker.videoCreated.connect(self.videoCreated)
         self.videoWorker.progressBarUpdate.connect(self.progressBarUpdated)
         self.videoWorker.progressBarSetText.connect(self.progressBarSetText)
+        self.videoWorker.imageCreated.connect(self.showPreviewImage)
         self.videoThread.start()
 
         self.videoTask.emit(self.window.lineEdit_background.text(),
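Note: because the worker lives in videoThread, this is a queued connection; Qt copies the emitted QImage and invokes showPreviewImage later on the GUI thread, so the render threads never touch widgets directly. A stripped-down sketch of the handoff (class and slot bodies here are illustrative; the real showPreviewImage is assumed to update the preview widget):

    from PyQt4 import QtCore, QtGui

    class Worker(QtCore.QObject):
        imageCreated = QtCore.pyqtSignal(['QImage'])

    class Receiver(QtCore.QObject):
        @QtCore.pyqtSlot('QImage')
        def showPreviewImage(self, image):
            self.latest = image        # runs on the GUI thread, widgets are safe

    worker, receiver = Worker(), Receiver()
    thread = QtCore.QThread()
    worker.moveToThread(thread)        # emits from worker now cross threads
    worker.imageCreated.connect(receiver.showPreviewImage)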

File 4 of 4

@@ -6,136 +6,212 @@ import core
 import numpy
 import subprocess as sp
 import sys
+from queue import Queue, PriorityQueue
+from threading import Thread
+import time
+from copy import copy
 
 class Worker(QtCore.QObject):
 
-    videoCreated = pyqtSignal()
-    progressBarUpdate = pyqtSignal(int)
-    progressBarSetText = pyqtSignal(str)
-
-    def __init__(self, parent=None):
-        QtCore.QObject.__init__(self)
-        self.core = core.Core()
-        self.core.settings = parent.settings
-        self.modules = parent.modules
-        self.stackedWidget = parent.window.stackedWidget
-        parent.videoTask.connect(self.createVideo)
-
-    @pyqtSlot(str, str, str, list)
-    def createVideo(self, backgroundImage, inputFile, outputFile, components):
-        # print('worker thread id: {}'.format(QtCore.QThread.currentThreadId()))
-        def getBackgroundAtIndex(i):
-            return self.core.drawBaseImage(backgroundFrames[i])
-
-        progressBarValue = 0
-        self.progressBarUpdate.emit(progressBarValue)
-        self.progressBarSetText.emit('Loading background image…')
-
-        backgroundFrames = self.core.parseBaseImage(backgroundImage)
-        if len(backgroundFrames) < 2:
-            # the base image is not a video so we can draw it now
-            imBackground = getBackgroundAtIndex(0)
-        else:
-            # base images will be drawn while drawing the audio bars
-            imBackground = None
-
-        self.progressBarSetText.emit('Loading audio file…')
-        completeAudioArray = self.core.readAudioFile(inputFile)
-
-        # test if user has libfdk_aac
-        encoders = sp.check_output(self.core.FFMPEG_BIN + " -encoders -hide_banner", shell=True)
-        acodec = self.core.settings.value('outputAudioCodec')
-        if b'libfdk_aac' in encoders and acodec == 'aac':
-            acodec = 'libfdk_aac'
-
-        ffmpegCommand = [ self.core.FFMPEG_BIN,
-            '-y', # (optional) means overwrite the output file if it already exists.
-            '-f', 'rawvideo',
-            '-vcodec', 'rawvideo',
-            '-s', self.core.settings.value('outputWidth')+'x'+self.core.settings.value('outputHeight'), # size of one frame
-            '-pix_fmt', 'rgb24',
-            '-r', self.core.settings.value('outputFrameRate'), # frames per second
-            '-i', '-', # The input comes from a pipe
-            '-an',
-            '-i', inputFile,
-            '-acodec', acodec, # output audio codec
-            '-b:a', self.core.settings.value('outputAudioBitrate'),
-            '-vcodec', self.core.settings.value('outputVideoCodec'),
-            '-pix_fmt', self.core.settings.value('outputVideoFormat'),
-            '-preset', self.core.settings.value('outputPreset'),
-            '-f', self.core.settings.value('outputFormat')]
-
-        if acodec == 'aac':
-            ffmpegCommand.append('-strict')
-            ffmpegCommand.append('-2')
-
-        ffmpegCommand.append(outputFile)
-
-        out_pipe = sp.Popen(ffmpegCommand,
-            stdin=sp.PIPE,stdout=sys.stdout, stderr=sys.stdout)
-
-        # initialize components
-        print('######################## Data')
-        print('loaded components:',
-            ["%s%s" % (num, str(component)) for num, component in enumerate(components)])
-        staticComponents = {}
-        sampleSize = 1470
-        for compNo, comp in enumerate(components):
-            properties = None
-            properties = comp.preFrameRender(worker=self, completeAudioArray=completeAudioArray, sampleSize=sampleSize)
-            if properties and 'static' in properties:
-                staticComponents[compNo] = None
-
-        # create video for output
-        numpy.seterr(divide='ignore')
-        frame = getBackgroundAtIndex(0)
-        bgI = 0
-        for i in range(0, len(completeAudioArray), sampleSize):
-            newFrame = Image.new("RGBA", (int(self.core.settings.value('outputWidth')), int(self.core.settings.value('outputHeight'))),(0,0,0,255))
-            if imBackground:
-                newFrame.paste(imBackground)
-            else:
-                newFrame.paste(getBackgroundAtIndex(bgI))
-
-            # composite all frames returned by the components in order
-            for compNo, comp in enumerate(components):
-                if compNo in staticComponents and staticComponents[compNo] != None:
-                    newFrame = Image.alpha_composite(newFrame,staticComponents[compNo])
-                else:
-                    newFrame = Image.alpha_composite(newFrame,comp.frameRender(compNo, i))
-
-                if i == 0 and compNo in staticComponents:
-                    staticComponents[compNo] = comp.frameRender(compNo, i)
-
-            if not imBackground:
-                # increment background video frame for next iteration
-                if bgI < len(backgroundFrames)-1:
-                    bgI += 1
-
-            # write to out_pipe
-            try:
-                frame = Image.new("RGB", (int(self.core.settings.value('outputWidth')), int(self.core.settings.value('outputHeight'))),(0,0,0))
-                frame.paste(newFrame)
-                out_pipe.stdin.write(frame.tobytes())
-            finally:
-                True
-
-            # increase progress bar value
-            if progressBarValue + 1 <= (i / len(completeAudioArray)) * 100:
-                progressBarValue = numpy.floor((i / len(completeAudioArray)) * 100)
-                self.progressBarUpdate.emit(progressBarValue)
-                self.progressBarSetText.emit('%s%%' % str(int(progressBarValue)))
-
-        numpy.seterr(all='print')
-
-        out_pipe.stdin.close()
-        if out_pipe.stderr is not None:
-            print(out_pipe.stderr.read())
-            out_pipe.stderr.close()
-        # out_pipe.terminate() # don't terminate ffmpeg too early
-        out_pipe.wait()
-        print("Video file created")
-        self.core.deleteTempDir()
-        self.progressBarUpdate.emit(100)
-        self.progressBarSetText.emit('100%')
-        self.videoCreated.emit()
+    imageCreated = pyqtSignal(['QImage'])
+    videoCreated = pyqtSignal()
+    progressBarUpdate = pyqtSignal(int)
+    progressBarSetText = pyqtSignal(str)
+
+    def __init__(self, parent=None):
+        QtCore.QObject.__init__(self)
+        self.core = core.Core()
+        self.core.settings = parent.settings
+        self.modules = parent.modules
+        self.stackedWidget = parent.window.stackedWidget
+        self.parent = parent
+        parent.videoTask.connect(self.createVideo)
+        self.sampleSize = 1470
+
+    def renderNode(self):
+        while True:
+            i = self.compositeQueue.get()
+
+            if self.imBackground is not None:
+                frame = self.imBackground
+            else:
+                frame = self.getBackgroundAtIndex(i[1])
+
+            for compNo, comp in enumerate(self.components):
+                if compNo in self.staticComponents and self.staticComponents[compNo] != None:
+                    frame = Image.alpha_composite(frame, self.staticComponents[compNo])
+                else:
+                    frame = Image.alpha_composite(frame, comp.frameRender(compNo, i[0]))
+                # frame.paste(compFrame, mask=compFrame)
+
+            self.renderQueue.put([i[0], frame])
+            self.compositeQueue.task_done()
+
+    def renderDispatch(self):
+        print('Dispatching Frames for Compositing...')
+
+        for i in range(0, len(self.completeAudioArray), self.sampleSize):
+            self.compositeQueue.put([i, self.bgI])
+
+            if not self.imBackground:
+                # increment background video frame for next iteration
+                if self.bgI < len(self.backgroundFrames)-1 and i != 0:
+                    self.bgI += 1
+
+        self.compositeQueue.join()
+        print('Compositing Complete.')
+
+    def previewDispatch(self):
+        while True:
+            i = self.previewQueue.get()
+            if time.time() - self.lastPreview >= 0.05 or i[0] == 0:
+                self._image = ImageQt(i[1])
+                self.imageCreated.emit(QtGui.QImage(self._image))
+                self.lastPreview = time.time()
+
+            self.previewQueue.task_done()
+
+    def getBackgroundAtIndex(self, i):
+        background = Image.new(
+            "RGBA",
+            (self.width, self.height),
+            (0, 0, 0, 255)
+        )
+        layer = self.core.drawBaseImage(self.backgroundFrames[i])
+        background.paste(layer)
+        return background
+
+    @pyqtSlot(str, str, str, list)
+    def createVideo(self, backgroundImage, inputFile, outputFile, components):
+        self.width = int(self.core.settings.value('outputWidth'))
+        self.height = int(self.core.settings.value('outputHeight'))
+        # print('worker thread id: {}'.format(QtCore.QThread.currentThreadId()))
+        self.components = components
+
+        progressBarValue = 0
+        self.progressBarUpdate.emit(progressBarValue)
+        self.progressBarSetText.emit('Loading background image…')
+
+        self.backgroundImage = backgroundImage
+        self.backgroundFrames = self.core.parseBaseImage(backgroundImage)
+        if len(self.backgroundFrames) < 2:
+            # the base image is not a video so we can draw it now
+            self.imBackground = self.getBackgroundAtIndex(0)
+        else:
+            # base images will be drawn while drawing the audio bars
+            self.imBackground = None
+        self.bgI = 0
+
+        self.progressBarSetText.emit('Loading audio file…')
+        self.completeAudioArray = self.core.readAudioFile(inputFile)
+
+        # test if user has libfdk_aac
+        encoders = sp.check_output(self.core.FFMPEG_BIN + " -encoders -hide_banner", shell=True)
+        acodec = self.core.settings.value('outputAudioCodec')
+        if b'libfdk_aac' in encoders and acodec == 'aac':
+            acodec = 'libfdk_aac'
+
+        ffmpegCommand = [
+            self.core.FFMPEG_BIN,
+            '-y', # (optional) means overwrite the output file if it already exists.
+            '-f', 'rawvideo',
+            '-vcodec', 'rawvideo',
+            '-s', str(self.width)+'x'+str(self.height), # size of one frame
+            '-pix_fmt', 'rgba',
+            '-r', self.core.settings.value('outputFrameRate'), # frames per second
+            '-i', '-', # The input comes from a pipe
+            '-an',
+            '-i', inputFile,
+            '-acodec', acodec, # output audio codec
+            '-b:a', self.core.settings.value('outputAudioBitrate'),
+            '-vcodec', self.core.settings.value('outputVideoCodec'),
+            '-pix_fmt', self.core.settings.value('outputVideoFormat'),
+            '-preset', self.core.settings.value('outputPreset'),
+            '-f', self.core.settings.value('outputFormat')
+        ]
+
+        if acodec == 'aac':
+            ffmpegCommand.append('-strict')
+            ffmpegCommand.append('-2')
+
+        ffmpegCommand.append(outputFile)
+
+        out_pipe = sp.Popen(ffmpegCommand, stdin=sp.PIPE,stdout=sys.stdout, stderr=sys.stdout)
+
+        # create video for output
+        numpy.seterr(divide='ignore')
+
+        # initialize components
+        print('loaded components:',
+            ["%s%s" % (num, str(component)) for num, component in enumerate(components)])
+        self.staticComponents = {}
+        for compNo, comp in enumerate(components):
+            properties = None
+            properties = comp.preFrameRender(
+                worker=self,
+                completeAudioArray=self.completeAudioArray,
+                sampleSize=self.sampleSize
+            )
+            if properties and 'static' in properties:
+                self.staticComponents[compNo] = copy(comp.frameRender(compNo, 0))
+
+        self.compositeQueue = Queue()
+        self.compositeQueue.maxsize = 20
+        self.renderQueue = PriorityQueue()
+        self.renderQueue.maxsize = 20
+        self.previewQueue = PriorityQueue()
+
+        # create threads to render frames and send them back here for piping out
+        for i in range(3):
+            t = Thread(target=self.renderNode, name="Render Thread")
+            t.daemon = True
+            t.start()
+
+        self.dispatchThread = Thread(target=self.renderDispatch, name="Render Dispatch Thread")
+        self.dispatchThread.daemon = True
+        self.dispatchThread.start()
+
+        self.previewDispatch = Thread(target=self.previewDispatch, name="Render Dispatch Thread")
+        self.previewDispatch.daemon = True
+        self.previewDispatch.start()
+
+        frameBuffer = {}
+        self.lastPreview = 0.0
+        for i in range(0, len(self.completeAudioArray), self.sampleSize):
+            while True:
+                if i in frameBuffer:
+                    # if frame's in buffer, pipe it to ffmpeg
+                    break
+                # else fetch the next frame & add to the buffer
+                data = self.renderQueue.get()
+                frameBuffer[data[0]] = data[1]
+                self.renderQueue.task_done()
+
+            try:
+                out_pipe.stdin.write(frameBuffer[i].tobytes())
+                self.previewQueue.put([i, frameBuffer[i]])
+                del frameBuffer[i]
+            finally:
+                True
+
+            # increase progress bar value
+            if progressBarValue + 1 <= (i / len(self.completeAudioArray)) * 100:
+                progressBarValue = numpy.floor((i / len(self.completeAudioArray)) * 100)
+                self.progressBarUpdate.emit(progressBarValue)
+                self.progressBarSetText.emit('%s%%' % str(int(progressBarValue)))
+
+        numpy.seterr(all='print')
+
+        out_pipe.stdin.close()
+        if out_pipe.stderr is not None:
+            print(out_pipe.stderr.read())
+            out_pipe.stderr.close()
+        # out_pipe.terminate() # don't terminate ffmpeg too early
+        out_pipe.wait()
+        print("Video file created")
+        self.parent.drawPreview()
+        self.core.deleteTempDir()
+        self.progressBarUpdate.emit(100)
+        self.progressBarSetText.emit('100%')
+        self.videoCreated.emit()
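Note: previewDispatch rate-limits GUI updates; a frame becomes a preview only if 50 ms have passed since the last one (frame 0 is always shown), so the preview queue drains quickly even though most frames are dropped. The same throttle in isolation, with a print standing in for the imageCreated emit:

    import time
    from queue import PriorityQueue
    from threading import Thread

    previewQueue = PriorityQueue()
    lastPreview = 0.0

    def previewDispatch():
        global lastPreview
        while True:
            index, frame = previewQueue.get()
            if time.time() - lastPreview >= 0.05 or index == 0:
                print('preview frame', index)   # the real code emits a QImage here
                lastPreview = time.time()
            previewQueue.task_done()

    Thread(target=previewDispatch, daemon=True).start()
    for i in range(10):
        previewQueue.put((i, b'...'))
    previewQueue.join()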