summaryrefslogtreecommitdiff
path: root/voctocore
diff options
context:
space:
mode:
authorMaZderMind <peter@mazdermind.de>2014-08-24 15:36:26 +0200
committerMaZderMind <peter@mazdermind.de>2014-08-24 15:36:26 +0200
commit90001a38c03e41faeccd0b959753a00db670c61a (patch)
tree5b615386c6033fc40fa05c931ff4a435041f5183 /voctocore
parente8775c12ff7027675a564cf722c7778ffd060cc9 (diff)
basic quadmix impl
Diffstat (limited to 'voctocore')
-rw-r--r--voctocore/default-config.ini4
-rwxr-xr-xvoctocore/experiments/test-grabber-src.sh4
-rwxr-xr-xvoctocore/experiments/video-grabber-src.sh6
-rw-r--r--voctocore/lib/config.py5
-rw-r--r--voctocore/lib/controlserver.py9
-rw-r--r--voctocore/lib/helper.py12
-rw-r--r--voctocore/lib/pipeline.py405
-rw-r--r--voctocore/lib/quadmix.py172
-rw-r--r--voctocore/lib/shmsrc.py116
-rw-r--r--voctocore/lib/videomix.py347
-rwxr-xr-xvoctocore/voctocore.py77
11 files changed, 788 insertions, 369 deletions
diff --git a/voctocore/default-config.ini b/voctocore/default-config.ini
index f9d90ee..f00f91d 100644
--- a/voctocore/default-config.ini
+++ b/voctocore/default-config.ini
@@ -2,7 +2,9 @@
video=cam1,cam2,grabber
audio=cam1,cam2
-videocaps=video/x-raw,format=RGBx,width=1280,height=720,framerate=25/1
+socketpath=/tmp/voctomix-sockets
+
+videocaps=video/x-raw,format=RGBx,width=1280,height=720,framerate=25/1,pixel-aspect-ratio=1/1
audiocaps=audio/x-raw,format=S16LE,layout=interleaved,channels=2,rate=48000
[pause]
diff --git a/voctocore/experiments/test-grabber-src.sh b/voctocore/experiments/test-grabber-src.sh
index 7978b69..9ef15d5 100755
--- a/voctocore/experiments/test-grabber-src.sh
+++ b/voctocore/experiments/test-grabber-src.sh
@@ -1,10 +1,10 @@
#!/bin/sh
gst-launch-1.0 -vm \
- videotestsrc !\
+ videotestsrc pattern=ball !\
video/x-raw,width=1280,height=720,framerate=25/1,format=RGBx !\
queue !\
shmsink \
sync=true \
- socket-path=/tmp/grabber-v \
+ socket-path=/tmp/voctomix-sockets/v-cam1 \
wait-for-connection=false \
shm-size=100000000
diff --git a/voctocore/experiments/video-grabber-src.sh b/voctocore/experiments/video-grabber-src.sh
index 02ba1b9..e592940 100755
--- a/voctocore/experiments/video-grabber-src.sh
+++ b/voctocore/experiments/video-grabber-src.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-gst-launch-1.0 -v \
+gst-launch-1.0 \
uridecodebin \
uri=http://video.blendertestbuilds.de/download.blender.org/ED/ED_1280.avi \
name=src \
@@ -13,7 +13,7 @@ gst-launch-1.0 -v \
video/x-raw,format=RGBx,width=1280,height=720,framerate=25/1 !\
shmsink \
sync=true \
- socket-path=/tmp/grabber-v \
+ socket-path=/tmp/voctomix-sockets/v-cam1 \
wait-for-connection=false \
shm-size=100000000
\
@@ -24,6 +24,6 @@ gst-launch-1.0 -v \
audio/x-raw,format=S16LE,layout=interleaved,rate=44100,channels=2 !\
shmsink \
sync=true \
- socket-path=/tmp/grabber-a \
+ socket-path=/tmp/voctomix-sockets/a-cam1 \
wait-for-connection=false \
shm-size=10000000
diff --git a/voctocore/lib/config.py b/voctocore/lib/config.py
index b4c64aa..76f67b0 100644
--- a/voctocore/lib/config.py
+++ b/voctocore/lib/config.py
@@ -1,6 +1,11 @@
import os.path
from configparser import SafeConfigParser
+def getlist(self, section, option):
+ return [x.strip() for x in self.get(section, option).split(',')]
+
+SafeConfigParser.getlist = getlist
+
Config = SafeConfigParser()
Config.read([
'default-config.ini',
diff --git a/voctocore/lib/controlserver.py b/voctocore/lib/controlserver.py
index e11d6a8..fffeda8 100644
--- a/voctocore/lib/controlserver.py
+++ b/voctocore/lib/controlserver.py
@@ -1,4 +1,4 @@
-import socket, threading, queue
+import socket, threading, queue, logging
from gi.repository import GObject
def controlServerEntrypoint(f):
@@ -7,6 +7,7 @@ def controlServerEntrypoint(f):
return f
class ControlServer():
+ log = logging.getLogger('ControlServer')
def __init__(self, videomix):
'''Initialize server and start listening.'''
self.videomix = videomix
@@ -22,7 +23,7 @@ class ControlServer():
def listener(self, sock, *args):
'''Asynchronous connection listener. Starts a handler for each connection.'''
conn, addr = sock.accept()
- print("Connection from ", addr)
+ self.log.info("Connection from %s", addr)
# register data-received handler inside the GTK-Mainloop
GObject.io_add_watch(conn, GObject.IO_IN, self.handler)
@@ -32,7 +33,7 @@ class ControlServer():
'''Asynchronous connection handler. Processes each line from the socket.'''
line = conn.recv(4096)
if not len(line):
- print("Connection closed.")
+ self.log.debug("Connection closed.")
return False
r = self.processLine(line.decode('utf-8'))
@@ -49,7 +50,7 @@ class ControlServer():
def processLine(self, line):
command, argstring = (line.strip()+' ').split(' ', 1)
args = argstring.strip().split()
- print(command, args)
+ self.log.info(command % args)
if not hasattr(self.videomix, command):
return 'unknown command {}'.format(command)
diff --git a/voctocore/lib/helper.py b/voctocore/lib/helper.py
new file mode 100644
index 0000000..ac9bfe5
--- /dev/null
+++ b/voctocore/lib/helper.py
@@ -0,0 +1,12 @@
+from gi.repository import Gst
+
+def iteratorHelper(it):
+ while True:
+ result, value = it.next()
+ if result == Gst.IteratorResult.DONE:
+ break
+
+ if result != Gst.IteratorResult.OK:
+ raise IteratorError(result)
+
+ yield value
diff --git a/voctocore/lib/pipeline.py b/voctocore/lib/pipeline.py
new file mode 100644
index 0000000..aa9754c
--- /dev/null
+++ b/voctocore/lib/pipeline.py
@@ -0,0 +1,405 @@
+#!/usr/bin/python3
+import os, errno, time, logging
+from gi.repository import GLib, Gst
+
+# import controlserver annotation
+from lib.controlserver import controlServerEntrypoint
+
+# import library components
+from lib.config import Config
+from lib.quadmix import QuadMix
+# from lib.videomix import VideoMix
+# from lib.audiomix import AudioMix
+# from lib.distributor import TimesTwoDistributor
+from lib.shmsrc import FailsafeShmSrc
+
+class Pipeline(Gst.Pipeline):
+	"""mixing, streaming and encoding pipeline construction and control"""
+ log = logging.getLogger('Pipeline')
+ videonames = []
+ audionames = []
+
+ def __init__(self):
+ super().__init__()
+
+ self.log.debug('Creating Video-Mixer')
+ # create audio and video mixer
+ self.quadmixer = QuadMix()
+ self.add(self.quadmixer)
+
+ # self.videomixer = VideoMix()
+ # self.add(self.videomixer)
+
+ # self.audiomixer = AudioMix()
+ # self.add(self.audiomixer)
+
+ # read the path where the shm-control-sockets are located and ensure it exists
+ socketpath = Config.get('sources', 'socketpath')
+ self.log.info('Ensuring the configured socketpath exists: %s', socketpath)
+ try:
+ os.makedirs(socketpath)
+ except OSError as exception:
+ if exception.errno != errno.EEXIST:
+ raise
+
+ self.videonames = Config.getlist('sources', 'video')
+ self.audionames = Config.getlist('sources', 'video')
+
+ for name in self.videonames:
+ socket = os.path.join(socketpath, 'v-'+name)
+
+ self.log.info('Creating video-source %s at socket-path %s', name, socket)
+ sourcebin = FailsafeShmSrc(name, socket)
+ self.add(sourcebin)
+ self.quadmixer.add_source(sourcebin)
+
+ # distributor = TimesTwoDistributor(sourcebin)
+ # self.add(distributor)
+
+ # distributor.link(self.quadmixer)
+ # distributor.link(self.videomixer)
+
+ # for audiosource in Config.getlist('sources', 'audio'):
+ # sourcebin = FailsafeShmSrc(os.path.join(socketpath, audiosource))
+
+ # self.add(sourcebin)
+ # sourcebin.link(self.audiomixer)
+
+		# tell the quadmix that all sources have been added and no more will follow
+ self.quadmixer.finalize()
+
+ self.quadmixsink = Gst.ElementFactory.make('autovideosink', 'quadmixsink')
+ self.quadmixsink.set_property('sync', False)
+ self.add(self.quadmixsink)
+ self.quadmixer.link(self.quadmixsink)
+
+ # self.videosink = Gst.ElementFactory.make('autovideosink', 'videosink')
+ # self.add(self.videosink)
+ # self.videomixer.link(self.videosink)
+
+ # self.audiosink = Gst.ElementFactory.make('autoaudiosink', 'audiosink')
+ # self.add(self.audiosink)
+ # self.audiomixer.link(self.audiosink)
+
+ def run(self):
+ self.set_state(Gst.State.PAUSED)
+ time.sleep(0.5)
+ self.set_state(Gst.State.PLAYING)
+
+ def quit(self):
+ self.set_state(Gst.State.NULL)
+
+
+
+
+ # # collection of video-sources to connect to the quadmix
+ # quadmixSources = []
+
+ # # create camera sources
+ # for camberabin in self.createDummyCamSources():
+ # # link camerasource to audiomixer
+ # camberabin.get_by_name('audio_src').link(self.pipeline.get_by_name('liveaudio'))
+
+ # # inject a ×2 distributor and link one end to the live-mixer
+ # distributor = self.createDistributor(camberabin.get_by_name('video_src'), camberabin.get_name())
+ # distributor.get_by_name('a').link(self.pipeline.get_by_name('livevideo'))
+
+ # # collect the other end to add it later to the quadmix
+ # quadmixSources.append(distributor.get_by_name('b'))
+
+ # # TODO: generate pause & slides with another generator here which only
+ # # yields if the respective files are present and which only have a video-pad
+
+ # # add all video-sources to the quadmix-monitor-screen
+ # self.addVideosToQuadmix(quadmixSources, self.pipeline.get_by_name('quadmix'))
+
+ # # initialize to known defaults
+ # # TODO: make configurable
+ # self.switchVideo(0)
+ # self.switchAudio(0)
+
+ # Gst.debug_bin_to_dot_file(self.pipeline, Gst.DebugGraphDetails.ALL, 'test')
+ # self.pipeline.set_state(Gst.State.PLAYING)
+
+
+
+
+
+
+ # def createMixer(self):
+ # """create audio and video mixer"""
+ # # create mixer-pipeline from string
+ # mixerbin = Gst.parse_bin_from_description("""
+ # videomixer name=livevideo ! autovideosink
+ # input-selector name=liveaudio ! autoaudiosink
+
+ # videotestsrc pattern="solid-color" foreground-color=0x808080 ! capsfilter name=filter ! videomixer name=quadmix ! autovideosink
+ # """, False)
+
+ # # define caps for the videotestsrc which generates the background-color for the quadmix
+ # bgcaps = Gst.Caps.new_empty_simple('video/x-raw')
+ # bgcaps.set_value('width', round(self.monitorSize[0]))
+ # bgcaps.set_value('height', round(self.monitorSize[1]))
+ # mixerbin.get_by_name('filter').set_property('caps', bgcaps)
+
+ # # name the bin, add and return it
+ # mixerbin.set_name('mixerbin')
+ # self.pipeline.add(mixerbin)
+ # return mixerbin
+
+ # def addVideosToQuadmix(self, videosources, quadmix):
+	# 	"""add all available videosources to the quadmix"""
+ # count = len(videosources)
+
+ # # coordinate of the cell where we place the next video
+ # place = [0, 0]
+
+ # # number of cells in the quadmix-monitor
+ # grid = [0, 0]
+ # grid[0] = math.ceil(math.sqrt(count))
+ # grid[1] = math.ceil(count / grid[0])
+
+ # # size of each cell in the quadmix-monitor
+ # cellSize = (
+ # self.monitorSize[0] / grid[0],
+ # self.monitorSize[1] / grid[1]
+ # )
+
+ # print("showing {} videosources in a {}×{} grid in a {}×{} px window, which gives cells of {}×{} px per videosource".format(
+ # count, grid[0], grid[1], self.monitorSize[0], self.monitorSize[1], cellSize[0], cellSize[1]))
+
+ # # iterate over all video-sources
+ # for idx, videosource in enumerate(videosources):
+ # # generate a pipeline for this videosource which
+ # # - scales the video to the request
+ # # - remove n px of the video (n = 5 if the video is highlighted else 0)
+ # # - add a colored border of n px of the video (n = 5 if the video is highlighted else 0)
+ # # - overlay the index of the video as text in the top left corner
+ # # - known & named output
+ # previewbin = Gst.parse_bin_from_description("""
+ # videoscale name=in !
+ # capsfilter name=caps !
+ # videobox name=crop top=0 left=0 bottom=0 right=0 !
+ # videobox fill=red top=-0 left=-0 bottom=-0 right=-0 name=add !
+ # textoverlay color=0xFFFFFFFF halignment=left valignment=top xpad=10 ypad=5 font-desc="sans 35" name=text !
+ # identity name=out
+ # """, False)
+
+ # # name the bin and add it
+ # previewbin.set_name('previewbin-{}'.format(idx))
+ # self.pipeline.add(previewbin)
+ # self.previewbins.append(previewbin)
+
+ # # set the overlay-text
+ # previewbin.get_by_name('text').set_property('text', str(idx))
+
+ # # query the video-source caps and extract its size
+ # caps = videosource.get_static_pad('src').query_caps(None)
+ # capsstruct = caps.get_structure(0)
+ # srcSize = (
+ # capsstruct.get_int('width')[1],
+ # capsstruct.get_int('height')[1],
+ # )
+
+ # # calculate the ideal scale factor and scale the sizes
+ # f = max(srcSize[0] / cellSize[0], srcSize[1] / cellSize[1])
+ # scaleSize = (
+ # srcSize[0] / f,
+ # srcSize[1] / f,
+ # )
+
+ # # calculate the top/left coordinate
+ # coord = (
+ # place[0] * cellSize[0] + (cellSize[0] - scaleSize[0]) / 2,
+ # place[1] * cellSize[1] + (cellSize[1] - scaleSize[1]) / 2,
+ # )
+
+ # print("placing videosource {} of size {}×{} scaled by {} to {}×{} in a cell {}×{} px cell ({}/{}) at position ({}/{})".format(
+ # idx, srcSize[0], srcSize[1], f, scaleSize[0], scaleSize[1], cellSize[0], cellSize[1], place[0], place[1], coord[0], coord[1]))
+
+ # # link the videosource to the input of the preview-bin
+ # videosource.link(previewbin.get_by_name('in'))
+
+ # # create and set the caps for the preview-scaler
+ # scalecaps = Gst.Caps.new_empty_simple('video/x-raw')
+ # scalecaps.set_value('width', round(scaleSize[0]))
+ # scalecaps.set_value('height', round(scaleSize[1]))
+ # previewbin.get_by_name('caps').set_property('caps', scalecaps)
+
+ # # request a pad from the quadmixer and configure x/y position
+ # sinkpad = quadmix.get_request_pad('sink_%u')
+ # sinkpad.set_property('xpos', round(coord[0]))
+ # sinkpad.set_property('ypos', round(coord[1]))
+
+ # # link the output of the preview-bin to the mixer
+ # previewbin.get_by_name('out').link(quadmix)
+
+ # # increment grid position
+ # place[0] += 1
+ # if place[0] >= grid[0]:
+ # place[1] += 1
+ # place[0] = 0
+
+ # def createDistributor(self, videosource, name):
+ # """create a simple ×2 distributor"""
+ # distributor = Gst.parse_bin_from_description("""
+ # tee name=t
+ # t. ! queue name=a
+ # t. ! queue name=b
+ # """, False)
+
+ # # set a name and add to pipeline
+ # distributor.set_name('distributor({0})'.format(name))
+ # self.pipeline.add(distributor)
+
+ # # link input to the tee
+ # videosource.link(distributor.get_by_name('t'))
+ # return distributor
+
+ # def createDummyCamSources(self):
+ # """create test-video-sources from files or urls"""
+
+ # # TODO make configurable
+ # uris = ('file:///home/peter/122.mp4', 'file:///home/peter/10025.mp4',)
+ # for idx, uri in enumerate(uris):
+ # # create a bin for a simulated camera input
+ # # force the input resolution to 1024x576 because that way the following elements
+ # # in the pipeline cam know the size even if the file is not yet loaded. the quadmixer
+ # # is not resize-capable
+ # camberabin = Gst.parse_bin_from_description("""
+ # uridecodebin name=input
+ # input. ! videoconvert ! videoscale ! videorate ! video/x-raw,width=1024,height=576,framerate=25/1 ! identity name=video_src
+ # input. ! audioconvert name=audio_src
+ # """, False)
+
+ # # set name and uri
+ # camberabin.set_name('dummy-camberabin({0})'.format(uri))
+ # camberabin.get_by_name('input').set_property('uri', uri)
+
+ # # add to pipeline and pass the bin upstream
+ # self.pipeline.add(camberabin)
+ # yield camberabin
+
+
+ # def createCamSources(self):
+ # """create real-video-sources from the bmd-drivers"""
+
+ # # TODO make number of installed cams configurable
+ # for cam in range(2):
+ # # create a bin for camera input
+ # camberabin = Gst.parse_bin_from_description("""
+ # decklinksrc name=input input=sdi input-mode=1080p25
+ # input. ! videoconvert ! videoscale ! videorate ! video/x-raw,width=1920,height=1080,framerate=25/1 ! identity name=video_src
+ # input. ! audioconvert name=audio_src
+ # """, False)
+
+ # # set name and subdevice
+ # camberabin.set_name('camberabin({0})'.format(cam))
+ # camberabin.get_by_name('input').set_property('subdevice', cam)
+
+ # # add to pipeline and pass the bin upstream
+ # self.pipeline.add(camberabin)
+ # yield camberabin
+
+
+
+
+ ### below are access-methods for the ControlServer
+ @controlServerEntrypoint
+ def status(self):
+ """System Status Query"""
+ raise NotImplementedError("status command is not implemented yet")
+
+ @controlServerEntrypoint
+ def numAudioSources(self):
+ """return number of available audio sources"""
+ raise NotImplementedError("audio is not implemented yet")
+
+
+ @controlServerEntrypoint
+ def switchAudio(self, audiosource):
+ """switch audio to the selected audio"""
+ raise NotImplementedError("audio is not implemented yet")
+
+
+ @controlServerEntrypoint
+ def numVideoSources(self):
+ """return number of available video sources"""
+ livevideo = self.pipeline.get_by_name('livevideo')
+ return str(len(self.videonames))
+
+
+ @controlServerEntrypoint
+ def switchVideo(self, videosource):
+		"""switch video to the selected video-source"""
+ if videosource.isnumeric():
+ idx = int(videosource)
+ self.log.info("interpreted input as videosource-index %u", idx)
+ if idx >= len(self.videonames):
+ idx = None
+ else:
+ try:
+ idx = self.videonames.index(videosource)
+ self.log.info("interpreted input as videosource-name, lookup to %u", idx)
+ except IndexError:
+ idx = None
+
+ if idx == None:
+ return 'unknown video-source: %s' % (videosource)
+
+ self.log.info("switching quadmix to video-source %u", idx)
+ self.quadmixer.set_active(idx)
+ # todo: switch main switcher
+
+ @controlServerEntrypoint
+ def fadeVideo(self, videosource):
+ """fade video to the selected video"""
+ raise NotImplementedError("fade command is not implemented yet")
+
+
+ @controlServerEntrypoint
+ def setPipVideo(self, videosource):
+ """switch video-source in the PIP to the selected video"""
+ raise NotImplementedError("pip commands are not implemented yet")
+
+
+ @controlServerEntrypoint
+ def fadePipVideo(self, videosource):
+ """fade video-source in the PIP to the selected video"""
+ raise NotImplementedError("pip commands are not implemented yet")
+
+
+ class PipPlacements:
+ """enumeration of possible PIP-Placements"""
+ TopLeft, TopRight, BottomLeft, BottomRight = range(4)
+
+
+ @controlServerEntrypoint
+ def setPipPlacement(self, placement):
+ """place PIP in the selected position"""
+ assert(isinstance(placement, PipPlacements))
+ raise NotImplementedError("pip commands are not implemented yet")
+
+
+ @controlServerEntrypoint
+ def setPipStatus(self, enabled):
+ """show or hide PIP"""
+ raise NotImplementedError("pip commands are not implemented yet")
+
+
+ @controlServerEntrypoint
+ def fadePipStatus(self, enabled):
+ """fade PIP in our out"""
+ raise NotImplementedError("pip commands are not implemented yet")
+
+
+ class StreamContents:
+		"""enumeration of possible stream contents"""
+ Live, Pause, NoStream = range(3)
+
+
+ @controlServerEntrypoint
+ def selectStreamContent(self, content):
+		"""switch the livestream-content between selected mixer output, pause-image or nostream-image"""
+ assert(isinstance(content, StreamContents))
+ raise NotImplementedError("pause/nostream switching is not implemented yet")
diff --git a/voctocore/lib/quadmix.py b/voctocore/lib/quadmix.py
new file mode 100644
index 0000000..5115919
--- /dev/null
+++ b/voctocore/lib/quadmix.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python3
+import math, logging
+from gi.repository import GLib, Gst
+
+from lib.helper import iteratorHelper
+from lib.config import Config
+
+class QuadMix(Gst.Bin):
+ log = logging.getLogger('QuadMix')
+ sources = []
+ previewbins = []
+
+ def __init__(self):
+ super().__init__()
+
+ caps = Gst.Caps.from_string(Config.get('mix', 'monitorcaps'))
+ self.log.debug('parsing monitorcaps from config: %s', caps.to_string())
+ struct = caps.get_structure(0)
+
+ self.monitorSize = [struct.get_int('width')[1], struct.get_int('height')[1]]
+
+ self.bgsrc = Gst.ElementFactory.make('videotestsrc', 'bgsrc')
+ self.mixer = Gst.ElementFactory.make('videomixer', 'mixer')
+ self.scale = Gst.ElementFactory.make('videoscale', 'scale')
+
+ self.add(self.bgsrc)
+ self.add(self.mixer)
+ self.add(self.scale)
+
+ self.bgsrc.link_filtered(self.mixer, caps)
+ self.mixer.link_filtered(self.scale, caps)
+
+ self.bgsrc.set_property('pattern', 'solid-color')
+ self.bgsrc.set_property('foreground-color', 0x808080)
+
+ self.add_pad(
+ Gst.GhostPad.new('src', self.scale.get_static_pad('src'))
+ )
+
+ # I don't know how to create a on-request ghost-pad
+ def add_source(self, src):
+ self.log.info('adding source %s', src.get_name())
+ self.sources.append(src)
+
+ def finalize(self):
+ self.log.debug('all sources added, calculating layout')
+
+ # number of placed sources
+ count = len(self.sources)
+
+ # coordinate of the cell where we place the next video
+ place = [0, 0]
+
+ # number of cells in the quadmix-monitor
+ grid = [0, 0]
+ grid[0] = math.ceil(math.sqrt(count))
+ grid[1] = math.ceil(count / grid[0])
+
+ # size of each cell in the quadmix-monitor
+ cellSize = (
+ self.monitorSize[0] / grid[0],
+ self.monitorSize[1] / grid[1]
+ )
+
+ # report calculation results
+ self.log.info('showing %u videosources in a %u×%u grid in a %u×%u px window, which gives cells of %u×%u px per videosource',
+ count, grid[0], grid[1], self.monitorSize[0], self.monitorSize[1], cellSize[0], cellSize[1])
+
+ # iterate over all video-sources
+ for idx, videosource in enumerate(self.sources):
+ # query the video-source caps and extract its size
+ caps = videosource.get_static_pad('src').query_caps(None)
+ capsstruct = caps.get_structure(0)
+ srcSize = (
+ capsstruct.get_int('width')[1],
+ capsstruct.get_int('height')[1],
+ )
+
+ # calculate the ideal scale factor and scale the sizes
+ f = max(srcSize[0] / cellSize[0], srcSize[1] / cellSize[1])
+ scaleSize = (
+ srcSize[0] / f,
+ srcSize[1] / f,
+ )
+
+ # calculate the top/left coordinate
+ coord = (
+ place[0] * cellSize[0] + (cellSize[0] - scaleSize[0]) / 2,
+ place[1] * cellSize[1] + (cellSize[1] - scaleSize[1]) / 2,
+ )
+
+ self.log.info('placing videosource %u of size %u×%u scaled by %u to %u×%u in a cell %u×%u px cell (%u/%u) at position (%u/%u)',
+ idx, srcSize[0], srcSize[1], f, scaleSize[0], scaleSize[1], cellSize[0], cellSize[1], place[0], place[1], coord[0], coord[1])
+
+ # request a pad from the quadmixer and configure x/y position
+ sinkpad = self.mixer.get_request_pad('sink_%u')
+ sinkpad.set_property('xpos', round(coord[0]))
+ sinkpad.set_property('ypos', round(coord[1]))
+
+ # create a sub-preview-bin
+ previewbin = QuadMixPreview(idx, scaleSize)
+ self.add(previewbin)
+ self.previewbins.append(previewbin)
+
+ # link videosource to input of previewbin
+ videosource.link(previewbin)
+
+ # link the output of the preview-bin to the mixer
+ previewbin.get_static_pad('src').link(sinkpad)
+
+ # increment grid position
+ place[0] += 1
+ if place[0] >= grid[0]:
+ place[1] += 1
+ place[0] = 0
+
+ def set_active(self, target):
+ for idx, previewbin in enumerate(self.previewbins):
+ previewbin.set_active(target == idx)
+
+class QuadMixPreview(Gst.Bin):
+ log = logging.getLogger('QuadMixPreview')
+ strokeWidth = 5
+
+ def __init__(self, idx, scaleSize):
+ super().__init__()
+
+ self.scale = Gst.ElementFactory.make('videoscale', 'scale')
+ self.cropbox = Gst.ElementFactory.make('videobox', 'cropbox')
+ self.strokebox = Gst.ElementFactory.make('videobox', 'strokebox')
+ self.textoverlay = Gst.ElementFactory.make('textoverlay', 'textoverlay')
+
+ self.add(self.scale)
+ self.add(self.cropbox)
+ self.add(self.strokebox)
+ self.add(self.textoverlay)
+
+ caps = Gst.Caps.new_empty_simple('video/x-raw')
+ caps.set_value('width', round(scaleSize[0]))
+ caps.set_value('height', round(scaleSize[1]))
+
+ self.strokebox.set_property('fill', 'green')
+
+ self.textoverlay.set_property('color', 0xFFFFFFFF)
+ self.textoverlay.set_property('halignment', 'left')
+ self.textoverlay.set_property('valignment', 'top')
+ self.textoverlay.set_property('xpad', 10)
+ self.textoverlay.set_property('ypad', 5)
+ self.textoverlay.set_property('font-desc', 'sans 35')
+
+ self.scale.link_filtered(self.cropbox, caps)
+ self.cropbox.link(self.strokebox)
+ self.strokebox.link(self.textoverlay)
+
+ self.set_active(False)
+
+ # Add Ghost Pads
+ self.add_pad(
+ Gst.GhostPad.new('sink', self.scale.get_static_pad('sink'))
+ )
+ self.add_pad(
+ Gst.GhostPad.new('src', self.textoverlay.get_static_pad('src'))
+ )
+
+ def set_active(self, active):
+ self.log.info("switching active-state to %u", active)
+ for side in ('top', 'left', 'right', 'bottom'):
+ self.cropbox.set_property(side, self.strokeWidth if active else 0)
+ self.strokebox.set_property(side, -self.strokeWidth if active else 0)
+
+ def setColor(self, color):
+ self.strokebox.set_property('fill', color)
diff --git a/voctocore/lib/shmsrc.py b/voctocore/lib/shmsrc.py
new file mode 100644
index 0000000..53a7d9d
--- /dev/null
+++ b/voctocore/lib/shmsrc.py
@@ -0,0 +1,116 @@
+#!/usr/bin/python3
+import time, logging
+from gi.repository import GLib, Gst
+
+from lib.config import Config
+
+class FailsafeShmSrc(Gst.Bin):
+ log = logging.getLogger('FailsafeShmSrc')
+ last_buffer_arrived = 0
+ is_in_failstate = True
+
+ def __init__(self, name, socket):
+ super().__init__()
+ self.set_name(socket)
+
+ caps = Gst.Caps.from_string(Config.get('sources', 'videocaps'))
+ self.log.debug('parsing videocaps from config: %s', caps.to_string())
+
+ # Create elements
+ self.shmsrc = Gst.ElementFactory.make('shmsrc', None)
+ self.identity1 = Gst.ElementFactory.make('identity', None)
+ self.identity2 = Gst.ElementFactory.make('identity', None)
+ self.switch = Gst.ElementFactory.make('input-selector', None)
+ self.failsrc = Gst.ElementFactory.make('videotestsrc', None)
+
+ if not self.shmsrc or not self.identity1 or not self.identity2 or not self.switch or not self.failsrc:
+ self.log.error('could not create elements')
+
+ # Add elements to Bin
+ self.add(self.shmsrc)
+ self.add(self.identity1)
+ self.add(self.identity2)
+ self.add(self.switch)
+ self.add(self.failsrc)
+
+ # Get Switcher-Pads
+ self.goodpad = self.switch.get_request_pad('sink_%u')
+ self.failpad = self.switch.get_request_pad('sink_%u')
+
+ # Set properties
+ self.shmsrc.set_property('socket-path', socket)
+ self.shmsrc.set_property('is-live', True)
+ self.shmsrc.set_property('do-timestamp', True)
+ self.identity2.set_property('sync', True)
+ self.switch.set_property('active-pad', self.failpad)
+
+ # Link elements
+ self.shmsrc.link_filtered(self.identity1, caps)
+ self.identity1.get_static_pad('src').link(self.goodpad)
+
+ self.failsrc.link_filtered(self.identity2, caps)
+ self.identity2.get_static_pad('src').link(self.failpad)
+
+ # Install pad probes
+ self.shmsrc.get_static_pad('src').add_probe(Gst.PadProbeType.BLOCK | Gst.PadProbeType.EVENT_DOWNSTREAM, self.event_probe, None)
+ self.shmsrc.get_static_pad('src').add_probe(Gst.PadProbeType.BLOCK | Gst.PadProbeType.BUFFER, self.data_probe, None)
+
+ # Install Watchdog
+ GLib.timeout_add(500, self.watchdog)
+
+ # Add Ghost Pads
+ self.add_pad(
+ Gst.GhostPad.new('src', self.switch.get_static_pad('src'))
+ )
+
+ def do_handle_message(self, msg):
+ if msg.type == Gst.MessageType.ERROR and msg.src == self.shmsrc:
+ self.log.warning('received error-message from ShmSrc, dropping')
+ else:
+ Gst.Bin.do_handle_message(self, msg)
+
+ def event_probe(self, pad, info, ud):
+ e = info.get_event()
+ if e.type == Gst.EventType.EOS:
+ self.log.warning('received EOS-event on event-probe, dropping')
+ self.switch_to_failstate()
+ return Gst.PadProbeReturn.DROP
+
+ return Gst.PadProbeReturn.PASS
+
+
+ def data_probe(self, pad, info, ud):
+ self.last_buffer_arrived = time.time()
+ self.switch_to_goodstate()
+ return Gst.PadProbeReturn.PASS
+
+ def watchdog(self):
+ if self.last_buffer_arrived + 0.1 < time.time():
+ self.log.warning('watchdog encountered a timeout')
+ self.switch_to_failstate()
+
+ if self.last_buffer_arrived + 3 < time.time() and round(time.time() % 3) == 0:
+ self.restart()
+
+ return True
+
+ def restart(self):
+ self.log.warning('restarting ShmSrc')
+ self.shmsrc.set_state(Gst.State.NULL)
+ self.shmsrc.set_state(Gst.State.PLAYING)
+
+ def switch_to_goodstate(self):
+ if not self.is_in_failstate:
+ return
+
+ self.log.warning('switching output to goodstate')
+ self.is_in_failstate = False
+ self.switch.set_property('active-pad', self.goodpad)
+
+ def switch_to_failstate(self):
+ if self.is_in_failstate:
+ return
+
+ self.log.warning('switching output to failstate')
+ self.is_in_failstate = True
+ self.switch.set_property('active-pad', self.failpad)
diff --git a/voctocore/lib/videomix.py b/voctocore/lib/videomix.py
deleted file mode 100644
index f98582a..0000000
--- a/voctocore/lib/videomix.py
+++ /dev/null
@@ -1,347 +0,0 @@
-import sys, inspect, math
-from pprint import pprint
-from gi.repository import GLib, Gst
-from lib.controlserver import controlServerEntrypoint
-
-class Videomix:
- """mixing, streaming and encoding pipeline constuction and control"""
- # size of the monitor-streams
- # should be anamorphic PAL, beacuse we encode it to dv and send it to the mixer-gui
- monitorSize = (1024, 576)
-
- previewbins = []
-
- def __init__(self):
- """initialize video mixing, streaming and encoding pipeline"""
- # initialize an empty pipeline
- self.pipeline = Gst.Pipeline()
-
- # create audio and video mixer
- mixerbin = self.createMixer()
-
- # collection of video-sources to connect to the quadmix
- quadmixSources = []
-
- # create camera sources
- for camberabin in self.createDummyCamSources():
- # link camerasource to audiomixer
- camberabin.get_by_name('audio_src').link(self.pipeline.get_by_name('liveaudio'))
-
- # inject a ×2 distributor and link one end to the live-mixer
- distributor = self.createDistributor(camberabin.get_by_name('video_src'), camberabin.get_name())
- distributor.get_by_name('a').link(self.pipeline.get_by_name('livevideo'))
-
- # collect the other end to add it later to the quadmix
- quadmixSources.append(distributor.get_by_name('b'))
-
- # TODO: generate pause & slides with another generator here which only
- # yields if the respective files are present and which only have a video-pad
-
- # add all video-sources to the quadmix-monitor-screen
- self.addVideosToQuadmix(quadmixSources, self.pipeline.get_by_name('quadmix'))
-
- # initialize to known defaults
- # TODO: make configurable
- self.switchVideo(0)
- self.switchAudio(0)
-
- Gst.debug_bin_to_dot_file(self.pipeline, Gst.DebugGraphDetails.ALL, 'test')
- self.pipeline.set_state(Gst.State.PLAYING)
-
- def createMixer(self):
- """create audio and video mixer"""
- # create mixer-pipeline from string
- mixerbin = Gst.parse_bin_from_description("""
- videomixer name=livevideo ! autovideosink
- input-selector name=liveaudio ! autoaudiosink
-
- videotestsrc pattern="solid-color" foreground-color=0x808080 ! capsfilter name=filter ! videomixer name=quadmix ! autovideosink
- """, False)
-
- # define caps for the videotestsrc which generates the background-color for the quadmix
- bgcaps = Gst.Caps.new_empty_simple('video/x-raw')
- bgcaps.set_value('width', round(self.monitorSize[0]))
- bgcaps.set_value('height', round(self.monitorSize[1]))
- mixerbin.get_by_name('filter').set_property('caps', bgcaps)
-
- # name the bin, add and return it
- mixerbin.set_name('mixerbin')
- self.pipeline.add(mixerbin)
- return mixerbin
-
- def addVideosToQuadmix(self, videosources, quadmix):
- """add all avaiable videosources to the quadmix"""
- count = len(videosources)
-
- # coordinate of the cell where we place the next video
- place = [0, 0]
-
- # number of cells in the quadmix-monitor
- grid = [0, 0]
- grid[0] = math.ceil(math.sqrt(count))
- grid[1] = math.ceil(count / grid[0])
-
- # size of each cell in the quadmix-monitor
- cellSize = (
- self.monitorSize[0] / grid[0],
- self.monitorSize[1] / grid[1]
- )
-
- print("showing {} videosources in a {}×{} grid in a {}×{} px window, which gives cells of {}×{} px per videosource".format(
- count, grid[0], grid[1], self.monitorSize[0], self.monitorSize[1], cellSize[0], cellSize[1]))
-
- # iterate over all video-sources
- for idx, videosource in enumerate(videosources):
- # generate a pipeline for this videosource which
- # - scales the video to the request
- # - remove n px of the video (n = 5 if the video is highlighted else 0)
- # - add a colored border of n px of the video (n = 5 if the video is highlighted else 0)
- # - overlay the index of the video as text in the top left corner
- # - known & named output
- previewbin = Gst.parse_bin_from_description("""
- videoscale name=in !
- capsfilter name=caps !
- videobox name=crop top=0 left=0 bottom=0 right=0 !
- videobox fill=red top=-0 left=-0 bottom=-0 right=-0 name=add !
- textoverlay color=0xFFFFFFFF halignment=left valignment=top xpad=10 ypad=5 font-desc="sans 35" name=text !
- identity name=out
- """, False)
-
- # name the bin and add it
- previewbin.set_name('previewbin-{}'.format(idx))
- self.pipeline.add(previewbin)
- self.previewbins.append(previewbin)
-
- # set the overlay-text
- previewbin.get_by_name('text').set_property('text', str(idx))
-
- # query the video-source caps and extract its size
- caps = videosource.get_static_pad('src').query_caps(None)
- capsstruct = caps.get_structure(0)
- srcSize = (
- capsstruct.get_int('width')[1],
- capsstruct.get_int('height')[1],
- )
-
- # calculate the ideal scale factor and scale the sizes
- f = max(srcSize[0] / cellSize[0], srcSize[1] / cellSize[1])
- scaleSize = (
- srcSize[0] / f,
- srcSize[1] / f,
- )
-
- # calculate the top/left coordinate
- coord = (
- place[0] * cellSize[0] + (cellSize[0] - scaleSize[0]) / 2,
- place[1] * cellSize[1] + (cellSize[1] - scaleSize[1]) / 2,
- )
-
- print("placing videosource {} of size {}×{} scaled by {} to {}×{} in a cell {}×{} px cell ({}/{}) at position ({}/{})".format(
- idx, srcSize[0], srcSize[1], f, scaleSize[0], scaleSize[1], cellSize[0], cellSize[1], place[0], place[1], coord[0], coord[1]))
-
- # link the videosource to the input of the preview-bin
- videosource.link(previewbin.get_by_name('in'))
-
- # create and set the caps for the preview-scaler
- scalecaps = Gst.Caps.new_empty_simple('video/x-raw')
- scalecaps.set_value('width', round(scaleSize[0]))
- scalecaps.set_value('height', round(scaleSize[1]))
- previewbin.get_by_name('caps').set_property('caps', scalecaps)
-
- # request a pad from the quadmixer and configure x/y position
- sinkpad = quadmix.get_request_pad('sink_%u')
- sinkpad.set_property('xpos', round(coord[0]))
- sinkpad.set_property('ypos', round(coord[1]))
-
- # link the output of the preview-bin to the mixer
- previewbin.get_by_name('out').link(quadmix)
-
- # increment grid position
- place[0] += 1
- if place[0] >= grid[0]:
- place[1] += 1
- place[0] = 0
-
- def createDistributor(self, videosource, name):
- """create a simple ×2 distributor"""
- distributor = Gst.parse_bin_from_description("""
- tee name=t
- t. ! queue name=a
- t. ! queue name=b
- """, False)
-
- # set a name and add to pipeline
- distributor.set_name('distributor({0})'.format(name))
- self.pipeline.add(distributor)
-
- # link input to the tee
- videosource.link(distributor.get_by_name('t'))
- return distributor
-
- def createDummyCamSources(self):
- """create test-video-sources from files or urls"""
-
- # TODO make configurable
- uris = ('file:///home/peter/122.mp4', 'file:///home/peter/10025.mp4',)
- for idx, uri in enumerate(uris):
- # create a bin for a simulated camera input
- # force the input resolution to 1024x576 because that way the following elements
- # in the pipeline cam know the size even if the file is not yet loaded. the quadmixer
- # is not resize-capable
- camberabin = Gst.parse_bin_from_description("""
- uridecodebin name=input
- input. ! videoconvert ! videoscale ! videorate ! video/x-raw,width=1024,height=576,framerate=25/1 ! identity name=video_src
- input. ! audioconvert name=audio_src
- """, False)
-
- # set name and uri
- camberabin.set_name('dummy-camberabin({0})'.format(uri))
- camberabin.get_by_name('input').set_property('uri', uri)
-
- # add to pipeline and pass the bin upstream
- self.pipeline.add(camberabin)
- yield camberabin
-
-
- def createCamSources(self):
- """create real-video-sources from the bmd-drivers"""
-
- # TODO make number of installed cams configurable
- for cam in range(2):
- # create a bin for camera input
- camberabin = Gst.parse_bin_from_description("""
- decklinksrc name=input input=sdi input-mode=1080p25
- input. ! videoconvert ! videoscale ! videorate ! video/x-raw,width=1920,height=1080,framerate=25/1 ! identity name=video_src
- input. ! audioconvert name=audio_src
- """, False)
-
- # set name and subdevice
- camberabin.set_name('camberabin({0})'.format(cam))
- camberabin.get_by_name('input').set_property('subdevice', cam)
-
- # add to pipeline and pass the bin upstream
- self.pipeline.add(camberabin)
- yield camberabin
-
- def iteratorHelper(self, it):
- while True:
- result, value = it.next()
- if result == Gst.IteratorResult.DONE:
- break
-
- if result != Gst.IteratorResult.OK:
- raise IteratorError(result)
-
- yield value
-
- def previewBorderHelper(self, previewbin, enabled, color = 'red'):
- crop = previewbin.get_by_name('crop')
- add = previewbin.get_by_name('add')
- add.set_property('fill', color)
- for side in ('top', 'left', 'right', 'bottom'):
- crop.set_property(side, 5 if enabled else 0)
- add.set_property(side, -5 if enabled else 0)
-
-
- ### below are access-methods for the ControlServer
-
- @controlServerEntrypoint
- def numAudioSources(self):
- """return number of available audio sources"""
- liveaudio = self.pipeline.get_by_name('liveaudio')
- return str(len(list(self.iteratorHelper(liveaudio.iterate_sink_pads()))))
-
-
- @controlServerEntrypoint
- def switchAudio(self, audiosource):
- """switch audio to the selected audio"""
- liveaudio = self.pipeline.get_by_name('liveaudio')
- pad = liveaudio.get_static_pad('sink_{}'.format(audiosource))
- if pad is None:
- return 'unknown audio-source: {}'.format(audiosource)
-
- liveaudio.set_property('active-pad', pad)
- return True
-
-
- @controlServerEntrypoint
- def numVideoSources(self):
- """return number of available video sources"""
- livevideo = self.pipeline.get_by_name('livevideo')
- return str(len(list(self.iteratorHelper(livevideo.iterate_sink_pads()))))
-
-
- @controlServerEntrypoint
- def switchVideo(self, videosource):
- """switch audio to the selected video"""
- livevideo = self.pipeline.get_by_name('livevideo')
- pad = livevideo.get_static_pad('sink_{}'.format(videosource))
- previewbin = self.pipeline.get_by_name('previewbin-{}'.format(videosource))
-
- if pad is None or previewbin is None:
- return 'unknown video-source: {}'.format(videosource)
-
- self.previewBorderHelper(previewbin, True, 'green')
- for iterbin in self.previewbins:
- if previewbin != iterbin:
- self.previewBorderHelper(iterbin, False)
-
- pad.set_property('alpha', 1)
- for iterpad in self.iteratorHelper(livevideo.iterate_sink_pads()):
- if pad != iterpad:
- #self.previewBorderHelper(iterpad, 0)
- iterpad.set_property('alpha', 0)
-
-
- @controlServerEntrypoint
- def fadeVideo(self, videosource):
- """fade video to the selected video"""
- raise NotImplementedError("fade command is not implemented yet")
-
-
- @controlServerEntrypoint
- def setPipVideo(self, videosource):
- """switch video-source in the PIP to the selected video"""
- raise NotImplementedError("pip commands are not implemented yet")
-
-
- @controlServerEntrypoint
- def fadePipVideo(self, videosource):
- """fade video-source in the PIP to the selected video"""
- raise NotImplementedError("pip commands are not implemented yet")
-
-
- class PipPlacements:
- """enumeration of possible PIP-Placements"""
- TopLeft, TopRight, BottomLeft, BottomRight = range(4)
-
-
- @controlServerEntrypoint
- def setPipPlacement(self, placement):
- """place PIP in the selected position"""
- assert(isinstance(placement, PipPlacements))
- raise NotImplementedError("pip commands are not implemented yet")
-
-
- @controlServerEntrypoint
- def setPipStatus(self, enabled):
- """show or hide PIP"""
- raise NotImplementedError("pip commands are not implemented yet")
-
-
- @controlServerEntrypoint
- def fadePipStatus(self, enabled):
- """fade PIP in our out"""
- raise NotImplementedError("pip commands are not implemented yet")
-
-
- class StreamContents:
- """enumeration of possible PIP-Placements"""
- Live, Pause, NoStream = range(3)
-
-
- @controlServerEntrypoint
- def selectStreamContent(self, content):
- """switch the livestream-content between selected mixer output, pause-image or nostream-imag"""
- assert(isinstance(content, StreamContents))
- raise NotImplementedError("pause/nostream switching is not implemented yet")
diff --git a/voctocore/voctocore.py b/voctocore/voctocore.py
index 454a09d..8c15463 100755
--- a/voctocore/voctocore.py
+++ b/voctocore/voctocore.py
@@ -1,31 +1,84 @@
#!/usr/bin/python3
-import gi, signal
+import gi, signal, logging, sys
-# import GStreamer and GTK-Helper classes
+# import GStreamer and GLib-Helper classes
gi.require_version('Gst', '1.0')
-from gi.repository import GLib, Gst, Gtk, GObject
+from gi.repository import GLib, Gst, GObject
+
+# check min-version
+minGst = (1, 4)
+minPy = (3, 0)
+
+if Gst.version() < minGst:
+ raise Exception("GStreamer version", Gst.version(), 'is too old, at least', minGst, 'is required')
+
+if sys.version_info < minPy:
+ raise Exception("Python version", sys.version_info, 'is too old, at least', minPy, 'is required')
+
# init GObject before importing local classes
GObject.threads_init()
Gst.init(None)
# import local classes
-from lib.videomix import Videomix
+from lib.pipeline import Pipeline
from lib.controlserver import ControlServer
-class Main:
+# main class
+class Voctocore:
+ log = logging.getLogger('Voctocore')
+
def __init__(self):
+ self.log.debug('creating GObject-MainLoop')
+ self.mainloop = GObject.MainLoop()
+
# initialize subsystem
- self.videomix = Videomix()
- self.controlserver = ControlServer(self.videomix)
+ self.log.debug('creating Video-Pipeline')
+ self.pipeline = Pipeline()
+
+ self.log.debug('creating ControlServer')
+ self.controlserver = ControlServer(self.pipeline)
+
+ def run(self):
+ self.log.info('running Video-Pipeline')
+ self.pipeline.run()
+
+ self.log.info('running GObject-MainLoop')
+ self.mainloop.run()
+
+ def kill(self):
+ self.log.info('quitting Video-Pipeline')
+ self.pipeline.quit()
+
+ self.log.info('quitting GObject-MainLoop')
+ self.mainloop.quit()
+
+ def on_eos(self, bus, msg):
+ self.log.warning('received EOS-Signal on the Video-Bus from Element %s. This shouldn\'t happen if the program is not terminating right now', msg.src)
+ self.kill()
+
+ def on_error(self, bus, msg):
+ err = msg.parse_error()
+ self.log.error('received Error-Signal on the Video-Bus from Element %s: %s', msg.src, err[1])
+ self.kill()
+
+
+# run main class
+def main(argv):
+ # configure logging
+ logging.basicConfig(level=logging.DEBUG,
+ format='%(levelname)8s %(name)s: %(message)s')
-def runmain():
# make killable by ctrl-c
+ logging.debug('setting SIGINT handler')
signal.signal(signal.SIGINT, signal.SIG_DFL)
- # start main-class and main-loop
- start = Main()
- Gtk.main()
+ # init main-class and main-loop
+ logging.debug('initializing Voctocore')
+ voctocore = Voctocore()
+
+ logging.debug('running Voctocore')
+ voctocore.run()
if __name__ == '__main__':
- runmain()
+ main(sys.argv)