author     MaZderMind <github@mazdermind.de>  2015-05-10 16:28:38 +0200
committer  MaZderMind <github@mazdermind.de>  2015-05-10 16:28:38 +0200
commit     b9ec27da68ddce2afc94454e92a78fe262812a0e (patch)
tree       eb917f731b6d199c6a76b1cf541d6f6af29ca6fd
parent     f8a0b46028b9c902da7ce67590f9ce952ea44544 (diff)
Implementing a Video-Switching-Server is easy … if you know what you're doing
-rw-r--r--  voctocore/README.md                       11
-rw-r--r--  voctocore/default-config.ini              19
-rw-r--r--  voctocore/lib/args.py                     15
-rw-r--r--  voctocore/lib/config.py                    2
-rw-r--r--  voctocore/lib/distributor.py              33
-rw-r--r--  voctocore/lib/failaudiosrc.py             25
-rw-r--r--  voctocore/lib/failvideosrc.py             34
-rw-r--r--  voctocore/lib/pipeline.py                449
-rw-r--r--  voctocore/lib/quadmix.py                 190
-rw-r--r--  voctocore/lib/shmsrc.py                  125
-rw-r--r--  voctocore/lib/videomix.py                  2
-rw-r--r--  voctocore/lib/videosrc.py                 92
-rw-r--r--  voctocore/lib/videosrcmirror.py           66
-rwxr-xr-x  voctocore/scripts/play-cam1-mirror.sh      5
-rwxr-xr-x  voctocore/scripts/test-audio-cam1.sh       6
-rwxr-xr-x  voctocore/scripts/test-av-sync-cam1.sh    23
-rwxr-xr-x  voctocore/scripts/test-video-cam1.sh       7
-rwxr-xr-x  voctocore/scripts/videosrc1.sh            30
-rwxr-xr-x  voctocore/scripts/videosrc2.sh            29
-rwxr-xr-x  voctocore/voctocore.py                    44
20 files changed, 279 insertions, 928 deletions
diff --git a/voctocore/README.md b/voctocore/README.md
index e69de29..df6abf8 100644
--- a/voctocore/README.md
+++ b/voctocore/README.md
@@ -0,0 +1,11 @@
+````
+                             /-> Encoder -> PreviewPort 12000
+                /-> VideoMix -> OutputPort 11000
+10000… VideoSrc --> MirrorPort 13000…
+                \-> Encoder -> PreviewPort 14000…
+
+                             /-> Encoder -> PreviewPort 22000
+                /-> AudioMix --> OutputPort 21000
+20000… AudioSrc --> MirrorPort 23000…
+                \-> Encoder -> PreviewPort 24000…
+````
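The diagram maps each source index to a fixed pair of TCP ports. As a rough illustration, a hypothetical helper (not part of this commit; the port bases come straight from the diagram):

```python
# Hypothetical helper, not part of this commit: the per-source port
# arithmetic sketched in the README diagram above (video only).
def video_ports(idx):
    return {
        'input':  10000 + idx,  # sources push gdp-payloaded video here
        'mirror': 13000 + idx,  # clients pull an unmodified copy here
        # 11000/12000 (mix output and preview) and 14000+idx (per-source
        # preview) appear in the diagram but are not implemented yet.
    }

video_ports(0)  # cam1 -> {'input': 10000, 'mirror': 13000}
```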
diff --git a/voctocore/default-config.ini b/voctocore/default-config.ini
index b40f5d7..51f9c83 100644
--- a/voctocore/default-config.ini
+++ b/voctocore/default-config.ini
@@ -1,11 +1,14 @@
-[sources]
-video=cam1,cam2,grabber
-audio=cam1,cam2
+[mix]
+videocaps=video/x-raw,format=I420,width=1280,height=720,framerate=25/1,pixel-aspect-ratio=1/1
+audiocaps=audio/x-raw,format=S16LE,channels=2,layout=interleaved,rate=48000
-socketpath=/tmp/voctomix-sockets
+[sources]
+; tcp-ports will be 10000,10001
+; video=cam1,cam2,grabber
+video=cam1
-videocaps=video/x-raw,format=RGBx,width=1280,height=720,framerate=25/1,pixel-aspect-ratio=1/1
-audiocaps=audio/x-raw,format=S16LE,layout=interleaved,rate=48000,channels=2
+; tcp-ports will be 20000,20001
+audio=cam1,cam2
[pause]
;image=/video/pause.png
@@ -17,8 +20,4 @@ image=/video/nosteam.png
;video=/video/nosteam.m4v
audio=/video/dudel.m4a
-[mix]
-monitorcaps=video/x-raw,width=640,height=360
-outputcaps=video/x-raw,width=1280,height=720
-
[client]
diff --git a/voctocore/lib/args.py b/voctocore/lib/args.py
new file mode 100644
index 0000000..6699679
--- /dev/null
+++ b/voctocore/lib/args.py
@@ -0,0 +1,15 @@
+import argparse
+
+__all__ = ['Args']
+
+parser = argparse.ArgumentParser(description='Voctocore')
+parser.add_argument('-v', '--verbose', action='store_true',
+ help="Also print INFO and DEBUG messages.")
+
+parser.add_argument('-c', '--color', action='store', choices=['auto', 'always', 'never'], default='auto',
+ help="Control the use of colors in the Log-Output")
+
+parser.add_argument('-i', '--config-ini', action='store',
+ help="Load a custom config.ini-File")
+
+Args = parser.parse_args()
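Because parse_args() runs at import time, other modules can simply import the resulting namespace. A minimal usage sketch (mirroring how voctocore.py below reads the flags):

```python
# Sketch only: consuming the already-parsed command-line flags.
from lib.args import Args

if Args.verbose:
    print('INFO/DEBUG logging requested')
print('color mode:', Args.color)  # 'auto', 'always' or 'never'
```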
diff --git a/voctocore/lib/config.py b/voctocore/lib/config.py
index 76f67b0..88e713c 100644
--- a/voctocore/lib/config.py
+++ b/voctocore/lib/config.py
@@ -1,6 +1,8 @@
import os.path
from configparser import SafeConfigParser
+__all__ = ['Config']
+
def getlist(self, section, option):
return [x.strip() for x in self.get(section, option).split(',')]
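getlist() turns a comma-separated option into a list of stripped strings; with the [sources] section from default-config.ini above, a quick sketch of what it returns:

```python
# Sketch: values as configured in default-config.ini above.
from lib.config import Config

Config.getlist('sources', 'video')  # -> ['cam1']
Config.getlist('sources', 'audio')  # -> ['cam1', 'cam2']
```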
diff --git a/voctocore/lib/distributor.py b/voctocore/lib/distributor.py
deleted file mode 100644
index 8282839..0000000
--- a/voctocore/lib/distributor.py
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/usr/bin/python3
-import time, logging
-from gi.repository import GLib, Gst
-
-from lib.config import Config
-
-class TimesTwoDistributor(Gst.Bin):
- log = logging.getLogger('TimesTwoDistributor')
-
- def __init__(self):
- super().__init__()
-
- self.tee = Gst.ElementFactory.make('tee', None)
- self.queue_a = Gst.ElementFactory.make('queue', 'queue-a')
- self.queue_b = Gst.ElementFactory.make('queue', 'queue-b')
-
- self.add(self.tee)
- self.add(self.queue_a)
- self.add(self.queue_b)
-
- self.tee.link(self.queue_a)
- self.tee.link(self.queue_b)
-
- # Add Ghost Pads
- self.add_pad(
- Gst.GhostPad.new('sink', self.tee.get_static_pad('sink'))
- )
- self.add_pad(
- Gst.GhostPad.new('src_a', self.queue_a.get_static_pad('src'))
- )
- self.add_pad(
- Gst.GhostPad.new('src_b', self.queue_b.get_static_pad('src'))
- )
diff --git a/voctocore/lib/failaudiosrc.py b/voctocore/lib/failaudiosrc.py
deleted file mode 100644
index 0ce885e..0000000
--- a/voctocore/lib/failaudiosrc.py
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/python3
-import time, logging
-from gi.repository import GLib, Gst
-
-from lib.config import Config
-
-class FailAudioSrc(Gst.Bin):
- log = logging.getLogger('FailAudioSrc')
-
- def __init__(self, idx, name):
- super().__init__()
-
- # Create elements
- self.failsrc = Gst.ElementFactory.make('audiotestsrc', None)
-
- # Add elements to Bin
- self.add(self.failsrc)
-
- # Set properties
- self.failsrc.set_property('freq', 400+idx*50)
-
- # Add Ghost Pads
- self.add_pad(
- Gst.GhostPad.new('src', self.failsrc.get_static_pad('src'))
- )
diff --git a/voctocore/lib/failvideosrc.py b/voctocore/lib/failvideosrc.py
deleted file mode 100644
index 500603a..0000000
--- a/voctocore/lib/failvideosrc.py
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/python3
-import time, logging
-from gi.repository import GLib, Gst
-
-from lib.config import Config
-
-class FailVideoSrc(Gst.Bin):
- log = logging.getLogger('FailVideoSrc')
- colors = [
- 0xffff0000, # red
- 0xff00ff00, # green
- 0xff0000ff, # blue
- 0xffffff00, # yellow
- 0xff00ffff, # cyan
- 0xffff00ff, # magenta
- 0xffffffff, # white
- ]
-
- def __init__(self, idx, name):
- super().__init__()
-
- # Create elements
- self.failsrc = Gst.ElementFactory.make('videotestsrc', None)
-
- # Add elements to Bin
- self.add(self.failsrc)
-
- # Set properties
- self.failsrc.set_property('foreground-color', self.colors[idx % len(self.colors)])
-
- # Add Ghost Pads
- self.add_pad(
- Gst.GhostPad.new('src', self.failsrc.get_static_pad('src'))
- )
diff --git a/voctocore/lib/pipeline.py b/voctocore/lib/pipeline.py
index 6267e9e..6c207fe 100644
--- a/voctocore/lib/pipeline.py
+++ b/voctocore/lib/pipeline.py
@@ -1,444 +1,43 @@
#!/usr/bin/python3
-import os, errno, time, logging
-from gi.repository import GLib, Gst
+import logging
+from gi.repository import Gst
# import controlserver annotation
from lib.controlserver import controlServerEntrypoint
# import library components
from lib.config import Config
-from lib.quadmix import QuadMix
-from lib.videomix import VideoMix
-from lib.audiomix import AudioMix
-from lib.distributor import TimesTwoDistributor
-from lib.shmsrc import FailsafeShmSrc
-from lib.failvideosrc import FailVideoSrc
-from lib.failaudiosrc import FailAudioSrc
+from lib.videosrc import VideoSrc
+from lib.videosrcmirror import VideoSrcMirror
-class Pipeline(Gst.Pipeline):
+class Pipeline(object):
"""mixing, streaming and encoding pipeline constuction and control"""
log = logging.getLogger('Pipeline')
- videonames = []
- audionames = []
- def __init__(self):
- super().__init__()
-
- self.log.debug('Creating Video-Mixer')
- # create audio and video mixer
- self.quadmixer = QuadMix()
- self.add(self.quadmixer)
-
- self.videomixer = VideoMix()
- self.add(self.videomixer)
-
- self.audiomixer = AudioMix()
- self.add(self.audiomixer)
-
- # read the path where the shm-control-sockets are located and ensure it exists
- socketpath = Config.get('sources', 'socketpath')
- self.log.info('Ensuring the configured socketpath exists: %s', socketpath)
- try:
- os.makedirs(socketpath)
- except OSError as exception:
- if exception.errno != errno.EEXIST:
- raise
-
- self.videonames = Config.getlist('sources', 'video')
- self.audionames = Config.getlist('sources', 'audio')
-
- caps = Gst.Caps.from_string(Config.get('sources', 'videocaps'))
- self.log.debug('parsing videocaps from config: %s', caps.to_string())
-
- for idx, name in enumerate(self.videonames):
- socket = os.path.join(socketpath, 'v-'+name)
-
- self.log.info('Creating video-source "%s" at socket-path %s', name, socket)
- sourcebin = FailsafeShmSrc(socket, caps, FailVideoSrc(idx, name))
- self.add(sourcebin)
-
- distributor = TimesTwoDistributor()
- self.add(distributor)
- sourcebin.link(distributor)
-
- mixerpad = self.quadmixer.request_mixer_pad()
- distributor.get_static_pad('src_a').link(mixerpad)
-
- mixerpad = self.videomixer.request_mixer_pad()
- distributor.get_static_pad('src_b').link(mixerpad)
-
-
- caps = Gst.Caps.from_string(Config.get('sources', 'audiocaps'))
- self.log.debug('parsing videocaps from config: %s', caps.to_string())
-
- for idx, name in enumerate(self.audionames):
- socket = os.path.join(socketpath, 'a-'+name)
-
- self.log.info('Creating audio-source "%s" at socket-path %s', name, socket)
- sourcebin = FailsafeShmSrc(socket, caps, FailAudioSrc(idx, name))
- self.add(sourcebin)
-
- mixerpad = self.audiomixer.request_mixer_pad()
- sourcebin.get_static_pad('src').link(mixerpad)
-
- # tell the quadmix that this were all sources and no more sources will come after this
- self.quadmixer.finalize()
-
- self.quadmixer.set_active(0)
- self.videomixer.set_active(0)
- self.audiomixer.set_active(0)
-
- self.audioconv = Gst.ElementFactory.make('audioconvert', 'audioconv')
- self.audioenc = Gst.ElementFactory.make('avenc_mp2', 'audioenc')
-
- self.videoconv = Gst.ElementFactory.make('videoconvert', 'videoconv')
- self.videoenc = Gst.ElementFactory.make('avenc_mpeg2video', 'videoenc')
-
- self.mux = Gst.ElementFactory.make('mpegtsmux', 'mux')
- self.sink = Gst.ElementFactory.make('filesink', 'sink')
-
- self.add(self.audioconv)
- self.add(self.audioenc)
- self.add(self.videoconv)
- self.add(self.videoenc)
-
- self.add(self.mux)
- self.add(self.sink)
-
- self.videomixer.link(self.videoconv)
- self.videoconv.link(self.videoenc)
-
- self.audiomixer.link(self.audioconv)
- self.audioconv.link(self.audioenc)
-
- self.videoenc.link(self.mux)
- self.audioenc.link(self.mux)
-
- self.mux.link(self.sink)
-
- self.sink.set_property('location', '/home/peter/test.ts')
-
-
- self.quadmixsink = Gst.ElementFactory.make('autovideosink', 'quadmixsink')
- self.quadmixsink.set_property('sync', False)
- self.add(self.quadmixsink)
- self.quadmixer.link(self.quadmixsink)
-
- def run(self):
- self.set_state(Gst.State.PAUSED)
- time.sleep(0.5)
- self.set_state(Gst.State.PLAYING)
-
- def quit(self):
- self.set_state(Gst.State.NULL)
-
-
-
-
- # # collection of video-sources to connect to the quadmix
- # quadmixSources = []
-
- # # create camera sources
- # for camberabin in self.createDummyCamSources():
- # # link camerasource to audiomixer
- # camberabin.get_by_name('audio_src').link(self.pipeline.get_by_name('liveaudio'))
-
- # # inject a ×2 distributor and link one end to the live-mixer
- # distributor = self.createDistributor(camberabin.get_by_name('video_src'), camberabin.get_name())
- # distributor.get_by_name('a').link(self.pipeline.get_by_name('livevideo'))
-
- # # collect the other end to add it later to the quadmix
- # quadmixSources.append(distributor.get_by_name('b'))
-
- # # TODO: generate pause & slides with another generator here which only
- # # yields if the respective files are present and which only have a video-pad
-
- # # add all video-sources to the quadmix-monitor-screen
- # self.addVideosToQuadmix(quadmixSources, self.pipeline.get_by_name('quadmix'))
-
- # # initialize to known defaults
- # # TODO: make configurable
- # self.switchVideo(0)
- # self.switchAudio(0)
-
- # Gst.debug_bin_to_dot_file(self.pipeline, Gst.DebugGraphDetails.ALL, 'test')
- # self.pipeline.set_state(Gst.State.PLAYING)
-
-
-
-
-
-
- # def createMixer(self):
- # """create audio and video mixer"""
- # # create mixer-pipeline from string
- # mixerbin = Gst.parse_bin_from_description("""
- # videomixer name=livevideo ! autovideosink
- # input-selector name=liveaudio ! autoaudiosink
-
- # videotestsrc pattern="solid-color" foreground-color=0x808080 ! capsfilter name=filter ! videomixer name=quadmix ! autovideosink
- # """, False)
-
- # # define caps for the videotestsrc which generates the background-color for the quadmix
- # bgcaps = Gst.Caps.new_empty_simple('video/x-raw')
- # bgcaps.set_value('width', round(self.monitorSize[0]))
- # bgcaps.set_value('height', round(self.monitorSize[1]))
- # mixerbin.get_by_name('filter').set_property('caps', bgcaps)
-
- # # name the bin, add and return it
- # mixerbin.set_name('mixerbin')
- # self.pipeline.add(mixerbin)
- # return mixerbin
-
- # def addVideosToQuadmix(self, videosources, quadmix):
- # """add all avaiable videosources to the quadmix"""
- # count = len(videosources)
-
- # # coordinate of the cell where we place the next video
- # place = [0, 0]
-
- # # number of cells in the quadmix-monitor
- # grid = [0, 0]
- # grid[0] = math.ceil(math.sqrt(count))
- # grid[1] = math.ceil(count / grid[0])
-
- # # size of each cell in the quadmix-monitor
- # cellSize = (
- # self.monitorSize[0] / grid[0],
- # self.monitorSize[1] / grid[1]
- # )
-
- # print("showing {} videosources in a {}×{} grid in a {}×{} px window, which gives cells of {}×{} px per videosource".format(
- # count, grid[0], grid[1], self.monitorSize[0], self.monitorSize[1], cellSize[0], cellSize[1]))
-
- # # iterate over all video-sources
- # for idx, videosource in enumerate(videosources):
- # # generate a pipeline for this videosource which
- # # - scales the video to the request
- # # - remove n px of the video (n = 5 if the video is highlighted else 0)
- # # - add a colored border of n px of the video (n = 5 if the video is highlighted else 0)
- # # - overlay the index of the video as text in the top left corner
- # # - known & named output
- # previewbin = Gst.parse_bin_from_description("""
- # videoscale name=in !
- # capsfilter name=caps !
- # videobox name=crop top=0 left=0 bottom=0 right=0 !
- # videobox fill=red top=-0 left=-0 bottom=-0 right=-0 name=add !
- # textoverlay color=0xFFFFFFFF halignment=left valignment=top xpad=10 ypad=5 font-desc="sans 35" name=text !
- # identity name=out
- # """, False)
-
- # # name the bin and add it
- # previewbin.set_name('previewbin-{}'.format(idx))
- # self.pipeline.add(previewbin)
- # self.previewbins.append(previewbin)
-
- # # set the overlay-text
- # previewbin.get_by_name('text').set_property('text', str(idx))
-
- # # query the video-source caps and extract its size
- # caps = videosource.get_static_pad('src').query_caps(None)
- # capsstruct = caps.get_structure(0)
- # srcSize = (
- # capsstruct.get_int('width')[1],
- # capsstruct.get_int('height')[1],
- # )
-
- # # calculate the ideal scale factor and scale the sizes
- # f = max(srcSize[0] / cellSize[0], srcSize[1] / cellSize[1])
- # scaleSize = (
- # srcSize[0] / f,
- # srcSize[1] / f,
- # )
-
- # # calculate the top/left coordinate
- # coord = (
- # place[0] * cellSize[0] + (cellSize[0] - scaleSize[0]) / 2,
- # place[1] * cellSize[1] + (cellSize[1] - scaleSize[1]) / 2,
- # )
-
- # print("placing videosource {} of size {}×{} scaled by {} to {}×{} in a cell {}×{} px cell ({}/{}) at position ({}/{})".format(
- # idx, srcSize[0], srcSize[1], f, scaleSize[0], scaleSize[1], cellSize[0], cellSize[1], place[0], place[1], coord[0], coord[1]))
-
- # # link the videosource to the input of the preview-bin
- # videosource.link(previewbin.get_by_name('in'))
-
- # # create and set the caps for the preview-scaler
- # scalecaps = Gst.Caps.new_empty_simple('video/x-raw')
- # scalecaps.set_value('width', round(scaleSize[0]))
- # scalecaps.set_value('height', round(scaleSize[1]))
- # previewbin.get_by_name('caps').set_property('caps', scalecaps)
-
- # # request a pad from the quadmixer and configure x/y position
- # sinkpad = quadmix.get_request_pad('sink_%u')
- # sinkpad.set_property('xpos', round(coord[0]))
- # sinkpad.set_property('ypos', round(coord[1]))
-
- # # link the output of the preview-bin to the mixer
- # previewbin.get_by_name('out').link(quadmix)
-
- # # increment grid position
- # place[0] += 1
- # if place[0] >= grid[0]:
- # place[1] += 1
- # place[0] = 0
-
- # def createDistributor(self, videosource, name):
- # """create a simple ×2 distributor"""
- # distributor = Gst.parse_bin_from_description("""
- # tee name=t
- # t. ! queue name=a
- # t. ! queue name=b
- # """, False)
-
- # # set a name and add to pipeline
- # distributor.set_name('distributor({0})'.format(name))
- # self.pipeline.add(distributor)
-
- # # link input to the tee
- # videosource.link(distributor.get_by_name('t'))
- # return distributor
-
- # def createDummyCamSources(self):
- # """create test-video-sources from files or urls"""
-
- # # TODO make configurable
- # uris = ('file:///home/peter/122.mp4', 'file:///home/peter/10025.mp4',)
- # for idx, uri in enumerate(uris):
- # # create a bin for a simulated camera input
- # # force the input resolution to 1024x576 because that way the following elements
- # # in the pipeline cam know the size even if the file is not yet loaded. the quadmixer
- # # is not resize-capable
- # camberabin = Gst.parse_bin_from_description("""
- # uridecodebin name=input
- # input. ! videoconvert ! videoscale ! videorate ! video/x-raw,width=1024,height=576,framerate=25/1 ! identity name=video_src
- # input. ! audioconvert name=audio_src
- # """, False)
-
- # # set name and uri
- # camberabin.set_name('dummy-camberabin({0})'.format(uri))
- # camberabin.get_by_name('input').set_property('uri', uri)
-
- # # add to pipeline and pass the bin upstream
- # self.pipeline.add(camberabin)
- # yield camberabin
-
-
- # def createCamSources(self):
- # """create real-video-sources from the bmd-drivers"""
-
- # # TODO make number of installed cams configurable
- # for cam in range(2):
- # # create a bin for camera input
- # camberabin = Gst.parse_bin_from_description("""
- # decklinksrc name=input input=sdi input-mode=1080p25
- # input. ! videoconvert ! videoscale ! videorate ! video/x-raw,width=1920,height=1080,framerate=25/1 ! identity name=video_src
- # input. ! audioconvert name=audio_src
- # """, False)
-
- # # set name and subdevice
- # camberabin.set_name('camberabin({0})'.format(cam))
- # camberabin.get_by_name('input').set_property('subdevice', cam)
-
- # # add to pipeline and pass the bin upstream
- # self.pipeline.add(camberabin)
- # yield camberabin
-
-
-
-
- ### below are access-methods for the ControlServer
- @controlServerEntrypoint
- def status(self):
- """System Status Query"""
- raise NotImplementedError("status command is not implemented yet")
-
- @controlServerEntrypoint
- def numAudioSources(self):
- """return number of available audio sources"""
- raise NotImplementedError("audio is not implemented yet")
-
-
- @controlServerEntrypoint
- def switchAudio(self, audiosource):
- """switch audio to the selected audio"""
- idx = int(audiosource)
- if idx >= len(self.audionames):
- return 'unknown audio-source: %s' % (audiosource)
-
- self.log.info("switching mixer to audio-source %u", idx)
- self.audiomixer.set_active(idx)
-
-
-
- @controlServerEntrypoint
- def numVideoSources(self):
- """return number of available video sources"""
- livevideo = self.pipeline.get_by_name('livevideo')
- return str(len(self.videonames))
-
-
- @controlServerEntrypoint
- def switchVideo(self, videosource):
- """switch video to the selected video"""
- idx = int(videosource)
- if idx >= len(self.videonames):
- return 'unknown video-source: %s' % (videosource)
-
- self.log.info("switching mixer to video-source %u", idx)
- self.quadmixer.set_active(idx)
- self.videomixer.set_active(idx)
-
-
- @controlServerEntrypoint
- def fadeVideo(self, videosource):
- """fade video to the selected video"""
- raise NotImplementedError("fade command is not implemented yet")
-
-
- @controlServerEntrypoint
- def setPipVideo(self, videosource):
- """switch video-source in the PIP to the selected video"""
- raise NotImplementedError("pip commands are not implemented yet")
-
-
- @controlServerEntrypoint
- def fadePipVideo(self, videosource):
- """fade video-source in the PIP to the selected video"""
- raise NotImplementedError("pip commands are not implemented yet")
-
-
- class PipPlacements:
- """enumeration of possible PIP-Placements"""
- TopLeft, TopRight, BottomLeft, BottomRight = range(4)
-
-
- @controlServerEntrypoint
- def setPipPlacement(self, placement):
- """place PIP in the selected position"""
- assert(isinstance(placement, PipPlacements))
- raise NotImplementedError("pip commands are not implemented yet")
+ vsources = []
+ vmirrors = []
+ def __init__(self):
+ # self.log.debug('Creating A/V-Mixer')
+ # self.videomixer = VideoMix()
+ # self.add(self.videomixer)
- @controlServerEntrypoint
- def setPipStatus(self, enabled):
- """show or hide PIP"""
- raise NotImplementedError("pip commands are not implemented yet")
+ # self.audiomixer = AudioMix()
+ # self.add(self.audiomixer)
+ caps = Config.get('mix', 'videocaps')
+ self.log.info('Video-Caps configured to: %s', caps)
- @controlServerEntrypoint
- def fadePipStatus(self, enabled):
- """fade PIP in our out"""
- raise NotImplementedError("pip commands are not implemented yet")
+ for idx, name in enumerate(Config.getlist('sources', 'video')):
+ port = 10000 + idx
+ self.log.info('Creating Video-Source %s at tcp-port %u', name, port)
+ source = VideoSrc(name, port, caps)
+ self.vsources.append(source)
- class StreamContents:
- """enumeration of possible PIP-Placements"""
- Live, Pause, NoStream = range(3)
+ port = 13000 + idx
+ self.log.info('Creating Mirror-Output for Video-Source %s at tcp-port %u', name, port)
- @controlServerEntrypoint
- def selectStreamContent(self, content):
- """switch the livestream-content between selected mixer output, pause-image or nostream-imag"""
- assert(isinstance(content, StreamContents))
- raise NotImplementedError("pause/nostream switching is not implemented yet")
+ mirror = VideoSrcMirror(name, port, caps)
+ self.vmirrors.append(mirror)
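With the commented-out video=cam1,cam2,grabber example from default-config.ini, the loop above would allocate ports as follows (illustrative sketch only, using the same arithmetic as the code):

```python
# Illustrative only: the same 10000+idx / 13000+idx arithmetic as above.
for idx, name in enumerate(['cam1', 'cam2', 'grabber']):
    print('%-7s input: %u  mirror: %u' % (name, 10000 + idx, 13000 + idx))
# cam1    input: 10000  mirror: 13000
# cam2    input: 10001  mirror: 13001
# grabber input: 10002  mirror: 13002
```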
diff --git a/voctocore/lib/quadmix.py b/voctocore/lib/quadmix.py
deleted file mode 100644
index 8d69304..0000000
--- a/voctocore/lib/quadmix.py
+++ /dev/null
@@ -1,190 +0,0 @@
-#!/usr/bin/python3
-import math, logging
-from gi.repository import GLib, Gst
-
-from lib.helper import iteratorHelper
-from lib.config import Config
-
-class QuadMix(Gst.Bin):
- log = logging.getLogger('QuadMix')
- previewbins = []
- mixerpads = []
-
- def __init__(self):
- super().__init__()
-
- caps = Gst.Caps.from_string(Config.get('mix', 'monitorcaps'))
- self.log.debug('parsing monitorcaps from config: %s', caps.to_string())
- struct = caps.get_structure(0)
-
- self.monitorSize = [struct.get_int('width')[1], struct.get_int('height')[1]]
-
- self.bgsrc = Gst.ElementFactory.make('videotestsrc', 'bgsrc')
- self.mixer = Gst.ElementFactory.make('videomixer', 'mixer')
- self.scale = Gst.ElementFactory.make('videoscale', 'scale')
-
- self.add(self.bgsrc)
- self.add(self.mixer)
- self.add(self.scale)
-
- self.bgsrc.link_filtered(self.mixer, caps)
- self.mixer.link_filtered(self.scale, caps)
-
- self.bgsrc.set_property('pattern', 'solid-color')
- self.bgsrc.set_property('foreground-color', 0x808080)
-
- self.add_pad(
- Gst.GhostPad.new('src', self.scale.get_static_pad('src'))
- )
-
- def request_mixer_pad(self):
- previewbin = QuadMixPreview()
- self.add(previewbin)
- self.previewbins.append(previewbin)
-
- srcpad = previewbin.get_static_pad('src')
- sinkpad = previewbin.get_static_pad('sink')
-
- mixerpad = self.mixer.get_request_pad('sink_%u')
- self.mixerpads.append(mixerpad)
- srcpad.link(mixerpad)
-
- self.log.info('requested mixerpad %u (named %s)', len(self.mixerpads) - 1, mixerpad.get_name())
- ghostpad = Gst.GhostPad.new(mixerpad.get_name(), sinkpad)
- self.add_pad(ghostpad)
- return ghostpad
-
- def finalize(self):
- self.log.debug('all sources linked, calculating layout')
-
- # number of placed sources
- count = len(self.previewbins)
-
- # coordinate of the cell where we place the next video
- place = [0, 0]
-
- # number of cells in the quadmix-monitor
- grid = [0, 0]
- grid[0] = math.ceil(math.sqrt(count))
- grid[1] = math.ceil(count / grid[0])
-
- # size of each cell in the quadmix-monitor
- cellSize = (
- self.monitorSize[0] / grid[0],
- self.monitorSize[1] / grid[1]
- )
-
- # report calculation results
- self.log.info('showing %u videosources in a %u×%u grid in a %u×%u px window, which gives cells of %u×%u px per videosource',
- count, grid[0], grid[1], self.monitorSize[0], self.monitorSize[1], cellSize[0], cellSize[1])
-
- # iterate over all previewbins
- for idx, previewbin in enumerate(self.previewbins):
- # request srcpad to query videosize and aspect from it
- srcpad = previewbin.get_static_pad('src')
-
- # select the mixerpad responsible for this previewbin from the videomixer
- mixerpad = self.mixerpads[idx]
-
- # query the video-source caps and extract its size
- caps = srcpad.query_caps(None)
- capsstruct = caps.get_structure(0)
- srcSize = (
- capsstruct.get_int('width')[1],
- capsstruct.get_int('height')[1],
- )
-
- # calculate the ideal scale factor and scale the sizes
- f = max(srcSize[0] / cellSize[0], srcSize[1] / cellSize[1])
- scaleSize = (
- srcSize[0] / f,
- srcSize[1] / f,
- )
-
- # calculate the top/left coordinate
- coord = (
- place[0] * cellSize[0] + (cellSize[0] - scaleSize[0]) / 2,
- place[1] * cellSize[1] + (cellSize[1] - scaleSize[1]) / 2,
- )
-
- self.log.info('placing mixerpad %u of size %u×%u scaled by %u to %u×%u in a cell %u×%u px cell (%u/%u) at position (%u/%u)',
- idx, srcSize[0], srcSize[1], f, scaleSize[0], scaleSize[1], cellSize[0], cellSize[1], place[0], place[1], coord[0], coord[1])
-
- # request a pad from the quadmixer and configure x/y position
- mixerpad.set_property('xpos', round(coord[0]))
- mixerpad.set_property('ypos', round(coord[1]))
-
- previewbin.set_size(scaleSize)
- previewbin.set_idx(idx)
-
- # increment grid position
- place[0] += 1
- if place[0] >= grid[0]:
- place[1] += 1
- place[0] = 0
-
- def set_active(self, target):
- self.log.info('setting videosource %u active, disabling other', target)
- for idx, previewbin in enumerate(self.previewbins):
- previewbin.set_active(target == idx)
-
-class QuadMixPreview(Gst.Bin):
- log = logging.getLogger('QuadMixPreview')
- strokeWidth = 5
-
- def __init__(self):
- super().__init__()
-
- self.scale = Gst.ElementFactory.make('videoscale', 'scale')
- self.caps = Gst.ElementFactory.make('capsfilter', 'caps')
- self.cropbox = Gst.ElementFactory.make('videobox', 'cropbox')
- self.strokebox = Gst.ElementFactory.make('videobox', 'strokebox')
- self.textoverlay = Gst.ElementFactory.make('textoverlay', 'textoverlay')
-
- self.add(self.scale)
- self.add(self.caps)
- self.add(self.cropbox)
- self.add(self.strokebox)
- self.add(self.textoverlay)
-
- self.strokebox.set_property('fill', 'green')
-
- self.textoverlay.set_property('color', 0xFFFFFFFF)
- self.textoverlay.set_property('halignment', 'left')
- self.textoverlay.set_property('valignment', 'top')
- self.textoverlay.set_property('xpad', 10)
- self.textoverlay.set_property('ypad', 5)
- self.textoverlay.set_property('font-desc', 'sans 35')
-
- self.scale.link(self.caps)
- self.caps.link(self.cropbox)
- self.cropbox.link(self.strokebox)
- self.strokebox.link(self.textoverlay)
-
- self.set_active(False)
-
- # Add Ghost Pads
- self.add_pad(
- Gst.GhostPad.new('sink', self.scale.get_static_pad('sink'))
- )
- self.add_pad(
- Gst.GhostPad.new('src', self.textoverlay.get_static_pad('src'))
- )
-
- def set_size(self, scaleSize):
- caps = Gst.Caps.new_empty_simple('video/x-raw')
- caps.set_value('width', round(scaleSize[0]))
- caps.set_value('height', round(scaleSize[1]))
- self.caps.set_property('caps', caps)
-
- def set_idx(self, idx):
- self.textoverlay.set_property('text', str(idx))
-
- def set_active(self, active):
- self.log.info("switching active-state to %u", active)
- for side in ('top', 'left', 'right', 'bottom'):
- self.cropbox.set_property(side, self.strokeWidth if active else 0)
- self.strokebox.set_property(side, -self.strokeWidth if active else 0)
-
- def set_color(self, color):
- self.strokebox.set_property('fill', color)
diff --git a/voctocore/lib/shmsrc.py b/voctocore/lib/shmsrc.py
deleted file mode 100644
index 4e3c402..0000000
--- a/voctocore/lib/shmsrc.py
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/usr/bin/python3
-import time, logging
-from gi.repository import GLib, Gst
-
-from lib.config import Config
-
-class FailsafeShmSrc(Gst.Bin):
- log = logging.getLogger('FailsafeShmSrc')
- last_buffer_arrived = 0
- last_restart_retry = 0
- is_in_failstate = True
-
- def __init__(self, socket, caps, failsrc):
- super().__init__()
-
- # Create elements
- self.shmsrc = Gst.ElementFactory.make('shmsrc', None)
- self.depay = Gst.ElementFactory.make('gdpdepay', None)
- self.capsfilter = Gst.ElementFactory.make('capsfilter', None)
- self.failsrcsyncer = Gst.ElementFactory.make('identity', None)
- self.switch = Gst.ElementFactory.make('input-selector', None)
- self.failsrc = failsrc
- self.capsstr = caps.to_string()
-
- if not self.shmsrc or not self.capsfilter or not self.failsrcsyncer or not self.switch or not self.failsrc:
- self.log.error('could not create elements')
-
- # Add elements to Bin
- self.add(self.shmsrc)
- self.add(self.depay)
- self.add(self.capsfilter)
- self.add(self.failsrcsyncer)
- self.add(self.switch)
- self.add(self.failsrc)
-
- # Get Switcher-Pads
- self.goodpad = self.switch.get_request_pad('sink_%u')
- self.failpad = self.switch.get_request_pad('sink_%u')
-
- # Set properties
- self.shmsrc.set_property('socket-path', socket)
- self.shmsrc.link(self.depay)
- self.switch.set_property('active-pad', self.failpad)
- self.failsrcsyncer.set_property('sync', True)
- self.capsfilter.set_property('caps', caps)
-
- # Link elements
- self.depay.link(self.capsfilter)
- self.capsfilter.get_static_pad('src').link(self.goodpad)
-
- self.failsrc.link_filtered(self.failsrcsyncer, caps)
- self.failsrcsyncer.get_static_pad('src').link(self.failpad)
-
- # Install pad probes
- self.shmsrc.get_static_pad('src').add_probe(Gst.PadProbeType.BLOCK | Gst.PadProbeType.EVENT_DOWNSTREAM, self.event_probe, None)
- self.shmsrc.get_static_pad('src').add_probe(Gst.PadProbeType.BLOCK | Gst.PadProbeType.BUFFER, self.data_probe, None)
-
- # Install Watchdog
- if self.capsstr.startswith('audio'):
- timeoutms = 1000
- else:
- timeoutms = 250
-
- GLib.timeout_add(timeoutms, self.watchdog)
-
- # Add Ghost Pads
- self.add_pad(
- Gst.GhostPad.new('src', self.switch.get_static_pad('src'))
- )
-
- def do_handle_message(self, msg):
- if msg.type == Gst.MessageType.ERROR and msg.src == self.shmsrc:
- (err, debug) = msg.parse_error()
- self.log.warning('received error-message from ShmSrc, dropping: %s', err)
- self.log.debug(' debug-info from shmsrc: %s', debug)
- else:
- Gst.Bin.do_handle_message(self, msg)
-
- def event_probe(self, pad, info, ud):
- e = info.get_event()
- if e.type == Gst.EventType.EOS:
- self.log.warning('received EOS-event on event-probe, dropping')
- self.switch_to_failstate()
- return Gst.PadProbeReturn.DROP
-
- return Gst.PadProbeReturn.PASS
-
- def data_probe(self, pad, info, ud):
- self.last_buffer_arrived = time.time()
- self.switch_to_goodstate()
- return Gst.PadProbeReturn.PASS
-
- def watchdog(self):
- t = time.time()
- if self.last_buffer_arrived + 0.1 < t:
- self.log.warning('watchdog encountered a timeout')
- self.switch_to_failstate()
-
- if self.is_in_failstate and self.last_restart_retry + 1 < t:
- self.last_restart_retry = t
- self.restart()
-
- return True
-
- def restart(self):
- self.log.warning('restarting ShmSrc')
- self.shmsrc.set_state(Gst.State.NULL)
- self.shmsrc.set_base_time(self.get_parent().get_base_time())
- self.shmsrc.set_state(Gst.State.PLAYING)
-
- def switch_to_goodstate(self):
- if not self.is_in_failstate:
- return
-
- self.log.warning('switching output to goodstate')
- self.is_in_failstate = False
- self.switch.set_property('active-pad', self.goodpad)
-
- def switch_to_failstate(self):
- if self.is_in_failstate:
- return
-
- self.log.warning('switching output to failstate')
- self.is_in_failstate = True
- self.switch.set_property('active-pad', self.failpad)
diff --git a/voctocore/lib/videomix.py b/voctocore/lib/videomix.py
index 3f2dd7e..9207723 100644
--- a/voctocore/lib/videomix.py
+++ b/voctocore/lib/videomix.py
@@ -4,7 +4,7 @@ from gi.repository import GLib, Gst
from lib.config import Config
-class VideoMix(Gst.Bin):
+class VideoMix(object):
log = logging.getLogger('VideoMix')
mixerpads = []
diff --git a/voctocore/lib/videosrc.py b/voctocore/lib/videosrc.py
new file mode 100644
index 0000000..10b63fb
--- /dev/null
+++ b/voctocore/lib/videosrc.py
@@ -0,0 +1,92 @@
+#!/usr/bin/python3
+import logging, socket
+from gi.repository import GObject, Gst
+
+from lib.config import Config
+
+class VideoSrc(object):
+ log = logging.getLogger('VideoSrc')
+
+ name = None
+ port = None
+ caps = None
+
+ distributionPipeline = None
+ receiverPipeline = None
+
+ boundSocket = None
+ currentConnection = None
+
+ def __init__(self, name, port, caps):
+ self.log = logging.getLogger('VideoSrc['+name+']')
+
+ self.name = name
+ self.port = port
+ self.caps = caps
+
+ pipeline = """
+ intervideosrc channel={name}_in !
+ {caps} !
+ timeoverlay halignment=left valignment=top !
+ textoverlay text={name}_in halignment=left valignment=top ypad=75 !
+ queue !
+ tee name=tee
+
+ tee. ! queue ! intervideosink channel={name}_mirror
+ tee. ! queue ! intervideosink channel={name}_preview
+ tee. ! queue ! intervideosink channel={name}_mixer
+ """.format(
+ name=self.name,
+ caps=self.caps
+ )
+
+ self.log.debug('Launching Source-Distribution-Pipeline:\n%s', pipeline)
+ self.distributionPipeline = Gst.parse_launch(pipeline)
+ self.distributionPipeline.set_state(Gst.State.PLAYING)
+
+ self.log.debug('Binding to Source-Socket on [::]:%u', port)
+ self.boundSocket = socket.socket(socket.AF_INET6)
+ self.boundSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ self.boundSocket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, False)
+ self.boundSocket.bind(('::', port))
+ self.boundSocket.listen(1)
+
+ self.log.debug('Setting GObject io-watch on Socket')
+ GObject.io_add_watch(self.boundSocket, GObject.IO_IN, self.on_connect)
+
+ def on_connect(self, sock, *args):
+ conn, addr = sock.accept()
+	self.log.info("incoming connection from %s", addr)
+
+ if self.currentConnection is not None:
+ self.log.warn("another source is already connected")
+ return True
+
+ pipeline = """
+ fdsrc fd={fd} !
+ gdpdepay !
+ {caps} !
+ intervideosink channel={name}_in
+ """.format(
+ fd=conn.fileno(),
+ name=self.name,
+ caps=self.caps
+ )
+ self.log.debug('Launching Source-Receiver-Pipeline:\n%s', pipeline)
+ self.receiverPipeline = Gst.parse_launch(pipeline)
+
+ self.log.debug('Binding End-of-Stream-Signal on Source-Receiver-Pipeline')
+ self.receiverPipeline.bus.add_signal_watch()
+ self.receiverPipeline.bus.connect("message::eos", self.on_disconnect)
+
+ self.receiverPipeline.set_state(Gst.State.PLAYING)
+
+ self.currentConnection = conn
+ return True
+
+ def on_disconnect(self, bus, message):
+ self.log.info('Received End-of-Stream-Signal on Source-Receiver-Pipeline')
+ self.receiverPipeline.set_state(Gst.State.NULL)
+ self.receiverPipeline = None
+
+ self.currentConnection = None
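A client feeding such a source port needs nothing more than a gdp-payloaded TCP stream with matching caps. A minimal Python sketch (functionally the same as scripts/test-video-cam1.sh further down; caps copied from the default [mix] videocaps):

```python
#!/usr/bin/python3
# Sketch: push a test picture into the cam1 input port (10000).
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst

GObject.threads_init()
Gst.init([])

pipeline = Gst.parse_launch("""
    videotestsrc !
    video/x-raw,format=I420,width=1280,height=720,framerate=25/1,pixel-aspect-ratio=1/1 !
    timeoverlay valignment=bottom !
    gdppay !
    tcpclientsink host=localhost port=10000
""")
pipeline.set_state(Gst.State.PLAYING)
GObject.MainLoop().run()
```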
diff --git a/voctocore/lib/videosrcmirror.py b/voctocore/lib/videosrcmirror.py
new file mode 100644
index 0000000..8fb911d
--- /dev/null
+++ b/voctocore/lib/videosrcmirror.py
@@ -0,0 +1,66 @@
+#!/usr/bin/python3
+import logging, socket
+from gi.repository import GObject, Gst
+
+from lib.config import Config
+
+class VideoSrcMirror(object):
+ log = logging.getLogger('VideoSrcMirror')
+
+ name = None
+ port = None
+ caps = None
+
+ boundSocket = None
+
+ receiverPipelines = []
+ currentConnections = []
+
+ def __init__(self, name, port, caps):
+ self.log = logging.getLogger('VideoSrcMirror['+name+']')
+
+ self.name = name
+ self.port = port
+ self.caps = caps
+
+ self.log.debug('Binding to Mirror-Socket on [::]:%u', port)
+ self.boundSocket = socket.socket(socket.AF_INET6)
+ self.boundSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ self.boundSocket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, False)
+ self.boundSocket.bind(('::', port))
+ self.boundSocket.listen(1)
+
+ self.log.debug('Setting GObject io-watch on Socket')
+ GObject.io_add_watch(self.boundSocket, GObject.IO_IN, self.on_connect)
+
+ def on_connect(self, sock, *args):
+ conn, addr = sock.accept()
+	self.log.info("incoming connection from %s", addr)
+
+ pipeline = """
+ intervideosrc channel={name}_mirror !
+ {caps} !
+ textoverlay text={name}_mirror halignment=left valignment=top ypad=125 !
+ gdppay !
+ fdsink fd={fd}
+ """.format(
+ fd=conn.fileno(),
+ name=self.name,
+ caps=self.caps
+ )
+ self.log.debug('Launching Mirror-Receiver-Pipeline:\n%s', pipeline)
+ receiverPipeline = Gst.parse_launch(pipeline)
+
+	self.log.debug('Binding End-of-Stream-Signal on Mirror-Receiver-Pipeline')
+ receiverPipeline.bus.add_signal_watch()
+ receiverPipeline.bus.connect("message::eos", self.on_disconnect)
+
+ receiverPipeline.set_state(Gst.State.PLAYING)
+
+ self.receiverPipelines.append(receiverPipeline)
+ self.currentConnections.append(conn)
+
+ return True
+
+ def on_disconnect(self, bus, message):
+	self.log.info('Received End-of-Stream-Signal on Mirror-Receiver-Pipeline')
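Watching a mirror port works the same way in reverse; a minimal Python sketch (the shell equivalent is scripts/play-cam1-mirror.sh below):

```python
#!/usr/bin/python3
# Sketch: display the cam1 mirror output from port 13000.
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst

GObject.threads_init()
Gst.init([])

pipeline = Gst.parse_launch(
    'tcpclientsrc host=localhost port=13000 ! gdpdepay ! videoconvert ! autovideosink'
)
pipeline.set_state(Gst.State.PLAYING)
GObject.MainLoop().run()
```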
diff --git a/voctocore/scripts/play-cam1-mirror.sh b/voctocore/scripts/play-cam1-mirror.sh
new file mode 100755
index 0000000..75e65c7
--- /dev/null
+++ b/voctocore/scripts/play-cam1-mirror.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+gst-launch-1.0 \
+ tcpclientsrc host=localhost port=13000 !\
+ gdpdepay !\
+ xvimagesink
diff --git a/voctocore/scripts/test-audio-cam1.sh b/voctocore/scripts/test-audio-cam1.sh
new file mode 100755
index 0000000..0ca10d8
--- /dev/null
+++ b/voctocore/scripts/test-audio-cam1.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+gst-launch-1.0 \
+ audiotestsrc !\
+ audio/x-raw,format=S16LE,channels=2,layout=interleaved,rate=48000 !\
+ gdppay !\
+ tcpclientsink host=localhost port=20000
diff --git a/voctocore/scripts/test-av-sync-cam1.sh b/voctocore/scripts/test-av-sync-cam1.sh
new file mode 100755
index 0000000..756d83b
--- /dev/null
+++ b/voctocore/scripts/test-av-sync-cam1.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+gst-launch-1.0 -vm \
+ uridecodebin \
+ uri=http://c3voc.mazdermind.de/avsync.mp4 \
+ name=src \
+ \
+ src. !\
+ queue !\
+ videoconvert !\
+ videoscale !\
+ video/x-raw,format=I420,width=1280,height=720,framerate=25/1,pixel-aspect-ratio=1/1 ! \
+ timeoverlay valignment=bottom ! \
+ gdppay ! \
+ tcpclientsink host=localhost port=10000 \
+ \
+ src. !\
+ queue !\
+ audioconvert !\
+ audioresample !\
+ audiorate !\
+ audio/x-raw,format=S16LE,channels=2,layout=interleaved,rate=48000 !\
+ gdppay !\
+ tcpclientsink host=localhost port=20000
diff --git a/voctocore/scripts/test-video-cam1.sh b/voctocore/scripts/test-video-cam1.sh
new file mode 100755
index 0000000..e920c43
--- /dev/null
+++ b/voctocore/scripts/test-video-cam1.sh
@@ -0,0 +1,7 @@
+#!/bin/sh
+gst-launch-1.0 \
+ videotestsrc !\
+ video/x-raw,format=I420,width=1280,height=720,framerate=25/1,pixel-aspect-ratio=1/1 !\
+ timeoverlay valignment=bottom !\
+ gdppay !\
+ tcpclientsink host=localhost port=10000
diff --git a/voctocore/scripts/videosrc1.sh b/voctocore/scripts/videosrc1.sh
deleted file mode 100755
index fe34b16..0000000
--- a/voctocore/scripts/videosrc1.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/sh
-mkdir -p /tmp/voctomix-sockets/
-rm -f /tmp/voctomix-sockets/v-cam1 /tmp/voctomix-sockets/a-cam1
-gst-launch-1.0 -v \
- uridecodebin \
- uri=file:///home/peter/avsync.mp4 \
- name=src \
- \
- src. !\
- queue !\
- progressreport !\
- videoconvert !\
- videorate !\
- videoscale !\
- video/x-raw,format=RGBx,width=1280,height=720,framerate=25/1 !\
- gdppay !\
- shmsink \
- socket-path=/tmp/voctomix-sockets/v-cam1 \
- shm-size=100000000 \
- \
- src. !\
- queue !\
- audioresample !\
- audioconvert !\
- audio/x-raw,format=S16LE,layout=interleaved,rate=48000,channels=2 !\
- gdppay !\
- shmsink \
- socket-path=/tmp/voctomix-sockets/a-cam1 \
- shm-size=10000000
-
diff --git a/voctocore/scripts/videosrc2.sh b/voctocore/scripts/videosrc2.sh
deleted file mode 100755
index 1c5bc56..0000000
--- a/voctocore/scripts/videosrc2.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/sh
-mkdir -p /tmp/voctomix-sockets/
-rm -f /tmp/voctomix-sockets/v-cam2 /tmp/voctomix-sockets/a-cam2
-gst-launch-1.0 -v \
- uridecodebin \
- uri=file:///home/peter/ED_1280.avi \
- name=src \
- \
- src. !\
- queue !\
- progressreport !\
- videoconvert !\
- videorate !\
- videoscale !\
- video/x-raw,format=RGBx,width=1280,height=720,framerate=25/1,pixel-aspect-ratio=1/1 !\
- gdppay !\
- shmsink \
- socket-path=/tmp/voctomix-sockets/v-cam2 \
- shm-size=100000000 \
- \
- src. !\
- queue !\
- audioresample !\
- audioconvert !\
- audio/x-raw,format=S16LE,layout=interleaved,rate=48000,channels=2 !\
- gdppay !\
- shmsink \
- socket-path=/tmp/voctomix-sockets/a-cam2 \
- shm-size=10000000
diff --git a/voctocore/voctocore.py b/voctocore/voctocore.py
index 8c15463..7a96277 100755
--- a/voctocore/voctocore.py
+++ b/voctocore/voctocore.py
@@ -3,7 +3,7 @@ import gi, signal, logging, sys
# import GStreamer and GLib-Helper classes
gi.require_version('Gst', '1.0')
-from gi.repository import GLib, Gst, GObject
+from gi.repository import Gtk, Gdk, Gst, GObject, GdkX11, GstVideo
# check min-version
minGst = (1, 4)
@@ -16,16 +16,19 @@ if sys.version_info < minPy:
raise Exception("Python version", sys.version_info, 'is too old, at least', minPy, 'is required')
-# init GObject before importing local classes
+# init GObject & Co. before importing local classes
GObject.threads_init()
-Gst.init(None)
+Gdk.init([])
+Gtk.init([])
+Gst.init([])
# import local classes
+from lib.args import Args
from lib.pipeline import Pipeline
from lib.controlserver import ControlServer
# main class
-class Voctocore:
+class Voctocore(object):
log = logging.getLogger('Voctocore')
def __init__(self):
@@ -33,41 +36,30 @@ class Voctocore:
self.mainloop = GObject.MainLoop()
# initialize subsystem
- self.log.debug('creating Video-Pipeline')
+ self.log.debug('creating A/V-Pipeline')
self.pipeline = Pipeline()
self.log.debug('creating ControlServer')
self.controlserver = ControlServer(self.pipeline)
-
- def run(self):
- self.log.info('running Video-Pipeline')
- self.pipeline.run()
+ def run(self):
self.log.info('running GObject-MainLoop')
self.mainloop.run()
- def kill(self):
- self.log.info('quitting Video-Pipeline')
- self.pipeline.quit()
-
+ def quit(self):
self.log.info('quitting GObject-MainLoop')
- self.mainloop.quit()
- def on_eos(self, bus, msg):
- self.log.warning('received EOS-Signal on the Video-Bus from Element %s. This shouldn\'t happen if the program is not terminating right now', msg.src)
- self.kill()
-
- def on_error(self, bus, msg):
- err = msg.parse_error()
- self.log.error('received Error-Signal on the Video-Bus from Element %s: %s', msg.src, err[1])
- self.kill()
+ self.mainloop.quit()
# run mainclass
-def main(argv):
+def main():
# configure logging
- logging.basicConfig(level=logging.DEBUG,
- format='%(levelname)8s %(name)s: %(message)s')
+ docolor = (Args.color == 'always') or (Args.color == 'auto' and sys.stderr.isatty())
+
+ logging.basicConfig(
+ level=logging.DEBUG if Args.verbose else logging.WARNING,
+ format='\x1b[33m%(levelname)8s\x1b[0m \x1b[32m%(name)s\x1b[0m: %(message)s' if docolor else '%(levelname)8s %(name)s: %(message)s')
# make killable by ctrl-c
logging.debug('setting SIGINT handler')
@@ -81,4 +73,4 @@ def main(argv):
voctocore.run()
if __name__ == '__main__':
- main(sys.argv)
+ main()