author    MaZderMind <git@mazdermind.de>    2015-09-06 13:49:48 +0200
committer MaZderMind <git@mazdermind.de>    2015-09-06 13:49:48 +0200
commit    2e28d09b04d66add839d4b43529ca11e25c699a3 (patch)
tree      08650e88e5d7d93814b8e7aa6cc5a70a4cb6186b /voctogui/lib
parent    65356fd083f31cc6956516a7e8fc04896c9f5ffc (diff)
Implement jpeg-previews and a/v receiving in ui
Diffstat (limited to 'voctogui/lib')
-rw-r--r--  voctogui/lib/videodisplay.py | 57
1 file changed, 48 insertions(+), 9 deletions(-)
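
The new code reads all of its connection and caps settings from the voctogui configuration. The sections and keys below are the ones queried via Config in this patch ([server] host, [mix] videocaps/audiocaps, [previews] enabled/use/videocaps); the values are illustrative placeholders only, not part of the commit:

    [server]
    host = 127.0.0.1

    [mix]
    videocaps = video/x-raw,width=1920,height=1080,framerate=25/1
    audiocaps = audio/x-raw,format=S16LE,layout=interleaved,channels=2,rate=48000

    [previews]
    enabled = true
    use = true
    videocaps = video/x-raw,width=320,height=180,framerate=25/1

With previews both enabled and in use, the GUI connects to the preview port (raw port + 1000) and decodes JPEG frames; otherwise it pulls the raw stream from the original port.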
diff --git a/voctogui/lib/videodisplay.py b/voctogui/lib/videodisplay.py
index a2f7f5a..4133d5a 100644
--- a/voctogui/lib/videodisplay.py
+++ b/voctogui/lib/videodisplay.py
@@ -1,6 +1,8 @@
import logging
from gi.repository import Gst
+from lib.config import Config
+
class VideoDisplay(object):
""" Displays a Voctomix-Video-Stream into a GtkWidget """
@@ -11,14 +13,41 @@ class VideoDisplay(object):
self.draw_callback = draw_callback
self.level_callback = level_callback
+ caps = Config.get('mix', 'videocaps')
+ use_previews = Config.getboolean('previews', 'enabled') and Config.getboolean('previews', 'use')
+
+ # Preview-Ports are Raw-Ports + 1000
+ if use_previews:
+ self.log.info('using jpeg-previews instead of raw-video for gui')
+ port += 1000
+ else:
+ self.log.info('using raw-video instead of jpeg-previews for gui')
+
# Setup Server-Connection, Demuxing and Decoding
pipeline = """
- videotestsrc !
- timeoverlay !
- video/x-raw,width=1920,height=1080,framerate=25/1 !
- """.format(
- port=port
- )
+ tcpclientsrc host={host} port={port} !
+ queue !
+ matroskademux name=demux
+ """
+
+ if use_previews:
+ pipeline += """
+ demux. !
+ image/jpeg !
+ jpegdec !
+ {previewcaps} !
+ videoscale method=nearest-neighbour !
+ videorate !
+ {vcaps} !
+ queue !
+ """
+
+ else:
+ pipeline += """
+ demux. !
+ {vcaps} !
+ queue !
+ """
# If an overlay is required, add a cairooverlay-Element into the Video-Path
if self.draw_callback:
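
Once the format() call at the end of the diff fills in the placeholders, the preview branch of the template expands to a receive pipeline roughly like the following; host, port and caps values are illustrative, not taken from the commit:

    tcpclientsrc host=127.0.0.1 port=11000 !
    queue !
    matroskademux name=demux
    demux. !
    image/jpeg !
    jpegdec !
    video/x-raw,width=320,height=180,framerate=25/1 !
    videoscale method=nearest-neighbour !
    videorate !
    video/x-raw,width=1920,height=1080,framerate=25/1 !
    queue ! ...

In other words, the JPEG frames arrive at preview resolution and are scaled and rate-corrected back up to the mix videocaps before being displayed.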
@@ -37,15 +66,17 @@ class VideoDisplay(object):
# If an Audio-Path is required, add an Audio-Path through a level-Element
if self.level_callback or play_audio:
pipeline += """
- audiotestsrc wave=blue-noise !
- audio/x-raw !
+ demux. !
+ {acaps} !
+ queue !
level name=lvl interval=50000000 !
"""
# If Playback is requested, push to alsa
if play_audio:
+ # ts-offset=1000000000 (1s) - should keep audio & video in sync but delay by 1s
pipeline += """
- alsasink
+ alsasink sync=False
"""
# Otherwise just trash the Audio
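
The level element named lvl posts element messages on the pipeline bus every 50 ms (interval=50000000 ns). A handler along the following lines could forward them to level_callback; this is a sketch of standard GStreamer bus handling, not code from this commit, and the per-channel rms/peak fields are the ones documented for the level element:

    def watch_levels(pipeline, level_callback):
        # Sketch: forward per-channel rms/peak readings (in dB) from the
        # 'level' element to the GUI's level_callback.
        def on_element_message(bus, msg):
            struct = msg.get_structure()
            if struct is not None and struct.get_name() == 'level':
                # Depending on the PyGI version, the array-valued fields
                # may need additional unpacking.
                level_callback(struct.get_value('rms'),
                               struct.get_value('peak'))

        bus = pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect('message::element', on_element_message)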
@@ -54,6 +85,14 @@ class VideoDisplay(object):
fakesink
"""
+ pipeline = pipeline.format(
+ acaps=Config.get('mix', 'audiocaps'),
+ vcaps=Config.get('mix', 'videocaps'),
+ previewcaps=Config.get('previews', 'videocaps'),
+ host=Config.get('server', 'host'),
+ port=port,
+ )
+
self.log.debug('Creating Display-Pipeline:\n%s', pipeline)
self.pipeline = Gst.parse_launch(pipeline)
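
For a quick standalone check of the receive path, independent of the GUI, the same parse_launch mechanism can be used directly; host and port are placeholders (preview port = raw port + 1000):

    import gi
    gi.require_version('Gst', '1.0')
    from gi.repository import Gst, GLib

    Gst.init(None)
    pipeline = Gst.parse_launch(
        'tcpclientsrc host=127.0.0.1 port=11000 ! queue ! '
        'matroskademux ! image/jpeg ! jpegdec ! videoconvert ! autovideosink'
    )
    pipeline.set_state(Gst.State.PLAYING)
    GLib.MainLoop().run()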