path: root/voctocore/lib/pipeline.py
blob: 6267e9ef4cd5f0d295f38acf0055302596bec60e (plain)
#!/usr/bin/python3
import os, errno, time, logging
from gi.repository import GLib, Gst

# import controlserver annotation
from lib.controlserver import controlServerEntrypoint

# import library components
from lib.config import Config
from lib.quadmix import QuadMix
from lib.videomix import VideoMix
from lib.audiomix import AudioMix
from lib.distributor import TimesTwoDistributor
from lib.shmsrc import FailsafeShmSrc
from lib.failvideosrc import FailVideoSrc
from lib.failaudiosrc import FailAudioSrc

class Pipeline(Gst.Pipeline):
    """mixing, streaming and encoding pipeline construction and control"""
    log = logging.getLogger('Pipeline')

    videonames = []
    audionames = []

    def __init__(self):
        super().__init__()

        self.log.debug('Creating Video-Mixer')
        # create audio and video mixer
        self.quadmixer = QuadMix()
        self.add(self.quadmixer)

        self.videomixer = VideoMix()
        self.add(self.videomixer)

        self.audiomixer = AudioMix()
        self.add(self.audiomixer)

        # read the path where the shm-control-sockets are located and ensure it exists
        socketpath = Config.get('sources', 'socketpath')
        self.log.info('Ensuring the configured socketpath exists: %s', socketpath)
        try:
            os.makedirs(socketpath)
        except OSError as exception:
            if exception.errno != errno.EEXIST:
                raise
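        # note: on Python >= 3.2 the try/except above is roughly equivalent to
        # os.makedirs(socketpath, exist_ok=True)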

        self.videonames = Config.getlist('sources', 'video')
        self.audionames = Config.getlist('sources', 'audio')

        caps = Gst.Caps.from_string(Config.get('sources', 'videocaps'))
        self.log.debug('parsing videocaps from config: %s', caps.to_string())
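        # each video source is read through a FailsafeShmSrc (with a FailVideoSrc as
        # fallback), split by a TimesTwoDistributor and linked once into the quadmix
        # preview and once into the live video-mixer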
        for idx, name in enumerate(self.videonames):
            socket = os.path.join(socketpath, 'v-' + name)

            self.log.info('Creating video-source "%s" at socket-path %s', name, socket)
            sourcebin = FailsafeShmSrc(socket, caps, FailVideoSrc(idx, name))
            self.add(sourcebin)

            distributor = TimesTwoDistributor()
            self.add(distributor)
            sourcebin.link(distributor)

            mixerpad = self.quadmixer.request_mixer_pad()
            distributor.get_static_pad('src_a').link(mixerpad)

            mixerpad = self.videomixer.request_mixer_pad()
            distributor.get_static_pad('src_b').link(mixerpad)

        caps = Gst.Caps.from_string(Config.get('sources', 'audiocaps'))
        self.log.debug('parsing audiocaps from config: %s', caps.to_string())
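        # audio sources need no distributor; each one is linked straight into the audio-mixer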
        for idx, name in enumerate(self.audionames):
            socket = os.path.join(socketpath, 'a-' + name)

            self.log.info('Creating audio-source "%s" at socket-path %s', name, socket)
            sourcebin = FailsafeShmSrc(socket, caps, FailAudioSrc(idx, name))
            self.add(sourcebin)

            mixerpad = self.audiomixer.request_mixer_pad()
            sourcebin.get_static_pad('src').link(mixerpad)

        # tell the quadmix that these were all sources and no more will come after this
        self.quadmixer.finalize()

        self.quadmixer.set_active(0)
        self.videomixer.set_active(0)
        self.audiomixer.set_active(0)
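        # recording/streaming chain: the mixed audio is encoded with avenc_mp2, the mixed
        # video with avenc_mpeg2video, both are muxed into an MPEG-TS and written to a filesink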
        self.audioconv = Gst.ElementFactory.make('audioconvert', 'audioconv')
        self.audioenc = Gst.ElementFactory.make('avenc_mp2', 'audioenc')
        self.videoconv = Gst.ElementFactory.make('videoconvert', 'videoconv')
        self.videoenc = Gst.ElementFactory.make('avenc_mpeg2video', 'videoenc')
        self.mux = Gst.ElementFactory.make('mpegtsmux', 'mux')
        self.sink = Gst.ElementFactory.make('filesink', 'sink')

        self.add(self.audioconv)
        self.add(self.audioenc)
        self.add(self.videoconv)
        self.add(self.videoenc)
        self.add(self.mux)
        self.add(self.sink)

        self.videomixer.link(self.videoconv)
        self.videoconv.link(self.videoenc)
        self.audiomixer.link(self.audioconv)
        self.audioconv.link(self.audioenc)
        self.videoenc.link(self.mux)
        self.audioenc.link(self.mux)
        self.mux.link(self.sink)
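        # FIXME: the filesink location below is a hard-coded development path and should
        # presumably come from the configuration instead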
        self.sink.set_property('location', '/home/peter/test.ts')

        self.quadmixsink = Gst.ElementFactory.make('autovideosink', 'quadmixsink')
        self.quadmixsink.set_property('sync', False)
        self.add(self.quadmixsink)
        self.quadmixer.link(self.quadmixsink)

    def run(self):
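        # go to PAUSED first so GStreamer can pre-roll the pipeline, wait briefly,
        # then start playback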
        self.set_state(Gst.State.PAUSED)
        time.sleep(0.5)
        self.set_state(Gst.State.PLAYING)

    def quit(self):
        self.set_state(Gst.State.NULL)
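
    # NOTE: the commented-out code below looks like an earlier implementation based on
    # Gst.parse_bin_from_description; it appears to be kept here for reference only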
    # # collection of video-sources to connect to the quadmix
    # quadmixSources = []
    #
    # # create camera sources
    # for camberabin in self.createDummyCamSources():
    #     # link camerasource to audiomixer
    #     camberabin.get_by_name('audio_src').link(self.pipeline.get_by_name('liveaudio'))
    #
    #     # inject a ×2 distributor and link one end to the live-mixer
    #     distributor = self.createDistributor(camberabin.get_by_name('video_src'), camberabin.get_name())
    #     distributor.get_by_name('a').link(self.pipeline.get_by_name('livevideo'))
    #
    #     # collect the other end to add it later to the quadmix
    #     quadmixSources.append(distributor.get_by_name('b'))
    #
    # # TODO: generate pause & slides with another generator here which only
    # # yields if the respective files are present and which only have a video-pad
    #
    # # add all video-sources to the quadmix-monitor-screen
    # self.addVideosToQuadmix(quadmixSources, self.pipeline.get_by_name('quadmix'))
    #
    # # initialize to known defaults
    # # TODO: make configurable
    # self.switchVideo(0)
    # self.switchAudio(0)
    #
    # Gst.debug_bin_to_dot_file(self.pipeline, Gst.DebugGraphDetails.ALL, 'test')
    # self.pipeline.set_state(Gst.State.PLAYING)

    # def createMixer(self):
    #     """create audio and video mixer"""
    #     # create mixer-pipeline from string
    #     mixerbin = Gst.parse_bin_from_description("""
    #         videomixer name=livevideo ! autovideosink
    #         input-selector name=liveaudio ! autoaudiosink
    #         videotestsrc pattern="solid-color" foreground-color=0x808080 ! capsfilter name=filter ! videomixer name=quadmix ! autovideosink
    #     """, False)
    #
    #     # define caps for the videotestsrc which generates the background-color for the quadmix
    #     bgcaps = Gst.Caps.new_empty_simple('video/x-raw')
    #     bgcaps.set_value('width', round(self.monitorSize[0]))
    #     bgcaps.set_value('height', round(self.monitorSize[1]))
    #     mixerbin.get_by_name('filter').set_property('caps', bgcaps)
    #
    #     # name the bin, add and return it
    #     mixerbin.set_name('mixerbin')
    #     self.pipeline.add(mixerbin)
    #     return mixerbin

    # def addVideosToQuadmix(self, videosources, quadmix):
    #     """add all available videosources to the quadmix"""
    #     count = len(videosources)
    #
    #     # coordinate of the cell where we place the next video
    #     place = [0, 0]
    #
    #     # number of cells in the quadmix-monitor
    #     grid = [0, 0]
    #     grid[0] = math.ceil(math.sqrt(count))
    #     grid[1] = math.ceil(count / grid[0])
    #
    #     # size of each cell in the quadmix-monitor
    #     cellSize = (
    #         self.monitorSize[0] / grid[0],
    #         self.monitorSize[1] / grid[1]
    #     )
    #
    #     print("showing {} videosources in a {}×{} grid in a {}×{} px window, which gives cells of {}×{} px per videosource".format(
    #         count, grid[0], grid[1], self.monitorSize[0], self.monitorSize[1], cellSize[0], cellSize[1]))
    #
    #     # iterate over all video-sources
    #     for idx, videosource in enumerate(videosources):
    #         # generate a pipeline for this videosource which
    #         #  - scales the video to the requested cell-size
    #         #  - removes n px of the video (n = 5 if the video is highlighted else 0)
    #         #  - adds a colored border of n px around the video (n = 5 if the video is highlighted else 0)
    #         #  - overlays the index of the video as text in the top left corner
    #         #  - ends in a known & named output
    #         previewbin = Gst.parse_bin_from_description("""
    #             videoscale name=in !
    #             capsfilter name=caps !
    #             videobox name=crop top=0 left=0 bottom=0 right=0 !
    #             videobox fill=red top=-0 left=-0 bottom=-0 right=-0 name=add !
    #             textoverlay color=0xFFFFFFFF halignment=left valignment=top xpad=10 ypad=5 font-desc="sans 35" name=text !
    #             identity name=out
    #         """, False)
    #
    #         # name the bin and add it
    #         previewbin.set_name('previewbin-{}'.format(idx))
    #         self.pipeline.add(previewbin)
    #         self.previewbins.append(previewbin)
    #
    #         # set the overlay-text
    #         previewbin.get_by_name('text').set_property('text', str(idx))
    #
    #         # query the video-source caps and extract its size
    #         caps = videosource.get_static_pad('src').query_caps(None)
    #         capsstruct = caps.get_structure(0)
    #         srcSize = (
    #             capsstruct.get_int('width')[1],
    #             capsstruct.get_int('height')[1],
    #         )
    #
    #         # calculate the ideal scale factor and scale the sizes
    #         f = max(srcSize[0] / cellSize[0], srcSize[1] / cellSize[1])
    #         scaleSize = (
    #             srcSize[0] / f,
    #             srcSize[1] / f,
    #         )
    #
    #         # calculate the top/left coordinate
    #         coord = (
    #             place[0] * cellSize[0] + (cellSize[0] - scaleSize[0]) / 2,
    #             place[1] * cellSize[1] + (cellSize[1] - scaleSize[1]) / 2,
    #         )
    #
    #         print("placing videosource {} of size {}×{} scaled by {} to {}×{} in a {}×{} px cell ({}/{}) at position ({}/{})".format(
    #             idx, srcSize[0], srcSize[1], f, scaleSize[0], scaleSize[1], cellSize[0], cellSize[1], place[0], place[1], coord[0], coord[1]))
    #
    #         # link the videosource to the input of the preview-bin
    #         videosource.link(previewbin.get_by_name('in'))
    #
    #         # create and set the caps for the preview-scaler
    #         scalecaps = Gst.Caps.new_empty_simple('video/x-raw')
    #         scalecaps.set_value('width', round(scaleSize[0]))
    #         scalecaps.set_value('height', round(scaleSize[1]))
    #         previewbin.get_by_name('caps').set_property('caps', scalecaps)
    #
    #         # request a pad from the quadmixer and configure x/y position
    #         sinkpad = quadmix.get_request_pad('sink_%u')
    #         sinkpad.set_property('xpos', round(coord[0]))
    #         sinkpad.set_property('ypos', round(coord[1]))
    #
    #         # link the output of the preview-bin to the mixer
    #         previewbin.get_by_name('out').link(quadmix)
    #
    #         # increment grid position
    #         place[0] += 1
    #         if place[0] >= grid[0]:
    #             place[1] += 1
    #             place[0] = 0

    # def createDistributor(self, videosource, name):
    #     """create a simple ×2 distributor"""
    #     distributor = Gst.parse_bin_from_description("""
    #         tee name=t
    #         t. ! queue name=a
    #         t. ! queue name=b
    #     """, False)
    #
    #     # set a name and add to pipeline
    #     distributor.set_name('distributor({0})'.format(name))
    #     self.pipeline.add(distributor)
    #
    #     # link input to the tee
    #     videosource.link(distributor.get_by_name('t'))
    #     return distributor

    # def createDummyCamSources(self):
    #     """create test-video-sources from files or urls"""
    #     # TODO make configurable
    #     uris = ('file:///home/peter/122.mp4', 'file:///home/peter/10025.mp4',)
    #     for idx, uri in enumerate(uris):
    #         # create a bin for a simulated camera input
    #         # force the input resolution to 1024x576 so that the following elements in
    #         # the pipeline can know the size even if the file is not yet loaded; the
    #         # quadmixer is not resize-capable
    #         camberabin = Gst.parse_bin_from_description("""
    #             uridecodebin name=input
    #             input. ! videoconvert ! videoscale ! videorate ! video/x-raw,width=1024,height=576,framerate=25/1 ! identity name=video_src
    #             input. ! audioconvert name=audio_src
    #         """, False)
    #
    #         # set name and uri
    #         camberabin.set_name('dummy-camberabin({0})'.format(uri))
    #         camberabin.get_by_name('input').set_property('uri', uri)
    #
    #         # add to pipeline and pass the bin upstream
    #         self.pipeline.add(camberabin)
    #         yield camberabin

    # def createCamSources(self):
    #     """create real-video-sources from the bmd-drivers"""
    #     # TODO make number of installed cams configurable
    #     for cam in range(2):
    #         # create a bin for camera input
    #         camberabin = Gst.parse_bin_from_description("""
    #             decklinksrc name=input input=sdi input-mode=1080p25
    #             input. ! videoconvert ! videoscale ! videorate ! video/x-raw,width=1920,height=1080,framerate=25/1 ! identity name=video_src
    #             input. ! audioconvert name=audio_src
    #         """, False)
    #
    #         # set name and subdevice
    #         camberabin.set_name('camberabin({0})'.format(cam))
    #         camberabin.get_by_name('input').set_property('subdevice', cam)
    #
    #         # add to pipeline and pass the bin upstream
    #         self.pipeline.add(camberabin)
    #         yield camberabin

    ### below are access-methods for the ControlServer
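    # the controlServerEntrypoint annotation presumably registers each of these methods
    # as a command on the control-server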
    @controlServerEntrypoint
    def status(self):
        """System Status Query"""
        raise NotImplementedError("status command is not implemented yet")

    @controlServerEntrypoint
    def numAudioSources(self):
        """return number of available audio sources"""
        raise NotImplementedError("audio is not implemented yet")

    @controlServerEntrypoint
    def switchAudio(self, audiosource):
        """switch audio to the selected audio"""
        idx = int(audiosource)
        if idx >= len(self.audionames):
            return 'unknown audio-source: %s' % (audiosource)

        self.log.info("switching mixer to audio-source %u", idx)
        self.audiomixer.set_active(idx)

    @controlServerEntrypoint
    def numVideoSources(self):
        """return number of available video sources"""
        return str(len(self.videonames))

    @controlServerEntrypoint
    def switchVideo(self, videosource):
        """switch video to the selected video"""
        idx = int(videosource)
        if idx >= len(self.videonames):
            return 'unknown video-source: %s' % (videosource)

        self.log.info("switching mixer to video-source %u", idx)
        self.quadmixer.set_active(idx)
        self.videomixer.set_active(idx)

    @controlServerEntrypoint
    def fadeVideo(self, videosource):
        """fade video to the selected video"""
        raise NotImplementedError("fade command is not implemented yet")

    @controlServerEntrypoint
    def setPipVideo(self, videosource):
        """switch video-source in the PIP to the selected video"""
        raise NotImplementedError("pip commands are not implemented yet")

    @controlServerEntrypoint
    def fadePipVideo(self, videosource):
        """fade video-source in the PIP to the selected video"""
        raise NotImplementedError("pip commands are not implemented yet")

    class PipPlacements:
        """enumeration of possible PIP-Placements"""
        TopLeft, TopRight, BottomLeft, BottomRight = range(4)

    @controlServerEntrypoint
    def setPipPlacement(self, placement):
        """place PIP in the selected position"""
        assert isinstance(placement, self.PipPlacements)
        raise NotImplementedError("pip commands are not implemented yet")

    @controlServerEntrypoint
    def setPipStatus(self, enabled):
        """show or hide PIP"""
        raise NotImplementedError("pip commands are not implemented yet")

    @controlServerEntrypoint
    def fadePipStatus(self, enabled):
        """fade PIP in or out"""
        raise NotImplementedError("pip commands are not implemented yet")

    class StreamContents:
        """enumeration of possible stream-contents"""
        Live, Pause, NoStream = range(3)

    @controlServerEntrypoint
    def selectStreamContent(self, content):
        """switch the livestream-content between the selected mixer output, pause-image or nostream-image"""
        assert isinstance(content, self.StreamContents)
        raise NotImplementedError("pause/nostream switching is not implemented yet")