path: root/voctocore/lib/pipeline.py
#!/usr/bin/python3
import os, errno, time, logging
from gi.repository import GLib, Gst

# import controlserver annotation
from lib.controlserver import controlServerEntrypoint

# import library components
from lib.config import Config
from lib.quadmix import QuadMix
from lib.videomix import VideoMix
# from lib.audiomix import AudioMix
from lib.distributor import TimesTwoDistributor
from lib.shmsrc import FailsafeShmSrc


class Pipeline(Gst.Pipeline):
    """mixing, streaming and encoding pipeline construction and control"""
    log = logging.getLogger('Pipeline')

    videonames = []
    audionames = []

    def __init__(self):
        super().__init__()

        self.log.debug('Creating Video-Mixer')
        # create audio and video mixer
        self.quadmixer = QuadMix()
        self.add(self.quadmixer)

        self.videomixer = VideoMix()
        self.add(self.videomixer)

        # self.audiomixer = AudioMix()
        # self.add(self.audiomixer)

        # read the path where the shm-control-sockets are located and ensure it exists
        socketpath = Config.get('sources', 'socketpath')
        self.log.info('Ensuring the configured socketpath exists: %s', socketpath)
        try:
            os.makedirs(socketpath)
        except OSError as exception:
            if exception.errno != errno.EEXIST:
                raise

        self.videonames = Config.getlist('sources', 'video')
        self.audionames = Config.getlist('sources', 'audio')

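        # The configuration file itself is not part of this module. A minimal,
        # illustrative sketch of the ini-style [sources] section that the
        # Config.get()/Config.getlist() calls above assume (the path and the
        # source names below are placeholders, not project defaults):
        #
        #   [sources]
        #   socketpath = /tmp/voctomix-sockets
        #   video = cam1,cam2,slides
        #   audio = cam1
        #
        # Config.getlist() is expected to split the comma-separated value into
        # a Python list, e.g. ['cam1', 'cam2', 'slides'].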

        for name in self.videonames:
            socket = os.path.join(socketpath, 'v-'+name)

            self.log.info('Creating video-source %s at socket-path %s', name, socket)
            sourcebin = FailsafeShmSrc(socket)
            self.add(sourcebin)

            distributor = TimesTwoDistributor()
            self.add(distributor)

            sourcebin.link(distributor)

            mixerpad = self.quadmixer.request_mixer_pad()
            distributor.get_static_pad('src_a').link(mixerpad)
            self.videomixer.add_source(distributor)

            # distributor.link(self.quadmixer)
            # distributor.link(self.videomixer)
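
        # Rough per-source wiring sketch; the src_b branch is handled inside
        # VideoMix.add_source(), so its exact linking is an assumption here:
        #
        #   FailsafeShmSrc('v-<name>') -> TimesTwoDistributor
        #       src_a -> QuadMix (request pad)
        #       src_b -> VideoMix (via add_source)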

        # for audiosource in Config.getlist('sources', 'audio'):
        #     sourcebin = FailsafeShmSrc(os.path.join(socketpath, audiosource))
        #     self.add(sourcebin)
        #     sourcebin.link(self.audiomixer)

        # tell the quadmix that these were all sources and that no more will be added after this
        self.quadmixer.finalize()

        self.quadmixer.set_active(0)
        self.videomixer.set_active(0)

        self.quadmixsink = Gst.ElementFactory.make('autovideosink', 'quadmixsink')
        self.quadmixsink.set_property('sync', False)
        self.add(self.quadmixsink)
        self.quadmixer.link(self.quadmixsink)

        self.videosink = Gst.ElementFactory.make('autovideosink', 'videosink')
        self.videosink.set_property('sync', False)
        self.add(self.videosink)
        self.videomixer.link(self.videosink)

        # self.audiosink = Gst.ElementFactory.make('autoaudiosink', 'audiosink')
        # self.add(self.audiosink)
        # self.audiomixer.link(self.audiosink)

    def run(self):
        self.set_state(Gst.State.PAUSED)
        time.sleep(0.5)
        self.set_state(Gst.State.PLAYING)

    def quit(self):
        self.set_state(Gst.State.NULL)
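
    # Hedged usage sketch: this class is normally driven by the voctocore
    # entry point rather than run standalone. Assuming Gst has been
    # initialized and a GLib main loop is used (both assumptions, not shown
    # in this file), driving the pipeline could look roughly like this:
    #
    #   Gst.init(None)
    #   pipeline = Pipeline()
    #   pipeline.run()
    #   try:
    #       GLib.MainLoop().run()
    #   finally:
    #       pipeline.quit()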

    # # collection of video-sources to connect to the quadmix
    # quadmixSources = []
    #
    # # create camera sources
    # for camberabin in self.createDummyCamSources():
    #     # link camerasource to audiomixer
    #     camberabin.get_by_name('audio_src').link(self.pipeline.get_by_name('liveaudio'))
    #
    #     # inject a ×2 distributor and link one end to the live-mixer
    #     distributor = self.createDistributor(camberabin.get_by_name('video_src'), camberabin.get_name())
    #     distributor.get_by_name('a').link(self.pipeline.get_by_name('livevideo'))
    #
    #     # collect the other end to add it later to the quadmix
    #     quadmixSources.append(distributor.get_by_name('b'))
    #
    # # TODO: generate pause & slides with another generator here which only
    # # yields if the respective files are present and which only has a video-pad
    #
    # # add all video-sources to the quadmix-monitor-screen
    # self.addVideosToQuadmix(quadmixSources, self.pipeline.get_by_name('quadmix'))
    #
    # # initialize to known defaults
    # # TODO: make configurable
    # self.switchVideo(0)
    # self.switchAudio(0)
    #
    # Gst.debug_bin_to_dot_file(self.pipeline, Gst.DebugGraphDetails.ALL, 'test')
    # self.pipeline.set_state(Gst.State.PLAYING)

    # def createMixer(self):
    #     """create audio and video mixer"""
    #     # create mixer-pipeline from string
    #     mixerbin = Gst.parse_bin_from_description("""
    #         videomixer name=livevideo ! autovideosink
    #         input-selector name=liveaudio ! autoaudiosink
    #         videotestsrc pattern="solid-color" foreground-color=0x808080 ! capsfilter name=filter ! videomixer name=quadmix ! autovideosink
    #     """, False)
    #
    #     # define caps for the videotestsrc which generates the background-color for the quadmix
    #     bgcaps = Gst.Caps.new_empty_simple('video/x-raw')
    #     bgcaps.set_value('width', round(self.monitorSize[0]))
    #     bgcaps.set_value('height', round(self.monitorSize[1]))
    #     mixerbin.get_by_name('filter').set_property('caps', bgcaps)
    #
    #     # name the bin, add and return it
    #     mixerbin.set_name('mixerbin')
    #     self.pipeline.add(mixerbin)
    #     return mixerbin

    # def addVideosToQuadmix(self, videosources, quadmix):
    #     """add all available videosources to the quadmix"""
    #     count = len(videosources)
    #
    #     # coordinate of the cell where we place the next video
    #     place = [0, 0]
    #
    #     # number of cells in the quadmix-monitor
    #     grid = [0, 0]
    #     grid[0] = math.ceil(math.sqrt(count))
    #     grid[1] = math.ceil(count / grid[0])
    #
    #     # size of each cell in the quadmix-monitor
    #     cellSize = (
    #         self.monitorSize[0] / grid[0],
    #         self.monitorSize[1] / grid[1]
    #     )
    #
    #     print("showing {} videosources in a {}×{} grid in a {}×{} px window, which gives cells of {}×{} px per videosource".format(
    #         count, grid[0], grid[1], self.monitorSize[0], self.monitorSize[1], cellSize[0], cellSize[1]))
    #
    #     # iterate over all video-sources
    #     for idx, videosource in enumerate(videosources):
    #         # generate a pipeline for this videosource which
    #         #  - scales the video to the requested size
    #         #  - removes n px of the video (n = 5 if the video is highlighted, else 0)
    #         #  - adds a colored border of n px around the video (n = 5 if the video is highlighted, else 0)
    #         #  - overlays the index of the video as text in the top left corner
    #         #  - ends in a known & named output
    #         previewbin = Gst.parse_bin_from_description("""
    #             videoscale name=in !
    #             capsfilter name=caps !
    #             videobox name=crop top=0 left=0 bottom=0 right=0 !
    #             videobox fill=red top=-0 left=-0 bottom=-0 right=-0 name=add !
    #             textoverlay color=0xFFFFFFFF halignment=left valignment=top xpad=10 ypad=5 font-desc="sans 35" name=text !
    #             identity name=out
    #         """, False)
    #
    #         # name the bin and add it
    #         previewbin.set_name('previewbin-{}'.format(idx))
    #         self.pipeline.add(previewbin)
    #         self.previewbins.append(previewbin)
    #
    #         # set the overlay-text
    #         previewbin.get_by_name('text').set_property('text', str(idx))
    #
    #         # query the video-source caps and extract its size
    #         caps = videosource.get_static_pad('src').query_caps(None)
    #         capsstruct = caps.get_structure(0)
    #         srcSize = (
    #             capsstruct.get_int('width')[1],
    #             capsstruct.get_int('height')[1],
    #         )
    #
    #         # calculate the ideal scale factor and scale the sizes
    #         f = max(srcSize[0] / cellSize[0], srcSize[1] / cellSize[1])
    #         scaleSize = (
    #             srcSize[0] / f,
    #             srcSize[1] / f,
    #         )
    #
    #         # calculate the top/left coordinate
    #         coord = (
    #             place[0] * cellSize[0] + (cellSize[0] - scaleSize[0]) / 2,
    #             place[1] * cellSize[1] + (cellSize[1] - scaleSize[1]) / 2,
    #         )
    #
    #         print("placing videosource {} of size {}×{} scaled by {} to {}×{} into a {}×{} px cell ({}/{}) at position ({}/{})".format(
    #             idx, srcSize[0], srcSize[1], f, scaleSize[0], scaleSize[1], cellSize[0], cellSize[1], place[0], place[1], coord[0], coord[1]))
    #
    #         # link the videosource to the input of the preview-bin
    #         videosource.link(previewbin.get_by_name('in'))
    #
    #         # create and set the caps for the preview-scaler
    #         scalecaps = Gst.Caps.new_empty_simple('video/x-raw')
    #         scalecaps.set_value('width', round(scaleSize[0]))
    #         scalecaps.set_value('height', round(scaleSize[1]))
    #         previewbin.get_by_name('caps').set_property('caps', scalecaps)
    #
    #         # request a pad from the quadmixer and configure x/y position
    #         sinkpad = quadmix.get_request_pad('sink_%u')
    #         sinkpad.set_property('xpos', round(coord[0]))
    #         sinkpad.set_property('ypos', round(coord[1]))
    #
    #         # link the output of the preview-bin to the mixer
    #         previewbin.get_by_name('out').link(quadmix)
    #
    #         # increment grid position
    #         place[0] += 1
    #         if place[0] >= grid[0]:
    #             place[1] += 1
    #             place[0] = 0

    # def createDistributor(self, videosource, name):
    #     """create a simple ×2 distributor"""
    #     distributor = Gst.parse_bin_from_description("""
    #         tee name=t
    #         t. ! queue name=a
    #         t. ! queue name=b
    #     """, False)
    #
    #     # set a name and add to pipeline
    #     distributor.set_name('distributor({0})'.format(name))
    #     self.pipeline.add(distributor)
    #
    #     # link input to the tee
    #     videosource.link(distributor.get_by_name('t'))
    #     return distributor

    # def createDummyCamSources(self):
    #     """create test-video-sources from files or urls"""
    #     # TODO make configurable
    #     uris = ('file:///home/peter/122.mp4', 'file:///home/peter/10025.mp4',)
    #     for idx, uri in enumerate(uris):
    #         # create a bin for a simulated camera input
    #         # force the input resolution to 1024x576 because that way the following elements
    #         # in the pipeline can know the size even if the file is not yet loaded. the quadmixer
    #         # is not resize-capable
    #         camberabin = Gst.parse_bin_from_description("""
    #             uridecodebin name=input
    #             input. ! videoconvert ! videoscale ! videorate ! video/x-raw,width=1024,height=576,framerate=25/1 ! identity name=video_src
    #             input. ! audioconvert name=audio_src
    #         """, False)
    #
    #         # set name and uri
    #         camberabin.set_name('dummy-camberabin({0})'.format(uri))
    #         camberabin.get_by_name('input').set_property('uri', uri)
    #
    #         # add to pipeline and pass the bin upstream
    #         self.pipeline.add(camberabin)
    #         yield camberabin

    # def createCamSources(self):
    #     """create real-video-sources from the bmd-drivers"""
    #     # TODO make number of installed cams configurable
    #     for cam in range(2):
    #         # create a bin for camera input
    #         camberabin = Gst.parse_bin_from_description("""
    #             decklinksrc name=input input=sdi input-mode=1080p25
    #             input. ! videoconvert ! videoscale ! videorate ! video/x-raw,width=1920,height=1080,framerate=25/1 ! identity name=video_src
    #             input. ! audioconvert name=audio_src
    #         """, False)
    #
    #         # set name and subdevice
    #         camberabin.set_name('camberabin({0})'.format(cam))
    #         camberabin.get_by_name('input').set_property('subdevice', cam)
    #
    #         # add to pipeline and pass the bin upstream
    #         self.pipeline.add(camberabin)
    #         yield camberabin

    ### below are access-methods for the ControlServer

    @controlServerEntrypoint
    def status(self):
        """System Status Query"""
        raise NotImplementedError("status command is not implemented yet")

    @controlServerEntrypoint
    def numAudioSources(self):
        """return the number of available audio sources"""
        raise NotImplementedError("audio is not implemented yet")

    @controlServerEntrypoint
    def switchAudio(self, audiosource):
        """switch audio to the selected audio source"""
        raise NotImplementedError("audio is not implemented yet")

    @controlServerEntrypoint
    def numVideoSources(self):
        """return the number of available video sources"""
        return str(len(self.videonames))

    @controlServerEntrypoint
    def switchVideo(self, videosource):
        """switch video to the selected video source"""
        if videosource.isnumeric():
            idx = int(videosource)
            self.log.info("interpreted input as videosource-index %u", idx)
            if idx >= len(self.videonames):
                idx = None
        else:
            try:
                idx = self.videonames.index(videosource)
                self.log.info("interpreted input as videosource-name, resolved to index %u", idx)
            except ValueError:
                idx = None

        if idx is None:
            return 'unknown video-source: %s' % (videosource)

        self.log.info("switching quadmix to video-source %u", idx)
        self.quadmixer.set_active(idx)
        self.videomixer.set_active(idx)
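
    # Illustrative only: the control protocol itself lives in lib.controlserver,
    # so the wire format is not shown here. Called directly, switchVideo()
    # accepts either a numeric index or a configured source name as a string,
    # e.g. (assuming a source named 'cam1' is configured):
    #
    #   pipeline.switchVideo('0')       # by index
    #   pipeline.switchVideo('cam1')    # by name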

    @controlServerEntrypoint
    def fadeVideo(self, videosource):
        """fade video to the selected video source"""
        raise NotImplementedError("fade command is not implemented yet")

    @controlServerEntrypoint
    def setPipVideo(self, videosource):
        """switch the video-source in the PIP to the selected video"""
        raise NotImplementedError("pip commands are not implemented yet")

    @controlServerEntrypoint
    def fadePipVideo(self, videosource):
        """fade the video-source in the PIP to the selected video"""
        raise NotImplementedError("pip commands are not implemented yet")

    class PipPlacements:
        """enumeration of possible PIP placements"""
        TopLeft, TopRight, BottomLeft, BottomRight = range(4)

    @controlServerEntrypoint
    def setPipPlacement(self, placement):
        """place the PIP in the selected position"""
        assert isinstance(placement, Pipeline.PipPlacements)
        raise NotImplementedError("pip commands are not implemented yet")

    @controlServerEntrypoint
    def setPipStatus(self, enabled):
        """show or hide the PIP"""
        raise NotImplementedError("pip commands are not implemented yet")

    @controlServerEntrypoint
    def fadePipStatus(self, enabled):
        """fade the PIP in or out"""
        raise NotImplementedError("pip commands are not implemented yet")

    class StreamContents:
        """enumeration of possible stream contents"""
        Live, Pause, NoStream = range(3)

    @controlServerEntrypoint
    def selectStreamContent(self, content):
        """switch the livestream-content between the selected mixer output, the pause-image or the nostream-image"""
        assert isinstance(content, Pipeline.StreamContents)
        raise NotImplementedError("pause/nostream switching is not implemented yet")