diff --git a/main.py b/main.py
index d944afe..d08fbcd 100644
--- a/main.py
+++ b/main.py
@@ -857,8 +857,11 @@ class Broadcast:
 
 
 class Video (GameObject):
+    cap: VideoCapture | None
+    current: Surface | None
     fps: int
-    pausing: bool = False
+    path: str
+    pausing: bool = False
     sound: Sound | None
     surfaces: list[Surface]
 
@@ -868,86 +871,19 @@ class Video (GameObject):
         path: str,
     ):
         super ().__init__ (game)
-        self.pausing = False
-        (self.surfaces, self.fps) = self._create_surfaces (path)
+        self.path = path
+        self.cap = None
+        self.current = None
         self.sound = self._create_sound (path)
         self.stop ()
 
-    def _create_sound (
-        self,
-        path: str,
-    ) -> Sound | None:
-        bytes_io = BytesIO ()
-        try:
-            from pydub import AudioSegment
-            audio = AudioSegment.from_file (path, format = path.split ('.')[-1])
-        except ModuleNotFoundError:
-            return None
-        audio.export (bytes_io, format = 'wav')
-        bytes_io.seek (0)
-        return pygame.mixer.Sound (bytes_io)
-
-    def _create_surfaces (
-        self,
-        path: str,
-    ) -> tuple[list[Surface], int]:
-        cap = self._load (path)
-        surfaces: list[Surface] = []
-        if cap is None:
-            return ([], FPS)
-        fps = int (cap.get (cv2.CAP_PROP_FPS))
-        while cap.isOpened ():
-            frame = self._read_frame (cap)
-            if frame is None:
-                break
-            surfaces.append (self._convert_to_surface (frame))
-        new_surfaces: list[Surface] = []
-        for i in range (len (surfaces) * FPS // fps):
-            new_surfaces.append (surfaces[i * fps // FPS])
-        return (new_surfaces, fps)
-
-    def _load (
-        self,
-        path: str,
-    ) -> VideoCapture | None:
-        """
-        OpenCV で動画を読込む.
-        """
-        cap = VideoCapture (path)
-        if cap.isOpened ():
-            return cap
-        return None
-
-    def _read_frame (
-        self,
-        cap: VideoCapture,
-    ) -> np.ndarray | None:
-        """
-        動画のフレームを読込む.
-        """
-        ret: bool
-        frame: np.ndarray
-        (ret, frame) = cap.read ()
-        if ret:
-            return frame
-        return None
-
-    def _convert_to_surface (
-        self,
-        frame: np.ndarray,
-    ) -> Surface:
-        frame = cv2.cvtColor (frame, cv2.COLOR_BGR2RGB)
-        frame_surface = pygame.surfarray.make_surface (frame)
-        frame_surface = pygame.transform.rotate (frame_surface, -90)
-        frame_surface = pygame.transform.flip (frame_surface, True, False)
-        return frame_surface
-
     def play (
         self,
     ) -> None:
         self.enabled = True
-        self.pausing = False
-        if self.sound is not None:
+        self.cap = VideoCapture (self.path)
+        self.frame = 0
+        if self.sound:
             self.sound.play ()
 
     def stop (
@@ -955,28 +891,53 @@ class Video (GameObject):
     ) -> None:
         self.enabled = False
         self.frame = 0
-
-    def pause (
-        self,
-    ) -> None:
-        self.pausing = True
+        if self.cap:
+            self.cap.release ()
+            self.cap = None
 
     def redraw (
        self,
     ) -> None:
-        surface = pygame.transform.scale (self.surfaces[self.frame], (self.width, self.height))
-        self.game.screen.blit (surface, (self.x, self.y))
+        if (not self.enabled) or (self.cap is None):
+            return
+        ret, frame = self.cap.read ()
+        if not ret:
+            self.stop ()
+            return
+        surf = self._convert_to_surface (frame)
+        surf = pygame.transform.scale (surf, (self.width, self.height))
+        self.game.screen.blit (surf, (self.x, self.y))
         super ().redraw ()
 
     def update (
         self,
     ) -> None:
-        if self.frame >= len (self.surfaces) - 1:
-            self.stop ()
-        if self.pausing:
-            self.frame -= 1
         super ().update ()
 
+    def _convert_to_surface (
+        self,
+        frame: np.ndarray,
+    ) -> Surface:
+        frame = cv2.cvtColor (frame, cv2.COLOR_BGR2RGB)
+        frame_surface = pygame.surfarray.make_surface (frame)
+        frame_surface = pygame.transform.rotate (frame_surface, -90)
+        frame_surface = pygame.transform.flip (frame_surface, True, False)
+        return frame_surface
+
+    def _create_sound (
+        self,
+        path: str,
+    ) -> Sound | None:
+        bytes_io = BytesIO ()
+        try:
+            from pydub import AudioSegment
+            audio = AudioSegment.from_file (path, format = path.split ('.')[-1])
+        except ModuleNotFoundError:
+            return None
+        audio.export (bytes_io, format = 'wav')
+        bytes_io.seek (0)
+        return pygame.mixer.Sound (bytes_io)
+
 
 class NicoVideo (Video):
     def __init__ (