From 23dfc877b821a82a3d1c2598c1bac7300cc41780 Mon Sep 17 00:00:00 2001
From: Stephane Deverly
Date: Mon, 27 Jun 2022 10:49:08 +0200
Subject: [PATCH] Squashed commit of the following:
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

commit beac09836e38a8c46ad1ec1e40308d6241f85513
Merge: 98a85b8 d48163e
Author: Stephane Deverly
Date:   Thu Jun 23 18:30:44 2022 +0200

    Merge remote-tracking branch 'asf/main'

commit 98a85b89f1e5f7a1273823b93f52b2aeb726b953
Author: Stéphane Deverly <39291844+gplsteph@users.noreply.github.com>
Date:   Thu Jun 23 17:12:27 2022 +0200

    8956 cmx transitions (#1)

    Revisited how transitions are handled by the cmx 3600 adapter.

    - Transition lines are now collected when parsing EDL lines and
      handled by the ClipHandler.
    - The clip source and record points are replaced with the transition
      values when the line is parsed.
    - The transition in_offset is now 0 and the out_offset is the
      transition duration, matching what the timeline would contain if
      the edit were read from another format.
    - The transition is added to the track just before its clip; no
      further processing is required.
    - Adjusted tests for the new way of handling transitions and added
      some additional checks.
    - Added an EDL read/write/read round trip to check that the reader
      and writer are compatible.
    - Removed code that was no longer needed.

Signed-off-by: Stephane Deverly
---
 .../opentimelineio/adapters/cmx_3600.py | 389 +++++++-----------
 tests/sample_data/dissolve_test_3.edl   |   8 +-
 tests/sample_data/dissolve_test_4.edl   |   4 +-
 tests/test_cmx_3600_adapter.py          | 161 +++++---
 4 files changed, 258 insertions(+), 304 deletions(-)

diff --git a/src/py-opentimelineio/opentimelineio/adapters/cmx_3600.py b/src/py-opentimelineio/opentimelineio/adapters/cmx_3600.py
index 59ec50f9b..13d8a2307 100644
--- a/src/py-opentimelineio/opentimelineio/adapters/cmx_3600.py
+++ b/src/py-opentimelineio/opentimelineio/adapters/cmx_3600.py
@@ -15,7 +15,6 @@
 import copy
 import os
 import re
-import math
 import collections
 
 from .. import (
@@ -88,10 +87,17 @@ def __init__(self, edl_string, rate=24, ignore_timecode_mismatch=False):
 
         # TODO: Sort the tracks V, then A1,A2,etc.
 
-    def add_clip(self, line, comments, rate=24):
+    def add_clip(self, line, comments, rate=24, transition_line=None):
         comment_handler = CommentHandler(comments)
-        clip_handler = ClipHandler(line, comment_handler.handled, rate=rate)
+        clip_handler = ClipHandler(
+            line,
+            comment_handler.handled,
+            rate=rate,
+            transition_line=transition_line
+        )
         clip = clip_handler.clip
+        transition = clip_handler.transition
+        # Add unhandled comments as general comments to metadata.
         if comment_handler.unhandled:
             clip.metadata.setdefault("cmx_3600", {})
             clip.metadata['cmx_3600'].setdefault("comments", [])
@@ -108,15 +114,6 @@ def add_clip(self, line, comments, rate=24):
             clip.metadata.setdefault("cmx_3600", {})
             clip.metadata['cmx_3600']['reel'] = clip_handler.reel
 
-        # each edit point between two clips is a transition.
the default is a - # cut in the edl format the transition codes are for the transition - # into the clip - self.add_transition( - clip_handler, - clip_handler.transition_type, - clip_handler.transition_data - ) - edl_rate = clip_handler.edl_rate record_in = opentime.from_timecode( clip_handler.record_tc_in, @@ -174,8 +171,11 @@ def add_clip(self, line, comments, rate=24): # Add clip instances to the tracks tracks = self.tracks_for_channel(clip_handler.channel_code) for track in tracks: + track_transition = transition if len(tracks) > 1: track_clip = copy.deepcopy(clip) + if transition: + track_transition = copy.deepcopy(transition) else: track_clip = clip @@ -213,6 +213,12 @@ def add_clip(self, line, comments, rate=24): track.append(gap) _extend_source_range_duration(track, gap.duration()) + if track_transition: + if len(track) < 1: + raise EDLParseError( + "Transitions can't be at the very beginning of a track" + ) + track.append(track_transition) track.append(track_clip) _extend_source_range_duration(track, track_clip.duration()) @@ -243,12 +249,6 @@ def tracks_for_channel(self, channel_code): # Return a list of actual tracks return [self.tracks_by_name[c] for c in track_names] - def add_transition(self, clip_handler, transition, data): - if transition not in ['C']: - md = clip_handler.clip.metadata.setdefault("cmx_3600", {}) - md["transition"] = transition - md["transition_duration"] = float(data) - def parse_edl(self, edl_string, rate=24): # edl 'events' can be comprised of an indeterminate amount of lines # we are to translating 'events' to a single clip and transition @@ -301,7 +301,7 @@ def parse_edl(self, edl_string, rate=24): line_1 = edl_lines.pop(0) line_2 = edl_lines.pop(0) - + # TODO: check if transitions can happen in this case comments = [] while edl_lines: if re.match(r'^\D', edl_lines[0]): @@ -310,24 +310,45 @@ def parse_edl(self, edl_string, rate=24): break self.add_clip(line_1, comments, rate=rate) self.add_clip(line_2, comments, rate=rate) - + # Check if the first character in the line is a digit elif line[0].isdigit(): + transition_line = None # all 'events' start_time with an edit decision. this is # denoted by the line beginning with a padded integer 000-999 comments = [] + event_id = int(re.match(r'^\d+', line).group(0)) while edl_lines: - # any non-numbered lines after an edit decision should be - # treated as 'comments' - # comments are string tags used by the reader to get extra + # Any non-numbered lines after an edit decision should be + # treated as 'comments'. + # Comments are string tags used by the reader to get extra # information not able to be found in the restricted edl - # format - if re.match(r'^\D', edl_lines[0]): + # format. + # If the current event id is repeated it means that there is + # a transition between the current event and the preceding + # one. We collect it and process it when adding the clip. + m = re.match(r'^\d+', edl_lines[0]) + if not m: comments.append(edl_lines.pop(0)) else: - break - - self.add_clip(line, comments, rate=rate) - + if int(m.group(0)) == event_id: + # It is not possible to have multiple transitions + # for the same event. 
+                            if transition_line:
+                                raise EDLParseError(
+                                    'Invalid transition %s' % edl_lines[0]
+                                )
+                            # Same event id, this is a transition
+                            transition_line = edl_lines.pop(0)
+                        else:
+                            # New event, stop collecting comments and transitions
+                            break
+
+            self.add_clip(
+                line,
+                comments,
+                rate=rate,
+                transition_line=transition_line
+            )
 
         else:
             raise EDLParseError('Unknown event type')
 
@@ -344,20 +365,26 @@ class ClipHandler(object):
         r'.*\.(?P<range>\[(?P<start>[0-9]+)-(?P<end>[0-9]+)\])\.\w+$'
     )
 
-    def __init__(self, line, comment_data, rate=24):
+    def __init__(self, line, comment_data, rate=24, transition_line=None):
         self.clip_num = None
         self.reel = None
         self.channel_code = None
         self.edl_rate = rate
         self.transition_id = None
         self.transition_data = None
+        self.transition_type = None
         self.source_tc_in = None
         self.source_tc_out = None
         self.record_tc_in = None
         self.record_tc_out = None
-
+        self.clip = None
+        self.transition = None
         self.parse(line)
+        if transition_line:
+            self.parse(transition_line)
         self.clip = self.make_clip(comment_data)
+        if transition_line:
+            self.transition = self.make_transition(comment_data)
 
     def is_image_sequence(self, comment_data):
         return self.image_sequence_pattern.search(
@@ -476,11 +503,8 @@ def make_clip(self, comment_data):
             }
         }
 
-        # In transitions, some of the source clip metadata might fall in the
-        # transition clip event
+        # Get the clip name from "TO CLIP NAME" if present
         if 'dest_clip_name' in comment_data:
-            previous_meta = clip.metadata.setdefault('previous_metadata', {})
-            previous_meta['source_clip_name'] = clip.name
             clip.name = comment_data['dest_clip_name']
 
         if 'locators' in comment_data:
@@ -550,7 +574,7 @@ def parse(self, line):
            # denotes frame count
            # i haven't figured out how the key transitions (K, KB, KO) work
            (
-               self.clip_num,
+               self.transition_id,
                self.reel,
                self.channel_code,
                self.transition_type,
@@ -562,19 +586,26 @@ def parse(self, line):
            ) = fields
 
        elif field_count == 8:
+           edit_type = None
            # no transition data
            # this is for basic cuts
            (
                self.clip_num,
                self.reel,
                self.channel_code,
-               self.transition_type,
+               edit_type,
                self.source_tc_in,
                self.source_tc_out,
                self.record_tc_in,
                self.record_tc_out
            ) = fields
-
+           # Double check it is a cut
+           if edit_type not in ["C"]:
+               raise EDLParseError(
+                   'incorrect edit type {} in form statement: {}'.format(
+                       edit_type, line,
+                   )
+               )
        else:
            raise EDLParseError(
                'incorrect number of fields [{0}] in form statement: {1}'
@@ -600,6 +631,75 @@ def parse(self, line):
                )
            )
 
+    def make_transition(self, comment_data):
+        # Do some sanity checks
+        if not self.clip:
+            raise RuntimeError("Transitions can't be handled without a clip")
+        if self.transition_id != self.clip_num:
+            raise EDLParseError(
+                'transition and event id mismatch: {} vs {}'.format(
+                    self.transition_id, self.clip_num,
+                )
+            )
+        if re.match(r'W(\d{3})', self.transition_type):
+            otio_transition_type = "SMPTE_Wipe"
+        elif self.transition_type in ['D']:
+            otio_transition_type = schema.TransitionTypes.SMPTE_Dissolve
+        else:
+            raise EDLParseError(
+                "Transition type '{}' not supported by the CMX EDL reader "
+                "currently.".format(self.transition_type)
+            )
+        # TODO: support delayed transitions as described here:
+        # https://xmil.biz/EDL-X/CMX3600.pdf
+        transition_duration = opentime.RationalTime(
+            float(self.transition_data),
+            self.clip.source_range.duration.rate
+        )
+        # Note: Transitions in EDLs are unconventionally represented.
+ # + # Where a transition might normally be visualized like: + # |---57.0 Trans 43.0----| + # |------Clip1 102.0------|----------Clip2 143.0----------|Clip3 24.0| + # + # In an EDL it can be thought of more like this: + # |---0.0 Trans 100.0----| + # |Clip1 45.0|----------------Clip2 200.0-----------------|Clip3 24.0| + # + # So the transition starts at the beginning of the clip with `duration` + # frames from the previous clip. + + # Give the transition a detailed name if we can + transition_name = '{} to {}'.format( + otio_transition_type, + self.clip.name, + ) + if 'dest_clip_name' in comment_data: + if 'clip_name' in comment_data: + transition_name = '{} from {} to {}'.format( + otio_transition_type, + comment_data['clip_name'], + comment_data['dest_clip_name'], + ) + + new_trx = schema.Transition( + name=transition_name, + # only supported type at the moment + transition_type=otio_transition_type, + metadata={ + 'cmx_3600': { + 'transition': self.transition_type, + 'transition_duration': transition_duration.value, + } + }, + ) + new_trx.in_offset = opentime.RationalTime( + 0, + transition_duration.rate + ) + new_trx.out_offset = transition_duration + return new_trx + class CommentHandler(object): # this is the for that all comment 'id' tags take @@ -651,218 +751,6 @@ def parse(self, comment): self.unhandled.append(stripped) -def _get_next_clip(start_index, track): - """Get the next clip with a non-zero duration""" - # Iterate over the following clips and return the first "real" one - for clip in track[start_index + 1:]: - if clip.duration().value > 0: - return clip - - return None - - -def _expand_transitions(timeline): - """Convert clips with metadata/transition == 'D' into OTIO transitions.""" - - tracks = timeline.tracks - remove_list = [] - replace_or_insert_list = [] - append_list = [] - for track in tracks: - # avid inserts an extra clip for the source - prev_prev = None - prev = None - for index, clip in enumerate(track): - transition_type = clip.metadata.get('cmx_3600', {}).get( - 'transition', - 'C' - ) - - if transition_type == 'C': - # nothing to do, continue to the next iteration of the loop - prev_prev = prev - prev = clip - continue - - wipe_match = re.match(r'W(\d{3})', transition_type) - if wipe_match is not None: - otio_transition_type = "SMPTE_Wipe" - elif transition_type in ['D']: - otio_transition_type = schema.TransitionTypes.SMPTE_Dissolve - else: - raise EDLParseError( - "Transition type '{}' not supported by the CMX EDL reader " - "currently.".format(transition_type) - ) - - # Using transition data for duration (with clip duration as backup.) - # Link: https://ieeexplore.ieee.org/document/7291839 - # Citation: "ST 258:2004 - SMPTE Standard - For Television - Transfer - # of Edit Decision Lists," in ST 258:2004 , vol., no., pp.1-37, - # 6 April 2004, doi: 10.5594/SMPTE.ST258.2004. 
- if clip.metadata.get("cmx_3600", {}).get("transition_duration"): - transition_duration = opentime.RationalTime( - clip.metadata["cmx_3600"]["transition_duration"], - clip.duration().rate - ) - else: - transition_duration = clip.duration() - - # EDL doesn't have enough data to know where the cut point was, so - # this arbitrarily puts it in the middle of the transition - pre_cut = math.floor(transition_duration.value / 2) - post_cut = transition_duration.value - pre_cut - mid_tran_cut_pre_duration = opentime.RationalTime( - pre_cut, - transition_duration.rate - ) - mid_tran_cut_post_duration = opentime.RationalTime( - post_cut, - transition_duration.rate - ) - - # Because transitions can have two event entries followed by - # comments, some of the previous clip's metadata might land in the - # transition clip - if prev: - if 'previous_metadata' in clip.metadata: - prev_metadata = clip.metadata['previous_metadata'] - if 'source_clip_name' in prev_metadata: - # Give the transition the event name and the - # previous clip the appropriate name - prev.name = prev_metadata['source_clip_name'] - - # expand the previous - expansion_clip = None - if prev and not prev_prev: - expansion_clip = prev - elif prev_prev: - # If the previous clip is continuous to this one, we can combine - if _transition_clips_continuous(prev_prev, prev): - expansion_clip = prev_prev - if prev: - remove_list.append((track, prev)) - else: - expansion_clip = prev - - _extend_source_range_duration(expansion_clip, mid_tran_cut_pre_duration) - - # rebuild the clip as a transition - new_trx = schema.Transition( - name=clip.name, - # only supported type at the moment - transition_type=otio_transition_type, - metadata=clip.metadata, - ) - new_trx.in_offset = mid_tran_cut_pre_duration - new_trx.out_offset = mid_tran_cut_post_duration - - # expand the next_clip or contract this clip - keep_transition_clip = False - next_clip = _get_next_clip(index, track) - if next_clip: - if _transition_clips_continuous(clip, next_clip): - sr = next_clip.source_range - next_clip.source_range = opentime.TimeRange( - sr.start_time - mid_tran_cut_post_duration, - sr.duration + mid_tran_cut_post_duration, - ) - else: - # The clip was only expressed in the transition, keep it, - # though it needs the previous clip transition time removed - keep_transition_clip = True - - sr = clip.source_range - clip.source_range = opentime.TimeRange( - sr.start_time + mid_tran_cut_pre_duration, - sr.duration - mid_tran_cut_pre_duration, - ) - else: - fill = schema.Gap( - source_range=opentime.TimeRange( - duration=mid_tran_cut_post_duration, - start_time=opentime.RationalTime( - 0, - transition_duration.rate - ) - ) - ) - append_list.append((track, fill)) - - # in from to - replace_or_insert_list.append((keep_transition_clip, track, clip, new_trx)) - - # Scrub some temporary metadata stashed on clips about their - # neighbors - if 'previous_metadata' in clip.metadata: - del(clip.metadata['previous_metadata']) - - if 'previous_metadata' in new_trx.metadata: - del(new_trx.metadata['previous_metadata']) - - prev = clip - - for (insert, track, from_clip, to_transition) in replace_or_insert_list: - clip_index = track.index(from_clip) - if insert: - track.insert(clip_index, to_transition) - else: - track[clip_index] = to_transition - - for (track, clip_to_remove) in list(set(remove_list)): - # if clip_to_remove in track: - track.remove(clip_to_remove) - - for (track, clip) in append_list: - track.append(clip) - - return timeline - - -def _transition_clips_continuous(clip_a, 
clip_b): - """Tests if two clips are continuous. They are continuous if the following - conditions are met: - 1. clip_a's source range ends on the last frame before clip_b's - 2a. If clip_a's name matches clip_b's - - or - - 2b. clip_a name matches metadata source_clip_name in clip_b - - or - - 2c. Reel name matches - - or - - 2d. Both clips are gaps - - - This is specific to how this adapter parses EDLs and is meant to be run only - within _expand_transitions. - """ - clip_a_end = clip_a.source_range.end_time_exclusive() - if not clip_a_end == clip_b.source_range.start_time: - return False - - if all(isinstance(clip, schema.Gap) for clip in (clip_a, clip_b)): - return True - - # The time ranges are continuous, match the names - if (clip_a.name == clip_b.name): - return True - - def reelname(clip): - return clip.metadata['cmx_3600']['reel'] - - try: - if reelname(clip_a) == reelname(clip_b): - return True - except KeyError: - pass - - try: - return clip_a.name == clip_b.metadata['previous_metadata']['source_clip_name'] - except KeyError: - pass - - return False - - def read_from_string(input_str, rate=24, ignore_timecode_mismatch=False): """Reads a CMX Edit Decision List (EDL) from a string. Since EDLs don't contain metadata specifying the rate they are meant @@ -894,7 +782,6 @@ def read_from_string(input_str, rate=24, ignore_timecode_mismatch=False): ignore_timecode_mismatch=ignore_timecode_mismatch ) result = parser.timeline - result = _expand_transitions(result) return result diff --git a/tests/sample_data/dissolve_test_3.edl b/tests/sample_data/dissolve_test_3.edl index 46f6d8a74..8d875147d 100644 --- a/tests/sample_data/dissolve_test_3.edl +++ b/tests/sample_data/dissolve_test_3.edl @@ -2,14 +2,14 @@ TITLE: dissolve test 3 FCM: NON-DROP FRAME 001 AX V C 01:00:03:23 01:00:06:12 01:00:00:00 01:00:02:13 -* FROM CLIP NAME: Clip A.mov +* FROM CLIP NAME: Clip_A.mov 002 AX V C 01:00:06:00 01:00:06:00 01:00:02:13 01:00:02:13 FCM: NON-DROP FRAME 002 AX V D 030 01:00:33:22 01:00:35:04 01:00:02:13 01:00:03:19 EFFECTS NAME IS CROSS DISSOLVE -* FROM CLIP NAME: Clip B.mov -* TO CLIP NAME: Clip C.mov +* FROM CLIP NAME: Clip_B.mov +* TO CLIP NAME: Clip_C.mov 003 AX V C 01:00:00:00 01:00:01:22 01:00:03:19 01:00:05:17 -* FROM CLIP NAME: Clip D.mov +* FROM CLIP NAME: Clip_D.mov diff --git a/tests/sample_data/dissolve_test_4.edl b/tests/sample_data/dissolve_test_4.edl index 7d59ceb1c..1263d8339 100644 --- a/tests/sample_data/dissolve_test_4.edl +++ b/tests/sample_data/dissolve_test_4.edl @@ -2,10 +2,10 @@ TITLE: TRANSITION_TEST_2 FCM: NON-DROP FRAME 001 ABC0000. V C 01:00:06:18 01:00:08:00 01:04:11:17 01:04:12:23 002 ABC0010. V C 01:00:06:15 01:00:08:18 01:04:12:23 01:04:15:02 -003 ABC0010. V C 01:00:08:18 01:00:08:18 01:04:15:02 01:04:15:02 +003 ABC0020. V C 01:00:08:18 01:00:08:18 01:04:15:02 01:04:15:02 003 ABC0020. V D 035 01:00:06:22 01:00:10:07 01:04:15:02 01:04:18:11 * BLEND, DISSOLVE -004 ABC0020. V C 01:00:10:07 01:00:10:07 01:04:18:11 01:04:18:11 +004 ABC0030. V C 01:00:10:07 01:00:10:07 01:04:18:11 01:04:18:11 004 ABC0030. V D 064 01:00:06:10 01:00:09:22 01:04:18:11 01:04:21:23 * BLEND, DISSOLVE 005 ABC0040. 
V C 01:00:08:14 01:00:12:14 01:04:21:23 01:04:25:23
diff --git a/tests/test_cmx_3600_adapter.py b/tests/test_cmx_3600_adapter.py
index 36cda76c2..5ff58e82d 100755
--- a/tests/test_cmx_3600_adapter.py
+++ b/tests/test_cmx_3600_adapter.py
@@ -490,51 +490,76 @@ def test_imagesequence_write(self):
 
     def test_dissolve_parse(self):
         tl = otio.adapters.read_from_file(DISSOLVE_TEST)
-        self.assertEqual(len(tl.tracks[0]), 3)
+        # clip/transition/clip/clip
+        self.assertEqual(len(tl.tracks[0]), 4)
 
         self.assertTrue(isinstance(tl.tracks[0][1], otio.schema.Transition))
-
-        self.assertEqual(tl.tracks[0][0].duration().value, 14)
-        self.assertEqual(tl.tracks[0][2].duration().value, 6)
+        self.assertEqual(tl.tracks[0][0].duration().value, 9)
+        # The visible range must contain all the frames needed for the
+        # transition: edit duration + transition duration
+        self.assertEqual(tl.tracks[0][0].visible_range().duration.to_frames(), 19)
+        self.assertEqual(tl.tracks[0][0].name, "clip_A")
+        self.assertEqual(tl.tracks[0][1].duration().value, 10)
+        self.assertEqual(tl.tracks[0][1].name, "SMPTE_Dissolve from clip_A to clip_B")
+        self.assertEqual(tl.tracks[0][2].duration().value, 10)
+        self.assertEqual(tl.tracks[0][2].visible_range().duration.value, 10)
+        self.assertEqual(tl.tracks[0][2].name, "clip_B")
+        self.assertEqual(tl.tracks[0][3].duration().value, 1)
 
     def test_dissolve_parse_middle(self):
         tl = otio.adapters.read_from_file(DISSOLVE_TEST_2)
-        self.assertEqual(len(tl.tracks[0]), 3)
+        trck = tl.tracks[0]
+        # 3 clips and 1 transition
+        self.assertEqual(len(trck), 4)
 
-        self.assertTrue(isinstance(tl.tracks[0][1], otio.schema.Transition))
+        self.assertTrue(isinstance(trck[1], otio.schema.Transition))
 
-        trck = tl.tracks[0]
-        self.assertEqual(trck[0].duration().value, 10)
-        self.assertEqual(trck[2].source_range.start_time.value, 86400 + 201)
+        self.assertEqual(trck[0].duration().value, 5)
+        self.assertEqual(trck[0].visible_range().duration.to_frames(), 15)
+        self.assertEqual(trck[1].duration().value, 10)
+        self.assertEqual(trck[1].name, "SMPTE_Dissolve from clip_A to clip_B")
+
+        self.assertEqual(
+            trck[2].source_range.start_time.value,
+            otio.opentime.from_timecode('01:00:08:04', 24).value
+        )
+        self.assertEqual(trck[2].name, "clip_B")
         self.assertEqual(trck[2].duration().value, 10)
+        self.assertEqual(trck[2].visible_range().duration.value, 10)
 
     def test_dissolve_parse_full_clip_dissolve(self):
         tl = otio.adapters.read_from_file(DISSOLVE_TEST_3)
-        self.assertEqual(len(tl.tracks[0]), 5)
+        self.assertEqual(len(tl.tracks[0]), 4)
 
-        self.assertTrue(isinstance(tl.tracks[0][2], otio.schema.Transition))
+        self.assertTrue(isinstance(tl.tracks[0][1], otio.schema.Transition))
 
         trck = tl.tracks[0]
         clip_a = trck[0]
-        self.assertEqual(clip_a.name, "Clip A.mov")
+        self.assertEqual(clip_a.name, "Clip_A.mov")
         self.assertEqual(clip_a.duration().value, 61)
+        self.assertEqual(clip_a.visible_range().duration.value, 61 + 30)
 
-        clip_b = trck[1]
-        self.assertEqual(clip_b.name, "Clip B.mov")
-        self.assertEqual(clip_b.source_range.start_time.value, 86400 + 144)
-        self.assertEqual(clip_b.duration().value, 15)
-
-        transition = trck[2]
-        self.assertEqual(transition.in_offset.value, 15)
-        self.assertEqual(transition.out_offset.value, 15)
+        transition = trck[1]
+        # Note: the clip names in the EDL are wrong; the transition is
+        # actually from Clip_A to Clip_B
+        self.assertEqual(
+            transition.name,
+            "SMPTE_Dissolve from Clip_B.mov to 
Clip_C.mov" + ) + self.assertEqual(transition.in_offset.value, 0) + self.assertEqual(transition.out_offset.value, 30) - clip_c = trck[3] - self.assertEqual(clip_c.name, "Clip C.mov") - self.assertEqual(clip_c.source_range.start_time.value, 86400 + 829) - self.assertEqual(clip_c.duration().value, 15) + clip_c = trck[2] + self.assertEqual(clip_c.name, "Clip_C.mov") + self.assertEqual(clip_c.source_range.start_time.value, 86400 + (33 * 24 + 22)) + self.assertEqual(clip_c.duration().value, 30) + self.assertEqual(clip_c.visible_range().duration.value, 30) - clip_d = trck[4] - self.assertEqual(clip_d.name, "Clip D.mov") + clip_d = trck[3] + self.assertEqual(clip_d.name, "Clip_D.mov") self.assertEqual(clip_d.source_range.start_time.value, 86400) self.assertEqual(clip_d.duration().value, 46) @@ -553,18 +578,22 @@ def test_dissolve_with_odd_frame_count_maintains_length(self): def test_wipe_parse(self): tl = otio.adapters.read_from_file(WIPE_TEST) - self.assertEqual(len(tl.tracks[0]), 3) + self.assertEqual(len(tl.tracks[0]), 4) wipe = tl.tracks[0][1] self.assertTrue(isinstance(wipe, otio.schema.Transition)) - self.assertEqual(wipe.transition_type, "SMPTE_Wipe") self.assertEqual(wipe.metadata["cmx_3600"]["transition"], "W001") - self.assertEqual(tl.tracks[0][0].duration().value, 14) - self.assertEqual(tl.tracks[0][2].duration().value, 6) + self.assertEqual(tl.tracks[0][0].duration().value, 9) + self.assertEqual(tl.tracks[0][0].visible_range().duration.value, 19) + + self.assertEqual(tl.tracks[0][2].duration().value, 10) + self.assertEqual(tl.tracks[0][2].visible_range().duration.value, 10) + + self.assertEqual(tl.tracks[0][3].duration().value, 1) - def test_fade_to_black_ends_with_gap(self): + def test_fade_to_black(self): # EXERCISE tl = otio.adapters.read_from_string( '1 CLPA V C 00:00:03:18 00:00:12:15 00:00:00:00 00:00:08:21\n' @@ -576,10 +605,45 @@ def test_fade_to_black_ends_with_gap(self): # VALIDATE self.assertEqual(len(tl.tracks[0]), 3) self.assertTrue(isinstance(tl.tracks[0][1], otio.schema.Transition)) - self.assertTrue(isinstance(tl.tracks[0][2], otio.schema.Gap)) - self.assertEqual(tl.tracks[0][2].duration().value, 12) + self.assertTrue(isinstance(tl.tracks[0][2], otio.schema.Clip)) + self.assertEqual(tl.tracks[0][2].media_reference.generator_kind, 'black') + self.assertEqual(tl.tracks[0][2].duration().value, 24) self.assertEqual(tl.tracks[0][2].source_range.start_time.value, 0) + def test_edl_round_trip_with_transitions(self): + with tempfile.TemporaryDirectory() as temp_dir: + # Notes: + # - the writer does not handle wipes, only dissolves + # - the writer can generate invalid EDLs if spaces are in reel names. 
+ for edl_file in [ + DISSOLVE_TEST, + DISSOLVE_TEST_2, + DISSOLVE_TEST_3, + DISSOLVE_TEST_4 + ]: + edl_name = os.path.basename(edl_file) + timeline = otio.adapters.read_from_file(edl_file) + tmp_path = os.path.join( + temp_dir, + 'test_edl_round_trip_{}'.format(edl_name) + ) + otio.adapters.write_to_file(timeline, tmp_path) + + result = otio.adapters.read_from_file(tmp_path) + self.assertEqual(len(timeline.tracks), len(result.tracks)) + for track, res_track in zip(timeline.tracks, result.tracks): + self.assertEqual(len(track), len(res_track)) + for child, res_child in zip(track, res_track): + self.assertEqual(type(child), type(res_child)) + if isinstance(child, otio.schema.Transition): + self.assertEqual(child.in_offset, res_child.in_offset) + self.assertEqual(child.out_offset, res_child.out_offset) + self.assertEqual( + child.transition_type, res_child.transition_type + ) + else: + self.assertEqual(child.source_range, res_child.source_range) + def test_edl_25fps(self): # EXERCISE edl_path = EXEMPLE_25_FPS_PATH @@ -636,8 +700,8 @@ def test_read_generators(self): # EXERCISE tl = otio.adapters.read_from_string( '1 BL V C 00:00:00:00 00:00:01:00 00:00:00:00 00:00:01:00\n' - '1 BLACK V C 00:00:00:00 00:00:01:00 00:00:01:00 00:00:02:00\n' - '1 BARS V C 00:00:00:00 00:00:01:00 00:00:02:00 00:00:03:00\n', + '2 BLACK V C 00:00:00:00 00:00:01:00 00:00:01:00 00:00:02:00\n' + '3 BARS V C 00:00:00:00 00:00:01:00 00:00:02:00 00:00:03:00\n', adapter_name="cmx_3600" ) @@ -1052,11 +1116,13 @@ def test_can_read_frame_cut_points(self): # VALIDATE self.assertEqual(tl.duration().value, 276) - self.assertEqual(len(tl.tracks[0]), 3) - self.assertEqual(tl.tracks[0][0].duration().value, 70) - self.assertEqual(tl.tracks[0][1].in_offset.value, 13) - self.assertEqual(tl.tracks[0][1].out_offset.value, 14) - self.assertEqual(tl.tracks[0][2].duration().value, 206) + self.assertEqual(len(tl.tracks[0]), 4) + self.assertEqual(tl.tracks[0][0].duration().value, 57) + self.assertEqual(tl.tracks[0][0].visible_range().duration.value, 57 + 27) + self.assertEqual(tl.tracks[0][1].in_offset.value, 0) + self.assertEqual(tl.tracks[0][1].out_offset.value, 27) + self.assertEqual(tl.tracks[0][2].duration().value, 27) + self.assertEqual(tl.tracks[0][3].duration().value, 276 - 84) def test_speed_effects(self): tl = otio.adapters.read_from_file( @@ -1143,16 +1209,17 @@ def test_three_part_transition(self): tl = otio.adapters.read_from_file(DISSOLVE_TEST_4) self.assertEqual(len(tl.tracks[0]), 8) + self.assertEqual(tl.tracks[0][0].duration().value, 30.0) + self.assertEqual(tl.tracks[0][1].duration().value, 51.0) + self.assertEqual(tl.tracks[0][1].visible_range().duration.value, 51 + 35) self.assertIsInstance(tl.tracks[0][2], otio.schema.Transition) - self.assertIsInstance(tl.tracks[0][4], otio.schema.Transition) - self.assertEqual(tl.tracks[0][2].duration().value, 35.0) + self.assertEqual(tl.tracks[0][3].duration().value, 81.0) + self.assertEqual(tl.tracks[0][3].visible_range().duration.value, 81 + 64) + self.assertIsInstance(tl.tracks[0][4], otio.schema.Transition) self.assertEqual(tl.tracks[0][4].duration().value, 64.0) - - self.assertEqual(tl.tracks[0][0].duration().value, 30.0) - self.assertEqual(tl.tracks[0][1].duration().value, 68.0) - self.assertEqual(tl.tracks[0][3].duration().value, 96.0) - self.assertEqual(tl.tracks[0][5].duration().value, 52.0) + self.assertEqual(tl.tracks[0][5].duration().value, 84.0) + self.assertEqual(tl.tracks[0][5].visible_range().duration.value, 84.0) self.assertEqual(tl.tracks[0][6].duration().value, 
96.0) self.assertEqual(tl.tracks[0][7].duration().value, 135.0)
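
A minimal usage sketch of the behavior this patch introduces (not part of the patch itself), reusing the fade-to-black EDL from test_fade_to_black above. Reading it through the cmx_3600 adapter yields Clip, Transition, Clip on the track, with the transition placed just before the clip it leads into, in_offset 0, and out_offset equal to the dissolve duration:

import opentimelineio as otio

# EDL from test_fade_to_black: one clip followed by a 24-frame
# dissolve to black. The repeated event number "2" marks the
# "D 024" line as a transition belonging to the same event.
EDL = (
    '1 CLPA V C 00:00:03:18 00:00:12:15 00:00:00:00 00:00:08:21\n'
    '2 CLPA V C 00:00:12:15 00:00:12:15 00:00:08:21 00:00:08:21\n'
    '2 BL V D 024 00:00:00:00 00:00:01:00 00:00:08:21 00:00:09:21\n'
)

timeline = otio.adapters.read_from_string(EDL, adapter_name="cmx_3600")
track = timeline.tracks[0]

# The track reads Clip, Transition, Clip; no post-processing pass
# (the old _expand_transitions) is involved anymore.
transition = track[1]
assert isinstance(transition, otio.schema.Transition)
print(transition.in_offset)   # 0 frames at rate 24
print(transition.out_offset)  # 24 frames at rate 24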