videosuffixes.py
#!/usr/bin/env python
# Copy bucket objects, fixing filename and content_type, adding Dublin Core metadata.
# NOTE: environment must set AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY.
MAX = 9999  # safety limit on how many keys to process
# TODO: rename to remove nasavideos/ directory leaving only media-01...06/
# TODO: upload Description from spreadsheet as .txt file with same basename

from collections import defaultdict
from csv import DictReader
import logging
import mimetypes
import os
import pprint
import re

from boto.s3.connection import S3Connection
from boto.s3.key import Key

logging.basicConfig(level=logging.INFO)

metadata_filename = 'metadata.csv'
src_bucket_name = 'nasavideos-shentonfreud'    # mistyped my name :-(
dst_bucket_name = 'nasavideos2-shentonfreude'  # corrected spelling

# Ubuntu /etc/mime.types doesn't know types that Apple does
mime_map = {'m4v': 'video/x-m4v'}
def asciify(text):
    """Force text (from Excel) to ASCII for HTTP headers.

    This loses data: with 'ignore', non-ASCII bytes are simply dropped,
    e.g. 'más' -> u'ms', since ASCII has no accented 'a'.
    """
    return text.decode('ascii', 'ignore')
    #return text.decode('latin-1').encode('ascii', errors='replace')
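
# Example of the data loss (Python 2, assuming the input bytes are UTF-8):
# the accented 'a' arrives as two non-ASCII bytes and 'ignore' drops both,
# so the character disappears entirely:
#   asciify('m\xc3\xa1s')  ->  u'ms'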
# Read metadata from the spreadsheet file.
# Columns: Directory,Media ID,Title,Description,Author,Upload Date,Genres
metadata_dict = {}
with open(metadata_filename) as metadata_file:
    for row in DictReader(metadata_file):
        metadata_dict[row['Media ID']] = row
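
# Each DictReader row comes back as a dict keyed by the header line above,
# e.g. (hypothetical values, for illustration only):
#   {'Media ID': '123456', 'Title': 'STS-1 Launch', 'Author': 'NASA', ...}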
conn = S3Connection()
src_bucket = conn.get_bucket(src_bucket_name)
dst_bucket = conn.create_bucket(dst_bucket_name)
logging.info('src_bucket=%s dst_bucket=%s' % (src_bucket, dst_bucket))
suffixes = defaultdict(int)
mediaids = defaultdict(int)
for key in src_bucket.list():
    name = key.name
    basename = os.path.basename(name)
    dirname = os.path.dirname(name)
    new_dirname = re.sub('^/?nasavideos', '', dirname)
    # Split basename into a numeric/dash media id and an optional suffix,
    # e.g. '123456-789.mp4' -> ('123456-789', 'mp4').
    m = re.match(r'^([\d-]+)\.?(.*)$', basename)
    try:
        mediaid = m.group(1)
        suffix = m.group(2)
        suffixes[suffix] += 1
        basename = mediaid + '.' + suffix  # fix any broken basename
        mediaids[mediaid] += 1
        file_metadata = metadata_dict.get(mediaid)
        if not file_metadata:
            # There are 2 files that each have 3 dash-suffixed names;
            # they are mostly the same content at different resolutions/sizes.
            dashless = mediaid.split('-')[0]
            file_metadata = metadata_dict.get(dashless)
            if file_metadata:
                logging.warning('Metadata found in CSV for video with dash-less name=%s' % name)
            else:
                logging.error('No metadata found in CSV for video file dashless=%s' % name)
        (mimetype, encoding) = mimetypes.guess_type(basename)
        if not mimetype:
            logging.warning('No mimetype found for basename=%s (trying local map)' % basename)
            mimetype = mime_map.get(suffix)
            if not mimetype:
                logging.error('No mimetype in our map either for basename=%s (using generic binary)' % basename)
                mimetype = 'application/octet-stream'
        if not file_metadata:
            # No spreadsheet row at all (already logged above): fall back to
            # empty values to avoid the KeyError/TypeError danger below.
            file_metadata = defaultdict(str)
        new_metadata = {
            'Content-Type': mimetype,
            'dc-title': asciify(file_metadata['Title']),
            'dc-creator': asciify(file_metadata['Author']),
            'dc-date': file_metadata['Upload Date'],
            'dc-subject': file_metadata['Genres'],
        }
        s3_metadata = key.metadata.copy()
        s3_metadata.update(new_metadata)
        new_key_name = os.path.join(new_dirname, basename)
        logging.info('new_key_name=%s s3_metadata=%s' % (new_key_name, s3_metadata))
        try:
            new_key = key.copy(dst_bucket.name, new_key_name,
                               metadata=new_metadata, preserve_acl=True)
        except Exception, e:  # TODO: too generic
            logging.error('new_metadata=%s \n(%s)' % (new_metadata, e))
            #import pdb; pdb.set_trace()
        # We could also create a .json file of all the metadata from the
        # spreadsheet; see the sketch at the end of this script.
        txt_key = Key(dst_bucket)
        txt_key.key = os.path.join(new_dirname, mediaid + '.txt')
        txt_key.content_type = 'text/plain'  # must be set before upload
        txt_key.set_contents_from_string(file_metadata['Description'])
    except AttributeError, e:
        logging.warning('Cannot match video basename=%s' % basename)
    MAX -= 1
    if MAX <= 0:  # stop after processing MAX keys
        break
logging.info('Suffix tally: %s' % pprint.pformat(sorted(suffixes.items())))
# TODO: are mediaids recycled across different media-01...06/ directories?
logging.info('#files=%s #rows=%s' % (len(mediaids), len(metadata_dict)))

# Do we have duplicate mediaids from files?
# for mediaid in mediaids:
#     if mediaids[mediaid] > 1:
#         logging.error('Multiple files for mediaid=%s' % mediaid)

# Ensure all the mediaids in the spreadsheet are in our S3 src_bucket
# for mediaid in metadata_dict:
#     if mediaid not in mediaids:
#         logging.error('Cannot find spreadsheet mediaid=%s in video files' % mediaid)
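
# A minimal sketch of the .json idea mentioned in the loop above: dump the
# whole spreadsheet as a single metadata.json object in the destination
# bucket. The key name and layout here are assumptions, not part of the
# original run, so it stays commented out like the checks above.
# import json
# json_key = Key(dst_bucket)
# json_key.key = 'metadata.json'
# json_key.content_type = 'application/json'  # must be set before upload
# json_key.set_contents_from_string(json.dumps(metadata_dict, indent=2))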