{sound.id} - {sound.original_filename}
'
+ html_output += f'
{sound.id} - {sound.original_filename}
'
wav_file = os.path.join(BASE_DIR, f'{sound.id}.wav')
wav_file_st = os.path.join(BASE_DIR, f'{sound.id}.st.wav')
@@ -58,7 +58,9 @@ def handle(self, *args, **options):
audioprocessing.convert_to_pcm(sound.locations('preview.LQ.mp3.path'), wav_file)
audioprocessing.stereofy_and_find_info(settings.STEREOFY_PATH, wav_file, wav_file_st)
- for count, color_scheme in enumerate([color_schemes.FREESOUND2_COLOR_SCHEME, color_schemes.OLD_BEASTWHOOSH_COLOR_SCHEME, color_schemes.BEASTWHOOSH_COLOR_SCHEME]):
+ for count, color_scheme in enumerate([color_schemes.FREESOUND2_COLOR_SCHEME,
+ color_schemes.OLD_BEASTWHOOSH_COLOR_SCHEME,
+ color_schemes.BEASTWHOOSH_COLOR_SCHEME]):
width = 195
height = 101
fft_size = 2048
@@ -66,10 +68,12 @@ def handle(self, *args, **options):
spectral_filename = f'{sound.id}-{count}-spec.jpg'
waveform_path = os.path.join(BASE_DIR, waveform_filename)
spectral_path = os.path.join(BASE_DIR, spectral_filename)
- audioprocessing.create_wave_images(wav_file_st, waveform_path, spectral_path, width, height, fft_size, color_scheme=color_scheme)
+ audioprocessing.create_wave_images(
+ wav_file_st, waveform_path, spectral_path, width, height, fft_size, color_scheme=color_scheme
+ )
html_output += f'
{count}
'
html_output += '
'
-
+
html_output += ''
fid = open(os.path.join(BASE_DIR, '_generated_images.html'), 'w')
fid.write(html_output)
diff --git a/sounds/management/commands/update_cdn_sounds.py b/sounds/management/commands/update_cdn_sounds.py
index d76c0ab3c..ed1615118 100644
--- a/sounds/management/commands/update_cdn_sounds.py
+++ b/sounds/management/commands/update_cdn_sounds.py
@@ -34,7 +34,7 @@
cdn_host = 'fsweb@cdn.freesound.org'
cdn_sounds_dir = '/home/fsweb/sounds'
cdn_symlinks_dir = '/home/fsweb/symlinks'
-tmp_dest_sound_dir = '/home/fsweb/tmp/'
+tmp_dest_sound_dir = '/home/fsweb/tmp/'
class Command(LoggingBaseCommand):
@@ -42,19 +42,39 @@ class Command(LoggingBaseCommand):
help = 'Update the CDN map cache for sound downloads by copying new files to the remote CDN or using a JSON file with updated mapping'
def add_arguments(self, parser):
- parser.add_argument('-f', '--filepath', dest='filepath', type=str, help='Path to JSON file with sounds map. If using this option, no new sounds will be copied to the CDN but only the local map in cache will be updated')
- parser.add_argument('-k', '--keypath', dest='keypath', default='/ssh_fsweb/cdn-ssh-key-fsweb', type=str, help='Path to the SSH private key to use for connecting to CDN')
+ parser.add_argument(
+ '-f',
+ '--filepath',
+ dest='filepath',
+ type=str,
+ help=
+ 'Path to JSON file with sounds map. If using this option, no new sounds will be copied to the CDN but only the local map in cache will be updated'
+ )
+ parser.add_argument(
+ '-k',
+ '--keypath',
+ dest='keypath',
+ default='/ssh_fsweb/cdn-ssh-key-fsweb',
+ type=str,
+ help='Path to the SSH private key to use for connecting to CDN'
+ )
parser.add_argument('-d', help='Clear the existing records in the cache (if any) and don\'t do anything else')
- parser.add_argument('-l', action='store', dest='limit', default=500, help='Maximum number of sounds to copy to remote CDN and update cache')
-
+ parser.add_argument(
+ '-l',
+ action='store',
+ dest='limit',
+ default=500,
+ help='Maximum number of sounds to copy to remote CDN and update cache'
+ )
+
def handle(self, *args, **options):
self.log_start()
file_path = options['filepath']
ssh_key_path = options['keypath']
- delete_already_existing = options['d']
- limit = options['limit']
+ delete_already_existing = options['d']
+ limit = options['limit']
num_added = 0
num_failed = 0
@@ -66,7 +86,7 @@ def handle(self, *args, **options):
console_logger.info(f'Adding cache items from file {file_path}')
map_data = json.load(open(file_path))
for sound_id, cdn_filename in map_data:
- cache_cdn_map.set(str(sound_id), cdn_filename, timeout=None) # No expiration
+ cache_cdn_map.set(str(sound_id), cdn_filename, timeout=None) # No expiration
num_added = len(map_data)
else:
console_logger.info('Finding new sounds to add to the cache')
@@ -83,18 +103,18 @@ def handle(self, *args, **options):
ss = all_ss[:limit]
total = ss.count()
console_logger.info(f'Found {all_ss.count()} new sounds missing in the cache, will add first {total}')
-
+
# Copy sounds if not already there, make symlinks and add them to cache
with Connection(host=cdn_host, connect_kwargs={'key_filename': ssh_key_path}) as c:
for count, sound in enumerate(ss):
# Define useful paths for that sound
sound_id = sound.id
src_sound_path = sound.locations('path')
- folder_id = str(sound.id//1000)
- dst_sound_path = os.path.join(cdn_sounds_dir, folder_id, os.path.basename(src_sound_path))
+ folder_id = str(sound.id // 1000)
+ dst_sound_path = os.path.join(cdn_sounds_dir, folder_id, os.path.basename(src_sound_path))
console_logger.info(f'Adding sound to the CDN [{count + 1}/{total}] - {sound_id}')
- # Check if sound already exists in the expected remote location
+ # Check if sound already exists in the expected remote location
sound_exists = c.run(f'ls {dst_sound_path}', hide=True, warn=True).exited == 0
if not sound_exists:
# Copy file to remote, make intermediate folders if needed
@@ -105,10 +125,14 @@ def handle(self, *args, **options):
# Before making the symlink, check again that sound exists, otherwise don't make it as there were problems copying sound
sound_exists = c.run(f'ls {dst_sound_path}', hide=True, warn=True).exited == 0
if sound_exists:
- c.run(f"rm {os.path.join(cdn_symlinks_dir, folder_id, f'{sound_id}-*')}", hide=True, warn=True)
+ c.run(
+ f"rm {os.path.join(cdn_symlinks_dir, folder_id, f'{sound_id}-*')}",
+ hide=True,
+ warn=True
+ )
random_uuid = str(uuid.uuid4())
symlink_name = f'{sound_id}-{random_uuid}'
- dst_symlink_path = os.path.join(cdn_symlinks_dir, folder_id, symlink_name)
+ dst_symlink_path = os.path.join(cdn_symlinks_dir, folder_id, symlink_name)
c.run(f'mkdir -p {os.path.dirname(dst_symlink_path)}')
c.run(f'ln -s {dst_sound_path} {dst_symlink_path}')
@@ -117,7 +141,7 @@ def handle(self, *args, **options):
# as there were problems creating the symlink
symlink_exists = c.run(f'ls {dst_symlink_path}', hide=True, warn=True).exited == 0
if symlink_exists:
- cache_cdn_map.set(str(sound_id), symlink_name, timeout=None) # No expiration
+ cache_cdn_map.set(str(sound_id), symlink_name, timeout=None) # No expiration
num_added += 1
else:
num_failed += 1
diff --git a/sounds/models.py b/sounds/models.py
index ed47de7f4..7fe8bb556 100644
--- a/sounds/models.py
+++ b/sounds/models.py
@@ -135,12 +135,12 @@ class BulkUploadProgress(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
created = models.DateTimeField(db_index=True, auto_now_add=True)
CSV_CHOICES = (
- ("N", 'Not yet validated'), # linked CSV file has not yet been validated
- ("F", 'Finished description'), # All sounds have been described/created (but some might still be in
- # processing/moderation stage)
- ("S", 'Sounds being described and processed'), # Description (and processing) process has started
- ("V", 'Finished validation'), # CSV file has been validated (might have errors)
- ("C", 'Closed'), # Process has finished and has been closes
+ ("N", 'Not yet validated'), # linked CSV file has not yet been validated
+ ("F", 'Finished description'), # All sounds have been described/created (but some might still be in
+ # processing/moderation stage)
+ ("S", 'Sounds being described and processed'), # Description (and processing) process has started
+ ("V", 'Finished validation'), # CSV file has been validated (might have errors)
+        ("C", 'Closed'),  # Process has finished and has been closed
)
progress_type = models.CharField(max_length=1, choices=CSV_CHOICES, default="N")
csv_filename = models.CharField(max_length=512, null=True, blank=True, default=None)
@@ -180,7 +180,8 @@ def validate_csv_file(self):
csv_header=header,
csv_lines=lines,
sounds_base_dir=os.path.join(settings.UPLOADS_PATH, str(self.user_id)),
- username=self.user.username)
+ username=self.user.username
+ )
except Exception:
# This is a broad exception clause intentionally placed here to make sure that BulkUploadProgress is
# updated with a global error. Otherwise it will appear to the user that the object is permanently being
@@ -194,7 +195,7 @@ def validate_csv_file(self):
'lines_with_errors': [line for line in lines_validated if line['line_errors']],
'global_errors': global_errors,
}
- self.progress_type = 'V' # Set progress to 'validated'
+ self.progress_type = 'V' # Set progress to 'validated'
self.save()
# Log information about the process
@@ -217,7 +218,8 @@ def describe_sounds(self):
force_import=True,
sounds_base_dir=os.path.join(settings.UPLOADS_PATH, str(self.user_id)),
username=self.user.username,
- bulkupload_progress_id=self.id)
+ bulkupload_progress_id=self.id
+ )
web_logger.info(f'Finished creating sound objects for bulk upload ({json.dumps(bulk_upload_basic_data)})')
def store_progress_for_line(self, line_no, message):
@@ -250,24 +252,26 @@ def get_description_progress_info(self):
n_sounds_remaining_to_describe = n_lines_validated_ok - n_sounds_described_ok - n_sounds_error
n_sounds_published = Sound.objects.filter(
- id__in=sound_ids_described_ok, processing_state="OK", moderation_state="OK").count()
+ id__in=sound_ids_described_ok, processing_state="OK", moderation_state="OK"
+ ).count()
n_sounds_moderation = Sound.objects.filter(
- id__in=sound_ids_described_ok, processing_state="OK").exclude(moderation_state="OK").count()
+ id__in=sound_ids_described_ok, processing_state="OK"
+ ).exclude(moderation_state="OK").count()
n_sounds_currently_processing = Sound.objects.filter(
- id__in=sound_ids_described_ok, processing_state="PE", processing_ongoing_state="PR").count()
+ id__in=sound_ids_described_ok, processing_state="PE", processing_ongoing_state="PR"
+ ).count()
n_sounds_pending_processing = Sound.objects.filter(
- id__in=sound_ids_described_ok, processing_state="PE").exclude(processing_ongoing_state="PR").count()
- n_sounds_failed_processing = Sound.objects.filter(
- id__in=sound_ids_described_ok, processing_state="FA").count()
+ id__in=sound_ids_described_ok, processing_state="PE"
+ ).exclude(processing_ongoing_state="PR").count()
+ n_sounds_failed_processing = Sound.objects.filter(id__in=sound_ids_described_ok, processing_state="FA").count()
# The remaining sounds that have been described ok but do not appear in any of the sets above are sounds with
# an unknown state. This could happen if sounds get deleted (e.g. as part of the moderation process or because
# a sound fails processing and the user deletes it).
- n_sounds_unknown = n_sounds_described_ok - (n_sounds_published +
- n_sounds_moderation +
- n_sounds_currently_processing +
- n_sounds_pending_processing +
- n_sounds_failed_processing)
+ n_sounds_unknown = n_sounds_described_ok - (
+ n_sounds_published + n_sounds_moderation + n_sounds_currently_processing + n_sounds_pending_processing +
+ n_sounds_failed_processing
+ )
progress = 0
if n_lines_validated_ok == 0:
@@ -275,14 +279,14 @@ def get_description_progress_info(self):
progress = 100
else:
if self.description_output is not None:
- progress = (100.0 * (n_sounds_published +
- n_sounds_moderation +
- n_sounds_failed_processing +
- n_sounds_error +
- n_sounds_unknown)
- ) / (n_sounds_described_ok +
- n_sounds_error +
- n_sounds_remaining_to_describe)
+ progress = (
+ 100.0 * (
+ n_sounds_published + n_sounds_moderation + n_sounds_failed_processing + n_sounds_error +
+ n_sounds_unknown
+ )
+ ) / (
+ n_sounds_described_ok + n_sounds_error + n_sounds_remaining_to_describe
+ )
progress = int(progress)
# NOTE: progress percentage is determined as the total number of sounds "that won't change" vs the total
# number of sounds that should have been described and processed. Sounds that fail processing or description
@@ -321,9 +325,7 @@ def has_line_validation_errors(self):
return False
class Meta:
- permissions = (
- ("can_describe_in_bulk", "Can use the Bulk Describe feature."),
- )
+ permissions = (("can_describe_in_bulk", "Can use the Bulk Describe feature."),)
class SoundManager(models.Manager):
@@ -395,8 +397,9 @@ def get_analyzers_data_select_sql(self):
analyzers_select_section_parts = []
for analyzer_name, analyzer_info in settings.ANALYZERS_CONFIGURATION.items():
if 'descriptors_map' in analyzer_info:
- analyzers_select_section_parts.append("{0}.analysis_data as {0},"
- .format(analyzer_name.replace('-', '_')))
+ analyzers_select_section_parts.append(
+ "{0}.analysis_data as {0},".format(analyzer_name.replace('-', '_'))
+ )
return "\n ".join(analyzers_select_section_parts)
def get_analyzers_data_left_join_sql(self):
@@ -406,8 +409,10 @@ def get_analyzers_data_left_join_sql(self):
for analyzer_name, analyzer_info in settings.ANALYZERS_CONFIGURATION.items():
if 'descriptors_map' in analyzer_info:
analyzers_left_join_section_parts.append(
- "LEFT JOIN sounds_soundanalysis {0} ON (sound.id = {0}.sound_id AND {0}.analyzer = '{1}')"
- .format(analyzer_name.replace('-', '_'), analyzer_name))
+ "LEFT JOIN sounds_soundanalysis {0} ON (sound.id = {0}.sound_id AND {0}.analyzer = '{1}')".format(
+ analyzer_name.replace('-', '_'), analyzer_name
+ )
+ )
return "\n ".join(analyzers_left_join_section_parts)
def get_analysis_state_essentia_exists_sql(self):
@@ -466,9 +471,10 @@ def bulk_query_solr(self, sound_ids):
LEFT JOIN sounds_license ON sound.license_id = sounds_license.id
LEFT JOIN geotags_geotag ON sound.geotag_id = geotags_geotag.id
%s
- """ % (self.get_analyzers_data_select_sql(),
- ContentType.objects.get_for_model(Sound).id,
- self.get_analyzers_data_left_join_sql())
+ """ % (
+ self.get_analyzers_data_select_sql(), ContentType.objects.get_for_model(Sound).id,
+ self.get_analyzers_data_left_join_sql()
+ )
query += "WHERE sound.id IN %s"
return self.raw(query, [tuple(sound_ids)])
@@ -530,11 +536,13 @@ def bulk_query(self, where, order_by, limit, args, include_analyzers_output=Fals
LEFT JOIN tickets_ticket ON tickets_ticket.sound_id = sound.id
%s
LEFT OUTER JOIN sounds_remixgroup_sounds ON sounds_remixgroup_sounds.sound_id = sound.id
- WHERE %s """ % (self.get_analysis_state_essentia_exists_sql(),
- self.get_analyzers_data_select_sql() if include_analyzers_output else '',
- ContentType.objects.get_for_model(Sound).id,
- self.get_analyzers_data_left_join_sql() if include_analyzers_output else '',
- where, )
+ WHERE %s """ % (
+ self.get_analysis_state_essentia_exists_sql(),
+ self.get_analyzers_data_select_sql() if include_analyzers_output else '',
+ ContentType.objects.get_for_model(Sound).id,
+ self.get_analyzers_data_left_join_sql() if include_analyzers_output else '',
+ where,
+ )
if order_by:
query = f"{query} ORDER BY {order_by}"
if limit:
@@ -548,7 +556,7 @@ def bulk_sounds_for_user(self, user_id, limit=None, include_analyzers_output=Fal
order_by = "sound.created DESC"
if limit:
limit = str(limit)
- return self.bulk_query(where, order_by, limit, (user_id, ), include_analyzers_output=include_analyzers_output)
+ return self.bulk_query(where, order_by, limit, (user_id,), include_analyzers_output=include_analyzers_output)
def bulk_sounds_for_pack(self, pack_id, limit=None, include_analyzers_output=False):
where = """sound.moderation_state = 'OK'
@@ -557,13 +565,13 @@ def bulk_sounds_for_pack(self, pack_id, limit=None, include_analyzers_output=Fal
order_by = "sound.created DESC"
if limit:
limit = str(limit)
- return self.bulk_query(where, order_by, limit, (pack_id, ), include_analyzers_output=include_analyzers_output)
+ return self.bulk_query(where, order_by, limit, (pack_id,), include_analyzers_output=include_analyzers_output)
def bulk_query_id(self, sound_ids, include_analyzers_output=False):
if not isinstance(sound_ids, list):
sound_ids = [sound_ids]
where = "sound.id = ANY(%s)"
- return self.bulk_query(where, "", "", (sound_ids, ), include_analyzers_output=include_analyzers_output)
+ return self.bulk_query(where, "", "", (sound_ids,), include_analyzers_output=include_analyzers_output)
def bulk_query_id_public(self, sound_ids, include_analyzers_output=False):
if not isinstance(sound_ids, list):
@@ -571,10 +579,13 @@ def bulk_query_id_public(self, sound_ids, include_analyzers_output=False):
where = """sound.id = ANY(%s)
AND sound.moderation_state = 'OK'
AND sound.processing_state = 'OK'"""
- return self.bulk_query(where, "", "", (sound_ids, ), include_analyzers_output=include_analyzers_output)
+ return self.bulk_query(where, "", "", (sound_ids,), include_analyzers_output=include_analyzers_output)
def dict_ids(self, sound_ids, include_analyzers_output=False):
- return {sound_obj.id: sound_obj for sound_obj in self.bulk_query_id(sound_ids, include_analyzers_output=include_analyzers_output)}
+ return {
+ sound_obj.id: sound_obj
+ for sound_obj in self.bulk_query_id(sound_ids, include_analyzers_output=include_analyzers_output)
+ }
def ordered_ids(self, sound_ids, include_analyzers_output=False):
sounds = self.dict_ids(sound_ids, include_analyzers_output=include_analyzers_output)
@@ -583,6 +594,7 @@ def ordered_ids(self, sound_ids, include_analyzers_output=False):
class PublicSoundManager(models.Manager):
""" a class which only returns public sounds """
+
def get_queryset(self):
return super().get_queryset().filter(moderation_state="OK", processing_state="OK")
@@ -612,15 +624,19 @@ class Sound(models.Model):
# The history of licenses for a sound is stored on SoundLicenseHistory 'license' references the last one
license = models.ForeignKey(License, on_delete=models.CASCADE)
sources = models.ManyToManyField('self', symmetrical=False, related_name='remixes', blank=True)
- pack = models.ForeignKey('Pack', null=True, blank=True, default=None, on_delete=models.SET_NULL, related_name='sounds')
+ pack = models.ForeignKey(
+ 'Pack', null=True, blank=True, default=None, on_delete=models.SET_NULL, related_name='sounds'
+ )
tags = fields.GenericRelation(TaggedItem)
geotag = models.ForeignKey(GeoTag, null=True, blank=True, default=None, on_delete=models.SET_NULL)
# fields for specifying if the sound was uploaded via API or via bulk upload process (or none)
uploaded_with_apiv2_client = models.ForeignKey(
- ApiV2Client, null=True, blank=True, default=None, on_delete=models.SET_NULL)
+ ApiV2Client, null=True, blank=True, default=None, on_delete=models.SET_NULL
+ )
uploaded_with_bulk_upload_progress = models.ForeignKey(
- BulkUploadProgress, null=True, blank=True, default=None, on_delete=models.SET_NULL)
+ BulkUploadProgress, null=True, blank=True, default=None, on_delete=models.SET_NULL
+ )
# file properties
type = models.CharField(db_index=True, max_length=4, choices=settings.SOUND_TYPE_CHOICES)
@@ -640,14 +656,14 @@ class Sound(models.Model):
("DE", 'Deferred'),
)
moderation_state = models.CharField(db_index=True, max_length=2, choices=MODERATION_STATE_CHOICES, default="PE")
- moderation_date = models.DateTimeField(null=True, blank=True, default=None) # Set at last moderation state change
+ moderation_date = models.DateTimeField(null=True, blank=True, default=None) # Set at last moderation state change
moderation_note = models.TextField(null=True, blank=True, default=None)
has_bad_description = models.BooleanField(default=False)
is_explicit = models.BooleanField(default=False)
# processing
PROCESSING_STATE_CHOICES = (
- ("PE", 'Pending'), # Sounds will only be in "PE" before the very first time they are processed
+ ("PE", 'Pending'), # Sounds will only be in "PE" before the very first time they are processed
("OK", 'OK'),
("FA", 'Failed'),
)
@@ -657,24 +673,30 @@ class Sound(models.Model):
("PR", 'Processing'),
("FI", 'Finished'),
)
- ANALYSIS_STATE_CHOICES = PROCESSING_STATE_CHOICES + (("SK", 'Skipped'), ("QU", 'Queued'),)
+ ANALYSIS_STATE_CHOICES = PROCESSING_STATE_CHOICES + (
+ ("SK", 'Skipped'),
+ ("QU", 'Queued'),
+ )
SIMILARITY_STATE_CHOICES = PROCESSING_STATE_CHOICES
processing_state = models.CharField(db_index=True, max_length=2, choices=PROCESSING_STATE_CHOICES, default="PE")
- processing_ongoing_state = models.CharField(db_index=True, max_length=2,
- choices=PROCESSING_ONGOING_STATE_CHOICES, default="NO")
- processing_date = models.DateTimeField(null=True, blank=True, default=None) # Set at last processing attempt
+ processing_ongoing_state = models.CharField(
+ db_index=True, max_length=2, choices=PROCESSING_ONGOING_STATE_CHOICES, default="NO"
+ )
+ processing_date = models.DateTimeField(null=True, blank=True, default=None) # Set at last processing attempt
processing_log = models.TextField(null=True, blank=True, default=None)
# state
is_index_dirty = models.BooleanField(null=False, default=True)
similarity_state = models.CharField(db_index=True, max_length=2, choices=SIMILARITY_STATE_CHOICES, default="PE")
- analysis_state = models.CharField(db_index=True, max_length=2, choices=ANALYSIS_STATE_CHOICES, default="PE") # This field is no longer used and should be removed
+ analysis_state = models.CharField(
+ db_index=True, max_length=2, choices=ANALYSIS_STATE_CHOICES, default="PE"
+ ) # This field is no longer used and should be removed
# counts, updated by django signals
num_comments = models.PositiveIntegerField(default=0)
num_downloads = models.PositiveIntegerField(default=0, db_index=True)
- avg_rating = models.FloatField(default=0) # Store average rating from 0 to 10
+ avg_rating = models.FloatField(default=0) # Store average rating from 0 to 10
num_ratings = models.PositiveIntegerField(default=0)
objects = SoundManager()
@@ -735,49 +757,57 @@ def locations(self):
display=dict(
spectral=dict(
M=dict(
- path=os.path.join(settings.DISPLAYS_PATH, id_folder, "%d_%d_spec_M.jpg" % (self.id,
- sound_user_id)),
+ path=os.path.join(
+ settings.DISPLAYS_PATH, id_folder, "%d_%d_spec_M.jpg" % (self.id, sound_user_id)
+ ),
url=displays_url + "%s/%d_%d_spec_M.jpg" % (id_folder, self.id, sound_user_id)
),
L=dict(
- path=os.path.join(settings.DISPLAYS_PATH, id_folder, "%d_%d_spec_L.jpg" % (self.id,
- sound_user_id)),
+ path=os.path.join(
+ settings.DISPLAYS_PATH, id_folder, "%d_%d_spec_L.jpg" % (self.id, sound_user_id)
+ ),
url=displays_url + "%s/%d_%d_spec_L.jpg" % (id_folder, self.id, sound_user_id)
)
),
wave=dict(
M=dict(
- path=os.path.join(settings.DISPLAYS_PATH, id_folder, "%d_%d_wave_M.png" % (self.id,
- sound_user_id)),
+ path=os.path.join(
+ settings.DISPLAYS_PATH, id_folder, "%d_%d_wave_M.png" % (self.id, sound_user_id)
+ ),
url=displays_url + "%s/%d_%d_wave_M.png" % (id_folder, self.id, sound_user_id)
),
L=dict(
- path=os.path.join(settings.DISPLAYS_PATH, id_folder, "%d_%d_wave_L.png" % (self.id,
- sound_user_id)),
+ path=os.path.join(
+ settings.DISPLAYS_PATH, id_folder, "%d_%d_wave_L.png" % (self.id, sound_user_id)
+ ),
url=displays_url + "%s/%d_%d_wave_L.png" % (id_folder, self.id, sound_user_id)
)
),
spectral_bw=dict(
M=dict(
- path=os.path.join(settings.DISPLAYS_PATH, id_folder, "%d_%d_spec_bw_M.jpg" % (self.id,
- sound_user_id)),
+ path=os.path.join(
+ settings.DISPLAYS_PATH, id_folder, "%d_%d_spec_bw_M.jpg" % (self.id, sound_user_id)
+ ),
url=displays_url + "%s/%d_%d_spec_bw_M.jpg" % (id_folder, self.id, sound_user_id)
),
L=dict(
- path=os.path.join(settings.DISPLAYS_PATH, id_folder, "%d_%d_spec_bw_L.jpg" % (self.id,
- sound_user_id)),
+ path=os.path.join(
+ settings.DISPLAYS_PATH, id_folder, "%d_%d_spec_bw_L.jpg" % (self.id, sound_user_id)
+ ),
url=displays_url + "%s/%d_%d_spec_bw_L.jpg" % (id_folder, self.id, sound_user_id)
)
),
wave_bw=dict(
M=dict(
- path=os.path.join(settings.DISPLAYS_PATH, id_folder, "%d_%d_wave_bw_M.png" % (self.id,
- sound_user_id)),
+ path=os.path.join(
+ settings.DISPLAYS_PATH, id_folder, "%d_%d_wave_bw_M.png" % (self.id, sound_user_id)
+ ),
url=displays_url + "%s/%d_%d_wave_bw_M.png" % (id_folder, self.id, sound_user_id)
),
L=dict(
- path=os.path.join(settings.DISPLAYS_PATH, id_folder, "%d_%d_wave_bw_L.png" % (self.id,
- sound_user_id)),
+ path=os.path.join(
+ settings.DISPLAYS_PATH, id_folder, "%d_%d_wave_bw_L.png" % (self.id, sound_user_id)
+ ),
url=displays_url + "%s/%d_%d_wave_bw_L.png" % (id_folder, self.id, sound_user_id)
)
)
@@ -785,16 +815,20 @@ def locations(self):
analysis=dict(
base_path=os.path.join(settings.ANALYSIS_PATH, id_folder),
statistics=dict(
- path=os.path.join(settings.ANALYSIS_PATH, id_folder, "%d-%s.yaml" % (
- self.id, settings.FREESOUND_ESSENTIA_EXTRACTOR_NAME)),
- url=settings.ANALYSIS_URL + "%s/%d-%s.yaml" % (
- id_folder, self.id, settings.FREESOUND_ESSENTIA_EXTRACTOR_NAME)
+ path=os.path.join(
+ settings.ANALYSIS_PATH, id_folder,
+ "%d-%s.yaml" % (self.id, settings.FREESOUND_ESSENTIA_EXTRACTOR_NAME)
+ ),
+ url=settings.ANALYSIS_URL + "%s/%d-%s.yaml" %
+ (id_folder, self.id, settings.FREESOUND_ESSENTIA_EXTRACTOR_NAME)
),
frames=dict(
- path=os.path.join(settings.ANALYSIS_PATH, id_folder, "%d-%s_frames.json" % (
- self.id, settings.FREESOUND_ESSENTIA_EXTRACTOR_NAME)),
- url=settings.ANALYSIS_URL + "%s/%d-%s_frames.json" % (
- id_folder, self.id, settings.FREESOUND_ESSENTIA_EXTRACTOR_NAME)
+ path=os.path.join(
+ settings.ANALYSIS_PATH, id_folder,
+ "%d-%s_frames.json" % (self.id, settings.FREESOUND_ESSENTIA_EXTRACTOR_NAME)
+ ),
+ url=settings.ANALYSIS_URL + "%s/%d-%s_frames.json" %
+ (id_folder, self.id, settings.FREESOUND_ESSENTIA_EXTRACTOR_NAME)
)
)
)
@@ -829,7 +863,7 @@ def type_warning(self):
def duration_warning(self):
# warn from 5 minutes and more
- return self.duration > 60*5
+ return self.duration > 60 * 5
def filesize_warning(self):
# warn for 50MB and up
@@ -854,7 +888,7 @@ def duration_ms(self):
def rating_percent(self):
if self.num_ratings < settings.MIN_NUMBER_RATINGS:
return 0
- return int(self.avg_rating*10)
+ return int(self.avg_rating * 10)
@property
def avg_rating_0_5(self):
@@ -889,11 +923,11 @@ def should_display_small_icons_in_second_line(self):
if self.geotag_id:
icons_count += 1
if self.num_downloads:
- icons_count +=2 # Counts double as it takes more width
+ icons_count += 2 # Counts double as it takes more width
if self.num_comments:
- icons_count +=2 # Counts double as it takes more width
+ icons_count += 2 # Counts double as it takes more width
if self.num_ratings > settings.MIN_NUMBER_RATINGS:
- icons_count +=2 # Counts double as it takes more width
+ icons_count += 2 # Counts double as it takes more width
title_num_chars = len(self.original_filename)
if icons_count >= 6:
return title_num_chars >= 15
@@ -901,7 +935,7 @@ def should_display_small_icons_in_second_line(self):
return title_num_chars >= 23
else:
return title_num_chars >= 30
-
+
@property
def license_bw_icon_name(self):
if hasattr(self, 'license_name'):
@@ -918,8 +952,8 @@ def get_license_history(self):
If a sound never had a license changed, then the list will have a single element.
List is sorted with the newest license at the top.
"""
- return [(slh.created, slh.license) for slh in
- self.soundlicensehistory_set.select_related('license').order_by('-created')]
+ return [(slh.created, slh.license)
+ for slh in self.soundlicensehistory_set.select_related('license').order_by('-created')]
def get_sound_tags(self, limit=None):
"""
@@ -972,9 +1006,9 @@ def set_sources(self, new_sources):
"""
:param set new_sources: set object with the integer IDs of the sounds which should be set as sources of the sound
"""
- new_sources.discard(self.id) # stop the universe from collapsing :-D
+ new_sources.discard(self.id) # stop the universe from collapsing :-D
old_sources = self.get_sound_sources_as_set()
-
+
# process sources in old but not in new
for sid in old_sources - new_sources:
try:
@@ -990,10 +1024,16 @@ def set_sources(self, new_sources):
source.invalidate_template_caches()
self.sources.add(source)
send_mail_template(
- settings.EMAIL_SUBJECT_SOUND_ADDED_AS_REMIX, 'emails/email_remix_update.txt',
- {'source': source, 'action': 'added', 'remix': self},
- user_to=source.user, email_type_preference_check='new_remix')
-
+ settings.EMAIL_SUBJECT_SOUND_ADDED_AS_REMIX,
+ 'emails/email_remix_update.txt', {
+ 'source': source,
+ 'action': 'added',
+ 'remix': self
+ },
+ user_to=source.user,
+ email_type_preference_check='new_remix'
+ )
+
if old_sources != new_sources:
self.invalidate_template_caches()
@@ -1134,28 +1174,30 @@ def replace_user_id_in_path(path, old_owner_id, new_owner_id):
# Rename related files in disk
paths_to_rename = [
- self.locations('path'), # original file path
- self.locations('analysis.frames.path'), # analysis frames file
- self.locations('analysis.statistics.path'), # analysis statistics file
- self.locations('display.spectral.L.path'), # spectrogram L
- self.locations('display.spectral.M.path'), # spectrogram M
- self.locations('display.wave_bw.L.path'), # waveform BW L
- self.locations('display.wave_bw.M.path'), # waveform BW M
- self.locations('display.spectral_bw.L.path'), # spectrogram BW L
- self.locations('display.spectral_bw.M.path'), # spectrogram BW M
- self.locations('display.wave.L.path'), # waveform L
- self.locations('display.wave.M.path'), # waveform M
- self.locations('preview.HQ.mp3.path'), # preview HQ mp3
- self.locations('preview.HQ.ogg.path'), # preview HQ ogg
- self.locations('preview.LQ.mp3.path'), # preview LQ mp3
- self.locations('preview.LQ.ogg.path'), # preview LQ ogg
+ self.locations('path'), # original file path
+ self.locations('analysis.frames.path'), # analysis frames file
+ self.locations('analysis.statistics.path'), # analysis statistics file
+ self.locations('display.spectral.L.path'), # spectrogram L
+ self.locations('display.spectral.M.path'), # spectrogram M
+ self.locations('display.wave_bw.L.path'), # waveform BW L
+ self.locations('display.wave_bw.M.path'), # waveform BW M
+ self.locations('display.spectral_bw.L.path'), # spectrogram BW L
+ self.locations('display.spectral_bw.M.path'), # spectrogram BW M
+ self.locations('display.wave.L.path'), # waveform L
+ self.locations('display.wave.M.path'), # waveform M
+ self.locations('preview.HQ.mp3.path'), # preview HQ mp3
+ self.locations('preview.HQ.ogg.path'), # preview HQ ogg
+ self.locations('preview.LQ.mp3.path'), # preview LQ mp3
+ self.locations('preview.LQ.ogg.path'), # preview LQ ogg
]
for path in paths_to_rename:
try:
os.rename(path, replace_user_id_in_path(path, self.user.id, new_owner.id))
except OSError:
- web_logger.info('WARNING changing owner of sound %i: Could not rename file %s because '
- 'it does not exist.\n' % (self.id, path))
+ web_logger.info(
+ 'WARNING changing owner of sound %i: Could not rename file %s because '
+ 'it does not exist.\n' % (self.id, path)
+ )
# Deal with pack
# If sound is in pack, replicate pack in new user.
@@ -1177,7 +1219,7 @@ def replace_user_id_in_path(path, old_owner_id, new_owner_id):
self.original_path = replace_user_id_in_path(self.original_path, old_owner.id, new_owner.id)
# Set index dirty
- self.mark_index_dirty(commit=True) # commit=True does save
+ self.mark_index_dirty(commit=True) # commit=True does save
# Update old owner and new owner profiles
old_owner.profile.update_num_sounds()
@@ -1219,7 +1261,7 @@ def compute_crc(self, commit=True):
for data in iter(lambda: fp.read(settings.CRC_BUFFER_SIZE), b''):
crc = zlib.crc32(data, crc)
- self.crc = f'{crc & 0xffffffff:0>8x}' # right aligned with zero-padding, width of 8 chars
+ self.crc = f'{crc & 0xffffffff:0>8x}' # right aligned with zero-padding, width of 8 chars
if commit:
self.save()
@@ -1271,18 +1313,23 @@ def process(self, force=False, skip_previews=False, skip_displays=False, high_pr
if force or ((self.processing_state != "OK" or self.processing_ongoing_state != "FI")
and self.estimate_num_processing_attemps() <= 3):
self.set_processing_ongoing_state("QU")
- tasks.process_sound.apply_async(kwargs=dict(sound_id=self.id, skip_previews=skip_previews, skip_displays=skip_displays), countdown=countdown)
+ tasks.process_sound.apply_async(
+ kwargs=dict(sound_id=self.id, skip_previews=skip_previews, skip_displays=skip_displays),
+ countdown=countdown
+ )
sounds_logger.info(f"Send sound with id {self.id} to queue 'process'")
return True
def estimate_num_processing_attemps(self):
- # Estimates how many processing attemps have been made by looking at the processing logs
+ # Estimates how many processing attemps have been made by looking at the processing logs
if self.processing_log is not None:
return max(1, self.processing_log.count('----Processed sound'))
else:
return 0
- def analyze(self, analyzer=settings.FREESOUND_ESSENTIA_EXTRACTOR_NAME, force=False, verbose=True, high_priority=False):
+ def analyze(
+ self, analyzer=settings.FREESOUND_ESSENTIA_EXTRACTOR_NAME, force=False, verbose=True, high_priority=False
+ ):
# Note that "high_priority" is not implemented but needs to be here for compatibility with older code
if analyzer not in settings.ANALYZERS_CONFIGURATION.keys():
# If specified analyzer is not one of the analyzers configured, do nothing
@@ -1301,12 +1348,24 @@ def analyze(self, analyzer=settings.FREESOUND_ESSENTIA_EXTRACTOR_NAME, force=Fal
sound_path = self.locations('path')
if settings.USE_PREVIEWS_WHEN_ORIGINAL_FILES_MISSING and not os.path.exists(sound_path):
sound_path = self.locations("preview.LQ.mp3.path")
- celery_app.send_task(analyzer, kwargs={'sound_id': self.id, 'sound_path': sound_path,
- 'analysis_folder': self.locations('analysis.base_path'), 'metadata':json.dumps({
+ celery_app.send_task(
+ analyzer,
+ kwargs={
+ 'sound_id':
+ self.id,
+ 'sound_path':
+ sound_path,
+ 'analysis_folder':
+ self.locations('analysis.base_path'),
+ 'metadata':
+ json.dumps({
'duration': self.duration,
'tags': self.get_sound_tags(),
'geotag': [self.geotag.lat, self.geotag.lon] if self.geotag else None,
- })}, queue=analyzer)
+ })
+ },
+ queue=analyzer
+ )
if verbose:
sounds_logger.info(f"Sending sound {self.id} to analyzer {analyzer}")
else:
@@ -1332,7 +1391,7 @@ def invalidate_template_caches(self):
invalidate_template_cache("display_sound", self.id, is_authenticated, is_explicit)
for player_size in ['small', 'middle', 'big_no_info', 'small_no_info', 'minimal']:
- for is_authenticated in [True, False]:
+ for is_authenticated in [True, False]:
invalidate_template_cache("bw_display_sound", self.id, player_size, is_authenticated)
invalidate_template_cache("bw_sound_page", self.id)
@@ -1352,10 +1411,11 @@ def get_geotag_name(self):
return f'{self.geotag.lat:.2f}, {self.geotag.lon:.3f}'
class Meta:
- ordering = ("-created", )
+ ordering = ("-created",)
class SoundOfTheDayManager(models.Manager):
+
def create_sound_for_date(self, date_display):
"""Create a random sound of the day for a specific date.
Make sure that the sound hasn't already been chosen as a sound of the day
@@ -1372,8 +1432,10 @@ def create_sound_for_date(self, date_display):
days_for_user = settings.NUMBER_OF_DAYS_FOR_USER_RANDOM_SOUNDS
date_from = date_display - datetime.timedelta(days=days_for_user)
users = self.model.objects.filter(
- date_display__lt=date_display,
- date_display__gte=date_from).distinct().values_list('sound__user_id', flat=True)
+ date_display__lt=date_display, date_display__gte=date_from
+ ).distinct().values_list(
+ 'sound__user_id', flat=True
+ )
used_sounds = self.model.objects.values_list('sound_id', flat=True)
sound = Sound.objects.random(excludes={'user__id__in': users, 'id__in': used_sounds})
@@ -1421,14 +1483,20 @@ def notify_by_email(self):
return False
send_mail_template(
- settings.EMAIL_SUBJECT_RANDOM_SOUND_OF_THE_SAY_CHOOSEN, 'emails/email_random_sound.txt',
- {'sound': self.sound, 'user': self.sound.user},
- user_to=self.sound.user, email_type_preference_check="random_sound")
+ settings.EMAIL_SUBJECT_RANDOM_SOUND_OF_THE_SAY_CHOOSEN,
+ 'emails/email_random_sound.txt', {
+ 'sound': self.sound,
+ 'user': self.sound.user
+ },
+ user_to=self.sound.user,
+ email_type_preference_check="random_sound"
+ )
self.email_sent = True
self.save()
- sounds_logger.info("Finished sending mail to user %s of random sound of the day %s" %
- (self.sound.user, self.sound))
+ sounds_logger.info(
+ "Finished sending mail to user %s of random sound of the day %s" % (self.sound.user, self.sound)
+ )
return True
@@ -1442,9 +1510,7 @@ class DeletedSound(models.Model):
def on_delete_sound(sender, instance, **kwargs):
- ds, create = DeletedSound.objects.get_or_create(
- sound_id=instance.id,
- defaults={'data': {}})
+ ds, create = DeletedSound.objects.get_or_create(sound_id=instance.id, defaults={'data': {}})
ds.user = instance.user
# Copy relevant data to DeletedSound for future research
@@ -1488,7 +1554,7 @@ def on_delete_sound(sender, instance, **kwargs):
data['created'] = str(data['created'])
data['moderation_date'] = str(data['moderation_date'])
data['processing_date'] = str(data['processing_date'])
- data['date_recorded'] = str(data['date_recorded']) # This field is not used
+ data['date_recorded'] = str(data['date_recorded']) # This field is not used
if instance.pack:
data['pack']['created'] = str(data['pack']['created'])
data['pack']['last_updated'] = str(data['pack']['last_updated'])
@@ -1575,21 +1641,24 @@ def bulk_query_id(self, pack_ids, sound_ids_for_pack_id=dict(), exclude_deleted=
should_add_sound_to_selected_sounds = True
if should_add_sound_to_selected_sounds:
selected_sounds_data.append({
- 'id': s.id,
- 'username': p.user.username, # Packs have same username as sounds inside pack
- 'similarity_state': s.similarity_state,
- 'duration': s.duration,
- 'preview_mp3': s.locations('preview.LQ.mp3.url'),
- 'preview_ogg': s.locations('preview.LQ.ogg.url'),
- 'wave': s.locations('display.wave_bw.L.url'),
- 'spectral': s.locations('display.spectral_bw.L.url'),
- 'num_ratings': s.num_ratings,
- 'avg_rating': s.avg_rating
- })
+ 'id': s.id,
+ 'username': p.user.username, # Packs have same username as sounds inside pack
+ 'similarity_state': s.similarity_state,
+ 'duration': s.duration,
+ 'preview_mp3': s.locations('preview.LQ.mp3.url'),
+ 'preview_ogg': s.locations('preview.LQ.ogg.url'),
+ 'wave': s.locations('display.wave_bw.L.url'),
+ 'spectral': s.locations('display.spectral_bw.L.url'),
+ 'num_ratings': s.num_ratings,
+ 'avg_rating': s.avg_rating
+ })
p.num_sounds_unpublished_precomputed = p.sounds.count() - p.num_sounds
p.licenses_data_precomputed = ([lid for _, lid in licenses], [lname for lname, _ in licenses])
- p.pack_tags = [{'name': tag, 'count': count, 'browse_url': p.browse_pack_tag_url(tag)}
- for tag, count in Counter(tags).most_common(10)] # See pack.get_pack_tags_bw
+ p.pack_tags = [{
+ 'name': tag,
+ 'count': count,
+ 'browse_url': p.browse_pack_tag_url(tag)
+ } for tag, count in Counter(tags).most_common(10)] # See pack.get_pack_tags_bw
p.selected_sounds_data = selected_sounds_data
p.user_profile_locations = p.user.profile.locations()
p.has_geotags_precomputed = has_geotags
@@ -1634,8 +1703,8 @@ class Pack(models.Model):
created = models.DateTimeField(db_index=True, auto_now_add=True)
license_crc = models.CharField(max_length=8, blank=True)
last_updated = models.DateTimeField(db_index=True, auto_now_add=True)
- num_downloads = models.PositiveIntegerField(default=0) # Updated via db trigger
- num_sounds = models.PositiveIntegerField(default=0) # Updated via django Pack.process() method
+ num_downloads = models.PositiveIntegerField(default=0) # Updated via db trigger
+ num_sounds = models.PositiveIntegerField(default=0) # Updated via django Pack.process() method
is_deleted = models.BooleanField(db_index=True, default=False)
VARIOUS_LICENSES_NAME = 'Various licenses'
@@ -1693,11 +1762,14 @@ def browse_pack_tag_url(self, tag):
def get_pack_tags_bw(self):
try:
if hasattr(self, 'pack_tags'):
- return self.pack_tags # If precomputed from PackManager.bulk_query_id method
+ return self.pack_tags # If precomputed from PackManager.bulk_query_id method
else:
pack_tags_counts = get_search_engine().get_pack_tags(self.user.username, self.name)
- return [{'name': tag, 'count': count, 'browse_url': browse_pack_tag_url(tag)}
- for tag, count in pack_tags_counts]
+ return [{
+ 'name': tag,
+ 'count': count,
+ 'browse_url': browse_pack_tag_url(tag)
+ } for tag, count in pack_tags_counts]
except SearchEngineException as e:
return []
except Exception as e:
@@ -1712,7 +1784,7 @@ def delete_pack(self, remove_sounds=True):
# Instead, Pack.delete_pack() should be used
if remove_sounds:
for sound in self.sounds.all():
- sound.delete() # Create DeletedSound objects and delete original objects
+ sound.delete() # Create DeletedSound objects and delete original objects
else:
for sound in self.sounds.all():
sound.invalidate_template_caches()
@@ -1728,17 +1800,14 @@ def invalidate_template_caches(self):
invalidate_template_cache("bw_pack_stats", self.id)
def get_attribution(self):
- sounds_list = self.sounds.filter(processing_state="OK",
- moderation_state="OK").select_related('user', 'license')
+ sounds_list = self.sounds.filter(processing_state="OK", moderation_state="OK").select_related('user', 'license')
users = User.objects.filter(sounds__in=sounds_list).distinct()
# Generate text file with license info
licenses = License.objects.filter(sound__pack=self).distinct()
- attribution = render_to_string("sounds/pack_attribution.txt",
- dict(users=users,
- pack=self,
- licenses=licenses,
- sound_list=sounds_list))
+ attribution = render_to_string(
+ "sounds/pack_attribution.txt", dict(users=users, pack=self, licenses=licenses, sound_list=sounds_list)
+ )
return attribution
@property
@@ -1747,7 +1816,10 @@ def avg_rating(self):
if hasattr(self, 'avg_rating_precomputed'):
return self.avg_rating_precomputed
else:
- ratings = list(Sound.objects.filter(pack=self, num_ratings__gte=settings.MIN_NUMBER_RATINGS).values_list('avg_rating', flat=True))
+ ratings = list(
+ Sound.objects.filter(pack=self,
+ num_ratings__gte=settings.MIN_NUMBER_RATINGS).values_list('avg_rating', flat=True)
+ )
if ratings:
return sum(ratings) / len(ratings)
else:
@@ -1779,17 +1851,19 @@ def num_sounds_unpublished(self):
@cached_property
def licenses_data(self):
if hasattr(self, 'licenses_data_precomputed'):
- return self.licenses_data_precomputed # If precomputed from PackManager.bulk_query_id method
+ return self.licenses_data_precomputed # If precomputed from PackManager.bulk_query_id method
else:
- licenses_data = list(Sound.objects.select_related('license').filter(pack=self).values_list('license__name', 'license_id'))
+ licenses_data = list(
+ Sound.objects.select_related('license').filter(pack=self).values_list('license__name', 'license_id')
+ )
license_ids = [lid for _, lid in licenses_data]
license_names = [lname for lname, _ in licenses_data]
return license_ids, license_names
-
+
@property
def license_summary_name_and_id(self):
license_ids, license_names = self.licenses_data
-
+
if len(set(license_ids)) == 1:
# All sounds have same license
license_summary_name = license_names[0]
@@ -1828,7 +1902,7 @@ def has_geotags(self):
return self.has_geotags_precomputed
else:
return Sound.objects.filter(pack=self).exclude(geotag=None).count() > 0
-
+
@property
def should_display_small_icons_in_second_line(self):
# See same method in Sound class more more information
@@ -1836,9 +1910,9 @@ def should_display_small_icons_in_second_line(self):
if self.has_geotags:
icons_count += 1
if self.num_downloads:
- icons_count +=2 # Counts double as it takes more width
+ icons_count += 2 # Counts double as it takes more width
if self.num_ratings:
- icons_count +=2 # Counts double as it takes more width
+ icons_count += 2 # Counts double as it takes more width
title_num_chars = len(self.name)
if icons_count >= 6:
return title_num_chars >= 5
@@ -1886,11 +1960,13 @@ def update_num_downloads_on_delete(**kwargs):
download = kwargs['instance']
if download.sound_id:
Sound.objects.filter(id=download.sound_id).update(
- is_index_dirty=True, num_downloads=Greatest(F('num_downloads') - 1, 0))
- accounts.models.Profile.objects.filter(user_id=download.user_id).update(
- num_sound_downloads=Greatest(F('num_sound_downloads') - 1, 0))
+ is_index_dirty=True, num_downloads=Greatest(F('num_downloads') - 1, 0)
+ )
+ accounts.models.Profile.objects.filter(user_id=download.user_id
+ ).update(num_sound_downloads=Greatest(F('num_sound_downloads') - 1, 0))
accounts.models.Profile.objects.filter(user_id=download.sound.user_id).update(
- num_user_sounds_downloads=Greatest(F('num_user_sounds_downloads') - 1, 0))
+ num_user_sounds_downloads=Greatest(F('num_user_sounds_downloads') - 1, 0)
+ )
@receiver(post_save, sender=Download)
@@ -1899,11 +1975,14 @@ def update_num_downloads_on_insert(**kwargs):
if kwargs['created']:
if download.sound_id:
Sound.objects.filter(id=download.sound_id).update(
- is_index_dirty=True, num_downloads=Greatest(F('num_downloads') + 1, 0))
+ is_index_dirty=True, num_downloads=Greatest(F('num_downloads') + 1, 0)
+ )
accounts.models.Profile.objects.filter(user_id=download.user_id).update(
- num_sound_downloads=Greatest(F('num_sound_downloads') + 1, 0))
+ num_sound_downloads=Greatest(F('num_sound_downloads') + 1, 0)
+ )
accounts.models.Profile.objects.filter(user_id=download.sound.user_id).update(
- num_user_sounds_downloads=Greatest(F('num_user_sounds_downloads') + 1, 0))
+ num_user_sounds_downloads=Greatest(F('num_user_sounds_downloads') + 1, 0)
+ )
class PackDownload(models.Model):
@@ -1925,10 +2004,11 @@ class PackDownloadSound(models.Model):
def update_num_downloads_on_delete_pack(**kwargs):
download = kwargs['instance']
Pack.objects.filter(id=download.pack_id).update(num_downloads=Greatest(F('num_downloads') - 1, 0))
- accounts.models.Profile.objects.filter(user_id=download.user_id).update(
- num_pack_downloads=Greatest(F('num_pack_downloads') - 1, 0))
+ accounts.models.Profile.objects.filter(user_id=download.user_id
+ ).update(num_pack_downloads=Greatest(F('num_pack_downloads') - 1, 0))
accounts.models.Profile.objects.filter(user_id=download.pack.user_id).update(
- num_user_packs_downloads=Greatest(F('num_user_packs_downloads') - 1, 0))
+ num_user_packs_downloads=Greatest(F('num_user_packs_downloads') - 1, 0)
+ )
@receiver(post_save, sender=PackDownload)
@@ -1936,18 +2016,16 @@ def update_num_downloads_on_insert_pack(**kwargs):
download = kwargs['instance']
if kwargs['created']:
Pack.objects.filter(id=download.pack_id).update(num_downloads=Greatest(F('num_downloads') + 1, 0))
- accounts.models.Profile.objects.filter(user_id=download.user_id).update(
- num_pack_downloads=Greatest(F('num_pack_downloads') + 1, 0))
+ accounts.models.Profile.objects.filter(user_id=download.user_id
+ ).update(num_pack_downloads=Greatest(F('num_pack_downloads') + 1, 0))
accounts.models.Profile.objects.filter(user_id=download.pack.user_id).update(
- num_user_packs_downloads=Greatest(F('num_user_packs_downloads') + 1, 0))
+ num_user_packs_downloads=Greatest(F('num_user_packs_downloads') + 1, 0)
+ )
class RemixGroup(models.Model):
protovis_data = models.TextField(null=True, blank=True, default=None)
- sounds = models.ManyToManyField(Sound,
- symmetrical=False,
- related_name='remix_group',
- blank=True)
+ sounds = models.ManyToManyField(Sound, symmetrical=False, related_name='remix_group', blank=True)
group_size = models.PositiveIntegerField(null=False, default=0)
@@ -1967,16 +2045,16 @@ class SoundAnalysis(models.Model):
or can be stored in a JSON/YAML file in disk.
"""
STATUS_CHOICES = (
- ("QU", 'Queued'),
- ("OK", 'Ok'),
- ("SK", 'Skipped'),
- ("FA", 'Failed'),
- )
+ ("QU", 'Queued'),
+ ("OK", 'Ok'),
+ ("SK", 'Skipped'),
+ ("FA", 'Failed'),
+ )
sound = models.ForeignKey(Sound, related_name='analyses', on_delete=models.CASCADE)
last_sent_to_queue = models.DateTimeField(auto_now_add=True)
last_analyzer_finished = models.DateTimeField(null=True)
- analyzer = models.CharField(db_index=True, max_length=255) # Analyzer name including version
+ analyzer = models.CharField(db_index=True, max_length=255) # Analyzer name including version
analysis_data = models.JSONField(null=True)
analysis_status = models.CharField(null=False, default="QU", db_index=True, max_length=2, choices=STATUS_CHOICES)
num_analysis_attempts = models.IntegerField(default=0)
@@ -2070,7 +2148,8 @@ def __str__(self):
return f'Analysis of sound {self.sound_id} with {self.analyzer}'
class Meta:
- unique_together = (("sound", "analyzer")) # one sounds.SoundAnalysis object per sound<>analyzer combination
+ unique_together = (("sound", "analyzer")) # one sounds.SoundAnalysis object per sound<>analyzer combination
+
def on_delete_sound_analysis(sender, instance, **kwargs):
# Right before deleting a SoundAnalysis object, delete also the associated log and analysis files (if any)
@@ -2080,4 +2159,5 @@ def on_delete_sound_analysis(sender, instance, **kwargs):
except Exception as e:
pass
+
pre_delete.connect(on_delete_sound_analysis, sender=SoundAnalysis)
diff --git a/sounds/templatetags/display_pack.py b/sounds/templatetags/display_pack.py
index af70bb0a8..e9ac1a5db 100644
--- a/sounds/templatetags/display_pack.py
+++ b/sounds/templatetags/display_pack.py
@@ -18,7 +18,6 @@
# See AUTHORS file.
#
-
from django import template
from sounds.models import Pack
@@ -76,9 +75,9 @@ def display_pack_big(context, pack):
@register.inclusion_tag('sounds/display_pack_selectable.html', takes_context=True)
def display_pack_small_selectable(context, pack, selected=False):
- context = context.get('original_context', context) # This is to allow passing context in nested inclusion tags
+ context = context.get('original_context', context) # This is to allow passing context in nested inclusion tags
tvars = display_pack(context, pack, size='small')
tvars.update({
'selected': selected,
})
- return tvars
\ No newline at end of file
+ return tvars
diff --git a/sounds/templatetags/display_remix.py b/sounds/templatetags/display_remix.py
index cdc730837..a302ea266 100644
--- a/sounds/templatetags/display_remix.py
+++ b/sounds/templatetags/display_remix.py
@@ -22,64 +22,62 @@
from django import template
import json
-
register = template.Library()
-@register.inclusion_tag('sounds/display_remix.html', takes_context=True)
+
+@register.inclusion_tag('sounds/display_remix.html', takes_context=True)
# TODO: ***just a reminder***
# there is probably a more efficient way to prepare the data
# CHECK ===> documentation for v.Layout.Network.Link #sourceNode
#
# FIXME: pagination doesn't work with this, we're missing the source....
def display_remix(context, sound, sounds):
-
+
nodes = []
links = []
tempList = []
# get position in queryset related to ids
# we need this to create the links between the nodes
- for idx,val in enumerate(sounds):
+ for idx, val in enumerate(sounds):
tempList.append({'id': val.id, 'pos': idx})
-
- for idx,val in enumerate(sounds):
- nodes.append({
- 'nodeName':val.original_filename,
- 'group':1,
- 'id':val.id,
- 'username': val.user.username
- })
-
+
+ for idx, val in enumerate(sounds):
+ nodes.append({'nodeName': val.original_filename, 'group': 1, 'id': val.id, 'username': val.user.username})
+
# since we go forward in time, if a sound has sources you can assign its sources
# the target will always be the current object
for src in val.sources.all():
- # we don't want the sources of the first item
+ # we don't want the sources of the first item
# since that could give us the whole graph
if idx > 0:
links.append({
- 'source': str([t['pos'] for t in tempList if t['id']==src.id]).strip('[,]'),
- 'source_id': src.id,
- 'target': idx,
- 'target_id': val.id,
- 'value': 1
- })
-
-
- return { 'data' : json.dumps({
- 'nodes' : nodes,
- 'links' : links,
- 'length': len(sounds), # to calculate canvas height
- 'color': '#F1D9FF',
- 'eccentricity' : __calculateEccentricity(len(sounds))
- }) }
-
+ 'source': str([t['pos'] for t in tempList if t['id'] == src.id]).strip('[,]'),
+ 'source_id': src.id,
+ 'target': idx,
+ 'target_id': val.id,
+ 'value': 1
+ })
+
+ return {
+ 'data':
+ json.dumps({
+ 'nodes': nodes,
+ 'links': links,
+ 'length': len(sounds), # to calculate canvas height
+ 'color': '#F1D9FF',
+ 'eccentricity': __calculateEccentricity(len(sounds))
+ })
+ }
+
+
# Calculate eccentricity so the arcs don't get clipped
-# N.B. this is not the canonical way to calculate eccentricity but protovis uses this formula
+# N.B. this is not the canonical way to calculate eccentricity but protovis uses this formula
def __calculateEccentricity(sounds_length):
eccentricity = 0
if sounds_length > 3:
- a = (sounds_length-2) * 80
+ a = (sounds_length - 2) * 80
b = 200
- eccentricity = (1 - b/a) * (1 - b/a)
-
+ eccentricity = (1 - b / a) * (1 - b / a)
+
return eccentricity
diff --git a/sounds/templatetags/display_sound.py b/sounds/templatetags/display_sound.py
index 71cf40499..14622b388 100644
--- a/sounds/templatetags/display_sound.py
+++ b/sounds/templatetags/display_sound.py
@@ -18,7 +18,6 @@
# See AUTHORS file.
#
-
from django import template
from django.conf import settings
from random import randint
@@ -30,7 +29,16 @@
@register.inclusion_tag('sounds/display_sound.html', takes_context=True)
-def display_sound(context, sound, player_size='small', show_bookmark=None, show_similar_sounds=None, show_remix=None, show_rate_widget=False, show_timesince=False):
+def display_sound(
+ context,
+ sound,
+ player_size='small',
+ show_bookmark=None,
+ show_similar_sounds=None,
+ show_remix=None,
+ show_rate_widget=False,
+ show_timesince=False
+):
"""This templatetag is used to display a sound with its player. It prepares some variables that are then passed
to the display_sound.html template to show sound information together with the player.
@@ -93,7 +101,7 @@ def sound_object_retrieved_using_bulk_query_id(sound):
"""
return hasattr(sound, 'tag_array')
-
+
if isinstance(sound, Sound):
if sound_object_retrieved_using_bulk_query_id(sound):
sound_obj = sound
@@ -119,22 +127,40 @@ def sound_object_retrieved_using_bulk_query_id(sound):
else:
request = context['request']
return {
- 'sound': sound_obj,
- 'user_profile_locations': Profile.locations_static(sound_obj.user_id, getattr(sound_obj, 'user_has_avatar', False)),
- 'request': request,
- 'is_explicit': sound_obj.is_explicit and
- (not request.user.is_authenticated or not request.user.profile.is_adult),
- 'is_authenticated': request.user.is_authenticated,
- 'show_bookmark_button': show_bookmark if show_bookmark is not None else (player_size == 'small' or player_size == 'small_no_info' or player_size == 'big_no_info'), # Only BW
- 'show_similar_sounds_button': show_similar_sounds if show_similar_sounds is not None else (player_size == 'small' or player_size == 'small_no_info' or player_size == 'big_no_info'), # Only BW
- 'show_remix_group_button': show_remix if show_remix is not None else (player_size == 'small' or player_size == 'small_no_info' or player_size == 'big_no_info'), # Only BW
- 'show_rate_widget': show_rate_widget if (player_size == 'small' or player_size == 'small_no_info') else False, # Only BW
- 'request_user_is_author': request.user.is_authenticated and sound_obj.user_id == request.user.id,
- 'player_size': player_size,
- 'show_milliseconds': 'true' if (player_size == 'big_no_info' or sound_obj.duration < 10) else 'false', # Only BW
- 'show_timesince': show_timesince,
- 'min_num_ratings': settings.MIN_NUMBER_RATINGS,
- 'random_number': randint(1, 1000000), # Used to generate IDs for HTML elements that need to be unique per sound/player instance
+ 'sound':
+ sound_obj,
+ 'user_profile_locations':
+ Profile.locations_static(sound_obj.user_id, getattr(sound_obj, 'user_has_avatar', False)),
+ 'request':
+ request,
+ 'is_explicit':
+ sound_obj.is_explicit and (not request.user.is_authenticated or not request.user.profile.is_adult),
+ 'is_authenticated':
+ request.user.is_authenticated,
+ 'show_bookmark_button':
+ show_bookmark if show_bookmark is not None else
+ (player_size == 'small' or player_size == 'small_no_info' or player_size == 'big_no_info'), # Only BW
+ 'show_similar_sounds_button':
+ show_similar_sounds if show_similar_sounds is not None else
+ (player_size == 'small' or player_size == 'small_no_info' or player_size == 'big_no_info'), # Only BW
+ 'show_remix_group_button':
+ show_remix if show_remix is not None else
+ (player_size == 'small' or player_size == 'small_no_info' or player_size == 'big_no_info'), # Only BW
+ 'show_rate_widget':
+ show_rate_widget if (player_size == 'small' or player_size == 'small_no_info') else False, # Only BW
+ 'request_user_is_author':
+ request.user.is_authenticated and sound_obj.user_id == request.user.id,
+ 'player_size':
+ player_size,
+ 'show_milliseconds':
+ 'true' if (player_size == 'big_no_info' or sound_obj.duration < 10) else 'false', # Only BW
+ 'show_timesince':
+ show_timesince,
+ 'min_num_ratings':
+ settings.MIN_NUMBER_RATINGS,
+ 'random_number':
+ randint(1, 1000000
+ ), # Used to generate IDs for HTML elements that need to be unique per sound/player instance
}
@@ -142,48 +168,97 @@ def sound_object_retrieved_using_bulk_query_id(sound):
def display_sound_small(context, sound):
return display_sound(context, sound, player_size='small', show_rate_widget=True)
+
@register.inclusion_tag('sounds/display_sound.html', takes_context=True)
def display_sound_small_with_timesince(context, sound):
return display_sound(context, sound, player_size='small', show_rate_widget=True, show_timesince=True)
+
@register.inclusion_tag('sounds/display_sound.html', takes_context=True)
def display_sound_moderation(context, sound):
- return display_sound(context, sound, player_size='moderation', show_bookmark=False, show_similar_sounds=False, show_remix=True, show_rate_widget=False)
+ return display_sound(
+ context,
+ sound,
+ player_size='moderation',
+ show_bookmark=False,
+ show_similar_sounds=False,
+ show_remix=True,
+ show_rate_widget=False
+ )
+
@register.inclusion_tag('sounds/display_sound.html', takes_context=True)
def display_sound_small_no_bookmark(context, sound):
- return display_sound(context, sound, player_size='small', show_bookmark=False, show_similar_sounds=False, show_remix=False, show_rate_widget=True)
+ return display_sound(
+ context,
+ sound,
+ player_size='small',
+ show_bookmark=False,
+ show_similar_sounds=False,
+ show_remix=False,
+ show_rate_widget=True
+ )
+
@register.inclusion_tag('sounds/display_sound.html', takes_context=True)
def display_sound_small_no_bookmark_no_ratings(context, sound):
- return display_sound(context, sound, player_size='small', show_bookmark=False, show_similar_sounds=False, show_remix=False, show_rate_widget=False)
+ return display_sound(
+ context,
+ sound,
+ player_size='small',
+ show_bookmark=False,
+ show_similar_sounds=False,
+ show_remix=False,
+ show_rate_widget=False
+ )
+
@register.inclusion_tag('sounds/display_sound.html', takes_context=True)
def display_sound_middle(context, sound):
- return display_sound(context, sound, player_size='middle', show_bookmark=True, show_similar_sounds=True, show_remix=True)
+ return display_sound(
+ context, sound, player_size='middle', show_bookmark=True, show_similar_sounds=True, show_remix=True
+ )
+
@register.inclusion_tag('sounds/display_sound.html', takes_context=True)
def display_sound_big_no_info(context, sound):
return display_sound(context, sound, player_size='big_no_info')
+
@register.inclusion_tag('sounds/display_sound.html', takes_context=True)
def display_sound_big_no_info_no_bookmark(context, sound):
- return display_sound(context, sound, player_size='big_no_info', show_bookmark=False, show_similar_sounds=False, show_remix=False)
+ return display_sound(
+ context, sound, player_size='big_no_info', show_bookmark=False, show_similar_sounds=False, show_remix=False
+ )
+
@register.inclusion_tag('sounds/display_sound.html', takes_context=True)
def display_sound_small_no_info(context, sound):
return display_sound(context, sound, player_size='small_no_info', show_rate_widget=True)
+
@register.inclusion_tag('sounds/display_sound.html', takes_context=True)
def display_sound_small_no_info_no_buttons(context, sound):
- return display_sound(context, sound, player_size='small_no_info', show_rate_widget=False, show_bookmark=False, show_similar_sounds=False, show_remix=False)
+ return display_sound(
+ context,
+ sound,
+ player_size='small_no_info',
+ show_rate_widget=False,
+ show_bookmark=False,
+ show_similar_sounds=False,
+ show_remix=False
+ )
+
@register.inclusion_tag('sounds/display_sound.html', takes_context=True)
def display_sound_minimal(context, sound):
return display_sound(context, sound, player_size='minimal')
+
@register.inclusion_tag('sounds/display_sound.html', takes_context=True)
-def display_sound_no_sound_object(context, file_data, player_size, show_bookmark=True, show_similar_sounds=True, show_remix=True):
+def display_sound_no_sound_object(
+ context, file_data, player_size, show_bookmark=True, show_similar_sounds=True, show_remix=True
+):
'''
This player works for sounds which have no Sound object. It requires
URLs to the sound files (mp3 and ogg)a and the wave/spectral images, and
@@ -208,7 +283,8 @@ def display_sound_no_sound_object(context, file_data, player_size, show_bookmark
'''
return {
'sound': {
- 'id': file_data.get('id', file_data['preview_mp3'].split('/')[-2]), # If no id, use a unique fake ID to avoid caching problems
+ 'id': file_data.get('id', file_data['preview_mp3'].split('/')[-2]
+ ), # If no id, use a unique fake ID to avoid caching problems
'username': file_data.get('username', 'nousername'),
'similarity_state': file_data.get('similarity_state', 'FA'),
'duration': file_data['duration'],
@@ -218,23 +294,35 @@ def display_sound_no_sound_object(context, file_data, player_size, show_bookmark
'locations': {
'preview': {
'LQ': {
- 'mp3': {'url': file_data['preview_mp3']},
- 'ogg': {'url': file_data['preview_ogg']}
+ 'mp3': {
+ 'url': file_data['preview_mp3']
+ },
+ 'ogg': {
+ 'url': file_data['preview_ogg']
+ }
}
},
'display': {
'wave_bw': {
- 'M': {'url': file_data['wave']},
- 'L': {'url': file_data['wave']}
- },
+ 'M': {
+ 'url': file_data['wave']
+ },
+ 'L': {
+ 'url': file_data['wave']
+ }
+ },
'spectral_bw': {
- 'M': {'url': file_data['spectral']},
- 'L': {'url': file_data['spectral']}
+ 'M': {
+ 'url': file_data['spectral']
+ },
+ 'L': {
+ 'url': file_data['spectral']
+ }
}
}
}
},
- 'show_milliseconds': 'true' if ('big' in player_size ) else 'false',
+ 'show_milliseconds': 'true' if ('big' in player_size) else 'false',
'show_bookmark_button': show_bookmark and 'id' in file_data,
'show_similar_sounds_button': show_similar_sounds and 'similarity_state' in file_data,
'show_remix_group_button': show_remix and 'remixgroup_id' in file_data,
@@ -243,6 +331,7 @@ def display_sound_no_sound_object(context, file_data, player_size, show_bookmark
'request': context['request']
}
+
@register.inclusion_tag('sounds/display_sound.html', takes_context=True)
def display_sound_big_no_sound_object(context, file_data):
return display_sound_no_sound_object(context, file_data, player_size='big_no_info')
@@ -251,14 +340,23 @@ def display_sound_big_no_sound_object(context, file_data):
@register.inclusion_tag('sounds/display_sound.html', takes_context=True)
def display_sound_small_no_sound_object(context, file_data):
return display_sound_no_sound_object(context, file_data, player_size='small_no_info')
-
+
+
@register.inclusion_tag('sounds/display_sound.html', takes_context=True)
def display_sound_small_no_sound_object_no_bookmark(context, file_data):
- return display_sound_no_sound_object(context, file_data, player_size='small_no_info', show_bookmark=False, show_similar_sounds=False, show_remix=False)
+ return display_sound_no_sound_object(
+ context,
+ file_data,
+ player_size='small_no_info',
+ show_bookmark=False,
+ show_similar_sounds=False,
+ show_remix=False
+ )
+
@register.inclusion_tag('sounds/display_sound_selectable.html', takes_context=True)
def display_sound_small_selectable(context, sound, selected=False):
- context = context.get('original_context', context) # This is to allow passing context in nested inclusion tags
+ context = context.get('original_context', context) # This is to allow passing context in nested inclusion tags
tvars = display_sound_small_no_bookmark_no_ratings(context, sound)
tvars.update({
'selected': selected,
diff --git a/sounds/templatetags/sound_signature.py b/sounds/templatetags/sound_signature.py
index aadec9c76..414224d6a 100644
--- a/sounds/templatetags/sound_signature.py
+++ b/sounds/templatetags/sound_signature.py
@@ -8,13 +8,13 @@
SOUND_SIGNATURE_SOUND_ID_PLACEHOLDER = "${sound_id}"
SOUND_SIGNATURE_SOUND_URL_PLACEHOLDER = "${sound_url}"
+
@register.filter(name='sound_signature_replace')
def sound_signature_replace(value, sound):
domain = f"https://{Site.objects.get_current().domain}"
abs_url = urllib.parse.urljoin(domain, reverse('sound', args=[sound.user.username, sound.id]))
- replace = [(SOUND_SIGNATURE_SOUND_ID_PLACEHOLDER, str(sound.id)),
- (SOUND_SIGNATURE_SOUND_URL_PLACEHOLDER, abs_url)]
+ replace = [(SOUND_SIGNATURE_SOUND_ID_PLACEHOLDER, str(sound.id)), (SOUND_SIGNATURE_SOUND_URL_PLACEHOLDER, abs_url)]
for placeholder, v in replace:
value = value.replace(placeholder, v)
return value
diff --git a/sounds/templatetags/sounds_selector.py b/sounds/templatetags/sounds_selector.py
index b34b5af55..b666140f5 100644
--- a/sounds/templatetags/sounds_selector.py
+++ b/sounds/templatetags/sounds_selector.py
@@ -18,7 +18,6 @@
# See AUTHORS file.
#
-
from django import template
from django.conf import settings
@@ -39,9 +38,10 @@ def sounds_selector(context, sounds, selected_sound_ids=[], show_select_all_butt
'objects': sounds,
'type': 'sounds',
'show_select_all_buttons': show_select_all_buttons,
- 'original_context': context # This will be used so a nested inclusion tag can get the original context
+ 'original_context': context # This will be used so a nested inclusion tag can get the original context
}
+
@register.inclusion_tag('molecules/object_selector.html', takes_context=True)
def sounds_selector_with_select_buttons(context, sounds, selected_sound_ids=[]):
return sounds_selector(context, sounds, selected_sound_ids=selected_sound_ids, show_select_all_buttons=True)
@@ -59,5 +59,5 @@ def packs_selector_with_select_buttons(context, packs, selected_pack_ids=[]):
'objects': packs,
'type': 'packs',
'show_select_all_buttons': True,
- 'original_context': context # This will be used so a nested inclusion tag can get the original context
- }
\ No newline at end of file
+ 'original_context': context # This will be used so a nested inclusion tag can get the original context
+ }
diff --git a/sounds/tests/test_manager.py b/sounds/tests/test_manager.py
index 2bfe29715..d494b6224 100644
--- a/sounds/tests/test_manager.py
+++ b/sounds/tests/test_manager.py
@@ -178,8 +178,10 @@ class PackManagerQueryMethods(TestCase):
fixtures = ['licenses']
- fields_to_check_bulk_query_id = ['id', 'user_id', 'name', 'description', 'is_dirty', 'created', 'license_crc',
- 'last_updated', 'num_downloads', 'num_sounds', 'is_deleted']
+ fields_to_check_bulk_query_id = [
+ 'id', 'user_id', 'name', 'description', 'is_dirty', 'created', 'license_crc', 'last_updated', 'num_downloads',
+ 'num_sounds', 'is_deleted'
+ ]
def setUp(self):
user, packs, sounds = create_user_and_sounds(num_sounds=3, num_packs=3, tags="tag1 tag2 tag3")
diff --git a/sounds/tests/test_random_sound.py b/sounds/tests/test_random_sound.py
index 09b72ac54..a847de2a0 100644
--- a/sounds/tests/test_random_sound.py
+++ b/sounds/tests/test_random_sound.py
@@ -77,7 +77,7 @@ def test_random_sound_view_bad_solr(self, random_sound):
sound.save()
# We only use the ID field from solr
- random_sound.return_value = sound.id+100
+ random_sound.return_value = sound.id + 100
# Even though solr returns sound.id+100, we find we are redirected to the db sound, because
# we call Sound.objects.random
@@ -175,8 +175,13 @@ def test_flag(self):
"""Doesn't select a sound if it's flagged"""
sound = self._create_test_sound()
sound.save()
- Flag.objects.create(sound=sound, reporting_user=User.objects.all()[0], email="testemail@freesound.org",
- reason_type="O", reason="Not a good sound")
+ Flag.objects.create(
+ sound=sound,
+ reporting_user=User.objects.all()[0],
+ email="testemail@freesound.org",
+ reason_type="O",
+ reason="Not a good sound"
+ )
random = Sound.objects.random()
self.assertIsNone(random)
diff --git a/sounds/tests/test_sound.py b/sounds/tests/test_sound.py
index 889b82ca5..536ee5f80 100644
--- a/sounds/tests/test_sound.py
+++ b/sounds/tests/test_sound.py
@@ -18,7 +18,6 @@
# See AUTHORS file.
#
-
import json
import os
import time
@@ -139,8 +138,9 @@ class ChangeSoundOwnerTestCase(TestCase):
def test_change_sound_owner(self, delete_sounds_from_search_engine):
# Prepare some content
userA, packsA, soundsA = create_user_and_sounds(num_sounds=4, num_packs=1, tags="tag1 tag2 tag3 tag4 tag5")
- userB, _, _ = create_user_and_sounds(num_sounds=0, num_packs=0,
- user=User.objects.create_user("testuser2", password="testpass2"))
+ userB, _, _ = create_user_and_sounds(
+ num_sounds=0, num_packs=0, user=User.objects.create_user("testuser2", password="testpass2")
+ )
fake_original_path_template = '/test/path/{sound_id}_{user_id}.wav'
@@ -161,7 +161,7 @@ def test_change_sound_owner(self, delete_sounds_from_search_engine):
target_sound_id = target_sound.id
target_sound_pack = target_sound.pack
target_sound_tags = [ti.id for ti in target_sound.tags.all()]
- remaining_sound_ids = [s.id for s in soundsA[1:]] # Other sounds that the user owns
+ remaining_sound_ids = [s.id for s in soundsA[1:]] # Other sounds that the user owns
# Change ownership of sound
target_sound.change_owner(userB)
@@ -193,23 +193,25 @@ class ProfileNumSoundsTestCase(TestCase):
def test_moderation_and_processing_state_changes(self, delete_sounds_from_search_engine):
user, packs, sounds = create_user_and_sounds()
sound = sounds[0]
- self.assertEqual(user.profile.num_sounds, 0) # Sound not yet moderated or processed
+ self.assertEqual(user.profile.num_sounds, 0) # Sound not yet moderated or processed
sound.change_moderation_state("OK")
- self.assertEqual(user.profile.num_sounds, 0) # Sound not yet processed
+ self.assertEqual(user.profile.num_sounds, 0) # Sound not yet processed
sound.change_processing_state("OK")
- self.assertEqual(user.profile.num_sounds, 1) # Sound now processed and moderated
+ self.assertEqual(user.profile.num_sounds, 1) # Sound now processed and moderated
sound.change_processing_state("OK")
- self.assertEqual(user.profile.num_sounds, 1) # Sound reprocessed and again set as ok
+ self.assertEqual(user.profile.num_sounds, 1) # Sound reprocessed and again set as ok
sound.change_processing_state("OK")
- self.assertEqual(user.profile.num_sounds, 1) # Sound reprocessed second time and again set as ok
+ self.assertEqual(user.profile.num_sounds, 1) # Sound reprocessed second time and again set as ok
sound.change_processing_state("FA")
- self.assertEqual(user.profile.num_sounds, 0) # Sound failed processing
+ self.assertEqual(user.profile.num_sounds, 0) # Sound failed processing
delete_sounds_from_search_engine.assert_called_once_with([sound.id])
sound.change_processing_state("OK")
- self.assertEqual(user.profile.num_sounds, 1) # Sound processed again as ok
+ self.assertEqual(user.profile.num_sounds, 1) # Sound processed again as ok
sound.change_moderation_state("DE")
- self.assertEqual(user.profile.num_sounds, 0) # Sound unmoderated
- self.assertEqual(delete_sounds_from_search_engine.call_count, 2) # Sound deleted once when going to FA, once when DE
+ self.assertEqual(user.profile.num_sounds, 0) # Sound unmoderated
+ self.assertEqual(
+ delete_sounds_from_search_engine.call_count, 2
+ ) # Sound deleted once when going to FA, once when DE
@mock.patch('sounds.models.delete_sounds_from_search_engine')
def test_sound_delete(self, delete_sounds_from_search_engine):
@@ -240,10 +242,10 @@ def test_deletedsound_creation(self, delete_sounds_from_search_engine):
ds = DeletedSound.objects.get(sound_id=sound_id)
# Check this elements are in the json saved on DeletedSound
- keys = ['num_ratings', 'duration', 'id', 'geotag_id', 'comments',
- 'base_filename_slug', 'num_downloads', 'md5', 'description',
- 'original_path', 'pack_id', 'license', 'created',
- 'original_filename', 'geotag']
+ keys = [
+ 'num_ratings', 'duration', 'id', 'geotag_id', 'comments', 'base_filename_slug', 'num_downloads', 'md5',
+ 'description', 'original_path', 'pack_id', 'license', 'created', 'original_filename', 'geotag'
+ ]
json_data = list(ds.data.keys())
for k in keys:
@@ -257,7 +259,7 @@ def test_pack_delete(self):
self.assertEqual(user.profile.num_sounds, 5)
pack = packs[0]
pack.delete_pack(remove_sounds=False)
- self.assertEqual(User.objects.get(id=user.id).profile.num_sounds, 5) # Should be 5 as sounds are not deleted
+ self.assertEqual(User.objects.get(id=user.id).profile.num_sounds, 5) # Should be 5 as sounds are not deleted
self.assertEqual(pack.is_deleted, True)
@@ -274,13 +276,13 @@ def test_create_and_delete_sounds(self, delete_sounds_from_search_engine):
for count, sound in enumerate(pack.sounds.all()):
sound.change_processing_state("OK")
sound.change_moderation_state("OK")
- self.assertEqual(Pack.objects.get(id=pack.id).num_sounds, count + 1) # Check pack has all sounds
+ self.assertEqual(Pack.objects.get(id=pack.id).num_sounds, count + 1) # Check pack has all sounds
sound_to_delete = sounds[0]
sound_to_delete_id = sound_to_delete.id
sound_to_delete.delete()
delete_sounds_from_search_engine.assert_called_once_with([sound_to_delete_id])
- self.assertEqual(Pack.objects.get(id=pack.id).num_sounds, N_SOUNDS - 1) # Check num_sounds on delete sound
+ self.assertEqual(Pack.objects.get(id=pack.id).num_sounds, N_SOUNDS - 1) # Check num_sounds on delete sound
def test_edit_sound(self):
N_SOUNDS = 1
@@ -290,20 +292,22 @@ def test_edit_sound(self):
self.assertEqual(pack.num_sounds, 0)
sound.change_processing_state("OK")
sound.change_moderation_state("OK")
- self.assertEqual(Pack.objects.get(id=pack.id).num_sounds, 1) # Check pack has all sounds
+ self.assertEqual(Pack.objects.get(id=pack.id).num_sounds, 1) # Check pack has all sounds
self.client.force_login(user)
- resp = self.client.post(reverse('sound-edit', args=[sound.user.username, sound.id]), {
- '0-sound_id': sound.id,
- '0-description': 'this is a description for the sound',
- '0-name': sound.original_filename,
- '0-tags': 'tag1 tag2 tag3',
- '0-license': '3',
- '0-new_pack': 'new pack name',
- '0-pack': ''
- })
+ resp = self.client.post(
+ reverse('sound-edit', args=[sound.user.username, sound.id]), {
+ '0-sound_id': sound.id,
+ '0-description': 'this is a description for the sound',
+ '0-name': sound.original_filename,
+ '0-tags': 'tag1 tag2 tag3',
+ '0-license': '3',
+ '0-new_pack': 'new pack name',
+ '0-pack': ''
+ }
+ )
self.assertRedirects(resp, reverse('sound', args=[sound.user.username, sound.id]))
- self.assertEqual(Pack.objects.get(id=pack.id).num_sounds, 0) # Sound changed from pack
+ self.assertEqual(Pack.objects.get(id=pack.id).num_sounds, 0) # Sound changed from pack
def test_edit_pack(self):
user, packs, sounds = create_user_and_sounds(num_sounds=4, num_packs=2)
@@ -320,11 +324,13 @@ def test_edit_pack(self):
sound_ids_pack2 = [s.id for s in pack2.sounds.all()]
sound_ids_pack2.append(sound_ids_pack1.pop())
self.client.force_login(user)
- resp = self.client.post(reverse('pack-edit', args=[pack2.user.username, pack2.id]), {
- 'pack_sounds': ','.join([str(sid) for sid in sound_ids_pack2]),
- 'name': 'Test pack 1 (edited)',
- 'description': 'A new description'
- })
+ resp = self.client.post(
+ reverse('pack-edit', args=[pack2.user.username, pack2.id]), {
+ 'pack_sounds': ','.join([str(sid) for sid in sound_ids_pack2]),
+ 'name': 'Test pack 1 (edited)',
+ 'description': 'A new description'
+ }
+ )
self.assertRedirects(resp, reverse('pack', args=[pack2.user.username, pack2.id]))
self.assertEqual(Pack.objects.get(id=pack1.id).num_sounds, 1)
self.assertEqual(Pack.objects.get(id=pack2.id).num_sounds, 3)
@@ -334,12 +340,16 @@ def test_edit_pack(self):
sound = sounds[0]
sound.change_processing_state("OK")
sound.change_moderation_state("OK")
- resp = self.client.post(reverse('pack-edit', args=[pack2.user.username, pack2.id]), {
- 'pack_sounds':
- ','.join([str(snd.id) for snd in Pack.objects.get(id=pack2.id).sounds.all()] + [str(sound.id)]),
- 'name': 'Test pack 1 (edited again)',
- 'description': 'A new description'
- })
+ resp = self.client.post(
+ reverse('pack-edit', args=[pack2.user.username, pack2.id]), {
+ 'pack_sounds':
+ ','.join([str(snd.id) for snd in Pack.objects.get(id=pack2.id).sounds.all()] + [str(sound.id)]),
+ 'name':
+ 'Test pack 1 (edited again)',
+ 'description':
+ 'A new description'
+ }
+ )
self.assertRedirects(resp, reverse('pack', args=[pack2.user.username, pack2.id]))
self.assertEqual(Pack.objects.get(id=pack1.id).num_sounds, 1)
self.assertEqual(Pack.objects.get(id=pack2.id).num_sounds, 4)
@@ -361,14 +371,23 @@ def test_delete_sound(self, delete_sounds_from_search_engine):
_, _, sounds_other_user = create_user_and_sounds(num_sounds=1, username="testuser4")
# Try to delete a sound not owned by the request user (should return 200 but not delete the sound)
- resp = self.client.post(reverse('accounts-manage-sounds', args=['published']),
- {"delete_confirm": "delete_confirm", "object-ids": [sounds_other_user[0].id]})
+ resp = self.client.post(
+ reverse('accounts-manage-sounds', args=['published']), {
+ "delete_confirm": "delete_confirm",
+ "object-ids": [sounds_other_user[0].id]
+ }
+ )
self.assertEqual(resp.status_code, 200)
self.assertEqual(Sound.objects.filter(id=sounds_other_user[0].id).count(), 1)
# Try to delete a sound owned the user (should delete sound)
- resp = self.client.post(reverse('accounts-manage-sounds', args=['published']),
- {"delete_confirm": "delete_confirm", "object-ids": [sound.id]}, follow=True)
+ resp = self.client.post(
+ reverse('accounts-manage-sounds', args=['published']), {
+ "delete_confirm": "delete_confirm",
+ "object-ids": [sound.id]
+ },
+ follow=True
+ )
self.assertEqual(Sound.objects.filter(id=sound_id).count(), 0)
self.assertEqual(resp.status_code, 200)
delete_sounds_from_search_engine.assert_called_once_with([sound.id])
@@ -379,8 +398,12 @@ def test_embed_iframe(self):
sound.moderation_state = 'OK'
sound.processing_state = 'OK'
sound.save()
- resp = self.client.get(reverse('embed-simple-sound-iframe',
- kwargs={"sound_id": sound.id, 'player_size': 'medium'}))
+ resp = self.client.get(
+ reverse('embed-simple-sound-iframe', kwargs={
+ "sound_id": sound.id,
+ 'player_size': 'medium'
+ })
+ )
self.assertEqual(resp.status_code, 200)
def test_sound_short_link(self):
@@ -401,7 +424,7 @@ def test_oembed_sound(self):
# Get url of the sound
url = reverse('sound', args=[sound.user.username, sound_id])
- resp = self.client.get(reverse('oembed-sound')+'?url='+url)
+ resp = self.client.get(reverse('oembed-sound') + '?url=' + url)
self.assertEqual(resp.status_code, 200)
self.assertTrue(resp.content != '')
@@ -447,8 +470,10 @@ def test_download_sound(self):
# Check sound can't be downloaded if user not logged in
resp = self.client.get(reverse('sound-download', args=[self.sound.user.username, self.sound.id]))
- self.assertRedirects(resp, '{}?next={}'.format(
- reverse('login'), reverse('sound', args=[self.sound.user.username, self.sound.id])))
+ self.assertRedirects(
+ resp,
+ '{}?next={}'.format(reverse('login'), reverse('sound', args=[self.sound.user.username, self.sound.id]))
+ )
# Check download works successfully if user logged in
self.client.force_login(self.user)
@@ -504,8 +529,10 @@ def test_download_pack(self):
# Check sound can't be downloaded if user not logged in
resp = self.client.get(reverse('pack-download', args=[self.sound.user.username, self.pack.id]))
- self.assertRedirects(resp, '{}?next={}'.format(
- reverse('login'), reverse('pack', args=[self.sound.user.username, self.pack.id])))
+ self.assertRedirects(
+ resp,
+ '{}?next={}'.format(reverse('login'), reverse('pack', args=[self.sound.user.username, self.pack.id]))
+ )
# Check donwload works successfully if user logged in
self.client.force_login(self.user)
@@ -516,8 +543,10 @@ def test_download_pack(self):
self.assertEqual(PackDownload.objects.filter(user=self.user, pack=self.pack).count(), 1)
# Check the number of PackDownloadSounds
- self.assertEqual(PackDownloadSound.objects.filter(
- pack_download__user=self.user, pack_download__pack=self.pack).count(), 1)
+ self.assertEqual(
+ PackDownloadSound.objects.filter(pack_download__user=self.user, pack_download__pack=self.pack).count(),
+ 1
+ )
# Download again and check n download objects is still 1
self.client.get(reverse('pack-download', args=[self.sound.user.username, self.pack.id]))
@@ -577,8 +606,7 @@ def setUp(self):
self.sound.processing_state = "OK"
self.sound.save()
self.user = user
- self.user_visitor = User.objects.create_user(
- username='testuservisitor', password='testpassword')
+ self.user_visitor = User.objects.create_user(username='testuservisitor', password='testpassword')
def test_no_signature(self):
"""Check signature is not present in sound page (regardless of the user who visits and the authentication)"""
@@ -635,16 +663,20 @@ class SoundTemplateCacheTests(TestCase):
def setUp(self):
cache.clear()
- user, packs, sounds = create_user_and_sounds(num_sounds=1, tags="tag1 tag2 tag3", description="Test description")
+ user, packs, sounds = create_user_and_sounds(
+ num_sounds=1, tags="tag1 tag2 tag3", description="Test description"
+ )
self.sound = sounds[0]
self.sound.change_processing_state("OK")
self.sound.change_moderation_state("OK")
self.user = user
def _get_sound_view_cache_keys(self):
- return ([get_template_cache_key('bw_sound_page', self.sound.id),
- get_template_cache_key('bw_sound_page_sidebar', self.sound.id)])
-
+ return ([
+ get_template_cache_key('bw_sound_page', self.sound.id),
+ get_template_cache_key('bw_sound_page_sidebar', self.sound.id)
+ ])
+
def _get_sound_view_footer_top_cache_keys(self):
return [get_template_cache_key('bw_sound_page', self.sound.id)]
@@ -673,7 +705,7 @@ def _print_cache(self, cache_keys):
print(cache_keys)
def test_update_description(self):
-
+
cache_keys = self._get_sound_view_cache_keys()
self._assertCacheAbsent(cache_keys)
@@ -688,13 +720,15 @@ def test_update_description(self):
# Edit sound
new_description = 'New description'
new_name = 'New name'
- resp = self.client.post(self._get_sound_url('sound-edit'), {
- '0-sound_id': self.sound.id,
- '0-description': new_description,
- '0-name': new_name,
- '0-tags': 'tag1 tag2 tag3',
- '0-license': ['3'],
- })
+ resp = self.client.post(
+ self._get_sound_url('sound-edit'), {
+ '0-sound_id': self.sound.id,
+ '0-description': new_description,
+ '0-name': new_name,
+ '0-tags': 'tag1 tag2 tag3',
+ '0-license': ['3'],
+ }
+ )
self.assertEqual(resp.status_code, 302)
# Check that keys are no longer in cache
@@ -722,9 +756,9 @@ def test_add_remove_comment(self):
self._assertCachePresent(cache_keys)
# Add comment
- resp = self.client.post(self._get_sound_url('sound'), {
- 'comment': 'Test comment'
- }, follow=True) # we are testing sound-display, rendering sound view is ok
+ resp = self.client.post(
+ self._get_sound_url('sound'), {'comment': 'Test comment'}, follow=True
+ ) # we are testing sound-display, rendering sound view is ok
delete_url = self._get_delete_comment_url(resp.content)
self._assertCacheAbsent(cache_keys)
@@ -757,9 +791,9 @@ def test_download(self, sendfile):
# Download
resp = self.client.get(self._get_sound_url('sound-download'))
- sendfile.assert_called_once_with(self.sound.locations("path"),
- self.sound.friendly_filename(),
- self.sound.locations("sendfile_url"))
+ sendfile.assert_called_once_with(
+ self.sound.locations("path"), self.sound.friendly_filename(), self.sound.locations("sendfile_url")
+ )
self.assertEqual(resp.status_code, 200)
self._assertCacheAbsent(cache_keys)
@@ -771,7 +805,9 @@ def test_download(self, sendfile):
@mock.patch('general.management.commands.similarity_update.Similarity.add', return_value='Dummy response')
def _test_similarity_update(self, cache_keys, expected, request_func, similarity_add, user=None):
# Create a SoundAnalysis object with status OK so "similarity_update" command will pick it up
- SoundAnalysis.objects.create(sound=self.sound, analyzer=settings.FREESOUND_ESSENTIA_EXTRACTOR_NAME, analysis_status="OK")
+ SoundAnalysis.objects.create(
+ sound=self.sound, analyzer=settings.FREESOUND_ESSENTIA_EXTRACTOR_NAME, analysis_status="OK"
+ )
self.sound.save()
self._assertCacheAbsent(cache_keys)
@@ -820,32 +856,38 @@ def _test_add_remove_pack(self, cache_keys, text, request_func, user=None):
# Add sound to pack
pack_name = 'New pack'
- resp = self.client.post(self._get_sound_url('sound-edit'), {
- '0-sound_id': self.sound.id,
- '0-description': self.sound.description,
- '0-name': self.sound.original_filename,
- '0-tags': self.sound.get_sound_tags_string(),
- '0-license': self.sound.license_id,
- '0-new_pack': pack_name,
- })
+ resp = self.client.post(
+ self._get_sound_url('sound-edit'), {
+ '0-sound_id': self.sound.id,
+ '0-description': self.sound.description,
+ '0-name': self.sound.original_filename,
+ '0-tags': self.sound.get_sound_tags_string(),
+ '0-license': self.sound.license_id,
+ '0-new_pack': pack_name,
+ }
+ )
self.assertEqual(resp.status_code, 302)
self._assertCacheAbsent(cache_keys)
# Check pack icon
self.sound.refresh_from_db()
self.assertIsNotNone(self.sound.pack)
- self.assertContains(request_func(user) if user is not None else request_func(), text) # request_func should render the template
+ self.assertContains(
+ request_func(user) if user is not None else request_func(), text
+ ) # request_func should render the template
self._assertCachePresent(cache_keys)
# Remove sound from pack
- resp = self.client.post(self._get_sound_url('sound-edit'), {
- '0-sound_id': self.sound.id,
- '0-description': self.sound.description,
- '0-name': self.sound.original_filename,
- '0-tags': self.sound.get_sound_tags_string(),
- '0-license': self.sound.license_id,
- '0-pack': '',
- })
+ resp = self.client.post(
+ self._get_sound_url('sound-edit'), {
+ '0-sound_id': self.sound.id,
+ '0-description': self.sound.description,
+ '0-name': self.sound.original_filename,
+ '0-tags': self.sound.get_sound_tags_string(),
+ '0-license': self.sound.license_id,
+ '0-pack': '',
+ }
+ )
self.assertEqual(resp.status_code, 302)
self._assertCacheAbsent(cache_keys)
@@ -876,20 +918,24 @@ def _test_add_remove_geotag(self, cache_keys, text, request_func, user=None):
self.client.force_login(self.user)
self.assertIsNone(self.sound.geotag)
- self.assertNotContains(request_func(user) if user is not None else request_func() if user is not None else request_func(), text)
+ self.assertNotContains(
+ request_func(user) if user is not None else request_func() if user is not None else request_func(), text
+ )
self._assertCachePresent(cache_keys)
# Add a geotag to the sound
- resp = self.client.post(self._get_sound_url('sound-edit'), {
- '0-sound_id': self.sound.id,
- '0-description': self.sound.description,
- '0-name': self.sound.original_filename,
- '0-tags': self.sound.get_sound_tags_string(),
- '0-license': self.sound.license_id,
- '0-lat': 20,
- '0-lon': 20,
- '0-zoom': 18
- })
+ resp = self.client.post(
+ self._get_sound_url('sound-edit'), {
+ '0-sound_id': self.sound.id,
+ '0-description': self.sound.description,
+ '0-name': self.sound.original_filename,
+ '0-tags': self.sound.get_sound_tags_string(),
+ '0-license': self.sound.license_id,
+ '0-lat': 20,
+ '0-lon': 20,
+ '0-zoom': 18
+ }
+ )
self.assertEqual(resp.status_code, 302)
self._assertCacheAbsent(cache_keys)
@@ -900,14 +946,16 @@ def _test_add_remove_geotag(self, cache_keys, text, request_func, user=None):
self._assertCachePresent(cache_keys)
# Remove geotag from the sound
- resp = self.client.post(self._get_sound_url('sound-edit'), {
- '0-sound_id': self.sound.id,
- '0-description': self.sound.description,
- '0-name': self.sound.original_filename,
- '0-tags': self.sound.get_sound_tags_string(),
- '0-license': self.sound.license_id,
- '0-remove_geotag': 'on',
- })
+ resp = self.client.post(
+ self._get_sound_url('sound-edit'), {
+ '0-sound_id': self.sound.id,
+ '0-description': self.sound.description,
+ '0-name': self.sound.original_filename,
+ '0-tags': self.sound.get_sound_tags_string(),
+ '0-license': self.sound.license_id,
+ '0-remove_geotag': 'on',
+ }
+ )
self.assertEqual(resp.status_code, 302)
self._assertCacheAbsent(cache_keys)
@@ -942,13 +990,15 @@ def _test_change_license(self, cache_keys, new_license, expected_text, request_f
self._assertCachePresent(cache_keys)
# Change license
- resp = self.client.post(self._get_sound_url('sound-edit'), {
- '0-sound_id': self.sound.id,
- '0-description': self.sound.description,
- '0-name': self.sound.original_filename,
- '0-tags': self.sound.get_sound_tags_string(),
- '0-license': new_license.id,
- })
+ resp = self.client.post(
+ self._get_sound_url('sound-edit'), {
+ '0-sound_id': self.sound.id,
+ '0-description': self.sound.description,
+ '0-name': self.sound.original_filename,
+ '0-tags': self.sound.get_sound_tags_string(),
+ '0-license': new_license.id,
+ }
+ )
self.assertEqual(resp.status_code, 302)
self._assertCacheAbsent(cache_keys)
@@ -985,14 +1035,16 @@ def _test_add_remove_remixes(self, cache_keys, text, request_func, user=None):
self._assertCachePresent(cache_keys)
# Indicate another sound as source
- resp = self.client.post(self._get_sound_url('sound-edit'), {
- '0-sound_id': self.sound.id,
- '0-description': self.sound.description,
- '0-name': self.sound.original_filename,
- '0-tags': self.sound.get_sound_tags_string(),
- '0-license': self.sound.license_id,
- '0-sources': str(another_sound.id)
- })
+ resp = self.client.post(
+ self._get_sound_url('sound-edit'), {
+ '0-sound_id': self.sound.id,
+ '0-description': self.sound.description,
+ '0-name': self.sound.original_filename,
+ '0-tags': self.sound.get_sound_tags_string(),
+ '0-license': self.sound.license_id,
+ '0-sources': str(another_sound.id)
+ }
+ )
self.assertEqual(resp.status_code, 302)
call_command('create_remix_groups')
self._assertCacheAbsent(cache_keys)
@@ -1004,14 +1056,16 @@ def _test_add_remove_remixes(self, cache_keys, text, request_func, user=None):
self._assertCachePresent(cache_keys)
# Remove remix from the sound
- resp = self.client.post(self._get_sound_url('sound-edit'), {
- '0-sound_id': self.sound.id,
- '0-description': self.sound.description,
- '0-name': self.sound.original_filename,
- '0-tags': self.sound.get_sound_tags_string(),
- '0-license': self.sound.license_id,
- '0-sources': ''
- })
+ resp = self.client.post(
+ self._get_sound_url('sound-edit'), {
+ '0-sound_id': self.sound.id,
+ '0-description': self.sound.description,
+ '0-name': self.sound.original_filename,
+ '0-tags': self.sound.get_sound_tags_string(),
+ '0-license': self.sound.license_id,
+ '0-sources': ''
+ }
+ )
self.assertEqual(resp.status_code, 302)
call_command('create_remix_groups')
self._assertCacheAbsent(cache_keys)
@@ -1031,9 +1085,7 @@ def test_add_remove_remixes_display(self):
def test_add_remove_remixes_view(self):
self._test_add_remove_remixes(
- self._get_sound_view_footer_top_cache_keys(),
- 'class="bw-icon-remix',
- self._get_sound_view
+ self._get_sound_view_footer_top_cache_keys(), 'class="bw-icon-remix', self._get_sound_view
)
def _test_state_change(self, cache_keys, change_state, texts):
@@ -1083,8 +1135,9 @@ def test_get_analysis(self):
analysis_data = {'descriptor1': 0.56, 'descirptor2': 1.45, 'descriptor3': 'label'}
# Create one analysis object that stores the data in the model. Check that get_analysis returns correct data.
- sa = SoundAnalysis.objects.create(sound=sound, analyzer="TestExtractor1", analysis_data=analysis_data,
- analysis_status="OK")
+ sa = SoundAnalysis.objects.create(
+ sound=sound, analyzer="TestExtractor1", analysis_data=analysis_data, analysis_status="OK"
+ )
self.assertEqual(sound.analyses.all().count(), 1)
self.assertEqual(list(sa.get_analysis_data().keys()), list(analysis_data.keys()))
self.assertEqual(sa.get_analysis_data()['descriptor1'], 0.56)
@@ -1153,7 +1206,6 @@ def test_edit_pack_owner(self):
self.assertEqual(resp.status_code, 403)
-
class SoundEditTestCase(TestCase):
fixtures = ['licenses', 'email_preference_type']
@@ -1164,9 +1216,9 @@ def setUp(self):
self.sound.change_processing_state("OK")
self.sound.change_moderation_state("OK")
self.user = user
-
+
def test_update_description(self):
-
+
self.client.force_login(self.user)
new_description = 'New description'
new_name = 'New name'
@@ -1174,19 +1226,21 @@ def test_update_description(self):
new_pack_name = 'Name of a new pack'
new_sound_sources = Sound.objects.exclude(id=self.sound.id)
geotag_lat = 46.31658418182218
- resp = self.client.post(reverse('sound-edit', args=[self.sound.user.username, self.sound.id]), {
- '0-sound_id': self.sound.id,
- '0-description': new_description,
- '0-name': new_name,
- '0-tags': ' '.join(new_tags),
- '0-license': '3',
- '0-sources': ','.join([f'{s.id}' for s in new_sound_sources]),
- '0-pack': PackForm.NEW_PACK_CHOICE_VALUE,
- '0-new_pack': new_pack_name,
- '0-lat': f'{geotag_lat}',
- '0-lon': '3.515625',
- '0-zoom': '16',
- })
+ resp = self.client.post(
+ reverse('sound-edit', args=[self.sound.user.username, self.sound.id]), {
+ '0-sound_id': self.sound.id,
+ '0-description': new_description,
+ '0-name': new_name,
+ '0-tags': ' '.join(new_tags),
+ '0-license': '3',
+ '0-sources': ','.join([f'{s.id}' for s in new_sound_sources]),
+ '0-pack': PackForm.NEW_PACK_CHOICE_VALUE,
+ '0-new_pack': new_pack_name,
+ '0-lat': f'{geotag_lat}',
+ '0-lon': '3.515625',
+ '0-zoom': '16',
+ }
+ )
self.assertEqual(resp.status_code, 302)
self.sound.refresh_from_db()
diff --git a/sounds/tests/test_templatetags.py b/sounds/tests/test_templatetags.py
index 9a04332ea..01f2090a4 100644
--- a/sounds/tests/test_templatetags.py
+++ b/sounds/tests/test_templatetags.py
@@ -44,10 +44,12 @@ def test_display_sound_from_id(self):
request = HttpRequest()
request.user = AnonymousUser()
with self.assertNumQueries(1):
- Template("{% load display_sound %}{% display_sound sound %}").render(Context({
- 'sound': self.sound.id,
- 'request': request,
- }))
+ Template("{% load display_sound %}{% display_sound sound %}").render(
+ Context({
+ 'sound': self.sound.id,
+ 'request': request,
+ })
+ )
# If the template could not be rendered, the test will have failed by that time, no need to assert anything
@override_settings(TEMPLATES=[settings.TEMPLATES[0]])
@@ -58,10 +60,12 @@ def test_display_sound_from_standard_sound_obj(self):
request = HttpRequest()
request.user = AnonymousUser()
with self.assertNumQueries(1):
- Template("{% load display_sound %}{% display_sound sound %}").render(Context({
- 'sound': self.sound,
- 'request': request,
- }))
+ Template("{% load display_sound %}{% display_sound sound %}").render(
+ Context({
+ 'sound': self.sound,
+ 'request': request,
+ })
+ )
# If the template could not be rendered, the test will have failed by that time, no need to assert anything
@override_settings(TEMPLATES=[settings.TEMPLATES[0]])
@@ -74,10 +78,12 @@ def test_display_sound_from_bulk_query_id_sound_obj(self):
request = HttpRequest()
request.user = AnonymousUser()
with self.assertNumQueries(0):
- Template("{% load display_sound %}{% display_sound sound %}").render(Context({
- 'sound': self.sound,
- 'request': request,
- }))
+ Template("{% load display_sound %}{% display_sound sound %}").render(
+ Context({
+ 'sound': self.sound,
+ 'request': request,
+ })
+ )
# If the template could not be rendered, the test will have failed by that time, no need to assert anything
@override_settings(TEMPLATES=[settings.TEMPLATES[0]])
@@ -88,10 +94,12 @@ def test_display_sound_from_bad_id(self):
request = HttpRequest()
request.user = AnonymousUser()
with self.assertNumQueries(0):
- Template("{% load display_sound %}{% display_sound sound %}").render(Context({
- 'sound': 'not_an_integer',
- 'request': request,
- }))
+ Template("{% load display_sound %}{% display_sound sound %}").render(
+ Context({
+ 'sound': 'not_an_integer',
+ 'request': request,
+ })
+ )
# If the template could not be rendered, the test will have failed by that time, no need to assert anything
@override_settings(TEMPLATES=[settings.TEMPLATES[0]])
@@ -102,15 +110,17 @@ def test_display_sound_from_unexisting_sound_id(self):
request = HttpRequest()
request.user = AnonymousUser()
with self.assertNumQueries(1):
- Template("{% load display_sound %}{% display_sound sound %}").render(Context({
- 'sound': -1,
- 'request': request,
- }))
+ Template("{% load display_sound %}{% display_sound sound %}").render(
+ Context({
+ 'sound': -1,
+ 'request': request,
+ })
+ )
# If the template could not be rendered, the test will have failed by that time, no need to assert anything
def test_display_sound_wrapper_view(self):
- response = self.client.get(reverse('sound-display', args=[self.sound.user.username, 921])) # Non existent ID
+ response = self.client.get(reverse('sound-display', args=[self.sound.user.username, 921])) # Non existent ID
self.assertEqual(response.status_code, 404)
response = self.client.get(reverse('sound-display', args=[self.sound.user.username, self.sound.id]))
- self.assertEqual(response.status_code, 200)
\ No newline at end of file
+ self.assertEqual(response.status_code, 200)
diff --git a/sounds/views.py b/sounds/views.py
index 34ff14fdf..c30a455a3 100644
--- a/sounds/views.py
+++ b/sounds/views.py
@@ -54,7 +54,7 @@
from follow import follow_utils
from forum.views import get_hot_threads
from geotags.models import GeoTag
-from sounds.forms import FlagForm,PackEditForm, SoundEditAndDescribeForm
+from sounds.forms import FlagForm, PackEditForm, SoundEditAndDescribeForm
from sounds.models import PackDownload, PackDownloadSound
from sounds.models import Sound, Pack, Download, RemixGroup, DeletedSound, SoundOfTheDay
from tickets import TICKET_STATUS_CLOSED
@@ -143,8 +143,9 @@ def random(request):
pass
if sound_obj is None:
raise Http404
- return HttpResponseRedirect('{}?random_browsing=true'.format(
- reverse('sound', args=[sound_obj.user.username, sound_obj.id])))
+ return HttpResponseRedirect(
+ '{}?random_browsing=true'.format(reverse('sound', args=[sound_obj.user.username, sound_obj.id]))
+ )
def packs(request):
@@ -163,8 +164,9 @@ def front_page(request):
top_donor_user_id = cache.get("top_donor_user_id", None)
top_donor_donation_amount = cache.get("top_donor_donation_amount", None)
if popular_searches is not None:
- popular_searches = [(query_terms, f"{reverse('sounds-search')}?q={query_terms}")
- for query_terms in popular_searches]
+ popular_searches = [
+ (query_terms, f"{reverse('sounds-search')}?q={query_terms}") for query_terms in popular_searches
+ ]
current_forum_threads = get_hot_threads(n=10)
@@ -232,9 +234,11 @@ def sound(request, username, sound_id):
form = CommentForm(request, request.POST)
if request.user.is_authenticated:
if request.user.profile.is_blocked_for_spam_reports():
- messages.add_message(request, messages.INFO, "You're not allowed to post the comment because your "
- "account has been temporaly blocked after multiple spam "
- "reports")
+ messages.add_message(
+ request, messages.INFO, "You're not allowed to post the comment because your "
+ "account has been temporaly blocked after multiple spam "
+ "reports"
+ )
else:
if form.is_valid():
comment_text = form.cleaned_data["comment"]
@@ -242,9 +246,15 @@ def sound(request, username, sound_id):
sound.invalidate_template_caches()
if request.user != sound.user:
send_mail_template(
- settings.EMAIL_SUBJECT_NEW_COMMENT, 'emails/email_new_comment.txt',
- {'sound': sound, 'user': request.user, 'comment': comment_text},
- user_to=sound.user, email_type_preference_check="new_comment")
+ settings.EMAIL_SUBJECT_NEW_COMMENT,
+ 'emails/email_new_comment.txt', {
+ 'sound': sound,
+ 'user': request.user,
+ 'comment': comment_text
+ },
+ user_to=sound.user,
+ email_type_preference_check="new_comment"
+ )
return HttpResponseRedirect(sound.get_absolute_url())
else:
@@ -262,7 +272,7 @@ def sound(request, username, sound_id):
'form': form,
'display_random_link': display_random_link,
'is_following': is_following,
- 'is_explicit': is_explicit, # if the sound should be shown blurred, already checks for adult profile
+ 'is_explicit': is_explicit, # if the sound should be shown blurred, already checks for adult profile
'sizes': settings.IFRAME_PLAYER_SIZE,
'min_num_ratings': settings.MIN_NUMBER_RATINGS
}
@@ -276,8 +286,8 @@ def after_download_modal(request):
This view checks if a modal should be shown after the user has downloaded a sound, and returns either the contents
of the modal if needed.
"""
- response_content = None # Default content of the response set to None (no modal)
- sound_name = request.GET.get('sound_name', 'this sound') # Gets some data sent by the client
+ response_content = None # Default content of the response set to None (no modal)
+ sound_name = request.GET.get('sound_name', 'this sound') # Gets some data sent by the client
should_show_modal = False
bw_response = None
@@ -294,13 +304,16 @@ def modal_shown_timestamps_cache_key(user):
if should_suggest_donation(request.user, len(modal_shown_timestamps)):
web_logger.info(f"Showing after download donate modal ({json.dumps({'user_id': request.user.id})})")
modal_shown_timestamps.append(time.time())
- cache.set(modal_shown_timestamps_cache_key(request.user), modal_shown_timestamps,
- 60 * 60 * 24) # 24 lifetime cache
+ cache.set(
+ modal_shown_timestamps_cache_key(request.user), modal_shown_timestamps, 60 * 60 * 24
+            ) # 24 hour lifetime cache
should_show_modal = True
if should_show_modal:
- return render(request, 'donations/modal_after_download_donation_request.html',
- {'donation_amount_request_param': settings.DONATION_AMOUNT_REQUEST_PARAM})
+ return render(
+ request, 'donations/modal_after_download_donation_request.html',
+ {'donation_amount_request_param': settings.DONATION_AMOUNT_REQUEST_PARAM}
+ )
else:
return HttpResponse()
@@ -309,8 +322,7 @@ def modal_shown_timestamps_cache_key(user):
@transaction.atomic()
def sound_download(request, username, sound_id):
if not request.user.is_authenticated:
- return HttpResponseRedirect('{}?next={}'.format(reverse("login"),
- reverse("sound", args=[username, sound_id])))
+ return HttpResponseRedirect('{}?next={}'.format(reverse("login"), reverse("sound", args=[username, sound_id])))
sound = get_object_or_404(Sound, id=sound_id, moderation_state="OK", processing_state="OK")
if sound.user.username.lower() != username.lower():
raise Http404
@@ -327,13 +339,15 @@ def sound_download(request, username, sound_id):
if cache.get(cache_key, None) is None:
Download.objects.create(user=request.user, sound=sound, license_id=sound.license_id)
sound.invalidate_template_caches()
- cache.set(cache_key, True, 60 * 5) # Don't save downloads for the same user/sound in 5 minutes
+ cache.set(cache_key, True, 60 * 5) # Don't save downloads for the same user/sound in 5 minutes
if settings.USE_CDN_FOR_DOWNLOADS:
cdn_filename = cache_cdn_map.get(str(sound_id), None)
if cdn_filename is not None:
# If USE_CDN_FOR_DOWNLOADS option is on and we find an URL for that sound in the CDN, then we redirect to that one
- cdn_url = settings.CDN_DOWNLOADS_TEMPLATE_URL.format(int(sound_id) // 1000, cdn_filename, sound.friendly_filename())
+ cdn_url = settings.CDN_DOWNLOADS_TEMPLATE_URL.format(
+ int(sound_id) // 1000, cdn_filename, sound.friendly_filename()
+ )
return HttpResponseRedirect(cdn_url)
return sendfile(*prepare_sendfile_arguments_for_sound_download(sound))
@@ -343,8 +357,7 @@ def sound_download(request, username, sound_id):
@transaction.atomic()
def pack_download(request, username, pack_id):
if not request.user.is_authenticated:
- return HttpResponseRedirect('{}?next={}'.format(reverse("login"),
- reverse("pack", args=[username, pack_id])))
+ return HttpResponseRedirect('{}?next={}'.format(reverse("login"), reverse("pack", args=[username, pack_id])))
pack = get_object_or_404(Pack, id=pack_id)
if pack.user.username.lower() != username.lower():
raise Http404
@@ -364,7 +377,7 @@ def pack_download(request, username, pack_id):
for sound in pack.sounds.all():
pds.append(PackDownloadSound(sound=sound, license_id=sound.license_id, pack_download=pd))
PackDownloadSound.objects.bulk_create(pds)
- cache.set(cache_key, True, 60 * 5) # Don't save downloads for the same user/pack in the next 5 minutes
+ cache.set(cache_key, True, 60 * 5) # Don't save downloads for the same user/pack in the next 5 minutes
licenses_url = (reverse('pack-licenses', args=[username, pack_id]))
return download_sounds(licenses_url, pack)
@@ -385,8 +398,13 @@ def sound_edit(request, username, sound_id):
if not (request.user.is_superuser or sound.user == request.user):
raise PermissionDenied
- session_key_prefix = request.GET.get('session', str(uuid.uuid4())[0:8]) # Get existing session key if we are already in an edit session or create a new one
- request.session[f'{session_key_prefix}-edit_sounds'] = [sound] # Add the list of sounds to edit in the session object
+ session_key_prefix = request.GET.get(
+ 'session',
+ str(uuid.uuid4())[0:8]
+ ) # Get existing session key if we are already in an edit session or create a new one
+ request.session[f'{session_key_prefix}-edit_sounds'] = [
+ sound
+ ] # Add the list of sounds to edit in the session object
request.session[f'{session_key_prefix}-len_original_edit_sounds'] = 1
return edit_and_describe_sounds_helper(request, session_key_prefix=session_key_prefix)
@@ -399,7 +417,7 @@ def clear_session_edit_data(request, session_key_prefix=''):
def clear_session_describe_data(request, session_key_prefix=''):
# Clear pre-existing edit/describe sound related data in the session
- for key in ['describe_sounds','describe_license', 'describe_pack', 'len_original_describe_sounds']:
+ for key in ['describe_sounds', 'describe_license', 'describe_pack', 'len_original_describe_sounds']:
request.session.pop(f'{session_key_prefix}-{key}', None)
@@ -408,22 +426,18 @@ def edit_and_describe_sounds_helper(request, describing=False, session_key_prefi
def update_sound_tickets(sound, text):
tickets = Ticket.objects.filter(sound_id=sound.id).exclude(status=TICKET_STATUS_CLOSED)
for ticket in tickets:
- tc = TicketComment(sender=request.user,
- ticket=ticket,
- moderator_only=False,
- text=text)
+ tc = TicketComment(sender=request.user, ticket=ticket, moderator_only=False, text=text)
tc.save()
- ticket.send_notification_emails(ticket.NOTIFICATION_UPDATED,
- ticket.MODERATOR_ONLY)
+ ticket.send_notification_emails(ticket.NOTIFICATION_UPDATED, ticket.MODERATOR_ONLY)
def create_sounds(request, forms):
# Create actual Sound objects, trigger processing of sounds and of affected packs
sounds_to_process = []
dirty_packs = []
for form in forms:
- file_full_path=form.file_full_path
+ file_full_path = form.file_full_path
if not file_full_path.endswith(form.audio_filename_from_form):
- continue # Double check that we are not writing to the wrong file
+ continue # Double check that we are not writing to the wrong file
sound_fields = {
'name': form.cleaned_data['name'],
'dest_path': file_full_path,
@@ -440,10 +454,13 @@ def create_sounds(request, forms):
elif pack:
sound_fields['pack'] = pack
- if not form.cleaned_data.get('remove_geotag') and form.cleaned_data.get('lat'): # if 'lat' is in data, we assume other fields are too
- geotag = '%s,%s,%d' % (form.cleaned_data.get('lat'), form.cleaned_data.get('lon'), form.cleaned_data.get('zoom'))
+ if not form.cleaned_data.get('remove_geotag') and form.cleaned_data.get(
+ 'lat'): # if 'lat' is in data, we assume other fields are too
+ geotag = '%s,%s,%d' % (
+ form.cleaned_data.get('lat'), form.cleaned_data.get('lon'), form.cleaned_data.get('zoom')
+ )
sound_fields['geotag'] = geotag
-
+
try:
user = request.user
sound = create_sound(user, sound_fields, process=False)
@@ -466,8 +483,10 @@ def create_sounds(request, forms):
except NoAudioException:
# If for some reason audio file does not exist, skip creating this sound
- messages.add_message(request, messages.ERROR,
- f"Something went wrong with accessing the file {form.cleaned_data['name']}.")
+ messages.add_message(
+ request, messages.ERROR,
+ f"Something went wrong with accessing the file {form.cleaned_data['name']}."
+ )
except AlreadyExistsException as e:
messages.add_message(request, messages.WARNING, str(e))
except CantMoveException as e:
@@ -487,16 +506,16 @@ def update_edited_sound(sound, data):
sound.set_tags(data["tags"])
sound.description = remove_control_chars(data["description"])
sound.original_filename = data["name"]
-
+
new_license = data["license"]
if new_license != sound.license:
sound.set_license(new_license)
-
+
packs_to_process = []
if data['new_pack']:
pack, _ = Pack.objects.get_or_create(user=sound.user, name=data['new_pack'])
if sound.pack:
- packs_to_process.append(sound.pack) # Append previous sound pack if exists
+ packs_to_process.append(sound.pack) # Append previous sound pack if exists
sound.pack = pack
packs_to_process.append(pack)
else:
@@ -523,23 +542,28 @@ def update_edited_sound(sound, data):
sound.geotag.save()
else:
sound.geotag = GeoTag.objects.create(
- lat=data["lat"], lon=data["lon"], zoom=data["zoom"], user=request.user)
+ lat=data["lat"], lon=data["lon"], zoom=data["zoom"], user=request.user
+ )
sound_sources = data["sources"]
if sound_sources != sound.get_sound_sources_as_set():
sound.set_sources(sound_sources)
-
- sound.mark_index_dirty() # Sound is saved here
+
+ sound.mark_index_dirty() # Sound is saved here
sound.invalidate_template_caches()
update_sound_tickets(sound, f'{request.user.username} updated one or more fields of the sound description.')
- messages.add_message(request, messages.INFO,
- f'Sound
{sound.original_filename} successfully edited!')
+ messages.add_message(
+ request, messages.INFO,
+ f'Sound
{sound.original_filename} successfully edited!'
+ )
for packs_to_process in packs_to_process:
packs_to_process.process()
- files = request.session.get(f'{session_key_prefix}-describe_sounds', None) # List of File objects of sounds to describe
- sounds = request.session.get(f'{session_key_prefix}-edit_sounds', None) # List of Sound objects to edit
+ files = request.session.get(
+ f'{session_key_prefix}-describe_sounds', None
+ ) # List of File objects of sounds to describe
+ sounds = request.session.get(f'{session_key_prefix}-edit_sounds', None) # List of Sound objects to edit
if (describing and files is None) or (not describing and sounds is None):
# Expecting either a list of sounds or audio files to describe, got none. Redirect to main manage sounds page.
return HttpResponseRedirect(reverse('accounts-manage-sounds', args=['published']))
@@ -548,13 +572,20 @@ def update_edited_sound(sound, data):
all_forms_validated_ok = True
all_remaining_sounds_to_edit_or_describe = files if describing else sounds
sounds_to_edit_or_describe = all_remaining_sounds_to_edit_or_describe[:forms_per_round]
- len_original_describe_edit_sounds = request.session.get(f'{session_key_prefix}-len_original_describe_sounds', 0) if describing else request.session.get(f'{session_key_prefix}-len_original_edit_sounds', 0)
- num_rounds = int(math.ceil(len_original_describe_edit_sounds/forms_per_round))
- current_round = int((len_original_describe_edit_sounds - len(all_remaining_sounds_to_edit_or_describe))/forms_per_round + 1)
- files_data_for_players = [] # Used when describing sounds (not when editing) to be able to show sound players
- preselected_license = request.session.get(f'{session_key_prefix}-describe_license', False) # Pre-selected from the license selection page when describing mulitple sounds
- preselected_pack = request.session.get(f'{session_key_prefix}-describe_pack', False) # Pre-selected from the pack selection page when describing mulitple sounds
-
+ len_original_describe_edit_sounds = request.session.get(
+ f'{session_key_prefix}-len_original_describe_sounds', 0
+ ) if describing else request.session.get(f'{session_key_prefix}-len_original_edit_sounds', 0)
+ num_rounds = int(math.ceil(len_original_describe_edit_sounds / forms_per_round))
+ current_round = int((len_original_describe_edit_sounds - len(all_remaining_sounds_to_edit_or_describe)) /
+ forms_per_round + 1)
+ files_data_for_players = [] # Used when describing sounds (not when editing) to be able to show sound players
+ preselected_license = request.session.get(
+ f'{session_key_prefix}-describe_license', False
+    ) # Pre-selected from the license selection page when describing multiple sounds
+ preselected_pack = request.session.get(
+ f'{session_key_prefix}-describe_pack', False
+    ) # Pre-selected from the pack selection page when describing multiple sounds
+
for count, element in enumerate(sounds_to_edit_or_describe):
prefix = str(count)
if describing:
@@ -579,12 +610,14 @@ def update_edited_sound(sound, data):
if request.method == "POST":
form = SoundEditAndDescribeForm(
- request.POST,
- prefix=prefix,
+ request.POST,
+ prefix=prefix,
file_full_path=element.full_path if describing else None,
explicit_disable=element.is_explicit if not describing else False,
hide_old_license_versions="3.0" not in element.license.deed_url if not describing else True,
- user_packs=Pack.objects.filter(user=request.user if describing else element.user).exclude(is_deleted=True))
+ user_packs=Pack.objects.filter(user=request.user if describing else element.user
+ ).exclude(is_deleted=True)
+ )
forms.append(form)
if form.is_valid():
if not describing:
@@ -600,10 +633,12 @@ def update_edited_sound(sound, data):
# Don't do anything here except storing the audio filename from the POST data which will be later used
# as a double check to make sure we don't write the description to the wrong file
audio_filename_from_form = request.POST.get(f'{prefix}-audio_filename', None)
- form.audio_filename_from_form = audio_filename_from_form
+ form.audio_filename_from_form = audio_filename_from_form
else:
all_forms_validated_ok = False
- form.sound_sources_ids = list(form.cleaned_data['sources']) # Add sources ids to list so sources sound selector can be initialized
+ form.sound_sources_ids = list(
+ form.cleaned_data['sources']
+ ) # Add sources ids to list so sources sound selector can be initialized
if describing:
form.audio_filename = element.name
else:
@@ -611,15 +646,17 @@ def update_edited_sound(sound, data):
else:
if not describing:
sound_sources_ids = list(element.get_sound_sources_as_set())
- initial = dict(tags=element.get_sound_tags_string(),
- description=element.description,
- name=element.original_filename,
- license=element.license,
- pack=element.pack.id if element.pack else None,
- lat=element.geotag.lat if element.geotag else None,
- lon=element.geotag.lon if element.geotag else None,
- zoom=element.geotag.zoom if element.geotag else None,
- sources=','.join([str(item) for item in sound_sources_ids]))
+ initial = dict(
+ tags=element.get_sound_tags_string(),
+ description=element.description,
+ name=element.original_filename,
+ license=element.license,
+ pack=element.pack.id if element.pack else None,
+ lat=element.geotag.lat if element.geotag else None,
+ lon=element.geotag.lon if element.geotag else None,
+ zoom=element.geotag.zoom if element.geotag else None,
+ sources=','.join([str(item) for item in sound_sources_ids])
+ )
else:
sound_sources_ids = []
initial = dict(name=os.path.splitext(element.name)[0])
@@ -628,17 +665,19 @@ def update_edited_sound(sound, data):
if preselected_pack:
initial['pack'] = preselected_pack.id
form = SoundEditAndDescribeForm(
- prefix=prefix,
+ prefix=prefix,
explicit_disable=element.is_explicit if not describing else False,
initial=initial,
hide_old_license_versions="3.0" not in element.license.deed_url if not describing else True,
- user_packs=Pack.objects.filter(user=request.user if describing else element.user).exclude(is_deleted=True))
+ user_packs=Pack.objects.filter(user=request.user if describing else element.user
+ ).exclude(is_deleted=True)
+ )
form.sound_sources_ids = sound_sources_ids
if describing:
form.audio_filename = element.name
else:
form.sound_id = element.id
- forms.append(form)
+ forms.append(form)
tvars = {
'session_key_prefix': session_key_prefix,
@@ -655,7 +694,7 @@ def update_edited_sound(sound, data):
'total_sounds_to_describe': len_original_describe_edit_sounds,
'next': request.GET.get('next', '')
}
-
+
if request.method == "POST" and all_forms_validated_ok:
if describing:
# Create Sound objects, trigger moderation, processing, etc...
@@ -665,8 +704,10 @@ def update_edited_sound(sound, data):
request.session[f'{session_key_prefix}-describe_sounds'] = files[forms_per_round:]
# If no more sounds to describe, redirect to manage sound page, otherwise redirect to same page to proceed with second round
- messages.add_message(request, messages.INFO,
- f'Successfully finished sound description round {current_round} of {num_rounds}!')
+ messages.add_message(
+ request, messages.INFO,
+ f'Successfully finished sound description round {current_round} of {num_rounds}!'
+ )
if not request.session[f'{session_key_prefix}-describe_sounds']:
clear_session_describe_data(request, session_key_prefix=session_key_prefix)
return HttpResponseRedirect(reverse('accounts-manage-sounds', args=['processing']))
@@ -682,8 +723,9 @@ def update_edited_sound(sound, data):
redirect_to = request.GET.get('next', sounds[0].get_absolute_url())
return HttpResponseRedirect(redirect_to)
- messages.add_message(request, messages.INFO,
- f'Successfully finished sound editing round {current_round} of {num_rounds}!')
+ messages.add_message(
+ request, messages.INFO, f'Successfully finished sound editing round {current_round} of {num_rounds}!'
+ )
if not request.session[f'{session_key_prefix}-edit_sounds']:
# If no more sounds to edit, redirect to manage sounds page
clear_session_edit_data(request, session_key_prefix=session_key_prefix)
@@ -692,8 +734,10 @@ def update_edited_sound(sound, data):
else:
# Otherwise, redirect to the same page to continue with next round of sounds
next_arg = request.GET.get('next', '')
- return HttpResponseRedirect(reverse('accounts-edit-sounds') + f'?next={next_arg}&session={session_key_prefix}')
-
+ return HttpResponseRedirect(
+ reverse('accounts-edit-sounds') + f'?next={next_arg}&session={session_key_prefix}'
+ )
+
return render(request, 'sounds/edit_and_describe.html', tvars)
@@ -752,8 +796,11 @@ def add_sounds_modal_helper(request, username=None):
exclude_part = 'NOT (' + ' OR '.join(exclude_parts) + ')'
filter_parts.append(exclude_part)
query_filter = ' AND '.join(filter_parts)
- results, _ = perform_search_engine_query(
- {'textual_query': query, 'query_filter': query_filter, 'num_sounds': 9})
+ results, _ = perform_search_engine_query({
+ 'textual_query': query,
+ 'query_filter': query_filter,
+ 'num_sounds': 9
+ })
tvars['sounds_to_select'] = [doc['id'] for doc in results.docs]
return tvars
@@ -764,8 +811,9 @@ def add_sounds_modal_for_pack_edit(request, pack_id):
tvars = add_sounds_modal_helper(request, username=pack.user.username)
tvars.update({
'modal_title': 'Add sounds to pack',
- 'help_text': 'Note that when adding a sound that already belongs to another pack it will be '
- 'removed from the former pack.',
+ 'help_text':
+ 'Note that when adding a sound that already belongs to another pack it will be '
+ 'removed from the former pack.',
})
return render(request, 'sounds/modal_add_sounds.html', tvars)
@@ -778,25 +826,23 @@ def add_sounds_modal_for_edit_sources(request):
})
return render(request, 'sounds/modal_add_sounds.html', tvars)
+
def _remix_group_view_helper(request, group_id):
group = get_object_or_404(RemixGroup, id=group_id)
data = group.protovis_data
- sounds = Sound.objects.ordered_ids(
- [element['id'] for element in group.sounds.all().order_by('created').values('id')])
- tvars = {
- 'sounds': sounds,
- 'last_sound': sounds[len(sounds)-1],
- 'group_sound': sounds[0],
- 'data': data
- }
+ sounds = Sound.objects.ordered_ids([
+ element['id'] for element in group.sounds.all().order_by('created').values('id')
+ ])
+ tvars = {'sounds': sounds, 'last_sound': sounds[len(sounds) - 1], 'group_sound': sounds[0], 'data': data}
return tvars
+
@redirect_if_old_username_or_404
def remixes(request, username, sound_id):
if not request.GET.get('ajax'):
# If not loaded as modal, redirect to sound page with parameter to open modal
return HttpResponseRedirect(reverse('sound', args=[username, sound_id]) + '?remixes=1')
-
+
sound = get_object_or_404(Sound, id=sound_id, moderation_state="OK", processing_state="OK")
if sound.user.username.lower() != username.lower():
raise Http404
@@ -804,8 +850,8 @@ def remixes(request, username, sound_id):
remix_group = sound.remix_group.all()[0]
except:
raise Http404
-
- tvars = _remix_group_view_helper(request, remix_group.id)
+
+ tvars = _remix_group_view_helper(request, remix_group.id)
tvars.update({'sound': sound})
return render(request, 'sounds/modal_remix_group.html', tvars)
@@ -817,17 +863,16 @@ def similar(request, username, sound_id):
# If not loaded as modal, redirect to sound page with parameter to open modal
return HttpResponseRedirect(reverse('sound', args=[username, sound_id]) + '?similar=1')
- sound = get_object_or_404(Sound,
- id=sound_id,
- moderation_state="OK",
- processing_state="OK",
- similarity_state="OK")
+ sound = get_object_or_404(Sound, id=sound_id, moderation_state="OK", processing_state="OK", similarity_state="OK")
if sound.user.username.lower() != username.lower():
raise Http404
similarity_results, _ = get_similar_sounds(
- sound, request.GET.get('preset', None), settings.NUM_SIMILAR_SOUNDS_PER_PAGE * settings.NUM_SIMILAR_SOUNDS_PAGES)
- paginator = paginate(request, [sound_id for sound_id, _ in similarity_results], settings.NUM_SIMILAR_SOUNDS_PER_PAGE)
+ sound, request.GET.get('preset', None), settings.NUM_SIMILAR_SOUNDS_PER_PAGE * settings.NUM_SIMILAR_SOUNDS_PAGES
+ )
+ paginator = paginate(
+ request, [sound_id for sound_id, _ in similarity_results], settings.NUM_SIMILAR_SOUNDS_PER_PAGE
+ )
similar_sounds = Sound.objects.ordered_ids(paginator['page'].object_list)
tvars = {'similar_sounds': similar_sounds, 'sound': sound}
tvars.update(paginator)
@@ -854,8 +899,9 @@ def pack(request, username, pack_id):
num_sounds_ok = qs.count()
if num_sounds_ok < pack.num_sounds:
- messages.add_message(request, messages.INFO,
- 'Some sounds of this pack might
not have been moderated or processed yet.')
+ messages.add_message(
+ request, messages.INFO, 'Some sounds of this pack might
not have been moderated or processed yet.'
+ )
is_following = None
geotags_in_pack_serialized = []
@@ -877,7 +923,7 @@ def pack(request, username, pack_id):
@redirect_if_old_username_or_404
def pack_stats_section(request, username, pack_id):
if not request.GET.get('ajax'):
- raise Http404 # Only accessible via ajax
+ raise Http404 # Only accessible via ajax
try:
pack = Pack.objects.bulk_query_id(pack_id)[0]
if pack.user.username.lower() != username.lower():
@@ -900,7 +946,7 @@ def packs_for_user(request, username):
def for_user(request, username):
user = request.parameter_user
return HttpResponseRedirect(user.profile.get_user_sounds_in_search_url())
-
+
@redirect_if_old_username_or_404
@transaction.atomic()
@@ -908,7 +954,7 @@ def flag(request, username, sound_id):
if not request.GET.get('ajax'):
# If not loaded as a modal, redirect to the sound page with parameter to open modal
return HttpResponseRedirect(reverse('sound', args=[username, sound_id]) + '?flag=1')
-
+
sound = get_object_or_404(Sound, id=sound_id, moderation_state="OK", processing_state="OK")
if sound.user.username.lower() != username.lower():
raise Http404
@@ -930,9 +976,12 @@ def flag(request, username, sound_id):
else:
user_email = flag_form.cleaned_data["email"]
- send_mail_template_to_support(settings.EMAIL_SUBJECT_SOUND_FLAG, "emails/email_flag.txt", {"flag": flag},
- extra_subject=f"{sound.user.username} - {sound.original_filename}",
- reply_to=user_email)
+ send_mail_template_to_support(
+ settings.EMAIL_SUBJECT_SOUND_FLAG,
+ "emails/email_flag.txt", {"flag": flag},
+ extra_subject=f"{sound.user.username} - {sound.original_filename}",
+ reply_to=user_email
+ )
return JsonResponse({'success': True})
else:
initial = {}
@@ -940,8 +989,7 @@ def flag(request, username, sound_id):
initial["email"] = user.email
flag_form = FlagForm(initial=initial)
- tvars = {"sound": sound,
- "flag_form": flag_form}
+ tvars = {"sound": sound, "flag_form": flag_form}
return render(request, 'sounds/modal_flag_sound.html', tvars)
@@ -1031,7 +1079,8 @@ def embed_iframe(request, sound_id, player_size):
The sizes 'medium' and 'medium_no_info' can optionally show a button to toggle the background image between the
waveform and the spectrogram by passing the request parameter 'td=1'. Bigger sizes always show that button.
"""
- if player_size not in ['mini', 'small', 'medium', 'large', 'large_no_info', 'medium_no_info', 'full_size', 'full_size_no_info']:
+ if player_size not in ['mini', 'small', 'medium', 'large', 'large_no_info', 'medium_no_info', 'full_size',
+ 'full_size_no_info']:
raise Http404
try:
sound = Sound.objects.bulk_query_id_public(sound_id)[0]
@@ -1096,23 +1145,21 @@ def downloaders(request, username, sound_id):
download_list.append({"created": s.created, "user": user_map[s.user_id]})
download_list = sorted(download_list, key=itemgetter("created"), reverse=True)
- tvars = {"sound": sound,
- "username": username,
- "download_list": download_list}
+ tvars = {"sound": sound, "username": username, "download_list": download_list}
tvars.update(pagination)
return render(request, 'sounds/modal_downloaders.html', tvars)
def pack_downloaders(request, username, pack_id):
if not request.GET.get('ajax'):
- # If not loaded as a modal, redirect to sound page with parameter to open modal
+ # If not loaded as a modal, redirect to sound page with parameter to open modal
return HttpResponseRedirect(reverse('pack', args=[username, pack_id]) + '?downloaders=1')
-
+
pack = get_object_or_404(Pack, id=pack_id)
# Retrieve all users that downloaded a sound
qs = PackDownload.objects.filter(pack_id=pack_id)
-
+
num_items_per_page = settings.USERS_PER_DOWNLOADS_MODAL_PAGE
pagination = paginate(request, qs, num_items_per_page, object_count=pack.num_downloads)
page = pagination["page"]
@@ -1129,8 +1176,6 @@ def pack_downloaders(request, username, pack_id):
download_list.append({"created": s.created, "user": user_map[s.user_id]})
download_list = sorted(download_list, key=itemgetter("created"), reverse=True)
- tvars = {'username': username,
- 'pack': pack,
- "download_list": download_list}
+ tvars = {'username': username, 'pack': pack, "download_list": download_list}
tvars.update(pagination)
return render(request, 'sounds/modal_downloaders.html', tvars)
diff --git a/support/tests.py b/support/tests.py
index 1f402a9d4..5581fbd88 100644
--- a/support/tests.py
+++ b/support/tests.py
@@ -35,12 +35,12 @@ def test_send_support_request_email(self):
# try with existing email address
request_email = 'test.user+1@gmail.com'
send_email_to_support(request_email, subject, message)
- self.assertTrue(True) # This call is not really needed, but makes sense to me
+ self.assertTrue(True) # This call is not really needed, but makes sense to me
# try with non-existing email address
request_email = 'test.user+1234678235@gmail.com'
send_email_to_support(request_email, subject, message)
- self.assertTrue(True) # This call is not really needed, but makes sense to me
+ self.assertTrue(True) # This call is not really needed, but makes sense to me
def test_create_zendesk_ticket(self):
subject = 'test subject'
@@ -66,6 +66,6 @@ def test_create_zendesk_ticket(self):
ticket = create_zendesk_ticket(request_email, subject, message)
sticket = serialize(ticket)
self.assertEqual(sticket['requester']['email'], request_email)
- self.assertEqual(sticket['requester']['name'], 'Unknown username') # Set unknown username
- self.assertTrue('custom_fields' not in sticket) # no custom fields
- self.assertTrue(len(sticket['description']) == len(message)) # No extra description
+ self.assertEqual(sticket['requester']['name'], 'Unknown username') # Set unknown username
+ self.assertTrue('custom_fields' not in sticket) # no custom fields
+ self.assertTrue(len(sticket['description']) == len(message)) # No extra description
diff --git a/support/views.py b/support/views.py
index d5462d917..c7933f185 100644
--- a/support/views.py
+++ b/support/views.py
@@ -66,30 +66,18 @@ def create_zendesk_ticket(request_email, subject, message, user=None):
zendesk_api.CustomField(id=30153729, value=num_comments),
]
- user_url = "https://{}{}".format(
- Site.objects.get_current().domain,
- reverse('account', args=[user.username])
- )
+ user_url = "https://{}{}".format(Site.objects.get_current().domain, reverse('account', args=[user.username]))
message += f"\n\n-- \n{user_url}"
requester.name = user.username
- return zendesk_api.Ticket(
- requester=requester,
- subject=subject,
- description=message,
- custom_fields=custom_fields
- )
+ return zendesk_api.Ticket(requester=requester, subject=subject, description=message, custom_fields=custom_fields)
def send_to_zendesk(request_email, subject, message, user=None):
ticket = create_zendesk_ticket(request_email, subject, message, user)
- zenpy = Zenpy(
- email=settings.ZENDESK_EMAIL,
- token=settings.ZENDESK_TOKEN,
- subdomain='freesound'
- )
+ zenpy = Zenpy(email=settings.ZENDESK_EMAIL, token=settings.ZENDESK_TOKEN, subdomain='freesound')
try:
zenpy.tickets.create(ticket)
except (ZendeskAPIException, HTTPError, ZenpyException) as e:
@@ -103,8 +91,15 @@ def send_email_to_support(request_email, subject, message, user=None):
except User.DoesNotExist:
pass
- send_mail_template_to_support(settings.EMAIL_SUBJECT_SUPPORT_EMAIL, "emails/email_support.txt",
- {'message': message, 'user': user}, extra_subject=subject, reply_to=request_email)
+ send_mail_template_to_support(
+ settings.EMAIL_SUBJECT_SUPPORT_EMAIL,
+ "emails/email_support.txt", {
+ 'message': message,
+ 'user': user
+ },
+ extra_subject=subject,
+ reply_to=request_email
+ )
def contact(request):
diff --git a/tagrecommendation/client/__init__.py b/tagrecommendation/client/__init__.py
index a5b2c18ac..8e96bcfbe 100644
--- a/tagrecommendation/client/__init__.py
+++ b/tagrecommendation/client/__init__.py
@@ -19,6 +19,7 @@
#
from future import standard_library
+
standard_library.install_aliases()
from builtins import str
from builtins import object
@@ -26,14 +27,14 @@
import json
import urllib.request, urllib.error, urllib.parse
-_BASE_URL = 'http://%s:%i/tagrecommendation/' % (settings.TAGRECOMMENDATION_ADDRESS, settings.TAGRECOMMENDATION_PORT)
-_URL_RECOMMEND_TAGS = 'recommend_tags/'
-_URL_LAST_INDEXED_ID = 'last_indexed_id/'
-_URL_ADD_TO_INDEX = 'add_to_index/'
+_BASE_URL = 'http://%s:%i/tagrecommendation/' % (settings.TAGRECOMMENDATION_ADDRESS, settings.TAGRECOMMENDATION_PORT)
+_URL_RECOMMEND_TAGS = 'recommend_tags/'
+_URL_LAST_INDEXED_ID = 'last_indexed_id/'
+_URL_ADD_TO_INDEX = 'add_to_index/'
def _get_url_as_json(url):
- f = urllib.request.urlopen(url.replace(" ","%20"), timeout=settings.TAGRECOMMENDATION_TIMEOUT)
+ f = urllib.request.urlopen(url.replace(" ", "%20"), timeout=settings.TAGRECOMMENDATION_TIMEOUT)
resp = f.read()
return json.loads(resp)
@@ -61,5 +62,7 @@ def get_last_indexed_id(cls):
@classmethod
def add_to_index(cls, sound_ids, sound_tagss):
- url = _BASE_URL + _URL_ADD_TO_INDEX + '?' + 'sound_ids=' + ",".join([str(sid) for sid in sound_ids]) + '&sound_tagss=' + "-!-!-".join([",".join(stags) for stags in sound_tagss])
+ url = _BASE_URL + _URL_ADD_TO_INDEX + '?' + 'sound_ids=' + ",".join(
+ [str(sid) for sid in sound_ids]
+ ) + '&sound_tagss=' + "-!-!-".join([",".join(stags) for stags in sound_tagss])
return _result_or_exception(_get_url_as_json(url))
diff --git a/tagrecommendation/tag_recommendation/community_detector.py b/tagrecommendation/tag_recommendation/community_detector.py
index 36829ec27..788ab81ac 100644
--- a/tagrecommendation/tag_recommendation/community_detector.py
+++ b/tagrecommendation/tag_recommendation/community_detector.py
@@ -39,13 +39,7 @@ class CommunityDetector(object):
selected_instances = None
tag_names = None
- def __init__(self,
- verbose=True,
- classifier_type="svm",
- PATH=None,
- INIT_METHOD="ZeroInit",
- selected_instances=None
- ):
+ def __init__(self, verbose=True, classifier_type="svm", PATH=None, INIT_METHOD="ZeroInit", selected_instances=None):
self.verbose = verbose
self.n_training_instances = 0
@@ -67,10 +61,9 @@ def __init__(self,
self.tag_names = load(RECOMMENDATION_DATA_DIR + 'Classifier_TAG_NAMES.npy')
def __repr__(self):
- return "Community Detector (%s, %i classes, %i instances, %s init) " % (self.clf_type,
- len(self.class_name_ids.keys()),
- self.n_training_instances,
- self.init_method)
+ return "Community Detector (%s, %i classes, %i instances, %s init) " % (
+ self.clf_type, len(self.class_name_ids.keys()), self.n_training_instances, self.init_method
+ )
def load_instance_vector_from_tags(self, tags):
tags_t = tags[:]
diff --git a/tagrecommendation/tag_recommendation/community_tag_recommender.py b/tagrecommendation/tag_recommendation/community_tag_recommender.py
index 25903156a..e2cc7630d 100644
--- a/tagrecommendation/tag_recommendation/community_tag_recommender.py
+++ b/tagrecommendation/tag_recommendation/community_tag_recommender.py
@@ -42,13 +42,15 @@ class CommunityBasedTagRecommender(object):
recommendation_heuristic = None
classes = None
- def __init__(self,
- dataset="",
- classes=[],
- metric="cosine",
- community_detection_heuristic="ZeroInit",
- recommendation_heuristic="hRankPercentage015",
- classifier_type="bayes"):
+ def __init__(
+ self,
+ dataset="",
+ classes=[],
+ metric="cosine",
+ community_detection_heuristic="ZeroInit",
+ recommendation_heuristic="hRankPercentage015",
+ classifier_type="bayes"
+ ):
self.dataset = dataset
self.classes = classes
@@ -73,14 +75,20 @@ def load_recommenders(self):
self.recommenders[class_name].set_heuristic(self.recommendation_heuristic)
data = {
- 'TAG_NAMES': load(RECOMMENDATION_DATA_DIR + self.dataset + '_%s_SIMILARITY_MATRIX_' % class_name + self.metric + '_SUBSET_TAG_NAMES.npy'),
- 'SIMILARITY_MATRIX': load(RECOMMENDATION_DATA_DIR + self.dataset + '_%s_SIMILARITY_MATRIX_' % class_name + self.metric + '_SUBSET.npy'),
+ 'TAG_NAMES':
+ load(
+ RECOMMENDATION_DATA_DIR + self.dataset + '_%s_SIMILARITY_MATRIX_' % class_name + self.metric +
+ '_SUBSET_TAG_NAMES.npy'
+ ),
+ 'SIMILARITY_MATRIX':
+ load(
+ RECOMMENDATION_DATA_DIR + self.dataset + '_%s_SIMILARITY_MATRIX_' % class_name + self.metric +
+ '_SUBSET.npy'
+ ),
}
self.recommenders[class_name].load_data(
- data=data,
- dataset="%s-%s" % (self.dataset, class_name),
- metric=self.metric
+ data=data, dataset="%s-%s" % (self.dataset, class_name), metric=self.metric
)
print(self.recommenders[class_name])
diff --git a/tagrecommendation/tag_recommendation/data_processor.py b/tagrecommendation/tag_recommendation/data_processor.py
index 0489e17a8..2834374ed 100644
--- a/tagrecommendation/tag_recommendation/data_processor.py
+++ b/tagrecommendation/tag_recommendation/data_processor.py
@@ -94,7 +94,7 @@ def tas_to_association_matrix(self, tag_threshold=0, line_limit=1000000000):
stats = {
'n_sounds_in_matrix': len(sound_ids),
- #'biggest_id': max([int(sid) for sid in sound_ids])
+ #'biggest_id': max([int(sid) for sid in sound_ids])
}
saveToJson(RECOMMENDATION_TMP_DATA_DIR + 'Current_index_stats.json', stats)
if self.verbose:
@@ -107,7 +107,7 @@ def tas_to_association_matrix(self, tag_threshold=0, line_limit=1000000000):
tag_occurrences[t] = ts.count(t)
if self.verbose:
- sys.stdout.write("\rComputing tag occurrences %.2f%%"%(float(100*(id+1))/len(unique_ts)))
+ sys.stdout.write("\rComputing tag occurrences %.2f%%" % (float(100 * (id + 1)) / len(unique_ts)))
sys.stdout.flush()
print("")
tags = []
@@ -119,7 +119,7 @@ def tas_to_association_matrix(self, tag_threshold=0, line_limit=1000000000):
tags_ids.append(id)
if self.verbose:
- sys.stdout.write("\rFiltering tags %.2f%%"%(float(100*(id+1))/len(unique_ts)))
+ sys.stdout.write("\rFiltering tags %.2f%%" % (float(100 * (id + 1)) / len(unique_ts)))
sys.stdout.flush()
nTags = len(tags)
@@ -154,7 +154,7 @@ def tas_to_association_matrix(self, tag_threshold=0, line_limit=1000000000):
resources = list(res_tags.keys())
nResources = len(resources)
- resources_ids = list(range(0,nResources))
+ resources_ids = list(range(0, nResources))
if self.verbose:
print("done!")
@@ -172,10 +172,12 @@ def tas_to_association_matrix(self, tag_threshold=0, line_limit=1000000000):
done = 0
for r_id in resources:
for t in res_tags[r_id]:
- M[resources.index(r_id),tags.index(t)] = 1
+ M[resources.index(r_id), tags.index(t)] = 1
done += 1
if self.verbose:
- sys.stdout.write("\rGenerating association matrix %.2f%%" % (float(100*done)/n_filtered_associations))
+ sys.stdout.write(
+ "\rGenerating association matrix %.2f%%" % (float(100 * done) / n_filtered_associations)
+ )
sys.stdout.flush()
if self.verbose:
print("")
@@ -186,23 +188,24 @@ def tas_to_association_matrix(self, tag_threshold=0, line_limit=1000000000):
filename = "FS%.4i%.2i%.2i" % (datetime.today().year, datetime.today().month, datetime.today().day)
M.export_mtx(RECOMMENDATION_TMP_DATA_DIR + filename + '_ASSOCIATION_MATRIX.mtx')
- save(RECOMMENDATION_TMP_DATA_DIR + filename + '_RESOURCE_IDS.npy',resources)
- save(RECOMMENDATION_TMP_DATA_DIR + filename + '_TAG_IDS.npy',tags_ids)
- save(RECOMMENDATION_TMP_DATA_DIR + filename + '_TAG_NAMES.npy',tags)
- saveToJson(RECOMMENDATION_TMP_DATA_DIR + filename + '_RESOURCES_TAGS.json',res_tags, verbose = self.verbose)
+ save(RECOMMENDATION_TMP_DATA_DIR + filename + '_RESOURCE_IDS.npy', resources)
+ save(RECOMMENDATION_TMP_DATA_DIR + filename + '_TAG_IDS.npy', tags_ids)
+ save(RECOMMENDATION_TMP_DATA_DIR + filename + '_TAG_NAMES.npy', tags)
+ saveToJson(RECOMMENDATION_TMP_DATA_DIR + filename + '_RESOURCES_TAGS.json', res_tags, verbose=self.verbose)
#saveToJson(RECOMMENDATION_TMP_DATA_DIR + filename + '_RESOURCES_TAGS_NO_FILTER.json',res_tags_no_filt, verbose = self.verbose)
#saveToJson(RECOMMENDATION_TMP_DATA_DIR + filename + '_RESOURCES_USER.json',res_user, verbose = self.verbose)
return filename
-
- def association_matrix_to_similarity_matrix(self,
- metric="cosine",
- dataset="FREESOUND",
- save_sim=False,
- training_set=None,
- out_name_prefix="",
- is_general_recommender=False):
+ def association_matrix_to_similarity_matrix(
+ self,
+ metric="cosine",
+ dataset="FREESOUND",
+ save_sim=False,
+ training_set=None,
+ out_name_prefix="",
+ is_general_recommender=False
+ ):
if self.verbose:
print("Loading association matrix and tag names, ids files...")
@@ -225,17 +228,21 @@ def association_matrix_to_similarity_matrix(self,
MM = spmatrix.dot(M[resource_id_positions, :], M[resource_id_positions, :])
# Get similarity matrix
- sim_matrix = spmatrix.ll_mat(MM.shape[0],MM.shape[0])
+ sim_matrix = spmatrix.ll_mat(MM.shape[0], MM.shape[0])
non_zero_index = list(MM.keys())
for index in non_zero_index:
if metric == 'cosine':
- sim_matrix[index[0], index[1]] = MM[index[0], index[1]] * (old_div(1, (sqrt(MM[index[0], index[0]]) * sqrt(MM[index[1], index[1]]))))
+ sim_matrix[index[0], index[1]] = MM[index[0], index[1]] * (
+ old_div(1, (sqrt(MM[index[0], index[0]]) * sqrt(MM[index[1], index[1]])))
+ )
elif metric == 'coocurrence':
sim_matrix[index[0], index[1]] = MM[index[0], index[1]]
elif metric == 'binary':
- sim_matrix[index[0], index[1]] = old_div(MM[index[0], index[1]],MM[index[0], index[1]])
+ sim_matrix[index[0], index[1]] = old_div(MM[index[0], index[1]], MM[index[0], index[1]])
elif metric == 'jaccard':
- sim_matrix[index[0], index[1]] = MM[index[0], index[1]] * (old_div(1, (MM[index[0], index[0]] + MM[index[1], index[1]] - MM[index[0], index[1]])))
+ sim_matrix[index[0], index[1]] = MM[index[0], index[1]] * (
+ old_div(1, (MM[index[0], index[0]] + MM[index[1], index[1]] - MM[index[0], index[1]]))
+ )
# Clean out similarity matrix (clean tags that are not used)
tag_positions = []
@@ -244,7 +251,7 @@ def association_matrix_to_similarity_matrix(self,
tag_positions.append(i)
# Transform sparse similarity matrix to npy format
- sim_matrix_npy = mtx2npy(sim_matrix[tag_positions,tag_positions])
+ sim_matrix_npy = mtx2npy(sim_matrix[tag_positions, tag_positions])
tag_names_sim_matrix = tag_names[tag_positions]
if save_sim:
@@ -275,12 +282,14 @@ def association_matrix_to_similarity_matrix(self,
return {'SIMILARITY_MATRIX': sim_matrix_npy, 'TAG_NAMES': tag_names_sim_matrix}
- def process_tag_recommendation_data(self,
- resources_limit=None,
- tag_threshold=10,
- line_limit=99999999999999,
- recompute_all_classes=False,
- similarity_metric="cosine"):
+ def process_tag_recommendation_data(
+ self,
+ resources_limit=None,
+ tag_threshold=10,
+ line_limit=99999999999999,
+ recompute_all_classes=False,
+ similarity_metric="cosine"
+ ):
# Process tas file and turn into association matrix and derived files
database_name = self.tas_to_association_matrix(tag_threshold=tag_threshold, line_limit=line_limit)
@@ -305,7 +314,7 @@ def process_tag_recommendation_data(self,
resource_class[id] = cd.detectCommunity(input_tags=resources_tags[id])
if self.verbose:
- sys.stdout.write("\rClassifying resources... %.2f%%"%(float(100*(count+1))/len(instances_ids)))
+ sys.stdout.write("\rClassifying resources... %.2f%%" % (float(100 * (count + 1)) / len(instances_ids)))
sys.stdout.flush()
print("")
@@ -371,7 +380,7 @@ def clear_temp_files(self):
for filename in os.listdir(RECOMMENDATION_DATA_DIR):
file_extension = filename.split(".")[-1]
if file_extension in ['npy', 'json', 'pkl']:
- if "Classifier" not in filename and "Index" not in filename: # Do not alter Classifier files
+ if "Classifier" not in filename and "Index" not in filename: # Do not alter Classifier files
if filename[0:6] == "backup":
# Delete old backups
print("Removing %s" % RECOMMENDATION_DATA_DIR + filename)
@@ -399,7 +408,12 @@ def clear_temp_files(self):
os.remove(RECOMMENDATION_TMP_DATA_DIR + filename)
class_names = list(set(class_names))
- saveToJson(RECOMMENDATION_DATA_DIR + 'Current_database_and_class_names.json', {'database': current_database_name, 'classes':class_names})
+ saveToJson(
+ RECOMMENDATION_DATA_DIR + 'Current_database_and_class_names.json', {
+ 'database': current_database_name,
+ 'classes': class_names
+ }
+ )
# NOTE: after the cleaning, tag recommendation needs to be reloaded manually
@@ -416,7 +430,7 @@ def rollback_last_backup(self):
for filename in os.listdir(RECOMMENDATION_DATA_DIR):
file_extension = filename.split(".")[-1]
if file_extension in ['npy', 'json']:
- if "Classifier" not in filename and "Index" not in filename: # Do not alter Classifier files and index
+ if "Classifier" not in filename and "Index" not in filename: # Do not alter Classifier files and index
if filename[0:6] != "backup":
print("Removing %s" % RECOMMENDATION_DATA_DIR + filename)
os.remove(RECOMMENDATION_DATA_DIR + filename)
@@ -424,7 +438,7 @@ def rollback_last_backup(self):
for filename in os.listdir(RECOMMENDATION_DATA_DIR):
file_extension = filename.split(".")[-1]
if file_extension in ['npy', 'json']:
- if "Classifier" not in filename: # Do not alter Classifier files
+ if "Classifier" not in filename: # Do not alter Classifier files
if filename[0:6] == "backup":
# Set previous matrixs to "backup mode" (will be deleted in the next update)
print("Rolling back backup %s" % RECOMMENDATION_DATA_DIR + filename)
diff --git a/tagrecommendation/tag_recommendation/heuristics.py b/tagrecommendation/tag_recommendation/heuristics.py
index 4ca27334a..c716c6d35 100644
--- a/tagrecommendation/tag_recommendation/heuristics.py
+++ b/tagrecommendation/tag_recommendation/heuristics.py
@@ -23,5 +23,15 @@
from .tag_recommendation_utils import *
heuristics = {
- 'hRankPercentage015': {'name':'RankP@0.15','c':cNMostSimilar, 'a':aNormalizedRankSum,'s':sPercentage, 'options':{'cNMostSimilar_N':100, 'aNormalizedRankSum_factor':1.0, 'sPercentage_percentage': 0.15}},
+ 'hRankPercentage015': {
+ 'name': 'RankP@0.15',
+ 'c': cNMostSimilar,
+ 'a': aNormalizedRankSum,
+ 's': sPercentage,
+ 'options': {
+ 'cNMostSimilar_N': 100,
+ 'aNormalizedRankSum_factor': 1.0,
+ 'sPercentage_percentage': 0.15
+ }
+ },
}
diff --git a/tagrecommendation/tag_recommendation/tag_recommendation_utils.py b/tagrecommendation/tag_recommendation/tag_recommendation_utils.py
index cce20be25..20998d1ea 100644
--- a/tagrecommendation/tag_recommendation/tag_recommendation_utils.py
+++ b/tagrecommendation/tag_recommendation/tag_recommendation_utils.py
@@ -34,23 +34,26 @@ def cNMostSimilar(input_tags, tag_names, similarity_matrix, options):
# Find N most similar tags in the row
idx = unicode_tag_names.index(tag)
#where(tag_names == tag)[0][0]
- row_idx = nonzero(similarity_matrix[idx,:])
+ row_idx = nonzero(similarity_matrix[idx, :])
row_idx = row_idx[0]
- row = similarity_matrix[idx,row_idx]
+ row = similarity_matrix[idx, row_idx]
MAX = N
- most_similar_idx = row.argsort()[-MAX-1:-1][::-1] # We pick the first N most similar tags (practically the same as no threshold but more efficient)
+ most_similar_idx = row.argsort(
+ )[-MAX - 1:-1
+ ][::-1
+ ] # We pick the first N most similar tags (practically the same as no threshold but more efficient)
most_similar_dist = row[most_similar_idx]
most_similar_tags = tag_names[row_idx[most_similar_idx]]
rank = N
- for count,item in enumerate(most_similar_tags):
+ for count, item in enumerate(most_similar_tags):
if item not in input_tags:
- candidate_tags.append( {'name':item, 'rank':rank, 'dist':most_similar_dist[count], 'from':tag} )
+ candidate_tags.append({'name': item, 'rank': rank, 'dist': most_similar_dist[count], 'from': tag})
rank -= 1
else:
- pass # recommended tag was already present in input tags
+ pass # recommended tag was already present in input tags
else:
- pass # If tag does not exist we do not recommend anything. Maybe we could do something else here
+ pass # If tag does not exist we do not recommend anything. Maybe we could do something else here
return candidate_tags
@@ -64,20 +67,22 @@ def aNormalizedRankSum(candidate_tags, input_tags, options):
aggregated_candiate_tags = {}
for item in candidate_tags:
- if item['name'] in aggregated_candiate_tags: # Item already there
- aggregated_candiate_tags[item['name']] = (aggregated_candiate_tags[item['name']] + float(item['rank'])/(len(input_tags))) * factor
+ if item['name'] in aggregated_candiate_tags: # Item already there
+ aggregated_candiate_tags[
+ item['name']
+ ] = (aggregated_candiate_tags[item['name']] + float(item['rank']) / (len(input_tags))) * factor
else:
- aggregated_candiate_tags[item['name']] = float(item['rank'])/(len(input_tags))
+ aggregated_candiate_tags[item['name']] = float(item['rank']) / (len(input_tags))
aggregated_candiate_tags_list = []
for key in aggregated_candiate_tags.keys():
- aggregated_candiate_tags_list.append({"name":key, "rank": aggregated_candiate_tags[key]})
+ aggregated_candiate_tags_list.append({"name": key, "rank": aggregated_candiate_tags[key]})
aggregated_candiate_tags_list.sort(key=operator.itemgetter('rank'))
aggregated_candiate_tags_list.reverse()
return aggregated_candiate_tags, aggregated_candiate_tags_list
-def sThreshold(aggregated_candiate_tags_list, aggregated_candiate_tags, input_tags, options, threshold = None):
+def sThreshold(aggregated_candiate_tags_list, aggregated_candiate_tags, input_tags, options, threshold=None):
if not threshold:
threshold = options['sThreshold_threshold']
@@ -98,5 +103,5 @@ def sPercentage(aggregated_candiate_tags_list, aggregated_candiate_tags, input_t
percentage = options['sPercentage_percentage']
max_score = aggregated_candiate_tags_list[0]['rank']
- threshold = max_score*(1.0-percentage)
+ threshold = max_score * (1.0 - percentage)
return sThreshold(aggregated_candiate_tags_list, aggregated_candiate_tags, input_tags, options, threshold)
diff --git a/tagrecommendation/tag_recommendation/tag_recommender.py b/tagrecommendation/tag_recommendation/tag_recommender.py
index fd14d7494..768066269 100644
--- a/tagrecommendation/tag_recommendation/tag_recommender.py
+++ b/tagrecommendation/tag_recommendation/tag_recommender.py
@@ -73,14 +73,20 @@ def recommend_tags(self, input_tags=None):
selectAlgorithm = self.heuristic['s']
# CHOOSE candidate tags
- candidate_tags = chooseAlgorithm(input_tags, self.data['TAG_NAMES'], self.data['SIMILARITY_MATRIX'], self.heuristic['options'])
+ candidate_tags = chooseAlgorithm(
+ input_tags, self.data['TAG_NAMES'], self.data['SIMILARITY_MATRIX'], self.heuristic['options']
+ )
# AGGREGATE candidate tags
- aggregated_candiate_tags, aggregated_candiate_tags_list = aggregateAlgorithm(candidate_tags, input_tags, self.heuristic['options'])
+ aggregated_candiate_tags, aggregated_candiate_tags_list = aggregateAlgorithm(
+ candidate_tags, input_tags, self.heuristic['options']
+ )
# SELECT the number of tags to recommend
if len(aggregated_candiate_tags_list) > 1:
- added_tags = selectAlgorithm(aggregated_candiate_tags_list, aggregated_candiate_tags, input_tags, self.heuristic['options'])
+ added_tags = selectAlgorithm(
+ aggregated_candiate_tags_list, aggregated_candiate_tags, input_tags, self.heuristic['options']
+ )
else:
if len(aggregated_candiate_tags_list) == 1:
added_tags = list()
diff --git a/tagrecommendation/tagrecommendation_server.py b/tagrecommendation/tagrecommendation_server.py
index e4f9a9044..b1ac9f4c1 100644
--- a/tagrecommendation/tagrecommendation_server.py
+++ b/tagrecommendation/tagrecommendation_server.py
@@ -42,14 +42,17 @@
def server_interface(resource):
return {
- 'recommend_tags': resource.recommend_tags, # input_tags (tags separated by commas), max_number_of_tags (optional)
+ 'recommend_tags':
+ resource.recommend_tags, # input_tags (tags separated by commas), max_number_of_tags (optional)
'reload': resource.reload,
'last_indexed_id': resource.last_indexed_id,
- 'add_to_index': resource.add_to_index, # sound_ids (str separated by commas), sound_tagss (sets of tags separated by #)
+ 'add_to_index':
+ resource.add_to_index, # sound_ids (str separated by commas), sound_tagss (sets of tags separated by #)
}
class TagRecommendationServer(resource.Resource):
+
def __init__(self):
resource.Resource.__init__(self)
self.methods = server_interface(self)
@@ -60,7 +63,8 @@ def __init__(self):
def load(self):
try:
tag_recommendation_data = loadFromJson(
- tr_settings.RECOMMENDATION_DATA_DIR + 'Current_database_and_class_names.json')
+ tr_settings.RECOMMENDATION_DATA_DIR + 'Current_database_and_class_names.json'
+ )
DATABASE = tag_recommendation_data['database']
CLASSES = tag_recommendation_data['classes']
self.cbtr = CommunityBasedTagRecommender(dataset=DATABASE, classes=CLASSES)
@@ -68,12 +72,13 @@ def load(self):
except:
self.cbtr = None
- logger.info("No computed matrices were found, recommendation system not loading for the moment ("
- "but service listening for data to come).")
+ logger.info(
+ "No computed matrices were found, recommendation system not loading for the moment ("
+ "but service listening for data to come)."
+ )
try:
- self.index_stats = loadFromJson(
- tr_settings.RECOMMENDATION_DATA_DIR + 'Current_index_stats.json')
+ self.index_stats = loadFromJson(tr_settings.RECOMMENDATION_DATA_DIR + 'Current_index_stats.json')
logger.info("Matrices computed out of information from %i sounds" % self.index_stats['n_sounds_in_matrix'])
except Exception as e:
print(e)
@@ -91,7 +96,7 @@ def load(self):
self.index_stats['n_sounds_in_index'] = 0
self.index = dict()
- def error(self,message):
+ def error(self, message):
return json.dumps({'Error': message})
def getChild(self, name, request):
@@ -107,8 +112,7 @@ def recommend_tags(self, input_tags, max_number_of_tags=None):
input_tags = input_tags[0].split(",")
if max_number_of_tags:
max_number_of_tags = int(max_number_of_tags[0])
- recommended_tags, com_name = self.cbtr.recommend_tags(input_tags,
- max_number_of_tags=max_number_of_tags)
+ recommended_tags, com_name = self.cbtr.recommend_tags(input_tags, max_number_of_tags=max_number_of_tags)
result = {'error': False, 'result': {'tags': recommended_tags, 'community': com_name}}
except Exception as e:
@@ -125,10 +129,12 @@ def reload(self):
def last_indexed_id(self):
result = {'error': False, 'result': self.index_stats['biggest_id_in_index']}
- logger.info('Getting last indexed id information (%i, %i sounds in index, %i sounds in matrix)'
- % (self.index_stats['biggest_id_in_index'],
- self.index_stats['n_sounds_in_index'],
- self.index_stats['n_sounds_in_matrix']))
+ logger.info(
+ 'Getting last indexed id information (%i, %i sounds in index, %i sounds in matrix)' % (
+ self.index_stats['biggest_id_in_index'], self.index_stats['n_sounds_in_index'],
+ self.index_stats['n_sounds_in_matrix']
+ )
+ )
return json.dumps(result)
def add_to_index(self, sound_ids, sound_tagss):
@@ -161,7 +167,10 @@ def add_to_index(self, sound_ids, sound_tagss):
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
if tr_settings.LOG_TO_FILE:
handler = ConcurrentRotatingFileHandler(
- tr_settings.LOGFILE, mode="a", maxBytes=2 * 1024 * 1024, backupCount=5,
+ tr_settings.LOGFILE,
+ mode="a",
+ maxBytes=2 * 1024 * 1024,
+ backupCount=5,
)
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
diff --git a/tagrecommendation/utils.py b/tagrecommendation/utils.py
index 51d20500a..b25085bf7 100644
--- a/tagrecommendation/utils.py
+++ b/tagrecommendation/utils.py
@@ -31,27 +31,29 @@ def loadFromJson(path, verbose=False):
print("Loading data from '" + path + "'")
return json.load(f)
+
def saveToJson(path="", data="", verbose=True):
with open(path, mode='w') as f:
if verbose:
print("Saving data to '" + path + "'")
- json.dump(data,f,indent=4)
+ json.dump(data, f, indent=4)
+
-def mtx2npy(M, verbose = True):
+def mtx2npy(M, verbose=True):
n = M.shape[0]
m = M.shape[1]
- npy = zeros((n, m) , 'float32')
+ npy = zeros((n, m), 'float32')
#non_zero_index = M.keys()
items = list(M.items())
nItems = len(M.items())
done = 0
#for index in non_zero_index :
for index, value in items:
- npy[ index[0] ][ index[1] ] = value #M[ index[0] , index[1] ]
+ npy[index[0]][index[1]] = value #M[ index[0] , index[1] ]
done += 1
if verbose:
- sys.stdout.write("\rConverting to npy... " + '%.2f'%((float(done)*100)/float(nItems)) + "% ")
+ sys.stdout.write("\rConverting to npy... " + '%.2f' % ((float(done) * 100) / float(nItems)) + "% ")
sys.stdout.flush()
if verbose:
diff --git a/tags/models.py b/tags/models.py
index c5c805a3a..11d0c756a 100644
--- a/tags/models.py
+++ b/tags/models.py
@@ -25,6 +25,7 @@
from django.utils.encoding import smart_str
from django.urls import reverse
+
class Tag(models.Model):
name = models.SlugField(unique=True, db_index=True, max_length=100)
@@ -60,6 +61,7 @@ class Meta:
ordering = ("-created",)
unique_together = (('tag', 'content_type', 'object_id'),)
+
# Class to get old tags ids linked to new tag ids
# The goal is to at some point deprecate the old tag ids completely
class FS1Tag(models.Model):
diff --git a/tags/templatetags/tags.py b/tags/templatetags/tags.py
index e9463f1d9..6eacb7e23 100644
--- a/tags/templatetags/tags.py
+++ b/tags/templatetags/tags.py
@@ -25,19 +25,23 @@
register = template.Library()
+
@register.filter
def add_sizes(tags, arguments):
sort, small_size, large_size = arguments.split(":")
return annotate_tags(tags, sort, float(small_size), float(large_size))
+
@register.filter
def join_tags_exclude(list, exclude):
return "/".join(sorted([x for x in list if x != exclude])) if list else None
+
@register.filter
def join_tags_include(list, include):
return "/".join(sorted(list + [include])) if list else include
+
@register.inclusion_tag('molecules/bw_follow_tags_widget.html', takes_context=True)
def bw_follow_tags_widget(context):
request = context['request']
@@ -52,10 +56,10 @@ def bw_follow_tags_widget(context):
if request.user.is_authenticated:
show_unfollow_button = follow_utils.is_user_following_tag(request.user, slash_tag)
-
+
return {
'follow_tags_url': follow_tags_url,
'unfollow_tags_url': unfollow_tags_url,
'show_unfollow_button': show_unfollow_button,
'is_authenticated': request.user.is_authenticated
- }
\ No newline at end of file
+ }
diff --git a/tags/tests.py b/tags/tests.py
index 8f37f5c98..d9d2537be 100644
--- a/tags/tests.py
+++ b/tags/tests.py
@@ -25,41 +25,41 @@
class OldTagLinksRedirectTestCase(TestCase):
-
+
fixtures = ['fs1tags']
-
+
def setUp(self):
self.fs1tags = [tag.fs1_id for tag in FS1Tag.objects.all()[0:2]]
-
+
def test_old_tag_link_redirect_single_ok(self):
# 301 permanent redirect, single tag result exists
- response = self.client.get(reverse('old-tag-page'), data={'id' : self.fs1tags[0]})
+ response = self.client.get(reverse('old-tag-page'), data={'id': self.fs1tags[0]})
self.assertEqual(response.status_code, 301)
-
- def test_old_tag_link_redirect_multi_ok(self):
+
+ def test_old_tag_link_redirect_multi_ok(self):
# 301 permanent redirect, multiple tags result exists
- ids = '_'.join([ str(temp) for temp in self.fs1tags])
- response = self.client.get(reverse('old-tag-page'), data={'id' : ids})
+ ids = '_'.join([str(temp) for temp in self.fs1tags])
+ response = self.client.get(reverse('old-tag-page'), data={'id': ids})
self.assertEqual(response.status_code, 301)
-
+
def test_old_tag_link_redirect_partial_ids_list(self):
# 301 permanent redirect, one of the tags in the list exists
partial_ids = str(self.fs1tags[0]) + '_0'
- response = self.client.get(reverse('old-tag-page'), data={'id' : partial_ids})
- self.assertEqual(response.status_code, 301)
-
+ response = self.client.get(reverse('old-tag-page'), data={'id': partial_ids})
+ self.assertEqual(response.status_code, 301)
+
def test_old_tag_link_redirect_not_exists_id(self):
# 404 id exists does not exist
- response = self.client.get(reverse('old-tag-page'), data={'id' : 0}, follow=True)
+ response = self.client.get(reverse('old-tag-page'), data={'id': 0}, follow=True)
self.assertEqual(response.status_code, 404)
-
+
def test_old_tag_link_redirect_invalid_id(self):
# 404 invalid id
- response = self.client.get(reverse('old-tag-page'), data={'id' : 'invalid_id'}, follow=True)
- self.assertEqual(response.status_code, 404)
-
+ response = self.client.get(reverse('old-tag-page'), data={'id': 'invalid_id'}, follow=True)
+ self.assertEqual(response.status_code, 404)
+
def test_old_tag_link_redirect_partial_invalid_id(self):
# 404 invalid id in the id list
partial_ids = str(self.fs1tags[0]) + '_invalidValue'
- response = self.client.get(reverse('old-tag-page'), data={'id' : partial_ids}, follow=True)
- self.assertEqual(response.status_code, 404)
+ response = self.client.get(reverse('old-tag-page'), data={'id': partial_ids}, follow=True)
+ self.assertEqual(response.status_code, 404)
diff --git a/tags/views.py b/tags/views.py
index ed80ceac2..045b4c291 100644
--- a/tags/views.py
+++ b/tags/views.py
@@ -48,7 +48,7 @@ def tags(request, multiple_tags=None):
if pack_flt is not None:
# If username is passed as a GET parameter, add it as well to the filter
search_filter += f'+grouping_pack:{pack_flt}'
-
+
return HttpResponseRedirect(f"{reverse('tags')}?f={search_filter}")
else:
# Share same view code as for the search view, but set "tags mode" on
diff --git a/tickets/__init__.py b/tickets/__init__.py
index 45b723009..0acd5ce86 100644
--- a/tickets/__init__.py
+++ b/tickets/__init__.py
@@ -6,105 +6,125 @@
TICKET_STATUS_CLOSED = 'closed'
MODERATION_TEXTS = [
- ('Illegal', "Hey there. Thanks for contributing to Freesound.\n"
- "Unfortunately we've had to delete this sound.\n"
- "\n"
- "Freesound only hosts files that are not copyright infringing. We reject audio taken from copyright "
- "protected media without permission. Please do not upload other people's works. "
- "Only sounds that you have made yourself or own the copyrights.\n"
- "\n"
- "If you'd like to find out what you can upload, please have a look here:\n"
- "https://freesound.org/help/faq/#what-sounds-are-legal-to-put-on-freesound\n"
- "\n"
- "Thanks!"),
- ('Music', "Hey there. Thanks for contributing to Freesound.\n"
- "Unfortunately, you've uploaded some music which doesn't fit with the content we allow onto the site. "
- "We do however genrally allow music samples that are under 1 minute long, not songs.\n"
- "\n"
- "Some recommended sites for sharing produced music/songs: "
- "Soundcloud, Bandcamp, CCMixter and The Free Music Archive\n"
- "\n"
- "By the way, we welcome material such as loops, riffs, melodies etc. So you could try cutting up your "
- "music into short instrumental loops and uploading them that way. In fact, music and drum loops "
- "are some of the most searched and downloaded sounds on Freesound!\n"
- "\n"
- "Thanks for understanding!"),
- ('Not a Sound', "Hey there. Thanks for contributing to Freesound.\n"
- "\n"
- "You have uploaded a file that does not fit with the type of content Freesound is looking for. "
- "Content we reject includes songs, audiobooks, adverts/commercials, podcasts and copyrighted "
- "material.\n"
- "\n"
- "Thanks for understanding!"),
- ('Language', "Hey there. Thanks for contributing to Freesound.\n"
- "This is a great sound, but could you possibly add an English title, description and tags?\n"
- "\n"
- "You can keep your original description, just add the english in too. This will ensure that your "
- "sounds are discoverable in the search. Because our user-base is mainly English speaking, it makes "
- "sense to do this.\n"
- "\n"
- "Also, please include as much detail as you can in the description.\n"
- "\n"
- "If you can't find how to edit your sound here's a little visual guide:\n"
- "https://i.imgur.com/s4w2ysv.jpg\n"
- "\n"
- "Many thanks!"),
- ('Description/Tags', "Hey there. Thanks for contributing to Freesound.\n"
- "We noticed that your upload is missing a description / tags. "
- "Before approving, Please could you update this to include more detail? "
- "It's important to help other users find your sound in the search.\n"
- "\n"
- "If you need some guidance on describing please see the following FAQ page:\n"
- "https://freesound.org/help/faq/#how-should-i-describe-my-sounds\n"
- "\n"
- "Also, if you can't find how to edit your sound, here's a little visual guide:\n"
- "https://i.imgur.com/s4w2ysv.jpg\n"
- "\n"
- "Thanks!"),
- ('Credit Sounds', "Hey there. Thanks for contributing to Freesound.\n"
- "We've noticed that you have used one or more sounds from this site that have the "
- "'Attribution' and/or 'Non-Commercial' license. Other users need to know this, so before we "
- "can approve it onto the site, we need you to credit these sounds so that everyone can follow "
- "the respective license terms.\n"
- "\n"
- "Here is an example of crediting sounds within your description:\n"
- "https://freesound.org/s/544453\n"
- "\n"
- "If you can't find how to edit your sound, here's a little visual guide:\n"
- "https://i.imgur.com/s4w2ysv.jpg\n"
- "\n"
- "Many thanks!"),
- ('Verify Details', "Hey there. Thanks for contributing to Freesound.\n"
- "\n"
- "Before we can moderate your upload, could you possibly update the description/tags? "
- "Any details such as how it was created, recording device, software used, date/location etc- "
- "-are extremely useful.\n"
- "\n"
- "If you can't find how to edit your sound, here's a little visual guide:\n"
- "https://i.imgur.com/s4w2ysv.jpg\n"
- "\n"
- "Many thanks!\n"
- "\n"
- "(If there is no response to this ticket within 2 weeks, the sound will be removed)"),
- ('License Mismatch', "Hey there. Thanks for sharing your work on Freesound.\n"
- "We noticed that the sound you've edited/remixed and uploaded doesn't match the original CC "
- "license. This is really important to get correct.\n"
- "\n"
- "Could you please update the license type of the sound by clicking on "
- "'edit sound information' on the sound's page?\n"
- "\n"
- "Many thanks!"),
- ('Permission', "Hey there. Thanks for contributing to Freesound.\n"
- "Please could you clarify for us that you have permission to upload the recording of the "
- "performer, singer or speaker to Freesound?\n"
- "\n"
- "It's important that you don't share things that you don't have permission to upload.\n"
- "Please let us know.\n"
- "\n"
- "Thanks!"),
- ('Timeout', "Deleting due to the lack of response to the ticket.\n"
- "If you believe this was in error, or you didn't have time to respond, "
- "do feel free to re-upload the sound or get in touch with us.\n"
- "\n"
- "Thanks for understanding!")
-]
\ No newline at end of file
+ (
+ 'Illegal', "Hey there. Thanks for contributing to Freesound.\n"
+ "Unfortunately we've had to delete this sound.\n"
+ "\n"
+ "Freesound only hosts files that are not copyright infringing. We reject audio taken from copyright "
+ "protected media without permission. Please do not upload other people's works. "
+ "Only sounds that you have made yourself or own the copyrights.\n"
+ "\n"
+ "If you'd like to find out what you can upload, please have a look here:\n"
+ "https://freesound.org/help/faq/#what-sounds-are-legal-to-put-on-freesound\n"
+ "\n"
+ "Thanks!"
+ ),
+ (
+ 'Music', "Hey there. Thanks for contributing to Freesound.\n"
+ "Unfortunately, you've uploaded some music which doesn't fit with the content we allow onto the site. "
+ "We do however generally allow music samples that are under 1 minute long, not songs.\n"
+ "\n"
+ "Some recommended sites for sharing produced music/songs: "
+ "Soundcloud, Bandcamp, CCMixter and The Free Music Archive\n"
+ "\n"
+ "By the way, we welcome material such as loops, riffs, melodies etc. So you could try cutting up your "
+ "music into short instrumental loops and uploading them that way. In fact, music and drum loops "
+ "are some of the most searched and downloaded sounds on Freesound!\n"
+ "\n"
+ "Thanks for understanding!"
+ ),
+ (
+ 'Not a Sound', "Hey there. Thanks for contributing to Freesound.\n"
+ "\n"
+ "You have uploaded a file that does not fit with the type of content Freesound is looking for. "
+ "Content we reject includes songs, audiobooks, adverts/commercials, podcasts and copyrighted "
+ "material.\n"
+ "\n"
+ "Thanks for understanding!"
+ ),
+ (
+ 'Language', "Hey there. Thanks for contributing to Freesound.\n"
+ "This is a great sound, but could you possibly add an English title, description and tags?\n"
+ "\n"
+ "You can keep your original description, just add the English in too. This will ensure that your "
+ "sounds are discoverable in the search. Because our user-base is mainly English speaking, it makes "
+ "sense to do this.\n"
+ "\n"
+ "Also, please include as much detail as you can in the description.\n"
+ "\n"
+ "If you can't find how to edit your sound here's a little visual guide:\n"
+ "https://i.imgur.com/s4w2ysv.jpg\n"
+ "\n"
+ "Many thanks!"
+ ),
+ (
+ 'Description/Tags', "Hey there. Thanks for contributing to Freesound.\n"
+ "We noticed that your upload is missing a description / tags. "
+ "Before approving, please could you update this to include more detail? "
+ "It's important to help other users find your sound in the search.\n"
+ "\n"
+ "If you need some guidance on describing please see the following FAQ page:\n"
+ "https://freesound.org/help/faq/#how-should-i-describe-my-sounds\n"
+ "\n"
+ "Also, if you can't find how to edit your sound, here's a little visual guide:\n"
+ "https://i.imgur.com/s4w2ysv.jpg\n"
+ "\n"
+ "Thanks!"
+ ),
+ (
+ 'Credit Sounds', "Hey there. Thanks for contributing to Freesound.\n"
+ "We've noticed that you have used one or more sounds from this site that have the "
+ "'Attribution' and/or 'Non-Commercial' license. Other users need to know this, so before we "
+ "can approve it onto the site, we need you to credit these sounds so that everyone can follow "
+ "the respective license terms.\n"
+ "\n"
+ "Here is an example of crediting sounds within your description:\n"
+ "https://freesound.org/s/544453\n"
+ "\n"
+ "If you can't find how to edit your sound, here's a little visual guide:\n"
+ "https://i.imgur.com/s4w2ysv.jpg\n"
+ "\n"
+ "Many thanks!"
+ ),
+ (
+ 'Verify Details', "Hey there. Thanks for contributing to Freesound.\n"
+ "\n"
+ "Before we can moderate your upload, could you possibly update the description/tags? "
+ "Any details such as how it was created, recording device, software used, date/location etc- "
+ "-are extremely useful.\n"
+ "\n"
+ "If you can't find how to edit your sound, here's a little visual guide:\n"
+ "https://i.imgur.com/s4w2ysv.jpg\n"
+ "\n"
+ "Many thanks!\n"
+ "\n"
+ "(If there is no response to this ticket within 2 weeks, the sound will be removed)"
+ ),
+ (
+ 'License Mismatch', "Hey there. Thanks for sharing your work on Freesound.\n"
+ "We noticed that the sound you've edited/remixed and uploaded doesn't match the original CC "
+ "license. This is really important to get correct.\n"
+ "\n"
+ "Could you please update the license type of the sound by clicking on "
+ "'edit sound information' on the sound's page?\n"
+ "\n"
+ "Many thanks!"
+ ),
+ (
+ 'Permission', "Hey there. Thanks for contributing to Freesound.\n"
+ "Please could you clarify for us that you have permission to upload the recording of the "
+ "performer, singer or speaker to Freesound?\n"
+ "\n"
+ "It's important that you don't share things that you don't have permission to upload.\n"
+ "Please let us know.\n"
+ "\n"
+ "Thanks!"
+ ),
+ (
+ 'Timeout', "Deleting due to the lack of response to the ticket.\n"
+ "If you believe this was in error, or you didn't have time to respond, "
+ "do feel free to re-upload the sound or get in touch with us.\n"
+ "\n"
+ "Thanks for understanding!"
+ )
+]
diff --git a/tickets/admin.py b/tickets/admin.py
index 3543cf05d..dd8a726e4 100644
--- a/tickets/admin.py
+++ b/tickets/admin.py
@@ -26,10 +26,13 @@
@admin.register(Ticket)
class TicketAdmin(admin.ModelAdmin):
- raw_id_fields = ('sender', 'assignee', 'sound')
+ raw_id_fields = ('sender', 'assignee', 'sound')
list_display = ('id', 'status', 'assignee', 'sender', 'sound_link', 'created')
- list_filter = ('status', )
- search_fields = ('=sender__username', '=sound__id', )
+ list_filter = ('status',)
+ search_fields = (
+ '=sender__username',
+ '=sound__id',
+ )
def has_add_permission(self, request):
return False
@@ -41,7 +44,6 @@ def has_add_permission(self, request):
def sound_link(self, obj):
if obj.sound_id is None:
return '-'
- return mark_safe('
{}'.format(
- reverse('short-sound-link', args=[obj.sound_id]), obj.sound))
-
-
+ return mark_safe(
+ '
{}'.format(reverse('short-sound-link', args=[obj.sound_id]), obj.sound)
+ )
diff --git a/tickets/forms.py b/tickets/forms.py
index 5c263e0d9..316afb4f8 100644
--- a/tickets/forms.py
+++ b/tickets/forms.py
@@ -48,35 +48,28 @@ class AnonymousMessageForm(forms.Form):
# Sound moderation forms
-MODERATION_CHOICES = [(x, x) for x in
- ['Approve',
- 'Delete',
- 'Defer',
- 'Return',
- 'Whitelist']]
+MODERATION_CHOICES = [(x, x) for x in ['Approve', 'Delete', 'Defer', 'Return', 'Whitelist']]
IS_EXPLICIT_KEEP_USER_PREFERENCE_KEY = "K"
IS_EXPLICIT_ADD_FLAG_KEY = "A"
IS_EXPLICIT_REMOVE_FLAG_KEY = "R"
-IS_EXPLICIT_FLAG_CHOICES = ((IS_EXPLICIT_KEEP_USER_PREFERENCE_KEY, 'Keep user preference'),
- (IS_EXPLICIT_ADD_FLAG_KEY, 'Add "is explicit" flag'),
+IS_EXPLICIT_FLAG_CHOICES = ((IS_EXPLICIT_KEEP_USER_PREFERENCE_KEY,
+ 'Keep user preference'), (IS_EXPLICIT_ADD_FLAG_KEY, 'Add "is explicit" flag'),
(IS_EXPLICIT_REMOVE_FLAG_KEY, 'Remove "is explicit" flag'))
class SoundModerationForm(forms.Form):
- action = forms.ChoiceField(choices=MODERATION_CHOICES,
- required=True,
- widget=forms.RadioSelect(),
- label='')
-
- ticket = forms.CharField(widget=forms.widgets.HiddenInput,
- error_messages={'required': 'No sound selected...'})
-
- is_explicit = forms.ChoiceField(choices=IS_EXPLICIT_FLAG_CHOICES,
- initial=IS_EXPLICIT_KEEP_USER_PREFERENCE_KEY,
- required=True,
- label=mark_safe("
Is explicit flag"))
-
+ action = forms.ChoiceField(choices=MODERATION_CHOICES, required=True, widget=forms.RadioSelect(), label='')
+
+ ticket = forms.CharField(widget=forms.widgets.HiddenInput, error_messages={'required': 'No sound selected...'})
+
+ is_explicit = forms.ChoiceField(
+ choices=IS_EXPLICIT_FLAG_CHOICES,
+ initial=IS_EXPLICIT_KEEP_USER_PREFERENCE_KEY,
+ required=True,
+ label=mark_safe("
Is explicit flag")
+ )
+
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['action'].widget.attrs['class'] = 'bw-radio'
@@ -84,8 +77,7 @@ def __init__(self, *args, **kwargs):
class ModerationMessageForm(forms.Form):
message = HtmlCleaningCharField(widget=forms.Textarea, required=False, label=False)
- moderator_only = forms.BooleanField(
- required=False, label='Make this message only visible to moderators')
+ moderator_only = forms.BooleanField(required=False, label='Make this message only visible to moderators')
def __init__(self, *args, **kwargs):
kwargs.update(dict(label_suffix=''))
@@ -97,9 +89,7 @@ def __init__(self, *args, **kwargs):
class UserAnnotationForm(forms.Form):
- text = HtmlCleaningCharField(widget=forms.Textarea,
- required=True,
- label='')
+ text = HtmlCleaningCharField(widget=forms.Textarea, required=True, label='')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@@ -109,6 +99,4 @@ def __init__(self, *args, **kwargs):
class SoundStateForm(forms.Form):
- action = forms.ChoiceField(choices=MODERATION_CHOICES,
- required=False,
- label='Action:')
+ action = forms.ChoiceField(choices=MODERATION_CHOICES, required=False, label='Action:')
diff --git a/tickets/models.py b/tickets/models.py
index 1014af113..3887ba3e1 100644
--- a/tickets/models.py
+++ b/tickets/models.py
@@ -30,8 +30,8 @@
class Queue(models.Model):
- name = models.CharField(max_length=128)
- groups = models.ManyToManyField(Group)
+ name = models.CharField(max_length=128)
+ groups = models.ManyToManyField(Group)
notify_by_email = models.BooleanField()
def __str__(self):
@@ -39,29 +39,29 @@ def __str__(self):
def defaultkey():
- return str(uuid.uuid4()).replace('-','')
+ return str(uuid.uuid4()).replace('-', '')
class Ticket(models.Model):
- title = models.CharField(max_length=256)
- status = models.CharField(max_length=128)
- key = models.CharField(max_length=32, db_index=True, default=defaultkey)
- created = models.DateTimeField(db_index=True, auto_now_add=True)
- modified = models.DateTimeField(auto_now=True)
- comment_date = models.DateTimeField(null=True)
- last_commenter = models.ForeignKey(User, related_name='commented_tickets', null=True, on_delete=models.SET_NULL)
- sender = models.ForeignKey(User, related_name='sent_tickets', null=True, on_delete=models.SET_NULL)
- sender_email = models.EmailField(null=True)
- assignee = models.ForeignKey(User, related_name='assigned_tickets', null=True, on_delete=models.SET_NULL)
- queue = models.ForeignKey(Queue, related_name='tickets', on_delete=models.CASCADE)
- sound = models.OneToOneField('sounds.Sound', null=True, on_delete=models.SET_NULL)
-
- NOTIFICATION_QUESTION = 'emails/email_notification_question.txt'
- NOTIFICATION_APPROVED = 'emails/email_notification_approved.txt'
+ title = models.CharField(max_length=256)
+ status = models.CharField(max_length=128)
+ key = models.CharField(max_length=32, db_index=True, default=defaultkey)
+ created = models.DateTimeField(db_index=True, auto_now_add=True)
+ modified = models.DateTimeField(auto_now=True)
+ comment_date = models.DateTimeField(null=True)
+ last_commenter = models.ForeignKey(User, related_name='commented_tickets', null=True, on_delete=models.SET_NULL)
+ sender = models.ForeignKey(User, related_name='sent_tickets', null=True, on_delete=models.SET_NULL)
+ sender_email = models.EmailField(null=True)
+ assignee = models.ForeignKey(User, related_name='assigned_tickets', null=True, on_delete=models.SET_NULL)
+ queue = models.ForeignKey(Queue, related_name='tickets', on_delete=models.CASCADE)
+ sound = models.OneToOneField('sounds.Sound', null=True, on_delete=models.SET_NULL)
+
+ NOTIFICATION_QUESTION = 'emails/email_notification_question.txt'
+ NOTIFICATION_APPROVED = 'emails/email_notification_approved.txt'
NOTIFICATION_APPROVED_BUT = 'emails/email_notification_approved_but.txt'
- NOTIFICATION_DELETED = 'emails/email_notification_deleted.txt'
- NOTIFICATION_UPDATED = 'emails/email_notification_updated.txt'
- NOTIFICATION_WHITELISTED = 'emails/email_notification_whitelisted.txt'
+ NOTIFICATION_DELETED = 'emails/email_notification_deleted.txt'
+ NOTIFICATION_UPDATED = 'emails/email_notification_updated.txt'
+ NOTIFICATION_WHITELISTED = 'emails/email_notification_whitelisted.txt'
MODERATOR_ONLY = 1
USER_ONLY = 2
@@ -72,27 +72,24 @@ def get_n_last_non_moderator_only_comments(self, n):
Get the last n comments that are not 'moderator only' from the self ticket
"""
ticket_comments = self.messages.all().filter(moderator_only=False).order_by('-created')
- return list(ticket_comments)[:n] # converting from Django QuerySet to python list in order to use negative indexing
+ return list(ticket_comments
+ )[:n] # converting from Django QuerySet to python list in order to use negative indexing
def send_notification_emails(self, notification_type, sender_moderator):
# send message to assigned moderator
if sender_moderator in [Ticket.MODERATOR_ONLY, Ticket.USER_AND_MODERATOR]:
if self.assignee:
- tvars = {'ticket': self,
- 'user_to': self.assignee}
- send_mail_template(settings.EMAIL_SUBJECT_MODERATION_HANDLED,
- notification_type,
- tvars,
- user_to=self.assignee)
+ tvars = {'ticket': self, 'user_to': self.assignee}
+ send_mail_template(
+ settings.EMAIL_SUBJECT_MODERATION_HANDLED, notification_type, tvars, user_to=self.assignee
+ )
# send message to user
if sender_moderator in [Ticket.USER_ONLY, Ticket.USER_AND_MODERATOR]:
if self.sender:
- tvars = {'ticket': self,
- 'user_to': self.sender}
- send_mail_template(settings.EMAIL_SUBJECT_MODERATION_HANDLED,
- notification_type,
- tvars,
- user_to=self.sender)
+ tvars = {'ticket': self, 'user_to': self.sender}
+ send_mail_template(
+ settings.EMAIL_SUBJECT_MODERATION_HANDLED, notification_type, tvars, user_to=self.sender
+ )
def get_absolute_url(self):
return reverse('ticket', args=[smart_str(self.key)])
@@ -102,17 +99,15 @@ def __str__(self):
class Meta:
ordering = ("-created",)
- permissions = (
- ("can_moderate", "Can moderate stuff."),
- )
+ permissions = (("can_moderate", "Can moderate stuff."),)
class TicketComment(models.Model):
- sender = models.ForeignKey(User, null=True, on_delete=models.SET_NULL)
- text = models.TextField()
- created = models.DateTimeField(auto_now_add=True)
- ticket = models.ForeignKey(Ticket, related_name='messages', on_delete=models.CASCADE)
- moderator_only = models.BooleanField(default=False)
+ sender = models.ForeignKey(User, null=True, on_delete=models.SET_NULL)
+ text = models.TextField()
+ created = models.DateTimeField(auto_now_add=True)
+ ticket = models.ForeignKey(Ticket, related_name='messages', on_delete=models.CASCADE)
+ moderator_only = models.BooleanField(default=False)
def __str__(self):
return "<# Message - ticket_id: %s, ticket_key: %s>" % \
diff --git a/tickets/templatetags/display_ticket.py b/tickets/templatetags/display_ticket.py
index 69b18863f..9ebc6d0a8 100644
--- a/tickets/templatetags/display_ticket.py
+++ b/tickets/templatetags/display_ticket.py
@@ -43,7 +43,7 @@ def display_ticket(context, ticket, include_last_message=False):
if not hasattr(ticket, 'last_message'):
if ticket_messages is None:
- ticket_messages = ticket.messages.all()
+ ticket_messages = ticket.messages.all()
last_message = ticket_messages[0] if num_messages and include_last_message else None
if last_message is not None:
last_message_text = last_message.text
@@ -56,7 +56,7 @@ def display_ticket(context, ticket, include_last_message=False):
if last_message is not None:
last_message_text = last_message['text']
last_message_sender_username = last_message['sender_username']
-
+
tvars = {
'request': context['request'],
'ticket': ticket,
@@ -68,6 +68,7 @@ def display_ticket(context, ticket, include_last_message=False):
}
return tvars
+
@register.inclusion_tag('moderation/display_ticket.html', takes_context=True)
def display_ticket_with_message(context, ticket):
return display_ticket(context, ticket, include_last_message=True)
diff --git a/tickets/templatetags/sound_tickets_count.py b/tickets/templatetags/sound_tickets_count.py
index 7d3c4418d..ba96ddb91 100644
--- a/tickets/templatetags/sound_tickets_count.py
+++ b/tickets/templatetags/sound_tickets_count.py
@@ -23,6 +23,7 @@
register = template.Library()
+
@register.simple_tag
def new_sound_tickets_count():
return TicketViews.new_sound_tickets_count()
diff --git a/tickets/tests.py b/tickets/tests.py
index 1f755828e..4da22c45d 100644
--- a/tickets/tests.py
+++ b/tickets/tests.py
@@ -18,7 +18,6 @@
# See AUTHORS file.
#
-
import hashlib
from unittest import mock
@@ -78,7 +77,8 @@ def _create_test_sound(user, filename='test_sound.wav', moderation_state='PE', p
license=sounds.models.License.objects.get(pk=1),
user=user,
md5=hashlib.md5(filename.encode()).hexdigest(),
- original_filename=filename)
+ original_filename=filename
+ )
return sound
@staticmethod
@@ -103,8 +103,9 @@ def setUp(self):
def _create_assigned_ticket(self):
"""Creates ticket that is already assigned to the moderator"""
- return self._create_ticket(self.sound, self.test_user, ticket_status=TICKET_STATUS_ACCEPTED,
- ticket_assignee=self.test_moderator)
+ return self._create_ticket(
+ self.sound, self.test_user, ticket_status=TICKET_STATUS_ACCEPTED, ticket_assignee=self.test_moderator
+ )
class TicketAccessTest(TicketTests):
@@ -159,18 +160,19 @@ def test_send_notification_user(self, send_mail_mock):
ticket = self._create_assigned_ticket()
ticket.send_notification_emails(
- tickets.models.Ticket.NOTIFICATION_APPROVED_BUT,
- tickets.models.Ticket.USER_ONLY)
+ tickets.models.Ticket.NOTIFICATION_APPROVED_BUT, tickets.models.Ticket.USER_ONLY
+ )
local_vars = {
- 'ticket': ticket,
- 'user_to': ticket.sender,
- }
+ 'ticket': ticket,
+ 'user_to': ticket.sender,
+ }
send_mail_mock.assert_called_once_with(
- settings.EMAIL_SUBJECT_MODERATION_HANDLED,
- tickets.models.Ticket.NOTIFICATION_APPROVED_BUT,
- local_vars,
- user_to=ticket.sender)
+ settings.EMAIL_SUBJECT_MODERATION_HANDLED,
+ tickets.models.Ticket.NOTIFICATION_APPROVED_BUT,
+ local_vars,
+ user_to=ticket.sender
+ )
class TicketTestsFromQueue(TicketTests):
@@ -181,9 +183,14 @@ def setUp(self):
self.ticket = self._create_assigned_ticket()
def _perform_action(self, action):
- return self.client.post(reverse('tickets-moderation-assigned', args=[self.test_moderator.id]), {
- 'action': action, 'message': '', 'ticket': self.ticket.id,
- 'is_explicit': IS_EXPLICIT_KEEP_USER_PREFERENCE_KEY})
+ return self.client.post(
+ reverse('tickets-moderation-assigned', args=[self.test_moderator.id]), {
+ 'action': action,
+ 'message': '',
+ 'ticket': self.ticket.id,
+ 'is_explicit': IS_EXPLICIT_KEEP_USER_PREFERENCE_KEY
+ }
+ )
@mock.patch('sounds.models.delete_sounds_from_search_engine')
def test_delete_ticket_from_queue(self, delete_sound_solr):
@@ -229,20 +236,20 @@ def test_defer_ticket_from_queue(self):
class TicketTestsFromTicketViewOwn(TicketTestsFromQueue):
"""Ticket state changes in a response to actions from ticket inspection page for own ticket"""
+
def _perform_action(self, action):
- return self.client.post(reverse('tickets-ticket', args=[self.ticket.key]), {
- 'ss-action': action})
+ return self.client.post(reverse('tickets-ticket', args=[self.ticket.key]), {'ss-action': action})
class TicketTestsFromTicketViewNew(TicketTestsFromQueue):
"""Ticket state changes in a response to actions from ticket inspection page for new ticket"""
+
def setUp(self):
TicketTests.setUp(self)
self.ticket = self._create_ticket(self.sound, self.test_user)
def _perform_action(self, action):
- return self.client.post(reverse('tickets-ticket', args=[self.ticket.key]), {
- 'ss-action': action})
+ return self.client.post(reverse('tickets-ticket', args=[self.ticket.key]), {'ss-action': action})
class TicketTestsIsExplicitFlagFromQueue(TicketTests):
@@ -253,8 +260,14 @@ def setUp(self):
self.ticket = self._create_assigned_ticket()
def _perform_action(self, action, is_explicit_flag_key):
- return self.client.post(reverse('tickets-moderation-assigned', args=[self.test_moderator.id]), {
- 'action': action, 'message': '', 'ticket': self.ticket.id, 'is_explicit': is_explicit_flag_key})
+ return self.client.post(
+ reverse('tickets-moderation-assigned', args=[self.test_moderator.id]), {
+ 'action': action,
+ 'message': '',
+ 'ticket': self.ticket.id,
+ 'is_explicit': is_explicit_flag_key
+ }
+ )
def test_keep_is_explicit_preference_for_explicit_sound(self):
"""Test that when approving a sound marked as 'is_explicit' it continues to be marked as such the moderator
diff --git a/tickets/urls.py b/tickets/urls.py
index 4239c1279..e53ad59e7 100644
--- a/tickets/urls.py
+++ b/tickets/urls.py
@@ -22,60 +22,32 @@
from tickets import views
urlpatterns = [
-
- path('moderation/',
- views.assign_sounds,
- name='tickets-moderation-home'),
-
- path('moderation/guide',
- views.guide,
- name='tickets-moderation-guide'),
-
- path('moderation/tardy_users_sounds/',
- views.moderation_tardy_users_sounds,
- name='tickets-moderation-tardy-users'),
-
- path('moderation/tardy_moderators_sounds/',
+ path('moderation/', views.assign_sounds, name='tickets-moderation-home'),
+ path('moderation/guide', views.guide, name='tickets-moderation-guide'),
+ path('moderation/tardy_users_sounds/', views.moderation_tardy_users_sounds, name='tickets-moderation-tardy-users'),
+ path(
+ 'moderation/tardy_moderators_sounds/',
views.moderation_tardy_moderators_sounds,
- name='tickets-moderation-tardy-moderators'),
-
- path('moderation/assign/new/',
- views.moderation_assign_all_new,
- name='tickets-moderation-assign-all-new'),
-
- path('moderation/assign/
/new',
- views.moderation_assign_user,
- name='tickets-moderation-assign-user-new'),
-
- path('moderation/assign//pending',
+ name='tickets-moderation-tardy-moderators'
+ ),
+ path('moderation/assign/new/', views.moderation_assign_all_new, name='tickets-moderation-assign-all-new'),
+ path(
+ 'moderation/assign//new', views.moderation_assign_user, name='tickets-moderation-assign-user-new'
+ ),
+ path(
+ 'moderation/assign//pending',
views.moderation_assign_user_pending,
- name='tickets-moderation-assign-user-pending'),
-
- path('moderation/assigned//',
- views.moderation_assigned,
- name='tickets-moderation-assigned'),
-
- path('moderation/assign/ticket//',
+ name='tickets-moderation-assign-user-pending'
+ ),
+ path('moderation/assigned//', views.moderation_assigned, name='tickets-moderation-assigned'),
+ path(
+ 'moderation/assign/ticket//',
views.moderation_assign_single_ticket,
- name='tickets-moderation-assign-single-ticket'),
-
- path('moderation/annotations//',
- views.user_annotations,
- name='tickets-user-annotations'),
-
- path('moderation/annotations/add//',
- views.add_user_annotation,
- name='tickets-add-user-annotation'),
-
- path('moderation/pending//',
- views.pending_tickets_per_user,
- name='tickets-user-pending_sounds'),
-
- path('/',
- views.ticket,
- name='tickets-ticket'),
-
- path('moderation/whitelist//',
- views.whitelist_user,
- name='tickets-whitelist-user'),
+ name='tickets-moderation-assign-single-ticket'
+ ),
+ path('moderation/annotations//', views.user_annotations, name='tickets-user-annotations'),
+ path('moderation/annotations/add//', views.add_user_annotation, name='tickets-add-user-annotation'),
+ path('moderation/pending//', views.pending_tickets_per_user, name='tickets-user-pending_sounds'),
+ path('/', views.ticket, name='tickets-ticket'),
+ path('moderation/whitelist//', views.whitelist_user, name='tickets-whitelist-user'),
]
diff --git a/tickets/views.py b/tickets/views.py
index de9e15cf3..aefcd0782 100644
--- a/tickets/views.py
+++ b/tickets/views.py
@@ -18,7 +18,6 @@
# See AUTHORS file.
#
-
import datetime
import json
@@ -32,7 +31,7 @@
from django.http import HttpResponseRedirect, JsonResponse, Http404
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse
-from django.shortcuts import render
+from django.shortcuts import render
from general.tasks import whitelist_user as whitelist_user_task
from .models import Ticket, TicketComment, UserAnnotation
@@ -47,10 +46,7 @@
def _get_tc_form(request, use_post=True):
- return _get_anon_or_user_form(request,
- AnonymousMessageForm,
- UserMessageForm,
- use_post)
+ return _get_anon_or_user_form(request, AnonymousMessageForm, UserMessageForm, use_post)
def _get_anon_or_user_form(request, anonymous_form, user_form, use_post=True):
@@ -123,8 +119,7 @@ def ticket(request, ticket_key):
email_to = Ticket.MODERATOR_ONLY
else:
email_to = Ticket.USER_ONLY
- ticket.send_notification_emails(ticket.NOTIFICATION_UPDATED,
- email_to)
+ ticket.send_notification_emails(ticket.NOTIFICATION_UPDATED, email_to)
else:
clean_comment_form = False
# update sound ticket
@@ -158,7 +153,7 @@ def ticket(request, ticket_key):
elif sound_action == 'Defer':
ticket.status = TICKET_STATUS_DEFERRED
- ticket.sound.change_moderation_state('PE') # not sure if this state have been used before
+ ticket.sound.change_moderation_state('PE') # not sure if this state have been used before
comment += 'deferred the ticket'
elif sound_action == "Return":
@@ -174,28 +169,24 @@ def ticket(request, ticket_key):
notification = ticket.NOTIFICATION_APPROVED
elif sound_action == 'Whitelist':
- whitelist_user_task.delay(ticket_ids=[ticket.id]) # async job should take care of whitelisting
+ whitelist_user_task.delay(ticket_ids=[ticket.id]) # async job should take care of whitelisting
comment += f'whitelisted all sounds from user {ticket.sender}'
notification = ticket.NOTIFICATION_WHITELISTED
elif sound_action == 'Close':
# This option in never shown in the form, but used when needing to close a ticket which has no sound associated (see ticket.html)
ticket.status = TICKET_STATUS_CLOSED
- comment = None # Avoid adding a comment to the ticket
+ comment = None # Avoid adding a comment to the ticket
if notification is not None:
- ticket.send_notification_emails(notification,
- ticket.USER_ONLY)
+ ticket.send_notification_emails(notification, ticket.USER_ONLY)
if ticket.sound is not None:
ticket.sound.save()
-
+
ticket.save()
if comment is not None:
- tc = TicketComment(sender=request.user,
- text=comment,
- ticket=ticket,
- moderator_only=False)
+ tc = TicketComment(sender=request.user, text=comment, ticket=ticket, moderator_only=False)
tc.save()
if clean_status_forms:
@@ -209,33 +200,29 @@ def ticket(request, ticket_key):
else:
num_mod_annotations = None
- tvars = {"ticket": ticket,
- "tc_form": tc_form,
- "sound_form": sound_form,
- "num_mod_annotations": num_mod_annotations,
- "can_view_moderator_only_messages": can_view_moderator_only_messages,
- "num_sounds_pending": ticket.sender.profile.num_sounds_pending_moderation()
+ tvars = {
+ "ticket": ticket,
+ "tc_form": tc_form,
+ "sound_form": sound_form,
+ "num_mod_annotations": num_mod_annotations,
+ "can_view_moderator_only_messages": can_view_moderator_only_messages,
+ "num_sounds_pending": ticket.sender.profile.num_sounds_pending_moderation()
}
sound_object = Sound.objects.bulk_query_id(sound_ids=[ticket.sound_id])[0] if ticket.sound_id is not None else None
if sound_object is not None:
sound_object.show_processing_status = True
sound_object.show_moderation_status = True
- tvars.update({
- 'sound': sound_object
- })
+ tvars.update({'sound': sound_object})
return render(request, 'moderation/ticket.html', tvars)
-
# In the next 2 functions we return a queryset os the evaluation is lazy.
# N.B. these functions are used in the home page as well.
def new_sound_tickets_count():
return Ticket.objects.filter(
- assignee=None,
- sound__moderation_state='PE',
- sound__processing_state='OK',
- status=TICKET_STATUS_NEW).count()
+ assignee=None, sound__moderation_state='PE', sound__processing_state='OK', status=TICKET_STATUS_NEW
+ ).count()
def _get_new_uploaders_by_ticket():
@@ -247,16 +234,19 @@ def _get_new_uploaders_by_ticket():
.annotate(total=Count('sender'), older=Min('created'))\
.order_by('older')
- users = User.objects.filter(id__in=[t['sender'] for t in tickets]).annotate(num_mod_annotations=Count('annotations')).select_related('profile')
+ users = User.objects.filter(id__in=[t['sender'] for t in tickets]
+ ).annotate(num_mod_annotations=Count('annotations')).select_related('profile')
users_dict = {u.id: u for u in users}
new_sounds_users = []
for t in tickets:
- new_sounds_users.append({"user": users_dict[t['sender']],
- "username": users_dict[t['sender']].username,
- "new_count": t['total'],
- "num_uploaded_sounds": users_dict[t['sender']].profile.num_sounds,
- "time": (datetime.datetime.now() - t['older']).days})
+ new_sounds_users.append({
+ "user": users_dict[t['sender']],
+ "username": users_dict[t['sender']].username,
+ "new_count": t['total'],
+ "num_uploaded_sounds": users_dict[t['sender']].profile.num_sounds,
+ "time": (datetime.datetime.now() - t['older']).days
+ })
return new_sounds_users
@@ -264,14 +254,20 @@ def _annotate_tickets_queryset_with_message_info(qs, include_mod_messages=True):
if include_mod_messages:
return qs.select_related('assignee', 'sender').annotate(
num_messages=Count('messages'),
- last_message=TicketComment.objects.filter(ticket_id=OuterRef('id')).select_related('sender').order_by('-created').values(
- data=JSONObject(text="text", sender_username='sender__username'))[:1])
+ last_message=TicketComment.objects.filter(
+ ticket_id=OuterRef('id')
+ ).select_related('sender').order_by('-created').values(
+ data=JSONObject(text="text", sender_username='sender__username')
+ )[:1]
+ )
else:
return qs.select_related('assignee', 'sender').annotate(
num_messages=Count('messages', filter=Q(messages__moderator_only=False)),
- last_message=TicketComment.objects.filter(ticket_id=OuterRef('id'), moderator_only=False).select_related('sender').order_by('-created').values(
- data=JSONObject(text="text", sender_username='sender__username'))[:1])
-
+ last_message=TicketComment.objects.filter(ticket_id=OuterRef('id'),
+ moderator_only=False).select_related('sender').
+ order_by('-created').values(data=JSONObject(text="text", sender_username='sender__username'))[:1]
+ )
+
def _add_sound_objects_to_tickets(tickets):
sound_objects = Sound.objects.dict_ids(sound_ids=[ticket.sound_id for ticket in tickets])
@@ -341,7 +337,7 @@ def assign_sounds(request):
# If a order is specified, update the session parameter with that order
request.session["mod_assign_sounds_order"] = order_from_req_param
order = request.session.get("mod_assign_sounds_order", "days_in_queue")
-
+
if order == "username":
new_sounds_users = sorted(new_sounds_users, key=lambda x: x["username"])
elif order == "new_count":
@@ -354,37 +350,42 @@ def assign_sounds(request):
# Default option, sort by number of days in queue
new_sounds_users = sorted(new_sounds_users, key=lambda x: x["time"], reverse=True)
- tardy_moderator_tickets, tardy_moderator_tickets_count = _get_tardy_moderator_tickets_and_count(num=8, include_mod_messages=True)
+ tardy_moderator_tickets, tardy_moderator_tickets_count = _get_tardy_moderator_tickets_and_count(
+ num=8, include_mod_messages=True
+ )
tardy_user_tickets, tardy_user_tickets_count = _get_tardy_user_tickets_and_count(num=8, include_mod_messages=True)
- tvars = {"new_sounds_users": new_sounds_users,
- "num_sounds_pending": num_sounds_pending,
- "order": order,
- "tardy_moderator_tickets": tardy_moderator_tickets,
- "tardy_user_tickets": tardy_user_tickets,
- "tardy_moderator_tickets_count": tardy_moderator_tickets_count,
- "tardy_user_tickets_count": tardy_user_tickets_count,
- "moderator_tickets_count": sounds_in_moderators_queue_count
- }
+ tvars = {
+ "new_sounds_users": new_sounds_users,
+ "num_sounds_pending": num_sounds_pending,
+ "order": order,
+ "tardy_moderator_tickets": tardy_moderator_tickets,
+ "tardy_user_tickets": tardy_user_tickets,
+ "tardy_moderator_tickets_count": tardy_moderator_tickets_count,
+ "tardy_user_tickets_count": tardy_user_tickets_count,
+ "moderator_tickets_count": sounds_in_moderators_queue_count
+ }
_add_sound_objects_to_tickets(tardy_moderator_tickets)
_add_sound_objects_to_tickets(tardy_user_tickets)
tvars.update({'section': 'assign'})
- return render(request, 'moderation/assign_sounds.html', tvars)
-
+ return render(request, 'moderation/assign_sounds.html', tvars)
+
@permission_required('tickets.can_moderate')
def moderation_tardy_users_sounds(request):
if not request.GET.get('ajax'):
# If not loaded as a modal, redirect to moderation home with parameter to open modal
return HttpResponseRedirect(reverse('tickets-moderation-home') + '?tardy_users=1')
-
+
sounds_in_moderators_queue_count = _get_sounds_in_moderators_queue_count(request.user)
tardy_user_tickets, _ = _get_tardy_user_tickets_and_count(include_mod_messages=True)
paginated = paginate(request, tardy_user_tickets, settings.SOUNDS_PENDING_MODERATION_PER_PAGE)
- tvars = {"moderator_tickets_count": sounds_in_moderators_queue_count,
- "tardy_user_tickets": tardy_user_tickets,
- "selected": "assigned"}
+ tvars = {
+ "moderator_tickets_count": sounds_in_moderators_queue_count,
+ "tardy_user_tickets": tardy_user_tickets,
+ "selected": "assigned"
+ }
tvars.update(paginated)
# Retrieve sound objects using bulk stuff so extra sound information is retrieved in one query
@@ -398,14 +399,16 @@ def moderation_tardy_moderators_sounds(request):
if not request.GET.get('ajax'):
# If not loaded as a modal, redirect to moderation home with parameter to open modal
return HttpResponseRedirect(reverse('tickets-moderation-home') + '?tardy_moderators=1')
-
+
sounds_in_moderators_queue_count = _get_sounds_in_moderators_queue_count(request.user)
tardy_moderators_tickets, _ = _get_tardy_moderator_tickets_and_count(include_mod_messages=True)
paginated = paginate(request, tardy_moderators_tickets, settings.SOUNDS_PENDING_MODERATION_PER_PAGE)
- tvars = {"moderator_tickets_count": sounds_in_moderators_queue_count,
- "tardy_moderators_tickets": tardy_moderators_tickets,
- "selected": "assigned"}
+ tvars = {
+ "moderator_tickets_count": sounds_in_moderators_queue_count,
+ "tardy_moderators_tickets": tardy_moderators_tickets,
+ "selected": "assigned"
+ }
tvars.update(paginated)
# Retrieve sound objects using bulk stuff so extra sound information is retrieved in one query
@@ -420,10 +423,9 @@ def moderation_assign_all_new(request):
Assigns all new unassigned tickets to the current user logged in
"""
- tickets = Ticket.objects.filter(assignee=None,
- sound__processing_state='OK',
- sound__moderation_state='PE',
- status=TICKET_STATUS_NEW)
+ tickets = Ticket.objects.filter(
+ assignee=None, sound__processing_state='OK', sound__moderation_state='PE', status=TICKET_STATUS_NEW
+ )
tickets.update(assignee=request.user, status=TICKET_STATUS_ACCEPTED, modified=datetime.datetime.now())
@@ -485,11 +487,11 @@ def moderation_assign_single_ticket(request, ticket_id):
if next == "tardy_users":
return redirect("tickets-moderation-tardy-users")
elif next == "tardy_moderators":
- return redirect(reverse("tickets-moderation-tardy-moderators")+f"?page={p}")
+ return redirect(reverse("tickets-moderation-tardy-moderators") + f"?page={p}")
elif next == "ticket":
return redirect(reverse("tickets-ticket", kwargs={'ticket_key': ticket.key}))
else:
- return redirect(reverse("tickets-moderation-home")+f"?page={p}")
+ return redirect(reverse("tickets-moderation-home") + f"?page={p}")
else:
return redirect("tickets-moderation-home")
@@ -567,16 +569,17 @@ def moderation_assigned(request, user_id):
notification = Ticket.NOTIFICATION_DELETED
elif action == "Whitelist":
- ticket_ids = list(tickets.values_list('id',flat=True))
- whitelist_user_task.delay(ticket_ids=ticket_ids) # async job should take care of whitelisting
+ ticket_ids = list(tickets.values_list('id', flat=True))
+ whitelist_user_task.delay(ticket_ids=ticket_ids) # async job should take care of whitelisting
notification = Ticket.NOTIFICATION_WHITELISTED
users = set(tickets.values_list('sender__username', flat=True))
- messages.add_message(request, messages.INFO,
- """User(s) %s has/have been whitelisted. Some of tickets might
+ messages.add_message(
+ request, messages.INFO, """User(s) %s has/have been whitelisted. Some of tickets might
still appear on this list for some time. Please reload the
page in a few seconds to see the updated list of pending
- tickets""" % ", ".join(users))
+ tickets""" % ", ".join(users)
+ )
for ticket in tickets:
if action != "Delete":
@@ -590,10 +593,7 @@ def moderation_assigned(request, user_id):
moderator_only = msg_form.cleaned_data.get("moderator_only", False)
if msg:
- tc = TicketComment(sender=ticket.assignee,
- text=msg,
- ticket=ticket,
- moderator_only=moderator_only)
+ tc = TicketComment(sender=ticket.assignee, text=msg, ticket=ticket, moderator_only=moderator_only)
tc.save()
# Send emails
@@ -643,7 +643,7 @@ def moderation_assigned(request, user_id):
else:
num_mod_annotations = users_num_mod_annotations[ticket.sender_id]
ticket.num_mod_annotations = num_mod_annotations
-
+
moderator_tickets_count = qs.count()
show_pagination = moderator_tickets_count > page_size
@@ -669,13 +669,16 @@ def user_annotations(request, user_id):
if not request.GET.get('ajax'):
# If not loaded as a modal, redirect to account page with parameter to open modal
return HttpResponseRedirect(reverse('account', args=[user.username]) + '?mod_annotations=1')
-
+
annotations = UserAnnotation.objects.filter(user=user)
- user_recent_ticket_comments = TicketComment.objects.filter(sender=user).select_related('ticket').order_by('-created')[:15]
- tvars = {"user": user,
- "recent_comments": user_recent_ticket_comments,
- "form": UserAnnotationForm(),
- "annotations": annotations}
+ user_recent_ticket_comments = TicketComment.objects.filter(sender=user
+ ).select_related('ticket').order_by('-created')[:15]
+ tvars = {
+ "user": user,
+ "recent_comments": user_recent_ticket_comments,
+ "form": UserAnnotationForm(),
+ "annotations": annotations
+ }
return render(request, 'moderation/modal_annotations.html', tvars)
@@ -686,12 +689,16 @@ def add_user_annotation(request, user_id):
if request.method == 'POST':
form = UserAnnotationForm(request.POST)
if form.is_valid():
- ua = UserAnnotation(sender=request.user,
- user=user,
- text=form.cleaned_data['text'])
+ ua = UserAnnotation(sender=request.user, user=user, text=form.cleaned_data['text'])
ua.save()
- return JsonResponse({'message': 'Annotation successfully added', 'num_annotations': UserAnnotation.objects.filter(user=user).count()})
- return JsonResponse({'message': 'Annotation could not be added', 'num_annotations': UserAnnotation.objects.filter(user=user).count()})
+ return JsonResponse({
+ 'message': 'Annotation successfully added',
+ 'num_annotations': UserAnnotation.objects.filter(user=user).count()
+ })
+ return JsonResponse({
+ 'message': 'Annotation could not be added',
+ 'num_annotations': UserAnnotation.objects.filter(user=user).count()
+ })
@permission_required('tickets.can_moderate')
@@ -700,7 +707,7 @@ def pending_tickets_per_user(request, username):
if not request.GET.get('ajax'):
# If not loaded as a modal, redirect to account page with parameter to open modal
return HttpResponseRedirect(reverse('account', args=[username]) + '?pending_moderation=1')
-
+
user = request.parameter_user
tickets, _ = _get_pending_tickets_for_user(user, include_mod_messages=True)
_add_sound_objects_to_tickets(tickets)
@@ -708,25 +715,28 @@ def pending_tickets_per_user(request, username):
for ticket in tickets:
mods.add(ticket.assignee)
show_pagination = len(tickets) > settings.SOUNDS_PENDING_MODERATION_PER_PAGE
-
+
n_unprocessed_sounds = Sound.objects.select_related().filter(user=user).exclude(processing_state="OK").count()
if n_unprocessed_sounds:
- messages.add_message(request, messages.WARNING,
- """%i of %s's recently uploaded sounds are still in processing
+ messages.add_message(
+ request, messages.WARNING, """%i of %s's recently uploaded sounds are still in processing
phase and therefore are not yet ready for moderation. These
sounds won't appear in this list until they are successfully
- processed.""" % (n_unprocessed_sounds, user.username))
+ processed.""" % (n_unprocessed_sounds, user.username)
+ )
moderators_version = True
own_page = user == request.user
no_assign_button = len(mods) == 0 or (len(mods) == 1 and request.user in mods)
paginated = paginate(request, tickets, settings.SOUNDS_PENDING_MODERATION_PER_PAGE)
- tvars = {"show_pagination": show_pagination,
- "moderators_version": moderators_version,
- "user": user,
- "own_page": own_page,
- "no_assign_button": no_assign_button}
+ tvars = {
+ "show_pagination": show_pagination,
+ "moderators_version": moderators_version,
+ "user": user,
+ "own_page": own_page,
+ "no_assign_button": no_assign_button
+ }
tvars.update(paginated)
return render(request, 'moderation/modal_pending.html', tvars)
@@ -750,15 +760,15 @@ def whitelist_user(request, user_id):
try:
user = User.objects.get(id=user_id)
except (User.DoesNotExist, AttributeError):
- messages.add_message(request, messages.ERROR,
- """The user you are trying to whitelist does not exist""")
+ messages.add_message(request, messages.ERROR, """The user you are trying to whitelist does not exist""")
return HttpResponseRedirect(reverse('tickets-moderation-home'))
-
- whitelist_user_task(user_id=user_id) # async job should take care of whitelisting
-
- messages.add_message(request, messages.INFO,
- f"""User {user.username} has been whitelisted. Note that some of tickets might
- still appear on her pending tickets list for some time.""")
+
+ whitelist_user_task(user_id=user_id) # async job should take care of whitelisting
+
+ messages.add_message(
+ request, messages.INFO, f"""User {user.username} has been whitelisted. Note that some of tickets might
+ still appear on her pending tickets list for some time."""
+ )
redirect_to = request.GET.get('next', None)
if redirect_to is not None:
diff --git a/utils/admin_helpers.py b/utils/admin_helpers.py
index ddc767e0f..56b4383b0 100644
--- a/utils/admin_helpers.py
+++ b/utils/admin_helpers.py
@@ -26,26 +26,29 @@
class NoPkDescOrderedChangeList(ChangeList):
+
def get_ordering(self, request, queryset):
rv = super().get_ordering(request, queryset)
rv = list(rv)
rv.remove('-pk') if '-pk' in rv else None
return tuple(rv)
-
+
class LargeTablePaginator(Paginator):
""" We use the information on postgres table 'reltuples' to avoid using count(*) for performance. """
+
@cached_property
def count(self):
try:
if not self.object_list.query.where:
cursor = connection.cursor()
- cursor.execute("SELECT reltuples FROM pg_class WHERE relname = %s",
- [self.object_list.query.model._meta.db_table])
+ cursor.execute(
+ "SELECT reltuples FROM pg_class WHERE relname = %s", [self.object_list.query.model._meta.db_table]
+ )
ret = int(cursor.fetchone()[0])
return ret
- else :
+ else:
return self.object_list.count()
- except :
+ except:
# AttributeError if object_list has no count() method.
return len(self.object_list)
diff --git a/utils/audioprocessing/color_schemes.py b/utils/audioprocessing/color_schemes.py
index 65945ba04..560a18273 100644
--- a/utils/audioprocessing/color_schemes.py
+++ b/utils/audioprocessing/color_schemes.py
@@ -27,80 +27,81 @@ def color_from_value(value):
RAINFOREST_COLOR_SCHEME = 'Rainforest'
DEFAULT_COLOR_SCHEME_KEY = FREESOUND2_COLOR_SCHEME
-
COLOR_SCHEMES = {
FREESOUND2_COLOR_SCHEME: {
'wave_colors': [
- (0, 0, 0), # Background color
- (50, 0, 200), # Low spectral centroid
+ (0, 0, 0), # Background color
+ (50, 0, 200), # Low spectral centroid
(0, 220, 80),
(255, 224, 0),
- (255, 70, 0), # High spectral centroid
+ (255, 70, 0), # High spectral centroid
],
'spec_colors': [
- (0, 0, 0), # Background color
- (old_div(58,4), old_div(68,4), old_div(65,4)),
- (old_div(80,2), old_div(100,2), old_div(153,2)),
+ (0, 0, 0), # Background color
+ (old_div(58, 4), old_div(68, 4), old_div(65, 4)),
+ (old_div(80, 2), old_div(100, 2), old_div(153, 2)),
(90, 180, 100),
(224, 224, 44),
(255, 60, 30),
(255, 255, 255)
- ],
- 'wave_zero_line_alpha': 25,
+ ],
+ 'wave_zero_line_alpha': 25,
},
OLD_BEASTWHOOSH_COLOR_SCHEME: {
'wave_colors': [
- (255, 255, 255), # Background color
- (29, 159, 181), # 1D9FB5, Low spectral centroid
- (28, 174, 72), # 1CAE48
- (255, 158, 53), # FF9E35
- (255, 53, 70), # FF3546, High spectral centroid
+ (255, 255, 255), # Background color
+ (29, 159, 181), # 1D9FB5, Low spectral centroid
+ (28, 174, 72), # 1CAE48
+ (255, 158, 53), # FF9E35
+ (255, 53, 70), # FF3546, High spectral centroid
],
'spec_colors': [
- (0, 0, 0), # Background color/Low spectral energy
- (29, 159, 181), # 1D9FB5
- (28, 174, 72), # 1CAE48
- (255, 158, 53), # FF9E35
- (255, 53, 70), # FF3546, High spectral energy
- ]
+ (0, 0, 0), # Background color/Low spectral energy
+ (29, 159, 181), # 1D9FB5
+ (28, 174, 72), # 1CAE48
+ (255, 158, 53), # FF9E35
+ (255, 53, 70), # FF3546, High spectral energy
+ ]
},
BEASTWHOOSH_COLOR_SCHEME: {
'wave_colors': [
- (20, 20, 36), # Background color (not really used as we use transparent mode)
- (29, 159, 181), # Low spectral centroid
+ (20, 20, 36), # Background color (not really used as we use transparent mode)
+ (29, 159, 181), # Low spectral centroid
(0, 220, 80),
(255, 200, 58),
- (255, 0, 70), # High spectral centroid
+ (255, 0, 70), # High spectral centroid
],
'spec_colors': [
- (20, 20, 36), # Low spectral energy
+ (20, 20, 36), # Low spectral energy
(0, 18, 25),
(0, 37, 56),
- (11, 95, 118),
+ (11, 95, 118),
(29, 159, 181),
(0, 220, 80),
(255, 200, 58),
(255, 125, 0),
- (255, 0, 70),
- (255, 0, 20), # High spectral energy
+ (255, 0, 70),
+ (255, 0, 20), # High spectral energy
],
'wave_transparent_background': True,
'wave_zero_line_alpha': 12,
},
CYBERPUNK_COLOR_SCHEME: {
- 'wave_colors': [(0, 0, 0)] + [color_from_value(value/29.0) for value in range(0, 30)],
- 'spec_colors': [(0, 0, 0)] + [color_from_value(value/29.0) for value in range(0, 30)],
+ 'wave_colors': [(0, 0, 0)] + [color_from_value(value / 29.0) for value in range(0, 30)],
+ 'spec_colors': [(0, 0, 0)] + [color_from_value(value / 29.0) for value in range(0, 30)],
},
RAINFOREST_COLOR_SCHEME: {
- 'wave_colors': [(213, 217, 221)] + list(map(partial(desaturate, amount=0.7), [
- (50, 0, 200),
- (0, 220, 80),
- (255, 224, 0),
- ])),
- 'spec_colors': [(213, 217, 221)] + list(map(partial(desaturate, amount=0.7), [
- (50, 0, 200),
- (0, 220, 80),
- (255, 224, 0),
- ])),
+ 'wave_colors': [(213, 217, 221)] +
+ list(map(partial(desaturate, amount=0.7), [
+ (50, 0, 200),
+ (0, 220, 80),
+ (255, 224, 0),
+ ])),
+ 'spec_colors': [(213, 217, 221)] +
+ list(map(partial(desaturate, amount=0.7), [
+ (50, 0, 200),
+ (0, 220, 80),
+ (255, 224, 0),
+ ])),
}
}
diff --git a/utils/audioprocessing/convert_to_wav.py b/utils/audioprocessing/convert_to_wav.py
index 5e1b7aff2..6c0859d15 100644
--- a/utils/audioprocessing/convert_to_wav.py
+++ b/utils/audioprocessing/convert_to_wav.py
@@ -27,7 +27,7 @@
try:
info = audio_info(sys.argv[2])
- if not ( info["bits"] == 16 and info["samplerate"] == 44100 and info["channels"] == 2 and info["duration"] > 0 ):
+ if not (info["bits"] == 16 and info["samplerate"] == 44100 and info["channels"] == 2 and info["duration"] > 0):
print("warning, created file is not 44.1, stereo, 16bit!")
except AudioProcessingException as e:
print("warning, audio processing seems to have failed:", e)
diff --git a/utils/audioprocessing/freesound_audio_processing.py b/utils/audioprocessing/freesound_audio_processing.py
index 82972f9ba..ddac0dc5d 100644
--- a/utils/audioprocessing/freesound_audio_processing.py
+++ b/utils/audioprocessing/freesound_audio_processing.py
@@ -18,7 +18,6 @@
# See AUTHORS file.
#
-
from past.utils import old_div
import json
import os
@@ -70,8 +69,9 @@ def cancel_timeout_alarm():
signal.alarm(0)
-def check_if_free_space(directory=settings.PROCESSING_TEMP_DIR,
- min_disk_space_percentage=settings.WORKER_MIN_FREE_DISK_SPACE_PERCENTAGE):
+def check_if_free_space(
+ directory=settings.PROCESSING_TEMP_DIR, min_disk_space_percentage=settings.WORKER_MIN_FREE_DISK_SPACE_PERCENTAGE
+):
"""
Checks if there is free disk space in the volume of the given 'directory'. If percentage of free disk space in this
volume is lower than 'min_disk_space_percentage', this function raises WorkerException.
@@ -82,8 +82,10 @@ def check_if_free_space(directory=settings.PROCESSING_TEMP_DIR,
stats = os.statvfs(directory)
percentage_free = stats.f_bfree * 1.0 / stats.f_blocks
if percentage_free < min_disk_space_percentage:
- raise WorkerException("Disk is running out of space, "
- "aborting task as there might not be enough space for temp files")
+ raise WorkerException(
+ "Disk is running out of space, "
+ "aborting task as there might not be enough space for temp files"
+ )
class FreesoundAudioProcessorBase:
@@ -107,7 +109,7 @@ def log_error(self, message):
self.work_log += message + '\n'
def set_failure(self, message, error=None):
- logging_message = f"sound with id {self.sound.id} failed\n"
+ logging_message = f"sound with id {self.sound.id} failed\n"
logging_message += f"\tmessage: {message}\n"
if error:
logging_message += f"\terror: {str(error)}"
@@ -150,15 +152,17 @@ def convert_to_pcm(self, sound_path, tmp_directory, force_use_ffmpeg=False, mono
# Close file handler as we don't use it from Python
os.close(fh)
if force_use_ffmpeg:
- raise AudioProcessingException() # Go to directly to ffmpeg conversion
+ raise AudioProcessingException() # Go to directly to ffmpeg conversion
if not audioprocessing.convert_to_pcm(sound_path, tmp_wavefile):
tmp_wavefile = sound_path
self.log_info("no need to convert, this file is already PCM data")
except OSError as e:
# Could not create tmp file
- raise AudioProcessingException(f"could not create tmp_wavefile file, "
- f"make sure that format conversion executables exist: {str(e)}")
+ raise AudioProcessingException(
+ f"could not create tmp_wavefile file, "
+ f"make sure that format conversion executables exist: {str(e)}"
+ )
except AudioProcessingException as e:
# Conversion with format codecs has failed (or skipped using 'force_use_ffmpeg' argument)
@@ -168,8 +172,10 @@ def convert_to_pcm(self, sound_path, tmp_directory, force_use_ffmpeg=False, mono
except AudioProcessingException as e:
raise AudioProcessingException(f"conversion to PCM failed: {e}")
except OSError as e:
- raise AudioProcessingException("conversion to PCM failed, "
- "make sure that ffmpeg executable exists: %s" % e)
+ raise AudioProcessingException(
+ "conversion to PCM failed, "
+ "make sure that ffmpeg executable exists: %s" % e
+ )
except Exception as e:
raise AudioProcessingException(f"unhandled exception while converting to PCM: {e}")
@@ -186,9 +192,8 @@ def set_failure(self, message, error=None):
def process(self, skip_previews=False, skip_displays=False, update_sound_processing_state_in_db=True):
- with TemporaryDirectory(
- prefix=f'processing_{self.sound.id}_',
- dir=settings.PROCESSING_TEMP_DIR) as tmp_directory:
+ with TemporaryDirectory(prefix=f'processing_{self.sound.id}_',
+ dir=settings.PROCESSING_TEMP_DIR) as tmp_directory:
# Change ongoing processing state to "processing" in Sound model
if update_sound_processing_state_in_db:
@@ -210,8 +215,10 @@ def process(self, skip_previews=False, skip_displays=False, update_sound_process
info = audioprocessing.stereofy_and_find_info(settings.STEREOFY_PATH, tmp_wavefile, tmp_wavefile2)
except OSError as e:
# Could not create tmp file
- self.set_failure(f"could not create tmp_wavefile2 file, "
- f"make stereofy sure executable exists at {settings.SOUNDS_PATH}", e)
+ self.set_failure(
+ f"could not create tmp_wavefile2 file, "
+ f"make stereofy sure executable exists at {settings.SOUNDS_PATH}", e
+ )
return False
except AudioProcessingException as e:
if "File contains data in an unknown format" in str(e):
@@ -222,8 +229,9 @@ def process(self, skip_previews=False, skip_displays=False, update_sound_process
self.log_info("stereofy failed, trying re-creating PCM file with ffmpeg and re-running stereofy")
try:
tmp_wavefile = self.convert_to_pcm(sound_path, tmp_directory, force_use_ffmpeg=True)
- info = audioprocessing.stereofy_and_find_info(settings.STEREOFY_PATH,
- tmp_wavefile, tmp_wavefile2)
+ info = audioprocessing.stereofy_and_find_info(
+ settings.STEREOFY_PATH, tmp_wavefile, tmp_wavefile2
+ )
except AudioProcessingException as e:
self.set_failure("re-run of stereofy with ffmpeg conversion has failed", str(e))
return False
@@ -242,7 +250,7 @@ def process(self, skip_previews=False, skip_displays=False, update_sound_process
# Fill audio information fields in Sound object
try:
if self.sound.type in settings.LOSSY_FILE_EXTENSIONS:
- info['bitdepth'] = 0 # mp3 and ogg don't have bitdepth
+ info['bitdepth'] = 0 # mp3 and ogg don't have bitdepth
if info['duration'] > 0:
raw_bitrate = int(round(old_div(old_div(self.sound.filesize * 8, info['duration']), 1000)))
# Here we post-process a bit the bitrate to account for small rounding errors
@@ -250,7 +258,8 @@ def process(self, skip_previews=False, skip_displays=False, update_sound_process
differences_with_common_bitrates = [abs(cbt - raw_bitrate) for cbt in settings.COMMON_BITRATES]
min_difference = min(differences_with_common_bitrates)
if min_difference <= 2:
- info['bitrate'] = settings.COMMON_BITRATES[differences_with_common_bitrates.index(min_difference)]
+ info['bitrate'] = settings.COMMON_BITRATES[
+ differences_with_common_bitrates.index(min_difference)]
else:
info['bitrate'] = raw_bitrate
else:
@@ -258,7 +267,7 @@ def process(self, skip_previews=False, skip_displays=False, update_sound_process
else:
info['bitrate'] = 0
self.sound.set_audio_info_fields(**info)
- except Exception as e: # Could not catch a more specific exception
+ except Exception as e: # Could not catch a more specific exception
self.set_failure("failed writting audio info fields to db", e)
return False
@@ -274,13 +283,17 @@ def process(self, skip_previews=False, skip_displays=False, update_sound_process
return False
# Generate MP3 previews
- for mp3_path, quality in [(self.sound.locations("preview.LQ.mp3.path"), settings.MP3_LQ_PREVIEW_QUALITY),
- (self.sound.locations("preview.HQ.mp3.path"), settings.MP3_HQ_PREVIEW_QUALITY)]:
+ for mp3_path, quality in [
+ (self.sound.locations("preview.LQ.mp3.path"), settings.MP3_LQ_PREVIEW_QUALITY),
+ (self.sound.locations("preview.HQ.mp3.path"), settings.MP3_HQ_PREVIEW_QUALITY)
+ ]:
try:
audioprocessing.convert_to_mp3(tmp_wavefile2, mp3_path, quality)
except OSError as e:
- self.set_failure("conversion to mp3 (preview) has failed, "
- "make sure that lame executable exists: %s" % e)
+ self.set_failure(
+ "conversion to mp3 (preview) has failed, "
+ "make sure that lame executable exists: %s" % e
+ )
return False
except AudioProcessingException as e:
@@ -292,13 +305,17 @@ def process(self, skip_previews=False, skip_displays=False, update_sound_process
self.log_info("created mp3: " + mp3_path)
# Generate OGG previews
- for ogg_path, quality in [(self.sound.locations("preview.LQ.ogg.path"), settings.OGG_LQ_PREVIEW_QUALITY),
- (self.sound.locations("preview.HQ.ogg.path"), settings.OGG_HQ_PREVIEW_QUALITY)]:
+ for ogg_path, quality in [
+ (self.sound.locations("preview.LQ.ogg.path"), settings.OGG_LQ_PREVIEW_QUALITY),
+ (self.sound.locations("preview.HQ.ogg.path"), settings.OGG_HQ_PREVIEW_QUALITY)
+ ]:
try:
audioprocessing.convert_to_ogg(tmp_wavefile2, ogg_path, quality)
except OSError as e:
- self.set_failure("conversion to ogg (preview) has failed, "
- "make sure that oggenc executable exists: %s" % e)
+ self.set_failure(
+ "conversion to ogg (preview) has failed, "
+ "make sure that oggenc executable exists: %s" % e
+ )
return False
except AudioProcessingException as e:
self.set_failure("conversion to ogg (preview) has failed", e)
@@ -321,19 +338,26 @@ def process(self, skip_previews=False, skip_displays=False, update_sound_process
# Generate display images, M and L sizes for NG and BW front-ends
for width, height, color_scheme, waveform_path, spectral_path in [
- (120, 71, color_schemes.FREESOUND2_COLOR_SCHEME,
- self.sound.locations("display.wave.M.path"), self.sound.locations("display.spectral.M.path")),
- (900, 201, color_schemes.FREESOUND2_COLOR_SCHEME,
- self.sound.locations("display.wave.L.path"), self.sound.locations("display.spectral.L.path")),
- (195, 101, color_schemes.BEASTWHOOSH_COLOR_SCHEME,
- self.sound.locations("display.wave_bw.M.path"), self.sound.locations("display.spectral_bw.M.path")),
- (780, 301, color_schemes.BEASTWHOOSH_COLOR_SCHEME,
- self.sound.locations("display.wave_bw.L.path"), self.sound.locations("display.spectral_bw.L.path")),
+ (120, 71, color_schemes.FREESOUND2_COLOR_SCHEME, self.sound.locations("display.wave.M.path"),
+ self.sound.locations("display.spectral.M.path")),
+ (900, 201, color_schemes.FREESOUND2_COLOR_SCHEME, self.sound.locations("display.wave.L.path"),
+ self.sound.locations("display.spectral.L.path")),
+ (195, 101, color_schemes.BEASTWHOOSH_COLOR_SCHEME, self.sound.locations("display.wave_bw.M.path"),
+ self.sound.locations("display.spectral_bw.M.path")),
+ (780, 301, color_schemes.BEASTWHOOSH_COLOR_SCHEME, self.sound.locations("display.wave_bw.L.path"),
+ self.sound.locations("display.spectral_bw.L.path")),
]:
try:
fft_size = 2048
- audioprocessing.create_wave_images(tmp_wavefile2, waveform_path, spectral_path, width, height,
- fft_size, color_scheme=color_scheme)
+ audioprocessing.create_wave_images(
+ tmp_wavefile2,
+ waveform_path,
+ spectral_path,
+ width,
+ height,
+ fft_size,
+ color_scheme=color_scheme
+ )
self.log_info(f"created wave and spectrogram images: {waveform_path}, {spectral_path}")
except AudioProcessingException as e:
self.set_failure("creation of display images has failed", e)
@@ -388,9 +412,8 @@ def get_sound_path(self):
pass
def process(self):
- with TemporaryDirectory(
- prefix=f'processing_before_description_{os.path.basename(self.output_folder)}_',
- dir=settings.PROCESSING_TEMP_DIR) as tmp_directory:
+ with TemporaryDirectory(prefix=f'processing_before_description_{os.path.basename(self.output_folder)}_',
+ dir=settings.PROCESSING_TEMP_DIR) as tmp_directory:
# Get the path of the original sound and convert to PCM
try:
@@ -407,8 +430,10 @@ def process(self):
info = audioprocessing.stereofy_and_find_info(settings.STEREOFY_PATH, tmp_wavefile, tmp_wavefile2)
except OSError as e:
# Could not create tmp file
- self.set_failure(f"could not create tmp_wavefile2 file, "
- f"make stereofy sure executable exists at {settings.SOUNDS_PATH}", e)
+ self.set_failure(
+ f"could not create tmp_wavefile2 file, "
+ f"make stereofy sure executable exists at {settings.SOUNDS_PATH}", e
+ )
return False
except AudioProcessingException as e:
if "File contains data in an unknown format" in str(e):
@@ -419,8 +444,9 @@ def process(self):
self.log_info("stereofy failed, trying re-creating PCM file with ffmpeg and re-running stereofy")
try:
tmp_wavefile = self.convert_to_pcm(self.audio_file_path, tmp_directory, force_use_ffmpeg=True)
- info = audioprocessing.stereofy_and_find_info(settings.STEREOFY_PATH,
- tmp_wavefile, tmp_wavefile2)
+ info = audioprocessing.stereofy_and_find_info(
+ settings.STEREOFY_PATH, tmp_wavefile, tmp_wavefile2
+ )
except AudioProcessingException as e:
self.set_failure("re-run of stereofy with ffmpeg conversion has failed", str(e))
return False
@@ -440,8 +466,10 @@ def process(self):
try:
audioprocessing.convert_to_mp3(tmp_wavefile2, self.output_preview_mp3, 70)
except OSError as e:
- self.set_failure("conversion to mp3 (preview) has failed, "
- "make sure that lame executable exists: %s" % e)
+ self.set_failure(
+ "conversion to mp3 (preview) has failed, "
+ "make sure that lame executable exists: %s" % e
+ )
return False
except AudioProcessingException as e:
@@ -455,8 +483,10 @@ def process(self):
try:
audioprocessing.convert_to_ogg(tmp_wavefile2, self.output_preview_ogg, 1)
except OSError as e:
- self.set_failure("conversion to ogg (preview) has failed, "
- "make sure that oggenc executable exists: %s" % e)
+ self.set_failure(
+ "conversion to ogg (preview) has failed, "
+ "make sure that oggenc executable exists: %s" % e
+ )
return False
except AudioProcessingException as e:
self.set_failure("conversion to ogg (preview) has failed", e)
@@ -468,9 +498,18 @@ def process(self):
# Generate displays
try:
- audioprocessing.create_wave_images(tmp_wavefile2, self.output_wave_path, self.output_spectral_path, 780, 301,
- fft_size=2048, color_scheme=color_schemes.BEASTWHOOSH_COLOR_SCHEME)
- self.log_info(f"created wave and spectrogram images: {self.output_wave_path}, {self.output_spectral_path}")
+ audioprocessing.create_wave_images(
+ tmp_wavefile2,
+ self.output_wave_path,
+ self.output_spectral_path,
+ 780,
+ 301,
+ fft_size=2048,
+ color_scheme=color_schemes.BEASTWHOOSH_COLOR_SCHEME
+ )
+ self.log_info(
+ f"created wave and spectrogram images: {self.output_wave_path}, {self.output_spectral_path}"
+ )
except AudioProcessingException as e:
self.set_failure("creation of display images has failed", e)
return False
@@ -479,7 +518,7 @@ def process(self):
return False
# Save info data to json file
- info['audio_file_path'] = self.audio_file_path
+ info['audio_file_path'] = self.audio_file_path
with open(self.output_info_file, 'w') as f:
json.dump(info, f)
return True
diff --git a/utils/audioprocessing/processing.py b/utils/audioprocessing/processing.py
index 8f32a136a..a94dbf244 100644
--- a/utils/audioprocessing/processing.py
+++ b/utils/audioprocessing/processing.py
@@ -20,7 +20,6 @@
# See AUTHORS file.
#
-
from past.utils import old_div
import math
import os
@@ -133,7 +132,7 @@ def read(self, start, size, resize_if_less=False):
else:
self.audio_file.seek(0)
- add_to_start = -start # remember: start is negative!
+ add_to_start = -start # remember: start is negative!
to_read = size + start
if to_read > self.nframes:
@@ -174,7 +173,7 @@ def spectral_centroid(self, seek_point, spec_range=110.0):
samples *= self.window
fft = numpy.fft.rfft(samples)
- spectrum = self.scale * numpy.abs(fft) # normalized abs(FFT) between 0 and 1
+ spectrum = self.scale * numpy.abs(fft) # normalized abs(FFT) between 0 and 1
length = numpy.float64(spectrum.shape[0])
# scale the db spectrum from [- spec_range db ... 0 db] > [0..1]
@@ -189,11 +188,14 @@ def spectral_centroid(self, seek_point, spec_range=110.0):
if self.spectrum_range is None:
self.spectrum_range = numpy.arange(length)
- spectral_centroid = old_div((spectrum * self.spectrum_range).sum(), (energy * (length - 1))) * self.samplerate * 0.5
+ spectral_centroid = old_div((spectrum * self.spectrum_range).sum(),
+ (energy * (length - 1))) * self.samplerate * 0.5
# clip > log10 > scale between 0 and 1
- spectral_centroid = old_div((math.log10(self.clip(spectral_centroid, self.lower, self.higher)) - self.lower_log), (
- self.higher_log - self.lower_log))
+ spectral_centroid = old_div(
+ (math.log10(self.clip(spectral_centroid, self.lower, self.higher)) - self.lower_log),
+ (self.higher_log - self.lower_log)
+ )
return spectral_centroid, db_spectrum
@@ -288,13 +290,14 @@ def __init__(self, image_width, image_height, color_scheme):
self.color_scheme_to_use = color_scheme
else:
self.color_scheme_to_use = COLOR_SCHEMES.get(color_scheme, COLOR_SCHEMES[DEFAULT_COLOR_SCHEME_KEY])
-
+
self.transparent_background = self.color_scheme_to_use.get('wave_transparent_background', False)
if self.transparent_background:
self.image = Image.new("RGBA", (image_width, image_height), (0, 0, 0, 0))
else:
- background_color = self.color_scheme_to_use['wave_colors'][0] # Only used if transparent_background is False
- self.image = Image.new("RGB", (image_width, image_height), background_color)
+ background_color = self.color_scheme_to_use['wave_colors'][
+ 0] # Only used if transparent_background is False
+ self.image = Image.new("RGB", (image_width, image_height), background_color)
self.image_width = image_width
self.image_height = image_height
@@ -325,7 +328,7 @@ def draw_peaks(self, x, peaks, spectral_centroid):
def draw_anti_aliased_pixels(self, x, y1, y2, color):
""" vertical anti-aliasing at y1 and y2 """
-
+
y_max = max(y1, y2)
y_max_int = int(y_max)
alpha = y_max - y_max_int
@@ -340,7 +343,6 @@ def draw_anti_aliased_pixels(self, x, y1, y2, color):
else:
# If using transparent background, don't do anti-aliasing
self.pix[x, y_max_int + 1] = (color[0], color[1], color[2], 255)
-
y_min = min(y1, y2)
y_min_int = int(y_min)
@@ -361,7 +363,9 @@ def save(self, filename):
a = self.color_scheme_to_use.get('wave_zero_line_alpha', 0)
if a:
for x in range(self.image_width):
- self.pix[x, old_div(self.image_height, 2)] = tuple([p + a for p in self.pix[x, old_div(self.image_height, 2)]])
+ self.pix[x, old_div(self.image_height, 2)] = tuple([
+ p + a for p in self.pix[x, old_div(self.image_height, 2)]
+ ])
self.image.save(filename)
@@ -418,8 +422,17 @@ def save(self, filename, quality=80):
self.image.transpose(Image.ROTATE_90).save(filename, quality=quality)
-def create_wave_images(input_filename, output_filename_w, output_filename_s, image_width, image_height, fft_size,
- progress_callback=None, color_scheme=None, use_transparent_background=False):
+def create_wave_images(
+ input_filename,
+ output_filename_w,
+ output_filename_s,
+ image_width,
+ image_height,
+ fft_size,
+ progress_callback=None,
+ color_scheme=None,
+ use_transparent_background=False
+):
"""
Utility function for creating both wavefile and spectrum images from an audio input file.
:param input_filename: input audio filename (must be PCM)
@@ -483,31 +496,34 @@ def convert_to_pcm(input_filename, output_filename, use_ffmpeg_for_unknown_type=
error_messages = []
elif sound_type == "m4a":
cmd = ["faad", "-o", output_filename, input_filename]
- error_messages = ["Unable to find correct AAC sound track in the MP4 file",
- "Error: Bitstream value not allowed by specification",
- "Error opening file"]
+ error_messages = [
+ "Unable to find correct AAC sound track in the MP4 file",
+ "Error: Bitstream value not allowed by specification", "Error opening file"
+ ]
elif sound_type == "wv":
cmd = ["wvunpack", input_filename, "-o", output_filename]
error_messages = []
-
+
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = process.communicate()
stdout = stdout.decode(errors='ignore')
stderr = stderr.decode(errors='ignore')
-
+
# If external process returned an error (return code != 0) or the expected PCM file does not
# exist, raise exception
if process.returncode != 0 or not os.path.exists(output_filename):
if "No space left on device" in stderr + " " + stdout:
raise NoSpaceLeftException
- raise AudioProcessingException("failed converting to pcm data:\n"
- + " ".join(cmd) + "\n" + stderr + "\n" + stdout)
-
+ raise AudioProcessingException(
+ "failed converting to pcm data:\n" + " ".join(cmd) + "\n" + stderr + "\n" + stdout
+ )
+
# If external process apparently returned no error (return code = 0) but we see some errors from our list of
# known errors have been printed in stderr, raise an exception as well
if any([error_message in stderr for error_message in error_messages]):
- raise AudioProcessingException("failed converting to pcm data:\n"
- + " ".join(cmd) + "\n" + stderr + "\n" + stdout)
+ raise AudioProcessingException(
+ "failed converting to pcm data:\n" + " ".join(cmd) + "\n" + stderr + "\n" + stdout
+ )
else:
if use_ffmpeg_for_unknown_type:
convert_using_ffmpeg(input_filename, output_filename)
@@ -535,7 +551,8 @@ def stereofy_and_find_info(stereofy_executble_path, input_filename, output_filen
if "No space left on device" in stderr + " " + stdout:
raise NoSpaceLeftException
raise AudioProcessingException(
- "failed calling stereofy data:\n" + " ".join(cmd) + "\n" + stderr + "\n" + stdout)
+ "failed calling stereofy data:\n" + " ".join(cmd) + "\n" + stderr + "\n" + stdout
+ )
stdout = (stdout + " " + stderr).replace("\n", " ")
@@ -594,7 +611,7 @@ def convert_to_ogg(input_filename, output_filename, quality=1):
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, _) = process.communicate()
stdout = stdout.decode(errors='ignore')
-
+
if process.returncode != 0 or not os.path.exists(output_filename):
raise AudioProcessingException(stdout)
diff --git a/utils/audioprocessing/wav2png.py b/utils/audioprocessing/wav2png.py
index 4c95af267..0c73246d6 100755
--- a/utils/audioprocessing/wav2png.py
+++ b/utils/audioprocessing/wav2png.py
@@ -20,7 +20,6 @@
# See AUTHORS file.
#
-
from past.utils import old_div
import argparse
@@ -29,7 +28,7 @@
def progress_callback(position, width):
- percentage = old_div((position*100),width)
+ percentage = old_div((position * 100), width)
if position % (old_div(width, 10)) == 0:
sys.stdout.write(str(percentage) + "% ")
sys.stdout.flush()
@@ -42,8 +41,10 @@ def main(args):
output_file_w = input_file + "_w.png"
output_file_s = input_file + "_s.jpg"
- this_args = (input_file, output_file_w, output_file_s, args.width, args.height, args.fft_size,
- progress_callback, args.color_scheme)
+ this_args = (
+ input_file, output_file_w, output_file_s, args.width, args.height, args.fft_size, progress_callback,
+ args.color_scheme
+ )
print(f"processing file {input_file}:\n\t", end="")
@@ -71,17 +72,21 @@ def main(args):
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("files", help="files to process", nargs="+")
- parser.add_argument("-w", "--width", type=int, default=500, dest="width",
- help="image width in pixels")
- parser.add_argument("-H", "--height", type=int, default=171, dest="height",
- help="image height in pixels")
- parser.add_argument("-f", "--fft", type=int, default=2048, dest="fft_size",
- help="fft size, power of 2 for increased performance")
- parser.add_argument("-c", "--color_scheme", type=str, default='Freesound2', dest="color_scheme",
- help="name of the color scheme to use (one of: 'Freesound2' (default), 'FreesoundBeastWhoosh', "
- "'Cyberpunk', 'Rainforest')")
- parser.add_argument("-p", "--profile", action="store_true",
- help="run profiler and output profiling information")
+ parser.add_argument("-w", "--width", type=int, default=500, dest="width", help="image width in pixels")
+ parser.add_argument("-H", "--height", type=int, default=171, dest="height", help="image height in pixels")
+ parser.add_argument(
+ "-f", "--fft", type=int, default=2048, dest="fft_size", help="fft size, power of 2 for increased performance"
+ )
+ parser.add_argument(
+ "-c",
+ "--color_scheme",
+ type=str,
+ default='Freesound2',
+ dest="color_scheme",
+ help="name of the color scheme to use (one of: 'Freesound2' (default), 'FreesoundBeastWhoosh', "
+ "'Cyberpunk', 'Rainforest')"
+ )
+ parser.add_argument("-p", "--profile", action="store_true", help="run profiler and output profiling information")
args = parser.parse_args()
main(args)
diff --git a/utils/aws.py b/utils/aws.py
index 1793ca6e2..775b14be2 100644
--- a/utils/aws.py
+++ b/utils/aws.py
@@ -42,9 +42,12 @@ def init_client(service):
if not settings.AWS_REGION or not settings.AWS_SECRET_ACCESS_KEY or not settings.AWS_SECRET_ACCESS_KEY:
raise AwsCredentialsNotConfigured()
- return client(service, region_name=settings.AWS_SQS_REGION,
- aws_access_key_id=settings.AWS_SQS_ACCESS_KEY_ID,
- aws_secret_access_key=settings.AWS_SQS_SECRET_ACCESS_KEY)
+ return client(
+ service,
+ region_name=settings.AWS_SQS_REGION,
+ aws_access_key_id=settings.AWS_SQS_ACCESS_KEY_ID,
+ aws_secret_access_key=settings.AWS_SQS_SECRET_ACCESS_KEY
+ )
class EmailStats:
@@ -91,7 +94,7 @@ def get_ses_stats(sample_size, n_points):
raise AwsConnectionError(e)
data = response['SendDataPoints']
- data.sort(key=lambda x: x['Timestamp'], reverse=True) # array of datapoints is not sorted originally
+ data.sort(key=lambda x: x['Timestamp'], reverse=True) # array of datapoints is not sorted originally
email_stats = EmailStats()
count = 0
diff --git a/utils/cache.py b/utils/cache.py
index 4416b1926..ee7d34e4d 100644
--- a/utils/cache.py
+++ b/utils/cache.py
@@ -49,4 +49,3 @@ def invalidate_all_moderators_header_cache():
mods = Group.objects.get(name='moderators').user_set.all()
for mod in mods:
invalidate_user_template_caches(mod.id)
-
diff --git a/utils/chunks.py b/utils/chunks.py
index 0489f498f..f662e6cd2 100644
--- a/utils/chunks.py
+++ b/utils/chunks.py
@@ -18,7 +18,8 @@
# See AUTHORS file.
#
+
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
- yield l[i:i + n]
\ No newline at end of file
+ yield l[i:i + n]
diff --git a/utils/downloads.py b/utils/downloads.py
index 28f9061c9..aba4475eb 100644
--- a/utils/downloads.py
+++ b/utils/downloads.py
@@ -37,10 +37,9 @@ def download_sounds(licenses_url, pack):
"""
attribution = pack.get_attribution()
license_crc = zlib.crc32(attribution.encode('UTF-8')) & 0xffffffff
- filelist = "%02x %i %s %s\r\n" % (license_crc,
- len(attribution.encode('UTF-8')),
- licenses_url,
- "_readme_and_license.txt")
+ filelist = "%02x %i %s %s\r\n" % (
+ license_crc, len(attribution.encode('UTF-8')), licenses_url, "_readme_and_license.txt"
+ )
sounds_list = pack.sounds.filter(processing_state="OK", moderation_state="OK").select_related('user', 'license')
diff --git a/utils/filesystem.py b/utils/filesystem.py
index bddf2a7c3..fa1ef9df2 100644
--- a/utils/filesystem.py
+++ b/utils/filesystem.py
@@ -18,7 +18,6 @@
# See AUTHORS file.
#
-
import hashlib
import os
import shutil
diff --git a/utils/forms.py b/utils/forms.py
index 9d3922170..13418d0bf 100644
--- a/utils/forms.py
+++ b/utils/forms.py
@@ -35,8 +35,8 @@ def filename_has_valid_extension(filename):
class HtmlCleaningCharField(forms.CharField):
""" A field that removes disallowed HTML tags as implemented in utils.text.clean_html and checks for
too many upper chase characters"""
-
- ok_tags = ["a", "img", "strong", "b", "em", "i", "u", "ul", "li", "p", "br", "blockquote", "code"]
+
+ ok_tags = ["a", "img", "strong", "b", "em", "i", "u", "ul", "li", "p", "br", "blockquote", "code"]
ok_attributes = {"a": ["href", "rel"], "img": ["src", "alt", "title"]}
@classmethod
@@ -48,10 +48,10 @@ def clean(self, value):
if is_shouting(value):
raise forms.ValidationError('Please moderate the amount of upper case characters in your post...')
return clean_html(value, ok_tags=self.ok_tags, ok_attributes=self.ok_attributes)
-
+
class HtmlCleaningCharFieldWithCenterTag(HtmlCleaningCharField):
- ok_tags = ["a", "img", "strong", "b", "em", "i", "u", "ul", "li", "p", "br", "blockquote", "code", "center"]
+ ok_tags = ["a", "img", "strong", "b", "em", "i", "u", "ul", "li", "p", "br", "blockquote", "code", "center"]
ok_attributes = {"a": ["href", "rel"], "img": ["src", "alt", "title"], "p": ["align"]}
@@ -62,10 +62,14 @@ class TagField(forms.CharField):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.validators.append(
- validators.MinLengthValidator(3, 'You should add at least 3 different tags. Tags must be separated by '
- 'spaces.'))
+ validators.MinLengthValidator(
+ 3, 'You should add at least 3 different tags. Tags must be separated by '
+ 'spaces.'
+ )
+ )
self.validators.append(
- validators.MaxLengthValidator(30, 'There can be maximum 30 tags, please select the most relevant ones!'))
+ validators.MaxLengthValidator(30, 'There can be maximum 30 tags, please select the most relevant ones!')
+ )
def to_python(self, value):
value = super().to_python(value)
diff --git a/utils/locations.py b/utils/locations.py
index 29f545fbd..7a64dc5b5 100644
--- a/utils/locations.py
+++ b/utils/locations.py
@@ -29,7 +29,9 @@ def locations_decorator(cache=True):
locations("a.b.c")
but is much easier on the typing and easier for copy-pasting
"""
+
def decorator(locations_function):
+
def wrapped(self, path=None):
# cache the call to the locations function so it's only calculated once
if not cache or not hasattr(self, '_locations_cache'):
@@ -42,23 +44,29 @@ def wrapped(self, path=None):
return lookup
else:
return self._locations_cache
+
return wrapped
+
return decorator
+
def pretty_print_locations(locations, indent=0):
for (key, value) in locations.items():
if isinstance(value, dict):
- print(" "*indent, "*", key)
- pretty_print_locations(value, indent+1)
+ print(" " * indent, "*", key)
+ pretty_print_locations(value, indent + 1)
else:
- print(" "*indent, "*", key)
+ print(" " * indent, "*", key)
if __name__ == "__main__":
+
class X:
+
@locations_decorator()
def locations(self):
return dict(a=5)
+
x = X()
print(x.locations())
print(x.locations())
diff --git a/utils/logging_filters.py b/utils/logging_filters.py
index a01dbe329..a026f7854 100644
--- a/utils/logging_filters.py
+++ b/utils/logging_filters.py
@@ -57,6 +57,7 @@ class GenericDataFilter(logging.Filter):
properties of the message so graylog can process them. If the parsing does not succeed, the
message is sent as is.
"""
+
def filter(self, record):
try:
message = record.getMessage()
@@ -65,7 +66,7 @@ def filter(self, record):
for key, value in fields.items():
setattr(record, key, value)
except (IndexError, ValueError, AttributeError):
- pass # Message is not formatted for json parsing
+ pass # Message is not formatted for json parsing
return True
diff --git a/utils/mail.py b/utils/mail.py
index 5cb23da4c..4302ad379 100644
--- a/utils/mail.py
+++ b/utils/mail.py
@@ -44,8 +44,16 @@ def _ensure_list(item):
return item
-def send_mail(subject, email_body, user_to=None, email_to=None, email_from=None, reply_to=None,
- email_type_preference_check=None, extra_subject=''):
+def send_mail(
+ subject,
+ email_body,
+ user_to=None,
+ email_to=None,
+ email_from=None,
+ reply_to=None,
+ email_type_preference_check=None,
+ extra_subject=''
+):
"""Sends email with a lot of defaults.
The function will check if user's email is valid based on bounce info. The function will also check email
@@ -101,7 +109,7 @@ def send_mail(subject, email_body, user_to=None, email_to=None, email_from=None,
# usernames to '-'
email_to = [('-', email) for email in email_to]
- if settings.ALLOWED_EMAILS: # for testing purposes, so we don't accidentally send emails to users
+ if settings.ALLOWED_EMAILS: # for testing purposes, so we don't accidentally send emails to users
email_to = [(username, email) for username, email in email_to if email in settings.ALLOWED_EMAILS]
full_subject = f'{settings.EMAIL_SUBJECT_PREFIX} {subject}'
@@ -109,8 +117,7 @@ def send_mail(subject, email_body, user_to=None, email_to=None, email_from=None,
full_subject = f'{full_subject} - {extra_subject}'
try:
- emails = tuple(((full_subject, email_body, email_from, [email])
- for _, email in email_to))
+ emails = tuple(((full_subject, email_body, email_from, [email]) for _, email in email_to))
# Replicating send_mass_mail functionality and adding reply-to header if requires
connection = get_connection(username=None, password=None, fail_silently=False)
@@ -118,47 +125,76 @@ def send_mail(subject, email_body, user_to=None, email_to=None, email_from=None,
if reply_to:
headers = {'Reply-To': reply_to}
- messages = [EmailMessage(email_subject, message, sender, recipient, headers=headers)
- for email_subject, message, sender, recipient in emails]
+ messages = [
+ EmailMessage(email_subject, message, sender, recipient, headers=headers)
+ for email_subject, message, sender, recipient in emails
+ ]
connection.send_messages(messages)
# Log emails being sent
for username, email in email_to:
- emails_logger.info('Email sent (%s)' % json.dumps({
- 'subject': subject,
- 'extra_subject': extra_subject,
- 'email_from': email_from,
- 'email_to': email,
- 'email_to_username': username,
- }))
+ emails_logger.info(
+ 'Email sent (%s)' % json.dumps({
+ 'subject': subject,
+ 'extra_subject': extra_subject,
+ 'email_from': email_from,
+ 'email_to': email,
+ 'email_to_username': username,
+ })
+ )
return True
except Exception as e:
- emails_logger.info('Error in send_mail (%s)' % json.dumps({
- 'subject': subject,
- 'extra_subject': extra_subject,
- 'email_to': str(email_to),
- 'error': str(e)
- }))
+ emails_logger.info(
+ 'Error in send_mail (%s)' % json.dumps({
+ 'subject': subject,
+ 'extra_subject': extra_subject,
+ 'email_to': str(email_to),
+ 'error': str(e)
+ })
+ )
return False
-def send_mail_template(subject, template, context, user_to=None, email_to=None, email_from=None, reply_to=None,
- email_type_preference_check=None, extra_subject=''):
+def send_mail_template(
+ subject,
+ template,
+ context,
+ user_to=None,
+ email_to=None,
+ email_from=None,
+ reply_to=None,
+ email_type_preference_check=None,
+ extra_subject=''
+):
context["settings"] = settings
- return send_mail(subject, render_to_string(template, context), user_to=user_to, email_to=email_to,
- email_from=email_from, reply_to=reply_to, email_type_preference_check=email_type_preference_check,
- extra_subject=extra_subject)
+ return send_mail(
+ subject,
+ render_to_string(template, context),
+ user_to=user_to,
+ email_to=email_to,
+ email_from=email_from,
+ reply_to=reply_to,
+ email_type_preference_check=email_type_preference_check,
+ extra_subject=extra_subject
+ )
def send_mail_template_to_support(subject, template, context, email_from=None, reply_to=None, extra_subject=''):
email_to = []
for email in settings.SUPPORT:
email_to.append(email[1])
- return send_mail_template(subject, template, context, email_to=email_to, email_from=email_from, reply_to=reply_to,
- extra_subject=extra_subject)
+ return send_mail_template(
+ subject,
+ template,
+ context,
+ email_to=email_to,
+ email_from=email_from,
+ reply_to=reply_to,
+ extra_subject=extra_subject
+ )
def render_mail_template(template, context):
diff --git a/utils/management_commands.py b/utils/management_commands.py
index 9a8162ba8..4af09e831 100644
--- a/utils/management_commands.py
+++ b/utils/management_commands.py
@@ -25,7 +25,6 @@
from django.core.management.base import BaseCommand
-
commands_logger = logging.getLogger('commands')
diff --git a/utils/mirror_files.py b/utils/mirror_files.py
index b185e079e..daabc494c 100644
--- a/utils/mirror_files.py
+++ b/utils/mirror_files.py
@@ -16,10 +16,14 @@ def copy_files(source_destination_tuples):
if '@' in destination_path:
# The destination path is in a remote server, use scp
try:
- subprocess.check_output(f'rsync -e "ssh -o StrictHostKeyChecking=no -i /ssh_fsweb/cdn-ssh-key-fsweb" -aq --rsync-path="mkdir -p {os.path.dirname(destination_path)} && rsync" {source_path} {os.path.dirname(destination_path)}/', stderr=subprocess.STDOUT, shell=True)
+ subprocess.check_output(
+ f'rsync -e "ssh -o StrictHostKeyChecking=no -i /ssh_fsweb/cdn-ssh-key-fsweb" -aq --rsync-path="mkdir -p {os.path.dirname(destination_path)} && rsync" {source_path} {os.path.dirname(destination_path)}/',
+ stderr=subprocess.STDOUT,
+ shell=True
+ )
if settings.LOG_START_AND_END_COPYING_FILES:
web_logger.info(f'Finished copying file {source_path} to {destination_path}')
- except subprocess.CalledProcessError as e:
+ except subprocess.CalledProcessError as e:
web_logger.info(f'Failed copying {source_path} ({str(e)}: {e.output})')
else:
# The destination path is a local volume
@@ -43,22 +47,20 @@ def copy_files_to_mirror_locations(object, source_location_keys, source_base_pat
for destination_base_path in destination_base_paths:
for location_path in source_location_keys:
source_path = object.locations(location_path)
- source_destination_tuples.append((
- source_path,
- source_path.replace(source_base_path, destination_base_path)
- ))
+ source_destination_tuples.append(
+ (source_path, source_path.replace(source_base_path, destination_base_path))
+ )
- copy_files(source_destination_tuples) # Do the actual copying of the files
+ copy_files(source_destination_tuples) # Do the actual copying of the files
def copy_uploaded_file_to_mirror_locations(source_file_path):
source_destination_tuples = []
if settings.MIRROR_UPLOADS:
for destination_base_path in settings.MIRROR_UPLOADS:
- source_destination_tuples.append((
- source_file_path,
- source_file_path.replace(settings.UPLOADS_PATH, destination_base_path)
- ))
+ source_destination_tuples.append(
+ (source_file_path, source_file_path.replace(settings.UPLOADS_PATH, destination_base_path))
+ )
copy_files(source_destination_tuples)
@@ -66,10 +68,9 @@ def remove_uploaded_file_from_mirror_locations(source_file_path):
source_destination_tuples = []
if settings.MIRROR_UPLOADS:
for destination_base_path in settings.MIRROR_UPLOADS:
- source_destination_tuples.append((
- source_file_path,
- source_file_path.replace(settings.UPLOADS_PATH, destination_base_path)
- ))
+ source_destination_tuples.append(
+ (source_file_path, source_file_path.replace(settings.UPLOADS_PATH, destination_base_path))
+ )
for _, destination_path in source_destination_tuples:
try:
os.remove(destination_path)
@@ -91,23 +92,27 @@ def copy_sound_to_mirror_locations(sound):
def copy_previews_to_mirror_locations(sound):
copy_files_to_mirror_locations(
sound, ['preview.HQ.mp3.path', 'preview.HQ.ogg.path', 'preview.LQ.mp3.path', 'preview.LQ.ogg.path'],
- settings.PREVIEWS_PATH, settings.MIRROR_PREVIEWS)
+ settings.PREVIEWS_PATH, settings.MIRROR_PREVIEWS
+ )
def copy_displays_to_mirror_locations(sound):
copy_files_to_mirror_locations(
- sound, ['display.spectral.L.path', 'display.spectral.M.path',
- 'display.wave.L.path', 'display.wave.M.path',
- 'display.spectral_bw.L.path', 'display.spectral_bw.M.path',
- 'display.wave_bw.L.path', 'display.wave_bw.M.path'],
- settings.DISPLAYS_PATH, settings.MIRROR_DISPLAYS)
+ sound, [
+ 'display.spectral.L.path', 'display.spectral.M.path', 'display.wave.L.path', 'display.wave.M.path',
+ 'display.spectral_bw.L.path', 'display.spectral_bw.M.path', 'display.wave_bw.L.path',
+ 'display.wave_bw.M.path'
+ ], settings.DISPLAYS_PATH, settings.MIRROR_DISPLAYS
+ )
def copy_analysis_to_mirror_locations(sound):
copy_files_to_mirror_locations(
- sound, ['analysis.frames.path', 'analysis.statistics.path'], settings.ANALYSIS_PATH, settings.MIRROR_ANALYSIS)
+ sound, ['analysis.frames.path', 'analysis.statistics.path'], settings.ANALYSIS_PATH, settings.MIRROR_ANALYSIS
+ )
def copy_avatar_to_mirror_locations(profile):
copy_files_to_mirror_locations(
- profile, ['avatar.L.path', 'avatar.M.path', 'avatar.S.path'], settings.AVATARS_PATH, settings.MIRROR_AVATARS)
+ profile, ['avatar.L.path', 'avatar.M.path', 'avatar.S.path'], settings.AVATARS_PATH, settings.MIRROR_AVATARS
+ )
diff --git a/utils/nginxsendfile.py b/utils/nginxsendfile.py
index 6cdeea760..5a5490322 100644
--- a/utils/nginxsendfile.py
+++ b/utils/nginxsendfile.py
@@ -20,7 +20,7 @@
import os
-from django.http import HttpResponse,Http404
+from django.http import HttpResponse, Http404
from django.conf import settings
from wsgiref.util import FileWrapper
@@ -41,7 +41,7 @@ def prepare_sendfile_arguments_for_sound_download(sound):
def sendfile(path, attachment_name, secret_url=None):
if not os.path.exists(path):
raise Http404
-
+
if settings.DEBUG:
response = HttpResponse(FileWrapper(open(path, "rb")))
response['Content-Length'] = os.path.getsize(path)
@@ -51,5 +51,5 @@ def sendfile(path, attachment_name, secret_url=None):
response['Content-Type'] = "application/octet-stream"
response['Content-Disposition'] = f"attachment; filename=\"{attachment_name}\""
-
+
return response
diff --git a/utils/onlineusers.py b/utils/onlineusers.py
index ab1bff39b..210f3d598 100644
--- a/utils/onlineusers.py
+++ b/utils/onlineusers.py
@@ -26,30 +26,32 @@
_last_purged = datetime.now()
+
def get_online_users():
user_dict = cache.get(CACHE_KEY)
return hasattr(user_dict, 'keys') and user_dict.keys() or []
+
def cache_online_users(request):
- if request.user.is_anonymous:
- return
- user_dict = cache.get(CACHE_KEY)
- if not user_dict:
- user_dict = {}
-
- now = datetime.now()
-
- # Check if user has marked the option for not being shown in online users list
- if not request.user.profile.not_shown_in_online_users_list :
- user_dict[request.user.id] = now
-
- # purge
- global _last_purged
- if _last_purged + timedelta(minutes=ONLINE_MINUTES) < now:
- purge_older_than = now - timedelta(minutes=ONLINE_MINUTES)
- for user_id, last_seen in user_dict.copy().items():
- if last_seen < purge_older_than:
- del(user_dict[user_id])
- _last_purged = now
-
- cache.set(CACHE_KEY, user_dict, 60*60*24)
+ if request.user.is_anonymous:
+ return
+ user_dict = cache.get(CACHE_KEY)
+ if not user_dict:
+ user_dict = {}
+
+ now = datetime.now()
+
+ # Check if user has marked the option for not being shown in online users list
+ if not request.user.profile.not_shown_in_online_users_list:
+ user_dict[request.user.id] = now
+
+ # purge
+ global _last_purged
+ if _last_purged + timedelta(minutes=ONLINE_MINUTES) < now:
+ purge_older_than = now - timedelta(minutes=ONLINE_MINUTES)
+ for user_id, last_seen in user_dict.copy().items():
+ if last_seen < purge_older_than:
+ del (user_dict[user_id])
+ _last_purged = now
+
+ cache.set(CACHE_KEY, user_dict, 60 * 60 * 24)
diff --git a/utils/paypal/wrapper.py b/utils/paypal/wrapper.py
index f545ad087..2c89eac24 100644
--- a/utils/paypal/wrapper.py
+++ b/utils/paypal/wrapper.py
@@ -1,6 +1,6 @@
# PayPal python NVP API wrapper class.
# This is a sample to help others get started on working
-# with the PayPal NVP API in Python.
+# with the PayPal NVP API in Python.
# This is not a complete reference! Be sure to understand
# what this class is doing before you try it on production servers!
# ...use at your own peril.
@@ -11,7 +11,7 @@
# for more information.
# by Mike Atlas / LowSingle.com / MassWrestling.com, September 2007
-# No License Expressed. Feel free to distribute, modify,
+# No License Expressed. Feel free to distribute, modify,
# and use in any open or closed source project without credit to the author
# lot's of changed by Bram de Jong, but no fundamental changes to how the
@@ -38,7 +38,7 @@
class Paypal:
-
+
def __init__(self, debug=True):
# fill these in with the API values
self.signature = dict(
@@ -46,7 +46,7 @@ def __init__(self, debug=True):
pwd='QFZCWN5HZM8VBG7Q',
signature='A-IzJhZZjhg29XQ2qnhapuwxIDzyAZQ92FRP5dqBzVesOkzbdUONzmOU',
version='3.0',
- )
+ )
self.urls = dict(
returnurl='http://www.test.com/',
@@ -60,32 +60,29 @@ def __init__(self, debug=True):
else:
self.API_ENDPOINT = 'https://api-3t.paypal.com/nvp'
-
def get_paypal_forward_url(self, token):
if self.debug:
return 'https://www.sandbox.paypal.com/webscr?cmd=_express-checkout&token=' + token
else:
return 'https://www.paypal.com/webscr?cmd=_express-checkout&token=' + token
-
def query(self, parameters, add_urls=True):
"""for a dict of parameters, create the query-string, get the paypal URL and return the parsed dict"""
params = list(self.signature.items()) + list(parameters.items())
-
+
print(parameters)
if add_urls:
params += list(self.urls.items())
-
+
# encode the urls into a query string
- params_string = urllib.parse.urlencode( params )
-
+ params_string = urllib.parse.urlencode(params)
+
# get the response and parse it
response = urllib.parse.parse_qs(urllib.request.urlopen(self.API_ENDPOINT, params_string).read())
-
- # the parsed dict has a list for each value, but all Paypal replies are unique
- return {key.lower(): value[0] for (key,value) in response.items()}
+ # the parsed dict has a list for each value, but all Paypal replies are unique
+ return {key.lower(): value[0] for (key, value) in response.items()}
def set_express_checkout(self, amount):
"""Set up an express checkout"""
@@ -95,14 +92,13 @@ def set_express_checkout(self, amount):
paymentaction='Authorization',
amt=amount,
currencycode='EUR',
- email='', # used by paypal to set email for account creation
- desc='Freesound donation of %d euro' % amount, # description of what the person is buying
- custom=amount, # custom field, can be anything you want
- hdrimg='', # url to image for header, recomended to be stored on https server
+ email='', # used by paypal to set email for account creation
+ desc='Freesound donation of %d euro' % amount, # description of what the person is buying
+ custom=amount, # custom field, can be anything you want
+ hdrimg='', # url to image for header, recomended to be stored on https server
)
return self.query(params)
-
def get_express_checkout_details(self, token):
"""Once the user returns to the return url, call this to get the detailsthe user returns to the return url, call this"""
@@ -110,10 +106,9 @@ def get_express_checkout_details(self, token):
method="GetExpressCheckoutDetails",
token=token,
)
-
+
return self.query(params)
-
-
+
def do_express_checkout_payment(self, token, payer_id, amt):
"""do the actual transaction..."""
params = dict(
@@ -126,13 +121,9 @@ def do_express_checkout_payment(self, token, payer_id, amt):
)
return self.query(params)
-
-
+
def get_transaction_details(self, tx_id):
"""get all the details of a transaction that has finished"""
- params = dict(
- method="GetTransactionDetails",
- transactionid=tx_id
- )
-
+ params = dict(method="GetTransactionDetails", transactionid=tx_id)
+
return self.query(params, add_urls=False)
diff --git a/utils/search/__init__.py b/utils/search/__init__.py
index 44873af81..97be1c46c 100644
--- a/utils/search/__init__.py
+++ b/utils/search/__init__.py
@@ -41,8 +41,17 @@ def get_search_engine(backend_class=settings.SEARCH_ENGINE_BACKEND_CLASS, sounds
class SearchResults:
- def __init__(self, docs=None, num_found=-1, start=-1, num_rows=-1, non_grouped_number_of_results=-1,
- facets=None, highlighting=None, q_time=-1):
+ def __init__(
+ self,
+ docs=None,
+ num_found=-1,
+ start=-1,
+ num_rows=-1,
+ non_grouped_number_of_results=-1,
+ facets=None,
+ highlighting=None,
+ q_time=-1
+ ):
"""
Class that holds the results of a search query. It must contain the fields defined below.
@@ -215,10 +224,22 @@ def sound_exists_in_index(self, sound_object_or_id):
"""
raise NotImplementedError
- def search_sounds(self, textual_query='', query_fields=None, query_filter='', offset=0, current_page=None,
- num_sounds=settings.SOUNDS_PER_PAGE, sort=settings.SEARCH_SOUNDS_SORT_OPTION_AUTOMATIC,
- group_by_pack=False, num_sounds_per_pack_group=1, facets=None, only_sounds_with_pack=False,
- only_sounds_within_ids=False, group_counts_as_one_in_facets=False):
+ def search_sounds(
+ self,
+ textual_query='',
+ query_fields=None,
+ query_filter='',
+ offset=0,
+ current_page=None,
+ num_sounds=settings.SOUNDS_PER_PAGE,
+ sort=settings.SEARCH_SOUNDS_SORT_OPTION_AUTOMATIC,
+ group_by_pack=False,
+ num_sounds_per_pack_group=1,
+ facets=None,
+ only_sounds_with_pack=False,
+ only_sounds_within_ids=False,
+ group_counts_as_one_in_facets=False
+ ):
"""Search for sounds that match specific criteria and return them in a SearchResults object
Args:
@@ -272,7 +293,6 @@ def get_random_sound_id(self):
"""
raise NotImplementedError
-
# Forum search related methods
def add_forum_posts_to_index(self, forum_post_objects):
@@ -306,9 +326,16 @@ def forum_post_exists_in_index(self, forum_post_object_or_id):
"""
raise NotImplementedError
-
- def search_forum_posts(self, textual_query='', query_filter='', offset=0, sort=None, current_page=None,
- num_posts=settings.FORUM_POSTS_PER_PAGE, group_by_thread=True):
+ def search_forum_posts(
+ self,
+ textual_query='',
+ query_filter='',
+ offset=0,
+ sort=None,
+ current_page=None,
+ num_posts=settings.FORUM_POSTS_PER_PAGE,
+ group_by_thread=True
+ ):
"""Search for forum posts that match specific criteria and return them in a SearchResults object
Args:
@@ -330,7 +357,6 @@ def search_forum_posts(self, textual_query='', query_filter='', offset=0, sort=N
"""
raise NotImplementedError
-
# Tag clouds methods
def get_user_tags(self, username):
diff --git a/utils/search/backends/solr555pysolr.py b/utils/search/backends/solr555pysolr.py
index be7e64c62..b7bf7b947 100644
--- a/utils/search/backends/solr555pysolr.py
+++ b/utils/search/backends/solr555pysolr.py
@@ -33,11 +33,9 @@
from utils.search import SearchEngineBase, SearchResults, SearchEngineException
from utils.search.backends.solr_common import SolrQuery, SolrResponseInterpreter
-
SOLR_FORUM_URL = f"{settings.SOLR5_BASE_URL}/forum"
SOLR_SOUNDS_URL = f"{settings.SOLR5_BASE_URL}/freesound"
-
# Mapping from db sound field names to solr sound field names
FIELD_NAMES_MAP = {
settings.SEARCH_SOUNDS_FIELD_ID: 'id',
@@ -55,7 +53,6 @@
settings.SEARCH_SOUNDS_FIELD_LICENSE_NAME: 'license'
}
-
# Map "web" sorting options to solr sorting options
SORT_OPTIONS_MAP = {
settings.SEARCH_SOUNDS_SORT_OPTION_AUTOMATIC: "score desc",
@@ -84,13 +81,7 @@
list: '_ls',
}
-
-SOLR_SOUND_FACET_DEFAULT_OPTIONS = {
- 'limit': 5,
- 'sort': True,
- 'mincount': 1,
- 'count_missing': False
-}
+SOLR_SOUND_FACET_DEFAULT_OPTIONS = {'limit': 5, 'sort': True, 'mincount': 1, 'count_missing': False}
def convert_sound_to_search_engine_document(sound):
@@ -102,8 +93,10 @@ def convert_sound_to_search_engine_document(sound):
document = {}
# Basic sound fields
- keep_fields = ['username', 'created', 'is_explicit', 'is_remix', 'num_ratings', 'channels', 'md5',
- 'was_remixed', 'original_filename', 'duration', 'id', 'num_downloads', 'filesize']
+ keep_fields = [
+ 'username', 'created', 'is_explicit', 'is_remix', 'num_ratings', 'channels', 'md5', 'was_remixed',
+ 'original_filename', 'duration', 'id', 'num_downloads', 'filesize'
+ ]
for key in keep_fields:
document[key] = getattr(sound, key)
if sound.type == '':
@@ -121,8 +114,8 @@ def convert_sound_to_search_engine_document(sound):
if getattr(sound, "pack_id"):
document["pack"] = remove_control_chars(getattr(sound, "pack_name"))
- document["grouping_pack"] = str(getattr(sound, "pack_id")) + "_" + remove_control_chars(
- getattr(sound, "pack_name"))
+ document["grouping_pack"] = str(getattr(sound, "pack_id")
+ ) + "_" + remove_control_chars(getattr(sound, "pack_name"))
else:
document["grouping_pack"] = str(getattr(sound, "id"))
@@ -176,14 +169,11 @@ def convert_post_to_search_engine_document(post):
"thread_title": remove_control_chars(post.thread.title),
"thread_author": post.thread.author.username,
"thread_created": post.thread.created,
-
"forum_name": post.thread.forum.name,
"forum_name_slug": post.thread.forum.name_slug,
-
"post_author": post.author.username,
"post_created": post.created,
"post_body": body,
-
"num_posts": post.thread.num_posts,
"has_posts": False if post.thread.num_posts == 0 else True
}
@@ -201,11 +191,11 @@ def add_solr_suffix_to_dynamic_fieldname(fieldname):
for _, db_descriptor_key, descriptor_type in descriptors_map:
if descriptor_type is not None:
dynamic_fields_map[db_descriptor_key] = '{}{}'.format(
- db_descriptor_key, SOLR_DYNAMIC_FIELDS_SUFFIX_MAP[descriptor_type])
+ db_descriptor_key, SOLR_DYNAMIC_FIELDS_SUFFIX_MAP[descriptor_type]
+ )
return dynamic_fields_map.get(fieldname, fieldname)
-
def add_solr_suffix_to_dynamic_fieldnames_in_filter(query_filter):
"""Processes a filter string containing field names and replaces the occurrences of fieldnames that match with
descriptor names from the descriptors_map of different configured analyzers with updated fieldnames with
@@ -219,8 +209,9 @@ def add_solr_suffix_to_dynamic_fieldnames_in_filter(query_filter):
for _, db_descriptor_key, descriptor_type in descriptors_map:
if descriptor_type is not None:
query_filter = query_filter.replace(
- f'{db_descriptor_key}:','{}{}:'.format(
- db_descriptor_key, SOLR_DYNAMIC_FIELDS_SUFFIX_MAP[descriptor_type]))
+ f'{db_descriptor_key}:',
+ '{}{}:'.format(db_descriptor_key, SOLR_DYNAMIC_FIELDS_SUFFIX_MAP[descriptor_type])
+ )
return query_filter
@@ -292,7 +283,9 @@ def search_process_filter(query_filter, only_sounds_within_ids=False, only_sound
if 'geotag:"Intersects(' in query_filter:
# Replace geotag:"Intersects( )"
# with geotag:[", " TO " "]
- query_filter = re.sub(r'geotag:"Intersects\((.+?) (.+?) (.+?) (.+?)\)"', r'geotag:["\2,\1" TO "\4,\3"]', query_filter)
+ query_filter = re.sub(
+ r'geotag:"Intersects\((.+?) (.+?) (.+?) (.+?)\)"', r'geotag:["\2,\1" TO "\4,\3"]', query_filter
+ )
query_filter = search_filter_make_intersection(query_filter)
@@ -309,6 +302,7 @@ def search_process_filter(query_filter, only_sounds_within_ids=False, only_sound
class FreesoundSoundJsonEncoder(json.JSONEncoder):
+
def default(self, value):
if isinstance(value, datetime):
return value.strftime('%Y-%m-%dT%H:%M:%S.000Z')
@@ -420,7 +414,9 @@ def search_process_filter(self, query_filter, only_sounds_within_ids=False, only
if 'geotag:"Intersects(' in query_filter:
# Replace geotag:"Intersects( )"
# with geotag:[", " TO " "]
- query_filter = re.sub('geotag:"Intersects\((.+?) (.+?) (.+?) (.+?)\)"', r'geotag:["\2,\1" TO "\4,\3"]', query_filter)
+ query_filter = re.sub(
+ 'geotag:"Intersects\((.+?) (.+?) (.+?) (.+?)\)"', r'geotag:["\2,\1" TO "\4,\3"]', query_filter
+ )
query_filter = search_filter_make_intersection(query_filter)
@@ -435,41 +431,57 @@ def search_process_filter(self, query_filter, only_sounds_within_ids=False, only
return query_filter
- def search_sounds(self, textual_query='', query_fields=None, query_filter='', offset=0, current_page=None,
- num_sounds=settings.SOUNDS_PER_PAGE, sort=settings.SEARCH_SOUNDS_SORT_OPTION_AUTOMATIC,
- group_by_pack=False, num_sounds_per_pack_group=1, facets=None, only_sounds_with_pack=False,
- only_sounds_within_ids=False, group_counts_as_one_in_facets=False):
+ def search_sounds(
+ self,
+ textual_query='',
+ query_fields=None,
+ query_filter='',
+ offset=0,
+ current_page=None,
+ num_sounds=settings.SOUNDS_PER_PAGE,
+ sort=settings.SEARCH_SOUNDS_SORT_OPTION_AUTOMATIC,
+ group_by_pack=False,
+ num_sounds_per_pack_group=1,
+ facets=None,
+ only_sounds_with_pack=False,
+ only_sounds_within_ids=False,
+ group_counts_as_one_in_facets=False
+ ):
query = SolrQuery()
-
# Process search fields: replace "db" field names by solr field names and set default weights if needed
if query_fields is None:
# If no fields provided, use the default
query_fields = settings.SEARCH_SOUNDS_DEFAULT_FIELD_WEIGHTS
if isinstance(query_fields, list):
- query_fields = [add_solr_suffix_to_dynamic_fieldname(FIELD_NAMES_MAP.get(field, field)) for field in query_fields]
+ query_fields = [
+ add_solr_suffix_to_dynamic_fieldname(FIELD_NAMES_MAP.get(field, field)) for field in query_fields
+ ]
elif isinstance(query_fields, dict):
# Also remove fields with weight <= 0
query_fields = [(add_solr_suffix_to_dynamic_fieldname(FIELD_NAMES_MAP.get(field, field)), weight)
- for field, weight in query_fields.items() if weight > 0]
+ for field, weight in query_fields.items()
+ if weight > 0]
# Set main query options
query.set_dismax_query(textual_query, query_fields=query_fields)
# Process filter
- query_filter = self.search_process_filter(query_filter,
- only_sounds_within_ids=only_sounds_within_ids,
- only_sounds_with_pack=only_sounds_with_pack)
+ query_filter = self.search_process_filter(
+ query_filter, only_sounds_within_ids=only_sounds_within_ids, only_sounds_with_pack=only_sounds_with_pack
+ )
# Set other query options
if current_page is not None:
offset = (current_page - 1) * num_sounds
- query.set_query_options(start=offset,
- rows=num_sounds,
- field_list=["id", "score"], # We only want the sound IDs of the results as we load data from DB
- filter_query=query_filter,
- sort=search_process_sort(sort))
+ query.set_query_options(
+ start=offset,
+ rows=num_sounds,
+ field_list=["id", "score"], # We only want the sound IDs of the results as we load data from DB
+ filter_query=query_filter,
+ sort=search_process_sort(sort)
+ )
# Configure facets
if facets is not None:
@@ -485,9 +497,10 @@ def search_sounds(self, textual_query='', query_fields=None, query_filter='', of
query.set_group_options(
group_func=None,
group_query=None,
- group_rows=10, # TODO: if limit is lower than rows and start=0, this should probably be equal to limit
+ group_rows=10, # TODO: if limit is lower than rows and start=0, this should probably be equal to limit
group_start=0,
- group_limit=num_sounds_per_pack_group, # This is the number of documents that will be returned for each group.
+ group_limit=
+ num_sounds_per_pack_group, # This is the number of documents that will be returned for each group.
group_offset=0,
group_sort=None,
group_sort_ingroup=None,
@@ -495,7 +508,8 @@ def search_sounds(self, textual_query='', query_fields=None, query_filter='', of
group_main=False,
group_num_groups=True,
group_cache_percent=0,
- group_truncate=group_counts_as_one_in_facets)
+ group_truncate=group_counts_as_one_in_facets
+ )
# Do the query!
# Note: we create a SearchResults with the same members as SolrResponseInterpreter (the response from .search()).
@@ -572,38 +586,42 @@ def forum_post_exists_in_index(self, forum_post_object_or_id):
response = self.search_forum_posts(query_filter=f'id:{post_id}', offset=0, num_posts=1)
return response.num_found > 0
- def search_forum_posts(self, textual_query='', query_filter='', sort=settings.SEARCH_FORUM_SORT_DEFAULT,
- offset=0, current_page=None, num_posts=settings.FORUM_POSTS_PER_PAGE, group_by_thread=True):
+ def search_forum_posts(
+ self,
+ textual_query='',
+ query_filter='',
+ sort=settings.SEARCH_FORUM_SORT_DEFAULT,
+ offset=0,
+ current_page=None,
+ num_posts=settings.FORUM_POSTS_PER_PAGE,
+ group_by_thread=True
+ ):
query = SolrQuery()
- query.set_dismax_query(textual_query, query_fields=[("thread_title", 4),
- ("post_body", 3),
- ("thread_author", 3),
- ("post_author", 3),
- ("forum_name", 2)])
- query.set_highlighting_options_default(field_list=["post_body"],
- fragment_size=200,
- alternate_field="post_body",
- require_field_match=False,
- pre="",
- post="")
+ query.set_dismax_query(
+ textual_query,
+ query_fields=[("thread_title", 4), ("post_body", 3), ("thread_author", 3), ("post_author", 3),
+ ("forum_name", 2)]
+ )
+ query.set_highlighting_options_default(
+ field_list=["post_body"],
+ fragment_size=200,
+ alternate_field="post_body",
+ require_field_match=False,
+ pre="",
+ post=""
+ )
if current_page is not None:
offset = (current_page - 1) * num_posts
- query.set_query_options(start=offset,
- rows=num_posts,
- field_list=["id",
- "score",
- "forum_name",
- "forum_name_slug",
- "thread_id",
- "thread_title",
- "thread_author",
- "thread_created",
- "post_body",
- "post_author",
- "post_created",
- "num_posts"],
- filter_query=query_filter,
- sort=search_process_sort(sort, forum=True))
+ query.set_query_options(
+ start=offset,
+ rows=num_posts,
+ field_list=[
+ "id", "score", "forum_name", "forum_name_slug", "thread_id", "thread_title", "thread_author",
+ "thread_created", "post_body", "post_author", "post_created", "num_posts"
+ ],
+ filter_query=query_filter,
+ sort=search_process_sort(sort, forum=True)
+ )
if group_by_thread:
query.set_group_field("thread_title_grouped")
diff --git a/utils/search/backends/solr9pysolr.py b/utils/search/backends/solr9pysolr.py
index 396b6aa79..420936049 100644
--- a/utils/search/backends/solr9pysolr.py
+++ b/utils/search/backends/solr9pysolr.py
@@ -31,6 +31,7 @@
class Solr9PySolrSearchEngine(solr555pysolr.Solr555PySolrSearchEngine):
+
def __init__(self, sounds_index_url=None, forum_index_url=None):
if sounds_index_url is None:
sounds_index_url = SOLR_SOUNDS_URL
@@ -39,7 +40,6 @@ def __init__(self, sounds_index_url=None, forum_index_url=None):
self.sounds_index_url = sounds_index_url
self.forum_index_url = forum_index_url
-
def get_sounds_index(self):
if self.sounds_index is None:
self.sounds_index = pysolr.Solr(
@@ -60,7 +60,6 @@ def get_forum_index(self):
)
return self.forum_index
-
def search_process_filter(self, query_filter, only_sounds_within_ids=False, only_sounds_with_pack=False):
"""Process the filter to make a number of adjustments
@@ -98,7 +97,9 @@ def search_process_filter(self, query_filter, only_sounds_within_ids=False, only
if 'geotag:"Intersects(' in query_filter:
# Replace geotag:"Intersects( )"
# with geotag:[", " TO " "]
- query_filter = re.sub('geotag:"Intersects\((.+?) (.+?) (.+?) (.+?)\)"', r'geotag:["\2,\1" TO "\4,\3"]', query_filter)
+ query_filter = re.sub(
+ 'geotag:"Intersects\((.+?) (.+?) (.+?) (.+?)\)"', r'geotag:["\2,\1" TO "\4,\3"]', query_filter
+ )
query_filter = solr555pysolr.search_filter_make_intersection(query_filter)
@@ -111,4 +112,4 @@ def search_process_filter(self, query_filter, only_sounds_within_ids=False, only
else:
query_filter = '({})'.format(sounds_within_ids_filter)
- return query_filter
\ No newline at end of file
+ return query_filter
diff --git a/utils/search/backends/solr_common.py b/utils/search/backends/solr_common.py
index 7128d6aa9..17de15eb3 100644
--- a/utils/search/backends/solr_common.py
+++ b/utils/search/backends/solr_common.py
@@ -43,8 +43,18 @@ def __init__(self, debug_query=None):
def set_query(self, query):
self.params['q'] = query
- def set_dismax_query(self, query, query_fields=None, minimum_match=None, phrase_fields=None, phrase_slop=None,
- query_phrase_slop=None, tie_breaker=None, boost_query=None, boost_functions=None):
+ def set_dismax_query(
+ self,
+ query,
+ query_fields=None,
+ minimum_match=None,
+ phrase_fields=None,
+ phrase_slop=None,
+ query_phrase_slop=None,
+ tie_breaker=None,
+ boost_query=None,
+ boost_functions=None
+ ):
"""Created a dismax query: http://wiki.apache.org/solr/DisMaxRequestHandler
The DisMaxRequestHandler is designed to process simple user entered phrases (without heavy syntax) and search for the individual words
across several fields using different weighting (boosts) based on the significance of each field. Additional options let you influence
@@ -112,8 +122,16 @@ def set_facet_query(self, query):
self.params['facet.query'] = query
# set global faceting options for regular fields
- def set_facet_options_default(self, limit=None, offset=None, prefix=None, sort=None, mincount=None,
- count_missing=None, enum_cache_mindf=None):
+ def set_facet_options_default(
+ self,
+ limit=None,
+ offset=None,
+ prefix=None,
+ sort=None,
+ mincount=None,
+ count_missing=None,
+ enum_cache_mindf=None
+ ):
"""Set default facet options: these will be applied to all facets, but overridden by particular options (see set_facet_options())
        prefix: return only facets with this prefix
sort: sort facets, True or False
@@ -132,8 +150,9 @@ def set_facet_options_default(self, limit=None, offset=None, prefix=None, sort=N
self.params['facet.enum.cache.minDf'] = enum_cache_mindf
# set faceting options for one particular field
- def set_facet_options(self, field, prefix=None, sort=None, limit=None, offset=None, mincount=None,
- count_missing=None):
+ def set_facet_options(
+ self, field, prefix=None, sort=None, limit=None, offset=None, mincount=None, count_missing=None
+ ):
"""Set facet options for one particular field... see set_facet_options_default() for parameter explanation
"""
try:
@@ -187,11 +206,24 @@ def set_date_facet_options(self, field, start=None, end=None, gap=None, hardened
self.params[f'f.{field}.date.hardend'] = hardened
self.params[f'f.{field}.date.other'] = count_other
- def set_highlighting_options_default(self, field_list=None, snippets=None, fragment_size=None,
- merge_contiguous=None, require_field_match=None, max_analyzed_chars=None,
- alternate_field=None, max_alternate_field_length=None, pre=None, post=None,
- fragmenter=None, use_phrase_highlighter=None, regex_slop=None,
- regex_pattern=None, regex_max_analyzed_chars=None):
+ def set_highlighting_options_default(
+ self,
+ field_list=None,
+ snippets=None,
+ fragment_size=None,
+ merge_contiguous=None,
+ require_field_match=None,
+ max_analyzed_chars=None,
+ alternate_field=None,
+ max_alternate_field_length=None,
+ pre=None,
+ post=None,
+ fragmenter=None,
+ use_phrase_highlighter=None,
+ regex_slop=None,
+ regex_pattern=None,
+ regex_max_analyzed_chars=None
+ ):
"""Set default highlighting options: these will be applied to all highlighting, but overridden by particular options (see set_highlighting_options())
field_list: list of fields to highlight space separated
snippets: number of snippets to generate
@@ -227,8 +259,16 @@ def set_highlighting_options_default(self, field_list=None, snippets=None, fragm
self.params['hl.regex.pattern'] = regex_pattern
self.params['hl.regex.maxAnalyzedChars'] = regex_max_analyzed_chars
- def set_highlighting_options(self, field, snippets=None, fragment_size=None, merge_contiguous=None,
- alternate_field=None, pre=None, post=None):
+ def set_highlighting_options(
+ self,
+ field,
+ snippets=None,
+ fragment_size=None,
+ merge_contiguous=None,
+ alternate_field=None,
+ pre=None,
+ post=None
+ ):
"""Set highlighting options for one particular field... see set_highlighting_options_default() for parameter explanation
"""
try:
@@ -250,9 +290,22 @@ def __str__(self):
def set_group_field(self, group_field=None):
self.params['group.field'] = group_field
- def set_group_options(self, group_func=None, group_query=None, group_rows=10, group_start=0, group_limit=1,
- group_offset=0, group_sort=None, group_sort_ingroup=None, group_format='grouped',
- group_main=False, group_num_groups=True, group_cache_percent=0, group_truncate=False):
+ def set_group_options(
+ self,
+ group_func=None,
+ group_query=None,
+ group_rows=10,
+ group_start=0,
+ group_limit=1,
+ group_offset=0,
+ group_sort=None,
+ group_sort_ingroup=None,
+ group_format='grouped',
+ group_main=False,
+ group_num_groups=True,
+ group_cache_percent=0,
+ group_truncate=False
+ ):
self.params['group'] = True
self.params['group.func'] = group_func
self.params['group.query'] = group_query
@@ -278,6 +331,7 @@ def as_kwargs(self):
class SolrResponseInterpreter:
+
def __init__(self, response, next_page_query=None):
if "grouped" in response:
if "thread_title_grouped" in list(response["grouped"].keys()):
@@ -308,7 +362,6 @@ def __init__(self, response, next_page_query=None):
self.facets = response["facet_counts"]["facet_fields"]
except KeyError:
self.facets = {}
-
"""Facets are given in a list: [facet, number, facet, number, None, number] where the last one
is the missing field count. Converting all of them to a dict for easier usage:
{facet:number, facet:number, ..., None:number}
diff --git a/utils/search/backends/test_search_engine_backend.py b/utils/search/backends/test_search_engine_backend.py
index eba5f80cb..7072277f8 100644
--- a/utils/search/backends/test_search_engine_backend.py
+++ b/utils/search/backends/test_search_engine_backend.py
@@ -18,7 +18,6 @@
# See AUTHORS file.
#
-
import datetime
import logging
import os
@@ -32,15 +31,16 @@
from sounds.models import Sound, Download
from utils.search import get_search_engine
-
console_logger = logging.getLogger("console")
+
def assert_and_continue(expression, error_message):
if not expression:
console_logger.info(f'Error: {error_message}')
class TestSearchEngineBackend():
+
def __init__(self, backend_name, write_output, sounds_index_url=None, forum_index_url=None):
self.search_engine = get_search_engine(
backend_class=backend_name, sounds_index_url=sounds_index_url, forum_index_url=forum_index_url
@@ -50,8 +50,9 @@ def __init__(self, backend_name, write_output, sounds_index_url=None, forum_inde
if not os.path.exists(base_dir):
os.makedirs(base_dir)
date_label = datetime.datetime.today().strftime('%Y%m%d_%H%M')
- self.output_file = open(os.path.join(base_dir, '{}_test_results_{}.txt'
- .format(date_label, backend_name)), 'w')
+ self.output_file = open(
+ os.path.join(base_dir, '{}_test_results_{}.txt'.format(date_label, backend_name)), 'w'
+ )
self.output_file.write(f'TESTING SEARCH ENGINE BACKEND: {backend_name}\n')
else:
self.output_file = None
@@ -59,13 +60,12 @@ def __init__(self, backend_name, write_output, sounds_index_url=None, forum_inde
def save_query_results(self, results, query_data, elapsed_time, query_type):
self.output_file.write(f'\n* QUERY {query_type}: {str(query_data)} (took {elapsed_time:.2f} seconds)\n')
self.output_file.write(
- 'num_found: {}\nnon_grouped_number_of_results: {}\nq_time: {}\nfacets: {}\nhighlighting: {}\ndocs:\n'.format(
- results.num_found,
- results.non_grouped_number_of_results,
- results.q_time,
- results.facets,
+ 'num_found: {}\nnon_grouped_number_of_results: {}\nq_time: {}\nfacets: {}\nhighlighting: {}\ndocs:\n'.
+ format(
+ results.num_found, results.non_grouped_number_of_results, results.q_time, results.facets,
results.highlighting
- ))
+ )
+ )
for count, doc in enumerate(results.docs):
self.output_file.write(f"\t{count + 1}. {doc['id']}: {doc}\n")
@@ -84,7 +84,9 @@ def run_sounds_query_and_save_results(self, query_data):
end = time.time()
# Assert that the result is of the expected type
- assert_and_continue(isinstance(results, utils.search.SearchResults), 'Returned search results object of wrong type')
+ assert_and_continue(
+ isinstance(results, utils.search.SearchResults), 'Returned search results object of wrong type'
+ )
        # Save results to file so that later we can compare between different search engine backends
if self.output_file:
@@ -107,7 +109,9 @@ def run_forum_query_and_save_results(self, query_data):
end = time.time()
# Assert that the result is of the expected type
- assert_and_continue(isinstance(results, utils.search.SearchResults), 'Returned search results object of wrong type')
+ assert_and_continue(
+ isinstance(results, utils.search.SearchResults), 'Returned search results object of wrong type'
+ )
        # Save results to file so that later we can compare between different search engine backends
if self.output_file:
@@ -123,18 +127,26 @@ def sound_check_mandatory_doc_fields(self):
results = self.run_sounds_query_and_save_results(dict(num_sounds=1, group_by_pack=False))
for result in results.docs:
for field in mandatory_fields:
- assert_and_continue(field in result,
- 'Mandatory field {} not present in result when not grouping (available fields: {})'
- .format(field, ', '.join(result.keys())))
+ assert_and_continue(
+ field in result,
+ 'Mandatory field {} not present in result when not grouping (available fields: {})'.format(
+ field, ', '.join(result.keys())
+ )
+ )
# Check the case of grouped search results
- mandatory_fields = ['id', 'score', 'group_name', 'n_more_in_group', 'group_docs']
- results = self.run_sounds_query_and_save_results(dict(num_sounds=1, group_by_pack=True, only_sounds_with_pack=True))
+ mandatory_fields = ['id', 'score', 'group_name', 'n_more_in_group', 'group_docs']
+ results = self.run_sounds_query_and_save_results(
+ dict(num_sounds=1, group_by_pack=True, only_sounds_with_pack=True)
+ )
for result in results.docs:
for field in mandatory_fields:
- assert_and_continue(field in result,
- 'Mandatory field {} not present in result when grouping by pack (available fields: {})'
- .format(field, ', '.join(result.keys())))
+ assert_and_continue(
+ field in result,
+ 'Mandatory field {} not present in result when grouping by pack (available fields: {})'.format(
+ field, ', '.join(result.keys())
+ )
+ )
def sound_check_random_sound(self):
# Get random sound IDs and make sure these are different
@@ -143,8 +155,9 @@ def sound_check_random_sound(self):
last_id = 0
for i in range(0, 10):
new_id = self.search_engine.get_random_sound_id()
- assert_and_continue(new_id != last_id,
- 'Repeated sound IDs in subsequent calls to "get random sound id" method')
+ assert_and_continue(
+ new_id != last_id, 'Repeated sound IDs in subsequent calls to "get random sound id" method'
+ )
last_id = new_id
def sound_check_offsets(self):
@@ -155,22 +168,23 @@ def sound_check_offsets(self):
offset_1_ids = [r['id'] for r in results.docs]
assert_and_continue(len(offset_0_ids) == 10, 'Unexpected num_sounds/offset/current_page behaviour')
assert_and_continue(len(offset_1_ids) == 10, 'Unexpected num_sounds/offset/current_page behaviour')
- assert_and_continue(offset_0_ids[1:] == offset_1_ids[:-1],
- 'Unexpected num_sounds/offset/current_page behaviour')
+ assert_and_continue(
+ offset_0_ids[1:] == offset_1_ids[:-1], 'Unexpected num_sounds/offset/current_page behaviour'
+ )
results = self.run_sounds_query_and_save_results(dict(num_sounds=1, offset=4))
offset_4_num_sounds_1_ids = [r['id'] for r in results.docs]
- assert_and_continue(len(offset_4_num_sounds_1_ids) == 1,
- 'Unexpected num_sounds/offset/current_page behaviour')
- assert_and_continue(offset_0_ids[4] == offset_4_num_sounds_1_ids[0],
- 'Unexpected num_sounds/offset/current_page behaviour')
+ assert_and_continue(len(offset_4_num_sounds_1_ids) == 1, 'Unexpected num_sounds/offset/current_page behaviour')
+ assert_and_continue(
+ offset_0_ids[4] == offset_4_num_sounds_1_ids[0], 'Unexpected num_sounds/offset/current_page behaviour'
+ )
results = self.run_sounds_query_and_save_results(dict(num_sounds=5, current_page=2))
page_2_num_sounds_5_ids = [r['id'] for r in results.docs]
- assert_and_continue(len(page_2_num_sounds_5_ids) == 5,
- 'Unexpected num_sounds/offset/current_page behaviour')
- assert_and_continue(page_2_num_sounds_5_ids == offset_0_ids[5:],
- 'Unexpected num_sounds/offset/current_page behaviour')
+ assert_and_continue(len(page_2_num_sounds_5_ids) == 5, 'Unexpected num_sounds/offset/current_page behaviour')
+ assert_and_continue(
+ page_2_num_sounds_5_ids == offset_0_ids[5:], 'Unexpected num_sounds/offset/current_page behaviour'
+ )
def sound_check_empty_query(self):
# Test empty query returns results
@@ -181,13 +195,12 @@ def sound_check_sort_parameter(self, test_sound_ids):
# Test sort parameter (only use sounds within test_sound_ids to make sure these were indexed "correctly")
# This also tests parameter only_sounds_within_ids
for sort_option_web in settings.SEARCH_SOUNDS_SORT_OPTIONS_WEB:
- results = self.run_sounds_query_and_save_results(dict(sort=sort_option_web,
- num_sounds=len(test_sound_ids),
- only_sounds_within_ids=test_sound_ids))
+ results = self.run_sounds_query_and_save_results(
+ dict(sort=sort_option_web, num_sounds=len(test_sound_ids), only_sounds_within_ids=test_sound_ids)
+ )
result_ids = [r['id'] for r in results.docs]
sounds = Sound.objects.ordered_ids(result_ids)
- assert_and_continue(sorted(test_sound_ids) == sorted(result_ids),
- 'only_sounds_within_ids not respected')
+ assert_and_continue(sorted(test_sound_ids) == sorted(result_ids), 'only_sounds_within_ids not respected')
# Assert that sorting criteria is preserved
for sound1, sound2 in zip(sounds[:-1], sounds[1:]):
@@ -195,37 +208,35 @@ def sound_check_sort_parameter(self, test_sound_ids):
# Nothing to test here as there's no expected result
pass
elif sort_option_web == settings.SEARCH_SOUNDS_SORT_OPTION_DOWNLOADS_MOST_FIRST:
- assert_and_continue(Download.objects.filter(sound=sound1).count() >=
- Download.objects.filter(sound=sound2).count(),
- f'Wrong ordering in {sort_option_web}')
+ assert_and_continue(
+ Download.objects.filter(sound=sound1).count() >= Download.objects.filter(sound=sound2).count(),
+ f'Wrong ordering in {sort_option_web}'
+ )
elif sort_option_web == settings.SEARCH_SOUNDS_SORT_OPTION_DOWNLOADS_LEAST_FIRST:
- assert_and_continue(Download.objects.filter(sound=sound1).count() <=
- Download.objects.filter(sound=sound2).count(),
- f'Wrong ordering in {sort_option_web}')
+ assert_and_continue(
+ Download.objects.filter(sound=sound1).count() <= Download.objects.filter(sound=sound2).count(),
+ f'Wrong ordering in {sort_option_web}'
+ )
elif sort_option_web == settings.SEARCH_SOUNDS_SORT_OPTION_DATE_OLD_FIRST:
- assert_and_continue(sound1.created <= sound2.created,
- f'Wrong ordering in {sort_option_web}')
+ assert_and_continue(sound1.created <= sound2.created, f'Wrong ordering in {sort_option_web}')
elif sort_option_web == settings.SEARCH_SOUNDS_SORT_OPTION_DATE_NEW_FIRST:
- assert_and_continue(sound1.created >= sound2.created,
- f'Wrong ordering in {sort_option_web}')
+ assert_and_continue(sound1.created >= sound2.created, f'Wrong ordering in {sort_option_web}')
elif sort_option_web == settings.SEARCH_SOUNDS_SORT_OPTION_RATING_LOWEST_FIRST:
- assert_and_continue(sound1.avg_rating <= sound2.avg_rating,
- f'Wrong ordering in {sort_option_web}')
+ assert_and_continue(sound1.avg_rating <= sound2.avg_rating, f'Wrong ordering in {sort_option_web}')
if sound1.avg_rating == sound2.avg_rating:
- assert_and_continue(sound1.num_ratings >= sound2.num_ratings,
- f'Wrong ordering in {sort_option_web}')
+ assert_and_continue(
+ sound1.num_ratings >= sound2.num_ratings, f'Wrong ordering in {sort_option_web}'
+ )
elif sort_option_web == settings.SEARCH_SOUNDS_SORT_OPTION_RATING_HIGHEST_FIRST:
- assert_and_continue(sound1.avg_rating >= sound2.avg_rating,
- f'Wrong ordering in {sort_option_web}')
+ assert_and_continue(sound1.avg_rating >= sound2.avg_rating, f'Wrong ordering in {sort_option_web}')
if sound1.avg_rating == sound2.avg_rating:
- assert_and_continue(sound1.num_ratings >= sound2.num_ratings,
- f'Wrong ordering in {sort_option_web}')
+ assert_and_continue(
+ sound1.num_ratings >= sound2.num_ratings, f'Wrong ordering in {sort_option_web}'
+ )
elif sort_option_web == settings.SEARCH_SOUNDS_SORT_OPTION_DURATION_LONG_FIRST:
- assert_and_continue(sound1.duration >= sound2.duration,
- f'Wrong ordering in {sort_option_web}')
+ assert_and_continue(sound1.duration >= sound2.duration, f'Wrong ordering in {sort_option_web}')
elif sort_option_web == settings.SEARCH_SOUNDS_SORT_OPTION_DURATION_SHORT_FIRST:
- assert_and_continue(sound1.duration <= sound2.duration,
- f'Wrong ordering in {sort_option_web}')
+ assert_and_continue(sound1.duration <= sound2.duration, f'Wrong ordering in {sort_option_web}')
def sound_check_group_by_pack(self):
# Test group by pack
@@ -250,8 +261,12 @@ def sound_check_sounds_with_pack(self):
def sound_check_facets(self):
# Test facets included in results
test_facet_options = {
- settings.SEARCH_SOUNDS_FIELD_USER_NAME: {'limit': 3},
- settings.SEARCH_SOUNDS_FIELD_SAMPLERATE: {'limit': 1},
+ settings.SEARCH_SOUNDS_FIELD_USER_NAME: {
+ 'limit': 3
+ },
+ settings.SEARCH_SOUNDS_FIELD_SAMPLERATE: {
+ 'limit': 1
+ },
settings.SEARCH_SOUNDS_FIELD_TYPE: {},
}
results = self.run_sounds_query_and_save_results(dict(facets=test_facet_options))
@@ -259,8 +274,10 @@ def sound_check_facets(self):
for facet_field, facet_options in test_facet_options.items():
assert_and_continue(facet_field in results.facets, f'Facet {facet_field} not found in facets')
if 'limit' in facet_options:
- assert_and_continue(len(results.facets[facet_field]) == facet_options['limit'],
- f'Wrong number of items in facet {facet_field}')
+ assert_and_continue(
+ len(results.facets[facet_field]) == facet_options['limit'],
+ f'Wrong number of items in facet {facet_field}'
+ )
# Test if no facets requested, no facets returned
results = self.run_sounds_query_and_save_results(dict())
@@ -289,7 +306,9 @@ def sound_check_get_user_tags(self, sound):
assert_and_continue(len(remaining_tags) == 0, "get_user_tags returned tags which the user hasn't tagged")
if self.output_file:
- self.output_file.write(f'\n* USER "{sound.user.username}" TOP TAGS FROM SEARCH ENGINE: {search_engine_tags}\n')
+ self.output_file.write(
+ f'\n* USER "{sound.user.username}" TOP TAGS FROM SEARCH ENGINE: {search_engine_tags}\n'
+ )
def sound_check_get_pack_tags(self, sounds):
"""
@@ -303,7 +322,9 @@ def sound_check_get_pack_tags(self, sounds):
target_sound = sound
break
- assert_and_continue(target_sound is not None, "Sample sounds dataset doesn't have any sounds with a pack and tags")
+ assert_and_continue(
+ target_sound is not None, "Sample sounds dataset doesn't have any sounds with a pack and tags"
+ )
if target_sound:
pack = target_sound.pack
all_sound_tags = []
@@ -320,9 +341,10 @@ def sound_check_get_pack_tags(self, sounds):
def test_search_enginge_backend_sounds(self):
# Get sounds for testing
- test_sound_ids = list(Sound.public
- .filter(is_index_dirty=False, num_ratings__gt=settings.MIN_NUMBER_RATINGS)
- .values_list('id', flat=True)[0:20])
+ test_sound_ids = list(
+ Sound.public.filter(is_index_dirty=False,
+ num_ratings__gt=settings.MIN_NUMBER_RATINGS).values_list('id', flat=True)[0:20]
+ )
sounds = list(Sound.objects.bulk_query_solr(test_sound_ids))
if len(sounds) < 20:
raise Exception("Can't test SearchEngine backend as there are not enough sounds for testing")
@@ -330,36 +352,44 @@ def test_search_enginge_backend_sounds(self):
# Remove sounds from the search index (in case sounds are there)
self.search_engine.remove_sounds_from_index(sounds)
for sound in sounds:
- assert_and_continue(not self.search_engine.sound_exists_in_index(sound),
- f'Sound ID {sound.id} should not be in the search index')
+ assert_and_continue(
+ not self.search_engine.sound_exists_in_index(sound),
+ f'Sound ID {sound.id} should not be in the search index'
+ )
# Index the sounds again
self.search_engine.add_sounds_to_index(sounds)
# Check that sounds are indexed (test with sound object and with ID)
for sound in sounds:
- assert_and_continue(self.search_engine.sound_exists_in_index(sound),
- f'Sound ID {sound.id} should be in the search index')
- assert_and_continue(self.search_engine.sound_exists_in_index(sound.id),
- f'Sound ID {sound.id} should be in the search index')
+ assert_and_continue(
+ self.search_engine.sound_exists_in_index(sound), f'Sound ID {sound.id} should be in the search index'
+ )
+ assert_and_continue(
+ self.search_engine.sound_exists_in_index(sound.id), f'Sound ID {sound.id} should be in the search index'
+ )
# Remove some sounds form the ones just indexed and check they do not exist
removed_sounds_by_sound_object = sounds[0:3]
self.search_engine.remove_sounds_from_index(removed_sounds_by_sound_object)
for sound in removed_sounds_by_sound_object:
- assert_and_continue(not self.search_engine.sound_exists_in_index(sound),
- f'Sound ID {sound.id} should not be in the search index')
+ assert_and_continue(
+ not self.search_engine.sound_exists_in_index(sound),
+ f'Sound ID {sound.id} should not be in the search index'
+ )
removed_sounds_by_sound_id = [s.id for s in sounds[3:6]]
self.search_engine.remove_sounds_from_index(removed_sounds_by_sound_id)
for sid in removed_sounds_by_sound_id:
- assert_and_continue(not self.search_engine.sound_exists_in_index(sid),
- f'Sound ID {sid} should not be in the search index')
+ assert_and_continue(
+ not self.search_engine.sound_exists_in_index(sid), f'Sound ID {sid} should not be in the search index'
+ )
# Check that all sounds which were not removed are still in the index
remaining_sounds = sounds[6:]
for sound in remaining_sounds:
- assert_and_continue(self.search_engine.sound_exists_in_index(sound),
- f'Sound ID {sound.id} should be in search index')
+ assert_and_continue(
+ self.search_engine.sound_exists_in_index(sound), f'Sound ID {sound.id} should be in search index'
+ )
# Re-index all sounds to leave index in "correct" state
self.search_engine.add_sounds_to_index(sounds)
@@ -376,31 +406,38 @@ def test_search_enginge_backend_sounds(self):
self.sound_check_get_user_tags(sounds[0])
self.sound_check_get_pack_tags(sounds)
- console_logger.info('Testing of sound search methods finished. You might want to run the '
- 'reindex_search_engine_sounds -c command to make sure the index is left in a correct '
- 'state after having run these tests')
+ console_logger.info(
+ 'Testing of sound search methods finished. You might want to run the '
+ 'reindex_search_engine_sounds -c command to make sure the index is left in a correct '
+ 'state after having run these tests'
+ )
def forum_check_mandatory_doc_fields(self):
# Check that returned forum posts (docs) from search engine include the mandatory fields
-
+
# Check the case of non-grouped search results
mandatory_fields = ['id', 'score', 'post_body', 'thread_author', 'forum_name', 'forum_name_slug']
results = self.run_forum_query_and_save_results(dict(num_posts=1, group_by_thread=False))
for result in results.docs:
for field in mandatory_fields:
- assert_and_continue(field in result,
- 'Mandatory field {} not present in result when not grouping by thread (available fields: {})'
- .format(field, ', '.join(result.keys())))
+ assert_and_continue(
+ field in result,
+ 'Mandatory field {} not present in result when not grouping by thread (available fields: {})'.
+ format(field, ', '.join(result.keys()))
+ )
# Check the case of grouped search results
- mandatory_fields = ['id', 'score', 'group_name', 'n_more_in_group', 'group_docs']
+ mandatory_fields = ['id', 'score', 'group_name', 'n_more_in_group', 'group_docs']
results = self.run_forum_query_and_save_results(dict(num_posts=1, group_by_thread=True))
for result in results.docs:
for field in mandatory_fields:
- assert_and_continue(field in result,
- 'Mandatory field {} not present in result when grouping by thread (available fields: {})'
- .format(field, ', '.join(result.keys())))
-
+ assert_and_continue(
+ field in result,
+ 'Mandatory field {} not present in result when grouping by thread (available fields: {})'.format(
+ field, ', '.join(result.keys())
+ )
+ )
+
def forum_check_offsets(self):
# Test num_posts/offset/current_page parameters
results = self.run_forum_query_and_save_results(dict(num_posts=10, offset=0))
@@ -409,32 +446,36 @@ def forum_check_offsets(self):
offset_1_ids = [r['id'] for r in results.docs]
assert_and_continue(len(offset_0_ids) == 10, 'Unexpected num_posts/offset/current_page behaviour 1')
assert_and_continue(len(offset_1_ids) == 10, 'Unexpected num_posts/offset/current_page behaviour 2')
- assert_and_continue(offset_0_ids[1:] == offset_1_ids[:-1],
- 'Unexpected num_posts/offset/current_page behaviour 3')
+ assert_and_continue(
+ offset_0_ids[1:] == offset_1_ids[:-1], 'Unexpected num_posts/offset/current_page behaviour 3'
+ )
results = self.run_forum_query_and_save_results(dict(num_posts=1, offset=4))
offset_4_num_sounds_1_ids = [r['id'] for r in results.docs]
- assert_and_continue(len(offset_4_num_sounds_1_ids) == 1,
- 'Unexpected num_posts/offset/current_page behaviour 4')
- assert_and_continue(offset_0_ids[4] == offset_4_num_sounds_1_ids[0],
- 'Unexpected num_posts/offset/current_page behaviour 5')
+ assert_and_continue(len(offset_4_num_sounds_1_ids) == 1, 'Unexpected num_posts/offset/current_page behaviour 4')
+ assert_and_continue(
+ offset_0_ids[4] == offset_4_num_sounds_1_ids[0], 'Unexpected num_posts/offset/current_page behaviour 5'
+ )
results = self.run_forum_query_and_save_results(dict(num_posts=5, current_page=2))
page_2_num_sounds_5_ids = [r['id'] for r in results.docs]
- assert_and_continue(len(page_2_num_sounds_5_ids) == 5,
- 'Unexpected num_posts/offset/current_page behaviour 6')
- assert_and_continue(page_2_num_sounds_5_ids == offset_0_ids[5:],
- 'Unexpected num_posts/offset/current_page behaviour 7')
+ assert_and_continue(len(page_2_num_sounds_5_ids) == 5, 'Unexpected num_posts/offset/current_page behaviour 6')
+ assert_and_continue(
+ page_2_num_sounds_5_ids == offset_0_ids[5:], 'Unexpected num_posts/offset/current_page behaviour 7'
+ )
# Test that results are sorted by newest posts first. Also assess results have the required fields
- expected_fields = ["id", "forum_name", "forum_name_slug", "thread_id", "thread_title", "thread_author",
- "thread_created", "post_body", "post_author", "post_created", "num_posts"]
+ expected_fields = [
+ "id", "forum_name", "forum_name_slug", "thread_id", "thread_title", "thread_author", "thread_created",
+ "post_body", "post_author", "post_created", "num_posts"
+ ]
results = self.run_forum_query_and_save_results(dict(group_by_thread=False))
for result1, result2 in zip(results.docs[:-1], results.docs[1:]):
for field in expected_fields:
assert_and_continue(field in result1, f"{field} not present in result ID {result1['id']}")
- assert_and_continue(result1["thread_created"] >= result2["thread_created"],
- 'Wrong sorting in query results')
+ assert_and_continue(
+ result1["thread_created"] >= result2["thread_created"], 'Wrong sorting in query results'
+ )
def forum_check_empty_query(self):
# Test empty query returns results
@@ -466,7 +507,7 @@ def forum_check_extra_queries(self):
# later manually compared with results from other search backends
self.run_forum_query_and_save_results(dict(textual_query='microphone'))
self.run_forum_query_and_save_results(dict(textual_query='technique'))
- self.run_forum_query_and_save_results(dict(textual_query='freesound'))
+ self.run_forum_query_and_save_results(dict(textual_query='freesound'))
def test_search_enginge_backend_forum(self):
# Get posts for testing
@@ -478,36 +519,46 @@ def test_search_enginge_backend_forum(self):
# Remove posts from the search index (in case posts are there)
self.search_engine.remove_forum_posts_from_index(posts)
for post in posts:
- assert_and_continue(not self.search_engine.forum_post_exists_in_index(post),
- f'Post ID {post.id} should not be in the search index')
+ assert_and_continue(
+ not self.search_engine.forum_post_exists_in_index(post),
+ f'Post ID {post.id} should not be in the search index'
+ )
# Index the posts again
self.search_engine.add_forum_posts_to_index(posts)
# Check that posts are indexed (test with sound object and with ID)
for post in posts:
- assert_and_continue(self.search_engine.forum_post_exists_in_index(post),
- f'Post ID {post.id} should be in the search index')
- assert_and_continue(self.search_engine.forum_post_exists_in_index(post.id),
- f'Post ID {post.id} should be in the search index')
+ assert_and_continue(
+ self.search_engine.forum_post_exists_in_index(post), f'Post ID {post.id} should be in the search index'
+ )
+ assert_and_continue(
+ self.search_engine.forum_post_exists_in_index(post.id),
+ f'Post ID {post.id} should be in the search index'
+ )
# Remove some posts form the ones just indexed and check they do not exist
removed_posts_by_post_object = posts[0:3]
self.search_engine.remove_forum_posts_from_index(removed_posts_by_post_object)
for post in removed_posts_by_post_object:
- assert_and_continue(not self.search_engine.forum_post_exists_in_index(post),
- f'Post ID {post.id} should not be in the search index')
+ assert_and_continue(
+ not self.search_engine.forum_post_exists_in_index(post),
+ f'Post ID {post.id} should not be in the search index'
+ )
removed_posts_by_post_id = [s.id for s in posts[3:6]]
self.search_engine.remove_forum_posts_from_index(removed_posts_by_post_id)
for pid in removed_posts_by_post_id:
- assert_and_continue(not self.search_engine.forum_post_exists_in_index(pid),
- f'Post ID {pid} should not be in the search index')
+ assert_and_continue(
+ not self.search_engine.forum_post_exists_in_index(pid),
+ f'Post ID {pid} should not be in the search index'
+ )
# Check that all posts which were not removed are still in the index
remaining_posts = posts[6:]
for post in remaining_posts:
- assert_and_continue(self.search_engine.forum_post_exists_in_index(post),
- f'Post ID {post.id} should be in search index')
+ assert_and_continue(
+ self.search_engine.forum_post_exists_in_index(post), f'Post ID {post.id} should be in search index'
+ )
# Re-index all posts to leave index in "correct" state
self.search_engine.add_forum_posts_to_index(posts)
@@ -519,6 +570,8 @@ def test_search_enginge_backend_forum(self):
self.forum_check_highlighting()
self.forum_check_extra_queries()
- console_logger.info('Testing of forum search methods finished. You might want to run the '
- 'reindex_search_engine_forum -c command to make sure the index is left in a correct '
- 'state after having run these tests')
+ console_logger.info(
+ 'Testing of forum search methods finished. You might want to run the '
+ 'reindex_search_engine_forum -c command to make sure the index is left in a correct '
+ 'state after having run these tests'
+ )
diff --git a/utils/search/backends/tests/test_solr555pysolr.py b/utils/search/backends/tests/test_solr555pysolr.py
index 63a7a5f11..7c5346401 100644
--- a/utils/search/backends/tests/test_solr555pysolr.py
+++ b/utils/search/backends/tests/test_solr555pysolr.py
@@ -2,7 +2,9 @@
from utils.search.backends import solr555pysolr
+
class Solr555PySolrTest(TestCase):
+
def test_search_filter_make_intersection(self):
filter_query = "username:alastairp"
diff --git a/utils/search/backends/tests/test_solr_common.py b/utils/search/backends/tests/test_solr_common.py
index 337b5aadb..0d7672e51 100644
--- a/utils/search/backends/tests/test_solr_common.py
+++ b/utils/search/backends/tests/test_solr_common.py
@@ -3,7 +3,9 @@
from utils.search.backends import solr_common
from utils.search.backends import solr555pysolr
+
class SolrCommonTest(TestCase):
+
def test_search_filter_make_intersection(self):
filter_query = "username:alastairp"
diff --git a/utils/search/lucene_parser.py b/utils/search/lucene_parser.py
index c531ad4a8..fa01e71ba 100644
--- a/utils/search/lucene_parser.py
+++ b/utils/search/lucene_parser.py
@@ -22,7 +22,6 @@
import pyparsing as pp
from pyparsing import pyparsing_common as ppc
-
pp.ParserElement.enablePackrat()
COLON, LBRACK, RBRACK, LBRACE, RBRACE, TILDE, CARAT = list(map(pp.Literal, ":[]{}~^"))
@@ -32,12 +31,8 @@
expression = pp.Forward()
-valid_word = pp.Regex(
- r'([a-zA-Z0-9*_+.-]|\\\\|\\([+\-!(){}\[\]^"~*?:]|\|\||&&))+'
-).setName("word")
-valid_word.setParseAction(
- lambda t: t[0].replace("\\\\", chr(127)).replace("\\", "").replace(chr(127), "\\")
-)
+valid_word = pp.Regex(r'([a-zA-Z0-9*_+.-]|\\\\|\\([+\-!(){}\[\]^"~*?:]|\|\||&&))+').setName("word")
+valid_word.setParseAction(lambda t: t[0].replace("\\\\", chr(127)).replace("\\", "").replace(chr(127), "\\"))
string = pp.QuotedString('"', unquoteResults=False)
alphanums_plus = pp.alphanums + '_'
@@ -62,9 +57,8 @@
string_expr = pp.Group(string + proximity_modifier) | string
word_expr = pp.Group(valid_word + fuzzy_modifier) | valid_word
term << (
- pp.Optional(field_name("field") + COLON)
- + (word_expr | string_expr | range_search | pp.Group(LPAR + expression + RPAR))
- + pp.Optional(boost)
+ pp.Optional(field_name("field") + COLON) +
+ (word_expr | string_expr | range_search | pp.Group(LPAR + expression + RPAR)) + pp.Optional(boost)
)
term.setParseAction(lambda t: [t] if "field" in t or "boost" in t else None)
@@ -134,9 +128,7 @@ def parse_query_filter_string(filter_query):
filter_list_str = flatten_sub(filter_list_str)
# remove empty filter values
- filter_list_str = [
- filter_str for filter_str in filter_list_str if filter_str[-1] != ":"
- ]
+ filter_list_str = [filter_str for filter_str in filter_list_str if filter_str[-1] != ":"]
return filter_list_str
else:
return []
diff --git a/utils/search/search_forum.py b/utils/search/search_forum.py
index f05fcf109..0790d6572 100644
--- a/utils/search/search_forum.py
+++ b/utils/search/search_forum.py
@@ -85,8 +85,9 @@ def get_all_post_ids_from_search_engine(page_size=2000):
current_page = 1
try:
while solr_count is None or len(solr_ids) < solr_count:
- response = search_engine.search_forum_posts(query_filter='*:*', group_by_thread=False,
- offset=(current_page - 1) * page_size, num_posts=page_size)
+ response = search_engine.search_forum_posts(
+ query_filter='*:*', group_by_thread=False, offset=(current_page - 1) * page_size, num_posts=page_size
+ )
solr_ids += [element['id'] for element in response.docs]
solr_count = response.num_found
current_page += 1
diff --git a/utils/search/search_sounds.py b/utils/search/search_sounds.py
index bea52458f..3f6e9fb2e 100644
--- a/utils/search/search_sounds.py
+++ b/utils/search/search_sounds.py
@@ -52,7 +52,8 @@ def should_use_compact_mode(request):
request.user.profile.use_compact_mode = False
request.user.profile.save()
return False
-
+
+
def contains_active_advanced_search_filters(request, query_params, extra_vars):
duration_filter_is_default = True
if 'duration:' in query_params['query_filter']:
@@ -102,12 +103,12 @@ def search_prepare_parameters(request):
# If the query is filtered by pack, do not collapse sounds of the same pack (makes no sense)
# If the query is through AJAX (for sources remix editing), do not collapse by pack
- group_by_pack = request.GET.get("g", "1") == "1" # Group by default
+ group_by_pack = request.GET.get("g", "1") == "1" # Group by default
if "pack" in filter_query or request.GET.get("ajax", "") == "1":
group_by_pack = False
# If the query is filtered by pack, do not add the "only sounds with pack" filter (makes no sense)
- only_sounds_with_pack = request.GET.get("only_p", "0") == "1" # By default, do not limit to sounds with pack
+ only_sounds_with_pack = request.GET.get("only_p", "0") == "1" # By default, do not limit to sounds with pack
if "pack" in filter_query:
only_sounds_with_pack = False
@@ -189,7 +190,7 @@ def search_prepare_parameters(request):
custom_field_weights = parse_weights_parameter(weights_parameter)
if custom_field_weights is not None:
field_weights = custom_field_weights
-
+
# parse query filter string and remove empty value fields
parsing_error = False
try:
@@ -207,7 +208,9 @@ def search_prepare_parameters(request):
# This because returning a large number of packs makes the search page very slow
num_sounds = settings.SOUNDS_PER_PAGE
else:
- num_sounds = settings.SOUNDS_PER_PAGE if not should_use_compact_mode(request) else settings.SOUNDS_PER_PAGE_COMPACT_MODE
+ num_sounds = settings.SOUNDS_PER_PAGE if not should_use_compact_mode(
+ request
+ ) else settings.SOUNDS_PER_PAGE_COMPACT_MODE
query_params = {
'textual_query': search_query,
@@ -284,7 +287,7 @@ def split_filter_query(filter_query, parsed_filters, cluster_id):
for filter_list_str in parsed_filters:
# filter_list_str is a list of str ['', ':', '"', '', '"']
filter_name = filter_list_str[0]
- if filter_name != "duration" and filter_name != "is_geotagged" and filter_name != "in_remix_group":
+ if filter_name != "duration" and filter_name != "is_geotagged" and filter_name != "in_remix_group":
valid_filter = True
filter_str = ''.join(filter_list_str)
filter_display = ''.join(filter_list_str)
@@ -292,10 +295,10 @@ def split_filter_query(filter_query, parsed_filters, cluster_id):
filter_value = filter_list_str[-1].rstrip('"')
# If pack does not contain "_" then it's not a valid pack filter
if "_" in filter_value:
- filter_display = "pack:"+ ''.join(filter_value.split("_")[1:])
+ filter_display = "pack:" + ''.join(filter_value.split("_")[1:])
else:
valid_filter = False
-
+
if valid_filter:
filter = {
'name': filter_display,
@@ -334,29 +337,29 @@ def remove_facet_filters(parsed_filters):
has_facet_filter (bool): boolean indicating if there exist facet filters in the processed string.
"""
facet_filter_strings = (
- "samplerate",
- "grouping_pack",
- "username",
- "tag",
- "bitrate",
- "bitdepth",
- "type",
- "channels",
+ "samplerate",
+ "grouping_pack",
+ "username",
+ "tag",
+ "bitrate",
+ "bitdepth",
+ "type",
+ "channels",
"license",
)
has_facet_filter = False
filter_query = ""
- if parsed_filters:
+ if parsed_filters:
filter_query_parts = []
for parsed_filter in parsed_filters:
if parsed_filter[0] in facet_filter_strings:
- has_facet_filter = True
+ has_facet_filter = True
else:
filter_query_parts.append(''.join(parsed_filter))
filter_query = ' '.join(filter_query_parts)
-
+
return filter_query, has_facet_filter
@@ -445,10 +448,12 @@ def get_all_sound_ids_from_search_engine(page_size=2000):
current_page = 1
try:
while solr_count is None or len(solr_ids) < solr_count:
- response = search_engine.search_sounds(query_filter="*:*",
- sort=settings.SEARCH_SOUNDS_SORT_OPTION_DATE_NEW_FIRST,
- offset=(current_page - 1) * page_size,
- num_sounds=page_size)
+ response = search_engine.search_sounds(
+ query_filter="*:*",
+ sort=settings.SEARCH_SOUNDS_SORT_OPTION_DATE_NEW_FIRST,
+ offset=(current_page - 1) * page_size,
+ num_sounds=page_size
+ )
solr_ids += [int(element['id']) for element in response.docs]
solr_count = response.num_found
current_page += 1
diff --git a/utils/session_checks.py b/utils/session_checks.py
index 173a73d69..079ca6807 100644
--- a/utils/session_checks.py
+++ b/utils/session_checks.py
@@ -18,23 +18,25 @@
# See AUTHORS file.
#
-
from functools import wraps
from django.http import HttpResponseRedirect
from django.urls import reverse
+
def login_redirect(function=None):
"""
Decorator for views that checks that the user is not logged in,
redirecting to the home page if necessary.
"""
+
def decorator(view_func):
+
@wraps(view_func)
def _wrapped_view(request, *args, **kwargs):
if not request.user.is_authenticated:
return view_func(request, *args, **kwargs)
return HttpResponseRedirect(reverse('accounts-home'))
- return _wrapped_view
- return decorator(function)
+ return _wrapped_view
+ return decorator(function)
diff --git a/utils/similarity_utilities.py b/utils/similarity_utilities.py
index f8782e10b..74971eefc 100644
--- a/utils/similarity_utilities.py
+++ b/utils/similarity_utilities.py
@@ -69,9 +69,21 @@ def get_similar_sounds(sound, preset=DEFAULT_PRESET, num_results=settings.SOUNDS
return similar_sounds[0:num_results], count
-def api_search(target=None, filter=None, preset=None, metric_descriptor_names=None, num_results=None, offset=None, target_file=None, in_ids=None):
-
- cache_key = 'api-search-t-{}-f-{}-nr-{}-o-{}'.format(str(target).replace(" ", ""), str(filter).replace(" ", ""), num_results, offset)
+def api_search(
+ target=None,
+ filter=None,
+ preset=None,
+ metric_descriptor_names=None,
+ num_results=None,
+ offset=None,
+ target_file=None,
+ in_ids=None
+):
+
+ cache_key = 'api-search-t-{}-f-{}-nr-{}-o-{}'.format(
+ str(target).replace(" ", ""),
+ str(filter).replace(" ", ""), num_results, offset
+ )
cache_key = hash_cache_key(cache_key)
note = False
if in_ids:
@@ -94,7 +106,7 @@ def api_search(target=None, filter=None, preset=None, metric_descriptor_names=No
if target_file:
# If there is a file attahced, set the file as the target
target_type = 'file'
- target = None # If target is given as a file, we set target to None (just in case)
+ target = None # If target is given as a file, we set target to None (just in case)
else:
# In case there is no file, if the string target represents an integer value, then target is a sound_id, otherwise target is descriptor_values
if target.isdigit():
@@ -132,14 +144,18 @@ def get_sounds_descriptors(sound_ids, descriptor_names, normalization=True, only
# Check if at least some sound analysis data is already on cache
not_cached_sound_ids = sound_ids[:]
for id in sound_ids:
- analysis_data = cache.get(hash_cache_key(cache_key % (str(id), ",".join(sorted(descriptor_names)), str(normalization))))
+ analysis_data = cache.get(
+ hash_cache_key(cache_key % (str(id), ",".join(sorted(descriptor_names)), str(normalization)))
+ )
if analysis_data:
cached_data[str(id)] = analysis_data
# remove id form list so it is not included in similarity request
not_cached_sound_ids.remove(id)
if not_cached_sound_ids:
try:
- returned_data = Similarity.get_sounds_descriptors(not_cached_sound_ids, descriptor_names, normalization, only_leaf_descriptors)
+ returned_data = Similarity.get_sounds_descriptors(
+ not_cached_sound_ids, descriptor_names, normalization, only_leaf_descriptors
+ )
except Exception as e:
web_logger.info('Something wrong occurred with the "get sound descriptors" request (%s)\n\t%s' %\
(e, traceback.format_exc()))
@@ -149,8 +165,10 @@ def get_sounds_descriptors(sound_ids, descriptor_names, normalization=True, only
# save sound analysis information in cache
for key, item in returned_data.items():
- cache.set(hash_cache_key(cache_key % (key, ",".join(sorted(descriptor_names)), str(normalization))),
- item, SIMILARITY_CACHE_TIME)
+ cache.set(
+ hash_cache_key(cache_key % (key, ",".join(sorted(descriptor_names)), str(normalization))), item,
+ SIMILARITY_CACHE_TIME
+ )
returned_data.update(cached_data)
@@ -162,7 +180,7 @@ def delete_sound_from_gaia(sound_id):
try:
Similarity.delete(sound_id)
except Exception as e:
- web_logger.warning("Could not delete sound from gaia with id %d (%s)" % (sound_id, str(e)))
+ web_logger.warning("Could not delete sound from gaia with id %d (%s)" % (sound_id, str(e)))
def hash_cache_key(key):
diff --git a/utils/sound_upload.py b/utils/sound_upload.py
index 63e4b080e..b5a829c69 100644
--- a/utils/sound_upload.py
+++ b/utils/sound_upload.py
@@ -105,7 +105,7 @@ def get_processing_before_describe_sound_folder(audio_file_path):
"""
Get the path to the folder where the sound files generated during procesing-before-describe
should be stored.
- """
+ """
user_id = os.path.basename(os.path.dirname(audio_file_path))
hash = str(hashlib.md5(audio_file_path.encode()).hexdigest())
return os.path.join(settings.PROCESSING_BEFORE_DESCRIPTION_DIR, user_id, hash)
@@ -116,12 +116,7 @@ def get_processing_before_describe_sound_base_url(audio_file_path):
return settings.PROCESSING_BEFORE_DESCRIPTION_URL + '/'.join(path.split('/')[-2:]) + '/'
-def create_sound(user,
- sound_fields,
- apiv2_client=None,
- bulk_upload_progress=None,
- process=True,
- remove_exists=False):
+def create_sound(user, sound_fields, apiv2_client=None, bulk_upload_progress=None, process=True, remove_exists=False):
"""
This function is used to create sound objects uploaded via the sound describe form, the API or the bulk describe
feature.
@@ -190,7 +185,7 @@ def create_sound(user,
sound.set_license(license)
# 3 move to new path
- orig = os.path.splitext(os.path.basename(sound.original_filename))[0] # WATCH OUT!
+ orig = os.path.splitext(os.path.basename(sound.original_filename))[0] # WATCH OUT!
sound.base_filename_slug = "%d__%s__%s" % (sound.id, slugify(sound.user.username), slugify(orig))
new_original_path = sound.locations("path")
if sound.original_path != new_original_path:
@@ -226,10 +221,7 @@ def create_sound(user,
# Create geotag from lat,lon,zoom text format
if sound_fields['geotag']:
lat, lon, zoom = sound_fields['geotag'].split(',')
- geotag = GeoTag(user=user,
- lat=float(lat),
- lon=float(lon),
- zoom=int(zoom))
+ geotag = GeoTag(user=user, lat=float(lat), lon=float(lon), zoom=int(zoom))
geotag.save()
sound.geotag = geotag
else:
@@ -238,10 +230,7 @@ def create_sound(user,
lon = sound_fields.get('lon', None)
zoom = sound_fields.get('zoom', None)
if lat is not None and lon is not None and zoom is not None:
- geotag = GeoTag(user=user,
- lat=float(lat),
- lon=float(lon),
- zoom=int(zoom))
+ geotag = GeoTag(user=user, lat=float(lat), lon=float(lon), zoom=int(zoom))
geotag.save()
sound.geotag = geotag
@@ -289,11 +278,13 @@ def create_sound(user,
upload_source = 'bulk'
else:
upload_source = 'web'
- sounds_logger.info('Created Sound object (%s)' % json.dumps({
- 'sound_id': sound.id,
- 'username': sound.user.username,
- 'upload_source': upload_source,
- }))
+ sounds_logger.info(
+ 'Created Sound object (%s)' % json.dumps({
+ 'sound_id': sound.id,
+ 'username': sound.user.username,
+ 'upload_source': upload_source,
+ })
+ )
return sound
@@ -321,18 +312,19 @@ def get_csv_lines(csv_file_path):
elif csv_file_path.endswith('.xls'):
# Read from Excel format
wb = xlrd.open_workbook(csv_file_path)
- s = wb.sheet_by_index(0) # Get first excel sheet
+ s = wb.sheet_by_index(0) # Get first excel sheet
header = s.row_values(0)
- lines = [dict(zip(header, row)) for row in
- [[str(val) for val in s.row_values(i)] for i in range(1, s.nrows)]]
+ lines = [dict(zip(header, row)) for row in [[str(val) for val in s.row_values(i)] for i in range(1, s.nrows)]]
elif csv_file_path.endswith('.xlsx'):
# Read from Excel format
wb = openpyxl.load_workbook(filename=csv_file_path)
- s = wb[wb.sheetnames[0]] # Get first excel sheet
+ s = wb[wb.sheetnames[0]] # Get first excel sheet
rows = list(s.values)
header = list(rows[0])
- lines = [dict(zip(header, row)) for row in
- [[str(val) if val is not None else '' for val in rows[i]] for i in range(1, len(rows))]]
+ lines = [
+ dict(zip(header, row))
+ for row in [[str(val) if val is not None else '' for val in rows[i]] for i in range(1, len(rows))]
+ ]
else:
header = []
lines = []
@@ -374,11 +366,9 @@ def validate_input_csv_file(csv_header, csv_lines, sounds_base_dir, username=Non
# Check headers
if username is not None and csv_header != EXPECTED_HEADER_NO_USERNAME:
- global_errors.append('Invalid header. Header should be: %s'
- % ','.join(EXPECTED_HEADER_NO_USERNAME))
+ global_errors.append('Invalid header. Header should be: %s' % ','.join(EXPECTED_HEADER_NO_USERNAME))
elif username is None and csv_header != EXPECTED_HEADER:
- global_errors.append('Invalid header. Header should be: %s'
- % ','.join(EXPECTED_HEADER))
+ global_errors.append('Invalid header. Header should be: %s' % ','.join(EXPECTED_HEADER))
# Check that there are lines for sounds
if len(csv_lines) == 0:
@@ -496,7 +486,7 @@ def validate_input_csv_file(csv_header, csv_lines, sounds_base_dir, username=Non
})
lines_validated.append({
- 'line_no': n + 2, # Show line number with l1 = header, l2 = first sound, and soon
+ 'line_no': n + 2, # Show line number with l1 = header, l2 = first sound, and soon
'line_original': line,
'line_cleaned': line_cleaned,
'line_errors': line_errors,
@@ -505,8 +495,14 @@ def validate_input_csv_file(csv_header, csv_lines, sounds_base_dir, username=Non
return lines_validated, global_errors
-def bulk_describe_from_csv(csv_file_path, delete_already_existing=False, force_import=False, sounds_base_dir=None,
- username=None, bulkupload_progress_id=None):
+def bulk_describe_from_csv(
+ csv_file_path,
+ delete_already_existing=False,
+ force_import=False,
+ sounds_base_dir=None,
+ username=None,
+ bulkupload_progress_id=None
+):
"""
Reads through the lines of a CSV file containing metadata to describe (and create) new Sound objects and creates
them if the metadata is valid.
@@ -526,15 +522,15 @@ def bulk_describe_from_csv(csv_file_path, delete_already_existing=False, force_i
# Read and validate CSV
header, lines = get_csv_lines(csv_file_path)
lines_validated, global_errors = validate_input_csv_file(
- csv_header=header,
- csv_lines=lines,
- sounds_base_dir=sounds_base_dir,
- username=username)
+ csv_header=header, csv_lines=lines, sounds_base_dir=sounds_base_dir, username=username
+ )
# Print global error messages if any
if global_errors:
- console_logger.info('Major issues were found while validating the CSV file. '
- 'Fix them and re-run the command.')
+ console_logger.info(
+ 'Major issues were found while validating the CSV file. '
+ 'Fix them and re-run the command.'
+ )
for error in global_errors:
console_logger.info(f'- {error}')
return
@@ -543,8 +539,10 @@ def bulk_describe_from_csv(csv_file_path, delete_already_existing=False, force_i
lines_with_errors = [line for line in lines_validated if line['line_errors']]
if lines_with_errors:
if not force_import:
- console_logger.info('The following %i lines contain invalid data. Fix them or re-run with -f to import '
- 'skipping these lines:' % len(lines_with_errors))
+ console_logger.info(
+ 'The following %i lines contain invalid data. Fix them or re-run with -f to import '
+ 'skipping these lines:' % len(lines_with_errors)
+ )
else:
console_logger.info(f'Skipping the following {len(lines_with_errors)} lines due to invalid data')
for line in lines_with_errors:
@@ -559,8 +557,10 @@ def bulk_describe_from_csv(csv_file_path, delete_already_existing=False, force_i
try:
bulk_upload_progress_object = BulkUploadProgress.objects.get(id=bulkupload_progress_id)
except BulkUploadProgress.DoesNotExist:
- console_logger.info('BulkUploadProgress object with id %i can\'t be found, wont store progress '
- 'information.' % bulkupload_progress_id)
+ console_logger.info(
+ 'BulkUploadProgress object with id %i can\'t be found, wont store progress '
+ 'information.' % bulkupload_progress_id
+ )
# Start the actual process of uploading files
lines_ok = [line for line in lines_validated if not line['line_errors']]
@@ -581,22 +581,22 @@ def bulk_describe_from_csv(csv_file_path, delete_already_existing=False, force_i
try:
sound = create_sound(
- user=user,
- sound_fields={
- 'name': line_cleaned['name'],
- 'dest_path': dest_path,
- 'license': line_cleaned['license'],
- 'pack': line_cleaned['pack_name'],
- 'description': line_cleaned['description'],
- 'tags': line_cleaned['tags'],
- 'lat': line_cleaned['lat'],
- 'lon': line_cleaned['lon'],
- 'zoom': line_cleaned['zoom'],
- 'is_explicit': line_cleaned['is_explicit'],
- },
- process=False,
- bulk_upload_progress=bulk_upload_progress_object,
- remove_exists=delete_already_existing,
+ user=user,
+ sound_fields={
+ 'name': line_cleaned['name'],
+ 'dest_path': dest_path,
+ 'license': line_cleaned['license'],
+ 'pack': line_cleaned['pack_name'],
+ 'description': line_cleaned['description'],
+ 'tags': line_cleaned['tags'],
+ 'lat': line_cleaned['lat'],
+ 'lon': line_cleaned['lon'],
+ 'zoom': line_cleaned['zoom'],
+ 'is_explicit': line_cleaned['is_explicit'],
+ },
+ process=False,
+ bulk_upload_progress=bulk_upload_progress_object,
+ remove_exists=delete_already_existing,
)
if bulk_upload_progress_object:
bulk_upload_progress_object.store_progress_for_line(line['line_no'], sound.id)
@@ -610,7 +610,10 @@ def bulk_describe_from_csv(csv_file_path, delete_already_existing=False, force_i
if sound.pack:
sound.pack.process()
- message = 'l%i: Successfully added sound \'%s\' to Freesound.' % (line['line_no'], sound.original_filename,)
+ message = 'l%i: Successfully added sound \'%s\' to Freesound.' % (
+ line['line_no'],
+ sound.original_filename,
+ )
if error_sending_to_process is not None:
message += f' Sound could have not been sent to process ({error_sending_to_process}).'
console_logger.info(message)
@@ -622,19 +625,28 @@ def bulk_describe_from_csv(csv_file_path, delete_already_existing=False, force_i
if bulk_upload_progress_object:
bulk_upload_progress_object.store_progress_for_line(line['line_no'], message)
except AlreadyExistsException:
- message = 'l%i: The file %s is already part of Freesound, discarding it.' % (line['line_no'], dest_path,)
+ message = 'l%i: The file %s is already part of Freesound, discarding it.' % (
+ line['line_no'],
+ dest_path,
+ )
console_logger.info(message)
if bulk_upload_progress_object:
bulk_upload_progress_object.store_progress_for_line(line['line_no'], message)
except CantMoveException as e:
- message = 'l%i: %s.' % (line['line_no'], str(e),)
+ message = 'l%i: %s.' % (
+ line['line_no'],
+ str(e),
+ )
console_logger.info(message)
if bulk_upload_progress_object:
bulk_upload_progress_object.store_progress_for_line(line['line_no'], message)
except Exception as e:
# If another unexpected exception happens, show a message and continue with the process so that
# other sounds can be added
- message = 'l%i: Unexpected error %s.' % (line['line_no'], str(e),)
+ message = 'l%i: Unexpected error %s.' % (
+ line['line_no'],
+ str(e),
+ )
console_logger.info(message, exc_info=True)
if bulk_upload_progress_object:
bulk_upload_progress_object.store_progress_for_line(line['line_no'], message)
diff --git a/utils/spam.py b/utils/spam.py
index 88dbec62c..d653eb406 100644
--- a/utils/spam.py
+++ b/utils/spam.py
@@ -65,5 +65,5 @@ def is_spam(request, comment):
return True
else:
return False
- except (AkismetError, HTTPError, URLError): # failed to contact akismet...
+ except (AkismetError, HTTPError, URLError): # failed to contact akismet...
return False
diff --git a/utils/tagrecommendation_utilities.py b/utils/tagrecommendation_utilities.py
index dfb37c693..ea6a797f0 100644
--- a/utils/tagrecommendation_utilities.py
+++ b/utils/tagrecommendation_utilities.py
@@ -71,7 +71,7 @@ def get_recommended_tags_view(request):
(e, traceback.format_exc()))
return HttpResponseUnavailabileError()
- return HttpResponse(json.dumps([[],"-"]), content_type='application/javascript')
+ return HttpResponse(json.dumps([[], "-"]), content_type='application/javascript')
def get_id_of_last_indexed_sound():
@@ -86,13 +86,11 @@ def get_id_of_last_indexed_sound():
def post_sounds_to_tagrecommendation_service(sound_qs):
data_to_post = []
N_SOUNDS_PER_CALL = 10
- total_calls = int(ceil(float(len(sound_qs))/N_SOUNDS_PER_CALL))
+ total_calls = int(ceil(float(len(sound_qs)) / N_SOUNDS_PER_CALL))
print("Sending recommendation data...")
idx = 1
for count, sound in enumerate(sound_qs):
- data_to_post.append(
- (sound.id, list(sound.tags.select_related("tag").values_list('tag__name', flat=True)))
- )
+ data_to_post.append((sound.id, list(sound.tags.select_related("tag").values_list('tag__name', flat=True))))
if (count + 1) % N_SOUNDS_PER_CALL == 0:
ids = [element[0] for element in data_to_post]
tagss = [element[1] for element in data_to_post]
diff --git a/utils/tags.py b/utils/tags.py
index 94a314686..2ad5a547a 100644
--- a/utils/tags.py
+++ b/utils/tags.py
@@ -78,5 +78,6 @@ def clean_and_split_tags(tags):
tags = alphanum_only.sub(" ", tags)
tags = multi_dashes.sub("-", tags)
- common_words = "the of to and an in is it you that he was for on are with as i his they be at".split() #@UnusedVariable
+ common_words = "the of to and an in is it you that he was for on are with as i his they be at".split(
+ ) #@UnusedVariable
return {tag for tag in [tag.strip('-') for tag in tags.split()] if tag and tag not in common_words}
diff --git a/utils/test_helpers.py b/utils/test_helpers.py
index 08770113a..e038b9ad4 100644
--- a/utils/test_helpers.py
+++ b/utils/test_helpers.py
@@ -35,7 +35,9 @@
from utils.tags import clean_and_split_tags
-def create_test_files(filenames=None, directory=None, paths=None, n_bytes=1024, make_valid_wav_files=False, duration=0.0):
+def create_test_files(
+ filenames=None, directory=None, paths=None, n_bytes=1024, make_valid_wav_files=False, duration=0.0
+):
"""
This function generates test files with random content and saves them in the specified directory.
:param filenames: list of names for the files to generate
@@ -47,7 +49,7 @@ def create_test_files(filenames=None, directory=None, paths=None, n_bytes=1024,
"""
if paths is None:
paths = [os.path.join(directory, filename) for filename in filenames]
-
+
for path in paths:
os.makedirs(os.path.dirname(path), exist_ok=True)
if not make_valid_wav_files:
@@ -60,12 +62,21 @@ def create_test_files(filenames=None, directory=None, paths=None, n_bytes=1024,
pysndfile.sndio.write(path, scaled, format='wav', rate=44100)
-sound_counter = count() # Used in create_user_and_sounds to avoid repeating sound names
+sound_counter = count() # Used in create_user_and_sounds to avoid repeating sound names
-def create_user_and_sounds(num_sounds=1, num_packs=0, user=None, count_offset=0, tags=None,
- description=None, processing_state='PE', moderation_state='PE', type='wav',
- username="testuser"):
+def create_user_and_sounds(
+ num_sounds=1,
+ num_packs=0,
+ user=None,
+ count_offset=0,
+ tags=None,
+ description=None,
+ processing_state='PE',
+ moderation_state='PE',
+ type='wav',
+ username="testuser"
+):
"""Creates User, Sound and Pack objects useful for testing.
A counter is used to make sound names unique as well as other fields like md5 (see `sound_counter` variable).
@@ -99,16 +110,18 @@ def create_user_and_sounds(num_sounds=1, num_packs=0, user=None, count_offset=0,
pack = None
if packs:
pack = packs[i % len(packs)]
- sound = Sound.objects.create(user=user,
- original_filename="Test sound %i" % (i + count_offset),
- base_filename_slug="test_sound_%i" % (i + count_offset),
- license=License.objects.last(),
- description=description if description is not None else '',
- pack=pack,
- md5="fakemd5_%i" % (i + count_offset),
- type=type,
- processing_state=processing_state,
- moderation_state=moderation_state)
+ sound = Sound.objects.create(
+ user=user,
+ original_filename="Test sound %i" % (i + count_offset),
+ base_filename_slug="test_sound_%i" % (i + count_offset),
+ license=License.objects.last(),
+ description=description if description is not None else '',
+ pack=pack,
+ md5="fakemd5_%i" % (i + count_offset),
+ type=type,
+ processing_state=processing_state,
+ moderation_state=moderation_state
+ )
if tags is not None:
sound.set_tags(clean_and_split_tags(tags))
@@ -132,6 +145,7 @@ def ret_fun(*args, **kwargs):
with TemporaryDirectory() as tmpfolder:
with override_settings(**{settings_path_name: tmpfolder}):
return fun(*args, **kwargs)
+
return ret_fun
@@ -165,4 +179,6 @@ def ret_fun(*args, **kwargs):
def create_fake_perform_search_engine_query_results_tags_mode():
# This returns utils.search.SearchResults which was pickled from a real query in a local freesound instance
- return pickle.loads(b'\x80\x04\x95\x1c\x07\x00\x00\x00\x00\x00\x00\x8c\x0cutils.search\x94\x8c\rSearchResults\x94\x93\x94)\x81\x94}\x94(\x8c\x04docs\x94]\x94}\x94(\x8c\x02id\x94J\xef\x8b\x05\x00\x8c\x05score\x94G?\xf0\x00\x00\x00\x00\x00\x00\x8c\x0fn_more_in_group\x94K\x00\x8c\ngroup_docs\x94]\x94}\x94(\x8c\x02id\x94J\xef\x8b\x05\x00\x8c\x05score\x94G?\xf0\x00\x00\x00\x00\x00\x00ua\x8c\ngroup_name\x94\x8c\x06363503\x94ua\x8c\x06facets\x94}\x94\x8c\x03tag\x94]\x94(\x8c\x0ffield-recording\x94M\xa3\x07\x86\x94\x8c\x05noise\x94M\x86\x07\x86\x94\x8c\nelectronic\x94M\x0c\x07\x86\x94\x8c\x05voice\x94M\xc8\x05\x86\x94\x8c\x05metal\x94M8\x05\x86\x94\x8c\x04loop\x94M\x08\x05\x86\x94\x8c\x06effect\x94M\xed\x04\x86\x94\x8c\x05sound\x94M\xe4\x04\x86\x94\x8c\x04bass\x94M\xd7\x03\x86\x94\x8c\x05water\x94M\xd3\x03\x86\x94\x8c\x04male\x94M\xd0\x03\x86\x94\x8c\x02fx\x94M\xc4\x03\x86\x94\x8c\x04drum\x94Mf\x03\x86\x94\x8c\x07ambient\x94MS\x03\x86\x94\x8c\x05synth\x94M\x0b\x03\x86\x94\x8c\x03sfx\x94M\xf5\x02\x86\x94\x8c\npercussion\x94M\xe6\x02\x86\x94\x8c\x08ambience\x94M\xc3\x02\x86\x94\x8c\x0bmultisample\x94M\xb5\x02\x86\x94\x8c\nmezzoforte\x94M\xb2\x02\x86\x94\x8c\x04beat\x94M\xac\x02\x86\x94\x8c\x05words\x94M\xa6\x02\x86\x94\x8c\x06female\x94M\x80\x02\x86\x94\x8c\x06glitch\x94Mw\x02\x86\x94\x8c\x08zoom-h2n\x94Md\x02\x86\x94\x8c\x06nature\x94MV\x02\x86\x94\x8c\x07machine\x94MA\x02\x86\x94\x8c\x06speech\x94M=\x02\x86\x94\x8c\x04kick\x94M9\x02\x86\x94\x8c\x06sci-fi\x94M.\x02\x86\x94\x8c\x05remix\x94M"\x02\x86\x94\x8c\x03hit\x94M\x1a\x02\x86\x94\x8c\x04door\x94M\xfe\x01\x86\x94\x8c\x03mix\x94M\xfd\x01\x86\x94\x8c\x0cexperimental\x94M\xf5\x01\x86\x94\x8c\x05close\x94M\xe4\x01\x86\x94\x8c\x05birds\x94M\xe3\x01\x86\x94\x8c\x10flexible-grooves\x94M\xdf\x01\x86\x94\x8c\nsoundscape\x94M\xd7\x01\x86\x94\x8c\x0bnon-vibrato\x94M\xd5\x01\x86\x94\x8c\x0bchordophone\x94M\xd2\x01\x86\x94\x8c\nindustrial\x94M\xd1\x01\x86\x94\x8c\x0bsynthesizer\x94M\xd1\x01\x86\x94\x8c\x0fmtc500-m002-s14\x94M\xc9\x
01\x86\x94\x8c\x04wood\x94M\xc2\x01\x86\x94\x8c\x04game\x94M\xa8\x01\x86\x94\x8c\x06engine\x94M\xa5\x01\x86\x94\x8c\x07english\x94M\xa4\x01\x86\x94\x8c\x04dark\x94M\x99\x01\x86\x94\x8c\nsound-trip\x94M\x95\x01\x86\x94\x8c\x06horror\x94M\x91\x01\x86\x94\x8c\x06guitar\x94M\x86\x01\x86\x94\x8c\x05drone\x94M\x84\x01\x86\x94\x8c\x06impact\x94M\x82\x01\x86\x94\x8c\trecording\x94M\x80\x01\x86\x94\x8c\x08electric\x94M}\x01\x86\x94\x8c\x05click\x94M{\x01\x86\x94\x8c\x05space\x94Mz\x01\x86\x94\x8c\x06sample\x94Mv\x01\x86\x94\x8c\natmosphere\x94Mu\x01\x86\x94\x8c\x0belectronica\x94Mu\x01\x86\x94\x8c\x05short\x94Mn\x01\x86\x94\x8c\x04wind\x94Me\x01\x86\x94\x8c\x06puzzle\x94Mc\x01\x86\x94\x8c\x04bell\x94Mb\x01\x86\x94\x8c\x07whisper\x94Mb\x01\x86\x94\x8c\x11string-instrument\x94M_\x01\x86\x94\x8c\x06mumble\x94M]\x01\x86\x94\x8c\x08alphabet\x94M[\x01\x86\x94\x8c\x0cbrian-cimmet\x94M[\x01\x86\x94\x8c\x0ccelia-madeoy\x94M[\x01\x86\x94\x8c\tcrossword\x94M[\x01\x86\x94\x8c\x0cdaniel-feyer\x94M[\x01\x86\x94\x8c\rdoug-peterson\x94M[\x01\x86\x94\x8c\x08gridplay\x94M[\x01\x86\x94\x8c\x07letters\x94M[\x01\x86\x94\x8c\x0emalcolm-ingram\x94M[\x01\x86\x94\x8c\x07solving\x94M[\x01\x86\x94\x8c\x0estanley-newman\x94M[\x01\x86\x94\x8c\x05vocal\x94M[\x01\x86\x94\x8c\x07digital\x94MR\x01\x86\x94\x8c\x03owi\x94ML\x01\x86\x94\x8c\x05scary\x94ML\x01\x86\x94\x8c\tdistorted\x94MJ\x01\x86\x94\x8c\x05motor\x94MI\x01\x86\x94\x8c\x04flex\x94ME\x01\x86\x94\x8c\x08computer\x94MD\x01\x86\x94\x8c\x04wave\x94MD\x01\x86\x94\x8c\x06reverb\x94MC\x01\x86\x94\x8c\x03car\x94MA\x01\x86\x94\x8c\x03low\x94M@\x01\x86\x94\x8c\ndistortion\x94M=\x01\x86\x94\x8c\nmechanical\x94M=\x01\x86\x94\x8c\x0csound-design\x94M9\x01\x86\x94\x8c\x06tenuto\x94M9\x01\x86\x94\x8c\x05drums\x94M8\x01\x86\x94\x8c\x04name\x94M4\x01\x86\x94\x8c\nportuguese\x94M4\x01\x86\x94\x8c\x12iberian-portuguese\x94M2\x01\x86\x94\x8c\x04hard\x94M0\x01\x86\x94es\x8c\x0chighlighting\x94}\x94\x8c\x1dnon_grouped_number_of_results\x94M\x92c\x8c\tnum_found\x94M\xb
6$\x8c\x05start\x94K\x00\x8c\x08num_rows\x94K\x01\x8c\x06q_time\x94K\x10ub.')
\ No newline at end of file
+ return pickle.loads(
+ b'\x80\x04\x95\x1c\x07\x00\x00\x00\x00\x00\x00\x8c\x0cutils.search\x94\x8c\rSearchResults\x94\x93\x94)\x81\x94}\x94(\x8c\x04docs\x94]\x94}\x94(\x8c\x02id\x94J\xef\x8b\x05\x00\x8c\x05score\x94G?\xf0\x00\x00\x00\x00\x00\x00\x8c\x0fn_more_in_group\x94K\x00\x8c\ngroup_docs\x94]\x94}\x94(\x8c\x02id\x94J\xef\x8b\x05\x00\x8c\x05score\x94G?\xf0\x00\x00\x00\x00\x00\x00ua\x8c\ngroup_name\x94\x8c\x06363503\x94ua\x8c\x06facets\x94}\x94\x8c\x03tag\x94]\x94(\x8c\x0ffield-recording\x94M\xa3\x07\x86\x94\x8c\x05noise\x94M\x86\x07\x86\x94\x8c\nelectronic\x94M\x0c\x07\x86\x94\x8c\x05voice\x94M\xc8\x05\x86\x94\x8c\x05metal\x94M8\x05\x86\x94\x8c\x04loop\x94M\x08\x05\x86\x94\x8c\x06effect\x94M\xed\x04\x86\x94\x8c\x05sound\x94M\xe4\x04\x86\x94\x8c\x04bass\x94M\xd7\x03\x86\x94\x8c\x05water\x94M\xd3\x03\x86\x94\x8c\x04male\x94M\xd0\x03\x86\x94\x8c\x02fx\x94M\xc4\x03\x86\x94\x8c\x04drum\x94Mf\x03\x86\x94\x8c\x07ambient\x94MS\x03\x86\x94\x8c\x05synth\x94M\x0b\x03\x86\x94\x8c\x03sfx\x94M\xf5\x02\x86\x94\x8c\npercussion\x94M\xe6\x02\x86\x94\x8c\x08ambience\x94M\xc3\x02\x86\x94\x8c\x0bmultisample\x94M\xb5\x02\x86\x94\x8c\nmezzoforte\x94M\xb2\x02\x86\x94\x8c\x04beat\x94M\xac\x02\x86\x94\x8c\x05words\x94M\xa6\x02\x86\x94\x8c\x06female\x94M\x80\x02\x86\x94\x8c\x06glitch\x94Mw\x02\x86\x94\x8c\x08zoom-h2n\x94Md\x02\x86\x94\x8c\x06nature\x94MV\x02\x86\x94\x8c\x07machine\x94MA\x02\x86\x94\x8c\x06speech\x94M=\x02\x86\x94\x8c\x04kick\x94M9\x02\x86\x94\x8c\x06sci-fi\x94M.\x02\x86\x94\x8c\x05remix\x94M"\x02\x86\x94\x8c\x03hit\x94M\x1a\x02\x86\x94\x8c\x04door\x94M\xfe\x01\x86\x94\x8c\x03mix\x94M\xfd\x01\x86\x94\x8c\x0cexperimental\x94M\xf5\x01\x86\x94\x8c\x05close\x94M\xe4\x01\x86\x94\x8c\x05birds\x94M\xe3\x01\x86\x94\x8c\x10flexible-grooves\x94M\xdf\x01\x86\x94\x8c\nsoundscape\x94M\xd7\x01\x86\x94\x8c\x0bnon-vibrato\x94M\xd5\x01\x86\x94\x8c\x0bchordophone\x94M\xd2\x01\x86\x94\x8c\nindustrial\x94M\xd1\x01\x86\x94\x8c\x0bsynthesizer\x94M\xd1\x01\x86\x94\x8c\x0fmtc500-m002-s14\x94M\xc9\x01\x86\x94\x8c\x04wo
od\x94M\xc2\x01\x86\x94\x8c\x04game\x94M\xa8\x01\x86\x94\x8c\x06engine\x94M\xa5\x01\x86\x94\x8c\x07english\x94M\xa4\x01\x86\x94\x8c\x04dark\x94M\x99\x01\x86\x94\x8c\nsound-trip\x94M\x95\x01\x86\x94\x8c\x06horror\x94M\x91\x01\x86\x94\x8c\x06guitar\x94M\x86\x01\x86\x94\x8c\x05drone\x94M\x84\x01\x86\x94\x8c\x06impact\x94M\x82\x01\x86\x94\x8c\trecording\x94M\x80\x01\x86\x94\x8c\x08electric\x94M}\x01\x86\x94\x8c\x05click\x94M{\x01\x86\x94\x8c\x05space\x94Mz\x01\x86\x94\x8c\x06sample\x94Mv\x01\x86\x94\x8c\natmosphere\x94Mu\x01\x86\x94\x8c\x0belectronica\x94Mu\x01\x86\x94\x8c\x05short\x94Mn\x01\x86\x94\x8c\x04wind\x94Me\x01\x86\x94\x8c\x06puzzle\x94Mc\x01\x86\x94\x8c\x04bell\x94Mb\x01\x86\x94\x8c\x07whisper\x94Mb\x01\x86\x94\x8c\x11string-instrument\x94M_\x01\x86\x94\x8c\x06mumble\x94M]\x01\x86\x94\x8c\x08alphabet\x94M[\x01\x86\x94\x8c\x0cbrian-cimmet\x94M[\x01\x86\x94\x8c\x0ccelia-madeoy\x94M[\x01\x86\x94\x8c\tcrossword\x94M[\x01\x86\x94\x8c\x0cdaniel-feyer\x94M[\x01\x86\x94\x8c\rdoug-peterson\x94M[\x01\x86\x94\x8c\x08gridplay\x94M[\x01\x86\x94\x8c\x07letters\x94M[\x01\x86\x94\x8c\x0emalcolm-ingram\x94M[\x01\x86\x94\x8c\x07solving\x94M[\x01\x86\x94\x8c\x0estanley-newman\x94M[\x01\x86\x94\x8c\x05vocal\x94M[\x01\x86\x94\x8c\x07digital\x94MR\x01\x86\x94\x8c\x03owi\x94ML\x01\x86\x94\x8c\x05scary\x94ML\x01\x86\x94\x8c\tdistorted\x94MJ\x01\x86\x94\x8c\x05motor\x94MI\x01\x86\x94\x8c\x04flex\x94ME\x01\x86\x94\x8c\x08computer\x94MD\x01\x86\x94\x8c\x04wave\x94MD\x01\x86\x94\x8c\x06reverb\x94MC\x01\x86\x94\x8c\x03car\x94MA\x01\x86\x94\x8c\x03low\x94M@\x01\x86\x94\x8c\ndistortion\x94M=\x01\x86\x94\x8c\nmechanical\x94M=\x01\x86\x94\x8c\x0csound-design\x94M9\x01\x86\x94\x8c\x06tenuto\x94M9\x01\x86\x94\x8c\x05drums\x94M8\x01\x86\x94\x8c\x04name\x94M4\x01\x86\x94\x8c\nportuguese\x94M4\x01\x86\x94\x8c\x12iberian-portuguese\x94M2\x01\x86\x94\x8c\x04hard\x94M0\x01\x86\x94es\x8c\x0chighlighting\x94}\x94\x8c\x1dnon_grouped_number_of_results\x94M\x92c\x8c\tnum_found\x94M\xb6$\x8c\x05start\x94K
\x00\x8c\x08num_rows\x94K\x01\x8c\x06q_time\x94K\x10ub.'
+ )
diff --git a/utils/tests/test_filesystem.py b/utils/tests/test_filesystem.py
index d9fc54ba4..4e0d2f6b9 100644
--- a/utils/tests/test_filesystem.py
+++ b/utils/tests/test_filesystem.py
@@ -6,6 +6,7 @@
class Test(TestCase):
+
def test_md5file(self):
with tempfile.NamedTemporaryFile() as tmp_fh:
tmp_fh.write(b"test_content\n")
diff --git a/utils/tests/test_forms.py b/utils/tests/test_forms.py
index c27e7ce4e..05bf6fa1f 100644
--- a/utils/tests/test_forms.py
+++ b/utils/tests/test_forms.py
@@ -24,6 +24,7 @@
class UtilsTest(TestCase):
+
def test_filename_has_valid_extension(self):
cases = [
("filaneme.wav", True),
@@ -40,26 +41,27 @@ def test_filename_has_valid_extension(self):
class TagFieldTest(TestCase):
+
def test_tag_field(self):
f = TagField()
# Split on spaces
self.assertEqual({"One", "two2", "3three"}, f.clean("3three One two2"))
-
+
# Split on commas
self.assertEqual({"one", "two", "three"}, f.clean("three, one,two"))
-
+
# Funny characters not allowed
err_message = "Tags must contain only letters a-z, digits 0-9 and hyphen."
with self.assertRaisesMessage(ValidationError, err_message):
f.clean("One t%wo")
-
+
# accents not allowed
with self.assertRaisesMessage(ValidationError, err_message):
f.clean("One twó")
-
+
# hyphens allowed
self.assertEqual({"tag", "tag-name", "another-name"}, f.clean("tag-name tag another-name"))
-
+
# multiple hyphens cut down to one
self.assertEqual({"tag", "tag-name", "another-name"}, f.clean("tag--name tag another----name"))
diff --git a/utils/tests/test_logging_filters.py b/utils/tests/test_logging_filters.py
index f6b0ff815..c6ef7c9c7 100644
--- a/utils/tests/test_logging_filters.py
+++ b/utils/tests/test_logging_filters.py
@@ -29,6 +29,7 @@
class DummyRequest:
"""A dummy request to check the X-Forwarded-For header in logging_filters.get_client_ip"""
+
def __init__(self, xforwardedfor):
self.headers = {"x-forwarded-for": xforwardedfor}
@@ -38,6 +39,7 @@ class LogRecordsStoreHandler(logging.Handler):
A logger handler class which stores LogRecord entries in a list
Inspiration from: https://stackoverflow.com/questions/57420008/python-after-logging-debug-how-to-view-its-logrecord
"""
+
def __init__(self, records_list):
self.records_list = records_list
super().__init__()
@@ -112,13 +114,11 @@ def test_api_log_filter(self):
'"filter": "license:%22Creative%20Commons%200%22"} #!# {"api_version": "v2", "api_auth_type": "Token", '
'"api_client_username": "test_uname", "api_enduser_username": "None", "api_client_id": '
'"fake_id", "api_client_name": "Test àpp", "ip": "1.1.1.1", '
- '"api_request_protocol": "https", "api_www": "none"}', {
- 'api_resource': 'sound instance',
- 'fields': 'id,url,name,duration,download,previews',
- 'filter': 'license:"Creative Commons 0"',
- 'api_client_username': 'test_uname',
- 'api_client_name': 'Test àpp'
- }), # Note: I'm only testing a couple of fields as the test is complete enough with that
+ '"api_request_protocol": "https", "api_www": "none"}',
+ {'api_resource': 'sound instance', 'fields': 'id,url,name,duration,download,previews',
+ 'filter': 'license:"Creative Commons 0"', 'api_client_username': 'test_uname',
+ 'api_client_name': 'Test àpp'
+ }), # Note: I'm only testing a couple of fields as the test is complete enough with that
]):
logger.debug(log_message)
log_record = logs_list[count]
@@ -146,6 +146,3 @@ def test_api_view_with_logging(self):
for prop, value in query_params.items():
self.assertTrue(hasattr(log_record, prop))
self.assertEqual(getattr(log_record, prop), value)
-
-
-
diff --git a/utils/tests/test_processing.py b/utils/tests/test_processing.py
index bdc6a5bdb..8e3e13643 100644
--- a/utils/tests/test_processing.py
+++ b/utils/tests/test_processing.py
@@ -52,11 +52,7 @@ def convert_using_ffmpeg_mock_fail(input_filename, output_filename, mono_out=Fal
def stereofy_mock(stereofy_executble_path, input_filename, output_filename):
- return dict(
- duration=123.5,
- channels=2,
- samplerate=44100,
- bitdepth=16)
+ return dict(duration=123.5, channels=2, samplerate=44100, bitdepth=16)
def stereofy_mock_fail(stereofy_executble_path, input_filename, output_filename):
@@ -80,12 +76,14 @@ def convert_to_ogg_mock_fail(input_filename, output_filename, quality):
def create_wave_images_mock(
- input_filename, output_filename_w, output_filename_s, image_width, image_height, fft_size, **kwargs):
+ input_filename, output_filename_w, output_filename_s, image_width, image_height, fft_size, **kwargs
+):
create_test_files(paths=[output_filename_w, output_filename_s])
def create_wave_images_mock_fail(
- input_filename, output_filename_w, output_filename_s, image_width, image_height, fft_size, **kwargs):
+ input_filename, output_filename_w, output_filename_s, image_width, image_height, fft_size, **kwargs
+):
raise AudioProcessingException("creation of display images has failed")
@@ -98,7 +96,7 @@ def pre_test(self, create_sound_file=True):
self.assertEqual(self.sound.processing_state, "PE")
if create_sound_file:
create_test_files(paths=[f"{self.sound.locations('path')}"], make_valid_wav_files=True, duration=2)
-
+
def setUp(self):
user, _, sounds = create_user_and_sounds(num_sounds=1, type="wav")
self.sound = sounds[0]
@@ -115,7 +113,7 @@ def test_sound_object_does_not_exist(self):
def test_sound_path_does_not_exist(self):
self.pre_test(create_sound_file=False)
result = FreesoundAudioProcessor(sound_id=Sound.objects.first().id).process()
- self.assertFalse(result) # Processing failed, retutned False
+ self.assertFalse(result)  # Processing failed, returned False
self.sound.refresh_from_db()
self.assertEqual(self.sound.processing_state, "FA")
self.assertEqual(self.sound.processing_ongoing_state, "FI")
@@ -131,7 +129,7 @@ def test_conversion_to_pcm_failed(self, *args):
self.pre_test()
result = FreesoundAudioProcessor(sound_id=Sound.objects.first().id).process()
# will fail because mocked version of convert_to_pcm fails
- self.assertFalse(result) # Processing failed, retutned False
+ self.assertFalse(result)  # Processing failed, returned False
self.sound.refresh_from_db()
self.assertEqual(self.sound.processing_state, "FA")
self.assertEqual(self.sound.processing_ongoing_state, "FI")
@@ -146,7 +144,7 @@ def test_stereofy_failed(self, *args):
self.pre_test()
result = FreesoundAudioProcessor(sound_id=Sound.objects.first().id).process()
# processing will fail because stereofy mock raises an exception
- self.assertFalse(result) # Processing failed, retutned False
+ self.assertFalse(result)  # Processing failed, returned False
self.sound.refresh_from_db()
self.assertEqual(self.sound.processing_state, "FA")
self.assertEqual(self.sound.processing_ongoing_state, "FI")
@@ -160,7 +158,7 @@ def test_set_audio_info_fields(self, *args):
self.pre_test()
FreesoundAudioProcessor(sound_id=Sound.objects.first().id).process()
self.sound.refresh_from_db()
- self.assertEqual(self.sound.duration, 2.0) # Assert that info properties were set
+ self.assertEqual(self.sound.duration, 2.0) # Assert that info properties were set
self.assertEqual(self.sound.channels, 1)
self.assertEqual(self.sound.samplerate, 44100)
self.assertEqual(self.sound.bitrate, 0)
@@ -175,7 +173,7 @@ def test_make_mp3_previews_fails(self, *args):
self.pre_test()
result = FreesoundAudioProcessor(sound_id=Sound.objects.first().id).process()
# processing will fail because convert_to_mp3 mock raises an exception
- self.assertFalse(result) # Processing failed, retutned False
+ self.assertFalse(result)  # Processing failed, returned False
self.sound.refresh_from_db()
self.assertEqual(self.sound.processing_state, "FA")
self.assertEqual(self.sound.processing_ongoing_state, "FI")
@@ -191,7 +189,7 @@ def test_make_ogg_previews_fails(self, *args):
self.pre_test()
result = FreesoundAudioProcessor(sound_id=Sound.objects.first().id).process()
# processing will fail because convert_to_ogg mock raises an exception
- self.assertFalse(result) # Processing failed, retutned False
+ self.assertFalse(result)  # Processing failed, returned False
self.sound.refresh_from_db()
self.assertEqual(self.sound.processing_state, "FA")
self.assertEqual(self.sound.processing_ongoing_state, "FI")
@@ -208,7 +206,7 @@ def test_create_images_fails(self, *args):
self.pre_test()
result = FreesoundAudioProcessor(sound_id=Sound.objects.first().id).process()
# processing will fail because create_wave_images mock raises an exception
- self.assertFalse(result) # Processing failed, retutned False
+ self.assertFalse(result)  # Processing failed, returned False
self.sound.refresh_from_db()
self.assertEqual(self.sound.processing_state, "FA")
self.assertEqual(self.sound.processing_ongoing_state, "FI")
@@ -238,7 +236,7 @@ def test_skip_previews(self, *args):
self.assertTrue(os.path.exists(self.sound.locations('display.wave.L.path')))
self.assertTrue(os.path.exists(self.sound.locations('display.wave_bw.L.path')))
- self.assertTrue(result) # Processing succeeded
+ self.assertTrue(result) # Processing succeeded
self.sound.refresh_from_db()
self.assertEqual(self.sound.processing_state, "OK")
self.assertEqual(self.sound.processing_ongoing_state, "FI")
@@ -267,7 +265,7 @@ def test_skip_displays(self, *args):
self.assertFalse(os.path.exists(self.sound.locations('display.wave.L.path')))
self.assertFalse(os.path.exists(self.sound.locations('display.wave_bw.L.path')))
- self.assertTrue(result) # Processing succeeded
+ self.assertTrue(result) # Processing succeeded
self.sound.refresh_from_db()
self.assertEqual(self.sound.processing_state, "OK")
self.assertEqual(self.sound.processing_ongoing_state, "FI")
@@ -292,5 +290,7 @@ def test_process_before_description(self):
create_test_files(paths=[uploaded_file_path], make_valid_wav_files=True, duration=2)
result = FreesoundAudioProcessorBeforeDescription(audio_file_path=uploaded_file_path).process()
self.assertTrue(result)
- self.assertListEqual(sorted(os.listdir(get_processing_before_describe_sound_folder(uploaded_file_path))),
- sorted(['wave.png', 'spectral.png', 'preview.ogg', 'preview.mp3', 'info.json']))
+ self.assertListEqual(
+ sorted(os.listdir(get_processing_before_describe_sound_folder(uploaded_file_path))),
+ sorted(['wave.png', 'spectral.png', 'preview.ogg', 'preview.mp3', 'info.json'])
+ )
diff --git a/utils/tests/test_search_general.py b/utils/tests/test_search_general.py
index 9210a8434..8502abf31 100644
--- a/utils/tests/test_search_general.py
+++ b/utils/tests/test_search_general.py
@@ -69,7 +69,7 @@ def test_search_prepare_parameters_without_query_params(self):
def test_search_prepare_parameters_with_query_params(self):
# "dog" query, search only in tags and descriptions, duration from 1-10 sec, only geotag, sort by duration, no group by pack
url_query_str = '?q=dog&f=duration:[1+TO+10]+is_geotagged:1&s=Duration+(longest+first)&advanced=1&a_tag=1&a_description=1&g='
- request = self.factory.get(reverse('sounds-search')+url_query_str)
+ request = self.factory.get(reverse('sounds-search') + url_query_str)
SessionMiddleware().process_request(request)
AuthenticationMiddleware().process_request(request)
request.session.save()
@@ -77,14 +77,18 @@ def test_search_prepare_parameters_with_query_params(self):
expected_default_query_params = {
'query_fields': {
- settings.SEARCH_SOUNDS_FIELD_ID: 0,
+ settings.SEARCH_SOUNDS_FIELD_ID:
+ 0,
settings.SEARCH_SOUNDS_FIELD_TAGS:
settings.SEARCH_SOUNDS_DEFAULT_FIELD_WEIGHTS[settings.SEARCH_SOUNDS_FIELD_TAGS],
settings.SEARCH_SOUNDS_FIELD_DESCRIPTION:
settings.SEARCH_SOUNDS_DEFAULT_FIELD_WEIGHTS[settings.SEARCH_SOUNDS_FIELD_DESCRIPTION],
- settings.SEARCH_SOUNDS_FIELD_USER_NAME: 0,
- settings.SEARCH_SOUNDS_FIELD_PACK_NAME: 0,
- settings.SEARCH_SOUNDS_FIELD_NAME: 0
+ settings.SEARCH_SOUNDS_FIELD_USER_NAME:
+ 0,
+ settings.SEARCH_SOUNDS_FIELD_PACK_NAME:
+ 0,
+ settings.SEARCH_SOUNDS_FIELD_NAME:
+ 0
},
'sort': settings.SEARCH_SOUNDS_SORT_OPTION_DURATION_LONG_FIRST,
'num_sounds': settings.SOUNDS_PER_PAGE,
@@ -107,11 +111,11 @@ def test_search_prepare_parameters_with_query_params(self):
}
expected_advanced_search_params_dict = {
- 'a_tag': '1',
- 'a_username': '',
- 'a_description': '1',
- 'a_packname': '',
- 'a_filename': '',
+ 'a_tag': '1',
+ 'a_username': '',
+ 'a_description': '1',
+ 'a_packname': '',
+ 'a_filename': '',
'a_soundid': '',
}
@@ -143,6 +147,7 @@ def test_remove_facet_filters_special_char(self):
def test_remove_facet_filters_special_char2(self):
query_filter_str = 'grouping_pack:"19265_Impacts, Hits, Friction & Tools" tag:"tools" samplerate:"44100" \
bitrate:"1379" duration:[0 TO 10]'
+
parsed_filters = parse_query_filter_string(query_filter_str)
filter_without_facet, has_facet_filter = remove_facet_filters(parsed_filters)
self.assertTrue(has_facet_filter)
@@ -157,7 +162,7 @@ def test_remove_facet_filters_special_char3(self):
def test_search_prepare_parameters_non_ascii_query(self):
# Simple test to check if some non ascii characters are correctly handled by search_prepare_parameters()
- request = self.factory.get(reverse('sounds-search')+'?q=Æ æ ¿ É')
+ request = self.factory.get(reverse('sounds-search') + '?q=Æ æ ¿ É')
SessionMiddleware().process_request(request)
AuthenticationMiddleware().process_request(request)
request.session.save()
@@ -172,12 +177,24 @@ def test_split_filter_query_duration_and_facet(self):
# duraton filter is not a facet, but should stay present when removing a facet.
expected_filter_query_split = [
- {'remove_url': 'duration:[0 TO 10]', 'name': 'license:"attribution"'},
+ {
+ 'remove_url': 'duration:[0 TO 10]',
+ 'name': 'license:"attribution"'
+ },
]
expected_filter_query_split = [
- {'remove_url': quote_plus('duration:[0 TO 10] username:"XavierFav" grouping_pack:"1_best-pack-ever"'), 'name': 'license:"attribution"'},
- {'remove_url': quote_plus('duration:[0 TO 10] license:"attribution" grouping_pack:"1_best-pack-ever"'), 'name': 'username:"XavierFav"'},
- {'remove_url': quote_plus('duration:[0 TO 10] license:"attribution" username:"XavierFav"'), 'name': 'pack:best-pack-ever'},
+ {
+ 'remove_url': quote_plus('duration:[0 TO 10] username:"XavierFav" grouping_pack:"1_best-pack-ever"'),
+ 'name': 'license:"attribution"'
+ },
+ {
+ 'remove_url': quote_plus('duration:[0 TO 10] license:"attribution" grouping_pack:"1_best-pack-ever"'),
+ 'name': 'username:"XavierFav"'
+ },
+ {
+ 'remove_url': quote_plus('duration:[0 TO 10] license:"attribution" username:"XavierFav"'),
+ 'name': 'pack:best-pack-ever'
+ },
]
# the order does not matter for the list of facet dicts.
@@ -187,28 +204,31 @@ def test_split_filter_query_duration_and_facet(self):
username_facer_dict_idx = filter_query_names.index('username:"XavierFav"')
grouping_pack_facet_dict_idx = filter_query_names.index('pack:best-pack-ever')
- # we use assertIn because the unicode strings that split_filter_query generates can incorporate
+ # we use assertIn because the unicode strings that split_filter_query generates can incorporate
# additional spaces at the end of the string, which is not a problem.
# Additonally, some additional spaces have been observed in the middle of the remove_url string. We replace double
- # spaces with single ones in this test. However, we should probably identify where does this additional spaces
+ # spaces with single ones in this test. However, we should probably identify where these additional spaces
# come from.
# 1-Attribution
- self.assertIn(expected_filter_query_split[0]['name'],
- filter_query_split[cc_attribution_facet_dict_idx]['name'])
- self.assertIn(expected_filter_query_split[0]['remove_url'],
- filter_query_split[cc_attribution_facet_dict_idx]['remove_url'].replace('++', '+'))
+ self.assertIn(expected_filter_query_split[0]['name'], filter_query_split[cc_attribution_facet_dict_idx]['name'])
+ self.assertIn(
+ expected_filter_query_split[0]['remove_url'],
+ filter_query_split[cc_attribution_facet_dict_idx]['remove_url'].replace('++', '+')
+ )
# 2-Username
- self.assertIn(expected_filter_query_split[1]['name'],
- filter_query_split[username_facer_dict_idx]['name'])
- self.assertIn(expected_filter_query_split[1]['remove_url'],
- filter_query_split[username_facer_dict_idx]['remove_url'].replace('++', '+'))
+ self.assertIn(expected_filter_query_split[1]['name'], filter_query_split[username_facer_dict_idx]['name'])
+ self.assertIn(
+ expected_filter_query_split[1]['remove_url'],
+ filter_query_split[username_facer_dict_idx]['remove_url'].replace('++', '+')
+ )
# 3-Pack
- self.assertIn(expected_filter_query_split[2]['name'],
- filter_query_split[grouping_pack_facet_dict_idx]['name'])
- self.assertIn(expected_filter_query_split[2]['remove_url'],
- filter_query_split[grouping_pack_facet_dict_idx]['remove_url'].replace('++', '+'))
+ self.assertIn(expected_filter_query_split[2]['name'], filter_query_split[grouping_pack_facet_dict_idx]['name'])
+ self.assertIn(
+ expected_filter_query_split[2]['remove_url'],
+ filter_query_split[grouping_pack_facet_dict_idx]['remove_url'].replace('++', '+')
+ )
def test_split_filter_query_special_chars(self):
filter_query_string = 'license:"sampling+" grouping_pack:"1_example pack + @ #()*"'
@@ -217,32 +237,43 @@ def test_split_filter_query_special_chars(self):
filter_query_names = [filter_query_dict['name'] for filter_query_dict in filter_query_split]
expected_filter_query_split = [
- {'remove_url': quote_plus('grouping_pack:"1_example pack + @ #()*"'), 'name': 'license:"sampling+"'},
- {'remove_url': quote_plus('license:"sampling+"'), 'name': 'pack:example pack + @ #()*'},
+ {
+ 'remove_url': quote_plus('grouping_pack:"1_example pack + @ #()*"'),
+ 'name': 'license:"sampling+"'
+ },
+ {
+ 'remove_url': quote_plus('license:"sampling+"'),
+ 'name': 'pack:example pack + @ #()*'
+ },
]
cc_samplingplus_facet_dict_idx = filter_query_names.index('license:"sampling+"')
grouping_pack_facet_dict_idx = filter_query_names.index('pack:example pack + @ #()*')
- self.assertIn(expected_filter_query_split[0]['name'],
- filter_query_split[cc_samplingplus_facet_dict_idx]['name'])
- self.assertIn(expected_filter_query_split[0]['remove_url'],
- filter_query_split[cc_samplingplus_facet_dict_idx]['remove_url'])
-
- self.assertIn(expected_filter_query_split[1]['name'],
- filter_query_split[grouping_pack_facet_dict_idx]['name'])
- self.assertIn(expected_filter_query_split[1]['remove_url'],
- filter_query_split[grouping_pack_facet_dict_idx]['remove_url'])
-
- # most of these tests just ensure that no exception is returned when trying to parse filter strings
- # that gave problems while developping the filter string parser function
+ self.assertIn(
+ expected_filter_query_split[0]['name'], filter_query_split[cc_samplingplus_facet_dict_idx]['name']
+ )
+ self.assertIn(
+ expected_filter_query_split[0]['remove_url'],
+ filter_query_split[cc_samplingplus_facet_dict_idx]['remove_url']
+ )
+
+ self.assertIn(expected_filter_query_split[1]['name'], filter_query_split[grouping_pack_facet_dict_idx]['name'])
+ self.assertIn(
+ expected_filter_query_split[1]['remove_url'], filter_query_split[grouping_pack_facet_dict_idx]['remove_url']
+ )
+
+ # most of these tests just ensure that no exception is returned when trying to parse filter strings
+ # that gave problems while developing the filter string parser function
# utils.search.lucene_parser.parse_query_filter_string()
def test_parse_filter_query_special_created(self):
filter_query_string = 'created:[NOW-7DAY TO NOW] license:"Creative Commons 0"'
filter_query_split = parse_query_filter_string(filter_query_string)
- self.assertEqual(filter_query_split, [
- ['created', ':', '[', 'NOW-7DAY', ' TO ', 'NOW', ']'],
- ['license', ':', '"Creative Commons 0"'],
- ])
+ self.assertEqual(
+ filter_query_split, [
+ ['created', ':', '[', 'NOW-7DAY', ' TO ', 'NOW', ']'],
+ ['license', ':', '"Creative Commons 0"'],
+ ]
+ )
def test_parse_filter_query_special_char(self):
filter_query_string = 'grouping_pack:"32119_Conch Blowing (शङ्ख)"'
@@ -261,18 +292,18 @@ def test_parse_filter_query_special_char2(self):
def test_parse_filter_query_geofilter(self):
filter_query_string = 'tag:"cool" \'{!geofilt sfield=geotag pt=39.7750014,-94.2735586 d=50}\''
filter_query_split = parse_query_filter_string(filter_query_string)
- self.assertEqual(filter_query_split, [
- ['tag', ':', '"cool"'],
- ["'{!", 'geofilt sfield=geotag pt=39.7750014,-94.2735586 d=50', "}'"]
- ])
+ self.assertEqual(
+ filter_query_split,
+ [['tag', ':', '"cool"'], ["'{!", 'geofilt sfield=geotag pt=39.7750014,-94.2735586 d=50', "}'"]]
+ )
def test_parse_filter_composed_with_OR(self):
filter_query_string = 'tag:"cool" license:("Attribution" OR "Creative Commons 0")'
parsed_filters = parse_query_filter_string(filter_query_string)
- self.assertEqual(parsed_filters, [
- ['tag', ':', '"cool"'],
- ['license', ':', '(', '"Attribution"', "OR", '"Creative Commons 0"', ')']
- ])
+ self.assertEqual(
+ parsed_filters,
+ [['tag', ':', '"cool"'], ['license', ':', '(', '"Attribution"', "OR", '"Creative Commons 0"', ')']]
+ )
def test_parse_filter_nested_composed_with_OR(self):
filter_query_string = '("Attribution" OR ("Attribution" OR "Creative Commons 0"))'
@@ -287,10 +318,13 @@ def test_split_filter_query_cluster_facet(self):
parsed_filters = parse_query_filter_string(filter_query_string)
filter_query_split = split_filter_query(filter_query_string, parsed_filters, '1')
- expected_filter_query_split = [
- {'remove_url': quote_plus('duration:[0 TO 10]'), 'name': 'license:"attribution"'},
- {'remove_url': quote_plus('duration:[0 TO 10] license:"attribution"'), 'name': 'Cluster #1'}
- ]
+ expected_filter_query_split = [{
+ 'remove_url': quote_plus('duration:[0 TO 10]'),
+ 'name': 'license:"attribution"'
+ }, {
+ 'remove_url': quote_plus('duration:[0 TO 10] license:"attribution"'),
+ 'name': 'Cluster #1'
+ }]
# check that the cluster facet exists
filter_query_names = [filter_query_dict['name'] for filter_query_dict in filter_query_split]
@@ -301,12 +335,13 @@ def test_split_filter_query_cluster_facet(self):
cc_attribution_facet_dict_idx = filter_query_names.index('license:"attribution"')
cluster_facet_dict_idx = filter_query_names.index('Cluster #1')
- self.assertIn(expected_filter_query_split[0]['name'],
- filter_query_split[cc_attribution_facet_dict_idx]['name'])
- self.assertIn(expected_filter_query_split[0]['remove_url'],
- filter_query_split[cc_attribution_facet_dict_idx]['remove_url'])
+ self.assertIn(expected_filter_query_split[0]['name'], filter_query_split[cc_attribution_facet_dict_idx]['name'])
+ self.assertIn(
+ expected_filter_query_split[0]['remove_url'],
+ filter_query_split[cc_attribution_facet_dict_idx]['remove_url']
+ )
- self.assertIn(expected_filter_query_split[1]['name'],
- filter_query_split[cluster_facet_dict_idx]['name'])
- self.assertIn(expected_filter_query_split[1]['remove_url'],
- filter_query_split[cluster_facet_dict_idx]['remove_url'])
+ self.assertIn(expected_filter_query_split[1]['name'], filter_query_split[cluster_facet_dict_idx]['name'])
+ self.assertIn(
+ expected_filter_query_split[1]['remove_url'], filter_query_split[cluster_facet_dict_idx]['remove_url']
+ )
diff --git a/utils/tests/test_spam.py b/utils/tests/test_spam.py
index 29ffa38e0..8f89076bf 100644
--- a/utils/tests/test_spam.py
+++ b/utils/tests/test_spam.py
@@ -8,10 +8,9 @@
class SpamTest(TestCase):
+
def setUp(self):
- spam_user = User.objects.create_user(
- username="viagra-test-123", email="akismet-guaranteed-spam@example.com"
- )
+ spam_user = User.objects.create_user(username="viagra-test-123", email="akismet-guaranteed-spam@example.com")
self.client.force_login(spam_user)
rf = RequestFactory()
self.spam_request = rf.post("/some_form/")
diff --git a/utils/tests/test_text.py b/utils/tests/test_text.py
index 6060a82b6..305379078 100644
--- a/utils/tests/test_text.py
+++ b/utils/tests/test_text.py
@@ -18,7 +18,6 @@
# See AUTHORS file.
#
-
from django.test import TestCase
from utils.forms import HtmlCleaningCharField
@@ -77,16 +76,24 @@ def test_clean_html(self):
self.assertEqual('a b c d', ret)
# Also make sure links contains rel="nofollow"
- ret = clean_html('google', ok_attributes=ok_attributes, ok_tags=ok_tags)
+ ret = clean_html(
+ 'google', ok_attributes=ok_attributes, ok_tags=ok_tags
+ )
self.assertEqual('google', ret)
ret = clean_html('google', ok_attributes=ok_attributes, ok_tags=ok_tags)
self.assertEqual('google', ret)
- ret = clean_html('this should return the substring just fine
', ok_attributes=ok_attributes, ok_tags=ok_tags)
+ ret = clean_html(
+ 'this should return the substring just fine
',
+ ok_attributes=ok_attributes,
+ ok_tags=ok_tags
+ )
self.assertEqual('this should return the substring just fine', ret)
- ret = clean_html('', ok_attributes=ok_attributes, ok_tags=ok_tags)
+ ret = clean_html(
+ '', ok_attributes=ok_attributes, ok_tags=ok_tags
+ )
self.assertEqual('amazinggrace', ret)
ret = clean_html('click me', ok_attributes=ok_attributes, ok_tags=ok_tags)
@@ -107,27 +114,60 @@ def test_clean_html(self):
ret = clean_html('', ok_attributes=ok_attributes, ok_tags=ok_tags)
self.assertEqual('', ret)
- ret = clean_html('http://www.google.com', ok_attributes=ok_attributes, ok_tags=ok_tags)
- self.assertEqual('http://www.google.com', ret)
-
- ret = clean_html('http://www.google.com', ok_attributes=ok_attributes, ok_tags=ok_tags)
- self.assertEqual('http://www.google.com', ret)
-
- ret = clean_html('http://www.google.com http://www.google.com', ok_attributes=ok_attributes, ok_tags=ok_tags)
- self.assertEqual('http://www.google.com http://www.google.com', ret)
-
- ret = clean_html('', ok_attributes=ok_attributes, ok_tags=ok_tags)
- self.assertEqual('', ret)
+ ret = clean_html(
+ 'http://www.google.com',
+ ok_attributes=ok_attributes,
+ ok_tags=ok_tags
+ )
+ self.assertEqual(
+ 'http://www.google.com', ret
+ )
+
+ ret = clean_html(
+ 'http://www.google.com',
+ ok_attributes=ok_attributes,
+ ok_tags=ok_tags
+ )
+ self.assertEqual(
+ 'http://www.google.com', ret
+ )
+
+ ret = clean_html(
+ 'http://www.google.com http://www.google.com', ok_attributes=ok_attributes, ok_tags=ok_tags
+ )
+ self.assertEqual(
+ 'http://www.google.com http://www.google.com',
+ ret
+ )
+
+ ret = clean_html(
+ '',
+ ok_attributes=ok_attributes,
+ ok_tags=ok_tags
+ )
+ self.assertEqual(
+ '',
+ ret
+ )
ret = clean_html('abc http://www.google.com abc', ok_attributes=ok_attributes, ok_tags=ok_tags)
self.assertEqual('abc http://www.google.com abc', ret)
# The links inside <> are encoded by < and >
ret = clean_html('abc abc', ok_attributes=ok_attributes, ok_tags=ok_tags)
- self.assertEqual('abc < http://www.google.com > abc', ret)
-
- ret = clean_html('GALORE: https://freesound.iua.upf.edu/samplesViewSingle.php?id=22092\\nFreesound Moderator', ok_attributes=ok_attributes, ok_tags=ok_tags)
- self.assertEqual('GALORE: https://freesound.iua.upf.edu/samplesViewSingle.php?id=22092\\nFreesound Moderator', ret)
+ self.assertEqual(
+ 'abc < http://www.google.com > abc', ret
+ )
+
+ ret = clean_html(
+ 'GALORE: https://freesound.iua.upf.edu/samplesViewSingle.php?id=22092\\nFreesound Moderator',
+ ok_attributes=ok_attributes,
+ ok_tags=ok_tags
+ )
+ self.assertEqual(
+ 'GALORE: https://freesound.iua.upf.edu/samplesViewSingle.php?id=22092\\nFreesound Moderator',
+ ret
+ )
# Allow custom placeholders
ret = clean_html('my sound id', ok_attributes=ok_attributes, ok_tags=ok_tags)
@@ -136,7 +176,9 @@ def test_clean_html(self):
ret = clean_html('my sound url', ok_attributes=ok_attributes, ok_tags=ok_tags)
self.assertEqual('my sound url', ret)
- ret = clean_html('', ok_attributes=ok_attributes, ok_tags=ok_tags)
+ ret = clean_html(
+ '', ok_attributes=ok_attributes, ok_tags=ok_tags
+ )
self.assertEqual('', ret)
ret = clean_html('', ok_attributes=ok_attributes, ok_tags=ok_tags)
diff --git a/utils/tests/tests.py b/utils/tests/tests.py
index bebbe49a2..df0611640 100644
--- a/utils/tests/tests.py
+++ b/utils/tests/tests.py
@@ -56,7 +56,8 @@ def test_download_sounds(self):
base_filename_slug="test_sound_%i" % i,
license=License.objects.all()[0],
pack=pack,
- md5="fakemd5_%i" % i)
+ md5="fakemd5_%i" % i
+ )
licenses_url = (reverse('pack-licenses', args=["testuser", pack.id]))
ret = utils.downloads.download_sounds(licenses_url, pack)
self.assertEqual(ret.status_code, 200)
@@ -141,24 +142,30 @@ def test_should_suggest_donation_probabilty_1(self):
original_filename="Test sound",
base_filename_slug="test_sound_10",
license=License.objects.all()[0],
- md5="fakemd5_10")
+ md5="fakemd5_10"
+ )
for i in range(0, donations_settings.downloads_in_period):
Download.objects.create(user=user, sound=sound, license=License.objects.first())
self.assertEqual(utils.downloads.should_suggest_donation(user, times_shown_in_last_day), False)
- Download.objects.create(user=user, sound=sound, license=License.objects.first()) # downloads > donations_settings.downloads_in_period (modal shows)
+ Download.objects.create(
+ user=user, sound=sound, license=License.objects.first()
+ ) # downloads > donations_settings.downloads_in_period (modal shows)
self.assertEqual(utils.downloads.should_suggest_donation(user, times_shown_in_last_day), True)
# if the download objects are older than donations_settings.download_days, don't consider them
Download.objects.filter(user=user).update(
- created=datetime.datetime.now()-datetime.timedelta(days=donations_settings.download_days + 1))
+ created=datetime.datetime.now() - datetime.timedelta(days=donations_settings.download_days + 1)
+ )
self.assertEqual(utils.downloads.should_suggest_donation(user, times_shown_in_last_day), False)
# if user has donations but these are older than donations_settings.days_after_donation, do not consider them
Donation.objects.create(user=user, amount=1)
Donation.objects.filter(user=user).update(
- created=datetime.datetime.now()-datetime.timedelta(days=donations_settings.days_after_donation + 1))
+ created=datetime.datetime.now() - datetime.timedelta(days=donations_settings.days_after_donation + 1)
+ )
Download.objects.filter(user=user).update(
- created=datetime.datetime.now()) # Change downloads date again to be recent (modal show be shown)
+ created=datetime.datetime.now()
+ ) # Change downloads date again to be recent (modal should be shown)
self.assertEqual(utils.downloads.should_suggest_donation(user, times_shown_in_last_day), True)
def test_should_suggest_donation_probabilty_0(self):
@@ -191,25 +198,29 @@ def test_should_suggest_donation_probabilty_0(self):
original_filename="Test sound",
base_filename_slug="test_sound_10",
license=License.objects.all()[0],
- md5="fakemd5_10")
+ md5="fakemd5_10"
+ )
for i in range(0, donations_settings.downloads_in_period):
Download.objects.create(user=user, sound=sound, license=License.objects.first())
self.assertEqual(utils.downloads.should_suggest_donation(user, times_shown_in_last_day), False)
- Download.objects.create(user=user, sound=sound, license=License.objects.first()) # n downloads > donations_settings.downloads_in_period
+ Download.objects.create(
+ user=user, sound=sound, license=License.objects.first()
+ ) # n downloads > donations_settings.downloads_in_period
# In this case still not shown the modal as probability is 0.0
self.assertEqual(utils.downloads.should_suggest_donation(user, times_shown_in_last_day), False)
# if the download objects are older than donations_settings.download_days, don't consider them
Download.objects.filter(user=user).update(
- created=datetime.datetime.now() - datetime.timedelta(days=donations_settings.download_days + 1))
+ created=datetime.datetime.now() - datetime.timedelta(days=donations_settings.download_days + 1)
+ )
self.assertEqual(utils.downloads.should_suggest_donation(user, times_shown_in_last_day), False)
# if user has donations but these are older than donations_settings.days_after_donation, do not consider them
Donation.objects.create(user=user, amount=1)
Donation.objects.filter(user=user).update(
- created=datetime.datetime.now() - datetime.timedelta(days=donations_settings.days_after_donation + 1))
- Download.objects.filter(user=user).update(
- created=datetime.datetime.now())
+ created=datetime.datetime.now() - datetime.timedelta(days=donations_settings.days_after_donation + 1)
+ )
+ Download.objects.filter(user=user).update(created=datetime.datetime.now())
# Change downloads date again to be recent (however modal won't show because probability is 0.0)
self.assertEqual(utils.downloads.should_suggest_donation(user, times_shown_in_last_day), False)
@@ -274,115 +285,139 @@ def test_validate_input_csv_file(self):
os.makedirs(csv_file_base_path, exist_ok=True)
# Test CSV with all lines and metadata ok
- csv_file_path = self.create_file_with_lines('test_descriptions.csv', [
- 'audio_filename,name,tags,geotag,description,license,pack_name,is_explicit',
- 'file1.wv,New name for file1.wav,"tag1 tag2 tag3","41.4065, 2.19504, 18",'
- '"Description for file",Creative Commons 0,ambient,0', # All fields valid
- 'file2.wav,,"tag1 tag2 tag3",,"Description for file",Creative Commons 0,,0', # Only mandatory fields
- 'file3.wav,,"tag1 tag2 tag3",,'
- '"Description for file",Creative Commons 0,ambient,1', # All mandatory fields and some optional fields
- ], csv_file_base_path)
+ csv_file_path = self.create_file_with_lines(
+ 'test_descriptions.csv',
+ [
+ 'audio_filename,name,tags,geotag,description,license,pack_name,is_explicit',
+ 'file1.wv,New name for file1.wav,"tag1 tag2 tag3","41.4065, 2.19504, 18",'
+ '"Description for file",Creative Commons 0,ambient,0', # All fields valid
+ 'file2.wav,,"tag1 tag2 tag3",,"Description for file",Creative Commons 0,,0', # Only mandatory fields
+ 'file3.wav,,"tag1 tag2 tag3",,'
+ '"Description for file",Creative Commons 0,ambient,1', # All mandatory fields and some optional fields
+ ],
+ csv_file_base_path
+ )
header, lines = get_csv_lines(csv_file_path)
lines_validated, global_errors = \
validate_input_csv_file(header, lines, user_upload_path, username=user.username)
- self.assertEqual(len(global_errors), 0) # No global errors
- self.assertEqual(len([line for line in lines_validated if line['line_errors']]), 0) # No line errors
+ self.assertEqual(len(global_errors), 0) # No global errors
+ self.assertEqual(len([line for line in lines_validated if line['line_errors']]), 0) # No line errors
# Test username does not exist
lines_validated, global_errors = \
validate_input_csv_file(header, lines, user_upload_path, username="unexisting username")
- self.assertEqual(len(global_errors), 0) # No global errors
- self.assertEqual(len([line for line in lines_validated if line['line_errors']]), 3) # Three line errors
- self.assertTrue('username' in lines_validated[0]['line_errors']) # User does not exist error reported
- self.assertTrue('username' in lines_validated[1]['line_errors']) # User does not exist error reported
- self.assertTrue('username' in lines_validated[2]['line_errors']) # User does not exist error reported
+ self.assertEqual(len(global_errors), 0) # No global errors
+ self.assertEqual(len([line for line in lines_validated if line['line_errors']]), 3) # Three line errors
+ self.assertTrue('username' in lines_validated[0]['line_errors']) # User does not exist error reported
+ self.assertTrue('username' in lines_validated[1]['line_errors']) # User does not exist error reported
+ self.assertTrue('username' in lines_validated[2]['line_errors']) # User does not exist error reported
# Test missing/duplicated audiofile and wrong number of rows
- csv_file_path = self.create_file_with_lines('test_descriptions.csv', [
- 'audio_filename,name,tags,geotag,description,license,pack_name,is_explicit',
- 'file1.wv,,"tag1 tag2 tag3",,"Description for file",Creative Commons 0,,1', # File exists, fields ok
- 'file2.wav,,"tag1 tag2 tag3",,,Creative Commons 0,,1', # Missing description
- 'file3.wav,,"tag1 tag2 tag3",,"Description for file",,1', # Wrong number of columns
- 'file6.wav,,"tag1 tag2 tag3",,"Description for file",Creative Commons 0,,1', # Audiofile does not exist
- 'file2.wav,,"tag1 tag2 tag3",,"Description for file",Creative Commons 0,,1', # Audiofile already described
- ], csv_file_base_path)
+ csv_file_path = self.create_file_with_lines(
+ 'test_descriptions.csv',
+ [
+ 'audio_filename,name,tags,geotag,description,license,pack_name,is_explicit',
+ 'file1.wv,,"tag1 tag2 tag3",,"Description for file",Creative Commons 0,,1', # File exists, fields ok
+ 'file2.wav,,"tag1 tag2 tag3",,,Creative Commons 0,,1', # Missing description
+ 'file3.wav,,"tag1 tag2 tag3",,"Description for file",,1', # Wrong number of columns
+ 'file6.wav,,"tag1 tag2 tag3",,"Description for file",Creative Commons 0,,1', # Audiofile does not exist
+ 'file2.wav,,"tag1 tag2 tag3",,"Description for file",Creative Commons 0,,1', # Audiofile already described
+ ],
+ csv_file_base_path
+ )
header, lines = get_csv_lines(csv_file_path)
lines_validated, global_errors = \
validate_input_csv_file(header, lines, user_upload_path, username=user.username)
- self.assertEqual(len(global_errors), 0) # No global errors
- self.assertEqual(len([line for line in lines_validated if line['line_errors']]), 4) # Four lines have errors
- self.assertTrue('description' in lines_validated[1]['line_errors']) # Missing description error reported
- self.assertTrue('columns' in lines_validated[2]['line_errors']) # Wrong number of columns reported
- self.assertTrue('audio_filename' in lines_validated[3]['line_errors']) # Audiofile not exist error reported
- self.assertTrue('audio_filename' in lines_validated[4]['line_errors']) # File already described error reported
+ self.assertEqual(len(global_errors), 0) # No global errors
+ self.assertEqual(len([line for line in lines_validated if line['line_errors']]), 4) # Four lines have errors
+ self.assertTrue('description' in lines_validated[1]['line_errors']) # Missing description error reported
+ self.assertTrue('columns' in lines_validated[2]['line_errors']) # Wrong number of columns reported
+ self.assertTrue('audio_filename' in lines_validated[3]['line_errors']) # Audiofile not exist error reported
+ self.assertTrue(
+ 'audio_filename' in lines_validated[4]['line_errors']
+ ) # File already described error reported
# Test validation errors in individual fields
- csv_file_path = self.create_file_with_lines('test_descriptions.csv', [
- 'audio_filename,name,tags,geotag,description,license,pack_name,is_explicit',
- 'file1.wv,,"tag1 tag2",,"Description for file",Creative Commons 0,,1', # Wrong tags (less than 3)
- 'file2.wav,,"tag1,tag2",,"Description for file",Creative Commons 0,,1', # Wrong tags (less than 3)
- 'file3.wav,,"tag1,tag2",gr87g,"Description for file2",Creative Commons 0,,1', # Wrong geotag
- 'file4.wav,,"tag1,tag2",42.34,190.45,15,"Description for file",Creative Commons 0,,1', # Wrong geotag
- 'file5.wav,,"tag1 tag2 tag3",,"Description for file",Sampling+,,1', # Invalid license
- 'file6.wav,,"tag1 tag2 tag3",,"Description for file",Sampling+,,rt', # Invalid is_explicit
- ], csv_file_base_path)
+ csv_file_path = self.create_file_with_lines(
+ 'test_descriptions.csv',
+ [
+ 'audio_filename,name,tags,geotag,description,license,pack_name,is_explicit',
+ 'file1.wv,,"tag1 tag2",,"Description for file",Creative Commons 0,,1', # Wrong tags (less than 3)
+ 'file2.wav,,"tag1,tag2",,"Description for file",Creative Commons 0,,1', # Wrong tags (less than 3)
+ 'file3.wav,,"tag1,tag2",gr87g,"Description for file2",Creative Commons 0,,1', # Wrong geotag
+ 'file4.wav,,"tag1,tag2",42.34,190.45,15,"Description for file",Creative Commons 0,,1', # Wrong geotag
+ 'file5.wav,,"tag1 tag2 tag3",,"Description for file",Sampling+,,1', # Invalid license
+ 'file6.wav,,"tag1 tag2 tag3",,"Description for file",Sampling+,,rt', # Invalid is_explicit
+ ],
+ csv_file_base_path
+ )
header, lines = get_csv_lines(csv_file_path)
lines_validated, global_errors = \
validate_input_csv_file(header, lines, user_upload_path, username=user.username)
- self.assertEqual(len(global_errors), 0) # No global errors
- self.assertEqual(len([line for line in lines_validated if line['line_errors']]), 6) # Six lines have errors
- self.assertTrue('tags' in lines_validated[0]['line_errors']) # Wrong tags
- self.assertTrue('tags' in lines_validated[1]['line_errors']) # Wrong tags
- self.assertTrue('geotag' in lines_validated[2]['line_errors']) # Wrong geotag
- self.assertTrue('geotag' in lines_validated[3]['line_errors']) # Wrong geotag
- self.assertTrue('license' in lines_validated[4]['line_errors']) # Wrong license
- self.assertTrue('is_explicit' in lines_validated[5]['line_errors']) # Wrong is_explicit
+ self.assertEqual(len(global_errors), 0) # No global errors
+ self.assertEqual(len([line for line in lines_validated if line['line_errors']]), 6) # Six lines have errors
+ self.assertTrue('tags' in lines_validated[0]['line_errors']) # Wrong tags
+ self.assertTrue('tags' in lines_validated[1]['line_errors']) # Wrong tags
+ self.assertTrue('geotag' in lines_validated[2]['line_errors']) # Wrong geotag
+ self.assertTrue('geotag' in lines_validated[3]['line_errors']) # Wrong geotag
+ self.assertTrue('license' in lines_validated[4]['line_errors']) # Wrong license
+ self.assertTrue('is_explicit' in lines_validated[5]['line_errors']) # Wrong is_explicit
# Test wrong header global errors
- csv_file_path = self.create_file_with_lines('test_descriptions.csv', [
- 'audio_filename,name,tags,geotag,description,license,unknown_field',
- ], csv_file_base_path)
+ csv_file_path = self.create_file_with_lines(
+ 'test_descriptions.csv', [
+ 'audio_filename,name,tags,geotag,description,license,unknown_field',
+ ], csv_file_base_path
+ )
header, lines = get_csv_lines(csv_file_path)
lines_validated, global_errors = \
validate_input_csv_file(header, lines, user_upload_path, username=user.username)
- self.assertEqual(len(global_errors), 2) # Two global errors
- self.assertTrue('Invalid header' in global_errors[0]) # Invalid header error reported
- self.assertTrue('no lines with sound' in global_errors[1]) # No sounds in csv file error reported
-
- csv_file_path = self.create_file_with_lines('test_descriptions.csv', [
- 'audio_filename,name,tags,geotag,description,license,pack_name,is_explicit',
- ], csv_file_base_path)
+ self.assertEqual(len(global_errors), 2) # Two global errors
+ self.assertTrue('Invalid header' in global_errors[0]) # Invalid header error reported
+ self.assertTrue('no lines with sound' in global_errors[1]) # No sounds in csv file error reported
+
+ csv_file_path = self.create_file_with_lines(
+ 'test_descriptions.csv', [
+ 'audio_filename,name,tags,geotag,description,license,pack_name,is_explicit',
+ ], csv_file_base_path
+ )
header, lines = get_csv_lines(csv_file_path)
lines_validated, global_errors = \
validate_input_csv_file(header, lines, user_upload_path,
username=None) # Not passing username, header should now include 'username' field
- self.assertEqual(len(global_errors), 2) # One global error
- self.assertTrue('Invalid header' in global_errors[0]) # Invalid header error reported
- self.assertTrue('no lines with sound' in global_errors[1]) # No sounds in csv file error reported
-
- csv_file_path = self.create_file_with_lines('test_descriptions.csv', [
- 'audio_filename,name,tags,geotag,description,license,pack_name,is_explicit,username',
- ], csv_file_base_path)
+ self.assertEqual(len(global_errors), 2) # Two global errors
+ self.assertTrue('Invalid header' in global_errors[0]) # Invalid header error reported
+ self.assertTrue('no lines with sound' in global_errors[1]) # No sounds in csv file error reported
+
+ csv_file_path = self.create_file_with_lines(
+ 'test_descriptions.csv', [
+ 'audio_filename,name,tags,geotag,description,license,pack_name,is_explicit,username',
+ ], csv_file_base_path
+ )
header, lines = get_csv_lines(csv_file_path)
lines_validated, global_errors = \
validate_input_csv_file(header, lines, user_upload_path,
username=None) # Not passing username, header should now include 'username' field
- self.assertEqual(len(global_errors), 1) # One global error
- self.assertTrue('no lines with sound' in global_errors[0]) # No sounds in csv file error reported
+ self.assertEqual(len(global_errors), 1) # One global error
+ self.assertTrue('no lines with sound' in global_errors[0]) # No sounds in csv file error reported
# Test username errors when not passing username argument to validate_input_csv_file
- csv_file_path = self.create_file_with_lines('test_descriptions.csv', [
- 'audio_filename,name,tags,geotag,description,license,pack_name,is_explicit,username',
- 'file1.wv,,"tag1 tag2 tag3",,"Description for file",Creative Commons 0,,1,new_username', # User does not exist
- 'file2.wav,,"tag1 tag2 tag3",,"Description for file",Creative Commons 0,,1', # Invlaid num columns
- 'file3.wav,,"tag1 tag2 tag3",,"Description for file",Creative Commons 0,,0,testuser', # All fields OK
- ], csv_file_base_path)
+ csv_file_path = self.create_file_with_lines(
+ 'test_descriptions.csv',
+ [
+ 'audio_filename,name,tags,geotag,description,license,pack_name,is_explicit,username',
+ 'file1.wv,,"tag1 tag2 tag3",,"Description for file",Creative Commons 0,,1,new_username', # User does not exist
+ 'file2.wav,,"tag1 tag2 tag3",,"Description for file",Creative Commons 0,,1', # Invalid num columns
+ 'file3.wav,,"tag1 tag2 tag3",,"Description for file",Creative Commons 0,,0,testuser', # All fields OK
+ ],
+ csv_file_base_path
+ )
header, lines = get_csv_lines(csv_file_path)
lines_validated, global_errors = validate_input_csv_file(header, lines, user_upload_path, username=None)
- self.assertEqual(len(global_errors), 0) # No global errors
- self.assertEqual(len([line for line in lines_validated if line['line_errors']]), 2) # Two lines have errors
- self.assertTrue('username' in lines_validated[0]['line_errors']) # User does not exist
- self.assertTrue('columns' in lines_validated[1]['line_errors']) # Invalid number of columns
+ self.assertEqual(len(global_errors), 0) # No global errors
+ self.assertEqual(len([line for line in lines_validated if line['line_errors']]), 2) # Two lines have errors
+ self.assertTrue('username' in lines_validated[0]['line_errors']) # User does not exist
+ self.assertTrue('columns' in lines_validated[1]['line_errors']) # Invalid number of columns
@override_uploads_path_with_temp_directory
@override_csv_path_with_temp_directory
@@ -399,62 +434,74 @@ def test_bulk_describe_from_csv(self):
os.makedirs(csv_file_base_path, exist_ok=True)
# Create Test CSV with some lines ok and some wrong lines
- csv_file_path = self.create_file_with_lines('test_descriptions.csv', [
- 'audio_filename,name,tags,geotag,description,license,pack_name,is_explicit',
- 'file1.wav,,"tag1 tag2 tag3","41.4065, 2.19504, 18","Description for file",Creative Commons 0,ambient,1', # OK
- 'file2.wav,,"tag1 tag2 tag3",,"Description for file",Invalid license,,1', # Invalid license
- 'file3.wav,,"tag1 tag2 tag3",,"Description for file",Creative Commons 0,1', # Wrong number of columns
- 'file4.wav,,"tag1 tag2 tag3",dg,"Description for file",Creative Commons 0,,0', # Invalid geotag
- 'file5.wav,,"tag1 tag2 tag3",,"Description for file",Creative Commons 0,,0', # OK
- ], csv_file_base_path)
+ csv_file_path = self.create_file_with_lines(
+ 'test_descriptions.csv',
+ [
+ 'audio_filename,name,tags,geotag,description,license,pack_name,is_explicit',
+ 'file1.wav,,"tag1 tag2 tag3","41.4065, 2.19504, 18","Description for file",Creative Commons 0,ambient,1', # OK
+ 'file2.wav,,"tag1 tag2 tag3",,"Description for file",Invalid license,,1', # Invalid license
+ 'file3.wav,,"tag1 tag2 tag3",,"Description for file",Creative Commons 0,1', # Wrong number of columns
+ 'file4.wav,,"tag1 tag2 tag3",dg,"Description for file",Creative Commons 0,,0', # Invalid geotag
+ 'file5.wav,,"tag1 tag2 tag3",,"Description for file",Creative Commons 0,,0', # OK
+ ],
+ csv_file_base_path
+ )
# Test case when no sounds are been created because CSV file has some errors and 'force_import' is set to False
- bulk_describe_from_csv(csv_file_path,
- delete_already_existing=False,
- force_import=False,
- sounds_base_dir=user_upload_path,
- username=user.username)
- self.assertEqual(user.sounds.count(), 0) # User has no sounds
+ bulk_describe_from_csv(
+ csv_file_path,
+ delete_already_existing=False,
+ force_import=False,
+ sounds_base_dir=user_upload_path,
+ username=user.username
+ )
+ self.assertEqual(user.sounds.count(), 0) # User has no sounds
# Test case using 'force_import' (only sounds for lines that validate ok will be created)
- bulk_describe_from_csv(csv_file_path,
- delete_already_existing=False,
- force_import=True,
- sounds_base_dir=user_upload_path,
- username=user.username)
- self.assertEqual(user.sounds.count(), 2) # The two sounds that had correct metadata have been added
- sound1 = Sound.objects.get(user=user, original_filename='file1.wav') # Get first correct sound
- sound1_id = sound1.id # This is used in a test below
- self.assertTrue(sound1.geotag) # Check sound has geotag object assigned
- self.assertEqual(sound1.pack.name, 'ambient') # Check sound has pack and name of pack is 'ambient'
- sound2 = Sound.objects.get(user=user, original_filename='file5.wav') # Get last correct sound
- sound2_id = sound2.id # This is used in a test below
- self.assertIsNone(sound2.geotag) # Check sound has no geotag
- self.assertIsNone(sound2.pack) # Check sound has no pack
+ bulk_describe_from_csv(
+ csv_file_path,
+ delete_already_existing=False,
+ force_import=True,
+ sounds_base_dir=user_upload_path,
+ username=user.username
+ )
+ self.assertEqual(user.sounds.count(), 2) # The two sounds that had correct metadata have been added
+ sound1 = Sound.objects.get(user=user, original_filename='file1.wav') # Get first correct sound
+ sound1_id = sound1.id # This is used in a test below
+ self.assertTrue(sound1.geotag) # Check sound has geotag object assigned
+ self.assertEqual(sound1.pack.name, 'ambient') # Check sound has pack and name of pack is 'ambient'
+ sound2 = Sound.objects.get(user=user, original_filename='file5.wav') # Get last correct sound
+ sound2_id = sound2.id # This is used in a test below
+ self.assertIsNone(sound2.geotag) # Check sound has no geotag
+ self.assertIsNone(sound2.pack) # Check sound has no pack
# Run again using 'force_import' and sounds won't be created because sounds already exist and md5 check fails
# NOTE: first we copy back the files that were already successfully added because otherwise these don't exist
shutil.copy(sound1.locations()['path'], os.path.join(user_upload_path, 'file1.wav'))
shutil.copy(sound2.locations()['path'], os.path.join(user_upload_path, 'file5.wav'))
- bulk_describe_from_csv(csv_file_path,
- delete_already_existing=False,
- force_import=True,
- sounds_base_dir=user_upload_path,
- username=user.username)
- self.assertEqual(user.sounds.count(), 2) # User still has two sounds, no new sounds added
+ bulk_describe_from_csv(
+ csv_file_path,
+ delete_already_existing=False,
+ force_import=True,
+ sounds_base_dir=user_upload_path,
+ username=user.username
+ )
+ self.assertEqual(user.sounds.count(), 2) # User still has two sounds, no new sounds added
# Run again using 'force_import' AND 'delete_already_existing' and existing sounds will be removed before
# creating the new ones
# NOTE: first we copy back the files that failed MD5 check as files are discarted (deleted) when MD5 fails
shutil.copy(sound1.locations()['path'], os.path.join(user_upload_path, 'file1.wav'))
shutil.copy(sound2.locations()['path'], os.path.join(user_upload_path, 'file5.wav'))
- bulk_describe_from_csv(csv_file_path,
- delete_already_existing=True,
- force_import=True,
- sounds_base_dir=user_upload_path,
- username=user.username)
- self.assertEqual(user.sounds.count(), 2) # User still has two sounds
- new_sound1 = Sound.objects.get(user=user, original_filename='file1.wav') # New version of first correct sound
- new_sound2 = Sound.objects.get(user=user, original_filename='file5.wav') # New version of last correct sound
- self.assertNotEqual(new_sound1.id, sound1_id) # Check that IDs are not the same
- self.assertNotEqual(new_sound2.id, sound2_id) # Check that IDs are not the same
+ bulk_describe_from_csv(
+ csv_file_path,
+ delete_already_existing=True,
+ force_import=True,
+ sounds_base_dir=user_upload_path,
+ username=user.username
+ )
+ self.assertEqual(user.sounds.count(), 2) # User still has two sounds
+ new_sound1 = Sound.objects.get(user=user, original_filename='file1.wav') # New version of first correct sound
+ new_sound2 = Sound.objects.get(user=user, original_filename='file5.wav') # New version of last correct sound
+ self.assertNotEqual(new_sound1.id, sound1_id) # Check that IDs are not the same
+ self.assertNotEqual(new_sound2.id, sound2_id) # Check that IDs are not the same
diff --git a/utils/text.py b/utils/text.py
index 47c8fec54..20f72987d 100644
--- a/utils/text.py
+++ b/utils/text.py
@@ -56,6 +56,7 @@ def is_valid_url(url):
class EmptyLinkFilter(Filter):
+
def __iter__(self):
remove_end_tag = False
for token in Filter.__iter__(self):
@@ -88,13 +89,14 @@ def clean_html(input, ok_tags=[], ok_attributes={}):
input = re.sub(r"\<(http\S+?)\>", r'< \1 >', input)
cleaner = bleach.Cleaner(
- filters=[
- EmptyLinkFilter,
- partial(bleach.linkifier.LinkifyFilter, callbacks=[nofollow]),
- ],
- attributes=ok_attributes,
- tags=ok_tags,
- strip=True)
+ filters=[
+ EmptyLinkFilter,
+ partial(bleach.linkifier.LinkifyFilter, callbacks=[nofollow]),
+ ],
+ attributes=ok_attributes,
+ tags=ok_tags,
+ strip=True
+ )
output = cleaner.clean(input)
return output
@@ -123,11 +125,11 @@ def text_may_be_spam(text):
return True
# If emails or short urls
- if re.search(r"[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(\s|$|\/|\]|\.)", text):
+ if re.search(r"[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(\s|$|\/|\]|\.)", text):
return True
# If consecutive numbers
- if re.search(r"\(|\)|\d{7}", text):
+ if re.search(r"\(|\)|\d{7}", text):
return True
# If non ascii characters
diff --git a/utils/urlpatterns.py b/utils/urlpatterns.py
index 511da79f8..77916dd9d 100644
--- a/utils/urlpatterns.py
+++ b/utils/urlpatterns.py
@@ -22,13 +22,16 @@
from django.http import HttpResponseRedirect
from django.urls import reverse
+
def redirect_inline(function=None, redirect_url_name='front-page', kwarg_keys=None, query_string=''):
"""
Redirects to a specific view, can be used inline when defining urlpatterns.
> redirect_inline(PasswordResetView.as_view(form_class=FsPasswordResetForm))
"""
+
def decorator(view_func):
+
@wraps(view_func)
def _wrapped_view(request, *args, **kwargs):
if kwarg_keys is not None:
@@ -39,5 +42,7 @@ def _wrapped_view(request, *args, **kwargs):
if query_string:
url += f'?{query_string}'
return HttpResponseRedirect(url)
+
return _wrapped_view
- return decorator(function)
\ No newline at end of file
+
+ return decorator(function)
diff --git a/utils/username.py b/utils/username.py
index 0d6c0e631..62de6fbb0 100644
--- a/utils/username.py
+++ b/utils/username.py
@@ -18,7 +18,6 @@
# See AUTHORS file.
#
-
from django.contrib.auth.models import User
from accounts.models import OldUsername
from django.http import Http404
diff --git a/wiki/admin.py b/wiki/admin.py
index c1fb4ea17..f35b0453a 100644
--- a/wiki/admin.py
+++ b/wiki/admin.py
@@ -21,9 +21,10 @@
from django.contrib import admin
from wiki.models import Page, Content
+
@admin.register(Page)
class PageAdmin(admin.ModelAdmin):
- list_display = ('name', )
+ list_display = ('name',)
def has_add_permission(self, request):
return False
@@ -31,8 +32,13 @@ def has_add_permission(self, request):
@admin.register(Content)
class ContentAdmin(admin.ModelAdmin):
- raw_id_fields = ('author', )
- list_display = ('page', 'author', 'title', 'created', )
+ raw_id_fields = ('author',)
+ list_display = (
+ 'page',
+ 'author',
+ 'title',
+ 'created',
+ )
def has_add_permission(self, request):
return False
diff --git a/wiki/models.py b/wiki/models.py
index 137c538ba..43146bafc 100644
--- a/wiki/models.py
+++ b/wiki/models.py
@@ -23,6 +23,7 @@
from django.utils.encoding import smart_str
from django.urls import reverse
+
class Page(models.Model):
name = models.CharField(max_length=256, db_index=True)
@@ -42,9 +43,9 @@ class Content(models.Model):
title = models.CharField(max_length=250)
body = models.TextField()
created = models.DateTimeField(db_index=True, auto_now_add=True)
-
+
class Meta:
- ordering = ('-created', )
+ ordering = ('-created',)
get_latest_by = 'created'
def __str__(self):
diff --git a/wiki/tests.py b/wiki/tests.py
index 5802c4472..748703265 100644
--- a/wiki/tests.py
+++ b/wiki/tests.py
@@ -132,8 +132,13 @@ def test_edit_page_no_page(self):
def test_edit_page_save(self):
# POST to the form and a new Content for this page is created
self.client.force_login(self.user1)
- resp = self.client.post(reverse('wiki-page-edit', kwargs={'name': 'help'}), data={'title': 'Page title',
- 'body': 'This is some body'})
+ resp = self.client.post(
+ reverse('wiki-page-edit', kwargs={'name': 'help'}),
+ data={
+ 'title': 'Page title',
+ 'body': 'This is some body'
+ }
+ )
content = self.page.content()
self.assertEqual(content.title, 'Page title')
self.assertEqual(content.body, 'This is some body')
diff --git a/wiki/views.py b/wiki/views.py
index fb2403fbe..3ad9dc29d 100644
--- a/wiki/views.py
+++ b/wiki/views.py
@@ -20,7 +20,7 @@
from django.http import HttpResponseRedirect, Http404
-from django.shortcuts import render
+from django.shortcuts import render
from django.urls import reverse
from wiki.models import Content, Page
@@ -54,8 +54,7 @@ def page(request, name):
# If there is still no content, then this page has no Content objects, return Blank
content = Content.objects.filter(page__name__iexact="blank").select_related().latest()
- tvars = {'content': content,
- 'name': name}
+ tvars = {'content': content, 'name': name}
return render(request, 'wiki/page.html', tvars)
@@ -81,9 +80,7 @@ def editpage(request, name):
content = None
form = ContentForm()
- tvars = {'content': content,
- 'form': form,
- 'name': name}
+ tvars = {'content': content, 'form': form, 'name': name}
return render(request, 'wiki/edit.html', tvars)
@@ -109,11 +106,10 @@ def history(request, name):
version1 = Content.objects.select_related().get(id=request.GET.get('version1'))
version2 = Content.objects.select_related().get(id=request.GET.get('version2'))
- diff = difflib.HtmlDiff(4, 55).make_table(version1.body.split('\n'), version2.body.split('\n'), 'version %d' % version1.id, 'version %d' % version2.id, True, 5)
+ diff = difflib.HtmlDiff(4, 55).make_table(
+ version1.body.split('\n'), version2.body.split('\n'), 'version %d' % version1.id,
+ 'version %d' % version2.id, True, 5
+ )
- tvars = {'page': page,
- 'versions': versions,
- 'version1': version1,
- 'version2': version2,
- 'diff': diff}
+ tvars = {'page': page, 'versions': versions, 'version1': version1, 'version2': version2, 'diff': diff}
return render(request, 'wiki/history.html', tvars)