# metadata_layer.py
"""An abstract way of representing incoming metadata and applying it
to Identifiers and Editions.
This acts as an intermediary between the third-party integrations
(which have this information in idiosyncratic formats) and the
model. Doing a third-party integration should be as simple as putting
the information into this format.
"""
from collections import defaultdict
from sqlalchemy.orm.session import Session
from dateutil.parser import parse
from sqlalchemy.sql.expression import and_, or_
from sqlalchemy.orm.exc import (
NoResultFound,
)
from sqlalchemy.orm import aliased
import csv
import datetime
import logging
import re
from pymarc import MARCReader
from .classifier import Classifier
from .util import LanguageCodes
from .util.http import RemoteIntegrationException
from .util.personal_names import name_tidy
from .util.median import median
from .model import (
get_one,
get_one_or_create,
CirculationEvent,
Classification,
Collection,
Contributor,
CoverageRecord,
DataSource,
DeliveryMechanism,
Edition,
Equivalency,
Hyperlink,
Identifier,
License,
LicensePool,
LicensePoolDeliveryMechanism,
LinkRelations,
Subject,
PresentationCalculationPolicy,
RightsStatus,
Representation,
Resource,
Timestamp,
Work,
)
from .model.configuration import ExternalIntegrationLink
from .classifier import NO_VALUE, NO_NUMBER
from .analytics import Analytics
from .util.personal_names import display_name_to_sort_name
from .util.datetime_helpers import strptime_utc, to_utc, utc_now
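# A rough sketch of the intended flow (hypothetical values): a third-party
# importer translates its record into the *Data objects defined below before
# they are applied to Identifiers and Editions.
#
#     identifier = IdentifierData(Identifier.ISBN, "9780000000000")
#     subject = SubjectData(Subject.DDC, "813.54", weight=100)
#     contributor = ContributorData(
#         display_name="Jane Doe", roles=[Contributor.AUTHOR_ROLE]
#     )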
class ReplacementPolicy(object):
"""How serious should we be about overwriting old metadata with
this new metadata?
"""
def __init__(
self,
identifiers=False,
subjects=False,
contributions=False,
links=False,
formats=False,
rights=False,
link_content=False,
mirrors=None,
content_modifier=None,
analytics=None,
http_get=None,
even_if_not_apparently_updated=False,
presentation_calculation_policy=None
):
self.identifiers = identifiers
self.subjects = subjects
self.contributions = contributions
self.links = links
self.rights = rights
self.formats = formats
self.link_content = link_content
self.even_if_not_apparently_updated = even_if_not_apparently_updated
self.mirrors = mirrors
self.content_modifier = content_modifier
self.analytics = analytics
self.http_get = http_get
self.presentation_calculation_policy = (
presentation_calculation_policy or
PresentationCalculationPolicy()
)
@classmethod
def from_license_source(cls, _db, **args):
"""When gathering data from the license source, overwrite all old data
from this source with new data from the same source. Also
overwrite an old rights status with an updated status and update
the list of available formats. Log availability changes to the
configured analytics services.
"""
return cls(
identifiers=True,
subjects=True,
contributions=True,
links=True,
rights=True,
formats=True,
analytics=Analytics(_db),
**args
)
@classmethod
def from_metadata_source(cls, **args):
"""When gathering data from a metadata source, overwrite all old data
from this source, but do not overwrite the rights status or
the available formats. License sources are the authority on rights
and formats, and metadata sources have no say in the matter.
"""
return cls(
identifiers=True,
subjects=True,
contributions=True,
links=True,
rights=False,
formats=False,
**args
)
@classmethod
def append_only(cls, **args):
"""Don't overwrite any information, just append it.
This should probably never be used.
"""
return cls(
identifiers=False,
subjects=False,
contributions=False,
links=False,
rights=False,
formats=False,
**args
)
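# A minimal sketch of choosing a policy, assuming an active database session
# `_db` (the helper name is purely illustrative): license sources may
# overwrite rights and formats, metadata sources may not.
def _example_replacement_policies(_db):
    """Illustrative only: contrast the two main factory policies."""
    license_policy = ReplacementPolicy.from_license_source(_db)
    metadata_policy = ReplacementPolicy.from_metadata_source()
    # A license source is authoritative for rights and formats...
    assert license_policy.rights and license_policy.formats
    # ...while a metadata source has no say in either.
    assert not metadata_policy.rights and not metadata_policy.formats
    return license_policy, metadata_policy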
class SubjectData(object):
def __init__(self, type, identifier, name=None, weight=1):
self.type = type
# Because subjects are sometimes evaluated according to keyword
# matching, it's important that any leading or trailing white
# space is removed during import.
self.identifier = identifier
if identifier:
self.identifier = identifier.strip()
self.name = name
if name:
self.name = name.strip()
self.weight = weight
@property
def key(self):
return self.type, self.identifier, self.name, self.weight
def __repr__(self):
return '<SubjectData type="%s" identifier="%s" name="%s" weight=%d>' % (
self.type, self.identifier, self.name, self.weight
)
class ContributorData(object):
def __init__(self, sort_name=None, display_name=None,
family_name=None, wikipedia_name=None, roles=None,
lc=None, viaf=None, biography=None, aliases=None, extra=None):
self.sort_name = sort_name
self.display_name = display_name
self.family_name = family_name
self.wikipedia_name = wikipedia_name
if roles is None:
roles = Contributor.AUTHOR_ROLE
if not isinstance(roles, list):
roles = [roles]
self.roles = roles
self.lc = lc
self.viaf = viaf
self.biography = biography
self.aliases = aliases or []
# extra is a dictionary of stuff like birthdates
self.extra = extra or dict()
# TODO: consider if it's time for ContributorData to connect back to Contributions
def __repr__(self):
return '<ContributorData sort="%s" display="%s" family="%s" wiki="%s" roles=%r lc=%s viaf=%s>' % (self.sort_name, self.display_name, self.family_name, self.wikipedia_name, self.roles, self.lc, self.viaf)
@classmethod
def from_contribution(cls, contribution):
"""Create a ContributorData object from a data-model Contribution
object.
"""
c = contribution.contributor
return cls(
sort_name=c.sort_name,
display_name=c.display_name,
family_name=c.family_name,
wikipedia_name=c.wikipedia_name,
lc=c.lc,
viaf=c.viaf,
biography=c.biography,
aliases=c.aliases,
roles=[contribution.role]
)
@classmethod
def lookup(cls, _db, sort_name=None, display_name=None, lc=None,
viaf=None):
"""Create a (potentially synthetic) ContributorData based on
the best available information in the database.
:return: A ContributorData.
"""
clauses = []
if sort_name:
clauses.append(Contributor.sort_name==sort_name)
if display_name:
clauses.append(Contributor.display_name==display_name)
if lc:
clauses.append(Contributor.lc==lc)
if viaf:
clauses.append(Contributor.viaf==viaf)
if not clauses:
raise ValueError("No Contributor information provided!")
or_clause = or_(*clauses)
contributors = _db.query(Contributor).filter(or_clause).all()
if len(contributors) == 0:
# We have no idea who this person is.
return None
# We found at least one matching Contributor. Let's try to
# build a composite ContributorData for the person.
values_by_field = defaultdict(set)
# If all the people we found share (e.g.) a VIAF field, then
# we can use that as a clue when doing a search -- anyone with
# that VIAF number is probably this person, even if their display
# name doesn't match.
for c in contributors:
if c.sort_name:
values_by_field['sort_name'].add(c.sort_name)
if c.display_name:
values_by_field['display_name'].add(c.display_name)
if c.lc:
values_by_field['lc'].add(c.lc)
if c.viaf:
values_by_field['viaf'].add(c.viaf)
# Use any passed-in values as default values for the
# ContributorData. Below, missing values may be filled in and
# inaccurate values may be replaced.
kwargs = dict(
sort_name=sort_name,
display_name=display_name,
lc=lc,
viaf=viaf
)
for k, values in list(values_by_field.items()):
if len(values) == 1:
# All the Contributors we found have the same
# value for this field. We can use it.
kwargs[k] = list(values)[0]
return ContributorData(roles=[], **kwargs)
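# A minimal usage sketch for lookup() (hypothetical VIAF number): any one of
# sort_name, display_name, lc, or viaf seeds the search, and the result is a
# synthetic ContributorData built from whatever the matching Contributors
# agree on, or None if nobody matches.
#
#     author = ContributorData.lookup(_db, viaf="102333412")
#     if author is not None:
#         print(author.sort_name, author.display_name)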
def apply(self, destination, replace=None):
""" Update the passed-in Contributor-type object with this
ContributorData's information.
:param destination: The Contributor or ContributorData object to
write this ContributorData object's metadata to.
:param replace: Replacement policy (not currently used).
:return: the possibly changed Contributor object and a flag of whether it's been changed.
"""
log = logging.getLogger("Abstract metadata layer")
log.debug("Applying %r (%s) into %r (%s)", self, self.viaf, destination, destination.viaf)
made_changes = False
if self.sort_name and self.sort_name != destination.sort_name:
destination.sort_name = self.sort_name
made_changes = True
existing_aliases = set(destination.aliases)
new_aliases = list(destination.aliases)
for name in [self.sort_name] + self.aliases:
if name != destination.sort_name and name not in existing_aliases:
new_aliases.append(name)
made_changes = True
if new_aliases != destination.aliases:
destination.aliases = new_aliases
made_changes = True
for k, v in list(self.extra.items()):
if not k in destination.extra:
destination.extra[k] = v
if self.lc and self.lc != destination.lc:
destination.lc = self.lc
made_changes = True
if self.viaf and self.viaf != destination.viaf:
destination.viaf = self.viaf
made_changes = True
if (self.family_name and
self.family_name != destination.family_name):
destination.family_name = self.family_name
made_changes = True
if (self.display_name and
self.display_name != destination.display_name):
destination.display_name = self.display_name
made_changes = True
if (self.wikipedia_name and
self.wikipedia_name != destination.wikipedia_name):
destination.wikipedia_name = self.wikipedia_name
made_changes = True
if (self.biography and
self.biography != destination.biography):
destination.biography = self.biography
made_changes = True
# TODO: Contributor.merge_into also looks at
# contributions. Could maybe extract contributions from roles,
# but not sure it'd be useful.
return destination, made_changes
def find_sort_name(self, _db, identifiers, metadata_client):
"""Try as hard as possible to find this person's sort name.
"""
log = logging.getLogger("Abstract metadata layer")
if self.sort_name:
# log.debug(
# "%s already has a sort name: %s",
# self.display_name,
# self.sort_name
# )
return True
if not self.display_name:
raise ValueError(
"Cannot find sort name for a contributor with no display name!"
)
# Is there a contributor already in the database with this
# exact sort name? If so, use their display name.
# If not, take our best guess based on the display name.
sort_name = self.display_name_to_sort_name_from_existing_contributor(
_db, self.display_name)
if sort_name:
self.sort_name = sort_name
return True
# Time to break out the big guns. Ask the metadata wrangler
# if it can find a sort name for this display name.
if metadata_client:
try:
sort_name = self.display_name_to_sort_name_through_canonicalizer(
_db, identifiers, metadata_client
)
except RemoteIntegrationException as e:
# There was some kind of problem with the metadata
# wrangler. Act as though no metadata wrangler had
# been provided.
log = logging.getLogger("Abstract metadata layer")
log.error(
"Metadata client exception while determining sort name for %s",
self.display_name, exc_info=e
)
if sort_name:
self.sort_name = sort_name
return True
# If there's still no sort name, take our best guess based
# on the display name.
self.sort_name = display_name_to_sort_name(self.display_name)
return (self.sort_name is not None)
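# A minimal sketch of the fallback chain above (hypothetical name, no
# metadata wrangler configured): an exact match against existing Contributors
# is tried first, then the canonicalizer if one is available, and finally a
# local guess derived from the display name.
#
#     contributor = ContributorData(display_name="Jane Q. Public")
#     contributor.find_sort_name(_db, identifiers=[], metadata_client=None)
#     # contributor.sort_name is now a best guess such as "Public, Jane Q."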
@classmethod
def display_name_to_sort_name_from_existing_contributor(cls, _db, display_name):
"""Find the sort name for this book's author, assuming it's easy.
'Easy' means we already have an established sort name for a
Contributor with this exact display name.
If it's not easy, this will be taken care of later with a call to
the metadata wrangler's author canonicalization service.
If we have a copy of this book in our collection (the only
time an external list item is relevant), this will probably be
easy.
"""
contributors = _db.query(Contributor).filter(
Contributor.display_name==display_name).filter(
Contributor.sort_name != None).all()
if contributors:
log = logging.getLogger("Abstract metadata layer")
log.debug(
"Determined that sort name of %s is %s based on previously existing contributor",
display_name,
contributors[0].sort_name
)
return contributors[0].sort_name
return None
def _display_name_to_sort_name(
self, _db, metadata_client, identifier_obj
):
response = metadata_client.canonicalize_author_name(
identifier_obj, self.display_name)
sort_name = None
if isinstance(response, (bytes, str)):
sort_name = response
else:
log = logging.getLogger("Abstract metadata layer")
if (response.status_code == 200
and response.headers['Content-Type'].startswith('text/plain')):
sort_name = response.content
log.info(
"Canonicalizer found sort name for %r: %s => %s",
identifier_obj, self.display_name, sort_name
)
else:
log.warning(
"Canonicalizer could not find sort name for %r/%s",
identifier_obj, self.display_name
)
return sort_name
def display_name_to_sort_name_through_canonicalizer(
self, _db, identifiers, metadata_client):
sort_name = None
for identifier in identifiers:
if identifier.type != Identifier.ISBN:
continue
identifier_obj, ignore = identifier.load(_db)
sort_name = self._display_name_to_sort_name(
_db, metadata_client, identifier_obj
)
if sort_name:
break
if not sort_name:
sort_name = self._display_name_to_sort_name(
_db, metadata_client, None
)
return sort_name
class IdentifierData(object):
def __init__(self, type, identifier, weight=1):
self.type = type
self.weight = weight
self.identifier = identifier
def __repr__(self):
return '<IdentifierData type="%s" identifier="%s" weight="%s">' % (
self.type, self.identifier, self.weight
)
def load(self, _db):
return Identifier.for_foreign_id(
_db, self.type, self.identifier
)
class LinkData(object):
def __init__(self, rel, href=None, media_type=None, content=None,
thumbnail=None, rights_uri=None, rights_explanation=None,
original=None, transformation_settings=None):
if not rel:
raise ValueError("rel is required")
if not href and not content:
raise ValueError("Either href or content is required")
self.rel = rel
self.href = href
self.media_type = media_type
self.content = content
self.thumbnail = thumbnail
# This handles content sources like unglue.it that have rights for each link
# rather than each edition, and rights for cover images.
self.rights_uri = rights_uri
self.rights_explanation = rights_explanation
# If this LinkData is a derivative, it may also contain the original link
# and the settings used to transform the original into the derivative.
self.original = original
self.transformation_settings = transformation_settings or {}
@property
def guessed_media_type(self):
"""If the media type of a link is unknown, take a guess."""
if self.media_type:
# We know.
return self.media_type
if self.href:
# Take a guess.
return Representation.guess_url_media_type_from_path(self.href)
# No idea.
# TODO: We might be able to take a further guess based on the
# content and the link relation.
return None
def __repr__(self):
if self.content:
content = ", %d bytes content" % len(self.content)
else:
content = ''
if self.thumbnail:
thumbnail = ', has thumbnail'
else:
thumbnail = ''
return '<LinkData: rel="%s" href="%s" media_type=%r%s%s>' % (
self.rel, self.href, self.media_type, thumbnail,
content
)
def mirror_type(self):
"""Returns the type of mirror that should be used for the link.
"""
if self.rel in [Hyperlink.IMAGE, Hyperlink.THUMBNAIL_IMAGE]:
return ExternalIntegrationLink.COVERS
if self.rel == Hyperlink.OPEN_ACCESS_DOWNLOAD:
return ExternalIntegrationLink.OPEN_ACCESS_BOOKS
elif self.rel == Hyperlink.GENERIC_OPDS_ACQUISITION:
return ExternalIntegrationLink.PROTECTED_ACCESS_BOOKS
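# A minimal sketch (hypothetical URLs) of describing a cover image and its
# thumbnail with LinkData, and where such links get mirrored.
def _example_cover_link_data():
    """Illustrative only: a cover image link with an attached thumbnail."""
    thumbnail = LinkData(
        rel=Hyperlink.THUMBNAIL_IMAGE,
        href="http://example.org/cover-thumb.png",
        media_type=Representation.PNG_MEDIA_TYPE,
    )
    cover = LinkData(
        rel=Hyperlink.IMAGE,
        href="http://example.org/cover.jpg",
        media_type="image/jpeg",
        thumbnail=thumbnail,
    )
    # Image links are mirrored to the covers bucket.
    assert cover.mirror_type() == ExternalIntegrationLink.COVERS
    return cover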
class MeasurementData(object):
def __init__(self,
quantity_measured,
value,
weight=1,
taken_at=None):
if not quantity_measured:
raise ValueError("quantity_measured is required.")
if value is None:
raise ValueError("measurement value is required.")
self.quantity_measured = quantity_measured
if not isinstance(value, (int, float)):
value = float(value)
self.value = value
self.weight = weight
self.taken_at = taken_at or utc_now()
def __repr__(self):
return '<MeasurementData quantity="%s" value=%f weight=%d taken=%s>' % (
self.quantity_measured, self.value, self.weight, self.taken_at
)
class FormatData(object):
def __init__(self, content_type, drm_scheme, link=None, rights_uri=None):
self.content_type = content_type
self.drm_scheme = drm_scheme
if link and not isinstance(link, LinkData):
raise TypeError(
"Expected LinkData object, got %s" % type(link)
)
self.link = link
self.rights_uri = rights_uri
if ((not self.rights_uri) and self.link and self.link.rights_uri):
self.rights_uri = self.link.rights_uri
class LicenseData(object):
def __init__(self, identifier, checkout_url, status_url, expires=None, remaining_checkouts=None,
concurrent_checkouts=None):
self.identifier = identifier
self.checkout_url = checkout_url
self.status_url = status_url
self.expires = expires
self.remaining_checkouts = remaining_checkouts
self.concurrent_checkouts = concurrent_checkouts
class TimestampData(object):
CLEAR_VALUE = Timestamp.CLEAR_VALUE
def __init__(self, start=None, finish=None, achievements=None,
counter=None, exception=None):
"""A constructor intended to be used by a service to customize its
eventual Timestamp.
service, service_type, and collection cannot be set through
this constructor, because they are generally not under the
control of the code that runs the service. They are set
afterwards, in finalize().
:param start: The time that the service should be considered to
have started running.
:param finish: The time that the service should be considered
to have stopped running.
:param achievements: A string describing what was achieved by the
service.
:param counter: A single integer item of state representing the point
at which the service left off.
:param exception: A traceback representing an exception that stopped
the progress of the service.
"""
# These are set by finalize().
self.service = None
self.service_type = None
self.collection_id = None
self.start = start
self.finish = finish
self.achievements = achievements
self.counter = counter
self.exception = exception
@property
def is_failure(self):
"""Does this TimestampData represent an unrecoverable failure?"""
return self.exception not in (None, self.CLEAR_VALUE)
@property
def is_complete(self):
"""Does this TimestampData represent an operation that has
completed?
An operation is completed if it has failed, or if the time of its
completion is known.
"""
return self.is_failure or self.finish not in (None, self.CLEAR_VALUE)
def finalize(self, service, service_type, collection, start=None,
finish=None, achievements=None, counter=None,
exception=None):
"""Finalize any values that were not set during the constructor.
This is intended to be run by the code that originally ran the
service.
The given values for `start`, `finish`, `achievements`,
`counter`, and `exception` will be used only if the service
did not specify its own values for those fields.
"""
self.service = service
self.service_type = service_type
if collection is None:
self.collection_id = None
else:
self.collection_id = collection.id
if self.start is None:
self.start = start
if self.finish is None:
if finish is None:
finish = utc_now()
self.finish = finish
if self.start is None:
self.start = self.finish
if self.counter is None:
self.counter = counter
if self.exception is None:
self.exception = exception
def collection(self, _db):
return get_one(_db, Collection, id=self.collection_id)
def apply(self, _db):
if any(x is None for x in [self.service, self.service_type]):
raise ValueError(
"Not enough information to write TimestampData to the database."
)
return Timestamp.stamp(
_db, self.service, self.service_type, self.collection(_db),
self.start, self.finish, self.achievements, self.counter,
self.exception
)
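# A minimal sketch of the intended lifecycle, assuming an active database
# session `_db` and an optional Collection: the service records what it knows,
# the runner finalizes the rest, and apply() writes the Timestamp.
def _example_timestamp_lifecycle(_db, collection=None):
    progress = TimestampData(achievements="Processed 100 items.", counter=100)
    progress.finalize(
        # Timestamp.MONITOR_TYPE is assumed to be one of the model's
        # service-type constants; the service name is hypothetical.
        service="Hypothetical monitor",
        service_type=Timestamp.MONITOR_TYPE,
        collection=collection,
    )
    assert progress.is_complete
    return progress.apply(_db)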
class MetaToModelUtility(object):
"""
Contains functionality common to both CirculationData and Metadata.
"""
log = logging.getLogger("Abstract metadata layer - mirror code")
def mirror_link(self, model_object, data_source, link, link_obj, policy):
"""Retrieve a copy of the given link and make sure it gets
mirrored. If it's a full-size image, create a thumbnail and
mirror that too.
The model_object can be either a pool or an edition.
"""
if link_obj.rel not in Hyperlink.MIRRORED:
# We only mirror certain relations locally: book content and cover images.
if link.href:
# The log message only makes sense if the resource is
# hosted elsewhere.
self.log.info("Not mirroring %s: rel=%s", link.href, link_obj.rel)
return
if (link.rights_uri
and link.rights_uri == RightsStatus.IN_COPYRIGHT):
self.log.info(
"Not mirroring %s: rights status=%s" % (
link.href, link.rights_uri
)
)
return
mirror_type = link.mirror_type()
if mirror_type in policy.mirrors:
mirror = policy.mirrors[mirror_type]
if not mirror:
return
else:
self.log.info(
"No mirror uploader with key %s found" % mirror_type
)
return
http_get = policy.http_get
_db = Session.object_session(link_obj)
original_url = link.href
self.log.info("About to mirror %s" % original_url)
pools = []
edition = None
title = None
identifier = None
if model_object:
if isinstance(model_object, LicensePool):
pools = [model_object]
identifier = model_object.identifier
if (identifier and identifier.primarily_identifies and identifier.primarily_identifies[0]):
edition = identifier.primarily_identifies[0]
elif isinstance(model_object, Edition):
pools = model_object.license_pools
identifier = model_object.primary_identifier
edition = model_object
if edition and edition.title:
title = edition.title
else:
title = getattr(self, 'title', None) or None
if ((not identifier) or (link_obj.identifier and identifier != link_obj.identifier)):
# Sanity check failed: this link isn't associated with the object we're mirroring for.
self.log.warning("Tried to mirror a link with an invalid identifier %r" % identifier)
return
max_age = None
if policy.link_content:
# We want to fetch the representation again, even if we
# already have a recent usable copy. If we fetch it and it
# hasn't changed, we'll keep using the one we have.
max_age = 0
# This will fetch a representation of the original and
# store it in the database.
representation, is_new = Representation.get(
_db, link.href, do_get=http_get,
presumed_media_type=link.media_type,
max_age=max_age,
)
# Make sure the (potentially newly-fetched) representation is
# associated with the resource.
link_obj.resource.representation = representation
# If we couldn't fetch this representation, don't mirror it,
# and if this was an open/protected access link, then suppress the associated
# license pool until someone fixes it manually.
# The license pool to suppress will be either the passed-in model_object (if it's of type pool),
# or the license pool associated with the passed-in model object (if it's of type edition).
if representation.fetch_exception:
if pools and link.rel == Hyperlink.OPEN_ACCESS_DOWNLOAD:
for pool in pools:
pool.suppressed = True
pool.license_exception = "Fetch exception: %s" % representation.fetch_exception
self.log.error(pool.license_exception)
return
# If we fetched the representation and it hasn't changed,
# the previously mirrored version is fine. Don't mirror it
# again.
if representation.status_code == 304 and representation.mirror_url:
self.log.info(
"Representation has not changed, assuming mirror at %s is up to date.", representation.mirror_url
)
return
if representation.status_code // 100 not in (2,3):
self.log.info(
"Representation %s gave %s status code, not mirroring.",
representation.url, representation.status_code
)
return
if policy.content_modifier:
policy.content_modifier(representation)
# The metadata may have some idea about the media type for this
# LinkObject, but it could be wrong. If the representation we
# actually just saw is a mirrorable media type, that takes
# precedence. If we were expecting this link to be mirrorable
# but we actually saw something that's not, assume our original
# metadata was right and the server told us the wrong media type.
if representation.media_type and representation.mirrorable_media_type:
link.media_type = representation.media_type
if not representation.mirrorable_media_type:
if link.media_type:
self.log.info("Saw unsupported media type for %s: %s. Assuming original media type %s is correct",
representation.url, representation.media_type, link.media_type)
representation.media_type = link.media_type
else:
self.log.info("Not mirroring %s: unsupported media type %s",
representation.url, representation.media_type)
return
# Determine the best URL to use when mirroring this
# representation.
if link.media_type in Representation.BOOK_MEDIA_TYPES or \
link.media_type in Representation.AUDIOBOOK_MEDIA_TYPES:
url_title = title or identifier.identifier
extension = representation.extension()
mirror_url = mirror.book_url(
identifier,
data_source=data_source,
title=url_title,
extension=extension,
open_access=link.rel == Hyperlink.OPEN_ACCESS_DOWNLOAD
)
else:
filename = representation.default_filename(
link_obj, representation.media_type
)
mirror_url = mirror.cover_image_url(
data_source, identifier, filename
)
# Mirror it.
collection = pools[0].collection if pools else None
mirror.mirror_one(representation, mirror_to=mirror_url, collection=collection)
# If we couldn't mirror an open/protected access link representation, suppress
# the license pool until someone fixes it manually.
if representation.mirror_exception:
if pools and link.rel == Hyperlink.OPEN_ACCESS_DOWNLOAD:
for pool in pools:
pool.suppressed = True
pool.license_exception = "Mirror exception: %s" % representation.mirror_exception
self.log.error(pool.license_exception)
if link_obj.rel == Hyperlink.IMAGE:
# Create and mirror a thumbnail.
thumbnail_filename = representation.default_filename(
link_obj, Representation.PNG_MEDIA_TYPE
)
thumbnail_url = mirror.cover_image_url(
data_source, identifier, thumbnail_filename,
Edition.MAX_THUMBNAIL_HEIGHT
)
thumbnail, is_new = representation.scale(
max_height=Edition.MAX_THUMBNAIL_HEIGHT,
max_width=Edition.MAX_THUMBNAIL_WIDTH,
destination_url=thumbnail_url,
destination_media_type=Representation.PNG_MEDIA_TYPE,
force=True
)
if is_new:
# A thumbnail was created distinct from the original
# image. Mirror it as well.
mirror.mirror_one(thumbnail, mirror_to=thumbnail_url, collection=collection)
if link_obj.rel in Hyperlink.SELF_HOSTED_BOOKS:
# If we mirrored book content successfully, remove it from
# the database to save space. We do keep images in case we
# ever need to resize them or mirror them elsewhere.
if representation.mirrored_at and not representation.mirror_exception:
representation.content = None
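# A rough sketch of a direct call to mirror_link() (all collaborators here are
# hypothetical: edition, data_source, link, link_obj, cover_uploader,
# http_get_fn): the ReplacementPolicy supplies the mirror uploaders keyed by
# purpose, plus the HTTP fetch hook consulted above.
#
#     policy = ReplacementPolicy.from_license_source(
#         _db,
#         mirrors={ExternalIntegrationLink.COVERS: cover_uploader},
#         http_get=http_get_fn,
#     )
#     circulation_data.mirror_link(edition, data_source, link, link_obj, policy)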
class CirculationData(MetaToModelUtility):
"""Information about actual copies of a book that can be delivered to
patrons.
As distinct from Metadata, which is a container for information
about a book.
Basically,
Metadata : Edition :: CirculationData : LicensePool
"""
log = logging.getLogger(
"Abstract metadata layer - Circulation data"
)
def __init__(
self,
data_source,
primary_identifier,
licenses_owned=None,
licenses_available=None,
licenses_reserved=None,
patrons_in_hold_queue=None,
formats=None,
default_rights_uri=None,
links=None,
licenses=None,
last_checked=None,
):
"""Constructor.
:param data_source: The authority providing the lending licenses.
This may be a DataSource object or the name of the data source.
:param primary_identifier: An Identifier or IdentifierData representing
how the lending authority distinguishes this book from others.
"""
self._data_source = data_source
if isinstance(self._data_source, DataSource):
self.data_source_obj = self._data_source
self.data_source_name = self.data_source_obj.name
else:
self.data_source_obj = None
self.data_source_name = data_source
if isinstance(primary_identifier, Identifier):
self.primary_identifier_obj = primary_identifier
self._primary_identifier = IdentifierData(
primary_identifier.type, primary_identifier.identifier
)
else:
self.primary_identifier_obj = None
self._primary_identifier = primary_identifier
self.licenses_owned = licenses_owned
self.licenses_available = licenses_available
self.licenses_reserved = licenses_reserved
self.patrons_in_hold_queue = patrons_in_hold_queue
# If no 'last checked' data was provided, assume the data was
# just gathered.
self.last_checked = last_checked or utc_now()
# Each entry is a FormatData describing a delivery option: content type (e.g. EPUB or PDF), DRM scheme, and an optional link.
self.formats = formats or []
self.default_rights_uri = None
self.set_default_rights_uri(data_source_name=self.data_source_name, default_rights_uri=default_rights_uri)
self.__links = None
self.links = links
# Information about individual terms for each license in a pool.
self.licenses = licenses or []
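# A minimal construction sketch (hypothetical identifier and counts): the
# data source may be given by name and the primary identifier as an
# IdentifierData, exactly as the constructor above allows.
#
#     circulation = CirculationData(
#         data_source="Hypothetical Distributor",
#         primary_identifier=IdentifierData(Identifier.ISBN, "9780000000000"),
#         licenses_owned=5,
#         licenses_available=4,
#         licenses_reserved=0,
#         patrons_in_hold_queue=0,
#     )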
@property
def links(self):
return self.__links
@links.setter
def links(self, arg_links):
""" If got passed all links, undiscriminately, filter out to only those relevant to
pools (the rights-related links).
"""
# start by deleting any old links
self.__links = []
if not arg_links:
return
for link in arg_links:
if link.rel in Hyperlink.CIRCULATION_ALLOWED:
# TODO: what about Hyperlink.SAMPLE?
# only accept the types of links relevant to pools